seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21366302281 | """
URL: https://www.lintcode.com/problem/invert-binary-tree/description
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
# My own solution, simple recursion.
class Solution:
"""
@param root: a TreeNode, the root of the binary tree
@return: nothing
"""
def invertBinaryTree(self, root):
# write your code here
if root is None:
return
self.invertBinaryTree(root.left)
self.invertBinaryTree(root.right)
root.left, root.right = root.right, root.left
# I referred to a solution provided by a student on Jiuzhang.com. It uses BFS, very simple. Next time I should
# think about using BFS first when facing problems related to trees. I was always thinking about DFS and cannot
# figure out a way to do it non-recursively.
from collections import deque
class Solution:
"""
@param root: a TreeNode, the root of the binary tree
@return: nothing
"""
def invertBinaryTree(self, root):
# write your code here
if root is None:
return
queue = deque()
queue.append(root)
while len(queue) > 0:
node = queue.popleft()
node.left, node.right = node.right, node.left
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
| simonfqy/SimonfqyGitHub | lintcode/easy/175_invert_binary_tree.py | 175_invert_binary_tree.py | py | 1,448 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 37,
"usage_type": "call"
}
] |
22450452976 | """
Team 46
Haoyue Xie 1003068 @Melbourne
Jiayu Li 713551 @Melbourne
Ruqi Li 1008342 @Melbourne
Yi Zhang 1032768 @Melbourne
Zimeng Jia 978322 @Hebei, China
"""
import json
from shapely.geometry import shape, Point
#current_region is a dictionary
def streaming_region(current_region, tweet):
if current_region != {}:
return current_region
else:
area_list = []
with open("City_geojson.json") as f:
data = json.load(f)
for area in data["features"]:
if area["geometry"] != None:
polygon = shape(area["geometry"])
area_list.append([polygon,area["properties"]])
if tweet["coordinates"] != None:
point = Point(tweet["coordinates"]["coordinates"][0],tweet["coordinates"]["coordinates"][1])
for plg in area_list:
if plg[0].contains(point):
return plg[1]
print("no sa4 area defined")
elif tweet["place"] != None:
coor1 = tweet["place"]["bounding_box"]["coordinates"][0][0]
coor2 = tweet["place"]["bounding_box"]["coordinates"][0][2]
point = Point((coor1[0]+coor2[0])/2,(coor1[1]+coor2[1])/2)
for plg in area_list:
if plg[0].contains(point):
return plg[1]
print("no sa4 area defined")
else:
print("no location info!")
return {}
| yzzhan4/COMP90024-AuzLife | TwitterStreaming/streaming_region.py | streaming_region.py | py | 1,436 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.shape",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.Point",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "shapely.geome... |
38336151204 | from streamlit_webrtc import webrtc_streamer
import av
import cv2
cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
class VideoProcessor:
def recv(self, frame):
frm = frame.to_ndarray(format="bgr24")
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
font = cv2.FONT_HERSHEY_COMPLEX
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
rand_lvl = [random.randrange(70, 100) for i in range(0, 50)]
frame_cntr = 0
while True:
ret, frame = frm
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
i = 0
for (x, y, z, h) in faces:
frame_cntr += 1
cv2.rectangle(frm, (x, y), (x + z, y + h), (255, 0, 0), 2)
if frame_cntr < 100:
cv2.putText(frame, f'Вы junior-разработчик на:{random.randrange(0, 100)}%', (x - 6, y), font, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
else:
i += 1
cv2.putText(frame, f'Вы junior-разработчик на:{rand_lvl[i]}%', (x - 6, y), font, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
x, imag = cv2.imencode('.jpg', frame)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + imag.tobytes() + b'\r\n\r\n')
for x, y, w, h in faces:
cv2.rectangle(frm, (x, y), (x + w, y + h), (0, 255, 0), 3)
return av.VideoFrame.from_ndarray(frm, format='bgr24')
webrtc_streamer(key="key", video_processor_factory=VideoProcessor)
| NeTRooo/CyberGarden2022-Atom | rtc_test.py | rtc_test.py | py | 1,710 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_COMPLEX",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 16,
"usage_type": "call"
},
{
"api_name... |
37655164252 | from django.shortcuts import render
from django.http import HttpResponse
from hello.models import User
import random
# Create your views here.
count = 0
def World(request):
return HttpResponse('this is app2')
def Add_user(request):
global count
count += 1
user = User()
user.user_age = count
user.user_name = random.choice(['Wang', 'Chan', 'Liu', 'Lin'])
user.user_gender = not random.getrandbits(1)
user.save()
return render(request, ('add_user.html'))
def Get_user(request):
user1 = User.objects.values()
context = {
"sqllist":user1
}
print(user1)
return render(request, ('user_list.html'), context=context)
def Update_user(request):
pkv = User.objects.values_list()
# randompk = pkv[random.randint(0,len(pkv) -1)][0]
# user = User.objects.get(pk = randompk)
# user.user_name = 'Change'
# user.save()
# response = 'date has been updated'
# return HttpResponse(response)
pkv = len(User.objects.values_list())
print(pkv)
if(pkv > 1):
pkv = User.objects.values_list()
randompk = pkv[random.randint(0,len(pkv) -1)][0]
user = User.objects.get(pk = randompk)
user.user_name = 'Change'
user.save()
response = 'date has been updated'
return HttpResponse(response)
elif(pkv == 1) :
a = User.objects.values_list()[0][0]
user = User.objects.get(pk = a)
user.user_name = 'Change'
user.save()
response = 'date has been updated'
return HttpResponse(response)
else:
return HttpResponse('no information')
def Del_All(request):
pkv = len(User.objects.values_list())
print(pkv)
if(pkv > 1):
pkv = User.objects.values_list()
randompk = pkv[random.randint(0,len(pkv) -1)][0]
user = User.objects.get(pk = randompk)
user.delete()
response = 'PK ' + str(randompk) + ' has been delete'
return HttpResponse(response)
elif(pkv == 1) :
a = User.objects.values_list()[0][0]
user = User.objects.get(pk = a)
user.delete()
response = 'the ' +str(a)+ ' date has been delete'
return HttpResponse(response)
else:
return HttpResponse('no information') | yy1110/Mydjango | django_first/app2/views.py | views.py | py | 2,338 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "hello.models.User",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.getran... |
26454404997 | import gc
import logging
import os
import glob
import pandas as pd
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import time
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
from math import exp
import numpy as np
torch.backends.cudnn.benchmark = True
from matplotlib import pyplot as plt
import matplotlib as mpl
import matplotlib.patches as patches
from matplotlib import pyplot as plt
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
from argoverse.visualization.visualize_sequences import viz_sequence
avm = ArgoverseMap()
num = 10
data_path="/datasets/argoverse/val/data"
infer_path="../../inn"
import os
import sys
sys.path.append("../ddn/")
sys.path.append("./")
import warnings
warnings.filterwarnings('ignore')
import torch
import numpy as np
import scipy.special
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from scipy.linalg import block_diag
from torch.utils.data import Dataset, DataLoader
#from bernstein import bernstesin_coeff_order10_new
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
from argoverse.visualization.visualize_sequences import viz_sequence
avm = ArgoverseMap()
def denoise(gt_x, gt_y, w = 7):
# denoising
gt_x_t = []
gt_y_t = []
for iq in range(len(gt_x)):
if iq >= w and iq + w <= len(gt_x):
gt_x_t.append(np.mean(gt_x[iq: iq + w]))
gt_y_t.append(np.mean(gt_y[iq: iq + w]))
elif iq < w:
okx = np.mean(gt_x[w: w + w])
gt_x_t.append(gt_x[0] + (okx - gt_x[0]) * (iq) / w)
oky = np.mean(gt_y[w: w + w])
gt_y_t.append(gt_y[0] + (oky - gt_y[0]) * (iq) / w)
else:
okx = np.mean(gt_x[len(gt_x) - w:len(gt_x) - w + w])
oky = np.mean(gt_y[len(gt_x) - w: len(gt_x) - w + w])
gt_x_t.append(okx + (gt_x[-1] - okx) * (w - (len(gt_x) - iq)) / w)
gt_y_t.append(oky + (gt_y[-1] - oky) * (w - (len(gt_y) - iq)) / w)
gt_x = gt_x_t
gt_y = gt_y_t
return gt_x, gt_y
from shapely.geometry.polygon import Polygon, Point
output_dir="../results/"
t_obs=20
dt=0.3
t_obs=20
pred=False
pred_array=None
batch_size = 512
dpi=100
w,h=512,512
res=0.5
paths = glob.glob(os.path.join(data_path, "*.csv"))
color = {
'polygon': '#e6cf93',
'polygon-outline': '#e6cf93',
'centerline': '#fceec7',
'agent': 'blue',
'av': 'grey',
'other': 'grey',
'outline': 'black'
}
color = {
'polygon': 'white',
'polygon-outline': 'white',
'centerline': 'white',
'agent': 'white',
'av': 'white',
'other': 'white',
'outline': 'black'
}
from tqdm import tqdm
for idx in tqdm(range(len(paths))):
if idx < 19:
continue
path = paths[idx]
dff = pd.read_csv(path)
city = dff['CITY_NAME'].values[0]
agent_df = dff[dff['OBJECT_TYPE'] == 'AGENT']
x_a = agent_df['X'].values
y_a = agent_df['Y'].values
x_a, y_a = denoise(x_a, y_a)
av_df = dff[dff['OBJECT_TYPE'] == 'AV']
x_av = av_df['X'].values
y_av = av_df['Y'].values
x_av, y_av = denoise(x_av, y_av)
others_df = dff[dff['OBJECT_TYPE'] == 'OTHERS']
others_dfs = np.array([v for k, v in others_df.groupby('TRACK_ID')], dtype=object)
x_o = {}
y_o = {}
for other_df in others_dfs:
x_other, y_other = other_df['X'].values, other_df['Y'].values
x_other, y_other = denoise(x_other, y_other)
x_o[other_df['TRACK_ID'].values[0]] = x_other
y_o[other_df['TRACK_ID'].values[0]] = other_df['Y'].values
# group by timestamp
dfs = [x for _, x in dff.groupby('TIMESTAMP')]
grids_lanes = np.zeros((20, h, w))
grids_obstacles = np.zeros((20, h, w))
grids_centerlines = np.zeros((20, h, w))
grids_agent = np.zeros((20, h, w))
total_successors = []
current = []
das_polygons = []
das_polygons_mp = []
das_ids = []
agent_polygons = []
others_polygons = []
for indd in range(0, 20):
lane_id = avm.get_nearest_centerline(np.array([x_a[indd],y_a[indd]]), city_name=city)[0].id
current.append(lane_id)
successors = avm.get_lane_segment_successor_ids(lane_id, city)
if successors == None:
continue
for successor in successors:
total_successors.append(successor)
successors_2d = avm.get_lane_segment_successor_ids(successor, city)
for successorr in successors_2d:
if successors_2d == None:
continue
total_successors.append(successorr)
polygons = [ avm.get_lane_segment_polygon(successor, city) for successor in successors]
current = np.unique(np.array(current))
total_successors = np.unique(np.array(total_successors))
for curr in current:
current_polygon = avm.get_lane_segment_polygon(curr, city)
das_polygons.append(current_polygon)
das_polygons_mp.append(avm.get_lane_segment_polygon(curr, city))
das_ids.append(curr)
# plt.fill(current_polygon[:, 0], current_polygon[:, 1], color='white', zorder=4)
for successor in total_successors :
polygon = avm.get_lane_segment_polygon(successor, city)
das_polygons.append(polygon)
das_polygons_mp.append(avm.get_lane_segment_polygon(successor, city))
das_ids.append(successor)
# plt.fill(polygon[:, 0], polygon[:, 1], color='white', zorder=4)
das_polygons_mp = np.array(das_polygons_mp)
x_off = 75
y_off = 75
points = np.array([[x_a[20] - x_off, y_a[20] + y_off],[x_a[20] + x_off, y_a[20] + y_off], [x_a[20] + x_off, y_a[20] - y_off],[x_a[20] - x_off, y_a[20] - y_off],[x_a[20] - x_off, y_a[20] + y_off]])
for ind, df in enumerate(dfs):
agent_df = df[df['OBJECT_TYPE'] == 'AGENT']
others_df = df[df['OBJECT_TYPE'] == 'OTHERS']
others_dfs = [x for _, x in others_df.groupby('TRACK_ID')]
av_df = df[df['OBJECT_TYPE'] == 'AV']
# agent
x_traj = agent_df['X'].values
y_traj = agent_df['Y'].values
offsets = [x_a[0], y_a[0]] # offsets for other agents
others_polyon = []
if ind < len(dfs) - 1:
x_off = 2 #0.75
y_off = 2.25 #1.25
points = np.array([[x_traj[0] - x_off, y_traj + y_off],[x_traj[0] + x_off, y_traj + y_off], [x_traj[0] + x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj + y_off]])
theta = np.arctan2((y_a[ind + 1] - y_a[ind]) , (x_a[ind + 1] - x_a[ind])) - np.pi/2
ww = np.zeros(points.shape)
A = np.matrix([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
points = points - np.array([x_traj[0], y_traj[0]])
for i,v in enumerate(points): ww[i] = A @ points[i]
ww[:, 0] += x_traj[0]
ww[:, 1] += y_traj[0]
try:
agent_polygons.append(Polygon(ww))
except:
print("AGENT problem")
for indoo, other in enumerate(others_dfs):
x_traj = other['X'].values
y_traj = other['Y'].values
indo = other['TRACK_ID'].values[0]
if ind < len(dfs) - 1 and ind < len(x_o[indo]) - 1 and ind < len(y_o[indo]) - 1:
x_off = 2
y_off = 2.25
points = np.array([[x_traj[0] - x_off, y_traj + y_off],[x_traj[0] + x_off, y_traj + y_off], [x_traj[0] + x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj + y_off]])
theta = np.arctan2((y_o[indo][ind + 1] - y_o[indo][ind]) , (x_o[indo][ind + 1] - x_o[indo][ind])) - np.pi/2
ww = np.zeros(points.shape)
A = np.matrix([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
points = points - np.array([x_traj[0], y_traj[0]])
for i,v in enumerate(points): ww[i] = A @ points[i]
ww[:, 0] += x_traj[0]
ww[:, 1] += y_traj[0]
try:
others_polyon.append(Polygon(ww))
except:
print("OTHERS")
others_polygons.append(others_polyon)
sample = np.zeros((h, w))
lx = x_a[20] - res*(h/2)
ly = y_a[20] - res*(w/2)
# seq_lane_props = avm.city_lane_centerlines_dict[city]
# for lane_id, lane_props in seq_lane_props.items():
# lane_cl = lane_props.centerline
# if (np.min(lane_cl[:, 0]) < x_max and np.min(lane_cl[:, 1]) < y_max and np.max(lane_cl[:, 0]) > x_min and np.max(lane_cl[:, 1]) > y_min):
# lane_centerlines.append(lane_cl)
for i in tqdm(range(h)):
for j in range(w):
px = lx + i * res
py = ly + j * res
point_xy = Point(px, py)
flag = 0
for k in range(len(das_polygons)):
if Polygon(das_polygons[k]).contains(point_xy):
flag = 1
sample[j,i] = flag
for k in range(20):
# get obstacle polygon
for l in range(len(others_polygons[k])):
if others_polygons[k][l].contains(point_xy):
grids_obstacles[k, j, i] = 1
# get agent polygon
if agent_polygons[k].contains(point_xy):
grids_agent[k, j, i] = 1
print("DONE")
print(grids_agent.shape)
for i in range(20): grids_lanes[i] = sample
print(str(infer_path) + "/das/{}.npy".format(idx))
np.save(str(infer_path) + "/das/{}.npy".format(idx), grids_lanes)
np.save(str(infer_path) + "/agents/{}.npy".format(idx), grids_agent)
np.save(str(infer_path) + "/others/{}.npy".format(idx), grids_obstacles)
| Vikr-182/ddn-forecasting | vis/infer.py | infer.py | py | 10,164 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.backends",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "argoverse.map_representation.map_api.ArgoverseMap",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 40,
"usage_type": "call"
},
{
... |
4413470363 | '''
xを数字とセットにした二次元配列を作る
sを数字に置き換えたものと、元のままのものの三次元配列にする
→二次元配列では取り扱いきれないので、遠慮なく三次元へ
ソートして、出す
'''
from collections import defaultdict
x = input()
n = int(input())
s = [input() for _ in range(n)]
new = defaultdict(dict)
for i in range(len(x)):
new[x[i]] = i
ans = []
for i in s:
inner = []
for j in i:
inner.append(new[j])
ans.append([inner, i])
ans.sort()
for i in ans:
print(i[-1])
| burioden/atcoder | submissions/abc219/c.py | c.py | py | 566 | python | ja | code | 4 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 13,
"usage_type": "call"
}
] |
73947800422 | from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('',views.ProductList,name='ProductList'),
path('productdetails',views.productdetails,name='productdetails'),
path('orderslist',views.OrdersList,name='OrdersList'),
path('addcolumns',views.AddColumns,name='AddColumns'),
path('addproduct',views.addproduct,name='addproduct'),
] | Fawazk/VofoxSolutions-test | vofox/purchase/urls.py | urls.py | py | 403 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
19453386854 | import requests #Requests é um biblioteca, um pacote de código. Para instalar usar: pip install requests
from tkinter import * #Pegando todas as informações da biblioteca tkinter.
def pegar_cotacoes():
requisicao = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
requisicao_dic = requisicao.json()
cotacao_dolar = requisicao_dic['USDBRL']['bid']
cotacao_euro = requisicao_dic['EURBRL']['bid']
cotacao_btc = requisicao_dic['BTCBRL']['bid']
texto = f'''
Dólar: {cotacao_dolar}
Euro: {cotacao_euro}
BTC: {cotacao_btc}'''
texto_cotacoes["text"] = texto #editanto o parâmetro text do texto_cotacoes
janela = Tk() #Criando uma janela com tk. TK é um código do tkinter que cria a janela.
janela.title("Cotação Atual das Moedas") #Adicionando o título da janela.
texto_orientecao = Label(janela, text="Clique no botão para ver as cotações das moedas.") #Um pedaço de texto dentro da janela é chamado de Label.
texto_orientecao.grid(column=0, row=0, padx=10, pady=10) #grid, usado para escolher a posição do texto. Pad é a distância do texto e o que será inserido depois.
botao = Button(janela, text="Buscar cotações Dólar/Euro/BTC", command=pegar_cotacoes) #Button está na biblioteca do tkinter. Janela, lugar onde o botão vai ficar. Command, comando que irá executar a função pegar_cotacoes.
botao.grid(column=0, row=1, padx=10, pady=10)
texto_cotacoes = Label(janela, text="")
texto_cotacoes.grid(column=0, row=2, padx=10, pady=10)
janela.mainloop() #mainloop deixa a janela exibida. Garante que a janela vai funcionar.
| jessicarios-DevOps/Tkinter-python | janela.py | janela.py | py | 1,628 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
}
] |
73011952423 | #-*- coding: utf-8 -*-
import csv
import os
import pymysql
import pandas as pd
# 一个根据pandas自动识别type来设定table的type
def make_table_sql(df):
columns = df.columns.tolist()
types = df.ftypes
# 添加id 制动递增主键模式
make_table = []
for item in columns:
if 'int' in types[item]:
char = item + ' INT'
elif 'float' in types[item]:
char = item + ' FLOAT'
elif 'object' in types[item]:
char = item + ' longtext'
elif 'datetime' in types[item]:
char = item + ' DATETIME'
make_table.append(char)
return ','.join(make_table)
# csv 格式输入 mysql 中
def csv2mysql(db_name, table_name, df):
# 创建database
cursor.execute('CREATE DATABASE IF NOT EXISTS {}'.format(db_name))
# 选择连接database
conn.select_db(db_name)
print("hello")
# 创建table
cursor.execute('DROP TABLE IF EXISTS {}'.format(table_name))
cursor.execute('CREATE TABLE {}({})'.format(table_name,make_table_sql(df)))
# 提取数据转list 这里有与pandas时间模式无法写入因此换成str 此时mysql上格式已经设置完成
# df['日期'] = df['日期'].astype('str')
values = df.values.tolist()
# 根据columns个数
s = ','.join(['%s' for _ in range(len(df.columns))])
# executemany批量操作 插入数据 批量操作比逐个操作速度快很多
cursor.executemany('INSERT INTO {} VALUES ({})'.format(table_name,s), values)
# 参数设置 DictCursor使输出为字典模式 连接到本地用户root 密码为kellydc
config = dict(host='localhost', user='root', password='kellydc',
cursorclass=pymysql.cursors.DictCursor
)
# 建立连接
conn = pymysql.Connect(**config)
# 自动确认commit True
conn.autocommit(1)
# 设置光标
cursor = conn.cursor()
df = pd.read_csv('/Users/daven/Github/MedDataPro/sampleData/clear/clear_set.csv', encoding='utf-8', low_memory=False)
df = df.astype(object).where(pd.notnull(df), None)
# print(df.head())
csv2mysql("MedData","RM_Report", df)
cursor.execute('SELECT * FROM RM_Report LIMIT 5')
cursor.scroll(4)
cursor.fetchall()
| cyj-user/MedData | sampleData/data_input.py | data_input.py | py | 2,208 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymysql.cursors",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pymysql.Connect",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.notnull",... |
73903114025 | import pandas as pd
from datetime import date, timedelta, datetime
from meteostat import Point, Daily
import statsmodels.api as sm
def read_data():
# Set time period
start = datetime(2010, 1, 1)
end = pd.to_datetime(datetime.now().strftime("%Y-%m-%d"))
# Create Point for Vancouver, BC
vancouver = Point(49.2497, -123.1193, 70)
#campinas = Point(-22.9056, -47.0608, 686)
#saopaulo = Point(-23.5475, -46.6361, 769)
# Get daily data for 2018
data = Daily(vancouver, start, end)
data = data.fetch()
data = data[['tavg', 'prcp']]
return data
def predict():
data = read_data()
returns = data['tavg']
valor_ontem = returns.tail(1)
model = sm.tsa.statespace.SARIMAX(returns , order=(1,1,3), seasonal_order=(0,1,1,7),
enforce_stationarity=False, enforce_invertibility=False, freq='D')
model = model.fit()
forecast = model.get_forecast(steps=1) # Previsão para 1 período à frente
conf_interval = forecast.conf_int(alpha=0.05) # Intervalo de confiança de 95%
pred = forecast.predicted_mean[0] # Previsão um dia a frente
lower_bound = conf_interval.iloc[0, 0] # Limite inferior do intervalo de confiança
upper_bound = conf_interval.iloc[0, 1] # Limite superior do intervalo de confiança
prediction = round(float(pred),4)
lower_bound = round(float(lower_bound),4)
upper_bound = round(float(upper_bound),4)
valor_ontem = round(float(valor_ontem),4)
data_atual = date.today()
data_amanha = data_atual + timedelta(days=1)
return [str(data_amanha), prediction, lower_bound, upper_bound]
| Marcosgrosso/automation_series | predict_model.py | predict_model.py | py | 1,724 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.date... |
71404383785 | import random, sys
# random.seed(42)
from person import Person
from logger import Logger
from virus import Virus
import argparse
class Simulation(object):
def __init__(self, pop_size, vacc_percentage, initial_infected, virus):
# TODO: Create a Logger object and bind it to self.logger.
# Remember to call the appropriate logger method in the corresponding parts of the simulation.
self.logger = Logger(virus.name + ".txt")
# TODO: Store the virus in an attribute
self.virus = virus
# TODO: Store pop_size in an attribute
self.original_pop_size = pop_size
self.pop_size = pop_size
# TODO: Store the vacc_percentage in a variable
self.vacc_percentage = vacc_percentage
self.vaccinated = []
# TODO: Store initial_infected in a variable
self.initial_infected = initial_infected
#to speed up looking for infected persons they are all stored here
self.infected = []
# You need to store a list of people (Person instances)
# Some of these people will be infected some will not.
# Use the _create_population() method to create the list and
# return it storing it in an attribute here.
# TODO: Call self._create_population() and pass in the correct parameters.
self.population = self._create_population(initial_infected)
def _create_population(self, initial_infected):
# TODO: Create a list of people (Person instances). This list
# should have a total number of people equal to the pop_size.
# Some of these people will be uninfected and some will be infected.
# The number of infected people should be equal to the the initial_infected
# TODO: Return the list of people
population = []
for i in range(self.pop_size):
population.append(Person(i))
vaccinated_i = (random.choices(range(1, self.pop_size), k=int(self.vacc_percentage*self.pop_size//1)))
self.vaccinated = []
for vaccinated in vaccinated_i:
population[vaccinated] = (Person(vaccinated, is_vaccinated=False, infection=self.virus))
self.vaccinated.append(population[vaccinated])
initial_infected_i = (random.choices(range(1, self.pop_size), k=initial_infected))
self.infected = []
for infected in initial_infected_i:
population[infected] = (Person(infected, is_vaccinated=False, infection=self.virus))
self.infected.append(population[infected])
return population
def _simulation_should_continue(self):
# This method will return a boolean indicating if the simulation
# should continue.
# The simulation should not continue if all of the people are dead,
# or if all of the living people have been vaccinated.
# TODO: Loop over the list of people in the population. Return True
# if the simulation should continue or False if not.
if self.pop_size <= 0 or len(self.vaccinated) >= self.pop_size:
return False
return True
def run(self):
# This method starts the simulation. It should track the number of
# steps the simulation has run and check if the simulation should
# continue at the end of each step.
should_continue = True
# TODO: Write meta data to the logger. This should be starting
# statistics for the simulation. It should include the initial
# population size and the virus.
self.step_number = 0
self.logger.write_metadata(self.pop_size, self.virus, self.initial_infected)
while should_continue:
# TODO: Increment the time_step_counter
# TODO: for every iteration of this loop, call self.time_step()
# Call the _simulation_should_continue method to determine if
# the simulation should continue
self.time_step()
should_continue = self._simulation_should_continue()
self.logger.log_time_step(self.step_number, self.pop_size)
# TODO: When the simulation completes you should conßclude this with
# the logger. Send the final data to the logger.
def time_step(self):
# This method will simulate interactions between people, calulate
# new infections, and determine if vaccinations and fatalities from infections
# The goal here is have each infected person interact with a number of other
# people in the population
# TODO: Loop over your population
# For each person if that person is infected
# have that person interact with 100 other living people
# Run interactions by calling the interaction method below. That method
# takes the infected person and a random person
new_deaths = 0
new_survivors = 0
number_of_new_interactions = 0
number_of_new_infections = 0
current_infected = []
for person in self.infected:
if person.is_alive:
current_infected.append(person)
for infected in current_infected:
new_interactions = self.interaction(100)
new_infections = self._infect_newly_infected(new_interactions)
if infected.did_survive_infection():
infected.is_vaccinated = True #since surviving a virus gives similar results to vaccine
self.vaccinated.append(infected)
new_survivors += 1
else:
infected.is_alive = False
self.pop_size -= 1
new_deaths += 1
self.step_number += 1
self.logger.log_interactions(self.step_number, self.pop_size, number_of_new_interactions)
self.logger.log_infections(self.step_number, self.pop_size, number_of_new_infections)
self.logger.log_infection_survival(self.step_number, self.pop_size, new_deaths)
def interaction(self, num_interactions):
# TODO: Finish this method.
# The possible cases you'll need to cover are listed below:
# random_person is vaccinated:
# nothing happens to random person.
# random_person is already infected:
# nothing happens to random person.
# random_person is healthy, but unvaccinated:
# generate a random number between 0.0 and 1.0. If that number is smaller
# than repro_rate, add that person to the newly infected array
# Simulation object's newly_infected array, so that their infected
# attribute can be changed to True at the end of the time step.
# TODO: Call logger method during this method.
infectable = list(set(self.population).difference(set(self.infected).union(set(self.vaccinated))))
if len(infectable) >= 100:
interacted_with = random.choices(infectable, k=100)
else:
interacted_with = random.choices(infectable, k=len(infectable))
return interacted_with
def _infect_newly_infected(self, interacted_with):
# TODO: Call this method at the end of every time step and infect each Person.
# TODO: Once you have iterated through the entire list of self.newly_infected, remember
# to reset self.newly_infected back to an empty list.
newly_infected = []
for infected in interacted_with:
if random.random() < self.virus.repro_rate and infected.infection == None:
newly_infected.append(infected)
self.population[infected.id].infection = self.virus
self.infected.append(infected)
return newly_infected
if __name__ == "__main__":
# # Test your simulation here
# virus_name = "Sniffles"
# repro_num = 0.5
# mortality_rate = 0.12
# virus = Virus(virus_name, repro_num, mortality_rate)
# # Set some values used by the simulation
# pop_size = 1000
# vacc_percentage = 0.1
# initial_infected = 10
# # Make a new instance of the simulation
# sim = Simulation(pop_size, vacc_percentage, initial_infected, virus)
parser = argparse.ArgumentParser()
parser.add_argument("population_size", help="size of the population you wish to simulate", type=int)
parser.add_argument("vacc_percentage", help="percent of people who start vaccinated within given population", type=float)
parser.add_argument("virus", help="name of the virus")
parser.add_argument("mortality_rate", help="the percent chance of dying after contracting the virus", type=float)
parser.add_argument("reproduction_rate", help="the percent chance of transmission per interaction", type=float)
parser.add_argument("initial_infected", help="the number of people who start with the virus", type=int)
args = parser.parse_args()
virus = Virus(args.virus, repro_rate=args.reproduction_rate, mortality_rate=args.mortality_rate)
sim = Simulation(args.population_size, args.vacc_percentage, args.initial_infected, virus)
# sim.run()
sim.run()
| b3fr4nk/Herd-Immunity-Sim | simulation.py | simulation.py | py | 9,203 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logger.Logger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "virus.name",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "person.Person",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "random.choices",
"lin... |
556091123 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
import scrapy
import json
from urllib.parse import urlparse
from pymongo import MongoClient
from scrapy.pipelines.images import ImagesPipeline
class DataEditPipeline(object):
    """Parse the raw embedded-JSON payload of an item into product fields."""

    @staticmethod
    def process_item(item, spider):
        """Populate price/photos/name/params from item['data'], then drop the raw blob."""
        # The payload looks like 'var x = {...};...' — keep everything before the
        # first ';' and re-attach the opening brace lost by the split.
        raw = item['data'].split(';')[0]
        payload = json.loads('{' + raw.split('{', maxsplit=1)[1])
        product = payload['entities']['products'][0]
        item['price'] = int(product['discountedPrice']) / 100
        item['photos'] = [image['url'] for image in product['images']]
        item['name'] = product['name']
        item['params'] = {attr['slug']: attr['rawValue'] for attr in product['attributes']}
        del item['data']
        return item
class YoulaPhotosPipeline(ImagesPipeline):
    """Scrapy images pipeline that downloads each item's photo URLs to disk."""

    def get_media_requests(self, item, info):
        # Schedule one download request per photo URL; items without photos are skipped.
        if item['photos']:
            for img in item['photos']:
                try:
                    yield scrapy.Request(img)
                except Exception as e:
                    # NOTE(review): errors are only printed, so a bad URL is
                    # silently skipped rather than failing the item.
                    print(e)

    def file_path(self, request, response=None, info=None):
        # Layout: <last segment of spider start URL>/<first 5 chars of file name>/<basename>.
        return info.spider.start_urls[0].split('/')[-1] + '/' + request.url.split('/')[-1][:5] + '/' + \
               os.path.basename(urlparse(request.url).path)

    def item_completed(self, results, item, info):
        # Replace the URL list with the result dicts of the successful downloads.
        if results:
            item['photos'] = [itm[1] for itm in results if itm[0]]
        return item
class DataBasePipeline(object):
    """Persist scraped items into the local 'youla' MongoDB database."""

    def __init__(self):
        # NOTE(review): assumes a MongoDB server on localhost:27017 — confirm deployment.
        client = MongoClient('localhost', 27017)
        self.mongo_base = client.youla

    def process_item(self, item, spider):
        # One collection per spider, named after the last path segment of its start URL.
        collection = self.mongo_base[spider.start_urls[0].split('/')[-1]]
        collection.insert_one(item)
        return item
| GruXsqK/Methods_scraping | Lesson_6/Youla_parser_project/youlaparser/pipelines.py | pipelines.py | py | 1,918 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scrapy.pipelines.images.ImagesPipeline",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "scrapy.Request",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.p... |
4861207569 | from dataclasses import dataclass
from datetime import datetime,date
import pytz
import dateparser
from typing import Union
import pandas as pd
from sqlalchemy import Column,Integer,DateTime,Text,TIMESTAMP,MetaData,Table
from sqlalchemy.engine import create_engine
from sqlalchemy.exc import OperationalError
from businessindia.helpers.exceptions import InvalidDateFormatException
import os
import logging
logger=logging.getLogger(__name__)
class DateHandler:
    """Helpers for normalising assorted date inputs into UTC dates/datetimes.

    Results are either ``date``/``datetime`` objects or strings formatted as
    ``dd-mm-YYYY`` (with ``-HH:MM`` appended when the time is requested).
    """

    @staticmethod
    def _format_result(parsed_datetime, return_string: bool, return_time: bool):
        # Single place mapping the (return_string, return_time) flags onto the
        # four output shapes; this logic was previously duplicated three times.
        if return_string:
            if return_time:
                # BUGFIX: was '&d-%m-%Y-%H:%M', which emitted a literal '&d'
                # instead of the zero-padded day.
                return parsed_datetime.strftime('%d-%m-%Y-%H:%M')
            return parsed_datetime.date().strftime('%d-%m-%Y')
        if return_time:
            return parsed_datetime
        return parsed_datetime.date()

    @staticmethod
    def parse_date(datevalue: Union[datetime, date, str], return_string: bool = False,
                   return_time: bool = False, use_DMY_order: bool = True):
        """Parse *datevalue* into a UTC date/datetime (or formatted string).

        Args:
            datevalue: ``None`` (meaning "now" in UTC), a free-form date string,
                a ``date`` or a ``datetime``.
            return_string: return a formatted string instead of an object.
            return_time: keep the time component in the result.
            use_DMY_order: interpret ambiguous strings as day-month-year.

        Raises:
            InvalidDateFormatException: if a string value cannot be parsed.
        """
        parser_settings = {
            'DATE_ORDER': 'DMY',
            'TIMEZONE': 'UTC',
            'RETURN_AS_TIMEZONE_AWARE': True
        }
        if datevalue is None:
            return DateHandler._format_result(datetime.utcnow(), return_string, return_time)
        if isinstance(datevalue, str):
            try:
                if not use_DMY_order:
                    parser_settings.pop('DATE_ORDER')
                # dateparser returns None on failure; the attribute access inside
                # _format_result then raises AttributeError, handled below.
                parsed_datetime = dateparser.parse(datevalue, settings=parser_settings)
                return DateHandler._format_result(parsed_datetime, return_string, return_time)
            except AttributeError:
                raise InvalidDateFormatException(f'Pass valid date in dd-mm-yyyy format only. Got:{datevalue}')
        if isinstance(datevalue, datetime) or isinstance(datevalue, date):
            # BUGFIX: ``datetime`` is a subclass of ``date``; the old code combined
            # datetimes with midnight too, silently dropping their time-of-day.
            if isinstance(datevalue, date) and not isinstance(datevalue, datetime):
                datevalue = datetime.combine(datevalue, datetime.min.time())
            # NOTE(review): assumes a naive datetime — pytz.localize raises on
            # an already timezone-aware value; confirm callers never pass one.
            localizeddt = pytz.utc.localize(datevalue)
            return DateHandler._format_result(localizeddt, return_string, return_time)

    @staticmethod
    def parse_db_date(datevalue: str):
        """Parse a ``YYYY-mm-dd HH:MM:SS.ffffff`` database string into a ``date``.

        Raises:
            InvalidDateFormatException: if the value does not match the format.
        """
        try:
            return datetime.strptime(datevalue, '%Y-%m-%d %H:%M:%S.%f').date()
        except (ValueError, TypeError):
            # BUGFIX: strptime signals bad input with ValueError/TypeError, not
            # AttributeError, so the previous handler could never fire.
            raise InvalidDateFormatException('Unable to parse the database datetime format try changing it.')
class ChecksumHandler:
    """Tracks already-seen news URLs in a checksum database so scraped rows
    are pushed at most once.

    Connection string resolution: CHECKSUM_DB_CONN_STRING environment
    variable, then the constructor argument, then a local sqlite file.
    """

    def __init__(self,conn_string:str=None) -> None:
        self.conn_string=os.environ.get('CHECKSUM_DB_CONN_STRING')
        if self.conn_string is None:
            self.conn_string=conn_string if conn_string else 'sqlite:///./checksum.db'
        logger.info('Connected to Checksum Database')
        self.engine=create_engine(self.conn_string)

    def fetch_latest_date(self,org_url:str,datecolname:str='published_date',tablename:str='checksum_business'):
        """Return the most recent *datecolname* recorded for *org_url*, or None."""
        self.create_non_exist_table(tablename)
        try:
            unique_identifier=org_url.strip()
            # NOTE(review): org_url/datecolname/tablename are interpolated into raw
            # SQL; parameterize this query if any of them can be untrusted input.
            query=f"SELECT MAX({datecolname}) FROM {tablename} WHERE org_url='{unique_identifier}'"
            with self.engine.connect() as conn:
                max_date=None
                for res in conn.execute(query):
                    max_date=res[0]
                return max_date
        except Exception as e:
            # Best effort: any DB failure degrades to "no previous date".
            logger.info(f'Unable to fetch latest date returning None Exception:{e}')
            return None

    def get_unique_csums(self,data:pd.DataFrame,tablename:str='checksum_business'):
        """Return the rows of *data* whose news_url is not already in *tablename*."""
        #Generate csums for every provided data as hash of str and str and remove those that match in db and keep those that does not match
        res=pd.read_sql(f'SELECT * FROM {tablename}',self.engine)
        # Left merge with indicator: rows present only on the left are new.
        df = pd.merge(data,res,how='left',on=['news_url'],suffixes=('','_db'),indicator=True)
        df=df[[c for c in df.columns if not c.endswith('_db')]]
        df=df.loc[df._merge=='left_only',:]
        df=df.drop(['_merge'],axis=1)
        df=df.drop_duplicates().reset_index(drop=True)
        final=df
        final.columns=final.columns.str.strip()
        return final

    def create_non_exist_table(self,tablename:str):
        """Create the checksum table (id/org_url/news_url/published_date/created_date) if missing."""
        meta=MetaData()
        checksumtb=Table(
            tablename,
            meta,
            Column('id',Integer,primary_key=True,autoincrement=True),
            Column('org_url',Text,index=True),
            Column('news_url',Text,index=True),
            Column('published_date',DateTime,index=True),
            Column('created_date',DateTime,server_default='now()')
        )
        meta.create_all(self.engine,checkfirst=True)

    def push_to_business_table(self,df:pd.DataFrame,tablename:str='checksum_business'):
        """Append the not-yet-seen rows of *df* to *tablename*."""
        # NOTE(review): this rename maps every column onto itself — it is a no-op.
        df=df.rename(columns={'org_url':'org_url','news_url':'news_url','published_date':'published_date'})
        df['published_date']=pd.to_datetime(df['published_date'])
        df['created_date']=datetime.utcnow()
        #############
        try:
            final_df=self.get_unique_csums(df)
        except OperationalError:
            # First run: the table does not exist yet, so every row is new.
            final_df=df
        #print(final_df.shape)
        df=final_df
        ##################
        df.to_sql(tablename,self.engine,chunksize=1000,if_exists='append',index=False)
        logger.info(f'Pushed to checksumdb df of shape {df.shape}')
class ProdDBPushHandler:
    """Resolves the production-database connection string.

    Precedence: PROD_DB_CONN_STRING environment variable, then the
    constructor argument, then a local sqlite fallback file.
    """

    def __init__(self,conn_string:str=None) -> None:
        self.conn_string=os.environ.get('PROD_DB_CONN_STRING')
        if not self.conn_string:
            self.conn_string=conn_string if conn_string else 'sqlite:///./prod.db'
        logger.info('Connected to Production Database')
| nitesh1489/test | helpers/handlers.py | handlers.py | py | 6,204 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datetime.date",
... |
32296716535 | import setuptools
# Use the project's README as the long description shown on package indexes.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="trello_client-basics-api-denisshvayko", version="0.0.1", author="denis", author_email="denis.shvayko@phystech.edu",
    description="Обертка для trello API", long_description=long_description,
    long_description_content_type="text/markdown", url="https://github.com/denisshvayko/D1.8.git",
    packages=setuptools.find_packages(),
    classifiers=["Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License",
                 "Operating System :: OS Independent", ], python_requires='>=3.6', )
{
"api_name": "setuptools.setup",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 9,
"usage_type": "call"
}
] |
13989800577 | # -*- coding: utf-8 -*-
import copy
from io import BytesIO
from datetime import datetime
from xlwt import Workbook, XFStyle, Borders, Pattern
class ExcelWT(Workbook):
    """Excel (.xls) generation helper built on xlwt.

    Sheets are created with a bold, shaded title row; data rows are appended
    with a thin-bordered default cell style.  The finished workbook can be
    fetched as raw bytes or streamed as an HTTP attachment.
    """

    def __init__(self, name, encoding=r'utf-8', style_compression=0):

        super().__init__(encoding, style_compression)

        self._book_name = name
        self._current_sheet = None

        # Default data-cell style: thin border on all four sides, white fill.
        self._default_style = XFStyle()

        self._default_style.borders.left = Borders.THIN
        self._default_style.borders.right = Borders.THIN
        self._default_style.borders.top = Borders.THIN
        self._default_style.borders.bottom = Borders.THIN

        self._default_style.pattern.pattern = Pattern.SOLID_PATTERN
        self._default_style.pattern.pattern_fore_colour = 0x01

        # Title-row style: same borders, bold font, gray background (0x16).
        self._default_title_style = copy.deepcopy(self._default_style)

        self._default_title_style.font.bold = True
        self._default_title_style.pattern.pattern_fore_colour = 0x16

    def create_sheet(self, name, titles=()):
        """Add a sheet named *name* and write *titles* as its styled header row."""
        # Immutable default replaces the old mutable `titles=[]` default.
        sheet = self._current_sheet = self.add_sheet(name)
        style = self._default_title_style

        for index, title in enumerate(titles):
            sheet.write(0, index, title, style)
            sheet.col(index).width = 0x1200

    def add_sheet_row(self, *args):
        """Append *args* as the next row of the current sheet."""
        sheet = self._current_sheet
        style = self._default_style

        nrow = len(sheet.rows)

        for index, value in enumerate(args):
            sheet.write(nrow, index, value, style)

    def get_file(self):
        """Return the workbook serialized to bytes."""
        result = b''

        with BytesIO() as stream:
            self.save(stream)
            result = stream.getvalue()

        return result

    def write_request(self, request):
        """Finish *request* by sending the workbook as a timestamped .xls download."""
        filename = f"{self._book_name}.{datetime.today().strftime('%y%m%d.%H%M%S')}.xls"

        request.set_header(r'Content-Type', r'application/vnd.ms-excel')
        # BUGFIX: the computed filename was not interpolated into the header
        # (the local variable was previously unused).
        request.set_header(r'Content-Disposition', f'attachment;filename={filename}')

        return request.finish(self.get_file())
| wsb310/hagworm | hagworm/extend/excel.py | excel.py | py | 2,028 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "xlwt.Workbook",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "xlwt.XFStyle",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "xlwt.Borders.THIN",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "xlwt.Borders",
... |
44298786313 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 08:29:53 2018
@author: Ahsan
"""
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import compile, Aer
from QGates import gateArity , gateName
class QCircuit:
    """Thin wrapper around a qiskit QuantumCircuit with helpers to build,
    measure and run a circuit from a gate list."""

    def __init__ (self,qBit,cBit,shot=1):
        '''
        This function is used to construct the base of quantum circuit
        Currently by default backend used is 'qasm_simulator_py'.
        NOTE: You can change the backend but would need to adjust the evaluate function as well.
        This function accepts the following arguments:
            Quantum Bits: [qBit]    dataType: int
            Classical Bits [cBit]   dataType: int
            shot is by default 1    dataType: int
        '''
        self.qBit=qBit
        self.cBit=cBit
        self.shot=shot
        self.backend=Aer.get_backend('qasm_simulator')
        # self.backend=Aer.get_backend('statevector_simulator_py')
        self.qr=QuantumRegister(qBit)
        self.cr=ClassicalRegister(cBit)
        self.qCircuit=QuantumCircuit(self.qr,self.cr)

    def evaluate(self):
        '''
        This function is used to evaluate the circuit
        When quantum circuit is constructed call this function to evaluate
        the circuit
        '''
        # NOTE(review): `compile` here is qiskit.compile (imported at module
        # level), which shadows the Python builtin of the same name.
        qobj = compile(self.qCircuit, self.backend,shots=self.shot)
        job = self.backend.run(qobj)
        result = job.result()
        return result

    def constructCircuit(self,code):
        '''
        This function recieves the list of tuples the first element of tuple
        represent the gate and the second and onwards are their placement
        position at the quantum circuit (depends upon the gate's arity)
        '''
        for i in code:
            # Look up the gate's arity and its qiskit method name, then apply
            # it to the referenced qubit(s) via getattr dispatch.
            val=gateArity.get(i[0])
            name=gateName.get(i[0])
            if val==1:
                getattr(self.qCircuit,name)( self.qr[ int(i[1]) ] )
            elif val==2:
                getattr(self.qCircuit,name)(self.qr[int(i[1])],self.qr[int(i[2])])

    def measurement(self,m,useHadamard=True):
        '''
        This function takes the list of tuple m having first element as qubit and
        second element as classical bit. It measures the qubit on the associated
        classical bit
        m : List of tuple [(qBit,cBit )]
        useHadamard: Append hadamard just before
        '''
        if useHadamard:
            # Apply a Hadamard to every qubit immediately before measuring.
            endH=[]
            for i in range(self.qBit):
                endH.append(('Hadamard',i))
            self.constructCircuit(endH)
        for i in m:
            q=i[0]
            c=i[1]
            self.qCircuit.measure(self.qr[q],self.cr[c])
| usamaahsan93/AutoQP | myQFn.py | myQFn.py | py | 2,868 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "qiskit.Aer.get_backend",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "qiskit.Aer",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "qiskit.QuantumRegister",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "qiskit.Class... |
35397958448 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import mox
from pants.base.hash_utils import hash_all, hash_file
from pants.util.contextutil import temporary_file
class TestHashUtils(mox.MoxTestBase):
    """Unit tests for hash_all/hash_file using a mox-mocked digest object."""

    def setUp(self):
        super(TestHashUtils, self).setUp()
        # Mock standing in for a hashlib-style object (update()/hexdigest()).
        self.digest = self.mox.CreateMockAnything()

    def test_hash_all(self):
        # Expect each value to be fed to the digest in order, then hexdigest read.
        self.digest.update('jake')
        self.digest.update('jones')
        self.digest.hexdigest().AndReturn('42')
        self.mox.ReplayAll()

        self.assertEqual('42', hash_all(['jake', 'jones'], digest=self.digest))

    def test_hash_file(self):
        # Expect the file's contents to be streamed into the digest.
        self.digest.update('jake jones')
        self.digest.hexdigest().AndReturn('1137')
        self.mox.ReplayAll()

        with temporary_file() as fd:
            fd.write('jake jones')
            fd.close()
            self.assertEqual('1137', hash_file(fd.name, digest=self.digest))
| fakeNetflix/square-repo-pants | tests/python/pants_test/base/test_hash_utils.py | test_hash_utils.py | py | 942 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mox.MoxTestBase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pants.base.hash_utils.hash_all",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pants.util.contextutil.temporary_file",
"line_number": 29,
"usage_type": "call"
},
... |
74630975144 | import math
import os.path
import re
import html
def calcScore(now, best):
    """Relative score of a run: best/now (lower 'now' is better); 0 for a ~zero score."""
    return 0 if now < 1e-9 else best / now
def isBetter(now, best):
    """A nonzero score is better when it is strictly smaller than the best so far."""
    return 1e-9 <= now < best
def main():
import sqlite3
db = sqlite3.connect('mm.sqlite3')
cur = db.cursor()
cur.execute('select run_id, name, source, created_at from runs')
CREATED_AT = {}
NAME = {}
SRC = {}
for run_id, name, source, created_at in cur.fetchall():
NAME[run_id] = name
SRC[run_id] = source
CREATED_AT[run_id] = created_at
cur.execute('select run_id, test_id, sec, stdout, stderr from results order by run_id, test_id')
R = {}
for run_id, test_id, sec, text_stdout, text_stderr in cur.fetchall():
if run_id not in R:
R[run_id] = 0
R[run_id] += 1
pattern = re.compile(r'(\w+) *[=:] *([\d\.]+)')
T = {}
S = {}
TIME = {}
cur.execute('select run_id, test_id, sec, stdout, stderr from results order by run_id, test_id')
for run_id, test_id, sec, text_stdout, text_stderr in cur.fetchall():
if R[run_id] != 100:
continue
if run_id not in S:
S[run_id] = {}
if test_id not in T:
T[test_id] = {}
if run_id not in TIME:
TIME[run_id] = [sec, sec, sec]
else:
TIME[run_id][0] = min(TIME[run_id][0], sec)
TIME[run_id][1] += sec
TIME[run_id][2] = max(TIME[run_id][2], sec)
S[run_id][test_id] = -1
for text in (text_stdout, text_stderr):
for line in text.split("\n"):
m = pattern.match(line)
if m:
if m.group(1).lower()=='score':
S[run_id][test_id] = float(m.group(2))
else:
T[test_id][m.group(1)] = float(m.group(2))
BEST = {}
BEST_COUNT = {}
for run_id in S:
for test_id in S[run_id]:
if test_id not in BEST or isBetter(S[run_id][test_id], BEST[test_id]):
BEST[test_id] = S[run_id][test_id]
BEST_COUNT[test_id] = 1
elif BEST[test_id] == S[run_id][test_id]:
BEST_COUNT[test_id] += 1
T2 = {}
for test_id in T:
for name in T[test_id]:
if name not in T2:
T2[name] = []
T2[name].append(T[test_id][name])
T2 = {name: sorted(T2[name]) for name in T2}
print(T2)
def splitKind(values):
target = len(values) / 3
best = len(values)
best_i = 0
for i in range(1, len(values)):
if values[i-1]!=values[i]:
sc = abs(i-target)
if best is None or sc<best:
best = sc
best_i = i
assert best_i is not None
for j in range(10):
sep = ('{:.%df}' % (j, )).format((values[best_i-1]+values[best_i])/2)
sep_f = float(sep)
if values[best_i-1] < sep_f < values[best_i]:
break
best = len(values)
best_i = len(values)-1
for i in range(len(values)-1, 0, -1):
if values[i-1]!=values[i]:
sc = abs(len(values)-i-target)
if best is None or sc<best:
best = sc
best_i = i
assert best_i is not None
for j in range(10):
sep2 = ('{:.%df}' % (j, )).format((values[best_i-1]+values[best_i])/2)
sep2_f = float(sep2)
if values[best_i-1] < sep2_f < values[best_i]:
break
return sep, sep2
T3 = {name: splitKind(T2[name]) for name in T2}
print(T3)
import http.server
import urllib.parse
class MyHandler(http.server.BaseHTTPRequestHandler):
def getSource(self, query):
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers()
run_id = int(query.get('id', [])[0])
self.wfile.write(SRC[run_id].encode())
def getDetail(self, query):
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
param = query.get('PARAM', [])
run_id = int(query.get('id', [])[0])
query.pop('id')
htmls = []
htmls.append('<html>')
htmls.append('<head>')
htmls.append('<title>MM Analytics</title>')
htmls.append('</head>')
htmls.append('<body>')
htmls.append(f'<h3>Name: {html.escape(f"{NAME[run_id]}")}</h3>')
htmls.append(f'<a href="/?{urllib.parse.urlencode(query, True)}">[TOP]</a>')
htmls.append(f'<a href="/source?id={run_id}">[SOURCE]</a>')
htmls.append('<hr />')
if 2<=len(param):
sum_score = [0]*9
sum2_score = [0]*9
count_score = [0]*9
bests = [0]*9
uniques = [0]*9
fails = [0]*9
for test_id in S[run_id]:
kind = 4
if T[test_id][param[0]]<float(T3[param[0]][0]):
kind -= 1
elif float(T3[param[0]][1])<T[test_id][param[0]]:
kind += 1
if T[test_id][param[1]]<float(T3[param[1]][0]):
kind -= 3
elif float(T3[param[1]][1])<T[test_id][param[1]]:
kind += 3
if 0 < S[run_id][test_id]:
sc1 = calcScore(S[run_id][test_id], BEST[test_id])
sum_score[kind] += sc1
sum2_score[kind] += sc1*sc1
else:
fails[kind] += 1
count_score[kind] += 1
if BEST[test_id] == S[run_id][test_id]:
bests[kind] += 1
if BEST_COUNT[test_id]==1:
uniques[kind] += 1
#for kind in range(3):
# score = '{:.3f}'.format(100 * sum_score[kind] / count_score[kind])
# htmls.append(f'<td align="right">{score}</td><td align="right">{bests[kind]}</td><td align="right">{uniques[kind]}</td><td align="right">{fails[kind]}</td>')
htmls.append('<table border="1">')
htmls.append(f'<tr><td rowspan="2"></td><th colspan="6">{T3[param[0]][0]}></th><th colspan="6">{param[0]}</th><th colspan="6">>{T3[param[0]][1]}</th></tr>')
htmls.append('<tr>')
for i in range(3):
htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>')
htmls.append('</tr>')
labels = [f'{T3[param[1]][0]}>', f'{param[1]}', f'>{T3[param[1]][1]}']
for y in range(3):
htmls.append(f'<tr><th>{labels[y]}</th>')
for x in range(3):
kind = y * 3 + x
avg_score = sum_score[kind] / count_score[kind]
score = '{:.3f}'.format(100 * avg_score)
std_score = '{:.3f}'.format(100 * math.sqrt((sum2_score[kind] - sum_score[kind]*avg_score) / count_score[kind]))
htmls.append(f'<td align="right">{score}</td><td align="right">{std_score}</td><td align="right">{bests[kind]}</td><td align="right">{uniques[kind]}</td><td align="right">{fails[kind]}</td><td align="right">{count_score[kind]}</td>')
htmls.append('</tr>')
htmls.append('</table>')
htmls.append('</body>')
htmls.append('</html>')
self.wfile.write("\n".join(htmls).encode())
def getIndex(self, query):
if 'id' in query or 'name' in query:
if 'id' in query and 'name' in query:
cur.execute('update runs set name = ? where run_id = ?', (query['name'][-1], int(query['id'][-1])))
NAME[int(query['id'][-1])] = query['name'][-1]
db.commit()
query.pop('id')
query.pop('name')
self.send_response(302)
self.send_header('Location', '/?' + urllib.parse.urlencode(query, True))
self.end_headers()
return
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
param = query.get('PARAM', [])
htmls = []
htmls.append('<html>')
htmls.append('<head>')
htmls.append('<title>MM Analytics</title>')
htmls.append('</head>')
htmls.append('<body>')
htmls.append('''
<script>
function change_name(id, value) {
var new_value = window.prompt(id + "'s name =", value);
if(new_value===null) {
return false;
}
var href = window.location.href;
if(0<=href.indexOf("?")) {
href = href + "&";
}
else {
href = href + "?";
}
window.location.href = href + new URLSearchParams({id: id, name: new_value}).toString();
}
</script>
''')
for name in T3:
if name not in param:
htmls.append(f'<p>_ <a href="/?{urllib.parse.urlencode({**query, "PARAM": param + [name]}, True)}">{name}: {T3[name][0]}, {T3[name][1]}</a></p>')
else:
param2 = list(param)
param2.remove(name)
htmls.append(f'<p>v <a href="/?{urllib.parse.urlencode({**query, "PARAM": param2}, True)}">{name}: {T3[name][0]}, {T3[name][1]}</a></p>')
htmls.append('<table border="1">')
htmls.append('<tr><th rowspan="2">ID</th><th rowspan="2">CREATED_AT</th><th rowspan="2">NAME</th><th colspan="3">Time</th><th colspan="6">Whole</th>')
for name in param:
htmls.append(f'<th colspan="6">{T3[name][0]}></th>')
htmls.append(f'<th colspan="6">{name}</th>')
htmls.append(f'<th colspan="6">>{T3[name][1]}</th>')
htmls.append('</tr>')
htmls.append('<tr>')
htmls.append('<th>MIN</th><th>AVG</th><th>MAX</th>')
htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>')
for name in param:
htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>')
htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>')
htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>')
htmls.append('</tr>')
for run_id in reversed(list(S.keys())):
sum_score = 0
sum2_score = 0
count_score = 0
bests = 0
uniques = 0
fails = 0
for test_id in S[run_id]:
if 0 < S[run_id][test_id]:
sc1 = calcScore(S[run_id][test_id], BEST[test_id])
sum_score += sc1
sum2_score += sc1*sc1
else:
fails += 1
count_score += 1
if BEST[test_id] == S[run_id][test_id]:
bests += 1
if BEST_COUNT[test_id]==1:
uniques += 1
avg_score = sum_score / count_score
score = '{:.3f}'.format(100 * avg_score)
std_score = '{:.3f}'.format(100 * math.sqrt((sum2_score - sum_score*avg_score) / count_score))
sec_min = '{:.3f}'.format(TIME[run_id][0])
sec_avg = '{:.3f}'.format(TIME[run_id][1] / count_score)
sec_max = '{:.3f}'.format(TIME[run_id][2])
htmls.append(f'<tr><td><a href="/detail?{urllib.parse.urlencode({**query, "id": run_id}, True)}">{run_id}</a></td><td>{CREATED_AT[run_id]}</td><td><a href="javascript: change_name({run_id}, "{urllib.parse.quote(f"{NAME[run_id]}")}")">{html.escape(f"{NAME[run_id]}")}</a></td><td align="right">{sec_min}</td><td align="right">{sec_avg}</td><td align="right">{sec_max}</td><td align="right">{score}</td><td align="right">{std_score}</td><td align="right">{bests}</td><td align="right">{uniques}</td><td align="right">{fails}</td><td align="right">{count_score}</td>')
for name in param:
sum_score = [0]*3
sum2_score = [0]*3
count_score = [0]*3
bests = [0]*3
uniques = [0]*3
fails = [0]*3
for test_id in S[run_id]:
kind = 1
if T[test_id][name]<float(T3[name][0]):
kind = 0
elif float(T3[name][1])<T[test_id][name]:
kind = 2
if 0 < S[run_id][test_id]:
sc1 = calcScore(S[run_id][test_id], BEST[test_id])
sum_score[kind] += sc1
sum2_score[kind] += sc1*sc1
else:
fails[kind] += 1
count_score[kind] += 1
if BEST[test_id] == S[run_id][test_id]:
bests[kind] += 1
if BEST_COUNT[test_id]==1:
uniques[kind] += 1
for kind in range(3):
avg_score = sum_score[kind] / count_score[kind]
score = '{:.3f}'.format(100 * avg_score)
std_score = '{:.3f}'.format(100 * math.sqrt((sum2_score[kind] - sum_score[kind]*avg_score) / count_score[kind]))
htmls.append(f'<td align="right">{score}</td><td align="right">{std_score}</td><td align="right">{bests[kind]}</td><td align="right">{uniques[kind]}</td><td align="right">{fails[kind]}</td><td align="right">{count_score[kind]}</td>')
htmls.append(f'</tr>')
htmls.append('</table>')
htmls.append('</body>')
htmls.append('</html>')
self.wfile.write("\n".join(htmls).encode())
def do_GET(self):
path, qs = (self.path.split('?') + [''])[:2]
query = urllib.parse.parse_qs(qs)
#query = {q: (query[q]+[''])[-1] for q in query}
if path=='/':
return self.getIndex(query)
if path=='/detail':
return self.getDetail(query)
elif path=='/source':
return self.getSource(query)
elif path=='/favicon.ico':
self.send_response(200)
self.send_header('Content-Type', 'image/x-icon')
self.end_headers()
self.wfile.write(open(os.path.join(os.path.dirname(__file__), 'favicon.ico'), 'rb').read())
else:
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
htmls = []
htmls.append('<html>')
htmls.append('<body>')
htmls.append(self.path)
htmls.append(f'{query}')
htmls.append('</body>')
htmls.append('</html>')
self.wfile.write("\n".join(htmls).encode())
with http.server.HTTPServer(('', 8080), MyHandler) as server:
print('start httpd ...')
server.serve_forever()
| colun/mmlang | src/mmhttpd.py | mmhttpd.py | py | 15,984 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "http.server.server",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "http.server",
... |
72508017705 | '''
liguangyao
10/25/2023
guangyaoli@ruc.edu.cn
'''
import os
import torch
from torchvision import transforms, utils
from PIL import Image
import numpy as np
import glob
from imagebind import data
from imagebind.models import imagebind_model
from imagebind.models.imagebind_model import ModalityType
device = "cuda:1" if torch.cuda.is_available() else "cpu"
# Instantiate model
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()
model.to(device)
def VideoLevelPrompt(video_label_list, video_name):
    """Return the video-level text prompt (currently a fixed placeholder sentence)."""
    # NOTE(review): the label list and video name are currently ignored; keep
    # any replacement a declarative sentence, as the original comment advises.
    return 'A photo of a dog.'
def ImageBind_feat_extract(args, dir_audio_path, dir_viusal_path, dir_text_path, dst_audio_path, dst_visual_path, dst_text_path):
    """Extract and save ImageBind text/audio/visual features for every video.

    For each video directory, frames (*.jpg) and audio clips (*.wav) are
    uniformly sampled to fixed counts, embedded with the module-level
    ImageBind model, and saved as .npy files named after the video.
    """
    # Text labels: one label per line of the label file.
    video_label_list = []
    with open(dir_text_path, 'r') as dpp:
        for line in dpp:
            video_label_list.append(line.replace("\n", ""))
    # print(video_label_list)

    video_list = os.listdir(dir_viusal_path)
    video_idx = 0
    total_nums = len(video_list)
    for video_name in video_list:

        video_idx = video_idx + 1
        print("\n--> ", video_idx, video_name)

        audio_save_file = os.path.join(dst_audio_path, video_name + '.npy')
        frame_save_file = os.path.join(dst_visual_path, video_name + '.npy')
        text_save_file = os.path.join(dst_text_path, video_name + '.npy')
        # Resume support: skip videos whose audio feature file already exists.
        if os.path.exists(audio_save_file):
            print(video_name + '.npy', "is already processed!")
            continue

        frame_list_load = sorted(glob.glob(os.path.join(dir_viusal_path, video_name, '*.jpg')))
        audio_list_load = sorted(glob.glob(os.path.join(dir_audio_path, video_name, '*.wav')))
        # Video-level prompt, e.g. "A photo of a dog." — any declarative sentence works.
        text_list = VideoLevelPrompt(video_label_list, video_name)

        # To allow batched training every sample must have the same length, but
        # videos differ in duration, so frames/clips are sampled uniformly.
        # NOTE(review): for frame_nums < 2 the linspace end point goes negative
        # and indexes from the end of the list — confirm inputs are long enough.
        frame_nums = len(frame_list_load)
        if frame_nums < args.frame_nums:
            frame_samples = np.round(np.linspace(0, frame_nums-2, args.frame_nums))
        else:
            frame_samples = np.round(np.linspace(0, args.frame_nums-1, args.frame_nums))
        frame_list = [frame_list_load[int(sample)] for sample in frame_samples]

        audio_nums = len(audio_list_load)
        if audio_nums < args.audio_nums:
            audio_samples = np.round(np.linspace(0, audio_nums-2, args.audio_nums))
        else:
            audio_samples = np.round(np.linspace(0, args.audio_nums-1, args.audio_nums))
        audio_list = [audio_list_load[int(sample)] for sample in audio_samples]

        # Load data
        inputs = {
            ModalityType.TEXT: data.load_and_transform_text(text_list, device),
            ModalityType.VISION: data.load_and_transform_vision_data(frame_list, device),
            ModalityType.AUDIO: data.load_and_transform_audio_data(audio_list, device),
        }

        with torch.no_grad():
            embeddings = model(inputs)
        text_feat = embeddings['text']
        audio_feat = embeddings['audio']
        visual_feat = embeddings['vision']
        # print("\nimagebind text: ", text_feat.shape)
        # print("imagebind audio: ", audio_feat.shape)
        # print("imagebind visual: ", visual_feat.shape)

        text_feat = text_feat.float().cpu().numpy()
        np.save(text_save_file, text_feat)

        audio_feat = audio_feat.float().cpu().numpy()
        np.save(audio_save_file, audio_feat)

        visual_feat = visual_feat.float().cpu().numpy()
        np.save(frame_save_file, visual_feat)

        print("Process: ", video_idx, " / ", total_nums, " ----- video id: ", video_idx)
        print("T-A-V Feat shape: ", text_feat.shape, audio_feat.shape, visual_feat.shape)
def ImageBind_visaul_feat_extract(args, dir_viusal_path, dst_visual_path):
    """Extract and save ImageBind visual features only, for every video.

    NOTE(review): the function name is misspelled ('visaul'); the __main__
    block calls ImageBind_visual_feat_extract — confirm which spelling callers
    actually use before renaming.
    """
    video_list = os.listdir(dir_viusal_path)
    video_idx = 0
    total_nums = len(video_list)
    for video_name in video_list:
        video_idx = video_idx + 1
        print("\n--> ", video_idx, video_name)

        frame_save_file = os.path.join(dst_visual_path, video_name + '.npy')
        # Resume support: skip videos whose feature file already exists.
        if os.path.exists(frame_save_file):
            print(video_name + '.npy', "is already processed!")
            continue

        frame_list_load = sorted(glob.glob(os.path.join(dir_viusal_path, video_name, '*.jpg')))
        # Uniformly sample a fixed number of frames so batches align in length.
        frame_nums = len(frame_list_load)
        if frame_nums < args.frame_nums:
            frame_samples = np.round(np.linspace(0, frame_nums-2, args.frame_nums))
        else:
            frame_samples = np.round(np.linspace(0, args.frame_nums-1, args.frame_nums))
        frame_list = [frame_list_load[int(sample)] for sample in frame_samples]

        # Load data
        inputs = {ModalityType.VISION: data.load_and_transform_vision_data(frame_list, device),}

        with torch.no_grad():
            embeddings = model(inputs)
        visual_feat = embeddings['vision']
        # print("imagebind visual: ", visual_feat.shape)

        visual_feat = visual_feat.float().cpu().numpy()
        np.save(frame_save_file, visual_feat)

        print("Process: ", video_idx, " / ", total_nums, " ----- video id: ", video_idx)
        print("V Feat shape: ", visual_feat.shape)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dir_audio_path", type=str, default='data/users/guangyao_li/MUSIC-AVQA/audio_16kHz_2sec',
help='audio file path')
parser.add_argument("--dir_visual_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/avqa-frames-1fps',
help='visual frames path')
parser.add_argument("--dir_text_path", type=str, default='../../dataset/split_que_id/music_avqa.json',
help='text file path')
parser.add_argument("--dst_audio_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/imagebind_feat/imagebind_audio_16kHz',
help='audio feature path')
parser.add_argument("--dst_visual_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/imagebind_feat/imagebind_frame_1fps',
help='visual frames feature path')
parser.add_argument("--dst_text_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/imagebind_feat/imagebind_text',
help='text feature path')
parser.add_argument("--frame_nums", type=int, default=60,
help='frame sample numbers')
parser.add_argument("--audio_nums", type=int, default=60,
help='audio clip sample numbers')
# parser.add_argument("--gpu", dest='gpu', type=str, default='0',
# help='Set CUDA_VISIBLE_DEVICES environment variable, optional')
args = parser.parse_args()
# os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
params = vars(args)
# 同时提取audio, vsiual 和text的特征
ImageBind_feat_extract(args, args.dir_audio_path, args.dir_visual_path, args.dir_text_path,
args.dst_audio_path, args.dst_visual_path, args.dst_text_path)
# 只提取一个模态的特征,如visual
ImageBind_visual_feat_extract(args, dir_viusal_path, dst_visual_path)
| ayameyao/ResearchToolCode | FeatureExtraction/Extract_ImageBind_Features/extract_imagebind_feats.py | extract_imagebind_feats.py | py | 7,418 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "imagebind.models.imagebind_model.imagebind_huge",
"line_number": 22,
"usage_type": "call"
},
... |
20655962047 | import dataclasses
import subprocess
from typing import Any, ClassVar, List, Optional
from fancy_dataclass.utils import DataclassMixin, issubclass_safe, obj_class_name
class SubprocessDataclass(DataclassMixin):
    """Mixin class providing a method for converting dataclass fields to command-line args that can be used to make a subprocess call.
    Other arguments can be passed into the `metadata` argument of a `dataclasses.field`, namely:
    - `exec` (boolean flag indicating that this field should be treated as the name of the executable, rather than an argument)
    - `args` (list of command-line arguments corresponding to the field—only the first will be used, and only if it starts with a hyphen)
    - `exclude` (boolean flag indicating that the field should not be included in the args)"""
    def __post_init__(self) -> None:
        """Validates that at most one field carries the 'exec' metadata flag.
        Raises:
            TypeError: If more than one field has 'exec' set to True"""
        exec_field = None
        for (name, field) in self.__dataclass_fields__.items():
            if field.metadata.get('exec', False):
                if (exec_field is None):
                    exec_field = name
                else:
                    raise TypeError("cannot have more than one field with 'exec' flag set to True")
    def get_arg(self, name: str, suppress_defaults: bool = False) -> List[str]:
        """Given the name of a dataclass field, gets the command-line args for that field.
        Args:
            name: Name of dataclass field
            suppress_defaults: If `True`, suppresses arguments that are equal to the default values
        Returns:
            List of command-line args corresponding to the field"""
        field = self.__dataclass_fields__[name]
        if field.metadata.get('exclude', False):  # explicitly excluded from the args
            return []
        if getattr(field.type, '__origin__', None) is ClassVar:
            # ignore fields associated with the class, rather than the instance
            return []
        val = getattr(self, name, None)
        if (val is None):  # optional value is None
            return []
        if issubclass_safe(field.type, SubprocessDataclass):  # get args via nested SubprocessDataclass
            return val.args(suppress_defaults = suppress_defaults)
        if field.metadata.get('exec', False):  # this field is the executable, so it contributes no arguments
            return []
        if suppress_defaults:  # if value matches the default, suppress the argument
            default = None
            has_default = True
            if (field.default == dataclasses.MISSING):
                if (field.default_factory == dataclasses.MISSING):
                    has_default = False
                else:
                    default = field.default_factory()
            else:
                default = field.default
            if has_default and (val == default):
                return []
        if field.metadata.get('args'):  # use arg name provided by the metadata
            arg = field.metadata['args'][0]
            if (not arg.startswith('-')):
                arg = None
        else:  # use the field name (assume a single dash if it is a single letter)
            prefix = '-' if (len(name) == 1) else '--'
            arg = prefix + name.replace('_', '-')
        if isinstance(val, bool):
            # boolean fields are bare flags: emit the flag when True, nothing when False
            # (bugfix: previously a True value leaked the raw bool into the args list,
            # which subprocess.run cannot handle since it requires strings)
            if (not val):
                arg = None
            val = []
        elif isinstance(val, (list, tuple)):
            if val:
                val = [str(x) for x in val]
            else:
                # bugfix: normalize an empty tuple (not just an empty list) to [],
                # so it is not appended verbatim as a single tuple element below
                arg = None
                val = []
        elif (val is not None):  # convert the field value to a string
            val = str(val)
        args = [arg] if arg else []
        args += val if isinstance(val, list) else [val]
        return args
    def get_executable(self) -> Optional[str]:
        """Gets the name of an executable to run with the appropriate arguments.
        By default, this returns the value of the first dataclass field whose `exec` metadata flag is set to `True`, if one exists, and `None` otherwise.
        Returns:
            Name of the executable to run"""
        for (name, field) in self.__dataclass_fields__.items():
            if field.metadata.get('exec', False):
                return getattr(self, name, None)
        return None
    def args(self, suppress_defaults: bool = False) -> List[str]:
        """Converts dataclass fields to a list of command-line arguments for a subprocess call.
        Args:
            suppress_defaults: If `True`, suppresses arguments that are equal to the default values
        Returns:
            List of command-line args corresponding to the dataclass fields"""
        args = []
        for name in self.__dataclass_fields__:
            args += [arg for arg in self.get_arg(name, suppress_defaults = suppress_defaults) if arg]
        return args
    def run_subprocess(self, **kwargs: Any) -> subprocess.CompletedProcess:
        """Executes the full subprocess command corresponding to the dataclass parameters.
        Args:
            kwargs: Keyword arguments passed to `subprocess.run`
        Returns:
            `CompletedProcess` object produced by `subprocess.run`
        Raises:
            ValueError: If no executable was found from the `get_executable` method"""
        executable = self.get_executable()
        if (not executable):
            raise ValueError(f'No executable identified for use with {obj_class_name(self)!r} instance')
        args = [executable] + self.args()
        return subprocess.run(args, **kwargs)
| jeremander/fancy-dataclass | fancy_dataclass/subprocess.py | subprocess.py | py | 5,568 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fancy_dataclass.utils.DataclassMixin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "fancy_dataclass.utils.issubclass_safe",
"line_number": 44,
"usage_type": "call"
},
... |
70308798185 | import wx
class SlideshowFrame(wx.Frame):
    """Black full-area frame that shows images scaled to fit while
    preserving their aspect ratio, centered within the frame."""

    def __init__(self, **kwargs):
        wx.Frame.__init__(self, **kwargs)
        self.SetBackgroundColour(wx.BLACK)
        # Panel spanning the whole frame; it hosts the bitmap control.
        self.panel = wx.Panel(self, pos=self.Rect.GetPosition(),
                              size=self.Rect.GetSize())
        # Blank placeholder image, shown when no path is supplied.
        self.empty_img = wx.EmptyImage(self.Rect.GetWidth(),
                                       self.Rect.GetHeight())
        self.imageCtrl = wx.StaticBitmap(self.panel, wx.ID_ANY,
                                         wx.BitmapFromImage(self.empty_img))

    def load_img(self, img_path):
        """Load the image at *img_path* (or the blank image when None),
        scale it to fit the frame without distortion, and center it."""
        img = self.empty_img if img_path is None else wx.Image(img_path, wx.BITMAP_TYPE_ANY)

        img_w, img_h = img.GetWidth(), img.GetHeight()
        frame_w, frame_h = self.Rect.GetWidth(), self.Rect.GetHeight()

        # Fit to the frame width first; if the resulting height overflows,
        # fit to the frame height instead.
        fit_w = frame_w
        fit_h = img_h * (float(frame_w) / img_w)
        if fit_h > frame_h:
            fit_w = img_w * (float(frame_h) / img_h)
            fit_h = frame_h

        scaled = img.Scale(fit_w, fit_h, quality=wx.IMAGE_QUALITY_HIGH)
        self.imageCtrl.SetBitmap(wx.BitmapFromImage(scaled))

        # Re-position the panel so the scaled image is centered in the frame.
        origin = self.Rect.GetPosition()
        left = origin[0] + (frame_w - fit_w) / 2
        top = origin[1] + (frame_h - fit_h) / 2
        self.panel.SetRect((left, top, fit_w, fit_h))
        self.panel.Refresh()
| jamestunnell/auto-slideshow | slideshow_frame.py | slideshow_frame.py | py | 1,851 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "wx.Frame",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "wx.Frame.__init__",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "wx.BLACK",
"line_numbe... |
8272148926 | #!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 17:55:12 2023
@author: Carlos Gómez-Huélamo
"""
# General purpose imports
import sys
import os
import pdb
import git
if str(sys.version_info[0])+"."+str(sys.version_info[1]) >= "3.9": # Python >= 3.9
from math import gcd
else:
from fractions import gcd
# DL & Math imports
import math
import numpy as np
import torch
import pytorch_lightning as pl
from scipy import sparse
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn import conv
from torch_geometric.utils import from_scipy_sparse_matrix
# Plot imports
# Custom imports
# Global variables
# https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision
# Let cuDNN benchmark and cache the fastest convolution algorithms
# (beneficial when input shapes stay constant across iterations).
torch.backends.cudnn.benchmark = True
# Trade a little float32 matmul precision for speed on recent GPUs.
torch.set_float32_matmul_precision("medium") # highest, high, medium
#######################################
class TMFModel(pl.LightningModule):
    """Multimodal trajectory-prediction model (Transformer + agent GNN).

    The social branch embeds per-agent displacement histories, adds a 1D
    positional encoding, runs a per-agent temporal Transformer encoder and a
    crystal-graph GNN across agents. An optional physical (map) branch
    encodes candidate centerlines; the two latents are combined either by
    concatenation or by two rounds of agent<->lane cross-attention before a
    multimodal decoder predicts the focal agent's future trajectory.
    """

    def __init__(self, args):
        super(TMFModel, self).__init__()
        self.args = args

        # Store all constructor arguments under self.hparams and inside the
        # checkpoint, which simplifies model re-instantiation after training.
        self.save_hyperparameters()

        # Encoder — social branch
        self.linear_embedding = LinearEmbedding(3, self.args)
        self.pos_encoder = PositionalEncoding1D(self.args.social_latent_size)
        self.encoder_transformer = EncoderTransformer(self.args)
        self.agent_gnn = AgentGNN(self.args)

        # Encoder — physical (map) branch
        if self.args.use_map:
            self.map_sub_net = MapSubNet(self.args)
            assert self.args.social_latent_size == self.args.map_latent_size

            if self.args.final_latent_info == "concat":
                self.args.decoder_latent_size = self.args.social_latent_size + self.args.map_latent_size
            elif self.args.final_latent_info == "fuse":
                # Two rounds of Agent->Lane / Lane->Agent cross-attention
                self.A2L_1 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.L2A_1 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.A2L_2 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.L2A_2 = TransformerDecoder(self.args.social_latent_size, head_num=self.args.num_attention_heads)
                self.args.decoder_latent_size = self.args.social_latent_size
            else:
                raise AssertionError
        else:
            self.args.decoder_latent_size = self.args.social_latent_size

        # Decoder (NOTE(review): args.decoder is not registered in init_args;
        # presumably set by the caller — verify against the training script)
        if self.args.decoder == "decoder_residual": self.decoder = DecoderResidual(self.args)
        elif self.args.decoder == "decoder_temporal": self.decoder = Temporal_Multimodal_Decoder(self.args)

        # Metrics / learning-rate configuration
        self.reg_loss = nn.SmoothL1Loss(reduction="none")

        if self.args.freeze_decoder:
            self.initial_lr_conf = self.args.initial_lr_conf
            self.min_lr_conf = self.args.min_lr_conf
        else:
            self.initial_lr_conf = 1e-3
            self.min_lr_conf = 1e-6

        self.is_frozen = False
        self.save_model_script = True

    @staticmethod
    def init_args(parent_parser, BASE_DIR, DATASET_DIR):
        """Registers dataset/training/model arguments on *parent_parser*.

        Args:
            parent_parser: argparse.ArgumentParser to extend
            BASE_DIR: repository root path
            DATASET_DIR: dataset directory relative to BASE_DIR

        Returns:
            The same parser, with the argument groups added."""
        parser_dataset = parent_parser.add_argument_group("dataset")
        parser_dataset.add_argument(
            "--BASE_DIR", type=str, default=BASE_DIR)
        parser_dataset.add_argument(
            "--DATASET_DIR", type=str, default=DATASET_DIR)
        parser_dataset.add_argument(
            "--LOG_DIR", type=str, default="non_specified")
        parser_dataset.add_argument(
            "--train_split", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "train"))
        parser_dataset.add_argument(
            "--val_split", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "val"))
        parser_dataset.add_argument(
            "--test_split", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "test"))

        # Social preprocess
        parser_dataset.add_argument(
            "--train_split_pre_social", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_social", "train_pre_clean.pkl"))
        parser_dataset.add_argument(
            "--val_split_pre_social", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_social", "val_pre_clean.pkl"))
        parser_dataset.add_argument(
            "--test_split_pre_social", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_social", "test_pre_clean.pkl"))

        # Map preprocess
        parser_dataset.add_argument(
            "--train_split_pre_map", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_map", "train_map_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--val_split_pre_map", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_map", "val_map_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--test_split_pre_map", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_map", "test_map_data_rot_right_x_multi_agent.pkl"))

        # Whole preprocess
        parser_dataset.add_argument(
            "--train_split_pre", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_full", "train_full_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--val_split_pre", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_full", "val_full_data_rot_right_x_multi_agent.pkl"))
        parser_dataset.add_argument(
            "--test_split_pre", type=str, default=os.path.join(
                BASE_DIR, DATASET_DIR, "processed_full", "test_full_data_rot_right_x_multi_agent.pkl"))

        parser_dataset.add_argument("--reduce_dataset_size", type=int, default=0)
        parser_dataset.add_argument("--use_preprocessed", type=bool, default=False)
        parser_dataset.add_argument("--use_map", type=bool, default=False)
        parser_dataset.add_argument("--align_image_with_target_x", type=bool, default=True)

        parser_training = parent_parser.add_argument_group("training")
        parser_training.add_argument("--num_epochs", type=int, default=200)
        parser_training.add_argument("--check_val_every_n_epoch", type=int, default=10)
        parser_training.add_argument("--lr_values", type=list, default=[1e-3, 1e-4, 1e-3 , 1e-4])
        parser_training.add_argument("--lr_step_epochs", type=list, default=[10, 20, 45])
        parser_training.add_argument("--initial_lr_conf", type=float, default=5e-5)
        parser_training.add_argument("--min_lr_conf", type=float, default=1e-6)
        parser_training.add_argument("--wd", type=float, default=0.001)
        parser_training.add_argument("--batch_size", type=int, default=128)
        parser_training.add_argument("--val_batch_size", type=int, default=128)
        parser_training.add_argument("--workers", type=int, default=0) # TODO: Not working with >= 0
        parser_training.add_argument("--val_workers", type=int, default=0)
        parser_training.add_argument("--gpus", type=int, default=1)

        parser_model = parent_parser.add_argument_group("model")
        # NOTE(review): --MODEL_DIR is registered on the dataset group in the
        # original code; kept as-is (groups only affect --help formatting)
        parser_dataset.add_argument("--MODEL_DIR", type=str, default="non_specified")
        parser_model.add_argument("--data_dim", type=int, default=2)
        parser_model.add_argument("--obs_len", type=int, default=50)
        parser_model.add_argument("--pred_len", type=int, default=60)
        parser_model.add_argument("--centerline_length", type=int, default=40)
        parser_model.add_argument("--num_centerlines", type=int, default=6)
        parser_model.add_argument("--num_attention_heads", type=int, default=8)
        parser_model.add_argument("--apply_dropout", type=float, default=0.2)
        parser_model.add_argument("--data_aug_gaussian_noise", type=float, default=0.01)
        parser_model.add_argument("--social_latent_size", type=int, default=64)
        parser_model.add_argument("--map_latent_size", type=int, default=64)
        parser_model.add_argument("--final_latent_info", type=str, default="non_specified")
        parser_model.add_argument("--decoder_latent_size", type=int, default=-1)
        parser_model.add_argument("--decoder_temporal_window_size", type=int, default=30) # 49
        parser_model.add_argument("--num_modes", type=int, default=6)
        parser_model.add_argument("--freeze_decoder", type=bool, default=False)
        parser_model.add_argument("--mod_steps", type=list, default=[1, 5]) # First unimodal -> Freeze -> Multimodal
        parser_model.add_argument("--mod_freeze_epoch", type=int, default=20)
        parser_model.add_argument("--mod_full_unfreeze_epoch", type=int, default=60)
        parser_model.add_argument("--reg_loss_weight", type=float, default=1) # xy predictions
        parser_model.add_argument("--cls_loss_weight", type=float, default=1) # classification = confidences
        parser_model.add_argument("--epsilon", type=float, default=0.0000001)

        return parent_parser

    def add_noise(self, input, factor=1):
        """Returns *input* plus zero-mean Gaussian noise scaled by *factor*
        (same shape, device and dtype as the input)."""
        noise = factor * torch.randn(input.shape).to(input)
        noisy_input = input + noise
        return noisy_input

    def forward(self, batch):
        """Runs the encoder/decoder pipeline on a collated batch.

        Args:
            batch: Dict with per-sequence lists 'displ', 'centers',
                'rotation', 'origin' (and 'rel_candidate_centerlines' when
                the map branch is enabled).

        Returns:
            Tuple (out, conf): predictions of shape
            (batch_size, num_agents=1, num_modes, pred_len, data_dim) in
            global coordinates, and the per-mode confidences.

        Raises:
            RuntimeError: If NaN values appear in the latent features."""
        # Set batch norm to eval mode in order to prevent updates on the
        # running means, if the weights are frozen
        if self.args.freeze_decoder:
            if self.is_frozen:
                for module in self.modules():
                    if isinstance(module, torch.nn.modules.BatchNorm1d):
                        module.eval()

        # Encoder — social
        displ, centers = batch["displ"], batch["centers"]
        rotation, origin = batch["rotation"], batch["origin"]
        agents_per_sample = [x.shape[0] for x in displ]
        batch_size = len(agents_per_sample)

        # For each sequence the focal (target) agent is always stored first,
        # then the AV, then the remaining agents (see extractor_proc.py), so
        # the focal agent's flat row index is each sample's cumulative offset.
        focal_agent_id = np.cumsum(agents_per_sample)
        focal_agent_id = np.roll(focal_agent_id, 1)
        focal_agent_id[0] = 0

        # Flatten the per-sequence lists into single tensors
        displ_cat = torch.cat(displ, dim=0)
        centers_cat = torch.cat(centers, dim=0)

        # Data augmentation (TODO: should live in collate_fn_dict / DataLoader)
        if self.training:
            displ_cat[:,:,:2] = self.add_noise(displ_cat[:,:,:2], self.args.data_aug_gaussian_noise)
            centers_cat = self.add_noise(centers_cat, self.args.data_aug_gaussian_noise)

        linear_output = self.linear_embedding(displ_cat)
        pos_encoding = self.pos_encoder(linear_output)
        pos_encoding = pos_encoding + linear_output

        out_transformer = self.encoder_transformer(pos_encoding, agents_per_sample)
        out_agent_gnn = self.agent_gnn(out_transformer, centers_cat, agents_per_sample)
        social_info = torch.stack([x[0] for x in out_agent_gnn])

        # Fail fast instead of dropping into a debugger (leftover
        # pdb.set_trace() breakpoints removed from this method)
        if torch.any(torch.isnan(social_info)):
            raise RuntimeError("NaN values in the social latent features")

        # Encoder — physical
        if self.args.use_map:
            # Candidate centerlines, padded to num_centerlines per scenario
            rel_candidate_centerlines = batch["rel_candidate_centerlines"]
            rel_candidate_centerlines = torch.stack(rel_candidate_centerlines, dim=0)

            _, num_centerlines, points_centerline, data_dim = rel_candidate_centerlines.shape
            rel_candidate_centerlines = rel_candidate_centerlines.contiguous().view(-1, points_centerline, data_dim)

            # A padded centerline sums to 0.0 over every point and dimension
            non_empty_mask = rel_candidate_centerlines.abs().sum(dim=1).sum(dim=1)
            rows_mask = torch.where(non_empty_mask == 0.0)[0]
            non_masked_centerlines = rel_candidate_centerlines.shape[0] - len(rows_mask)

            rel_candidate_centerlines_mask = torch.zeros([rel_candidate_centerlines.shape[0]], device=rel_candidate_centerlines.device).type(torch.bool) # False
            rel_candidate_centerlines_mask[rows_mask] = True # Padded-centerlines
            rel_candidate_centerlines_mask_inverted = ~rel_candidate_centerlines_mask # Non-padded (relevant) centerlines set to True

            # Count the relevant centerlines per sequence (sanity check)
            centerlines_per_sample = []
            num_current_centerlines = 0
            for i in range(rel_candidate_centerlines_mask.shape[0]+1):
                if i % self.args.num_centerlines == 0 and i > 0: # Next traffic scenario
                    centerlines_per_sample.append(num_current_centerlines)
                    num_current_centerlines = 0
                if i == rel_candidate_centerlines_mask.shape[0]:
                    break
                if rel_candidate_centerlines_mask_inverted[i]: # Non-masked
                    num_current_centerlines += 1
            assert non_masked_centerlines == sum(centerlines_per_sample), \
                "The number of relevant centerlines do not match"
            centerlines_per_sample = np.array(centerlines_per_sample)

            rel_candidate_centerlines_mask_ = rel_candidate_centerlines_mask.reshape(-1,1).repeat_interleave(points_centerline,dim=1)
            physical_info = self.map_sub_net(rel_candidate_centerlines, rel_candidate_centerlines_mask_)

        # Combine social and physical latents
        if self.args.use_map:
            if self.args.final_latent_info == "concat": # Concat info
                merged_info = torch.cat([social_info,
                                         physical_info],
                                         dim=1)
            if self.args.final_latent_info == "fuse": # Fuse info
                physical_info = physical_info + self.A2L_1(physical_info, social_info)
                social_info = social_info + self.L2A_1(social_info, physical_info)
                physical_info = physical_info + self.A2L_2(physical_info, social_info)
                social_info = social_info + self.L2A_2(social_info, physical_info)
                merged_info = social_info
        else:
            merged_info = social_info

        if torch.any(torch.isnan(merged_info)):
            raise RuntimeError("NaN values in the merged latent features")

        # Decoder (if freeze_decoder is set, conf is meaningless)
        if self.args.decoder == "decoder_residual":
            pred_traj, conf = self.decoder(merged_info, self.is_frozen, self.current_epoch)
        elif self.args.decoder == "decoder_temporal":
            traj_agent_abs_rel = displ_cat[focal_agent_id,:self.args.decoder_temporal_window_size,:self.args.data_dim]
            last_obs_agent = centers_cat[focal_agent_id,:]

            decoder_h = merged_info.unsqueeze(0)
            decoder_c = torch.zeros(tuple(decoder_h.shape)).to(decoder_h)
            state_tuple = (decoder_h, decoder_c)

            pred_traj_rel, conf = self.decoder(traj_agent_abs_rel, state_tuple)
            # Convert relative displacements to absolute coordinates (around origin)
            pred_traj = relative_to_abs_multimodal(pred_traj_rel, last_obs_agent)

        # Only the focal agent is predicted; a multi-agent model would produce
        # batch_size x num_agents x num_modes x pred_len x data_dim
        num_agents = 1
        out = pred_traj.contiguous().view(batch_size, num_agents, -1, self.args.pred_len, self.args.data_dim)
        if not self.args.freeze_decoder: conf = conf.view(batch_size, num_agents, -1)

        # Transform the predictions back into the global coordinate frame
        for i in range(len(out)):
            out[i] = torch.matmul(out[i], rotation[i]) + origin[i].view(
                1, 1, 1, -1
            )
        return out, conf

    # Aux class functions

    def freeze(self):
        """Freezes all parameters except the decoder layers re-enabled by
        `decoder.unfreeze_layers()` (start of the multimodal stage)."""
        for param in self.parameters():
            param.requires_grad = False
        self.decoder.unfreeze_layers()
        self.is_frozen = True

    def full_unfreeze(self):
        """Re-enables gradients for every parameter of the model."""
        for param in self.parameters():
            param.requires_grad = True
        self.is_frozen = False

    def _best_mode_regression_loss(self, preds, gts):
        """Shared regression term: mean of per-point L2 error and FDE of the
        best mode (lowest final displacement) per sequence.

        Args:
            preds (torch.tensor): batch_size x num_modes x pred_len x data_dim
            gts (list): ground truth per scenario (num_agents x pred_len x 2);
                row [0] is always the target agent

        Returns:
            Tuple (reg_loss, fde_k, k_hat, index): scalar loss, per-mode FDE,
            best-mode index per sequence, and a row-index helper tensor."""
        # Only the first trajectory of each sample belongs to the target agent
        gt_target = torch.cat([torch.unsqueeze(x[0], 0) for x in gts], 0) # batch_size x pred_len x data_dim
        gt_target_repeated = gt_target.unsqueeze(1).repeat(1, preds.shape[1], 1, 1) # repeat the gt for all modes

        # Final displacement error of every mode (epsilon avoids sqrt(0) grads)
        fde_k = torch.sqrt((preds[:, :, -1, 0] - gt_target_repeated[:, :, -1, 0]) ** 2 + # x
                           (preds[:, :, -1, 1] - gt_target_repeated[:, :, -1, 1]) ** 2 + # y
                           self.args.epsilon)
        k_hat = torch.argmin(fde_k, dim=1)
        index = torch.tensor(range(preds.shape[0]), dtype=torch.long)
        pred_fut_traj = preds[index, k_hat] # best trajectory (in FDE terms) per sequence

        # Pointwise L2 error of the best mode
        mse_loss = F.mse_loss(pred_fut_traj, gt_target, reduction='none')
        mse_loss = mse_loss.sum(dim=2) + self.args.epsilon # avoid sqrt(0)
        mse_loss = torch.sqrt(mse_loss)
        mse_loss = mse_loss.mean(dim=1)

        fde_loss = fde_k[index, k_hat]
        reg_loss = (mse_loss * 0.5 + fde_loss * 0.5).mean()
        return reg_loss, fde_k, k_hat, index

    def prediction_loss(self, preds, gts, conf=None):
        """Training loss for the focal agent.

        Args:
            preds (torch.tensor): batch_size x num_agents x num_modes x pred_len x data_dim
                (num_agents == 1: only the focal agent is predicted)
            gts (list): ground truth per scenario (num_agents x pred_len x 2)
            conf (torch.tensor): batch_size x num_agents x num_modes confidences

        Returns:
            Scalar loss: best-mode regression loss, plus a max-margin
            classification loss on the confidences when they are being
            trained (freeze_decoder == False)."""
        preds = preds.squeeze(1) # drop the singleton agent dimension
        reg_loss, _, k_hat, index = self._best_mode_regression_loss(preds, gts)
        if self.args.freeze_decoder:
            return reg_loss

        conf = conf.squeeze(1)
        batch_size = conf.shape[0]
        num_modes = preds.shape[1]

        # Classification loss (max-margin): push the winning mode's score at
        # least 0.2 above every other mode's score
        score_hat = conf[index, k_hat].unsqueeze(-1)
        score_hat = score_hat.repeat(1, num_modes)
        cls_loss = conf + 0.2 - score_hat
        cls_loss[cls_loss < 0] = 0
        cls_loss = cls_loss.sum(dim=-1).sum(dim=-1)
        cls_loss = cls_loss / ((num_modes - 1) * batch_size)

        # Final loss
        loss = reg_loss * self.args.reg_loss_weight + \
               cls_loss * self.args.cls_loss_weight
        return loss

    def get_lr(self, epoch):
        """Piecewise-constant LR from args.lr_values / args.lr_step_epochs."""
        lr_index = 0
        for lr_epoch in self.args.lr_step_epochs:
            if epoch < lr_epoch:
                break
            lr_index += 1
        return self.args.lr_values[lr_index]

    def get_best_predictions(self, pred, best_pred_indeces):
        """
        pred: batch_size x num_modes x pred_len x data_dim
        best_pred_indeces: batch_size x 1
        Take the best prediction (best mode) according to the best confidence for each sequence
        """
        return pred[torch.arange(pred.shape[0]), best_pred_indeces, :, :].squeeze()

    def calc_prediction_metrics(self, preds, gts, conf=None):
        """Computes ADE/FDE for K=1 and best-of-K predictions.

        Args:
            preds: (num_seqs, num_modes, pred_len, 2) numpy array
            gts: (num_seqs, pred_len, 2) numpy array
            conf: (num_seqs, num_modes) confidences, or None when the decoder
                is frozen (then mode 0 is taken as the K=1 prediction)

        Returns:
            Tuple (ade_1, fde_1, ade, fde)."""
        if self.args.freeze_decoder:
            # L2 error per mode and timestep: (num_seqs, num_modes, pred_len)
            error_per_t = np.linalg.norm(preds - np.expand_dims(gts, axis=1), axis=-1)

            # K=1: the first mode (no trained confidences available)
            fde_1 = np.average(error_per_t[:, 0, -1])
            ade_1 = np.average(error_per_t[:, 0, :])

            # Best-of-K: the mode with the lowest final displacement
            lowest_final_error_indices = np.argmin(error_per_t[:, :, -1], axis=1)
            error_per_t = error_per_t[np.arange(
                preds.shape[0]), lowest_final_error_indices]
            fde = np.average(error_per_t[:, -1])
            ade = np.average(error_per_t[:, :])
        else:
            # K=1: the mode with the highest confidence
            best_pred_traj_indeces = conf.argmax(1)
            k1_predictions = self.get_best_predictions(preds, best_pred_traj_indeces)
            error_per_t_k1 = np.linalg.norm(k1_predictions - gts, axis=-1)
            fde_1 = np.average(error_per_t_k1[:, -1])
            ade_1 = np.average(error_per_t_k1[:, :])

            # Best-of-K: the mode with the lowest final displacement
            error_per_t = np.linalg.norm(preds - np.expand_dims(gts, axis=1), axis=-1)
            lowest_final_error_indices = np.argmin(error_per_t[:, :, -1], axis=1)
            error_per_t = error_per_t[np.arange(
                preds.shape[0]), lowest_final_error_indices]
            fde = np.average(error_per_t[:, -1])
            ade = np.average(error_per_t[:, :])
        return ade_1, fde_1, ade, fde

    # Overwrite Pytorch-Lightning functions

    def configure_optimizers(self):
        """AdamW optimizer; after the freeze epoch only the still-trainable
        decoder parameters are optimized. In confidence-training mode a
        ReduceLROnPlateau scheduler monitors 'ade_val'."""
        if self.args.freeze_decoder:
            if self.current_epoch == self.args.mod_freeze_epoch:
                # Apply optimizer only to the parameters that remain trainable
                optimizer = torch.optim.AdamW(
                    filter(lambda p: p.requires_grad, self.parameters()), weight_decay=self.args.wd)
            else:
                optimizer = torch.optim.AdamW(
                    self.parameters(), weight_decay=self.args.wd)
            return optimizer
        else:
            optimizer = torch.optim.AdamW(self.parameters(),
                                          weight_decay=self.args.wd,
                                          lr=self.initial_lr_conf)
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                                   mode='min',
                                                                   factor=0.5,
                                                                   patience=5,
                                                                   min_lr=self.min_lr_conf,
                                                                   verbose=True)
            return {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "ade_val"}

    def on_train_epoch_start(self):
        """Applies the freeze/unfreeze schedule and logs the learning rate."""
        if self.args.freeze_decoder:
            # Trigger weight freeze and optimizer reinit on mod_freeze_epoch
            if self.current_epoch == self.args.mod_freeze_epoch:
                self.freeze()
                self.trainer.strategy.setup_optimizers(self.trainer)
            if self.current_epoch == self.args.mod_full_unfreeze_epoch:
                self.args.freeze_decoder = False
                self.full_unfreeze()
                self.trainer.strategy.setup_optimizers(self.trainer)

            # Set learning rate according to current epoch
            for single_param in self.optimizers().param_groups:
                single_param["lr"] = self.get_lr(self.current_epoch)
                self.log("lr", single_param["lr"], prog_bar=True, sync_dist=True)
        else:
            # The scheduler controls the LR here; just log it
            for single_param in self.optimizers().param_groups:
                self.log("lr", single_param["lr"], prog_bar=True, sync_dist=True)

    def training_step(self, train_batch, batch_idx):
        """Single optimization step: forward pass + loss on a training batch."""
        out, conf = self.forward(train_batch)
        loss = self.prediction_loss(out, train_batch["gt"], conf)
        self.log("loss_train", loss, sync_dist=True)
        return loss

    def validation_step(self, val_batch, batch_idx):
        """Validation loss plus per-step predictions/targets for epoch-level
        metric computation."""
        out, conf = self.forward(val_batch)
        loss = self.prediction_loss(out, val_batch["gt"], conf)
        self.log("loss_val", loss, sync_dist=True)

        # Keep only the focal agent, as numpy arrays
        pred = [x[0].detach().cpu().numpy() for x in out]
        gt = [x[0].detach().cpu().numpy() for x in val_batch["gt"]]
        if not self.args.freeze_decoder: conf = [x[0].detach().cpu().numpy() for x in conf]
        return {"predictions": pred,
                "groundtruth": gt,
                "confidences": conf} # = validation_outputs

    def validation_epoch_end(self, validation_outputs):
        """Aggregates the per-step outputs and logs ADE/FDE (K=1 and best-of-K)."""
        pred = [out["predictions"] for out in validation_outputs]
        pred = np.concatenate(pred, 0) # predictions along all validation steps
        gt = [out["groundtruth"] for out in validation_outputs]
        gt = np.concatenate(gt, 0) # ground-truth along all validation steps
        if self.args.freeze_decoder:
            conf = None
        else:
            conf = [out["confidences"] for out in validation_outputs]
            conf = np.concatenate(conf, 0) # confidences along all validation steps

        ade1, fde1, ade, fde = self.calc_prediction_metrics(pred, gt, conf)
        self.log("ade1_val", ade1, prog_bar=True, sync_dist=True)
        self.log("fde1_val", fde1, prog_bar=True, sync_dist=True)
        self.log("ade_val", ade, prog_bar=True, sync_dist=True)
        self.log("fde_val", fde, prog_bar=True, sync_dist=True)
# Layers
class LinearEmbedding(nn.Module):
    """Projects raw per-timestep agent features into the social latent
    space and applies a ReLU non-linearity."""

    def __init__(self, input_size, args):
        super().__init__()
        self.args = args
        self.input_size = input_size
        self.output_size = args.social_latent_size
        # Single fully-connected projection layer
        self.encoder_input_layer = nn.Linear(self.input_size, self.output_size)

    def forward(self, linear_input):
        """Returns relu(Linear(linear_input)) with last dim = output_size."""
        return F.relu(self.encoder_input_layer(linear_input))
class PositionalEncoding1D(nn.Module):
    """Sinusoidal 1D positional encoding with shape-based caching."""

    def __init__(self, channels):
        """
        :param channels: The last dimension of the tensor you want to apply pos emb to.
        """
        super().__init__()
        self.org_channels = channels
        # Round up to an even channel count so the sin/cos halves match
        channels = int(np.ceil(channels / 2) * 2)
        self.channels = channels
        inv_freq = 1.0 / (10000 ** (torch.arange(0, channels, 2).float() / channels))
        self.register_buffer("inv_freq", inv_freq)
        self.cached_penc = None

    def forward(self, tensor):
        """
        :param tensor: A 3d tensor of size (batch_size, x, ch)
        :return: Positional Encoding Matrix of size (batch_size, x, ch)
        """
        if len(tensor.shape) != 3:
            raise RuntimeError("The input tensor has to be 3d!")

        # Reuse the cached encoding while the input shape is unchanged
        cached = self.cached_penc
        if cached is not None and cached.shape == tensor.shape:
            return cached
        self.cached_penc = None

        batch_size, seq_len, orig_ch = tensor.shape
        positions = torch.arange(seq_len, device=tensor.device).type(self.inv_freq.type())
        angles = torch.einsum("i,j->ij", positions, self.inv_freq)
        sin_cos = torch.cat((angles.sin(), angles.cos()), dim=-1)

        table = torch.zeros((seq_len, self.channels), device=tensor.device).type(tensor.type())
        table[:, : self.channels] = sin_cos
        # Broadcast over the batch and trim any padded channel
        self.cached_penc = table[None, :, :orig_ch].repeat(batch_size, 1, 1)
        return self.cached_penc
class EncoderTransformer(nn.Module):
    """Single-layer Transformer encoder over time; returns the last step only."""

    def __init__(self, args):
        super(EncoderTransformer, self).__init__()
        self.args = args
        self.d_model = self.args.social_latent_size  # embedding dimension
        # NOTE(review): nhead is set equal to the latent size (1-dim heads);
        # confirm this is intended rather than args.num_attention_heads.
        self.nhead = self.args.social_latent_size
        self.d_hid = 1  # dimension of the feedforward network model in nn.TransformerEncoder
        self.num_layers = 1
        self.dropout = self.args.apply_dropout
        self.encoder_layer = nn.TransformerEncoderLayer(
            self.d_model, self.nhead, self.d_hid, self.dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(
            self.encoder_layer, num_layers=self.num_layers)

    def forward(self, transformer_in, agents_per_sample):
        # agents_per_sample is unused here; kept for interface symmetry.
        encoded = F.relu(self.transformer_encoder(transformer_in))
        return encoded[:, -1, :]
class AgentGNN(nn.Module):
    """Two CGConv layers over a fully-connected per-sample agent graph.

    Edge attributes are 2-D relative offsets between agent centers.
    """

    def __init__(self, args):
        super(AgentGNN, self).__init__()
        self.args = args
        self.latent_size = args.social_latent_size
        # Crystal-graph convolutions; dim=2 matches the 2-D edge attributes.
        self.gcn1 = conv.CGConv(self.latent_size, dim=2, batch_norm=True)
        self.gcn2 = conv.CGConv(self.latent_size, dim=2, batch_norm=True)

    def forward(self, gnn_in, centers, agents_per_sample):
        # gnn_in is a batch and has the shape (batch_size, number_of_agents, latent_size)
        x, edge_index = gnn_in, self.build_fully_connected_edge_idx(
            agents_per_sample).to(gnn_in.device)
        edge_attr = self.build_edge_attr(edge_index, centers).to(gnn_in.device)
        x = F.relu(self.gcn1(x, edge_index, edge_attr))
        gnn_out = F.relu(self.gcn2(x, edge_index, edge_attr))
        # Split the flat node output back into one tensor per sample.
        edge_index_out1 = []
        for i in agents_per_sample:
            edge_index_out1.append(gnn_out[0:i, :])
            gnn_out = gnn_out[i:, :]
        return edge_index_out1

    def build_fully_connected_edge_idx(self, agents_per_sample):
        """Build one fully-connected (no self-loop) subgraph per sample and
        concatenate them into a single edge_index with node-index offsets."""
        edge_index = []
        # In the for loop one subgraph is built (no self edges!)
        # The subgraph gets offsetted and the full graph over all samples in the batch
        # gets appended with the offsetted subgrah
        offset = 0
        for i in range(len(agents_per_sample)):
            num_nodes = agents_per_sample[i]
            # Dense all-ones adjacency with the diagonal zeroed (no self edges).
            adj_matrix = torch.ones((num_nodes, num_nodes))
            adj_matrix = adj_matrix.fill_diagonal_(0)
            sparse_matrix = sparse.csr_matrix(adj_matrix.numpy())
            edge_index_subgraph, _ = from_scipy_sparse_matrix(sparse_matrix)
            # Offset the list
            edge_index_subgraph = torch.Tensor(
                np.asarray(edge_index_subgraph) + offset)
            offset += agents_per_sample[i]
            edge_index.append(edge_index_subgraph)
        # Concat the single subgraphs into one
        edge_index = torch.LongTensor(np.column_stack(edge_index))
        return edge_index

    def build_edge_attr(self, edge_index, data):
        """Edge feature = destination center minus source center (2-D offset)."""
        edge_attr = torch.zeros((edge_index.shape[-1], 2), dtype=torch.float)
        rows, cols = edge_index
        # goal - origin
        edge_attr = data[cols] - data[rows]
        return edge_attr
class DecoderResidual(nn.Module):
    """Bank of PredictionNet heads (one per mode) plus an optional confidence head.

    With args.freeze_decoder, the set of heads used depends on train/eval mode
    and on whether the first head is frozen; otherwise all heads run and a
    per-mode softmax confidence is produced.
    """

    def __init__(self, args):
        super(DecoderResidual, self).__init__()
        self.args = args
        self.latent_size = self.args.decoder_latent_size
        self.num_modes = self.args.num_modes
        output = []
        for i in range(sum(args.mod_steps)):
            output.append(PredictionNet(args))
        self.output = nn.ModuleList(output)  # is just like a Python list. It was designed to store any desired number of nn.Module’s
        if not self.args.freeze_decoder or self.args.mod_full_unfreeze_epoch != -1:
            # Classification
            norm = "BN"
            ng = 1
            # Project the flattened multimodal prediction back to latent size so
            # it can be concatenated with the decoder input for scoring.
            self.latent_predictions = nn.Linear(self.args.num_modes * self.args.pred_len * self.args.data_dim,
                                                self.latent_size)
            self.confidences = nn.Sequential(LinearRes(self.latent_size*2, self.latent_size*2, norm=norm, ng=ng),
                                             nn.Linear(self.latent_size*2, self.num_modes))

    def forward(self, decoder_in, is_frozen, current_epoch):
        batch_size = decoder_in.shape[0]
        if self.args.freeze_decoder:
            sample_wise_out = []
            if self.training is False:  # If you are validating or test, use all decoders
                for out_subnet in self.output:
                    sample_wise_out.append(out_subnet(decoder_in))
            elif is_frozen:  # If the first decoder has been frozen, decode and train the remaining ones
                for i in range(self.args.mod_steps[0], sum(self.args.mod_steps)):
                    sample_wise_out.append(self.output[i](decoder_in))
            else:  # If you are training and is_frozen = False, use only the first decoder
                sample_wise_out.append(self.output[0](decoder_in))
            decoder_out = torch.stack(sample_wise_out)
            decoder_out = torch.swapaxes(decoder_out, 0, 1)
            # No confidences in the frozen regime.
            return decoder_out, []
        else:
            sample_wise_out = []
            for out_subnet in self.output:
                sample_wise_out.append(out_subnet(decoder_in))
            decoder_out = torch.stack(sample_wise_out)
            decoder_out = torch.swapaxes(decoder_out, 0, 1)
            latent_predictions = self.latent_predictions(decoder_out.contiguous().view(batch_size, -1))
            conf_latent = torch.cat([decoder_in,
                                     latent_predictions],
                                    dim=1)
            conf = self.confidences(conf_latent)
            conf = torch.softmax(conf.view(batch_size, -1), dim=1)  # batch_size, num_modes
            # NOTE(review): leftover debug trap; fires if softmax rows don't sum to 1.
            if not torch.allclose(torch.sum(conf, dim=1), conf.new_ones((batch_size,))):
                pdb.set_trace()
            return decoder_out, conf

    def unfreeze_layers(self):
        # Re-enable gradients on every head except the first (kept frozen).
        for layer in range(self.args.mod_steps[0], sum(self.args.mod_steps)):  # Unfreeze all decoders except the first one
            for param in self.output[layer].parameters():
                param.requires_grad = True
class LinearRes(nn.Module):
    """Three-layer linear residual block with GroupNorm or BatchNorm.

    Args:
        n_in: input feature size.
        n_out: output feature size.
        norm: 'GN' (GroupNorm) or 'BN' (BatchNorm1d); 'SyncBN' is unsupported.
        ng: requested number of GroupNorm groups (reduced via gcd so it divides n_out).
    """

    def __init__(self, n_in, n_out, norm='GN', ng=32):
        super(LinearRes, self).__init__()
        assert(norm in ['GN', 'BN', 'SyncBN'])
        self.linear1 = nn.Linear(n_in, n_out)
        self.linear2 = nn.Linear(n_out, n_out)
        self.linear3 = nn.Linear(n_out, n_out)
        self.relu = nn.ReLU(inplace=True)
        if norm == 'GN':
            self.norm1 = nn.GroupNorm(gcd(ng, n_out), n_out)
            self.norm2 = nn.GroupNorm(gcd(ng, n_out), n_out)
            # BUGFIX: norm3 was only created in the BN branch, so forward()
            # crashed with AttributeError whenever norm == 'GN'.
            self.norm3 = nn.GroupNorm(gcd(ng, n_out), n_out)
        elif norm == 'BN':
            self.norm1 = nn.BatchNorm1d(n_out)
            self.norm2 = nn.BatchNorm1d(n_out)
            self.norm3 = nn.BatchNorm1d(n_out)
        else:
            exit('SyncBN has not been added!')
        if n_in != n_out:
            # Project the residual path when input/output widths differ.
            if norm == 'GN':
                self.transform = nn.Sequential(
                    nn.Linear(n_in, n_out, bias=False),
                    nn.GroupNorm(gcd(ng, n_out), n_out))
            elif norm == 'BN':
                self.transform = nn.Sequential(
                    nn.Linear(n_in, n_out, bias=False),
                    nn.BatchNorm1d(n_out))
            else:
                exit('SyncBN has not been added!')
        else:
            self.transform = None

    def forward(self, x):
        # Main path: (linear -> norm -> relu) x2 -> linear -> norm.
        out = self.linear1(x)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.linear2(out)
        out = self.norm2(out)
        out = self.relu(out)
        out = self.linear3(out)
        out = self.norm3(out)
        # Residual path: identity, or the projection built above.
        if self.transform is not None:
            out += self.transform(x)
        else:
            out += x
        out = self.relu(out)
        return out
class PredictionNet(nn.Module):
    """Residual MLP head that regresses a flattened predicted trajectory."""

    def __init__(self, args):
        super(PredictionNet, self).__init__()
        self.args = args
        self.latent_size = args.decoder_latent_size
        self.weight1 = nn.Linear(self.latent_size, self.latent_size)
        self.norm1 = nn.GroupNorm(1, self.latent_size)
        self.weight2 = nn.Linear(self.latent_size, self.latent_size)
        self.norm2 = nn.GroupNorm(1, self.latent_size)  # Batch normalization solves a major problem called internal covariate shift.
        self.output_fc = nn.Linear(self.latent_size, args.pred_len * 2)

    def forward(self, prednet_in):
        # Residual layer: two norm'd linear maps plus a skip connection.
        hidden = F.relu(self.norm1(self.weight1(prednet_in)))
        hidden = self.norm2(self.weight2(hidden))
        hidden = F.relu(hidden + prednet_in)
        # Last layer has no activation function
        return self.output_fc(hidden)
class map_smooth_decoder(nn.Module):
    """Compress per-centerline latent sequences into one map feature per sample."""

    def __init__(self, args):
        super(map_smooth_decoder, self).__init__()
        self.args = args
        self.latent_size = self.args.map_latent_size
        self.norm0 = nn.BatchNorm1d(self.latent_size)
        self.conv1 = nn.Conv1d(self.latent_size, self.latent_size // 4, kernel_size=3, padding=1)
        self.norm1 = nn.BatchNorm1d(self.latent_size // 4)
        self.conv2 = nn.Conv1d(self.latent_size // 4, self.latent_size // 8, kernel_size=3, padding=1)
        self.norm2 = nn.BatchNorm1d(self.latent_size // 8)
        self.linear3 = nn.Linear(self.args.centerline_length * (self.latent_size // 8), self.latent_size // 8)
        self.norm3 = nn.BatchNorm1d(self.latent_size // 8)
        self.linear4 = nn.Linear(self.args.num_centerlines * (self.latent_size // 8), self.latent_size)

    def forward(self, x):
        # x: (batch * num_centerlines, centerline_length, latent_size)
        total_centerlines = x.shape[0]
        batch_size = total_centerlines // self.args.num_centerlines
        feats = self.norm0(x.permute(0, 2, 1))  # channels-first for Conv1d
        feats = self.norm1(F.relu(self.conv1(feats)))
        feats = self.norm2(F.relu(self.conv2(feats)))
        flat = feats.contiguous().view(total_centerlines, -1)
        feats = self.norm3(F.relu(self.linear3(flat)))
        # Merge every centerline of a sample into a single map embedding.
        return self.linear4(feats.contiguous().view(batch_size, -1))
class MLP(nn.Module):
    """Two-layer perceptron: Linear -> LayerNorm -> GELU -> Linear."""

    def __init__(self, input_size, output_size) -> None:
        super(MLP, self).__init__()
        self.linear1 = nn.Linear(input_size, output_size // 2)
        self.norm = nn.LayerNorm(output_size // 2)
        self.GELU = nn.GELU()
        self.linear2 = nn.Linear(output_size // 2, output_size)

    def forward(self, x):
        # Narrow hidden layer (output_size // 2), normalised and GELU-activated.
        return self.linear2(self.GELU(self.norm(self.linear1(x))))
class MapSubNet(nn.Module):
    """Hierarchical attention encoder for candidate centerlines.

    Two MLP + self-attention stages progressively widen the feature dimension,
    then map_smooth_decoder compresses all centerlines of a sample into one
    vector.
    """

    def __init__(self, args, depth=None):
        super(MapSubNet, self).__init__()
        self.args = args
        if depth is None:
            depth = 2
        self.hidden_size = self.args.map_latent_size
        self.input_dim = self.args.data_dim
        self.dropout = self.args.apply_dropout
        self.MLPs = nn.ModuleList([MLP(self.input_dim, self.hidden_size // 8), MLP(self.hidden_size // 4, self.hidden_size // 2)])
        self.Attn = nn.ModuleList([nn.MultiheadAttention(self.hidden_size // 8, self.args.num_attention_heads, dropout=self.dropout),
                                   nn.MultiheadAttention(self.hidden_size // 2, self.args.num_attention_heads, dropout=self.dropout)])
        self.Norms = nn.ModuleList([nn.LayerNorm(self.hidden_size // 4), nn.LayerNorm(self.hidden_size)])
        self.final_layer = map_smooth_decoder(self.args)

    def forward(self, inputs, inputs_mask):
        hidden_states_batch = inputs
        # NOTE(review): the mask is currently unused — the masked attention
        # call below is commented out; confirm whether padding should be masked.
        hidden_states_mask = inputs_mask
        for layer_index, layer in enumerate(self.Attn):
            hidden_states_batch = self.MLPs[layer_index](hidden_states_batch)
            # Leftover debug traps: drop into pdb if NaNs appear mid-pipeline.
            if torch.any(torch.isnan(hidden_states_batch)):
                pdb.set_trace()
            temp = hidden_states_batch
            # MultiheadAttention here expects (seq, batch, dim), hence the permutes.
            query = key = value = hidden_states_batch.permute(1, 0, 2)
            # hidden_states_batch = layer(query, key, value=value, attn_mask=None, key_padding_mask=hidden_states_mask)[0].permute(1,0,2)
            hidden_states_batch = layer(query, key, value=value)[0].permute(1, 0, 2)
            if torch.any(torch.isnan(hidden_states_batch)):
                pdb.set_trace()
            # Concatenate attention output with its input (doubles the width),
            # then normalise and activate.
            hidden_states_batch = torch.cat([hidden_states_batch, temp], dim=2)
            hidden_states_batch = self.Norms[layer_index](hidden_states_batch)
            hidden_states_batch = F.relu(hidden_states_batch)
            if torch.any(torch.isnan(hidden_states_batch)):
                pdb.set_trace()
        if torch.any(torch.isnan(hidden_states_batch)):
            pdb.set_trace()
        hidden_states_batch = self.final_layer(hidden_states_batch)
        return hidden_states_batch
class TransformerDecoder(nn.Module):
    """Standard transformer decoder layer: self-attention, cross-attention and
    a feed-forward block, each wrapped with dropout, residual and LayerNorm."""

    def __init__(self, hidden_size, head_num=8, dropout=0.1) -> None:
        super(TransformerDecoder, self).__init__()
        self.self_attn = nn.MultiheadAttention(hidden_size, head_num, dropout)
        self.cross_attn = nn.MultiheadAttention(hidden_size, head_num, dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.dropout4 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(hidden_size)
        self.norm2 = nn.LayerNorm(hidden_size)
        self.norm3 = nn.LayerNorm(hidden_size)
        self.linear1 = nn.Linear(hidden_size, 256)
        self.linear2 = nn.Linear(256, hidden_size)

    def forward(self, x_padding, y_padding):
        # 1) Self-attention over the query sequence.
        attended = self.self_attn(query=x_padding, key=x_padding, value=x_padding)[0]
        x_padding = self.norm1(x_padding + self.dropout1(attended))
        # 2) Cross-attention against the memory sequence.
        attended = self.cross_attn(query=x_padding, key=y_padding, value=y_padding)[0]
        x_padding = self.norm2(x_padding + self.dropout2(attended))
        # 3) Position-wise feed-forward network with a 256-wide hidden layer.
        ff = self.dropout3(F.relu(self.linear1(x_padding)))
        ff = self.linear2(ff)
        return self.norm3(x_padding + self.dropout4(ff))
class Temporal_Multimodal_Decoder(nn.Module):
    """Autoregressive LSTM decoder producing num_modes trajectories plus
    softmax mode confidences, using a sliding window of past displacements."""

    def __init__(self, args):
        super(Temporal_Multimodal_Decoder, self).__init__()
        self.args = args
        self.data_dim = self.args.data_dim
        self.obs_len = self.args.obs_len
        self.pred_len = self.args.pred_len
        self.window_size = self.args.decoder_temporal_window_size
        self.decoder_h_dim = self.args.decoder_latent_size
        self.num_modes = self.args.num_modes
        # Embeds the flattened displacement window before feeding the LSTM.
        self.spatial_embedding = nn.Linear(self.window_size*2, self.window_size*4)
        self.decoder = nn.LSTM(self.window_size*4,
                               self.decoder_h_dim,
                               num_layers=1)
        # One regression head per mode.
        pred = []
        for _ in range(self.num_modes):
            pred.append(nn.Linear(self.decoder_h_dim, self.data_dim))
        self.hidden2pos = nn.ModuleList(pred)
        norm = "BN"
        ng = 1
        # Confidences
        self.latent_predictions = nn.Linear(self.args.num_modes*self.args.pred_len*self.args.data_dim,
                                            self.decoder_h_dim)
        self.confidences = nn.Sequential(LinearRes(self.decoder_h_dim*2, self.decoder_h_dim*2, norm=norm, ng=ng),
                                         nn.Linear(self.decoder_h_dim*2, self.num_modes))

    def forward(self, traj_rel, state_tuple, num_mode=None, current_centerlines=None):
        """Decode multimodal future displacements autoregressively.

        Args:
            traj_rel: observed relative displacements (window of past steps).
            state_tuple: (h, c) initial LSTM state; h also feeds the confidence head.
            num_mode: unused; kept for interface compatibility (shadowed by the loop).
            current_centerlines: unused; kept for interface compatibility.

        Returns:
            pred_traj_fake_rel: (batch_size, num_modes, pred_len, data_dim)
            conf: (batch_size, num_modes) softmax mode confidences.
        """
        traj_rel = traj_rel.permute(1, 0, 2)
        num_displacements, batch_size, data_dim = traj_rel.shape
        state_tuple_h, state_tuple_c = state_tuple
        pred_traj_fake_rel = []
        for num_mode in range(self.num_modes):
            # Fresh copy of the observation window and hidden state per mode.
            traj_rel_ = torch.clone(traj_rel)
            decoder_input = F.leaky_relu(self.spatial_embedding(traj_rel_.permute(1,0,2).contiguous().view(batch_size,-1)))  # bs x window_size·2
            decoder_input = decoder_input.unsqueeze(0)
            decoder_input = F.dropout(decoder_input, p=self.args.apply_dropout, training=self.training)
            state_tuple_h_ = torch.clone(state_tuple_h)
            state_tuple_c_ = torch.zeros(tuple(state_tuple_h_.shape)).to(state_tuple_h_)
            curr_pred_traj_fake_rel = []
            for _ in range(self.pred_len):
                output, (state_tuple_h_, state_tuple_c_) = self.decoder(decoder_input, (state_tuple_h_, state_tuple_c_))
                rel_pos = self.hidden2pos[num_mode](output.contiguous().view(-1, self.decoder_h_dim))
                # Slide the window: drop the oldest step, append the new prediction.
                traj_rel_ = torch.roll(traj_rel_, -1, dims=(0))
                traj_rel_[-1] = rel_pos
                curr_pred_traj_fake_rel.append(rel_pos)
                decoder_input = F.leaky_relu(self.spatial_embedding(traj_rel_.permute(1,0,2).contiguous().view(batch_size,-1)))  # bs x window_size·2
                decoder_input = decoder_input.unsqueeze(0)
                decoder_input = F.dropout(decoder_input, p=self.args.apply_dropout, training=self.training)
            curr_pred_traj_fake_rel = torch.stack(curr_pred_traj_fake_rel, dim=0)
            curr_pred_traj_fake_rel = curr_pred_traj_fake_rel.permute(1, 0, 2)
            pred_traj_fake_rel.append(curr_pred_traj_fake_rel)
        pred_traj_fake_rel = torch.stack(pred_traj_fake_rel, dim=0)  # num_modes, batch_size, pred_len, data_dim
        pred_traj_fake_rel = pred_traj_fake_rel.permute(1, 0, 2, 3)  # batch_size, num_modes, pred_len, data_dim
        # Obtain confidences based on the initial latent state and the predictions
        predictions_latent = self.latent_predictions(pred_traj_fake_rel.contiguous().view(batch_size, -1))
        state_tuple_h = state_tuple_h.squeeze(0)
        conf_latent = torch.cat([state_tuple_h,
                                 predictions_latent],
                                dim=1)
        conf = self.confidences(conf_latent)
        conf = torch.softmax(conf.view(batch_size, -1), dim=1)  # batch_size, num_modes
        # NOTE(review): leftover debug trap for non-normalised softmax rows.
        if not torch.allclose(torch.sum(conf, dim=1), conf.new_ones((batch_size,))):
            pdb.set_trace()
        return pred_traj_fake_rel, conf
# Aux functions
def relative_to_abs_multimodal(rel_traj, start_pos):
    """
    Inputs:
    - rel_traj: pytorch tensor of shape (batch_size, num_modes, seq_len, 2)
    - start_pos: pytorch tensor of shape (batch_size, 2)
      N.B. If you only have the predictions, this must be the last observation.
      If you have the whole trajectory (obs+pred), this must be the first observation,
      since you must reconstruct the relative displacements from this position
    Outputs:
    - abs_traj: pytorch tensor of shape (batch_size, num_modes, seq_len, 2)
    """
    # Integrate per-step displacements along the time axis, then shift every
    # mode of every sample by its start position (broadcast over modes/steps).
    cumulative = torch.cumsum(rel_traj, dim=2)
    anchor = start_pos[:, None, None, :]
    return cumulative + anchor
{
"api_name": "sys.version_info",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "torch.set_float32_matmul_precision",
"line_number": 43,
"usage_type": "call"
},
{
"api_... |
39265812608 | import pandas as pd
import plotly.graph_objects as go
import prepare_data
# Resident population per state/territory, used to normalise case counts to a
# per-100,000-people rate. 'Total' and 'DeathsNationally' use the national figure.
population = {
    'NSW': 8089526,
    'QLD': 5095100,
    'VIC': 6594804,
    'SA': 1751693,
    'WA': 2621680,
    'TAS': 534281,
    'ACT': 426709,
    'NT': 245869,
    'Total': 25359662,
    'DeathsNationally': 25359662,
}

df_aus = prepare_data.australia()
df_aus_change = prepare_data.australia_change(df_aus)

# --- Figure 1: cumulative per-capita cases -------------------------------
fig = go.Figure()
# Plot all the states!
for state in list(df_aus):
    fig.add_trace(go.Scatter(
        x=df_aus.index,
        y=pd.to_numeric(df_aus[state]).divide(population[state])*100000,
        name=state,
    ))
# Make the plot look fancy. (BUGFIX: title typo "Austalia" -> "Australia".)
fig.update_layout(title='Per Capita COVID-19 Cases by State/Territory in Australia',
                  xaxis_title='Date',
                  yaxis_title='Cases per 100,000 people')
fig.show()

# --- Figure 2: daily per-capita change -----------------------------------
fig_change = go.Figure()
# Plot all the states!
for state in list(df_aus_change):
    fig_change.add_trace(go.Scatter(
        x=df_aus_change.index,
        y=pd.to_numeric(df_aus_change[state]).divide(population[state])*100000,
        name=state,
    ))
# Make the plot look fancy.
fig_change.update_layout(title='Per Capita Change in COVID-19 Cases by State/Territory in Australia',
                         xaxis_title='Date',
                         yaxis_title='Change in cases per 100,000 people')
fig_change.show()

# --- Figure 3: 7-day rolling mean of the daily change --------------------
# Roll those numbers over a week
df_aus_change = df_aus_change.rolling(7).mean()
fig_rolling_change = go.Figure()
# BUGFIX: iterate the columns of df_aus_change (the frame actually plotted)
# instead of df_aus, matching the loop in Figure 2.
for state in list(df_aus_change):
    fig_rolling_change.add_trace(go.Scatter(
        x=df_aus_change.index,
        y=pd.to_numeric(df_aus_change[state]).divide(population[state])*100000,
        name=state,
    ))
# Make the plot look fancy.
fig_rolling_change.update_layout(
    title='7-day Rolling Per Capita Change in COVID-19 Cases by State/Territory in Australia',
    xaxis_title='Date',
    yaxis_title='Change in cases per 100,000 people'
)
fig_rolling_change.show()
| explodingdinosaurs/corona | aus_states_per_capita.py | aus_states_per_capita.py | py | 2,043 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "prepare_data.australia",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "prepare_data.australia_change",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 22,
"usage_type": "call"
},
{
"... |
28891405121 | import collections
import difflib
import logging
import os
import re
from pytype.platform_utils import path_utils
from pytype.tools.merge_pyi import merge_pyi
import unittest
__all__ = ('TestBuilder', 'load_tests')
PY, PYI, EXPECTED = 'py', 'pyi', 'pep484.py'
OVERWRITE_EXPECTED = 0 # flip to regenerate expected files
def load_tests(unused_loader, standard_tests, unused_pattern):
    """unittest load_tests hook: append the data-driven regression suite."""
    data_root = path_utils.join(path_utils.dirname(__file__), 'test_data')
    standard_tests.addTests(TestBuilder().build(data_root))
    return standard_tests
class TestBuilder:
    """Builds a unittest suite from (py, pyi, expected) file triples."""

    def build(self, data_dir):
        """Return a unittest.TestSuite with tests for the files in data_dir."""
        suite = unittest.TestSuite()
        for base, by_ext in sorted(self._get_files_by_base(data_dir).items()):
            # A test needs both the input .py and the .pyi stub ...
            if PY not in by_ext or PYI not in by_ext:
                continue
            # ... and, unless we are regenerating, the expected output too.
            if not OVERWRITE_EXPECTED and EXPECTED not in by_ext:
                continue
            outfile = path_utils.join(data_dir, base + '.' + EXPECTED)
            suite.addTest(build_regression_test(by_ext[PY], by_ext[PYI], outfile))
        return suite

    def _get_files_by_base(self, data_dir):
        """Map each file's base name to a {extension: full path} dict."""
        file_pat = re.compile(r'(?P<filename>(?P<base>.+?)\.(?P<ext>.*))$')
        grouped = collections.defaultdict(dict)
        for m in map(file_pat.match, os.listdir(data_dir)):
            if not m:
                continue
            path = path_utils.join(data_dir, m.group('filename'))
            grouped[m.group('base')][m.group('ext')] = path
        return grouped
def build_regression_test(py, pyi, outfile):
    """Return a TestCase instance that merges `pyi` into `py` and compares the
    result against `outfile` (or rewrites it when OVERWRITE_EXPECTED is set)."""

    def regression_test(test_case):
        py_input, pyi_src = (_read_file(f) for f in (py, pyi))
        try:
            output = merge_pyi.merge_sources(py=py_input, pyi=pyi_src)
        except merge_pyi.MergeError as e:
            # BUGFIX: the error used to be silently swallowed (`pass`), leaving
            # `output` undefined and crashing below with a confusing NameError.
            # Fail the test with the actual merge error instead.
            test_case.fail(f'merge_sources failed for {py}: {e}')
        if OVERWRITE_EXPECTED:
            with open(outfile, 'w') as f:
                f.write(output)
        else:
            expected = _read_file(outfile)
            test_case.assertEqual(expected, output, _get_diff(expected, output))

    # Derive a stable test-method name from the expected-output file name.
    name = path_utils.splitext(path_utils.basename(outfile))[0].replace('.', '_')
    test = f'test_{name}'
    case = type('RegressionTest', (unittest.TestCase,), {test: regression_test})
    return case(test)
def _read_file(filename):
with open(filename) as f:
return f.read()
def _get_diff(a, b):
a, b = a.split('\n'), b.split('\n')
diff = difflib.Differ().compare(a, b)
return '\n'.join(diff)
if __name__ == '__main__':
    # Silence library logging noise before delegating to unittest's CLI runner.
    logging.basicConfig(level=logging.CRITICAL)
    unittest.main()
| google/pytype | pytype/tools/merge_pyi/merge_pyi_test.py | merge_pyi_test.py | py | 2,585 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "pytype.platform_utils.path_utils.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pytype.platform_utils.path_utils",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pytype.platform_utils.path_utils.dirname",
"line_number": 20,
"usa... |
28148567000 | import datetime
import time
from gpiozero import LED, Device
from gpiozero.pins.pigpio import PiGPIOFactory
# Route gpiozero through the pigpio daemon backend.
Device.pin_factory = PiGPIOFactory()

# NOTE: Change this to match the GPIO pin you're connecting the LED to
led = LED(18)

# NOTE: Change these values to set the time you want the light to turn on and off at
weekday_on_time = datetime.time(hour=7, minute=0, second=0)
weekday_off_time = datetime.time(hour=17, minute=0, second=0)
weekend_on_time = datetime.time(hour=7, minute=30, second=0)
weekend_off_time = datetime.time(hour=17, minute=0, second=0)

# Poll once a minute forever, driving the LED while the current time falls
# inside the day's on/off window.
while True:
    dayOfWeek = datetime.datetime.now().weekday()  # Monday == 0 ... Sunday == 6
    currentTime = datetime.datetime.now().time()
    # Weekdays are indices 0-4; weekends get their own schedule.
    on_time = weekday_on_time if dayOfWeek < 5 else weekend_on_time
    off_time = weekday_off_time if dayOfWeek < 5 else weekend_off_time
    if currentTime > on_time and currentTime < off_time:
        led.on()
    else:
        led.off()
    time.sleep(60)
| szh/pi-timedlight | timedlight.py | timedlight.py | py | 936 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "gpiozero.Device.pin_factory",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "gpiozero.Device",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "gpiozero.pins.pigpio.PiGPIOFactory",
"line_number": 7,
"usage_type": "call"
},
{
"... |
70943213544 | """Module to index columns of the paper-summarized CSV file."""
import pandas as pd
from loguru import logger
from omegaconf import OmegaConf
from utils import create_embeddings
# Load the configuration
cfg = OmegaConf.load("conf/config.yaml")

FILE_PATH = cfg.data.path
INDEXED_FILE_PATH = cfg.data.indexed_path

# Source CSV is gzip-compressed with a header row.
df = pd.read_csv(cfg.data.path, compression="gzip", header=0)
logger.info(f"Loaded {len(df)} rows from {cfg.data.path}")
logger.info(f"columns: {df.columns}")

# Embed the free-text columns so they can be searched downstream.
logger.info("Creating embeddings for experiment time")
df = create_embeddings(df, ["experiment time"])

logger.info("Creating embeddings for device")
df = create_embeddings(df, ["device"])
print(df.head(5))

# Persist the enriched frame next to the original (also gzip-compressed).
logger.info(f"Saving indexed file to {INDEXED_FILE_PATH}")
df.to_csv(INDEXED_FILE_PATH, compression="gzip", index=False)
logger.success("Done!")
| naarkhoo/LiteGrave | src/index_csv_columns.py | index_csv_columns.py | py | 831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "omegaconf.OmegaConf.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "omegaconf.OmegaConf",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "loguru.log... |
29654762562 | import re
from io import StringIO
from flask import Flask, request, Response, redirect
import pandas as pd
app = Flask(__name__)
def is_valid_query(q):
    '''
    A query is valid if it is strictly consisted of the following three entries:
    [(A-Z, a-z, 0-9)+ or *] [==, !=, $=, &=] ["..."]
    Queries can be concat with 'and' & 'or'. The 'and' 'or' operators are executed in sequential order.
    Entries and operators must be separated by at least one single space. i.e. {C1=="a"} is not acceptable.
    Additional white spaces are allowed between entries.
    For the "" inside the query, the query term is the content wrapped by the first and last occurance of "".
    In the processing of query term, any sequence of consecutive spaces is reduced to a single space for clarity.
    (since consecutive spaces usually do not convey any semantic meanings in phrases)
    NOTE: a bare 'and'/'or' token inside a quoted term is treated as a query
    separator, so terms containing those words are not supported.

    Output:
        a message indicating the validity of query ("valid" or error message),
        a list of valid queries (each query is represented by a 3-element list),
        a list of and/or operators
    '''
    entries = q.split()
    valid_q = []        # the 3 entries of the query currently being parsed
    queries = []        # list of completed valid_q triples
    operators = []      # 'and'/'or' operators between queries
    operand = ['==', '!=', '$=', '&=']  # valid comparison operators
    # Validity flags for the three entries of the current query.
    valid_first = False
    valid_second = False
    valid_third = False
    i = 0
    while i < len(entries):
        if not valid_first:
            column = re.findall(r'[A-Za-z0-9]+|\*', entries[i])
            # If valid, must be exactly one match covering the whole token,
            # e.g. "abc*123" yields three matches and is invalid.
            if len(column) != 1 or column[0] != entries[i]:
                return "Invalid column name", queries, operators
            else:
                valid_q.append(entries[i])
                valid_first = True
        elif not valid_second:
            if entries[i] not in operand:
                return "Invalid operator, must be ==, !=, $=, &=", queries, operators
            else:
                valid_q.append(entries[i])
                valid_second = True
        elif not valid_third:
            if entries[i][0] != '\"':
                return "Invalid query term, must begin with \"", queries, operators
            else:  # traverse the list to find the last " before the next query
                term = ""
                # The term runs up to (but not including) the next and/or token.
                if entries[i:].count('and') > 0:
                    end = entries[i:].index('and') + i
                    term = " ".join(entries[i:end])
                elif entries[i:].count('or') > 0:
                    end = entries[i:].index('or') + i
                    term = " ".join(entries[i:end])
                else:
                    end = len(entries)
                    term = " ".join(entries[i:])
                # test the validity of term
                if term[-1] != '\"':
                    return "Invalid query term, must end with \"", queries, operators
                else:
                    i = end
                    valid_q.append(term[1:-1])  # remove the front and end "" when storing
                    valid_third = True
                    continue
        else:
            # BUGFIX: this used to compare i against len(queries) - 1, which can
            # never be true here (i >= 4 * len(queries) + 3), so a dangling token
            # after a complete query was never reported as such. Compare against
            # the final entry index instead.
            if i == len(entries) - 1:
                return "Extra term after queries", queries, operators
            if entries[i] == 'and' or entries[i] == 'or':
                queries.append(valid_q)
                operators.append(entries[i])
                valid_q = []
                valid_first = valid_second = valid_third = False
            else:
                return "Invalid and/or operand between queries", queries, operators
        i += 1
    # Append the last valid query; anything partial is an incomplete query.
    if valid_first and valid_second and valid_third:
        queries.append(valid_q)
    else:
        return "Missing entries in queries", queries, operators
    return "valid", queries, operators
def match_query(queries, operators, df):
    '''
    This function matches the queries associated with the operators to df.
    Queries are applied left to right: 'or' unions the new matches into the
    accumulated result, 'and' restricts the next query to the current result.

    NOTE: updated for pandas 2.x — DataFrame.append and Series.iteritems were
    removed; pd.concat and column iteration are used instead (same behavior).

    Output:
        a message indicating the validity of query matching ('valid' or error message)
        matched rows in df
    '''
    columns = df.columns.tolist()
    res_df = pd.DataFrame(columns=columns)  # accumulated matching rows
    for i, q in enumerate(queries):
        # First query, or previous operator 'or': search the whole table.
        if i - 1 < 0 or operators[i - 1] == 'or':
            cur_df = df.astype(str)  # compare everything as strings
        elif operators[i - 1] == 'and':
            cur_df = res_df
        # select rows from df
        if q[0] == "*":
            # Wildcard column: collect matches from every column.
            select_df = pd.DataFrame(columns=columns)
            for col in cur_df.columns:
                if q[1] == "==":
                    select_df = pd.concat([select_df, cur_df[cur_df[col] == q[2]]])
                elif q[1] == "!=":
                    drop_df = cur_df[cur_df[col] == q[2]]
                    select_df = pd.concat([select_df, cur_df.drop(index=drop_df.index.tolist())])
                elif q[1] == "$=":
                    # Case-insensitive equality.
                    select_df = pd.concat([select_df, cur_df[cur_df[col].str.lower().isin([q[2].lower()])]])
                elif q[1] == "&=":
                    # Case-sensitive substring containment.
                    select_df = pd.concat([select_df, cur_df[cur_df[col].str.contains(q[2], case=True)]])
            cur_df = select_df.drop_duplicates(keep='first')
        else:
            if q[0] not in columns:
                return 'No corresponding column name in data', res_df
            elif q[0] not in cur_df.columns:
                cur_df = pd.DataFrame(columns=columns)  # no matching column, empty result
            else:
                if q[1] == "==":
                    cur_df = cur_df[cur_df[q[0]] == q[2]]
                elif q[1] == "!=":
                    drop_df = cur_df[cur_df[q[0]] == q[2]]
                    cur_df = cur_df.drop(index=drop_df.index.tolist())
                elif q[1] == "$=":
                    cur_df = cur_df[cur_df[q[0]].str.lower().isin([q[2].lower()])]
                elif q[1] == "&=":
                    cur_df = cur_df[cur_df[q[0]].str.contains(q[2], case=True)]
        # Fold the per-query result into the accumulator.
        if i - 1 < 0 or operators[i - 1] == 'or':
            res_df = pd.concat([res_df, cur_df]).drop_duplicates(keep='first')
        elif operators[i - 1] == 'and':
            res_df = cur_df
    if res_df.empty:
        return 'No corresponding items for the query', res_df
    return 'valid', res_df
@app.route('/')
def get_info():
    """Parse the `query` request argument, run it against data.csv and stage
    the matched rows in res.csv for the follow-up /getCSV download."""
    args = request.args
    query = args['query']
    # '&' will separate the query to two items, append it back
    # (Flask splits the raw query string on '&', so a literal '&' inside the
    # user's query arrives as extra key/value pairs that are re-joined here.)
    for key, val in args.items():
        if key == 'query':
            continue
        if key == '':
            query += '&=' + val
        else:
            query += '&' + key
    print(query)
    # Query error checking and parsing
    mes, queries, operators = is_valid_query(query)
    if mes != "valid":
        return mes
    print(queries)
    print(operators)
    # Query match
    df = pd.read_csv('data.csv')
    mes, res_df = match_query(queries, operators, df)
    if mes != "valid":
        return mes
    # Persist results for the follow-up /getCSV request (shared on-disk state).
    res_df.to_csv('res.csv')
    return '''
<html><body>
The query has been successfully processed.
To download the extracted results in a csv file, <a href="/getCSV">click me.</a>
</body></html>
'''
@app.route("/getCSV")
def getCSV():
output = StringIO()
df = pd.read_csv('res.csv')
df.to_csv(output)
return Response(
output.getvalue(),
mimetype="text/csv",
headers={"Content-disposition":"attachment; filename=res.csv"})
if __name__ == '__main__':
    # Development server, bound to localhost only.
    app.run(host='127.0.0.1', port=9527)
| CandiceD17/Http-Server-Query-Retrieval | my_server.py | my_server.py | py | 8,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_... |
29730523882 | #coding:utf8
#login
import logging
logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(__name__)
#flask frame
from flask_restplus import Resource
#wechat frame
import flask_wechat_utils
from flask_wechat_utils.user.utils import auth
from flask_wechat_utils.config import api
#application config
import config as config_application
#application model
from models import MessageTemplate
#application
from utils import get_formid_and_delete
#-------------------------------------------
# blueprint/api/ns
#-------------------------------------------
ns = api.namespace(
config_application.APPLICATION_NAME,
description=config_application.APPLICATION_DESCRIPTION
)
# api = flask_wechat_utils.create_api()
# ns = api.namespace(
# config_application.APPLICATION_NAME,
# description=config_application.APPLICATION_DESCRIPTION
# )
#-------------------------------------------
# /parser/marshal
#-------------------------------------------
parser_messageTemplate_create = api.parser()
parser_messageTemplate_create.add_argument('form_id',type=str,required=True)
#-------------------------------------------
# route
#-------------------------------------------
@ns.route('/')
class MessageTemplateRoute(Resource):
    """Store and retrieve WeChat template-message form_ids for the caller."""
    @api.doc(parser=parser_messageTemplate_create)
    @auth
    def post(self):
        """Save the submitted form_id for the authenticated user.

        Persists a MessageTemplate record keyed by the user's openid.
        ``self.wechat_user`` is presumably attached by the ``auth``
        decorator — TODO confirm in flask_wechat_utils.
        """
        args = parser_messageTemplate_create.parse_args()
        message_template = MessageTemplate(
            openid=self.wechat_user.openid,
            form_id=args.get('form_id'),
        )
        message_template.save()
        return {
            'code':0,
        }
    @auth
    def get(self):
        """Pop one stored form_id for the authenticated user.

        ``get_formid_and_delete`` returns a record and removes it, so each
        form_id is consumed at most once.
        """
        form_id_result = get_formid_and_delete(self.wechat_user.openid)
        return {
            'code':0,
            'openid':form_id_result.openid,
            'created_ts':str(form_id_result.created_ts),
            '_id':str(form_id_result.id),
        }
| synctrust/flask-wechat-utils | flask_wechat_utils/message_template/routes.py | routes.py | py | 1,768 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_wechat_ut... |
36709037388 | import rclpy
import rclpy.node
from airobot_interfaces.srv import StringCommand
from gtts import gTTS
import speech_recognition as sr
import subprocess
class SpeechService(rclpy.node.Node):
    """ROS 2 node offering a wake-up service that listens, then echoes speech.

    On each service call it announces readiness, records from the default
    microphone until Google speech recognition returns text, speaks the text
    back, and returns it in the response.
    """
    def __init__(self):
        super().__init__('speech_service')
        self.get_logger().info('音声サーバーを起動しました')
        self.init_rec = sr.Recognizer()
        # Service endpoint that triggers one listen/respond cycle.
        self.service = self.create_service(
            StringCommand, '/speech_service/wake_up', self.command_callback)
    def command_callback(self, request, response):
        """Service handler: announce readiness, recognize speech, echo it."""
        self.synthesis('I\'m ready.')
        text = None
        while text is None:
            text = self.recognition()
        self.synthesis(text)
        response.answer = text
        return response
    def recognition(self):
        """Record 5-second chunks and retry until Google STT returns text."""
        text = ''
        with sr.Microphone() as source:
            while text == '':
                audio_data = self.init_rec.record(source, duration=5)
                self.get_logger().info(f'音声認識を行います')
                try:
                    text = self.init_rec.recognize_google(audio_data)
                except sr.UnknownValueError:
                    # Nothing intelligible in this chunk; keep listening.
                    pass
        self.get_logger().info(f'認識したテキストは "{text}" です')
        return text
    def synthesis(self, text):
        """Speak ``text`` via gTTS (English) and the external mpg123 player."""
        self.get_logger().info(f'音声合成を実行します')
        self.get_logger().info(f'発話内容は "{text}"')
        gTTS(text, lang='en').save('voice.mp3')
        subprocess.run(['mpg123 voice.mp3'], shell=True)
def main():
    """Entry point: spin the speech service node until interrupted."""
    rclpy.init()
    node = SpeechService()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the node; exit quietly.
        pass
    rclpy.shutdown()
| AI-Robot-Book/chapter3 | speech_service/speech_service/speech_service_mpg123.py | speech_service_mpg123.py | py | 1,728 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rclpy.node",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "airobot_interfaces.srv.StringCommand",
"line_number": 19,
"usage_type": "argument"
},
{... |
12032981505 | import itertools
import numpy as np
import networkx as nx
from sklearn.neighbors import kneighbors_graph
from sklearn.metrics.pairwise import euclidean_distances
from scipy.sparse.csgraph import minimum_spanning_tree
from ggc.utils import *
def knn_graph(X, k):
    """Returns k-Nearest Neighbor (kNN) graph from the feature matrix.
    Parameters
    ----------
    X : ndarray, shape (N, F)
        N samples and F-dimensional features.
    k : int, k >= 1
        Parameter for knn: the k-th nearest neighbour.
    Returns
    -------
    adj : ndarray, shape (N, N)
        The adjacency matrix of the constructed knn graph.
    """
    assert k < X.shape[0]
    # Directed adjacency: row i marks the k nearest neighbours of sample i
    # (Euclidean metric, p=2), excluding i itself.
    adj_directed = kneighbors_graph(X=X,
                                    n_neighbors=k,
                                    p=2,
                                    include_self=False,
                                    ).toarray()
    # Symmetrise: keep an undirected edge if either endpoint chose the other.
    adj = adj_directed + adj_directed.T
    adj[adj > 0] = 1
    np.fill_diagonal(adj,0)
    return adj
def mknn_graph(X, k):
    """Returns Mutual k-Nearest Neighbor (MkNN) graph from the feature matrix.
    Parameters
    ----------
    X : ndarray, shape (N, F)
        N samples and F-dimensional features.
    k : int, k >= 1
        Parameter for mknn: the k-th nearest neighbour.
    Returns
    -------
    adj : ndarray, shape (N, N)
        The adjacency matrix of the constructed mknn graph.
    """
    assert k < X.shape[0]
    adj_directed = kneighbors_graph(X=X,
                                    n_neighbors=k,
                                    p=2,
                                    include_self=False,
                                    ).toarray()
    # Mutual criterion: the summed matrix equals 2 only where BOTH endpoints
    # selected each other; one-sided selections (value 1) are dropped.
    adj = adj_directed + adj_directed.T
    adj[adj < 2] = 0
    adj[adj >= 2] = 1
    np.fill_diagonal(adj,0)
    return adj
def cknn_graph(X, delta, k):
    """Returns Continuous k-Nearest Neighbor (CkNN) graph from the feature matrix.
    Parameters
    ----------
    X : ndarray, shape (N, F)
        N samples and F-dimensional features.
    delta : float, delta > 0
        Parameter for cknn.
    k : int, k >= 1
        Parameter for cknn: the k-th nearest neighbour.
    Returns
    -------
    adj : ndarray, shape (N, N)
        The adjacency matrix of the constructed cknn graph.
    References
    ----------
    .. [1] Tyrus Berry, Timothy Sauer. Consistent manifold representation for topological data analysis.
    Foundations of Data Science, 2019, 1 (1) : 1-38. doi: 10.3934/fods.2019001
    """
    assert k < X.shape[0]
    D = euclidean_distances(X, X)
    N = D.shape[0]
    np.fill_diagonal(D,0)
    # After sorting, column k of D_k is each sample's distance to its k-th
    # nearest neighbour (column 0 is the zero self-distance).
    D_k = np.sort(D)
    adj = np.zeros([N, N])
    # CkNN criterion: connect i,j when d(i,j)^2 < delta^2 * d_k(i) * d_k(j).
    adj[np.square(D) < delta * delta * np.dot(D_k[:,k].reshape(-1,1),D_k[:,k].reshape(1,-1))] = 1
    np.fill_diagonal(adj,0)
    return adj
def mst_graph(X):
    """Build the Minimum Spanning Tree (MST) graph of the samples in ``X``.

    Parameters
    ----------
    X : ndarray, shape (N, F)
        N samples with F-dimensional features.

    Returns
    -------
    adj : ndarray, shape (N, N)
        Symmetric 0/1 adjacency matrix of the MST over pairwise Euclidean
        distances.
    """
    distances = euclidean_distances(X, X)
    # minimum_spanning_tree returns a one-directional (triangular) tree;
    # symmetrise it and binarise the surviving edge weights.
    tree = minimum_spanning_tree(distances).toarray()
    adj = ((tree + tree.T) > 0).astype(float)
    np.fill_diagonal(adj, 0)
    return adj
def rmst_graph(X, gamma, k):
    """Returns Relaxed Minimum Spanning Tree (RMST) graph from the feature matrix.
    Parameters
    ----------
    X : ndarray, shape (N, F)
        N samples and F-dimensional features.
    gamma : float, gamma > 0
        Parameter for rmst.
    k : int, k >= 1
        Parameter for rmst: the k-th nearest neighbour.
    Returns
    -------
    adj : ndarray, shape (N, N)
        The adjacency matrix of the constructed rmst graph.
    References
    ----------
    .. [1] Beguerisse-Díaz, Mariano, Borislav Vangelov, and Mauricio Barahona.
    "Finding role communities in directed networks using role-based similarity,
    markov stability and the relaxed minimum spanning tree."
    2013 IEEE Global Conference on Signal and Information Processing. IEEE, 2013.
    """
    D = euclidean_distances(X, X)
    N = D.shape[0]
    assert k < N
    np.fill_diagonal(D, 0)
    adj = np.zeros([N, N])
    # Relaxation term gamma * (d_k(i) + d_k(j)) built from each node's
    # k-th nearest-neighbour distance.
    D_k = np.sort(D)
    D_k = np.tile(D_k[:, k], (N, 1))
    D_k = gamma * (D_k + D_k.T)
    np.fill_diagonal(D_k, 0)
    # For every pair, the largest edge weight along their MST path.
    max_weight = np.zeros((N, N))
    G = nx.Graph(D)
    T = nx.minimum_spanning_tree(G)
    path = dict(nx.all_pairs_dijkstra_path(T))
    for i, j in itertools.combinations(range(N), 2):
        p = path[i][j]
        path_weight = np.zeros(len(p) - 1)
        # Bug fix: the original reused ``k`` as this loop index, silently
        # shadowing the function parameter after the first pair; renamed.
        for step in range(len(p) - 1):
            path_weight[step] = T.edges[p[step], p[step + 1]]['weight']
        max_weight[i][j] = np.amax(path_weight)
    max_weight = max_weight + max_weight.T
    np.fill_diagonal(max_weight, 0)
    # Keep an edge where the direct distance beats the relaxed MST criterion.
    adj[D < (max_weight + D_k)] = 1
    np.fill_diagonal(adj, 0)
    return adj
| haczqyf/ggc | ggc/graphs.py | graphs.py | py | 4,902 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "sklearn.neighbors.kneighbors_graph",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.fill_diagonal",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.kneighbors_graph",
"line_number": 56,
"usage_type": "call"
},
... |
14774852874 | import streamlit as st
from src.plotgraphs import make_radar_graph
from src.sentanalysis import hf_analysis
from src.sentanalysis import spacy_sentiment
if __name__ == "__main__":
    # Streamlit UI: a sentence box and a submit button.
    st.write("Welcome")
    user_input = st.text_input("Enter a sentence", key="name")
    result = st.button("Submit")
    if result:
        # Transformer-based sentiment (SiEBERT); pop() takes the last result.
        new_sentiment = hf_analysis(user_input)
        output = new_sentiment.pop()
        # spaCy-based sentiment, visualised as a radar chart.
        doc = spacy_sentiment(user_input)
        st.plotly_chart(make_radar_graph(doc))
        st.write(doc)
        st.write(
            "Prediction by using SiEBERT - English-Language Sentiment Classification"
        )
        st.write(output)
| yugant10-commits/sentiment-analysis | main.py | main.py | py | 652 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.write",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "src.sentanalysis.... |
37406492523 | import logging
# Route all test logging to a file at INFO level.
# NOTE(review): the ``encoding`` kwarg of basicConfig requires Python >= 3.9.
logging.basicConfig(filename='test_logs.log', encoding='utf-8', level=logging.INFO)
logger = logging.getLogger('selenium')
logger.setLevel(logging.INFO)
# Third-party loggers that flood the log file during test runs.
disable_loggers = ['urllib3.connectionpool', 'faker.factory']


def pytest_configure():
    """Pytest hook: silence the noisy third-party loggers listed above."""
    for name in disable_loggers:
        logging.getLogger(name).disabled = True
| AlejandroPadilla99/mentoringPython | conftest.py | conftest.py | py | 383 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
... |
912034710 | import cv2
# Pedestrian detection on a video file using OpenCV's HOG + linear SVM.
cap = cv2.VideoCapture('vtest.avi')
hog = cv2.HOGDescriptor() # create the HOG descriptor object
# Load the pre-trained people-detection SVM coefficients (SVM = the ML model).
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
while True:
    ret, frame = cap.read() # read one frame
    # ret is True when a frame was read successfully, False at end of stream.
    if not ret:
        break
    detected, _ = hog.detectMultiScale(frame)
    # Draw a red box around each detected person.
    for (x, y, w, h) in detected:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 200), 3)
    cv2.imshow('CCTV', frame)
    if cv2.waitKey(10) == 27: # 27 is the ASCII code for Esc; waitKey waits 10 ms
        break
cv2.destroyAllWindows() | yousung1020/OpenCV | 실습자료/chapter 13/hog.py | hog.py | py | 782 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.HOGDescriptor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.HOGDescriptor_getDefaultPeopleDetector",
"line_number": 8,
"usage_type": "call"
},
{
"api_na... |
3530324591 | # from threading import Thread
import speech_recognition as sr
import keyboard as k
import spotipy
import os
import pyttsx3
import random
import credentials
from spotipy.oauth2 import SpotifyOAuth
from spotipy.oauth2 import SpotifyClientCredentials
# from refresh import Refresh
# from googleText2Speech import synthesize_text
# Export Spotify/Google credentials from the local credentials module so the
# spotipy and Google client libraries can pick them up from the environment.
os.environ["SPOTIPY_CLIENT_ID"] = credentials.SPOTIPY_CLIENT_ID
os.environ["SPOTIPY_CLIENT_SECRET"] = credentials.SPOTIPY_CLIENT_SECRET
os.environ["SPOTIPY_REDIRECT_URI"] = credentials.SPOTIPY_REDIRECT_URI
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials.GOOGLE_APPLICATION_CREDENTIALS
deviceId = credentials.DEVICE_ID
# Scope for controlling Spotify playback on the user's device.
scope = "user-modify-playback-state"
# NOTE(review): auth_manager is created but never used — sp below uses
# SpotifyOAuth instead; confirm whether this line can be removed.
auth_manager = SpotifyClientCredentials()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
# TTS engine (Windows SAPI5); voices[1] is presumably the female voice — TODO confirm.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
# Mic init (hard-coded input device index 2)
r = sr.Recognizer()
mic = sr.Microphone(device_index=2)
# Canned acknowledgements picked at random before executing a command.
jarvisResponses = ["I'm on it.", "Consider it done.", "Right away, Sir.", "Yes sir."]
def speak(text):
    """Speak ``text`` aloud through the module-level pyttsx3 engine (blocking)."""
    engine.say(text)
    engine.runAndWait()
def main():
    """Voice-assistant loop: wait for a wake word, then run one voice command.

    Commands cover Discord mute/deafen hotkeys, Spotify playback control,
    launching applications, and putting the PC to sleep. Recognition errors
    are swallowed and the loop simply listens again.
    """
    while 1:
        try:
            with mic as source:
                r.adjust_for_ambient_noise(source)
                audio = r.listen(source)
                response = (r.recognize_google(audio))
                print(response)
                # Wake words (including common misrecognitions of "Jarvis").
                if any(x in response for x in ["Jarvis", "Yaris", "Garvais", "Taurus"]):
                    speak("Sir?")
                    # Listen again for the actual command.
                    audio = r.listen(source)
                    response = (r.recognize_google(audio))
                    # print(response)
                    # Discord Functionality: F8/F9 are presumably bound to
                    # mute/deafen in Discord — TODO confirm the keybinds.
                    if any(x in response for x in ["mute", "unmute", "mutiny"]):
                        k.press_and_release('F8')
                        speak("It's done.")
                    elif any(x in response for x in ["deafen", "undeafen", "quiet"]):
                        k.press_and_release('F9')
                        speak("It's done.")
                    # Spotify Functionality
                    if any(x in response for x in ["next", "skip"]):
                        speak(jarvisResponses[random.randint(0, 3)])
                        sp.next_track(deviceId)
                    if any(x in response for x in ["previous", "last", "replay"]):
                        speak(jarvisResponses[random.randint(0, 3)])
                        sp.previous_track(deviceId)
                    if any(x in response for x in ["pause", "stop"]):
                        try:
                            speak(jarvisResponses[random.randint(0, 3)])
                            sp.pause_playback(deviceId)
                        except spotipy.exceptions.SpotifyException:
                            # Already paused / no active device; ignore.
                            pass
                    elif any(x in response for x in ["resume", "continue", "play"]):
                        try:
                            speak(jarvisResponses[random.randint(0, 3)])
                            sp.start_playback(deviceId)
                        except spotipy.exceptions.SpotifyException:
                            pass
                    # Volume/seek commands require a number in the utterance.
                    if any(x in response for x in ["increase", "lower", "raise", "set", "volume"]) and any(
                            char.isdigit() for char in response):
                        speak(jarvisResponses[random.randint(0, 3)])
                        volume = [int(s) for s in response.split() if s.isdigit()]
                        sp.volume(volume[0], deviceId)
                    if any(x in response for x in ["fast-forward", "fast", "forward"]) and any(
                            char.isdigit() for char in response):
                        speak(jarvisResponses[random.randint(0, 3)])
                        time = [int(s) for s in response.split() if s.isdigit()]
                        # seek_track expects milliseconds; the command gives seconds.
                        sp.seek_track(time[0] * 1000, deviceId)
                    # Application Functionality (hard-coded Windows paths)
                    if "open" in response:
                        if "valorant" in response:
                            speak(jarvisResponses[random.randint(0, 3)])
                            os.startfile(r"C:\Users\Public\Desktop\VALORANT.lnk")
                        if any(x in response for x in ["Apex", "Legends", "legend"]):
                            speak(jarvisResponses[random.randint(0, 3)])
                            os.startfile(r"C:\Users\Nasir\Desktop\Apex Legends.url")
                        if any(x in response for x in ["aim", "labs", "lab"]):
                            speak(jarvisResponses[random.randint(0, 3)])
                            os.startfile(r"C:\Users\Nasir\Desktop\Aim Lab.url")
                        if "Spotify" in response:
                            speak(jarvisResponses[random.randint(0, 3)])
                            os.startfile(r"C:\Users\Nasir\AppData\Roaming\Spotify\Spotify.exe")
                    # PC Functionality
                    if "sleep" in response:
                        speak("Goodbye for now, sir.")
                        os.system("rundll32.exe powrprof.dll,SetSuspendState 0,1,0")
                    if "quit" in response:
                        speak("Goodbye for now, sir.")
                        break
        except sr.RequestError:
            # print("API unavailable")
            pass
        except sr.UnknownValueError:
            # print("Unable to recognize speech or nothing said")
            pass
if __name__ == '__main__':
main() | nsrehman/Virtual-Assistant | voiceRecognition.py | voiceRecognition.py | py | 5,520 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "credentials.SPOTIPY_CLIENT_ID",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cr... |
31058374968 | import networkx as nx
import pandas as pd
from matplotlib import pyplot as plt
from networkx.generators.ego import ego_graph
from pyvis.network import Network
from sklearn.decomposition import PCA
def plot_network_with_edge_weights(G, figsize=(10, 10)):
    """Draw ``G`` with edge width bucketed by the 'weight' edge attribute.

    Edges are split into three buckets (> 0.8, 0.5–0.8, < 0.5) and drawn
    with decreasing widths; nodes are red with white bold labels.
    Displays the figure; returns nothing.
    """
    elarge = [(u, v) for (u, v, d) in G.edges(data=True) if (d["weight"] > 0.8)]
    emedium = [
        (u, v) for (u, v, d) in G.edges(data=True) if (0.8 >= d["weight"] >= 0.5)
    ]
    esmall = [(u, v) for (u, v, d) in G.edges(data=True) if (d["weight"] < 0.5)]
    plt.figure(figsize=figsize)
    pos = nx.spring_layout(G)
    nx.draw_networkx_nodes(G, pos, node_color="red", node_size=300)
    nx.draw_networkx_edges(G, pos, edgelist=elarge, width=8, alpha=0.2)
    nx.draw_networkx_edges(G, pos, edgelist=emedium, width=5, alpha=0.2)
    nx.draw_networkx_edges(G, pos, edgelist=esmall, width=2, alpha=0.2)
    nx.draw_networkx_labels(
        G,
        pos,
        font_size=10,
        font_weight="bold",
        font_family="sans-serif",
        font_color="white",
    )
    plt.axis("off")
    plt.show()
def plot_ego_network(G, n, radius, **options):
    """
    plot ego network around a node n depending
    on radius setting i.e. only include upto
    n nodes directly or indirectly connected to
    this node

    Extra keyword ``options`` are forwarded to draw_networkx_nodes for the
    centre node so it can be highlighted. Returns the ego subgraph.
    """
    hub_ego = ego_graph(G, n, radius=radius)
    pos = nx.spring_layout(hub_ego)
    nx.draw(hub_ego, pos, node_color="b", node_size=50, with_labels=False)
    # Re-draw the centre node on top with caller-supplied styling.
    nx.draw_networkx_nodes(hub_ego, pos, nodelist=[n], **options)
    plt.show()
    return hub_ego
def plot_centrality_hist(centrality, name):
    """Histogram of the values of a node->centrality mapping.

    ``centrality`` is a dict (as returned by networkx centrality functions);
    ``name`` labels the title and x axis. Displays the figure.
    """
    plt.figure(figsize=(15, 8))
    plt.hist(centrality.values(), bins=60)
    # NOTE(review): tick positions are hard-coded for small centrality values.
    plt.xticks(ticks=[0, 0.01, 0.02, 0.04, 0.06, 0.08])
    plt.title(f"Histogram - {name} ", fontdict={"size": 35}, loc="center")
    plt.xlabel(f"{name}", fontdict={"size": 20})
    plt.ylabel("Counts", fontdict={"size": 20})
    plt.show()
def interactive_network_vis(
    dag, *widgets, options=None, weights=False, notebook=True, directed=True
):
    """Build an interactive pyvis Network from a networkx graph.

    When ``weights`` is True, edge 'weight' attributes drive edge thickness.
    If ``options`` is given it is applied verbatim; otherwise pyvis option
    widgets filtered by ``widgets`` are shown. Returns the pyvis Network.
    NOTE(review): depending on the pyvis version, show_buttons takes
    ``filter_`` rather than ``filter`` — confirm against the installed pyvis.
    """
    nt = Network("800px", "800px", directed=directed, notebook=notebook)
    nt.from_nx(dag)
    if weights:
        for edge in nt.edges:
            edge["value"] = edge["weight"]
    if options is not None:
        nt.set_options(options=options)
        return nt
    else:
        nt.show_buttons(filter=widgets)
        return nt
def plot_community_class_count(communities):
    """Bar-plot the size of each community and return the underlying frame.

    ``communities`` is an iterable of node collections; community i becomes
    class i with its member count.
    """
    sizes = [len(list(members)) for members in communities]
    df = pd.DataFrame({"class": list(range(len(sizes))), "count": sizes})
    df.plot.bar(x="class", y="count")
    return df
def plot_link_features_projection(n_components, link_features, labels_test):
    """Scatter-plot a PCA projection of link features, coloured by label.

    Label 1 points are drawn red, all others blue; only the first two
    principal components are plotted. Displays the figure.
    """
    pca = PCA(n_components=n_components)
    X_transformed = pca.fit_transform(link_features)
    plt.figure(figsize=(16, 12))
    col = []
    for label in labels_test:
        if label == 1:
            col.append("red")
        else:
            col.append("blue")
    plt.scatter(
        X_transformed[:, 0],
        X_transformed[:, 1],
        c=col,
        alpha=0.5,
    )
    plt.show()
def plot_shortest_paths_hist(frequencies):
    """Bar chart of shortest-path-length percentages.

    ``frequencies[i]`` is the percentage of node pairs whose shortest path
    has length i+1. Displays the figure.
    """
    plt.figure(figsize=(15, 8))
    # Bug fix: x positions were hard-coded to range(8); use the actual input
    # length so shorter or longer frequency vectors plot correctly.
    plt.bar(x=[i + 1 for i in range(len(frequencies))], height=frequencies)
    plt.title(
        "Percentages of Shortest Path Lengths", fontdict={"size": 35}, loc="center"
    )
    plt.xlabel("Shortest Path Length", fontdict={"size": 22})
    plt.ylabel("Percentage", fontdict={"size": 22})
    plt.show()
def plot_degree_freq_log_log(G, m=0):
    """Log-log plot of the degree histogram of ``G``.

    Parameters
    ----------
    G : networkx graph
        Graph whose degree distribution is plotted.
    m : int, optional
        Skip the first ``m`` degree bins (useful to drop degree 0).
    """
    # Bug fix: ``G.degree_historgam(G)`` is both a typo and the wrong
    # receiver — degree_histogram is a module-level networkx function.
    degree_freq = nx.degree_histogram(G)
    degrees = range(len(degree_freq))
    plt.figure(figsize=(10, 6))
    plt.loglog(degrees[m:], degree_freq[m:], "go-")
    plt.title("log log plot for degree freq")
    plt.xlabel("degree")
    plt.ylabel("frequency")
    plt.show()
| ryankarlos/networks_algos | vis/visualize.py | visualize.py | py | 3,867 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "networkx.spring_layout",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "net... |
17192006071 | from typing import List
from app.api.validators import ValidatorsClass
from app.core.db import get_async_session
from app.core.user import current_superuser
from app.crud.charity_project import charity_crud
from app.models import Donation
from app.schemas.charity_project import CharityCreate, CharityDB, CharityUpdate
from app.services.investing import investing_process
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
router = APIRouter()
@router.get(
    '/',
    response_model=List[CharityDB],
    response_model_exclude_none=True
)
async def get_all_charity_projects(session: AsyncSession = Depends(get_async_session)) -> List[CharityDB]:
    """Return every charity project stored in the database.

    Args:
        session (AsyncSession, optional): Database session injected by FastAPI.

    Returns:
        List[CharityDB]: All charity project records.
    """
    all_charity_projects = await charity_crud.get_all_objects(session)
    return all_charity_projects
@router.post(
    '/',
    response_model=CharityDB,
    response_model_exclude_none=True,
    dependencies=[Depends(current_superuser)]
)
async def create_charity_project(
    charity_project: CharityCreate,
    session: AsyncSession = Depends(get_async_session),
) -> CharityDB:
    """Create a new charity project (superuser only).

    Args:
        charity_project (CharityCreate): Payload describing the project.
        session (AsyncSession, optional): Database session injected by FastAPI.

    Returns:
        CharityDB: The created charity project.
    """
    await ValidatorsClass.check_name_duplicate(charity_project.name, session)
    new_charity = await charity_crud.create(
        charity_project,
        session
    )
    # Invest open donations into the newly created project right away.
    await investing_process(new_charity, Donation, session)
    return new_charity
@router.delete(
    '/{project_id}',
    response_model=CharityDB,
    dependencies=[Depends(current_superuser)]
)
async def delete_charity_project(
    project_id: int,
    session: AsyncSession = Depends(get_async_session)
) -> CharityDB:
    """Delete a charity project (superuser only).

    Args:
        project_id (int): Identifier of the project to delete.
        session (AsyncSession, optional): Database session injected by FastAPI.

    Returns:
        CharityDB: The deleted charity project.
    """
    delete_charity = await ValidatorsClass.check_charity_project_exists(project_id, session)
    # Projects that already received donations must not be deleted.
    ValidatorsClass.check_invested_amount_in_project(delete_charity)
    delete_charity = await charity_crud.delete(delete_charity, session)
    return delete_charity
@router.patch(
    '/{project_id}',
    response_model=CharityDB,
    dependencies=[Depends(current_superuser)]
)
async def update_charity_project(
    project_id: int,
    obj_in: CharityUpdate,
    session: AsyncSession = Depends(get_async_session),
) -> CharityDB:
    """Update a charity project (superuser only).

    Args:
        project_id (int): Identifier of the project to update.
        obj_in (CharityUpdate): Fields to change.
        session (AsyncSession, optional): Database session injected by FastAPI.

    Returns:
        CharityDB: The updated charity project.
    """
    charity_project = await ValidatorsClass.check_charity_project_exists(
        project_id, session
    )
    # Closed projects are immutable.
    ValidatorsClass.check_charity_project_closed(charity_project)
    if obj_in.name is not None:
        await ValidatorsClass.check_name_duplicate(
            obj_in.name, session
        )
    if obj_in.full_amount is not None:
        # New target must not drop below what has already been invested.
        ValidatorsClass.count_sum_in_invested_amount(
            charity_project, obj_in.full_amount
        )
    charity_project = await charity_crud.update(
        charity_project, obj_in, session
    )
    return charity_project
| Lexxar91/QRkot_spreadsheets | app/api/endpoints/charity_project.py | charity_project.py | py | 4,780 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": ... |
72076483945 | import numpy as np
import torch
from torch.utils.data import Dataset
import matplotlib
from matplotlib import pyplot as plt
import enum
import scipy
from scipy import ndimage, signal
import io
from . import fileloader, util, zernike
from skimage import restoration
@enum.unique
class Augmentation(enum.Enum):
    """Data-augmentation options accepted by the dataset classes below."""
    # Random integer-pixel crop shift applied in __getitem__.
    PIXEL_SHIFT = 1
    # Additive Gaussian noise applied per sample in __getitem__.
    NOISE_GAUSSIAN =2
class BaseDataset(Dataset):
    """Common base class for the datasets in this module.

    Exposes ``target_is_image`` so consumers can tell whether targets are
    images or parameter dictionaries; subclasses may flip the flag.
    """

    def __init__(self):
        super().__init__()
        # Default: targets are parameter dicts, not images.
        self.target_is_image = False
class SimulatedImageDataset(BaseDataset):
    """
    Base class for simulated image datasets.

    Pre-renders the whole dataset in __init__: samples per-image parameters,
    delegates rendering to the subclass's ``generate_images``, then applies
    (in order) emitter dropout, amplitude scaling, summation over emitters,
    background, optional 2-D convolution, noise, and min-max normalization.
    ``__getitem__`` applies the configured augmentations on the fly.
    NOTE(review): the dict default arguments are shared across instances —
    safe only as long as they are never mutated.
    """
    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                image_params={},
                noise_params={'poisson':True, 'gaussian':10},
                conv_kernel=None,
                normalize=True, augmentations={Augmentation.PIXEL_SHIFT:[8,8]},
                image_params_preset={}):
        super().__init__()
        # Reject unknown augmentation keys early.
        for key in augmentations:
            if not isinstance(key, Augmentation):
                raise Exception("Augmentation '{}' not recognized. Use Augmentation enum.".format(key))
        self.params_range = image_params
        self.augmentations = augmentations
        # Images are generated with extra margin so __getitem__ can crop at a
        # random offset (pixel-shift augmentation).
        self.padding = augmentations.get(Augmentation.PIXEL_SHIFT, [0,0]) # x, y
        self.gen_size = (out_size[0]+2*self.padding[0], out_size[1]+2*self.padding[1])
        self.out_size = out_size
        # length may be a scalar (N images) or (N, emitters-per-image).
        output_image_shape = np.atleast_1d(np.asarray(length))
        if output_image_shape.shape[0]<2:
            output_image_shape = np.concatenate([output_image_shape, [1]])
        self.set_params(output_image_shape, image_params, image_params_preset)
        # One (x, y, z) shift per emitter, flattened over images.
        shifts = np.stack([self.params['x'].flatten(), self.params['y'].flatten(), self.params['z'].flatten()], axis=-1)
        images = self.generate_images(self.gen_size, output_image_shape, shifts, image_params)
        # Randomly blank whole emitters with probability dropout_p.
        if dropout_p > 0:
            images = images * (np.random.rand(images.shape[0], 1, 1) > dropout_p)
        images = images * self.params['A'].reshape(-1, 1, 1)
        # Regroup to (image, emitter, H, W) and sum emitters into one channel.
        images = images.reshape(output_image_shape[0], output_image_shape[1], images.shape[1], images.shape[2])
        images = images.sum(axis=1, keepdims=True)
        images = images + self.params['bg'].reshape(-1, 1, 1, 1)
        # Optional blur: 2-D convolution with reflect padding (via torch).
        if not conv_kernel is None:
            conv_kernel = torch.as_tensor(conv_kernel, dtype=torch.float).reshape(1, 1, conv_kernel.shape[-2], conv_kernel.shape[-1])
            images = torch.as_tensor(images, dtype=torch.float)
            images = torch.nn.functional.pad(images, (conv_kernel.shape[-1]//2,)*2 + (conv_kernel.shape[-2]//2,)*2, mode="reflect")
            images = torch.nn.functional.conv2d(images, conv_kernel, padding=0).numpy()
        if len(noise_params) > 0:
            images = self.add_noise(images, noise_params)
        # Per-image min-max normalization to [0, 1].
        if normalize:
            images -= images.min(axis=(2,3), keepdims=True)
            images /= images.max(axis=(2,3), keepdims=True)
        self.images = images.astype(np.float32)
    def set_params(self, output_image_shape, image_params, image_params_preset):
        # Sample per-image/per-emitter parameters uniformly from the given
        # [low, high] ranges; presets override the sampled values.
        # print("Image parameters settings: {}".format(image_params))
        self.params = {}
        self.params['id'] = np.arange(output_image_shape[0])
        self.params['A'] = np.random.uniform(image_params['A'][0], image_params['A'][1], output_image_shape).astype(np.float32)
        self.params['bg'] = np.random.uniform(image_params['bg'][0], image_params['bg'][1], output_image_shape[0]).astype(np.float32)
        self.params['x'] = np.random.uniform(image_params['x'][0], image_params['x'][1], output_image_shape).astype(np.float32)
        self.params['y'] = np.random.uniform(image_params['y'][0], image_params['y'][1], output_image_shape).astype(np.float32)
        # z is optional; defaults to the focal plane (0).
        if 'z' in image_params:
            self.params['z'] = np.random.uniform(image_params['z'][0], image_params['z'][1], output_image_shape).astype(np.float32)
        else:
            self.params['z'] = np.zeros(output_image_shape).astype(np.float32)
        self.params.update(image_params_preset)
    def generate_images(self, size, length, shifts, image_params):
        # Subclasses implement the actual rendering of one image per emitter.
        raise NotImplementedError()
    def add_noise(self, images, noise_params):
        """Apply shot (Poisson) and/or additive Gaussian read noise."""
        ret = images.copy()
        if noise_params.get('poisson', False) is True:
            # Zero-mean Poisson perturbation around the clean image.
            ret += np.random.poisson(images) - images
        if 'gaussian' in noise_params:
            ret += np.random.normal(np.zeros_like(images), noise_params['gaussian'])
        return ret
    def __len__(self):
        return self.images.shape[0]
    def __getitem__(self, key):
        """Return (image, label-dict), applying augmentations on the fly."""
        image = self.images[key]
        label = {param_key: param_val[key] for param_key, param_val in self.params.items()}
        if Augmentation.PIXEL_SHIFT in self.augmentations:
            # Random crop within the generation margin; x/y labels are
            # corrected so they stay consistent with the cropped frame.
            shift = [np.random.randint(0, 2*i+1) for i in self.padding]
            label['x'] = label['x'] - shift[0] + self.padding[0]
            label['y'] = label['y'] - shift[1] + self.padding[1]
            image = image[:,shift[0]:shift[0]+self.out_size[0],shift[1]:shift[1]+self.out_size[1]]
        if Augmentation.NOISE_GAUSSIAN in self.augmentations:
            # Noise sigma is a fraction of the image's dynamic range.
            noise_sig = self.augmentations[Augmentation.NOISE_GAUSSIAN] * (image.max() - image.min())
            image = np.random.normal(image, noise_sig).astype(np.float32)
        return image, label
    def to(self, device):
        # Converts the image store to a torch tensor on ``device``.
        # NOTE(review): __getitem__'s numpy-based augmentations may not work
        # on a tensor store — confirm before combining to() with augmentations.
        self.images = torch.as_tensor(self.images, device=device)
class SingleImageDataset(SimulatedImageDataset):
    """
    Repeatedly sample a single image.

    Renders each dataset entry by applying a random sub-pixel (x, y) shift to
    the supplied 2-D ``data`` array via the Fourier shift theorem.
    """
    def __init__(self, data, out_size=(64, 64), length=16, dropout_p=0,
                 image_params={},
                 noise_params={'poisson':True, 'gaussian':10},
                 conv_kernel = None,
                 normalize=True, augmentations={Augmentation.PIXEL_SHIFT:[8,8]},
                 image_params_preset={}):
        default_image_params = {
            'A': [0.5, 2.0],
            'bg': [0, 10],
            'x': [-5, 5],
            'y': [-5, 5],
            # 'conv':np.ones((3,3)),
        }
        # User-supplied ranges override the defaults; the source image is
        # passed through image_params to generate_images.
        _image_params = dict(default_image_params, **image_params)
        _image_params['data'] = data
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=_image_params,
                         noise_params=noise_params,
                         conv_kernel=conv_kernel,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)
    def generate_images(self, size, length, shifts, image_params):
        """Render shifted copies of the source image via FFT phase ramps."""
        data = image_params['data']
        # add padding larger than shifts
        # NOTE(review): the range uses len(shifts.shape) (== 2) where
        # len(data.shape) was presumably intended; the result is the same for
        # 2-D data — confirm if data can be non-2-D.
        shift_max = [np.ceil(np.max([np.abs(shifts[:,i].min()), shifts[:,i].max()])).astype(int) for i in range(len(shifts.shape))]
        crop_size = [size[i] + 2*shift_max[i] for i in range(len(data.shape))]
        data = data[:crop_size[0],:crop_size[1]]
        # zero padding for fft
        padding = [(int(np.ceil(1.5 * data.shape[0])),)*2, (int(np.ceil(1.5 * data.shape[1])),)*2]
        data = np.pad(data, padding, mode='wrap')
        kx = np.fft.fftshift(np.fft.fftfreq(data.shape[0]))
        ky = np.fft.fftshift(np.fft.fftfreq(data.shape[1]))
        self.KX, self.KY = np.meshgrid(kx, ky, indexing='ij')
        fft_image = np.fft.fftshift(np.fft.fft2(data))
        fft_image_mag = np.abs(fft_image)
        fft_image_phase = np.angle(fft_image)
        # helps remove ringing artifacts
        fft_image_mag = fft_image_mag * signal.windows.tukey(fft_image_mag.shape[0], alpha=0.5)[:,None]
        fft_image_mag = fft_image_mag * signal.windows.tukey(fft_image_mag.shape[1], alpha=0.5)[None,:]
        # x, y shift: a linear phase ramp in k-space shifts the image.
        fft_image_phase = fft_image_phase - 2 * np.pi * (self.KX[None,...] * shifts[:,0,None,None])
        fft_image_phase = fft_image_phase - 2 * np.pi * (self.KY[None,...] * shifts[:,1,None,None])
        shifted_fft = fft_image_mag * np.exp(1j * fft_image_phase)
        shifted_img = np.fft.ifft2(np.fft.ifftshift(shifted_fft))
        # Undo the shift margin and the FFT zero padding.
        crop = np.concatenate([shift_max[i] + padding[i] for i in range(len(data.shape))])
        shifted_img = shifted_img[:, crop[0]:-crop[1], crop[2]:-crop[3]]
        return np.abs(shifted_img)
class SimulatedPSFDataset(SimulatedImageDataset):
    """Simulated point-spread-function dataset.

    Supplies emitter-oriented defaults for amplitude, background and lateral
    position (scaled to the output size) before delegating to
    ``SimulatedImageDataset``; concrete PSF models override
    ``generate_images``.
    """

    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                 image_params={},
                 noise_params={'poisson':True, 'gaussian':10},
                 normalize=True, augmentations={Augmentation.PIXEL_SHIFT:[8,8]},
                 image_params_preset={}):
        # Emitter defaults; user-supplied entries take precedence.
        merged_params = {
            'A': [500, 2000],
            'bg': [0, 100],
            'x': [-0.35*out_size[0], 0.35*out_size[0]],
            'y': [-0.35*out_size[1], 0.35*out_size[1]],
            **image_params,
        }
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=merged_params,
                         noise_params=noise_params,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)

    def generate_images(self, size, length, shifts, image_params):
        """PSF rendering is model-specific; subclasses must implement it."""
        raise NotImplementedError()
class Gaussian2DPSFDataset(SimulatedPSFDataset):
    """Dataset of 2-D Gaussian PSFs with per-sample widths drawn uniformly
    from psf_params['sig_x'] / psf_params['sig_y']."""
    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                 psf_params={},
                 noise_params={'poisson':True, 'gaussian':100},
                 normalize=False, augmentations={},
                 image_params_preset={}):
        default_image_params = {
            'sig_x':[5, 5],
            'sig_y':[5, 5],
        }
        _image_params = dict(default_image_params, **psf_params)
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=_image_params,
                         noise_params=noise_params,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)
    def generate_images(self, size, length, shifts, psf_params):
        """Render one centred-then-shifted 2-D Gaussian per emitter."""
        xs = np.arange(0, size[0]) - 0.5*(size[0]-1)
        ys = np.arange(0, size[1]) - 0.5*(size[1]-1)
        XS, YS = np.meshgrid(xs, ys, indexing='ij')
        self.params['sig_x'] = np.random.uniform(*psf_params['sig_x'], length).astype(np.float32)
        self.params['sig_y'] = np.random.uniform(*psf_params['sig_y'], length).astype(np.float32)
        # NOTE(review): the denominator is 2*sig, not 2*sig**2, so 'sig_x'/
        # 'sig_y' effectively act as variances rather than standard
        # deviations — confirm whether this is intended.
        ret = np.exp(-((XS[None,...]-shifts[:,0,None,None])**2/(2*self.params['sig_x'].reshape(-1,1,1)) \
            + (YS[None,...]-shifts[:,1,None,None])**2/(2*self.params['sig_y'].reshape(-1,1,1))))
        return ret
class FourierOpticsPSFDataset(SimulatedPSFDataset):
    def __init__(self, out_size=(32, 32), length=512, dropout_p=0,
                 psf_params={}, psf_zerns={},
                 noise_params={'poisson':True, 'gaussian':100},
                 normalize=False, augmentations={},
                 image_params_preset={}):
        """Fourier-optics PSF dataset: PSFs rendered from a pupil function.

        ``psf_zerns`` maps Zernike modes to coefficients for pupil-phase
        aberrations; ``apod`` enables pupil apodization and ``pupil_scale``
        sets the pupil size relative to the sampling grid.
        """
        default_psf_params = {
            'apod':False,
            'pupil_scale':0.75,
        }
        # User-supplied settings override the defaults; the Zernike spec is
        # forwarded to generate_images through the params dict.
        _psf_params = dict(default_psf_params, **psf_params)
        _psf_params["psf_zerns"] = psf_zerns
        super().__init__(out_size=out_size, length=length, dropout_p=dropout_p,
                         image_params=_psf_params,
                         noise_params=noise_params,
                         normalize=normalize, augmentations=augmentations,
                         image_params_preset=image_params_preset)
def generate_images(self, size, length, shifts, psf_params):
pupil_padding_factor = 4
pupil_padding_clip = 0.5 * (pupil_padding_factor - 1)
pupil_padding = int(pupil_padding_clip*size[0]), int(-pupil_padding_clip*size[0]), int(pupil_padding_clip*size[1]), int(-pupil_padding_clip*size[1])
kx = np.fft.fftshift(np.fft.fftfreq(pupil_padding_factor*size[0]))
ky = np.fft.fftshift(np.fft.fftfreq(pupil_padding_factor*size[1]))
self.KX, self.KY = np.meshgrid(kx, ky, indexing='ij')
us = np.linspace(-1, 1, pupil_padding_factor*size[0]) * (pupil_padding_factor*size[0]-1) / (size[0]-1) / psf_params.get('pupil_scale', 0.75)
vs = np.linspace(-1, 1, pupil_padding_factor*size[1]) * (pupil_padding_factor*size[0]-1) / (size[0]-1) / psf_params.get('pupil_scale', 0.75)
US, VS = np.meshgrid(us, vs, indexing='ij')
R = np.sqrt(US**2 + VS**2)
if psf_params.get('apod', False):
pupil_mag = np.sqrt(1-np.minimum(R, 1)**2)
else:
pupil_mag = (R <= 1).astype(np.float)
pupil_phase = zernike.calculate_pupil_phase(R*(R<=1), np.arctan2(US, VS), psf_params.get("psf_zerns", {}))
self.pupil = pupil_mag * np.exp(1j*pupil_phase)
self.pupil = self.pupil[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]
self.pupil_suppl = {"radial_distance": (R*(R<=1))[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]],
"azimuthal_angle": np.arctan2(US, VS)[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]}
shifted_pupil_phase = np.tile(pupil_phase, (shifts.shape[0], 1, 1))
shifted_pupil_phase = shifted_pupil_phase - 2 * np.pi * (self.KX[None,...] * shifts[:,0,None,None])
shifted_pupil_phase = shifted_pupil_phase - 2 * np.pi * (self.KY[None,...] * shifts[:,1,None,None])
shifted_pupil_phase = shifted_pupil_phase + np.sqrt(1-np.minimum(R, 1)**2) * shifts[:,2,None,None]
shifted_pupils = pupil_mag[None,...]*np.exp(1j*shifted_pupil_phase)
psfs = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(shifted_pupils)))
psfs = psfs[:, pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]
psfs = np.abs(psfs)**2
ref_psf = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(np.pad(self.pupil, ((pupil_padding[0], -pupil_padding[1]), (pupil_padding[2], -pupil_padding[3]))))))
ref_psf = ref_psf[pupil_padding[0]:pupil_padding[1], pupil_padding[2]:pupil_padding[3]]
ref_psf = np.abs(ref_psf)**2
psfs /= ref_psf.max()
return psfs
class FileDataset(BaseDataset):
    """Dataset that serves randomly-sampled frames from a single image file."""

    def __init__(self, file_path,
                 transform=None,
                 image_slice=slice(None),
                 length=None,
                 file_loader=fileloader.PilImageFileLoader,
                 slices=(slice(None),), stack_to_volume=False, cache=True):
        super().__init__()
        self.file = self.load_file(file_path,
                                   file_loader=file_loader,
                                   slices=slices,
                                   stack_to_volume=stack_to_volume,
                                   cache=cache)
        # Default the dataset length to the number of frames in the file.
        self.length = len(self.file) if length is None else length
        self.transform = transform
        self.image_slice = np.arange(len(self.file), dtype=np.int32)[image_slice]

    def load_file(self, file_path, file_loader, slices, stack_to_volume, cache):
        """Open *file_path* with *file_loader* and print a short summary."""
        loaded = file_loader(file_path, slices=slices,
                             stack_to_volume=stack_to_volume, cache=cache)
        summary = {"filepath": loaded.file_path,
                   "frames": len(loaded),
                   "image shape": loaded[0].shape}
        print(", ".join("{}: {}".format(key, val) for key, val in summary.items()))
        return loaded

    def __len__(self):
        return self.length

    def __getitem__(self, key):
        # Draw a random frame from the allowed index range.
        file_id = np.random.choice(self.image_slice)
        img = torch.as_tensor(self.file[file_id])
        if self.transform is not None:
            img = self.transform(img)
        return img, {'id': key}
class ResamplingFileDataset(FileDataset):
    """Serves random crops resampled from the frames of a single file."""
    # overlap with SingleImageDataset?

    def __init__(self, file_path, out_size=(64, 64, 64),
                 length=16,
                 file_loader=fileloader.PilImageFileLoader,
                 slices=(slice(None),), stack_to_volume=False, cache=True):
        super().__init__(file_path=file_path, length=length,
                         file_loader=file_loader,
                         slices=slices, stack_to_volume=stack_to_volume,
                         cache=cache)
        self.in_size = self.file[0][0].shape
        # Clip the requested crop size to what the source image can provide.
        self.out_size = [min(out_size[dim], self.in_size[dim]) for dim in range(len(out_size))]
        if self.out_size < list(out_size):
            print("out_size {} clipped to {}".format(out_size, self.out_size))
        print(self.in_size, self.out_size)

    def __getitem__(self, key):
        file_id = np.random.randint(0, len(self.file), dtype=np.int32)
        # Random top-left corner of the crop, one offset per spatial dim.
        offsets = np.asarray([np.random.randint(0, self.in_size[dim] - self.out_size[dim] + 1)
                              for dim in range(len(self.in_size))])
        labels = {'id': file_id}
        labels.update({"slice_{}".format(['x', 'y', 'z'][i]): off for i, off in enumerate(offsets)})
        bounds = np.stack([offsets, offsets + self.out_size], -1)
        window = tuple([slice(None)] + [slice(lo, hi) for (lo, hi) in bounds])
        return self.file[file_id][window], labels
class FilePairsDataset(FileDataset):
    """Paired input/target frames drawn from two files with shared indexing.

    The same RNG seed is set before each transform so random augmentations
    stay spatially in sync between input and target.
    """

    def __init__(self, file_path, target_file_path,
                 transform=None, target_transform=None,
                 image_slice=slice(None),
                 length=16,
                 file_loader=fileloader.PilImageFileLoader,
                 slices=(slice(None),), stack_to_volume=False, cache=True):
        # NOTE(review): `length` is accepted but not forwarded to super(), so
        # the parent falls back to len(file). Left unchanged so existing
        # behavior is preserved — confirm whether it should be forwarded.
        super().__init__(file_path=file_path,
                         file_loader=file_loader, slices=slices,
                         stack_to_volume=stack_to_volume,
                         cache=cache)
        self.target_is_image = True
        self.target_file = self.load_file(target_file_path,
                                          file_loader=file_loader,
                                          slices=slices,
                                          stack_to_volume=stack_to_volume,
                                          cache=cache)
        self.transform = transform
        self.target_transform = target_transform
        self.image_slice = np.arange(len(self.file), dtype=np.int32)[image_slice]

    def __getitem__(self, key):
        file_id = np.random.choice(self.image_slice)
        img = torch.as_tensor(self.file[file_id])
        target = torch.as_tensor(self.target_file[file_id])
        seed = np.random.randint(2147483648)
        if self.transform is not None:
            torch.manual_seed(seed)
            img = self.transform(img)
        if self.target_transform is not None:
            torch.manual_seed(seed)
            # BUG FIX: the original applied self.transform here even though the
            # guard checks self.target_transform.
            target = self.target_transform(target)
        return img, target
def inspect_images(dataset, indices=None):
    """Visual sanity check for a dataset: tile sample images (and image
    targets), overlay sample ids, print non-image labels, and plot parameter
    histograms plus the pupil when the dataset exposes them."""
    if indices is None:
        indices = np.random.choice(len(dataset), min(8, len(dataset)), replace=False)
    samples = [(dataset[i][0].detach().cpu().numpy() if torch.is_tensor(dataset[i][0]) else dataset[i][0],
                dataset[i][1]) for i in indices]
    images, labels = zip(*samples)
    tiled_images, n_col, n_row = util.tile_images(util.reduce_images_dim(np.stack(images, axis=0)), full_output=True)
    fig, axes = plt.subplots(2, 1, figsize=(4 * n_col, 3 * n_row * 2))
    # Linear and log views of the tiled inputs.
    plt.colorbar(axes[0].imshow(tiled_images), ax=axes[0])
    plt.colorbar(axes[1].imshow(np.log(tiled_images)), ax=axes[1])
    axes_to_label = [axes]
    if dataset.target_is_image is True:
        tiled_images, n_col, n_row = util.tile_images(util.reduce_images_dim(np.stack(labels, axis=0)), full_output=True)
        fig, axes = plt.subplots(2, 1, figsize=(4 * n_col, 3 * n_row * 2))
        plt.colorbar(axes[0].imshow(tiled_images), ax=axes[0])
        plt.colorbar(axes[1].imshow(np.log(tiled_images)), ax=axes[1])
        axes_to_label.append(axes)
    for i, id in enumerate(indices):
        label = "{}:\t".format(id)
        if dataset.target_is_image is False:
            # Scalar/vector labels: print them instead of tiling.
            for key, val in labels[i].items():
                label += " [{} =".format(key)
                for datum in np.atleast_1d(val.squeeze()):
                    label += " {:.3f},".format(datum)
                label += "],"
            print(label)
        # Overlay the sample index on every tiled panel.
        for axes in axes_to_label:
            for j in range(2):
                axes[j].text(i % n_col / n_col, i // n_col / n_row,
                             id,
                             bbox={'facecolor': 'white', 'alpha': 1},
                             ha='left', va='bottom',
                             fontsize='medium',
                             transform=axes[j].transAxes)
    if hasattr(dataset, 'params'):
        fig, axes = plt.subplots(1, len(dataset.params), figsize=(4 * len(dataset.params), 3))
        for i, (key, val) in enumerate(dataset.params.items()):
            axes[i].hist(val.flatten(), bins=20)
            axes[i].set_xlabel(key)
    if hasattr(dataset, 'pupil'):
        fig, axes = plt.subplots(1, 3, figsize=(4 * 2 + 8, 3), gridspec_kw={'width_ratios': [1, 1, 3]})
        pupil_magnitude = np.abs(dataset.pupil)
        pupil_magnitude_colored, norm, cmap = util.color_images(pupil_magnitude, full_output=True)
        axes[0].imshow(pupil_magnitude_colored)
        plt.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), ax=axes[0])
        axes[0].set_title('pupil mag')
        pupil_phase = restoration.unwrap_phase(np.ma.array(np.angle(dataset.pupil), mask=np.abs(dataset.pupil) <= 0))
        pupil_phase_colored, norm, cmap = util.color_images(pupil_phase, vsym=True, full_output=True)
        axes[1].imshow(pupil_phase_colored)
        plt.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), ax=axes[1])
        axes[1].set_title('pupil phase')
        zernike_coeffs = zernike.fit_zernike_from_pupil(dataset.pupil, 16, dataset.pupil_suppl["radial_distance"], dataset.pupil_suppl["azimuthal_angle"])
        zernike.plot_zernike_coeffs(axes[2], zernike_coeffs)
    fig.tight_layout()
{
"api_name": "enum.Enum",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "enum.unique",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.atleas... |
11370466883 | import requests
from time import sleep
class Ark(object):
    """Python wrapper for the ARK API.

    All endpoints share identical retry/response handling, implemented once
    in the private :meth:`_fetch` helper (the original duplicated it in
    every public method).
    """

    BASE_URL = "https://testapi.ark.com"

    def __init__(self, api_token):
        self.api_token = api_token
        self.header = {'api_token': self.api_token}

    def _fetch(self, url, full_object=False, json_key=None):
        """GET *url*, polling once per second while the API answers 302.

        Returns the HTTP status code on any non-200 result, the raw
        response object when *full_object* is true, otherwise the decoded
        JSON body (or the value at *json_key* when given).
        """
        request = requests.get(url, headers=self.header)
        while request.status_code == 302:
            sleep(1)
            request = requests.get(url, headers=self.header)
        if request.status_code != 200:
            return request.status_code
        if full_object:
            return request
        body = request.json()
        return body if json_key is None else body[json_key]

    def check_token(self, full_object=False):
        """Checks the number of calls your token has left"""
        return self._fetch(self.BASE_URL + "/token_request", full_object, json_key='left')

    def email(self, email, full_object=False):
        """Fetches a user profile via email"""
        return self._fetch(self.BASE_URL + "/email/" + email, full_object)

    def twitter(self, handle, full_object=False):
        """Fetches a user profile via twitter handle"""
        return self._fetch(self.BASE_URL + "/network/tw:" + handle, full_object)

    def facebook(self, facebook_url, full_object=False):
        """Fetches user profile via facebook url"""
        return self._fetch(self.BASE_URL + "/network/fb:" + facebook_url, full_object)
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number"... |
42776999573 | """canaryAPI URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.conf.urls import url, include
from rest_framework.permissions import IsAuthenticated
from rest_framework.documentation import include_docs_urls
from manage_api.admin import admin_site
from manage_api.views import AddExternalAPISetting
from manage_api.views import TriggerItem, DownloadItem
from manage_api.views import SysmonAlertItems, UserItem, UserItems
from canary_log_api.views import ViewLog
from canary_files.views import GenerateCanaryItem, DownloadCanaryItem
from alert_api.views import CanaryAlertItems, SysmonIncoming, FileItem
# Endpoints for managing users, alerts, triggers and sample downloads,
# mounted under /api/manage/.
managepatterns = [
    path('', UserItem.as_view(), name='user_item'),
    path('users/', UserItems.as_view(), name='user_items'),
    path('sysmon/', SysmonAlertItems.as_view(), name='sysmon_alert_items'),
    path('sysmon/<int:id>', SysmonAlertItems.as_view(),
         name='sysmon_alert_item'),
    path('trigger/', TriggerItem.as_view(), name='trigger_item'),
    path('trigger/<int:id>', TriggerItem.as_view(), name='trigger_item'),
    path('api_settings/', AddExternalAPISetting.as_view(), name='external-setting'),
    path(r'download/<md5>', DownloadItem.as_view(), name='download-sample'),
]

# Public API surface, mounted under /api/.
apipatterns = [
    path('alert/', SysmonIncoming.as_view(), name='incoming-mimialert'),
    path('alert/log/', CanaryAlertItems.as_view()),
    path('alert/log/<int:id>', CanaryAlertItems.as_view(), name='triggered-alerts'),
    path('alert/upload/<str:filename>/', FileItem.as_view(), name='incoming-sample'),
    path('manage/', include(managepatterns)),
    path('canary/', GenerateCanaryItem.as_view(), name='canary'),
    path('log/', ViewLog.as_view(), name='logs'),
    path('canary/download/<identifier>', DownloadCanaryItem.as_view(),
         name='download-canary'),
]

# Top-level routes: admin site, the API, generated docs and DRF auth.
urlpatterns = [
    path('admin/', admin_site.urls),
    path('api/', include(apipatterns)),
    path('api-docs/', include_docs_urls(title='Canary API', public=False, permission_classes=[IsAuthenticated])),
    url(r'^api-auth/', include('rest_framework.urls')),
]
| toucan-project/TOUCAN | toucan/canary_api/urls.py | urls.py | py | 2,670 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "manage_api.views.UserItem.as_view",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "manage_api.views.UserItem",
"line_number": 34,
"usage_type": "name"
},
{
"api_... |
12639565741 | from mainapp.model.Event import Event
from datetime import datetime
from django.core.cache import cache
from mainapp.Common import CacheUtil
from django.conf import settings
from django.utils import timezone
KEY_CACHE_DAO_GET_ALL_EVENT_ACTIVE = 'context-dao-all-event-active'
KEY_CACHE_DAO_GET_ALL_EVENT_NOT_ACTIVE = 'context-dao-all-event-not-active'
def get_all_event_not_active():
    """Return a queryset of all inactive events."""
    return Event.objects.filter(active=False)
def get_all_event_active():
    """Return a queryset of all active events."""
    return Event.objects.filter(active=True)
def get_all_event_active_running():
    """Return active events whose start..end window contains the current UTC time."""
    now = datetime.now(tz=timezone.utc)
    return Event.objects.filter(active=True, event_start__lte=now, event_end__gte=now)
def get_all_event_active_is_comming():
    """Return active events that have not started yet (upcoming)."""
    now = datetime.now(tz=timezone.utc)
    return Event.objects.filter(active=True, event_start__gte=now, event_end__gte=now)
def get_all_event_active_is_passed():
    """Return active events whose start and end times are already in the past."""
    now = datetime.now(tz=timezone.utc)
    return Event.objects.filter(active=True, event_start__lte=now, event_end__lte=now)
def get_event_detail_by_id(event_id):
    """Fetch a single event by primary key (raises Event.DoesNotExist if absent)."""
    return Event.objects.get(pk=event_id)
def insert_event(event):
    """Persist a new Event row copied from *event* and return the saved instance."""
    new_row = Event(
        event_name=event.event_name,
        event_note=event.event_note,
        event_slogun=event.event_slogun,
        event_description=event.event_description,
        event_image_banner_name=event.event_image_banner_name,
        event_image_banner_path=event.event_image_banner_path,
        active=event.active,
        event_start=event.event_start,
        event_end=event.event_end,
        # NOTE(review): naive local timestamp, while the query helpers use
        # UTC-aware datetimes — confirm this mismatch is intentional.
        created_at=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    )
    new_row.save()
    return new_row
def update_event(event):
    """Copy the editable fields from *event* onto the stored row and save it."""
    row = Event.objects.get(pk=event.event_id)
    for field in ('event_name', 'event_note', 'event_slogun', 'event_description',
                  'event_image_banner_name', 'event_image_banner_path', 'active',
                  'event_start', 'event_end'):
        setattr(row, field, getattr(event, field))
    row.save()
    return row
def delete_event(event_id):
    """Delete the event with primary key *event_id*."""
    Event.objects.get(pk=event_id).delete()
{
"api_name": "mainapp.model.Event.Event.objects.filter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mainapp.model.Event.Event.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mainapp.model.Event.Event",
"line_number": 15,
"usage_typ... |
36219148196 | import numpy as np
from numpy import array
from mSimplexFaseII import solve
from scipy.optimize import linprog
import pprint
from math import log, exp
from numpy.random import rand, normal
from numpy import round, int, abs, array, transpose
def main():
    """Run two fixed LP test cases, then randomized comparisons against
    scipy.optimize.linprog, printing pass/fail for each."""
    # First test
    A = array([[1, 0], [0, 2], [3, 2]])
    b = [4, 12, 18]
    c = array([-3, -5])
    print('\n - - - - - - - - - - - \n')
    print('TEST 1:\n')
    print('Our solution:')
    r = solve(A, b, c)
    print("\n".join("{}:\t{}".format(k, v) for k, v in r.items()))
    print('\nPython solution:')
    print(linprog(c, A_ub=A, b_ub=b))
    print('\n - - - - - - - - - - - \n')
    # Second test
    A = array([[-1, 1], [1, 0]])
    b = [0, 2]
    c = array([0, -1])
    print('TEST 2:\n')
    r = solve(A, b, c)
    print('Our solution:')
    print("\n".join("{}:\t{}".format(k, v) for k, v in r.items()))
    print('\nPython solution:')
    print(linprog(c, A_ub=A, b_ub=b))
    # Random tests
    num_random_tests = 5
    eps = 1e-6
    k = 1
    # BUG FIX: the loop previously hard-coded range(5), ignoring
    # num_random_tests, and the generated row count m was never used
    # (A/b were built n x n / n x 1). Use m rows of constraints.
    for i in range(num_random_tests):
        print('\n - - - - - - - - - - - \n')
        print('RANDOM TEST ', k, ': ')
        k += 1
        m = int(round(10 * exp(log(20) * rand())))   # number of constraints
        n = int(round(10 * exp(log(20) * rand())))   # number of variables
        sigma = 100
        A = round(sigma * normal(0, 1, (m, n)))
        b = round(sigma * abs(normal(0, 1, (m, 1))))[:, 0]
        c = round(sigma * normal(0, 1, (n, 1)))[:, 0]
        our_ans = solve(A, b, c)
        python_ans = linprog(c, A_ub=A, b_ub=b)
        if our_ans['x0'] is None:
            # Both solvers must agree that the problem is unbounded.
            if 'The problem appears to be unbounded' in python_ans['message'] and our_ans['ban'] == 1:
                print('Successfull test!')
            else:
                print('Something went wrong')
            continue
        if abs(python_ans['fun'] - our_ans['z0']) > eps:
            print('Something went wrong')
            continue
        print('Successfull test!')
if __name__ == '__main__':
main() | SergioArnaud/Linear-programming | Practica1/testFaseII.py | testFaseII.py | py | 2,007 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "mSimplexFaseII.solve",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.linprog",... |
9228040497 | import torch
from torch._C import Value
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import PoissonNLLLoss
from .MultiHeadAttention import MultiHeadAttention
from .Block import Block
class Decoder(nn.Module):
    """Transformer decoder layer: masked self-attention, encoder-decoder
    attention over the encoder memory, and a feed-forward block, each
    followed by dropout + residual + LayerNorm.
    """

    def __init__(self, d_model: int, q: int, v: int, h: int, dropout: float = 0.3) -> None:
        super().__init__()
        self._selfAttention = MultiHeadAttention(d_model, q, v, h)
        self._encoderDecoderAttention = MultiHeadAttention(d_model, q, v, h)
        self._feedforward = Block(d_model)
        self._layerNorm1 = nn.LayerNorm(d_model)
        self._layerNorm2 = nn.LayerNorm(d_model)
        self._layerNorm3 = nn.LayerNorm(d_model)
        self._dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, memory: torch.Tensor) -> torch.Tensor:
        # Masked (causal) self-attention over the decoder input.
        out = self._selfAttention(query=x, key=x, value=x, mask="subsequent")
        out = self._dropout(out)
        out = self._layerNorm1(out + x)
        # BUG FIX: encoder-decoder attention must query with the decoder state
        # and draw both keys and values from the encoder memory; the original
        # used query=x, key=x, value=memory.
        out1 = self._encoderDecoderAttention(query=out, key=memory, value=memory)
        out1 = self._dropout(out1)
        out1 = self._layerNorm2(out1 + out)
        out2 = self._feedforward(out1)
        out2 = self._dropout(out2)
        out2 = self._layerNorm3(out2 + out1)
        return out2
| chenzhike110/Transformer | Tranformer/Modules/Decoder.py | Decoder.py | py | 1,380 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "MultiHeadAttention.MultiHeadAttention",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "M... |
43041308146 | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication,QWidget,QHBoxLayout,QVBoxLayout,QRadioButton,QGroupBox,QPushButton,QLabel,QListWidget,QLineEdit
from second_win import *
from instr import *
class FinalWin(QWidget):
    """Results window shown at the end of the test run."""

    def __init__(self, exp):
        super().__init__()
        self.exp = exp
        self.set_appear()
        self.initUI()
        self.show()

    def initUI(self):
        # NOTE(review): self.results() and self.index are not defined in this
        # class — presumably provided elsewhere (mixin / star import); verify.
        self.work_text = QLabel(txt_workheart + self.results())
        self.index_text = QLabel(txt_index + str(self.index))
        layout = QVBoxLayout()
        layout.addWidget(self.index_text, alignment=Qt.AlignCenter)
        layout.addWidget(self.work_text, alignment=Qt.AlignCenter)
        self.layout_line = layout
        self.setLayout(layout)

    def set_appear(self):
        # Window geometry constants come from the star-imported instr module.
        self.setWindowTitle(txt_finalwin)
        self.resize(win_width, win_height)
        self.move(win_x, win_y)
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "... |
21114082657 | from src import app
from flask import jsonify, request
import requests
import json
import os
slackToken = os.environ['SLACK_TOKEN']
botAccessToken = os.environ['BOT_ACCESS_TOKEN']
hasuraDataUrl = "http://data.hasura/v1/query"
chatUrl = "https://slack.com/api/chat.postMessage"
##################### APIs ######################
@app.route('/', methods=['GET'])
def test():
    """Liveness probe: confirm the bot process is up."""
    return "Slackbot is running."
@app.route('/echo', methods=['POST'])
def event():
    """Slash command /echo: store the text and ask for send confirmation."""
    form = request.form.to_dict()
    print(form)
    print("SlackToken: " + slackToken)
    received = form["token"]
    print("ReceivedToken: " + received)
    if received != slackToken:
        return "Invalid Token"
    message = form["text"]
    id = storeMsgToDB(message)
    sendConfirmation(id, message, form["response_url"])
    return "Waiting for confirmation"
@app.route('/repo', methods=['POST'])
def repos():
    """Slash command /repo: summarize a GitHub repository."""
    form = request.form.to_dict()
    print(form)
    print("SlackToken: " + slackToken)
    received = form["token"]
    print("ReceivedToken: " + received)
    if received != slackToken:
        return "Invalid Token"
    return getRepo(form["text"])
@app.route('/issue', methods=['POST'])
def issues():
    """Slash command /issue: summarize a GitHub issue."""
    form = request.form.to_dict()
    print(form)
    print("SlackToken: " + slackToken)
    received = form["token"]
    print("ReceivedToken: " + received)
    if received != slackToken:
        return "Invalid Token"
    return getIssue(form["text"])
@app.route('/branch', methods=['POST'])
def branches():
    """Slash command /branch: summarize a GitHub branch."""
    form = request.form.to_dict()
    print(form)
    print("SlackToken: " + slackToken)
    received = form["token"]
    print("ReceivedToken: " + received)
    if received != slackToken:
        return "Invalid Token"
    return getBranch(form["text"])
@app.route('/helpme', methods=['POST'])
def helps():
    """Slash command /helpme: list the bot's supported commands."""
    form = request.form.to_dict()
    print(form)
    print("SlackToken: " + slackToken)
    received = form["token"]
    print("ReceivedToken: " + received)
    if received != slackToken:
        return "Invalid Token"
    return getHelp(form["text"])
@app.route('/member', methods=['POST'])
def members():
    """Slash command /member: list an organisation's public members."""
    form = request.form.to_dict()
    print(form)
    print("SlackToken: " + slackToken)
    received = form["token"]
    print("ReceivedToken: " + received)
    if received != slackToken:
        return "Invalid Token"
    return getMember(form["text"])
@app.route('/tag', methods=['POST'])
def tags():
    """Slash command /tag: report a repository's most recent tag."""
    form = request.form.to_dict()
    print(form)
    print("SlackToken: " + slackToken)
    received = form["token"]
    print("ReceivedToken: " + received)
    if received != slackToken:
        return "Invalid Token"
    return getTag(form["text"])
@app.route('/confirm', methods=['POST'])
def confirm():
    """Interactive-button callback: send the stored message on 'yes'."""
    payload = json.loads(request.form.to_dict()["payload"])
    print(payload)
    received = payload["token"]
    channel = payload["channel"]["id"]
    # NOTE(review): an invalid token falls through and returns None, as in
    # the original implementation.
    if received == slackToken:
        if payload["actions"][0]["value"] == "yes":
            sent = fetchFromDBAndSend(payload["callback_id"], channel)
            return "Message Sent: " + str(sent)
        return "Ok. Not sending. :confused:"
##################### Utility functions ######################
def getRepo(text):
    """Return a human-readable summary of a GitHub repository.

    *text* must look like "<org>/<repo>"; on malformed input a usage
    message is returned without calling the GitHub API.
    """
    slashparts = text.split('/')
    if text == "" or len(slashparts) <= 1 or slashparts[1] == "":
        # BUG FIX: the original returned the undefined name `strtexts`,
        # raising NameError instead of the usage message.
        return "Please enter the deatils in proper order"
    url = 'https://api.github.com/repos/' + slashparts[0] + '/' + slashparts[1]
    resp = requests.get(url).json()
    if 'message' in resp:
        # GitHub error responses carry a 'message' key.
        return "We could not find the result" + '\n' + "Make sure you entered the correct details :confused:"
    lines = ["Majority of the repo is written in " + resp['language'],
             "No of Forks made " + str(resp['forks']),
             "No of open issues for this repo is " + str(resp['open_issues']),
             "Check here: " + resp['html_url']]
    return '\n'.join(lines)
def getIssue(text):
    """Summarize the GitHub issue "<org>/<repo>/<issue_no>" given in *text*."""
    slashparts = text.split('/')
    if text == "" or len(slashparts) <= 2 or slashparts[2] == "":
        return "Please enter the deatils in proper order"
    url = 'https://api.github.com/repos/' + slashparts[0] + '/' + slashparts[1] + '/issues/' + slashparts[2]
    resp = requests.get(url).json()
    if 'message' in resp:
        return "We could not find the result" + '\n' + "Make sure that the particular issue exists :confused:"
    lines = ["Issue title: " + resp['title'],
             "Issue was opened by " + resp['user']['login'],
             "The issue is " + resp['state'],
             "Check here: " + resp['html_url']]
    return '\n'.join(lines)
def getHelp(text):
    """Return the usage text listing all supported slash commands.

    The *text* argument is accepted for interface parity but ignored.
    """
    commands = ["/repo <org_name>/<repo_name> \n",
                "/issue <org_name>/<repo_name>/<issue_no> \n",
                "/branch <org_name>/<repo_name>/<branch_name> \n",
                "/member <org_name> \n",
                "/tag <org_name>/<repo_name>"]
    return ":robot_face: Bot works on the following Slash commands: \n" + "".join(commands)
def getBranch(text):
    """Summarize the GitHub branch "<org>/<repo>/<branch>" given in *text*."""
    slashparts = text.split('/')
    if text == "" or len(slashparts) <= 2 or slashparts[2] == "":
        return "Please enter the deatils in proper order"
    url = 'https://api.github.com/repos/' + slashparts[0] + '/' + slashparts[1] + '/branches/' + slashparts[2]
    resp = requests.get(url).json()
    if 'message' in resp:
        return "We could not find the result" + '\n' + "Are u sure about the typo :confused:??"
    lines = ["Author of this branch: " + resp['commit']['author']['login'],
             "Message: " + resp['commit']['commit']['message'],
             "Check here: " + resp['commit']['html_url']]
    return '\n'.join(lines)
def getMember(text):
    """List the public members of the GitHub organisation named in *text*."""
    if text == "":
        return "Please enter the deatils in proper order"
    resp = requests.get('https://api.github.com/orgs/' + text + '/public_members').json()
    # On success resp is a list of member objects; error payloads are dicts
    # with a 'message' key.
    if 'message' in resp:
        return "We could not find the result" + '\n' + "Make sure that the particular organisation exists :confused:"
    names = "".join(member['login'] + " " for member in resp)
    return "Your organisation has " + names + "as their public members"
def getTag(text):
    """Report the most recent tag of the GitHub repo "<org>/<repo>" in *text*."""
    slashparts = text.split('/')
    if text == "" or len(slashparts) <= 1 or slashparts[1] == "":
        # BUG FIX: the original returned the undefined name `strtexts`,
        # raising NameError instead of the usage message.
        return "Please enter the deatils in proper order"
    url = 'https://api.github.com/repos/' + slashparts[0] + '/' + slashparts[1] + '/tags'
    resp = requests.get(url).json()
    if 'message' in resp:
        return "We could not find the result" + '\n' + "Make sure you entered the correct details :confused:"
    if len(resp) != 0:
        # Tags are returned newest-first by the GitHub API.
        return "The most recent release present for this repo is " + resp[0]['name']
    return "No tags are present in this repo :disappointed:"
def sendConfirmation(id, message, responseUrl):
    """Post an interactive yes/no confirmation for *message* to Slack.

    *id* (the DB row id) is smuggled through callback_id so /confirm can
    retrieve the message later.
    """
    buttons = [
        {"name": "yes", "text": "Yep", "type": "button", "value": "yes"},
        {"name": "no", "text": "Nope", "type": "button", "value": "no"},
    ]
    payload = {
        "text": "Are you sure you want to send a message?",
        "attachments": [
            {
                "text": '"' + message + '"',
                "fallback": "You are indecisive",
                "callback_id": id,
                "color": "#3AA3E3",
                "attachment_type": "default",
                "actions": buttons,
            }
        ],
    }
    headers = {'content-type': "application/json"}
    response = requests.request("POST", responseUrl, data=json.dumps(payload), headers=headers)
    print(response.text)
def storeMsgToDB(text):
    """Insert *text* into slack_messages via the Hasura Data API and
    return the auto-generated row id.

    Table layout:
        +-------------------------+----------------+
        | id (auto-increment int) | message (text) |
        +-------------------------+----------------+

    Uses the Hasura Data API endpoint instead of talking to postgres
    directly (try `hasura api-console` to explore it).
    """
    query = {
        "type": "insert",
        "args": {
            "table": "slack_messages",
            "objects": [{"message": text}],
            "returning": ["id"],
        },
    }
    headers = {
        "Content-Type": "application/json",
        "X-Hasura-User-Id": "1",
        "X-Hasura-Role": "admin",
    }
    resp = requests.request("POST", hasuraDataUrl, data=json.dumps(query), headers=headers)
    respObj = resp.json()
    print(respObj)
    return respObj["returning"][0]["id"]
def fetchFromDBAndSend(id, channel):
    """Look up the stored message with primary key *id* via the Hasura Data
    API and post it to the Slack *channel*; return the message text.

    Counterpart of storeMsgToDB — same slack_messages table, same Hasura
    endpoint.
    """
    query = {
        "type": "select",
        "args": {
            "table": "slack_messages",
            "columns": ["message"],
            "where": {"id": {"$eq": id}},
        },
    }
    headers = {
        "Content-Type": "application/json",
        "X-Hasura-User-Id": "1",
        "X-Hasura-Role": "admin",
    }
    resp = requests.request("POST", hasuraDataUrl, data=json.dumps(query), headers=headers)
    respObj = resp.json()
    print(respObj)
    return sendSlackMessage(respObj[0]["message"], channel)
def sendSlackMessage(message, channel):
    """Post ``message`` to the Slack ``channel`` via the chat API, echo the
    API response for debugging, and return the message that was sent."""
    auth_header = 'Bearer ' + botAccessToken
    payload = {
        "token": botAccessToken,
        "text": message,
        "channel": channel,
    }
    headers = {
        'content-type': "application/json",
        'Authorization': auth_header,
    }
    response = requests.request(
        "POST", chatUrl, data=json.dumps(payload), headers=headers
    )
    print(response.json())
    return message
| Satyabrat35/SlackGitBot | microservices/bot/app/src/server.py | server.py | py | 12,668 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "src.app.route",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "src.app",
"line_numbe... |
25770685168 | import numpy as np
import seaborn
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import layers, models
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn import svm
from sklearn.metrics import f1_score, accuracy_score
import seaborn as sns
def get_images_and_labels(file, name_folder, x):
    """Load the images (and optionally labels) listed in a manifest file.

    Parameters
    ----------
    file : str
        Path of a text file whose first line is a header ("id,label") and
        whose remaining lines are "image_name,label" (when x == 1) or just
        the image file name (when x == 0).
    name_folder : str
        Folder (relative to the current directory) containing the images.
    x : int
        1 when the manifest carries labels, 0 when it does not.

    Returns
    -------
    (images, labels) when x == 1, otherwise just the list of images.
    Each image is a numpy pixel array.

    Fixes vs. the original: the manifest is opened with ``with`` so the
    handle is closed even if a PIL read raises (it previously leaked), and
    the duplicated conditional expressions are unrolled for readability.
    """
    images = []
    labels = []
    with open(file, "r") as fh:
        for line in fh.readlines()[1:]:  # skip the "id,label" header line
            line = line.rstrip("\n")
            if x == 1:
                parts = line.split(",")  # [image_name, label]
                name = parts[0]
                labels.append(int(parts[1]))
            else:
                name = line
            # Load the image with PIL and convert it to a pixel array.
            images.append(np.array(Image.open(f"./{name_folder}/{name}")))
    if x == 0:
        return images
    return images, labels
#MODEL 1
(training_images, training_labels) = get_images_and_labels("train.txt", "train+validation", 1) #1 -> cu label, 0 -> fara
(validation_images, validation_labels) = get_images_and_labels("validation.txt", "train+validation", 1)
test_images = get_images_and_labels("test.txt", "test", 0)
training_images = np.array(training_images) #it was a simple list of np.arrays, now we transform it into an np.array of np.arrays (it's easier to work with them)
training_labels = np.array(training_labels)
validation_images = np.array(validation_images)
validation_labels = np.array(validation_labels)
test_images = np.array(test_images)
class_names = [0, 1, 2, 3, 4, 5, 6]
training_labels_one_hot = tf.keras.utils.to_categorical(training_labels) #for a better and faster operation we transform the array of labels into a matrix with length of the vector as number of line and
#len(class_names) as the number of columns
#example class 5 is transformed into -> [0. 0. 0. 0. 0. 1. 0.]
validation_labels_one_hot = tf.keras.utils.to_categorical(validation_labels)
training_images, validation_images, test_images = training_images / 255.0, validation_images / 255.0, test_images / 255.0 #for a better and faster operation
# we divide the value of the pixel to the max value that a pixel can get
# model = models.Sequential()
# model.add(layers.Conv2D(32, 2, padding="same",activation="relu", input_shape=(16, 16, 3)))
# model.add(layers.MaxPooling2D())
#
# model.add(layers.Conv2D(32, 2,padding="same", activation="relu"))
# model.add(layers.MaxPooling2D())
#
# model.add(layers.Conv2D(64, 2,padding="same", activation="relu"))
# model.add(layers.MaxPooling2D())
# model.add(layers.Dropout(0.6))
#
# model.add(layers.Flatten())
# model.add(layers.Dense(128, activation="relu"))
# model.add(layers.Dense(10, activation="softmax"))
#
# model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
#
# hist = model.fit(training_images, training_labels,epochs=10, validation_data=(validation_images, validation_labels))
#
# loss, accuracy = model.evaluate(validation_images, validation_labels)
#
# print(f"Loss:{loss}")
# print(f"Accuracy:{accuracy}")
#PART2
model = models.Sequential()
model.add(layers.Conv2D(32, 2, activation="relu", input_shape=(16, 16, 3))) #here i played with the values to get a better accuracy and this is the best i found
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(32, 2, activation="relu"))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(layers.Dense(256, activation="relu"))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(128, activation="relu"))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(7, activation="softmax"))
model.compile(loss= "categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) #Compile defines the loss function, the optimizer and the metrics
hist = model.fit(training_images, training_labels_one_hot, epochs=15, batch_size=32,validation_data=(validation_images, validation_labels_one_hot)) #fit the tranin_data, train_labels
#and validate with validation_images and validation labels
#batch_size is to group images and to approximates the loss function and propagates the gradients back to update the weights
plt.plot(hist.history['accuracy'], label='accuracy') #plotting accuracy
plt.plot(hist.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()
plt.plot(hist.history['loss'], label='loss') #plotting loss
plt.plot(hist.history['val_loss'], label = 'val_loss')
plt.xlabel('Epoch')
plt.ylabel('loss')
plt.ylim([1, 2])
plt.legend(loc='lower right')
plt.show()
loss, accuracy = model.evaluate(validation_images, validation_labels_one_hot) #with this function we get the accuracy and loss
print(f"Loss:{loss}")
print(f"Accuracy:{accuracy}")
#
# FINISHED PART2
# model.save("image_classifier1.model") #pentru a nu mai rula proramul de fiecare data, il salvez si apoi import datele
# model = models.load_model("image_classifier.model")
pred1 = model.predict(test_images) #pred1 contains all predictions for test_images
predictions_test = []
for el in pred1:
index = np.argmax(el) #using argmax we get the maximum index and we're using that index to get the actual class of the image
predictions_test.append(class_names[index])
pred2 = model.predict(validation_images) #pred 2 contains all predictions for validation images
predictions_val = []
for el in pred2:
index = np.argmax(el)
predictions_val.append(class_names[index]) #same here, i did this for the confusion matrix and for the accuracy and loss plot
def sample_submission(file_r, file_w):
    """Write the Kaggle submission file: one "name,predicted_class" row per
    test image listed in ``file_r`` (whose header line is skipped), using
    the module-level ``predictions_test`` / ``class_names``."""
    with open(file_r) as src:
        # Drop the header line and the trailing newlines.
        names = [line.rstrip("\n") for line in src.readlines()[1:]]
    with open(file_w, "w") as dst:
        dst.write("id,label\n")
        for idx, name in enumerate(names):
            dst.write(f"{name},{class_names[predictions_test[idx]]}\n")
sample_submission("test.txt", "date_out.txt") #call submission function
cf_matrix = confusion_matrix(validation_labels, predictions_val, labels=class_names) #here we display the confusion matrix
f = seaborn.heatmap(cf_matrix, annot=True, fmt="d")
plt.show()
#MODEL 2
# training_images = np.array(training_images).reshape(len(training_images), -1) # convertion from 4D to 2D the svm model works with only 2D data
# training_labels = np.array(training_labels)
# validation_images = np.array(validation_images).reshape(len(validation_images), -1)
# validation_labels = np.array(validation_labels)
# test_images = np.array(test_images).reshape(len(test_images), -1)
#
# def normalize_data(train_data, test_data, type=None): #function to normalize data using sklearn library
# if type == 'standard':
# std_scaler = StandardScaler()
# std_scaler.fit(train_data)
# train_data = std_scaler.transform(train_data)
# test_data = std_scaler.transform(test_data)
# elif type =='l2':
# normalized = Normalizer(norm='l2')
# train_data = normalized.transform(train_data)
# test_data = normalized.transform(test_data)
# elif type =='l1':
# normalized = Normalizer(norm='l1')
# train_data = normalized.transform(train_data)
# test_data = normalized.transform(test_data)
#
# return train_data, test_data
#
# training_images, test_images = normalize_data(training_images, test_images)
#
# svm_model = svm.SVC(C=1,kernel= "linear") #create the actual model
# hist = svm_model.fit(training_images, training_labels)
#
# pred_validation_labels = svm_model.predict(validation_images) #get the predictions, this is to get the accuracy
# pred_test_labels = svm_model.predict(test_images) #this is the actual predictions that we need
#
# def sample_submision(file_r, file_w): #this function works same as the other one
# test_data = []
# with open(file_r) as r:
# lines = r.readlines()
# for line in lines[1:]:
# test_data.append(line.rstrip("\n"))
#
# with open(file_w, "w") as w:
# w.write("id,label\n")
# for i in range(len(test_data)):
# w.write(f"{test_data[i]},{pred_test_labels[i]}\n")
#
# sample_submision("test.txt", "date_out.txt")
#
# cf_matrix = confusion_matrix(validation_labels, pred_validation_labels, labels=class_names) #to display the confusion matrix
# f = seaborn.heatmap(cf_matrix, annot=True, fmt="d")
# plt.show()
#
# print("Accuracy:", accuracy_score(validation_labels, pred_validation_labels)) #print the accuracy
# print("F1:", f1_score(validation_labels, pred_validation_labels, average=None))
| AndrewSSB/KaggleCompetition | main.py | main.py | py | 10,932 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number":... |
69801320106 | import csv
import sys
from collections import defaultdict
sys.setrecursionlimit(10**9)
array_words = []
with open('sgb-words.txt') as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
array_words.append(row[0])
def list_incident(word, array_words):
    """Return the words from ``array_words`` whose letters cover the tail
    of ``word``.

    For each position 1..len(word)-1, the character counts as a hit when
    it occurs in the candidate at least as often as in word[1:]. A
    candidate is kept when exactly 4 positions hit (i.e. the full tail of
    a five-letter word).
    """
    matches = []
    tail = word[1:]
    for candidate in array_words:
        hits = sum(
            1
            for ch in tail
            if ch in candidate and tail.count(ch) <= candidate.count(ch)
        )
        if hits == 4:
            matches.append(candidate)
    return matches
class GraphDirected:
    """Directed graph over words; counts and extracts strongly connected
    components (SCCs) with Kosaraju's two-pass DFS algorithm."""
    def __init__(self):
        # Adjacency list: node -> list of successor nodes.
        self.graph = defaultdict(list)
    def add_edge(self,word, incident):
        """Add a directed edge word -> incident."""
        self.graph[word].append(incident)
    def DFS(self, start, discovered):
        """Depth-first traversal from ``start``.

        ``discovered`` maps each reached node to the edge [parent, node]
        used to reach it, so callers can later walk back to the DFS root
        (roots keep whatever value they were seeded with).
        """
        for v in self.graph[start]:
            if v not in discovered:
                discovered[v] = [start, v]
                self.DFS(v, discovered)
    def fillOrder(self, start, discovered, stack):
        """First Kosaraju pass: push nodes onto ``stack`` in order of DFS
        completion (finish time), so the stack top finishes last."""
        for v in self.graph[start]:
            if v not in discovered:
                discovered[v] = [start, v]
                self.fillOrder(v, discovered, stack)
        stack.append(start)
    def getTranpose(self):
        """Return a new graph with every edge reversed (the transpose).
        (Name keeps the original spelling; callers depend on it.)"""
        g = GraphDirected()
        for w in self.graph.keys():
            for u in self.graph[w]:
                g.add_edge(u, w)
        return g
    def countandfindSCCs(self, word = None):
        """Second Kosaraju pass over the transposed graph.

        With ``word is None``: return the number of SCCs in the graph.
        With a ``word``: return the list of nodes in the SCC containing
        ``word`` — walk the recorded parent edges back to the component's
        DFS root (whose entry is None), then re-run DFS from that root.
        """
        stack = []
        discovered = {}
        # Pass 1: compute finish-time order on the original graph.
        for w in self.graph.keys():
            if w not in discovered:
                discovered[w] = None
                self.fillOrder(w, discovered,stack)
        graph = self.getTranpose()
        discovered = {}
        count = 0
        # Pass 2: DFS the transpose in decreasing finish time; each new
        # root discovers exactly one SCC.
        while len(stack) > 0:
            i = stack.pop()
            if i not in discovered:
                discovered[i] = None
                graph.DFS(i, discovered)
                count += 1
                if word is not None:
                    array = []
                    if word in discovered:
                        # Walk parent links back to the SCC's DFS root
                        # (roots are the entries stored as None).
                        root_word = word
                        walk_edge = discovered[word]
                        while walk_edge is not None:
                            walk = walk_edge[0]
                            root_word = walk
                            walk_edge = discovered[walk]
                        small_discovered = {}
                        small_discovered[root_word] = None
                        graph.DFS(root_word, small_discovered)
                        for w in small_discovered.keys():
                            array.append(w)
                        return array
        return count
g = GraphDirected()
for word in array_words:
array_incident = list_incident(word, array_words)
for w in array_incident:
g.add_edge(word, w)
print(g.countandfindSCCs("words"))
# LIst các từ trong cùng liên thông mạnh với input là từ
# print(g.countandfindSCCs())
# số liên thông mạnh trong đồ thị g | Chidt12/discreteMath | Bai3_Searching_on_graph/bai3b_searching_on_graph.py | bai3b_searching_on_graph.py | py | 2,846 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.setrecursionlimit",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 27,
"usage_type": "call"
}
] |
39844679092 | """
Iguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin,
Michael Gunselmann, Katrin Raab, Christian Strate
Iguana is licensed under a
Creative Commons Attribution-ShareAlike 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.
"""
from django import template
register = template.Library()
@register.simple_tag(name='get_user_preference', takes_context=True)
def get_user_preference(context, key, default=None):
    """Template tag: return the current user's preference for *key*.

    Delegates to ``user.get_preference(key, default)``; the fallback
    semantics for an unset preference live in that method.
    """
    user = context['user']
    return user.get_preference(key, default)
| midas66/iguana | src/common/templatetags/user_preference.py | user_preference.py | py | 603 | python | en | code | null | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 14,
"usage_type": "name"
}
] |
35854946253 | """
The flask application package.
"""
# newest 1.4 version of sqlalchemy not working please install 1.3.24
#pip install SQLAlchemy==1.3.24
async_mode = None
if async_mode is None:
try:
import gevent
async_mode = 'gevent'
except ImportError:
pass
if async_mode is None:
try:
import eventlet
async_mode = 'eventlet'
except ImportError:
pass
if async_mode is None:
async_mode = 'threading'
print('async_mode is ' + async_mode)
if __name__ == '__main__':
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
if async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
from flask import Flask, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_socketio import SocketIO
from flask_login import LoginManager,current_user
import HrnestBoss.app_config
from flask_session import Session
import sqlalchemy_utils
import os
import flask_admin as admin
from flask_admin import Admin, helpers, expose
from flask_admin.contrib.sqla import ModelView
#from flask_talisman import Talisman
import functools
#Set Main Configuration Type
#Conf_type='Development'
Conf_type='Production'
#Configuration Of working enviroment
#Developer_SQLALCHEMY_DATABASE_URI ='postgresql://TestAdmin:test@localhost/HrnestBoss_dev'
Production_SQLALCHEMY_DATABASE_URI = 'NEWDATABASE'
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'Hrnest!'
app.config.from_object(HrnestBoss.app_config)
Developer_SQLALCHEMY_DATABASE_URI =app.config['DATABASE_URL']
Session(app)
#Talisman(app)
#app.logger.level=logging.INFO
# Set enviromets from
if Conf_type=='Development':
app.config.update(
TESTING=False,
ENV='development',
DEBUG=True)
app.config['SQLALCHEMY_DATABASE_URI']=Developer_SQLALCHEMY_DATABASE_URI
else:
app.config.update(
TESTING=False,
ENV='production',
DEBUG=False)
app.config['SQLALCHEMY_DATABASE_URI']=Developer_SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_ENGINE_OPTIONS']={"connect_args": {"timeout": 100}}
app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')
socket_ = SocketIO(app, async_mode=async_mode)
db = SQLAlchemy(app)
login = LoginManager(app)
def hrnestAccess(f):
    """Decorator: run the wrapped view only when the logged-in user has the
    hrnest-access flag; otherwise answer with an access-denied payload."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if current_user.is_hrnest_access:
            return f(*args, **kwargs)
        return {'message': 'Access Denied'}
    return inner
import HrnestBoss.DbModel.populateTypesOfWork as check
# Check table Shift_typesfor presets data
check.check_Values()
import HrnestBoss.routes.views
import HrnestBoss.routes.user_routing
import HrnestBoss.routes.timetable_routing
import HrnestBoss.routes.request_routing
from HrnestBoss.DbModel.models import default_privileges, user, shift_type, work_group, user_request ,users , timetable
import uuid
class MyAdminIndexView(admin.AdminIndexView):
    """Admin landing page guard: only authenticated admin users see the
    flask-admin index; everyone else is redirected to the login view."""
    @expose('/')
    def index(self):
        # Anonymous users go straight to login.
        if not current_user.is_authenticated:
            return redirect(url_for('login'))
        else :
            # Authenticated but non-admin users are also bounced to login.
            if current_user.is_admin:
                return super(MyAdminIndexView,self).index()
            else:
                return redirect(url_for('login'))
_admin = Admin(app,'HRnestBOSS Panel',index_view=MyAdminIndexView())
_admin.add_view(ModelView(default_privileges, db.session))
_admin.add_view(ModelView(user, db.session))
_admin.add_view(ModelView(shift_type, db.session))
_admin.add_view(ModelView(work_group, db.session))
_admin.add_view(ModelView(user_request, db.session))
_admin.add_view(ModelView(users, db.session))
_admin.add_view(ModelView(timetable, db.session))
if app.config['ENABLE_ANYMOUS_USERS']:
_user = user.query.filter_by(email='no_email@none.com',login='anymous').first()
if _user is None:
_user = user(login='anymous', email='no_email@none.com', uid=str(uuid.uuid4()),active=True, is_admin=False, hrnest_access=False )
_user.set_password('None')
db.session.add(_user)
db.session.commit()
_user = user.query.filter_by(email='no_validate@none.com',login='adminHB').first()
if _user is None:
_user = user(login='adminHB', email='no_validate@none.com', uid=str(uuid.uuid4()),active=True, is_admin=True, hrnest_access=True )
_user.set_password('adminHB')
db.session.add(_user)
db.session.commit()
| Radkos1976/Hrnest-FLask-enchacment | HrnestBoss/HrnestBoss/__init__.py | __init__.py | py | 4,549 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "eventlet.monkey_patch",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "gevent.monkey.patch_all",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "gevent.monkey",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.Fla... |
34993839592 | # Thư viện
import pygame, sys
import numpy as np
import time
# Khởi tạo game
pygame.init()
# ---------
# CÁC HẰNG SỐ
# ---------
WIDTH = 600
HEIGHT = WIDTH
LINE_WIDTH = 15
WIN_LINE_WIDTH = 8
BOARD_ROWS = 5
BOARD_COLS = BOARD_ROWS
SQUARE_SIZE = WIDTH/BOARD_ROWS
CIRCLE_RADIUS = SQUARE_SIZE/3
CIRCLE_WIDTH = 15
CROSS_WIDTH = 25
SPACE = SQUARE_SIZE/4
RED = (235, 47, 6)
BG_COLOR = (72, 84, 96)
LINE_COLOR = (23, 145, 135)
CIRCLE_COLOR = (255, 211, 42)
CROSS_COLOR = (186, 220, 88)
WIN_COLOR = (66, 66, 66)
# ---------
# VARIABLES
# ---------
player = 1
game_over = False
# ------
# SCREEN
# ------
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption( 'Isolation' )
screen.fill( BG_COLOR )
# -------------
# CONSOLE BOARD
# -------------
board = np.zeros( (BOARD_ROWS, BOARD_COLS) )
# ---------
# FUNCTIONS
# ---------
def draw_lines():
    """Draw the board grid: the inner horizontal and vertical separators."""
    for idx in range(1, BOARD_ROWS):
        y = SQUARE_SIZE * idx
        pygame.draw.line(screen, LINE_COLOR, (0, y), (WIDTH, y), LINE_WIDTH)
    for idx in range(1, BOARD_COLS):
        x = idx * SQUARE_SIZE
        pygame.draw.line(screen, LINE_COLOR, (x, 0), (x, HEIGHT), LINE_WIDTH)
def draw_figures():
    """Render every marked square: player 1 as a circle, player 2 as a cross."""
    for r in range(BOARD_ROWS):
        for c in range(BOARD_COLS):
            if board[r][c] == 1:
                center = (int(c * SQUARE_SIZE + SQUARE_SIZE // 2),
                          int(r * SQUARE_SIZE + SQUARE_SIZE // 2))
                pygame.draw.circle(screen, CIRCLE_COLOR, center, CIRCLE_RADIUS, CIRCLE_WIDTH)
            elif board[r][c] == 2:
                left = c * SQUARE_SIZE + SPACE
                right = c * SQUARE_SIZE + SQUARE_SIZE - SPACE
                top = r * SQUARE_SIZE + SPACE
                bottom = r * SQUARE_SIZE + SQUARE_SIZE - SPACE
                # The two strokes of the X.
                pygame.draw.line(screen, CROSS_COLOR, (left, bottom), (right, top), CROSS_WIDTH)
                pygame.draw.line(screen, CROSS_COLOR, (left, top), (right, bottom), CROSS_WIDTH)
def mark_square(row, col, player):
    """Record ``player``'s move at (row, col) on the global board."""
    board[row][col] = player
def available_square(row, col):
    """True when the square at (row, col) is still unoccupied (value 0)."""
    return board[row][col] == 0
def is_board_full():
    """True when no empty (0) square remains anywhere on the board."""
    return all(
        board[r][c] != 0
        for r in range(BOARD_ROWS)
        for c in range(BOARD_COLS)
    )
WIN_LENGTH = 4
def check_win(player):
    """Return True (and draw the winning stroke on screen) if ``player``
    has WIN_LENGTH marks in a row vertically, horizontally, or on either
    diagonal; otherwise return False."""
    # Vertical runs
    for col in range(BOARD_COLS):
        for row in range(BOARD_ROWS - (WIN_LENGTH - 1)):
            if all(board[row+i][col] == player for i in range(WIN_LENGTH)):
                draw_vertical_winning_line(col, row, player)
                return True
    # Horizontal runs
    for row in range(BOARD_ROWS):
        for col in range(BOARD_COLS - (WIN_LENGTH - 1)):
            if all(board[row][col+i] == player for i in range(WIN_LENGTH)):
                draw_horizontal_winning_line(col, row, player)
                return True
    # Down-right diagonal runs
    for row in range(BOARD_ROWS - (WIN_LENGTH - 1)):
        for col in range(BOARD_COLS - (WIN_LENGTH - 1)):
            if all(board[row+i][col+i] == player for i in range(WIN_LENGTH)):
                draw_asc_diagonal(col, row, player)
                return True
    # Up-right diagonal runs
    # NOTE(review): the inner range uses BOARD_ROWS where BOARD_COLS seems
    # intended — harmless here only because the board is square.
    for row in range((WIN_LENGTH - 1),BOARD_ROWS):
        for col in range(BOARD_ROWS - (WIN_LENGTH - 1)):
            if all(board[row-i][col+i] == player for i in range(WIN_LENGTH)):
                draw_desc_diagonal(row, col, player)
                # print(row,col)
                return True
    return False
# =========
# Hàm vẽ đường win
# =========
def draw_vertical_winning_line(col, row, player):
    """Stroke the winning vertical run whose topmost cell is (row, col)."""
    center_x = int(col * SQUARE_SIZE + SQUARE_SIZE / 2)
    top_y = int(row * SQUARE_SIZE + SQUARE_SIZE / 2) - 48
    bottom_y = int((row + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2) + 48
    pygame.draw.line(screen, WIN_COLOR, (center_x, top_y), (center_x, bottom_y), WIN_LINE_WIDTH)
def draw_horizontal_winning_line(col, row, player):
    """Stroke the winning horizontal run whose leftmost cell is (row, col)."""
    left_x = int(col * SQUARE_SIZE + SQUARE_SIZE / 2) - 48
    right_x = int((col + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2) + 48
    center_y = int(row * SQUARE_SIZE + SQUARE_SIZE / 2)
    pygame.draw.line(screen, WIN_COLOR, (left_x, center_y), (right_x, center_y), WIN_LINE_WIDTH)
def draw_asc_diagonal(col, row, player):
    """Stroke the winning down-right diagonal starting at (row, col)."""
    start = (int(col * SQUARE_SIZE + SQUARE_SIZE / 2),
             int(row * SQUARE_SIZE + SQUARE_SIZE / 2))
    end = (int((col + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2),
           int((row + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2))
    pygame.draw.line(screen, WIN_COLOR, start, end, WIN_LINE_WIDTH)
def draw_desc_diagonal(row,col, player):
    """Stroke the winning up-right diagonal starting at (row, col).
    Note the (row, col) argument order differs from the other helpers."""
    start = (int(col * SQUARE_SIZE + SQUARE_SIZE / 2),
             int(row * SQUARE_SIZE + SQUARE_SIZE / 2))
    end = (int((col+WIN_LENGTH-1)* SQUARE_SIZE + SQUARE_SIZE / 2),
           int((row-WIN_LENGTH+1)* SQUARE_SIZE + SQUARE_SIZE / 2))
    pygame.draw.line(screen, WIN_COLOR, start, end, WIN_LINE_WIDTH)
def restart():
    """Reset the game: repaint the background, redraw the grid, and zero
    every cell of the board."""
    screen.fill(BG_COLOR)
    draw_lines()
    for r in range(BOARD_ROWS):
        for c in range(BOARD_COLS):
            board[r][c] = 0
def checkWinner():
    """Scan the board for any four-in-a-row.

    Returns the winning player's value (1 or 2) when a run of four equal
    non-zero cells exists, 0 when the board is full with no winner (draw),
    and None while the game is still open.
    """
    # Horizontal
    for row in range(BOARD_ROWS):
        for col in range(BOARD_COLS - 3):
            if board[row][col] == board[row][col+1] == board[row][col+2] == board[row][col+3] != 0:
                return board[row][col]
    # Vertical
    for row in range(BOARD_ROWS - 3):
        for col in range(BOARD_COLS):
            if board[row][col] == board[row+1][col] == board[row+2][col] == board[row+3][col] != 0:
                return board[row][col]
    # Down-right diagonal
    for row in range(BOARD_ROWS - 3):
        for col in range(BOARD_COLS - 3):
            if board[row][col] == board[row+1][col+1] == board[row+2][col+2] == board[row+3][col+3] != 0:
                return board[row][col]
    # Up-right diagonal
    for row in range(3, BOARD_ROWS):
        for col in range(BOARD_COLS - 3):
            if board[row][col] == board[row-1][col+1] == board[row-2][col+2] == board[row-3][col+3] != 0:
                return board[row][col]
    # Down-left diagonal (scanned from the right edge)
    for row in range(BOARD_ROWS - 3):
        for col in range(BOARD_COLS - 3):
            if board[row][col+3] == board[row+1][col+2] == board[row+2][col+1] == board[row+3][col] != 0:
                return board[row][col+3]
    # Draw check: any empty cell means the game is still in progress.
    for row in range(BOARD_ROWS):
        for col in range(BOARD_COLS):
            if board[row][col] == 0:
                return None
    return 0
def numberplay():
    """Count the AI's marks (value 2) on the board, halve the count, and
    round that half down to the nearest even number."""
    ai_marks = sum(
        1
        for r in range(BOARD_ROWS)
        for c in range(BOARD_COLS)
        if board[r][c] == 2
    )
    half = ai_marks // 2
    return half if half % 2 == 0 else half - 1
mytime = 0
def bestMove():
    """Pick and play the AI's move.

    Evaluates every empty cell with depth-capped alpha-beta minimax
    (depth limit n = 3), marks the best cell with 2, redraws the board,
    and returns the chosen (row, col) — or (-1, -1) when the board is
    already full. Total thinking time accumulates in the global
    ``mytime``.
    """
    global mytime
    n = 3
    start_time = time.time()
    bestScore = -100000
    move = None
    empty_cells = [(row, col) for row in range(BOARD_ROWS) for col in range(BOARD_COLS) if board[row][col] == 0]
    if not empty_cells:
        return (-1, -1)
    for row, col in empty_cells:
        # Try the move, score the resulting position, then undo it.
        board[row][col] = 2
        score = minimax(board, 0,n, -100000, 100000, False)
        board[row][col] = 0
        if score > bestScore:
            bestScore = score
            move = (row, col)
    if move:
        mark_square(move[0], move[1], 2)
        draw_figures()
    end_time = time.time()
    elapsed_time = end_time - start_time
    mytime = mytime + elapsed_time
    # NOTE(review): the label says "first move" but ``mytime`` is the
    # running total across all AI moves.
    print("Caro_5x5:time to make first move :%f"%(mytime))
    return move
scores = {
1: -10,
2: 10,
0: 0
}
i = 0
def minimax(board, depth,depthmax, alpha, beta, isMaximizing):
    """Alpha-beta minimax over the board; player 2 maximizes, player 1
    minimizes. Terminal positions score via the ``scores`` table
    (win = +/-10, draw = 0).

    NOTE(review): when ``depth > depthmax`` the loop breaks without
    evaluating any child, so the cutoff returns the raw +/-100000
    sentinel rather than a heuristic estimate — TODO confirm intended.
    """
    global i
    # Node counter, printed every call — debug instrumentation that also
    # slows the search considerably.
    i = i+1
    print(i)
    result = checkWinner()
    if result is not None:
        return scores[result]
    if isMaximizing:
        bestScore = -100000
        for row in range(BOARD_ROWS):
            for col in range(BOARD_COLS):
                if board[row][col] == 0:
                    # Try the AI move, recurse, then undo.
                    board[row][col] = 2
                    if(depth > depthmax):
                        board[row][col] = 0
                        break
                    score = minimax(board,depth+1,depthmax, alpha, beta, False)
                    board[row][col] = 0
                    bestScore = max(score, bestScore)
                    alpha = max(alpha, bestScore)
                    if beta <= alpha:
                        # Alpha-beta prune: opponent won't allow this line.
                        break
        return bestScore
    else:
        bestScore = 100000
        for row in range(BOARD_ROWS):
            for col in range(BOARD_COLS):
                if board[row][col] == 0:
                    # Try the human move, recurse, then undo.
                    board[row][col] = 1
                    if(depth > depthmax):
                        board[row][col] = 0
                        break
                    score = minimax(board, depth+1,depthmax, alpha, beta, True)
                    board[row][col] = 0
                    bestScore = min(score, bestScore)
                    beta = min(beta, bestScore)
                    if beta <= alpha:
                        break
        return bestScore
draw_lines()
# --------
# MAINLOOP
# --------
# Event loop: human (player 1) clicks a square, then the AI (player 2)
# answers via bestMove(); 'r' restarts the game.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.MOUSEBUTTONDOWN and not game_over:
            mouseX = event.pos[0] # x
            mouseY = event.pos[1] # y
            # Convert pixel position to board coordinates.
            clicked_row = int(mouseY // SQUARE_SIZE)
            clicked_col = int(mouseX // SQUARE_SIZE)
            if available_square( clicked_row, clicked_col ):
                player = 1
                mark_square( clicked_row, clicked_col, player )
                draw_figures()
                if check_win( player ):
                    font = pygame.font.SysFont(None, 100)
                    text = font.render("You win", True, pygame.Color(RED))
                    text_rect = text.get_rect(center=(WIDTH/2, HEIGHT/2))
                    screen.blit(text, text_rect)
                    game_over = True
                elif is_board_full():
                    # "Hòa" = draw (user-facing string, kept as-is).
                    font = pygame.font.SysFont(None, 100)
                    text = font.render("Hòa", True, pygame.Color(RED))
                    text_rect = text.get_rect(center=(WIDTH/2, HEIGHT/2))
                    screen.blit(text, text_rect)
                    game_over = True
                else:
                    # Human move accepted; let the AI respond.
                    player = 2
                    draw_figures()
                    pygame.display.update()
                    bestMove()
                    draw_figures()
                    if check_win( player ):
                        # "Máy win" = the computer wins (user-facing string).
                        font = pygame.font.SysFont(None, 100)
                        text = font.render("Máy win", True, pygame.Color(RED))
                        text_rect = text.get_rect(center=(WIDTH/2, HEIGHT/2))
                        screen.blit(text, text_rect)
                        game_over = True
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_r:
                # 'r' resets the board and hands the move back to player 1.
                restart()
                player = 1
                game_over = False
    draw_figures()
    pygame.display.update()
| LeVan102/AI_Caro | Caro5x5.py | Caro5x5.py | py | 11,268 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
15480079320 | import io
from PIL import Image
from django.test import TestCase, Client
from django.urls import reverse
import numpy as np
from unittest.mock import patch
from mnist_predictor.views import make_prediction
class PredictViewTestCase(TestCase):
    """Django tests for the 'predict' endpoint and the make_prediction helper."""
    def setUp(self):
        self.client = Client()
        # Create a test image for the POST requests
        # (a blank 28x28 grayscale PNG, matching the MNIST input size).
        self.image = Image.new('L', (28, 28), color=255)
        self.image_bytes = io.BytesIO()
        self.image.save(self.image_bytes, format='PNG')
        self.image_bytes.seek(0)

    def test_predict_view_with_valid_data(self):
        """POST with an image: the view should return the mocked prediction."""
        # NOTE(review): the mock is never asserted; consider
        # mock_make_prediction.assert_called_once() to pin the call.
        with patch('mnist_predictor.views.make_prediction', return_value=3) as mock_make_prediction:
            response = self.client.post(reverse('predict'), {'image': self.image_bytes}, format='multipart')
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.json(), {'prediction': 3})

    def test_predict_view_with_invalid_data(self):
        """POST without an image should be rejected with 400."""
        response = self.client.post(reverse('predict'))
        self.assertEqual(response.status_code, 400)

    def test_make_prediction_function(self):
        """make_prediction should return an int digit in [0, 9]."""
        # Create a test image to use as input
        image = np.ones((1, 28, 28, 1))
        # Make a prediction using the make_prediction function
        prediction = make_prediction(image)
        # Assert that the prediction is of the expected type and value
        self.assertIsInstance(prediction, int)
        self.assertGreaterEqual(prediction, 0)
        self.assertLessEqual(prediction, 9)
{
"api_name": "django.test.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.test.Client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
... |
4492015736 | ## Load training SDFs
import argparse
import colorsys
import os
import numpy as np
import pathlib
import tqdm
import open3d as o3d
import random
from CARTO.simnet.lib.datapoint import decompress_datapoint
from CARTO.Decoder import utils
from CARTO.Decoder.data import dataset
from CARTO.Decoder import config
from CARTO.Decoder.visualizing import code_vis
from PIL import Image
import seaborn as sns
def main(args):
    """Render SDF training datapoints as colored point clouds.

    Reads up to ``args.n`` zstd-compressed datapoints from
    ``args.file_dir`` and writes one PNG per datapoint into
    ``args.out_dir``, coloring points by the sign of their SDF value
    (inside vs. outside the surface).
    """
    file_dir = pathlib.Path(args.file_dir)
    out_dir = pathlib.Path(args.out_dir)
    out_dir.mkdir(exist_ok=True, parents=True)
    dataset_cfg: config.GenerationConfig = utils.load_cfg(
        file_dir, cfg_class=config.GenerationConfig
    )
    all_files = list(file_dir.glob("*.zstd"))
    # Order by modification time when -l/-e given, otherwise shuffle.
    if args.latest or args.earliest:
        all_files.sort(key=lambda x: os.path.getmtime(x), reverse=args.earliest)
    else:
        print("Shuffling object list")
        random.shuffle(all_files)
    # Tally datapoints per the second-to-last "_"-separated field of the
    # file name — assumes that naming scheme encodes a category; TODO confirm.
    counts = utils.AccumulatorDict()
    for file_name in all_files:
        counts.increment(str(file_name).split("_")[-2], 1)
    print(counts)
    render = code_vis.get_o3d_render(frame_width=600, frame_height=600)
    for i, file_path in tqdm.tqdm(enumerate(all_files[: args.n])):
        with open(file_path, "rb") as fh:
            buf = fh.read()
        data_point: dataset.DataPoint = decompress_datapoint(buf)
        # print(data_point.keys())
        sdf = data_point.sdf_values[:, None]
        points = data_point.points
        # Assign inside/outside color
        colors = np.where(
            sdf < 0.0,
            np.ones_like(points) * sns.color_palette("tab10")[0],
            np.ones_like(points) * sns.color_palette("tab10")[1],
        )
        if len(points) == 0:
            continue
        # Normalize into the unit range used during generation.
        points /= dataset_cfg.max_extent
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)
        img_np = code_vis.render_o3d_mesh(pcd, height_coloring=False, render=render)
        img_PIL = Image.fromarray(img_np)
        img_PIL.save(str(out_dir / f"{i}.png"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file_dir")
parser.add_argument("out_dir")
parser.add_argument("-n", type=int, default=100)
parser.add_argument("-l", "--latest", action="store_true", default=False)
parser.add_argument("-e", "--earliest", action="store_true", default=False)
args = parser.parse_args()
main(args)
| robot-learning-freiburg/CARTO | CARTO/Decoder/visualizing/visualize_sdf_values.py | visualize_sdf_values.py | py | 2,511 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "CARTO.Decoder.config.GenerationConfig",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "... |
2875218070 | #!/usr/bin/python3
import requests
def number_of_subscribers(subreddit):
    """Return the subscriber count of *subreddit*, or 0 on any failure.

    Queries Reddit's public ``/r/<subreddit>/about.json`` endpoint. A custom
    User-Agent is set because Reddit rejects the default one with API errors.
    """
    headers = {'User-Agent': 'MyRedditBot/1.0'}
    url = f'https://www.reddit.com/r/{subreddit}/about.json'
    try:
        # Invalid subreddits answer with a redirect to a search page rather
        # than an error status, so redirects must NOT be followed; the
        # timeout keeps the call from hanging forever on a stalled server.
        response = requests.get(url, headers=headers,
                                allow_redirects=False, timeout=10)
    except requests.RequestException:
        # Network failure (DNS, connection, timeout, ...) counts as "unknown".
        return 0
    if response.status_code != 200:
        return 0
    try:
        # Extract the number of subscribers from the JSON payload.
        return response.json()['data']['subscribers']
    except (KeyError, ValueError):
        # Malformed/unexpected payload.
        return 0
""" Test cases"""
subreddit_name = 'python'
subscribers = number_of_subscribers(subreddit_name)
print(f"Subscribers in /r/{subreddit_name}: {subscribers}")
| Ojobumiche/alx-higher_level_programming | 0x16-api_advanced/0-subs.py | 0-subs.py | py | 933 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
73118843944 | import multiprocessing
from threading import Thread
import time
def is_prime(n):
    """Return True when n is prime, using trial division up to sqrt(n)."""
    if n <= 1:
        return False
    limit = int(n ** 0.5)
    return all(n % divisor for divisor in range(2, limit + 1))
def find_primes(end, start):
    """Collect all primes in range(start, end - 1).

    NOTE(review): the upper bound excludes both ``end`` and ``end - 1``,
    which looks like an off-by-one -- confirm the intended range.  The
    parameter order is (end, start), matching how the benchmark below
    passes its arguments.
    """
    return [candidate for candidate in range(start, end - 1)
            if is_prime(candidate)]
if __name__ == "__main__":
star_time = time.time()
res1 = find_primes(10000, 3)
res2 = find_primes(20000, 10001)
res3 = find_primes(30000, 20001)
end_time = time.time()
timer = end_time - star_time
print(f"Затрачено времени на поэтапный запууск: {timer} сек")
start_time = time.perf_counter()
t1 = Thread(target=find_primes, args=(10000, 3))
t2 = Thread(target=find_primes, args=(20000, 10001))
t3 = Thread(target=find_primes, args=(30000, 20001))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
print(f"Время выполнения в потоках {time.perf_counter() - start_time} сек")
start_time = time.perf_counter()
p1 = multiprocessing.Process(target=find_primes, args=(3, 10000))
p2 = multiprocessing.Process(target=find_primes, args=(10001, 20000))
p3 = multiprocessing.Process(target=find_primes, args=(20001, 30000))
p1.start()
p2.start()
p3.start()
p1.join()
p2.join()
p3.join()
print(f"Время выполнения в разных процессах {time.perf_counter() - start_time} сек")
# Если не выполнить start() в потоках и процессах, то они не будут запущены
# Если не выполнить join() в потоках и процессах, то программа не будет дожидаться завершения всех дочерних потоков
# и процессов
# Распараллеливание по потокам не дает преимущества во времени в задачах CPU- bound (происходит это по причине
# GIL - глобальной блокировки интерпретатора (каждый из потоков полностью "захватывает" процессор для своего выполнения)
# Распараллеливание по процессам не дало преимуществ в данной задаче, поскольку расходы на создание процессов
# не окупились, объемы вычислений не достаточно велики для вычисления в разных процессах
| IlyaOrlov/PythonCourse2.0_September23 | Practice/achernov/module_12/task_1.py | task_1.py | py | 2,712 | python | ru | code | 2 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_nu... |
26122545244 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 09:43:42 2016
@author: sampepose
"""
import csv
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
# Flat experiment script: measure kNN accuracy on the Kaggle digit data as a
# function of training-sample size, then plot accuracy vs. sample size.
data = []
TestData = []  # never used below -- presumably left over from an earlier version
# Read the training data
f = open('data/train.csv')
reader = csv.reader(f)
next(reader, None)  # skip the CSV header row
for row in reader:
    data.append(row)
f.close()
# Column 0 is the label, the remaining columns are pixel values.
# NOTE(review): csv yields strings, so X and y are string arrays here --
# sklearn appears to be relied on to coerce/compare them; confirm this
# against the installed sklearn version.
X = np.array([x[1:] for x in data])
y = np.array([x[0] for x in data])
del data # free up the memory
print('loaded training data')
# Construct k-nearest neighbor classifier and 'fit' it
kNeigh = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)
# Hold out the last 12000 rows as a fixed validation set.
validation_X = X[-12000:]
validation_y = y[-12000:]
X = X[:-12000]
y = y[:-12000]
x_plot = []
y_plot = []
maxN = 30
# Train on n*1000 randomly chosen rows for n = 1..maxN and record accuracy
# on the fixed validation set each time.
for n in range(1, maxN + 1):
    rand = np.random.choice(X.shape[0], n * 1000, replace=False)
    rand_X = X[rand, :]
    rand_y = y[rand]
    kNeigh.fit(rand_X, rand_y)
    # predict the test data
    predict = kNeigh.predict(validation_X)
    correct = 0
    for r in range(0, validation_y.shape[0]):
        if predict[r] == validation_y[r]:
            correct += 1
    x_plot.append(n)
    y_plot.append(100.0 * (correct / validation_y.shape[0]))
    print('finished n=',n)
print(x_plot)
print(y_plot)
# Plot accuracy (%) against the training-sample size in thousands of rows.
plt.axis([1, maxN + 1, 85, 100])
plt.xlabel('training sample size (thousands)')
plt.ylabel('percent accuracy')
plt.scatter(x_plot, y_plot, marker='o')
plt.show()
| sampepose/digit-recognizer | kNearestNeighbor/test_increasing_sample_size.py | test_increasing_sample_size.py | py | 1,447 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassi... |
42493212575 | """
WRITEME
"""
from __future__ import absolute_import, print_function, division
from copy import copy, deepcopy
from sys import getsizeof
import sys
import traceback
import numpy as np
import theano
from theano.compat import izip
from six import reraise
from six.moves import StringIO
from theano.gof import utils
from theano.gof import graph
from theano.gof.type import Type
from .utils import undef
# Keep a reference to the original excepthook so thunk_hook (below) can
# delegate to it after logging Theano-specific trace information.
__excepthook = sys.excepthook
def log_thunk_trace(value, f=sys.stderr):
    """
    Log Theano's diagnostic stack trace for an exception
    raised by raise_with_op.
    """
    # Could accept a `write` callable instead of a file object in the
    # future, to support routing these messages through a logger.
    def _emit(msg):
        print("log_thunk_trace: %s" % msg.strip(), file=f)

    if not hasattr(value, '__thunk_trace__'):
        # Exception was not annotated by raise_with_op: nothing to report.
        return
    stored_trace = value.__thunk_trace__
    _emit("There was a problem executing an Op.")
    if stored_trace is None:
        _emit("Could not find where this Op was defined.")
        _emit(" * You might have instantiated this Op "
              "directly instead of using a constructor.")
        _emit(" * The Op you constructed might have been"
              " optimized. Try turning off optimizations.")
    elif stored_trace:
        _emit("Definition in: ")
        for formatted in traceback.format_list(stored_trace):
            _emit(formatted)
        _emit("For the full definition stack trace set"
              " the Theano flags traceback.limit to -1")
# NOTE: the parameter names deliberately shadow builtins (`type`) to match
# the sys.excepthook(type, value, traceback) calling convention.
def thunk_hook(type, value, trace):
    """
    This function is meant to replace excepthook and do some
    special work if the exception value has a __thunk_trace__
    field.
    In that case, it retrieves the field, which should
    contain a trace as returned by L{traceback.extract_stack},
    and prints it out on L{stderr}.
    The normal excepthook is then called.
    Parameters:
    ----------
    type
        Exception class
    value
        Exception instance
    trace
        Traceback object
    Notes
    -----
    This hook replaced by nosetests, so it does not run in nose tests.
    """
    # Emit the Theano-specific diagnostics first, then fall through to the
    # excepthook that was installed before this module was imported.
    log_thunk_trace(value)
    __excepthook(type, value, trace)
# Install the hook globally so __thunk_trace__ annotations are logged for
# uncaught exceptions.
sys.excepthook = thunk_hook
# TODO: Make this work with linker defined schedule
def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
    """
    Re-raise an exception while annotating the exception object with
    debug info.
    Parameters
    ----------
    node : Apply node
        The Apply node object that resulted in the raised exception.
    exc_info : tuple, optional
        A tuple containing the exception type, exception object and
        associated traceback, as would be returned by a call to
        `sys.exc_info()` (which is done if `None` is passed).
    storage_map: dict, optional
        storage map of the theano function that resulted in the
        raised exception.
    Notes
    -----
    This re-raises the exception described by `exc_info` (or the last
    one raised, if `exc_info` is omitted) and annotates the exception
    object with several new members which may be helpful for debugging
    Theano graphs. They are:
    * __op_instance__: The Op that is responsible for the exception
      being raised.
    * __thunk_trace__: A traceback corresponding to the code that
      actually generated the exception, if it is available.
    * __applynode_index__: The index of the Apply node corresponding
      to this op in `op.fgraph.toposort()`.
    The exception is not annotated if it is of type `KeyboardInterrupt`.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_trace = exc_info
    if exc_type == KeyboardInterrupt:
        # print a simple traceback from KeyboardInterrupt
        reraise(exc_type, exc_value, exc_trace)
    # Prefer the creation trace recorded on the node's first output; fall
    # back to the Op's own tag, then to an empty trace.
    try:
        trace = node.outputs[0].tag.trace
    except AttributeError:
        try:
            trace = node.op.tag.trace
        except AttributeError:
            trace = ()
    exc_value.__thunk_trace__ = trace
    exc_value.__op_instance__ = node
    topo = node.fgraph.toposort()
    if node in topo:
        node_index = topo.index(node)
    else:
        node_index = None
    exc_value.__applynode_index__ = node_index
    # `hints` accumulates suggestions that are appended after the detailed
    # message (they depend on which diagnostics could not be produced).
    hints = []
    detailed_err_msg = "\nApply node that caused the error: " + str(node)
    if exc_value.__applynode_index__ is not None:
        detailed_err_msg += "\nToposort index: %d" % node_index
    types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
    detailed_err_msg += "\nInputs types: %s\n" % types
    if thunk is not None:
        # With a thunk available we can describe the concrete runtime values.
        if hasattr(thunk, 'inputs'):
            shapes = [getattr(ipt[0], 'shape', 'No shapes')
                      for ipt in thunk.inputs]
            strides = [getattr(ipt[0], 'strides', 'No strides')
                       for ipt in thunk.inputs]
            scalar_values = []
            for ipt in thunk.inputs:
                # Only print tiny values; anything larger is elided.
                if getattr(ipt[0], "size", -1) <= 5:
                    scalar_values.append(ipt[0])
                else:
                    scalar_values.append("not shown")
        else:
            shapes = "The thunk don't have an inputs attributes."
            strides = "So we can't access the strides of inputs values"
            scalar_values = "And can't print its inputs scalar value"
        clients = [[c[0] for c in var.clients] for var in node.outputs]
        detailed_err_msg += ("Inputs shapes: %s" % shapes +
                             "\nInputs strides: %s" % strides +
                             "\nInputs values: %s" % scalar_values)
        if theano.config.exception_verbosity == 'high':
            detailed_err_msg += "\nInputs type_num: %s" % str(
                [getattr(getattr(i[0], 'dtype', ''), 'num', '') for i in thunk.inputs])
        if hasattr(node.op, '__input_name__'):
            detailed_err_msg += "\nInputs name: %s\n" % str(node.op.__input_name__)
        detailed_err_msg += "\nOutputs clients: %s\n" % clients
    else:
        hints.append(
            "HINT: Use another linker then the c linker to"
            " have the inputs shapes and strides printed.")
    # Print node backtraces
    tr = getattr(node.outputs[0].tag, 'trace', [])
    if isinstance(tr, list) and len(tr) > 0:
        detailed_err_msg += "\nBacktrace when the node is created(use Theano flag traceback.limit=N to make it longer):\n"
        # Print separate message for each element in the list of backtraces
        sio = StringIO()
        for subtr in tr:
            traceback.print_list(subtr, sio)
        detailed_err_msg += str(sio.getvalue())
    else:
        hints.append(
            "HINT: Re-running with most Theano optimization disabled could"
            " give you a back-trace of when this node was created. This can"
            " be done with by setting the Theano flag"
            " 'optimizer=fast_compile'. If that does not work,"
            " Theano optimizations can be disabled with 'optimizer=None'.")
    if theano.config.exception_verbosity == 'high':
        f = StringIO()
        theano.printing.debugprint(node, file=f, stop_on_name=True,
                                   print_type=True)
        detailed_err_msg += "\nDebugprint of the apply node: \n"
        detailed_err_msg += f.getvalue()
    # Prints output_map
    if theano.config.exception_verbosity == 'high' and storage_map is not None:
        # Summarize the memory held by every storage cell, largest first.
        detailed_err_msg += "\nStorage map footprint:\n"
        shared_input_list = [
            item for item in node.fgraph.inputs
            if isinstance(item, theano.compile.SharedVariable)]
        nonshared_input_list = [
            item for item in node.fgraph.inputs
            if not isinstance(item, theano.compile.SharedVariable)]
        storage_map_list = []
        total_size = 0
        total_size_inputs = 0
        for k in storage_map:
            storage_map_item = []
            # storage_map_item[0]: the variable
            storage_map_item.append(str(k))
            # storage_map_item[1]: the shape
            shapeinfo = None
            if hasattr(storage_map[k][0], 'shape'):
                shapeinfo = storage_map[k][0].shape
                if len(shapeinfo) != 0:
                    storage_map_item.append(shapeinfo)
                else:
                    storage_map_item.append(tuple())
            else:
                storage_map_item.append(None)
            # storage_map_item[2]: itemsize
            # storage_map_item[3]: bytes
            if hasattr(storage_map[k][0], 'dtype'):
                dtype = storage_map[k][0].dtype
                storage_map_item.append(np.dtype(dtype).itemsize)
                if shapeinfo is None:
                    storage_map_item.append(-1)
                else:
                    sz = np.dtype(dtype).itemsize * np.prod(shapeinfo)
                    storage_map_item.append(sz)
                    total_size += sz
                    if not k.owner:
                        total_size_inputs += sz
                    else:
                        # If it is a view, don't count it twice.
                        if getattr(k.owner.op, 'view_map', None):
                            vmap = k.owner.op.view_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[
                                    k.owner.inputs[vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
                        # If it is a destroyed input, the input
                        # shouldn't be in the storage_map anymore
                        # except if there is a special flag used. So
                        # we still must check it.
                        if getattr(k.owner.op, 'destroy_map', None):
                            vmap = k.owner.op.destroy_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[
                                    k.owner.inputs[vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
            else:
                # Not an ndarray-like value: fall back to a shallow Python
                # size (note `bytes` shadows the builtin here).
                bytes = getsizeof(storage_map[k][0])
                storage_map_item.append(bytes)
                storage_map_item.append(-1)
            # Flag of shared val
            # storage_map_item[4]
            if k in shared_input_list:
                storage_map_item.append(True)
            elif k in nonshared_input_list:
                storage_map_item.append(False)
            else:
                storage_map_item.append(None)
            storage_map_list.append(storage_map_item)
        from operator import itemgetter
        storage_map_list.sort(key=itemgetter(3), reverse=True)
        for item in storage_map_list:
            if item[3] == -1:
                continue
            detailed_err_msg += " - " + item[0] + ", "
            if item[4] is True:
                detailed_err_msg += "Shared Input, "
            elif item[4] is False:
                detailed_err_msg += "Input, "
            if item[1] is not None:
                detailed_err_msg += "Shape: %s, " % str(item[1])
            detailed_err_msg += "ElemSize: %s Byte(s)" % item[2]
            if item[3] is not None:
                detailed_err_msg += ", TotalSize: %s Byte(s)\n" % item[3]
            else:
                detailed_err_msg += "\n"
        detailed_err_msg += " TotalSize: %s Byte(s) %.3f GB\n" % (
            total_size, total_size / 1024. / 1024 / 1024)
        detailed_err_msg += " TotalSize inputs: %s Byte(s) %.3f GB\n" % (
            total_size_inputs, total_size_inputs / 1024. / 1024 / 1024)
    else:
        hints.append(
            "HINT: Use the Theano flag 'exception_verbosity=high'"
            " for a debugprint and storage map footprint of this apply node.")
    # Rebuild the exception with the extended message appended.  Some
    # exception types require extra constructor arguments and reject a
    # single string, hence the TypeError fallback below.
    try:
        exc_value = exc_type(str(exc_value) + detailed_err_msg +
                             '\n' + '\n'.join(hints))
    except TypeError:
        print("WARNING: %s error does not allow us to add extra error message" %
              str(exc_type))
        # Some exception need extra parameter in inputs. So forget the
        # extra long error message in that case.
        pass
    reraise(exc_type, exc_value, exc_trace)
class Linker(object):
    """
    Base class for linkers: objects that turn a FunctionGraph into a
    callable thunk (see `make_thunk`).  Concrete subclasses in this module
    include LocalLinker/PerformLinker and WrapLinker.
    """
    def clone(self, allow_gc=undef):
        # `undef` is a sentinel distinct from None/False, so "not passed"
        # can be told apart from an explicit allow_gc value.
        new = copy(self)
        if allow_gc is not undef:
            new.allow_gc = allow_gc
        return new
    def make_thunk(self):
        """
        This function must return a triplet (function, input_variables,
        output_variables) where function is a thunk that operates on the
        returned variables. If inplace is True, the input_variables and
        output_variables lists will be the same as the inputs and outputs
        of the graph provided to the L{Linker}. Else, independent
        variables will be returned.
        Examples
        --------
        x, y = Variable(Double), Variable(Double)
        e = x + y
        fgraph = FunctionGraph([x, y], [e])
        fn, (new_x, new_y), (new_e, ) = MyLinker(fgraph).make_thunk(inplace)
        new_x.data = 1.0
        new_y.data = 2.0
        fn()
        print new_e.data # 3.0
        print e.data # 3.0 iff inplace == True (else unknown)
        """
        # Abstract: subclasses must override.
        raise utils.MethodNotDefined("make_thunk", type(self),
                                     self.__class__.__name__)
    # DELETEME #
    def make_function(self, unpack_single=True, **kwargs):
        """
        Returns a function that takes values corresponding to the inputs of the
        fgraph used by this L{Linker} and returns values corresponding the the
        outputs of that fgraph. If inplace is True, the calculations will
        operate in the same storage the fgraph uses, else independent storage
        will be allocated for the function.
        Example
        -------
        e = x + y
        fgraph = FunctionGraph([x, y], [e])
        fn = MyLinker(fgraph).make_function(inplace)
        print fn(1.0, 2.0) # 3.0
        print e.data # 3.0 iff inplace == True (else unknown)
        If unpack_single is True (default) and that the function has only one
        output, then that output will be returned. Else, a list or tuple of
        length 1 will be returned.
        """
        thunk, inputs, outputs = self.make_thunk(**kwargs)
        def execute(*args):
            def e_arity(takes, got):
                # Human-readable arity-mismatch message.
                return 'Function call takes exactly %i %s (%i given)' % (
                    takes, ['argument', 'arguments'][takes > 1], got)
            if (len(args) != len(inputs)):
                raise TypeError(e_arity(len(inputs), len(args)))
            # Load the arguments into the input storage, run the thunk,
            # then read the results back out of the output storage.
            for arg, variable in izip(args, inputs):
                variable.data = arg
            thunk()
            if unpack_single:
                return utils.to_return_values([variable.data
                                               for variable in outputs])
            else:
                return [variable.data for variable in outputs]
        execute.thunk = thunk
        execute.inputs = inputs
        execute.outputs = outputs
        return execute
    def schedule(self, fgraph):
        # Default execution order: topological sort of the graph.
        return fgraph.toposort()
# TODO: Move this class to the compile module, where it is used (and for which it exists).
class Container(object):
    """
    This class joins a variable with its computed value.
    It is used in linkers, especially for the inputs and outputs of a Function.
    Parameters
    ----------
    r : a Variable or a Type
    storage
        A list of length 1, whose element is the value for `r`.
    readonly : bool
        True indicates that this should not be setable by Function[r] = val.
    strict : bool
        If True, we don't allow type casting.
    allow_downcast
        If True (and `strict` is False), allow upcasting of type, but not
        downcasting. If False, prevent it. If None (default), allows only
        downcasting of float to floatX scalar.
    name : str
        A string (for pretty-printing?)
    """
    def __init__(self, r, storage, readonly=False, strict=False,
                 allow_downcast=None, name=None):
        if not isinstance(storage, list) or not len(storage) >= 1:
            raise TypeError("storage must be a list of length at least one")
        # self.r = r
        # `r` may be either a Type directly or a Variable carrying one.
        if isinstance(r, Type):
            self.type = r
        else:
            self.type = r.type
        if name is None:
            # Some Type do not have a name field.
            self.name = getattr(r, 'name', None)
        else:
            self.name = name
        self.storage = storage
        self.readonly = readonly
        self.strict = strict
        self.allow_downcast = allow_downcast
    # NOTE: despite the dunder names, __get__/__set__ are not used via the
    # descriptor protocol here -- they are wired up as plain getter/setter
    # through the property() calls below.
    def __get__(self):
        return self.storage[0]
    def __set__(self, value):
        if self.readonly:
            raise Exception("Cannot set readonly storage: %s" % self.name)
        try:
            if value is None:
                self.storage[0] = None
                return
            # Filter the value through the Type, honoring strictness and
            # downcast policy configured on this container.
            kwargs = {}
            if self.strict:
                kwargs['strict'] = True
            if self.allow_downcast is not None:
                kwargs['allow_downcast'] = self.allow_downcast
            if hasattr(self.type, 'filter_inplace'):
                self.storage[0] = self.type.filter_inplace(value,
                                                           self.storage[0],
                                                           **kwargs)
            else:
                self.storage[0] = self.type.filter(value, **kwargs)
        except Exception as e:
            # Tag the exception with the container's name for easier debugging.
            e.args = e.args + (('Container name "%s"' % self.name),)
            raise
    data = property(__get__, __set__)
    value = property(__get__, __set__)
    def __str__(self):
        return "<" + str(self.storage[0]) + ">"
    def __repr__(self):
        return "<" + repr(self.storage[0]) + ">"
    def __deepcopy__(self, memo):
        data_was_in_memo = id(self.storage[0]) in memo
        r = type(self)(
            deepcopy(self.type, memo=memo),
            deepcopy(self.storage, memo=memo),
            deepcopy(self.readonly, memo=memo),
            deepcopy(self.strict, memo=memo),
            deepcopy(self.allow_downcast, memo=memo),
            deepcopy(self.name, memo=memo),
        )
        # Work around NumPy deepcopy of ndarray with 0 dimension that
        # don't return an ndarray.
        if (r.storage[0] is not None and
                not self.type.is_valid_value(r.storage[0])):
            assert not data_was_in_memo
            assert self.type.is_valid_value(self.storage[0])
            # This should also work for read only container.
            r.storage[0] = self.type.filter(r.storage[0],
                                            strict=False,
                                            allow_downcast=False)
            memo[id(self.storage[0])] = r.storage[0]
        return r
def map_storage(fgraph, order, input_storage, output_storage, storage_map=None):
    """Ensure there is storage (a length-1 list) for inputs, outputs, and interior nodes.
    :param fgraph: The current fgraph. This function uses the inputs and outputs attributes.
    :param order: an iterable over Apply instances (in program running order)
    :param input_storage: None or existing input storage (see below)
    :param output_storage: None or existing output storage (see below)
    :rtype: 3-tuple
    :returns: (list of storage for inputs, list of storage for outputs, and the `storage_map`)
    Parameters
    ----------
    fgraph
        The current fgraph. This function uses the inputs and outputs
        attributes.
    order
        An iterable over Apply instances (in program running order).
    input_storage
        None or existing input storage (see below).
    output_storage
        None or existing output storage (see below).
    Returns
    -------
    3-tuple
        List of storage for inputs, list of storage for outputs, and
        the `storage_map`.
    Extended summary
    ----------------
    This function iterates over the nodes in `order` and ensures that for every
    input and output `Variable`, there is a unique storage container. This is
    returned as a dictionary Variable -> storage called the `storage_map`.
    This function also returns `input_storage`, which is a list of storages
    corresponding to fgraph.inputs.
    This function also returns `output_storage`, which is a list of storages
    corresponding to fgraph.outputs.
    """
    # each Apply argument's data is stored in a list of length 1 (these lists act like pointers)
    if storage_map is None:
        storage_map = {}
    # input_storage is a list of data-containers for the inputs.
    if input_storage is None:
        input_storage = [[None] for input in fgraph.inputs]
    else:
        assert len(fgraph.inputs) == len(input_storage)
    # add input storage into storage_map
    for r, storage in zip(fgraph.inputs, input_storage):
        if r in storage_map:
            # A pre-populated storage_map must agree (identity) with the
            # storage handed in explicitly.
            assert storage_map[r] is storage, ("Given input_storage conflicts "
                                               "with storage in given storage_"
                                               "map. Given input_storage: ",
                                               storage, "Storage in storage_ma"
                                               "p: ", storage_map[r])
        else:
            storage_map[r] = storage
    # for orphan in fgraph.orphans:
    #     if not isinstance(orphan, Constant):
    #         raise TypeError("Cannot link a graph with non-constant orphans.", orphan)
    #     storage_map[orphan] = [orphan.data]
    # allocate output storage
    if output_storage is not None:
        assert len(fgraph.outputs) == len(output_storage)
        for r, storage in zip(fgraph.outputs, output_storage):
            if r in storage_map:
                assert storage_map[r] is storage, ("Given output_storage confl"
                                                   "icts with storage in given"
                                                   " storage_map. Given output"
                                                   "_storage: ", storage, "Sto"
                                                   "rage in storage_map: ",
                                                   storage_map[r])
            else:
                storage_map[r] = storage
    # allocate storage for intermediate computation
    for node in order:
        for r in node.inputs:
            if r not in storage_map:
                # Anything not yet mapped must be a graph constant; seed its
                # storage cell with the constant's data.
                assert isinstance(r, graph.Constant)
                storage_map[r] = [r.data]
        for r in node.outputs:
            storage_map.setdefault(r, [None])
    for r in fgraph.outputs:
        if isinstance(r, graph.Constant):
            storage_map.setdefault(r, [r.data])
    # extract output storage
    if output_storage is None:
        output_storage = [storage_map[r] for r in fgraph.outputs]
    return input_storage, output_storage, storage_map
def streamline(fgraph, thunks, order, post_thunk_old_storage=None,
               no_recycling=None, nice_errors=True):
    """
    Build a single callable that runs `thunks` in order, returning one of
    three closures depending on whether per-thunk garbage collection and/or
    nice error reporting is requested.
    Parameters
    ----------
    fgraph
    thunks
        The list of program instructions.
    order
        The list of apply instances that gave rise to the thunks
        (same order as thunks).
    post_thunk_old_storage
        A list (corresponding to thunks, order) whose elements are lists of
        storage cells, that should be cleared after running the corresponding
        thunk. A value of None disables this functionality.
    no_recycling
        Storage elements that cannot be 'recycled' by repeatedly executing the
        program. These storage elements are cleared before re-running.
    nice_errors
        Run in such a way that the double-traceback is printed. This costs a
        bit of performance in the inner python loop.
    """
    if no_recycling is None:
        no_recycling = []
    if len(thunks) != len(order):
        raise ValueError('Length of thunks and order must match',
                         (len(thunks), len(order)))
    if post_thunk_old_storage:
        if len(thunks) != len(post_thunk_old_storage):
            raise ValueError(
                'Length of thunks and post_thunk_old_storage must match',
                (len(thunks), len(post_thunk_old_storage)))
        # Variant 1: gc enabled -- clear each thunk's dead storage right
        # after it runs; errors are re-raised with node annotations.
        def streamline_default_f():
            for x in no_recycling:
                x[0] = None
            try:
                for thunk, node, old_storage in izip(thunks, order,
                                                     post_thunk_old_storage):
                    thunk()
                    for old_s in old_storage:
                        old_s[0] = None
            except Exception:
                raise_with_op(node, thunk)
        f = streamline_default_f
    elif nice_errors:
        # Variant 2: no gc, but still annotate exceptions with the failing
        # node via raise_with_op.
        def streamline_nice_errors_f():
            for x in no_recycling:
                x[0] = None
            try:
                for thunk, node in izip(thunks, order):
                    thunk()
            except Exception:
                raise_with_op(node, thunk)
        f = streamline_nice_errors_f
    else:
        # don't worry about raise_with_op, just go a little faster.
        # there is a mix of python and c thunks
        def streamline_fast_f():
            for x in no_recycling:
                x[0] = None
            for thunk in thunks:
                thunk()
        f = streamline_fast_f
    return f
class LocalLinker(Linker):
    """
    Useful base class for L{Linker}s which keep all nodes in the graph, and run
    a thunk associated with each node.
    """
    def make_thunk(self, input_storage=None, output_storage=None, storage_map=None):
        # Delegate to make_all and drop the trailing (thunks, order) pair,
        # keeping only (function, input_storage, output_storage).
        return self.make_all(input_storage=input_storage,
                             output_storage=output_storage,
                             storage_map=storage_map)[:3]
    def make_all(self, input_storage, output_storage):
        # By convention, subclasses of LocalLinker should implement this function!
        #
        # This function should return a tuple of 5 things
        # 1. function to run the program
        # 2. input storage
        # 3. output storage
        # 4. thunks: list of nodes' functions in the order they will be run by the function in (1)
        # 5. order: list of nodes, in the order they will be run by the function in (1)
        raise utils.MethodNotDefined("make_all", type(self),
                                     self.__class__.__name__)
def gc_helper(node_list):
    """
    Return the set of Variable instances which are computed by node_list.
    Parameters
    ----------
    node_list
        List of Apply instances in program execution order.
    Returns
    -------
    2-tuple
        FIRST, the set of Variable instances which are computed by node_list,
        and SECOND a dictionary that maps each Variable instance to the last
        node to use Variable as an input.
    Notes
    -----
    This enables garbage collection within graphs.  view_map and destroy_map
    are deliberately ignored: Python's reference counting already keeps a
    value alive when a thunk reuses it, so tracking them here would only
    delay collection.
    """
    final_consumer = {}
    produced = set()
    for apply_node in node_list:
        # Later nodes simply overwrite earlier entries, so the dict ends up
        # holding the *last* consumer of every input.
        for inp in apply_node.inputs:
            final_consumer[inp] = apply_node
        produced.update(apply_node.outputs)
    return produced, final_consumer
class PerformLinker(LocalLinker):
    """
    Basic L{Linker} subclass that calls the perform method on each L{Op} in
    the L{FunctionGraph} in the order given by L{Linker.schedule}.
    """
    def __init__(self, allow_gc=None, schedule=None):
        if allow_gc is None:
            allow_gc = theano.config.allow_gc
        self.fgraph = None
        self.allow_gc = allow_gc
        # Optionally override the default (toposort) scheduling policy.
        if schedule:
            self.schedule = schedule
    def accept(self, fgraph, no_recycling=None, profile=None):
        """
        Parameters
        ----------
        fgraph
            A PerformLinker can have accepted one FunctionGraph instance at a time.
        no_recycling
            Storage cells that must not be reused between calls.
        Returns
        -------
        object
            self (TODO: WHY? Who calls this function?)
        """
        if no_recycling is None:
            no_recycling = []
        if self.fgraph is not None and self.fgraph is not fgraph:
            # Already tied to a different graph: hand the request to a fresh
            # linker instead of rebinding this one.
            return type(self)(allow_gc=self.allow_gc).accept(
                fgraph, no_recycling, profile)
            # raise Exception("Cannot accept from a Linker that is already tied to another FunctionGraph.")
        self.fgraph = fgraph
        self.no_recycling = no_recycling
        return self
    def make_all(self, input_storage=None, output_storage=None, storage_map=None):
        """
        Returns Function to run all nodes, list of input containers, list of outputs
        Parameters
        ----------
        input_storage
            list of storages corresponding to fgraph.inputs
        output_storage
            list of storages corresponding to fgraph.outputs
        Returns
        -------
        object
            Function to run all nodes, list of input containers, list of output
            containers, list of thunks (for all programs), list of nodes
            (for all programs).
        """
        fgraph = self.fgraph
        order = self.schedule(fgraph)
        no_recycling = self.no_recycling
        input_storage, output_storage, storage_map = map_storage(fgraph, order, input_storage, output_storage, storage_map)
        # compute_map marks which variables already have a value (only the
        # graph inputs, i.e. variables without an owner, start computed).
        compute_map = {}
        for k in storage_map:
            compute_map[k] = [k.owner is None]
        thunks = []
        for node in order:
            # Maker sure we don't use C version of the code, but rather only
            # the python version
            # Note : ops that implement their own make thunk don't usually
            # have this attribute defiend !!
            thunks += [node.op.make_thunk(node,
                                          storage_map,
                                          compute_map,
                                          no_recycling,
                                          'py')]
            thunks[-1].inputs = [storage_map[v] for v in node.inputs]
            thunks[-1].outputs = [storage_map[v] for v in node.outputs]
        computed, last_user = gc_helper(order)
        if self.allow_gc:
            post_thunk_old_storage = []
        else:
            post_thunk_old_storage = None
        for node in order:
            if self.allow_gc:
                # For each node, list the storage cells that become dead
                # right after it runs (computed, not a graph output, and
                # this node is their last consumer).
                post_thunk_old_storage.append(
                    [storage_map[input]
                     for input in node.inputs
                     if (input in computed) and (
                         input not in fgraph.outputs) and (
                         node == last_user[input])])
        if no_recycling is True:
            # True seems like some special code for *everything*?? -JB
            # FunctionMaker always passes a list I think  -JB
            no_recycling = list(storage_map.values())
            no_recycling = utils.difference(no_recycling, input_storage)
        else:
            no_recycling = [storage_map[r] for r in no_recycling if r not in fgraph.inputs]
        # The function that actually runs your program is one of the f's in streamline.
        f = streamline(fgraph, thunks, order, post_thunk_old_storage,
                       no_recycling=no_recycling)
        f.allow_gc = self.allow_gc  # HACK: this is a way of passing an arg to Function.__call__
        add_clear_storage(f, computed, storage_map)
        f.storage_map = storage_map
        return (f,
                [Container(input, storage)
                 for input, storage in izip(fgraph.inputs, input_storage)],
                [Container(output, storage, True)
                 for output, storage in izip(fgraph.outputs, output_storage)],
                thunks,
                order)
def add_clear_storage(f, computed, storage_map):
    """Attach a ``clear_storage`` callback to *f*.

    Calling ``f.clear_storage()`` drops every computed value from
    *storage_map* (each entry is a one-element storage cell).
    """
    def clear_storage():
        for variable in computed:
            storage_map[variable][0] = None
    f.clear_storage = clear_storage
class WrapLinker(Linker):
    """
    This class makes it easier to run several L{LocalLinker}s in parallel, and
    offers some control over how each thunk is run.
    A wrapper function must be provided, and it can be used to execute the
    thunks, inspect the nodes, print stuff out, etc.
    The constructor initializes a WrapLinker.
    Parameters
    ----------
    linkers : list of L{LocalLinker} subclasses, whose make_all() method returns
        thunks in the same order.
        For each node in the graph, each linker will provide a
        thunk. This class makes it possible to iterate over each linker's
        program in parallel.
    wrapper : lambda (i, i_node, i_thunk1, i_thunk2, ...) : None
        Does some user-defined action for the i'th element of the program.
        i_thunk<n> is the thunk returned by the n'th linker. (If you want
        to run the program, make sure to call the necessary thunks in this
        function.)
    Notes
    -----
    The outputs of the first linker will be returned.
    This linker ensures that each linker has its own storage for inputs and
    outputs and intermediate variables. There is no interference between
    linkers.
    """
    def __init__(self, linkers, wrapper):
        self.fgraph = None
        self.linkers = linkers
        self.wrapper = wrapper
    def __copy__(self):
        """
        Shallow copy of a WrapLinker.
        Returns
        -------
        object
            A copy of self, where each of the linkers in self.linkers
            have been shallow-copied.
        It is useful because in FunctionMaker, copy.copy is called on the
        Mode's linker, so that it is not modified inplace when linker.accept()
        is called. In this case, we want the wrapped linkers to be copied too.
        """
        other = self.__class__(
            linkers=[copy(l) for l in self.linkers],
            wrapper=self.wrapper)
        return other
    def clone(self, allow_gc=undef):
        # Unlike Linker.clone, propagate allow_gc into every wrapped linker.
        return self.__class__(
            linkers=[l.clone(allow_gc=allow_gc) for l in self.linkers],
            wrapper=self.wrapper)
    def accept(self, fgraph, no_recycling=None, profile=None):
        """
        Parameters
        ----------
        fgraph : gof.FunctionGraph
            The fgraph which we will link.
        no_recycling : a list of Variables that belong to fgraph.
            If a Variable is in no_recycling, L{WrapLinker} will clear
            the output storage associated to it (for each linker in linkers)
            during the computation to avoid reusing it.
        """
        if no_recycling is None:
            no_recycling = []
        if self.fgraph is not None and self.fgraph is not fgraph:
            # Already bound to another graph: delegate to a fresh instance.
            return type(self)(self.linkers, self.wrapper).accept(fgraph,
                                                                 no_recycling)
        self.fgraph = fgraph
        self.no_recycling = no_recycling
        self.linkers = [linker.accept(fgraph, no_recycling)
                        for linker in self.linkers]
        return self
    def pre(self, f, inputs, order, thunk_groups):
        # Hook called once per run before any thunk executes; no-op here,
        # intended for subclasses to override.
        pass
    def make_thunk(self, **kwargs):
        no_recycling = self.no_recycling
        # The first linker receives input_storage; the others allocate their
        # own storage so the programs do not interfere.
        make_all = [self.linkers[0].make_all(**kwargs)]
        kwargs.pop('input_storage', None)
        make_all += [l.make_all(**kwargs) for l in self.linkers[1:]]
        fns, input_lists, output_lists, thunk_lists, order_lists \
            = zip(*make_all)
        order_list0 = order_lists[0]
        for order_list in order_lists[1:]:
            if not order_list0 == order_list:
                raise Exception(
                    "All linkers to WrapLinker should execute operations in the same order.")
        inputs0 = input_lists[0]
        outputs0 = output_lists[0]
        # Group the i-th thunk of every linker together so the wrapper can
        # be called once per node with all per-linker thunks.
        thunk_groups = list(zip(*thunk_lists))
        order = [x[0] for x in zip(*order_lists)]
        to_reset = []
        for thunks, node in izip(thunk_groups, order):
            for j, output in enumerate(node.outputs):
                if output in no_recycling:
                    for thunk in thunks:
                        to_reset.append(thunk.outputs[j])
        wrapper = self.wrapper
        pre = self.pre
        def f():
            # Mirror the first linker's input values into the other linkers'
            # private input storage before running.
            for inputs in input_lists[1:]:
                for input1, input2 in izip(inputs0, inputs):
                    input2.storage[0] = copy(input1.storage[0])
            for x in to_reset:
                x[0] = None
            pre(self, [input.data for input in input_lists[0]],
                order, thunk_groups)
            for i, (thunks, node) in enumerate(izip(thunk_groups, order)):
                try:
                    wrapper(i, node, *thunks)
                except Exception:
                    raise_with_op(node, *thunks)
        f.thunk_groups = thunk_groups
        return f, inputs0, outputs0
def WrapLinkerMany(linkers, wrappers):
    """
    Variant on WrapLinker that runs a series of wrapper functions instead of
    just one.
    """
    def _run_all(*args):
        # Invoke every user-supplied wrapper, in order, with the same args.
        for hook in wrappers:
            hook(*args)
    return WrapLinker(linkers, _run_all)
| Theano/Theano | theano/gof/link.py | link.py | py | 38,073 | python | en | code | 9,807 | github-code | 36 | [
{
"api_name": "sys.excepthook",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "traceback.format_list",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.excepth... |
7557841018 | import sys
from collections import deque
def Run(fin, fout):
    """Read the problem input from *fin*, solve it, write the answer to *fout*.

    Input format (per the code below): first line is N; then N lines
    "a v", where node i has a single outgoing edge to node a with value v.
    Because every node has exactly one outgoing edge, the graph is
    functional and each connected component contains exactly one cycle.
    """
    readline = fin.readline
    N = int(readline())
    # to[i]  = (target, value) of node i's unique outgoing edge
    # from_[a] = set of (source, value) edges pointing at node a
    to = [None] * (N + 1)
    from_ = [set() for _ in range(N + 1)]
    for i in range(1, N + 1):
        a, v = map(int, readline().split())
        to[i] = (a, v)
        from_[a].add((i, v))
    visited = set()
    ans = 0
    for i in range(1, N + 1):
        if i in visited:
            continue
        # Locate this component's cycle, then sum the tree hanging off each
        # cycle node (dfs also marks everything it reaches as visited).
        loop = find_loop(to, from_, i)
        totals = []
        for j in loop:
            total = dfs(to, from_, visited, loop, j)
            totals.append(total)
        # Per component, count every subtree total except the smallest one
        # (presumably the problem lets one cycle node's contribution be
        # dropped -- TODO confirm against the problem statement).
        ans += sum(totals) - min(totals)
    fout.write("{}\n".format(ans))
def dfs(to, from_, visited, loop, start):
    """Sum edge values of the in-tree hanging off *start* (a cycle node).

    Walks predecessor edges (``from_``) depth-first without stepping onto
    other cycle nodes, marking every node it reaches in *visited*.  The
    value of *start*'s own outgoing edge is included in the total.
    """
    total = to[start][1]
    stack = [(start, 0)]
    while stack:
        node, weight = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        total += weight
        stack.extend(
            (pred, w) for pred, w in from_[node] if pred not in loop)
    return total
def find_loop(to, from_, start):
    """Follow the unique outgoing edges from *start* until a node repeats.

    Returns the set of nodes forming the cycle the walk falls into.
    (*from_* is unused; it is accepted only to mirror the sibling helpers.)
    """
    first_index = {}
    path = []
    node = start
    while node not in first_index:
        first_index[node] = len(path)
        path.append(node)
        node = to[node][0]
    # node is the first repeated vertex; everything from its first
    # occurrence onward is the cycle.
    return set(path[first_index[node]:])
Run(sys.stdin, sys.stdout) | chenant2017/USACO | Silver/2022 Open/p1.py | p1.py | py | 1,247 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 65,
"usage_type": "attribute"
}
] |
13100959928 | from __future__ import print_function
# import logging
import json
import sys
import uuid
from random import randrange # TODO remove this
import requests
import logging
from cakework import exceptions
from urllib3.exceptions import NewConnectionError
import os
# TODO: need to re-enable TLS for the handlers in the fly.toml file. Try these settings: https://community.fly.io/t/urgent-grpc-server-unreachable-via-grpcurl/2694/12 for alpn
# TODO figure out how to configure the settings for fly.toml for grpc!
# TODO also need to make sure different runs don't interfere with each other
# TODO add a parameter for an entry point into the system (currently, assume that using cakework_app.py)
logging.basicConfig(level=logging.INFO)
class Client:
    """HTTP client for the Cakework frontend service.

    Wraps the REST endpoints for launching task runs and polling their
    status/result.  All calls authenticate with the ``client_token`` query
    parameter and raise ``exceptions.CakeworkError`` on failure.
    """
    def __init__(self, project, client_token, local=False): # TODO: infer user id // TODO revert local back to False
        # project: Cakework project name; client_token: API token.
        # local=True targets a locally running frontend instead of fly.dev.
        self.project = project
        self.client_token = client_token
        if local:
            self.frontend_url = "http://localhost:8080"
        else:
            self.frontend_url = "https://cakework-frontend.fly.dev"
        self.local = local
    def get_run_status(self, run_id):
        """Return the JSON-decoded status of run *run_id*.

        Returns None when the frontend responds 404 (run unknown); raises
        CakeworkError for connection problems or any other status code.
        """
        response = None
        try:
            # Q: status 200 vs 201??? what's the diff?
            # TODO strip app from everywhere
            response = requests.get(f"{self.frontend_url}/client/runs/{run_id}/status", params={"token": self.client_token})
            response.raise_for_status()
            # TODO: handle http error, or request id not found error
        except requests.exceptions.HTTPError as err:
            raise exceptions.CakeworkError("Http error while connecting to Cakework frontend") from err
        except requests.exceptions.Timeout as err:
            raise exceptions.CakeworkError("Timed out connecting to Cakework frontend") from err
        except requests.exceptions.RequestException as err:
            raise exceptions.CakeworkError("Request exception connecting Cakework frontend") from err
        except (ConnectionRefusedError, ConnectionResetError) as err:
            raise exceptions.CakeworkError("Failed to connect to Cakework frontend service") from err
        except Exception as err:
            # TODO catch and raise specific errors?
            raise exceptions.CakeworkError("Error happened while getting status") from err
        # NOTE(review): raise_for_status() above means non-2xx never reaches
        # here; the 404 branch below is therefore dead code -- confirm.
        if response is not None:
            if response.status_code == 200:
                status = response.text
                return json.loads(status)
            elif response.status_code == 404:
                return None
            else:
                raise exceptions.CakeworkError("Internal server exception")
        else:
            raise exceptions.CakeworkError("Internal server exception")
    # TODO figure out how to refactor get_result and get_status
    def get_run_result(self, run_id):
        """Return the JSON-decoded result of run *run_id*.

        Same contract as get_run_status but for the /result endpoint.
        NOTE(review): unlike get_run_status, the raises below drop the
        original exception context (no ``from err``) -- consider aligning.
        """
        response = None
        try:
            # Q: status 200 vs 201??? what's the diff?
            response = requests.get(f"{self.frontend_url}/client/runs/{run_id}/result", params={"token": self.client_token})
            response.raise_for_status() # TODO delete this?
            # TODO: handle http error, or request id not found error
        except requests.exceptions.HTTPError as errh:
            raise exceptions.CakeworkError("Http error while connecting to Cakework frontend")
        except requests.exceptions.Timeout as errt:
            raise exceptions.CakeworkError("Timed out connecting to Cakework frontend")
        except requests.exceptions.RequestException as err:
            raise exceptions.CakeworkError("Request exception connecting Cakework frontend")
        except (ConnectionRefusedError, ConnectionResetError) as e:
            raise exceptions.CakeworkError("Failed to connect to Cakework frontend service")
        except Exception as e:
            # TODO catch and raise specific errors?
            raise exceptions.CakeworkError("Something unexpected happened")
        if response is not None:
            if response.status_code == 200:
                # NOTE(review): double decode -- response.json() then
                # json.loads() -- implies the endpoint returns a JSON-encoded
                # string; verify against the frontend.
                result = json.loads(response.json())
                return result
            elif response.status_code == 404:
                return None
            else:
                raise exceptions.CakeworkError("Internal server exception")
        else:
            raise exceptions.CakeworkError("Internal server exception")
    def run(self, task, params, compute ={"cpu":1, "memory": 256}):
        """Launch *task* with *params*; return the new run id.

        compute: dict with optional "cpu" (1-8 vCPUs) and "memory"
        (256-16384 MB) keys; out-of-range values raise CakeworkError.
        NOTE(review): mutable default argument -- harmless today since
        ``compute`` is only read, but fragile.
        """
        request = {
            "parameters": params,
            "compute": {}
        }
        cpu = compute.get("cpu")
        if cpu is not None:
            if cpu < 1 or cpu > 8:
                raise exceptions.CakeworkError("Number of cpus must be between 1 and 8")
            else:
                request["compute"]["cpu"] = cpu
        else:
            request["compute"]['cpu'] = 1
        memory = compute.get("memory")
        if memory is not None:
            if memory < 256 or memory > 16384:
                raise exceptions.CakeworkError("Amount of memory must be between 256 and 16384 mb")
            else:
                request["compute"]["memory"] = memory
        else:
            request["compute"]['memory'] = 256
        request["token"] = self.client_token
        response = requests.post(f"{self.frontend_url}/client/projects/{self.project}/tasks/{task}/runs", json=request, params={"token": self.client_token})
        response_json = response.json()
        if response is None:
            raise exceptions.CakeworkError("Did not get a response from the frontend")
        if response.status_code == 201:
            run_id = response_json["runId"]
            return run_id
        elif response.status_code == 404:
            raise exceptions.CakeworkError("Task " + task + " for project " + self.project + " not found. Have you tried running `cakework deploy` first?")
        else:
            print(response) # TODO delete?
raise exceptions.CakeworkError("Internal server exception") | usecakework/async-backend | sdk/python/src/cakework/client.py | client.py | py | 6,074 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.exception... |
10392633050 | import json
import os
import cv2
from cfg import cfg
import numpy as np
from collections import defaultdict as dd
from dsl.base_dsl import BaseDSL, one_hot_labels
class NSFWDSL(BaseDSL):
    """Dataset loader for the NSFW image set, built on BaseDSL.

    Samples come from nsfw/nsfw_dict.json as (path, class_index) pairs over
    the three classes normal/porn/sexy; a fixed number of samples per class
    (cfg.ntest) is held out for testing.
    """
    def __init__(self, batch_size, shuffle_each_epoch=False, seed=1337,
                 normalize=True, mode='train', val_frac=0.02, resize=None):
        assert mode == "train" or mode == "val" or mode == "test"
        # Fixed square RGB sample shape, side length from config.
        self.shape = (cfg.img_size, cfg.img_size, 3)
        self.ntest = cfg.ntest
        self.mode = mode
        # NOTE(review): self.normalize records the argument, but the super()
        # call below hard-codes normalize=False -- confirm this is intended.
        self.normalize = normalize
        if mode == 'val':
            assert val_frac is not None
        super(NSFWDSL, self).__init__(
            batch_size,
            shuffle_each_epoch=shuffle_each_epoch,
            seed=seed,
            normalize=False,
            mode=mode,
            val_frac=val_frac,
            normalize_channels=False,
            resize=resize
        )
    def is_multilabel(self):
        # Exactly one class per sample.
        return False
    def load_variable(self, file_path, data_type, var_shape):
        """Read a raw binary file into an ndarray of the given dtype/shape."""
        var = np.fromfile(file_path, dtype=data_type)
        var.shape = var_shape
        return var
    def get_sample_shape(self):
        return self.shape
    def get_partition_to_idxs(self, samples):
        """Split sample indices into deterministic train/test partitions.

        Shuffles indices per class under a fixed seed (cfg.DS_SEED) so the
        split is reproducible, holding out self.ntest samples per class.
        """
        partition_to_idxs = {
            'train': [],
            'test': []
        }
        # Save and later restore the global RNG state so the seeded shuffle
        # here does not perturb randomness elsewhere in the program.
        prev_state = np.random.get_state()
        np.random.seed(cfg.DS_SEED)
        classidx_to_idxs = dd(list)
        for idx, s in enumerate(samples):
            classidx = s[1]
            classidx_to_idxs[classidx].append(idx)
        # Shuffle classidx_to_idx
        for classidx, idxs in classidx_to_idxs.items():
            np.random.shuffle(idxs)
        for classidx, idxs in classidx_to_idxs.items():
            partition_to_idxs['test'] += idxs[:self.ntest]  # A constant no. kept aside for evaluation
            partition_to_idxs['train'] += idxs[self.ntest:]  # Train on remaining
        # Revert randomness to original state
        np.random.set_state(prev_state)
        return partition_to_idxs
    def create_label_dict(self):
        """Map each image path in self.data to its label in self.labels."""
        label_dict = {}
        for (img_name, pred_label) in zip(self.data, self.labels):
            label_dict[img_name] = pred_label
        return label_dict
    def load_data(self, mode, val_frac):
        """Populate self.data/self.labels for the requested partition."""
        with open("nsfw/nsfw_dict.json", 'r') as f:
            nsfw_dict = json.load(f)
        # Concatenation order fixes the index space used by the partitioner.
        samples = nsfw_dict["normal"] + nsfw_dict["porn"] + nsfw_dict["sexy"]
        partition_to_idxs = self.get_partition_to_idxs(samples)
        if mode == 'test':
            pruned_idxs = partition_to_idxs['test']
        else:
            assert mode == 'train' or mode == 'val'
            pruned_idxs = partition_to_idxs['train']
        samples = [samples[i] for i in pruned_idxs]
        self.data = []
        self.labels = []
        for sample in samples:
            self.data.append(sample[0])
            self.labels.append(sample[1])
        self.data = np.array(self.data)
        self.labels = np.array(self.labels)
        self.label_dict = self.create_label_dict()
        # Perform splitting
        if val_frac is not None:
            self.partition_validation_set(mode, val_frac)
        self.labels = np.squeeze(self.labels)
    def convert_Y(self, Y):
        # Three-class one-hot encoding (normal / porn / sexy).
        return one_hot_labels(Y, 3)
| gongzhimin/ActiveThief-attack-MLaaS | dsl/nsfw_dsl.py | nsfw_dsl.py | py | 3,309 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dsl.base_dsl.BaseDSL",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "cfg.cfg.img_size",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cfg.cfg",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "cfg.cfg.ntest",
... |
74105735145 | from django.contrib import admin
from django.urls import path
from tareas import views
# URL routes for the "tareas" (tasks) app: auth, task CRUD, and admin.
urlpatterns = [
    path("admin/", admin.site.urls),
    # Home / menu page.
    path ("", views.menu, name = "menu"),
    # Account handling: sign-up, sign-in, sign-out.
    path ("registro/", views.registro, name = "registro"),
    path ("iniciar_sesion/", views.iniciar_sesion, name = "iniciar_sesion"),
    path ("salir/", views.salir, name = "salir"),
    # Task CRUD: create, list, detail, mark complete, delete.
    path ("crear_tarea/", views.crear_tarea, name = "crear_tarea"),
    path ("tareas/", views.tareas, name = "tareas"),
    path ("tarea/<int:tarea_id>", views.tarea, name = "tarea"),
    path ("tarea/<int:tarea_id>/completa", views.tarea_completa, name = "tarea_completa"),
    # NOTE(review): "borada"/"borar_tarea" look like typos of
    # "borrada"/"borrar_tarea", but the URL and view name must stay as-is
    # unless the view module is renamed too.
    path ("tarea/<int:tarea_id>/borada", views.borar_tarea, name = "borar_tarea")
]
| MallicTesla/Mis_primeros_pasos | Programacion/002 ejemplos/002 - 14 django/16 django proyrcto inicio de cesion/django_crud/urls.py | urls.py | py | 732 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "dja... |
36720200958 | #!/usr/bin/env python
"""
Parser for condor job log files to get information out
"""
from datetime import datetime, timedelta
from .logit import log
from . import jobsub_fetcher
from .poms_model import Submission
# our own logging handle, goes to cherrypy
def get_joblogs(dbhandle, jobsub_job_id, cert, key, experiment, role):
    """
    get the condor joblog for a given job

    Parameters: dbhandle is a SQLAlchemy session used to look up the
    Submission row; cert/key are the credentials handed to jobsub_fetcher;
    experiment/role scope the jobsub queries.  Returns the parsed-log dict
    from parse_condor_log, or None when there is no job id or no files.
    Raises KeyError when no Submission matches jobsub_job_id.
    """
    res = None
    log("INFO", "entering get_joblogs")
    if jobsub_job_id is None:
        return None
    fetcher = jobsub_fetcher.jobsub_fetcher(cert, key)
    log("DEBUG", "checking index")
    submission = dbhandle.query(Submission).filter(Submission.jobsub_job_id == jobsub_job_id).first()
    if submission is None:
        raise KeyError("submission with jobsub_job_id %s not found" % jobsub_job_id)
    else:
        submission_id = submission.submission_id
        username = submission.experimenter_creator_obj.username
    # Rewrite cluster@host into cluster.0@host, i.e. address process 0
    # of the cluster for the jobsub file lookups.
    jobsub_job_id = jobsub_job_id.replace("@", ".0@")
    files = fetcher.index(jobsub_job_id, experiment, role, True, user=username)
    if files is None:
        return None
    log("DEBUG", "files: %s" % repr(files))
    filename = None
    # Column 5 of each index row is the file name (per the usage below).
    for row in files:
        if row[5].endswith(".log") and not row[5].endswith(".dagman.log"):
            # pick the log we want, either the first non-dagman log
            # or the nodes.log
            if not filename:
                filename = row[5]
            if row[5].endswith("nodes.log"):
                filename = row[5]
                break
    log("DEBUG", "checking file %s " % filename)
    lines = fetcher.contents(filename, jobsub_job_id, experiment, role, user=username)
    # Pass just the host part (after "@") as the batch host.
    res = parse_condor_log(dbhandle, lines, jobsub_job_id[jobsub_job_id.find("@") + 1 :], submission_id)
    del fetcher
    return res
def fix_jobid(clust_proc, batchhost):
    """Convert a condor ``cluster.proc.sub`` id (e.g. ``123456.010.000``)
    into jobsub form ``cluster.proc@batchhost`` (e.g. ``123456.10@host``),
    dropping leading zeros from the process number."""
    head, _, tail = clust_proc.partition(".")
    proc = int(tail[: tail.find(".")])
    return "{0}.{1:d}@{2}".format(head, proc, batchhost)
def compute_secs(time_str):
    """Convert an ``hh:mm:ss`` string (possibly comma-padded, as in condor
    usage lines) into a total number of seconds."""
    fields = [int(piece) for piece in time_str.strip(",").split(":")]
    return fields[0] * 3600 + fields[1] * 60 + fields[2]
def parse_date(date_time_str):
    """Best-effort wrapper around parse_date_2().

    Always returns a datetime: falls back to the current time when the
    string cannot be parsed, so a malformed timestamp never aborts the
    whole log parse.
    """
    try:
        return parse_date_2(date_time_str)
    except ValueError:
        return datetime.now()
def parse_date_2(date_time_str):
    """Parse a condor timestamp into a datetime.

    Newer condor writes full ISO dates ("2021-10-11 02:01:00"); older
    versions write only "mm/dd hh:mm:ss", so the year must be guessed.
    The guess prefers the current year for the current month, the year of
    roughly 27 days ago for last month (this gets the year right across
    the Dec/Jan boundary and well into a new month), and otherwise just
    assumes the current year.
    """
    now = datetime.now()
    this_year, this_month = now.strftime("%Y %m").split()
    prev_year, prev_month = (now - timedelta(days=27)).strftime("%Y %m").split()
    if date_time_str[:4] in (this_year, prev_year):
        # Full ISO form already carries its own year.
        return datetime.strptime(date_time_str, "%Y-%m-%d %H:%M:%S")
    month = date_time_str[:2]
    if month == this_month:
        year = this_year
    elif month == prev_month:
        year = prev_year
    else:
        # Some other month entirely: guess the current year.. sorry.
        year = this_year
    return datetime.strptime("%s/%s" % (year, date_time_str), "%Y/%m/%d %H:%M:%S")
def parse_condor_log(dbhandle, lines, batchhost, submission_id):
    """ read a condor log looking for start/end info

    Scans the event lines for records 000 (submit), 001 (execute) and
    005 (terminate), keyed by the job id found on each record line, and
    returns {"idle": {...}, "running": {...}, "completed": {...}} mapping
    jobsub job id -> datetime of that transition.
    NOTE(review): dbhandle and submission_id are unused here; job_sites,
    execute_hosts, remote_cpu, disk_used, memory_used and job_exit are
    collected but not returned.
    """
    log("DEBUG", "entering parse_condor_log %d lines" % len(lines))
    # State flag: inside the body of a 005 (termination) record.
    in_termination = 0
    itimes = {}
    stimes = {}
    etimes = {}
    job_sites = {}
    execute_hosts = {}
    job_exit = None
    jobsub_job_id = None
    res = {}
    for line in lines:
        # Every event record starts "NNN (cluster.proc.sub) date time ...";
        # pull the job id out of the parentheses.
        if line[:2] == "00" and line[3:5] == " (":
            ppos = line.find(")")
            jobsub_job_id = fix_jobid(line[5:ppos], batchhost)
        if line[:5] == "000 (":
            log("DEBUG", "submitted record start: %s" % line)
            # The 14 characters after ") " are the record's date/time stamp.
            itimes[jobsub_job_id] = parse_date(line[ppos + 2 : ppos + 16])
        if line[:5] == "001 (":
            log("DEBUG", "start record start: %s" % line)
            stimes[jobsub_job_id] = parse_date(line[ppos + 2 : ppos + 16])
        if line[:10] == "JOB_Site =":
            job_sites[jobsub_job_id] = line[11:-1]
        if line[:13] == "ExecuteHost =":
            execute_hosts[jobsub_job_id] = line[15:-2]
        if line[:5] == "005 (":
            log("DEBUG", "term record start: %s" % line)
            in_termination = 1
            finish_time = parse_date(line[ppos + 2 : ppos + 16])
            etimes[jobsub_job_id] = finish_time
            # Reset per-job usage figures; filled in from the record body.
            remote_cpu = None
            disk_used = None
            memory_used = None
            continue
        # A line of "..." closes an event record.
        if line[:3] == "..." and in_termination:
            log("DEBUG", "term record end %s" % line)
            in_termination = 0
            continue
        if in_termination:
            log("DEBUG", "saw: ", line)
            # Exit-by-signal is reported as 128 + signal number,
            # matching the usual shell convention.
            if line.find("termination (signal ") > 0:
                job_exit = 128 + int(line.split()[5].strip(")"))
            if line.find("termination (return value") > 0:
                job_exit = int(line.split()[5].strip(")"))
            if line.find("Total Remote Usage") > 0:
                remote_cpu = compute_secs(line.split()[2])
            if line.find("Disk (KB)") > 0:
                disk_used = line.split()[3]
            if line.find("Memory (KB)") > 0:
                memory_used = line.split()[3]
    # NOTE(review): if the log contains no 005 record, remote_cpu /
    # disk_used / memory_used were never assigned and this log() call
    # would raise NameError -- confirm whether that can happen.
    log(
        "DEBUG",
        "condor_log_parser: remote_cpu %s "
        "disk_used %s memory_used %s job_exit %s" % (remote_cpu, disk_used, memory_used, job_exit),
    )
    return {"idle": itimes, "running": stimes, "completed": etimes}
| fermitools/poms | webservice/condor_log_parser.py | condor_log_parser.py | py | 6,245 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logit.log",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logit.log",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "poms_model.Submission",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "poms_model.Submission.j... |
15282409982 | import regTrees
from numpy import *
import matplotlib.pyplot as plt
# Demo/driver for the CART regression-tree code (Machine Learning in
# Action, ch. 9): build a tree from each sample file and scatter-plot
# the data it was built from.
myDat = regTrees.loadDataSet('ex00.txt')
myMat = mat(myDat)
print(regTrees.createTree(myMat))
# ex00: two columns -- feature on x, target on y.
plt.plot(myMat[:,0],myMat[:,1], 'ro')
plt.show()
myDat1 = regTrees.loadDataSet('ex0.txt')
myMat1 = mat(myDat1)
print(regTrees.createTree(myMat1))
# ex0: column 0 is a constant leader, so plot columns 1 and 2.
plt.plot(myMat1[:,1],myMat1[:,2], 'ro')
plt.show()
| mengwangme/MachineLearninginAction | Ch09/test.py | test.py | py | 376 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "regTrees.loadDataSet",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "regTrees.createTree",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotl... |
8445183338 | from numpy import prod
import cupy
from cupy.fft import config
from cupy.fft._fft import (_convert_fft_type, _default_fft_func, _fft,
_get_cufft_plan_nd, _get_fftn_out_size,
_output_dtype)
from cupy.fft._cache import get_plan_cache
def get_fft_plan(a, shape=None, axes=None, value_type='C2C'):
""" Generate a CUDA FFT plan for transforming up to three axes.
Args:
a (cupy.ndarray): Array to be transform, assumed to be either C- or
F- contiguous.
shape (None or tuple of ints): Shape of the transformed axes of the
output. If ``shape`` is not given, the lengths of the input along
the axes specified by ``axes`` are used.
axes (None or int or tuple of int): The axes of the array to
transform. If `None`, it is assumed that all axes are transformed.
Currently, for performing N-D transform these must be a set of up
to three adjacent axes, and must include either the first or the
last axis of the array.
value_type (str): The FFT type to perform. Acceptable values are:
* 'C2C': complex-to-complex transform (default)
* 'R2C': real-to-complex transform
* 'C2R': complex-to-real transform
Returns:
a cuFFT plan for either 1D transform (``cupy.cuda.cufft.Plan1d``) or
N-D transform (``cupy.cuda.cufft.PlanNd``).
.. note::
The returned plan can not only be passed as one of the arguments of
the functions in ``cupyx.scipy.fftpack``, but also be used as a
context manager for both ``cupy.fft`` and ``cupyx.scipy.fftpack``
functions:
.. code-block:: python
x = cupy.random.random(16).reshape(4, 4).astype(complex)
plan = cupyx.scipy.fftpack.get_fft_plan(x)
with plan:
y = cupy.fft.fftn(x)
# alternatively:
y = cupyx.scipy.fftpack.fftn(x) # no explicit plan is given!
# alternatively:
y = cupyx.scipy.fftpack.fftn(x, plan=plan) # pass plan explicitly
In the first case, no cuFFT plan will be generated automatically,
even if ``cupy.fft.config.enable_nd_planning = True`` is set.
.. note::
If this function is called under the context of
:func:`~cupy.fft.config.set_cufft_callbacks`, the generated plan will
have callbacks enabled.
.. warning::
This API is a deviation from SciPy's, is currently experimental, and
may be changed in the future version.
"""
from cupy.cuda import cufft
# check input array
if a.flags.c_contiguous:
order = 'C'
elif a.flags.f_contiguous:
order = 'F'
else:
raise ValueError('Input array a must be contiguous')
if isinstance(shape, int):
shape = (shape,)
if isinstance(axes, int):
axes = (axes,)
if (shape is not None) and (axes is not None) and len(shape) != len(axes):
raise ValueError('Shape and axes have different lengths.')
# check axes
# n=1: 1d (need axis1D); n>1: Nd
if axes is None:
n = a.ndim if shape is None else len(shape)
axes = tuple(i for i in range(-n, 0))
if n == 1:
axis1D = 0
else: # axes is a tuple
n = len(axes)
if n == 1:
axis1D = axes[0]
if axis1D >= a.ndim or axis1D < -a.ndim:
err = 'The chosen axis ({0}) exceeds the number of '\
'dimensions of a ({1})'.format(axis1D, a.ndim)
raise ValueError(err)
elif n > 3:
raise ValueError('Only up to three axes is supported')
# Note that "shape" here refers to the shape along trasformed axes, not
# the shape of the output array, and we need to convert it to the latter.
# The result is as if "a=_cook_shape(a); return a.shape" is called.
# Because of this, we need to use (possibly unsorted) axes.
transformed_shape = shape
shape = list(a.shape)
if transformed_shape is not None:
for s, axis in zip(transformed_shape, axes):
if s is not None:
if axis == axes[-1] and value_type == 'C2R':
s = s // 2 + 1
shape[axis] = s
shape = tuple(shape)
# check value_type
out_dtype = _output_dtype(a.dtype, value_type)
fft_type = _convert_fft_type(out_dtype, value_type)
# TODO(leofang): figure out if we really have to skip F-order?
if n > 1 and value_type != 'C2C' and a.flags.f_contiguous:
raise ValueError('C2R/R2C PlanNd for F-order arrays is not supported')
# generate plan
# (load from cache if it exists, otherwise create one but don't cache it)
if n > 1: # ND transform
if cupy.cuda.runtime.is_hip and value_type == 'C2R':
raise RuntimeError("hipFFT's C2R PlanNd is buggy and unsupported")
out_size = _get_fftn_out_size(
shape, transformed_shape, axes[-1], value_type)
# _get_cufft_plan_nd interacts with plan cache and callback
plan = _get_cufft_plan_nd(
shape, fft_type, axes=axes, order=order, out_size=out_size,
to_cache=False)
else: # 1D transform
# prepare plan arguments
if value_type != 'C2R':
out_size = shape[axis1D]
else:
out_size = _get_fftn_out_size(
shape, transformed_shape, axis1D, value_type)
batch = prod(shape) // shape[axis1D]
devices = None if not config.use_multi_gpus else config._devices
keys = (out_size, fft_type, batch, devices)
mgr = config.get_current_callback_manager()
if mgr is not None:
# to avoid a weird segfault, we generate and cache distinct plans
# for every possible (load_aux, store_aux) pairs; the plans are
# still generated from the same external Python module
load_aux = mgr.cb_load_aux_arr
store_aux = mgr.cb_store_aux_arr
keys += (mgr.cb_load, mgr.cb_store,
0 if load_aux is None else load_aux.data.ptr,
0 if store_aux is None else store_aux.data.ptr)
cache = get_plan_cache()
cached_plan = cache.get(keys)
if cached_plan is not None:
plan = cached_plan
elif mgr is None:
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
else: # has callback
# TODO(leofang): support multi-GPU callback (devices is ignored)
if devices:
raise NotImplementedError('multi-GPU cuFFT callbacks are not '
'yet supported')
plan = mgr.create_plan(('Plan1d', keys[:-3]))
mgr.set_callbacks(plan)
return plan
def fft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output;
            defaults to the input length along ``axis``.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): Optional cuFFT
            plan from ``cupyx.scipy.fftpack.get_fft_plan(x, axis)``; when
            ``None``, CuPy auto-generates one behind the scene.

    Returns:
        cupy.ndarray: The transformed array, complex-typed.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.fft`
    """
    from cupy.cuda import cufft
    direction = cufft.CUFFT_FORWARD
    return _fft(x, (n,), (axis,), None, direction,
                overwrite_x=overwrite_x, plan=plan)
def ifft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output;
            defaults to the input length along ``axis``.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): Optional cuFFT
            plan from ``cupyx.scipy.fftpack.get_fft_plan(x, axis)``; when
            ``None``, CuPy auto-generates one behind the scene.

    Returns:
        cupy.ndarray: The transformed array, complex-typed.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.ifft`
    """
    from cupy.cuda import cufft
    direction = cufft.CUFFT_INVERSE
    return _fft(x, (n,), (axis,), None, direction,
                overwrite_x=overwrite_x, plan=plan)
def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
    """Compute the two-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): Optional cuFFT
            plan from ``cupyx.scipy.fftpack.get_fft_plan(x, axes)``.  When
            ``None``, a plan is auto-generated only if
            ``cupy.fft.config.enable_nd_planning`` is True.

    Returns:
        cupy.ndarray: The transformed array, complex-typed.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.fft2`
    """
    from cupy.cuda import cufft
    fft_impl = _default_fft_func(x, shape, axes, plan)
    return fft_impl(x, shape, axes, None, cufft.CUFFT_FORWARD,
                    overwrite_x=overwrite_x, plan=plan)
def ifft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
    """Compute the two-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): Optional cuFFT
            plan from ``cupyx.scipy.fftpack.get_fft_plan(x, axes)``.  When
            ``None``, a plan is auto-generated only if
            ``cupy.fft.config.enable_nd_planning`` is True.

    Returns:
        cupy.ndarray: The transformed array, complex-typed.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.ifft2`
    """
    from cupy.cuda import cufft
    fft_impl = _default_fft_func(x, shape, axes, plan)
    return fft_impl(x, shape, axes, None, cufft.CUFFT_INVERSE,
                    overwrite_x=overwrite_x, plan=plan)
def fftn(x, shape=None, axes=None, overwrite_x=False, plan=None):
    """Compute the N-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): Optional cuFFT
            plan from ``cupyx.scipy.fftpack.get_fft_plan(x, axes)``.  When
            ``None``, a plan is auto-generated only if
            ``cupy.fft.config.enable_nd_planning`` is True.

    Returns:
        cupy.ndarray: The transformed array, complex-typed.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.fftn`
    """
    from cupy.cuda import cufft
    fft_impl = _default_fft_func(x, shape, axes, plan)
    return fft_impl(x, shape, axes, None, cufft.CUFFT_FORWARD,
                    overwrite_x=overwrite_x, plan=plan)
def ifftn(x, shape=None, axes=None, overwrite_x=False, plan=None):
    """Compute the N-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): Optional cuFFT
            plan from ``cupyx.scipy.fftpack.get_fft_plan(x, axes)``.  When
            ``None``, a plan is auto-generated only if
            ``cupy.fft.config.enable_nd_planning`` is True.

    Returns:
        cupy.ndarray: The transformed array, complex-typed.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.ifftn`
    """
    from cupy.cuda import cufft
    fft_impl = _default_fft_func(x, shape, axes, plan)
    return fft_impl(x, shape, axes, None, cufft.CUFFT_INVERSE,
                    overwrite_x=overwrite_x, plan=plan)
def rfft(x, n=None, axis=-1, overwrite_x=False, plan=None):
"""Compute the one-dimensional FFT for real input.
The returned real array contains
.. code-block:: python
[y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] # if n is even
[y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] # if n is odd
Args:
x (cupy.ndarray): Array to be transformed.
n (None or int): Length of the transformed axis of the output. If ``n``
is not given, the length of the input along the axis specified by
``axis`` is used.
axis (int): Axis over which to compute the FFT.
overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
transforming ``x`` over ``axis``, which can be obtained using::
plan = cupyx.scipy.fftpack.get_fft_plan(
x, axes, value_type='R2C')
Note that `plan` is defaulted to None, meaning CuPy will either
use an auto-generated plan behind the scene if cupy.fft.config.
enable_nd_planning = True, or use no cuFFT plan if it is set to
False.
Returns:
cupy.ndarray:
The transformed array.
.. seealso:: :func:`scipy.fftpack.rfft`
.. note::
The argument `plan` is currently experimental and the interface may be
changed in the future version.
"""
from cupy.cuda import cufft
if n is None:
n = x.shape[axis]
shape = list(x.shape)
shape[axis] = n
f = _fft(x, (n,), (axis,), None, cufft.CUFFT_FORWARD, 'R2C',
overwrite_x=overwrite_x, plan=plan)
z = cupy.empty(shape, f.real.dtype)
slice_z = [slice(None)] * x.ndim
slice_f = [slice(None)] * x.ndim
slice_z[axis] = slice(1)
slice_f[axis] = slice(1)
z[tuple(slice_z)] = f[tuple(slice_f)].real
slice_z[axis] = slice(1, None, 2)
slice_f[axis] = slice(1, None)
z[tuple(slice_z)] = f[tuple(slice_f)].real
slice_z[axis] = slice(2, None, 2)
slice_f[axis] = slice(1, n - f.shape[axis] + 1)
z[tuple(slice_z)] = f[tuple(slice_f)].imag
return z
def irfft(x, n=None, axis=-1, overwrite_x=False):
"""Compute the one-dimensional inverse FFT for real input.
Args:
x (cupy.ndarray): Array to be transformed.
n (None or int): Length of the transformed axis of the output. If ``n``
is not given, the length of the input along the axis specified by
``axis`` is used.
axis (int): Axis over which to compute the FFT.
overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
Returns:
cupy.ndarray:
The transformed array.
.. seealso:: :func:`scipy.fftpack.irfft`
.. note::
This function does not support a precomputed `plan`. If you need this
capability, please consider using :func:`cupy.fft.irfft` or :func:`
cupyx.scipy.fft.irfft`.
"""
from cupy.cuda import cufft
if n is None:
n = x.shape[axis]
m = min(n, x.shape[axis])
shape = list(x.shape)
shape[axis] = n // 2 + 1
if x.dtype in (cupy.float16, cupy.float32):
z = cupy.zeros(shape, dtype=cupy.complex64)
else:
z = cupy.zeros(shape, dtype=cupy.complex128)
slice_x = [slice(None)] * x.ndim
slice_z = [slice(None)] * x.ndim
slice_x[axis] = slice(1)
slice_z[axis] = slice(1)
z[tuple(slice_z)].real = x[tuple(slice_x)]
slice_x[axis] = slice(1, m, 2)
slice_z[axis] = slice(1, m // 2 + 1)
z[tuple(slice_z)].real = x[tuple(slice_x)]
slice_x[axis] = slice(2, m, 2)
slice_z[axis] = slice(1, (m + 1) // 2)
z[tuple(slice_z)].imag = x[tuple(slice_x)]
return _fft(z, (n,), (axis,), None, cufft.CUFFT_INVERSE, 'C2R',
overwrite_x=overwrite_x)
| cupy/cupy | cupyx/scipy/fftpack/_fft.py | _fft.py | py | 19,687 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy.fft._fft._output_dtype",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cupy.fft._fft._convert_fft_type",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "cupy.cuda",
"line_number": 124,
"usage_type": "attribute"
},
{
"api... |
70091360103 | from datetime import datetime
from persistent.list import PersistentList
from zope.annotation import IAnnotations
import logging
TWITTER_KEY = "noise.addon.twitter"
FACEBOOK_KEY = "noise.addon.facebook"
EMAIL_KEY = "noise.addon.email"
HARDCOPY_KEY = "noise.addon.hardcopy"
TWITTER_CSV_HEADERS = ["timestamp", "twitter-text", "tweet-text",
"firstname", "lastname", "email", "phone", "keepposted"]
FACEBOOK_CSV_HEADERS = ["timestamp"]
EMAIL_CSV_HEADERS = ["timestamp", "email-text", "email_body", "firstname",
"lastname", "email", "phone", "keepposted"]
HARDCOPY_CSV_HEADERS = ["timestamp", "hardcopy-text", "hardcopy_body",
"firstname", "lastname", "address", "zipcode", "city",
"phone", "keepposted"]
logger = logging.getLogger('noise.addon')
class NoiseRecord(object):
""" A Noise Record containing form data
"""
def __init__(self, timestamp, record):
self._timestamp = timestamp
self._record = str(record)
@property
def get_record(self):
return eval(self._record)
@property
def get_timestamp(self):
return self._timestamp
def setupAnnotations(context, key, reset=False):
annotations = IAnnotations(context)
if reset or (not key in annotations):
annotations[key] = PersistentList()
return annotations
def add_noise(context, key, record):
annotations = setupAnnotations(context, key)
annotations[key].append(
NoiseRecord(datetime.now().strftime("%d-%m-%Y %H:%M"), record)
)
def get_noise(context, key):
annotations = setupAnnotations(context, key)
data = []
if key in annotations:
data = annotations[key]
data = [d for d in data if isinstance(d, NoiseRecord)]
return data
def status(context, key):
annotations = IAnnotations(context)
return annotations.get(key, [])
| cleanclothes/vmd.noise | noise/addon/storage.py | storage.py | py | 1,916 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "zope.annotation.IAnnotations",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "persistent.list.PersistentList",
"line_number": 44,
"usage_type": "call"
},
{
"api... |
875221895 | #!/usr/bin/python
from foo import bar
import datetime
import json
import pathlib
import shutil
import sys
import urllib.request
date_13w39a = datetime.datetime(2013, 9, 26, 15, 11, 19, tzinfo = datetime.timezone.utc)
date_17w15a = datetime.datetime(2017, 4, 12, 9, 30, 50, tzinfo = datetime.timezone.utc)
date_1_17_pre1 = datetime.datetime(2021, 5, 27, 9, 39, 21, tzinfo = datetime.timezone.utc)
date_1_18_1_rc3 = datetime.datetime(2021, 12, 10, 3, 36, 38, tzinfo = datetime.timezone.utc)
def main():
if len(sys.argv) != 2:
print('Usage: ' + sys.argv[0] + ' <version>')
return
version = sys.argv[1]
print('Fetching Minecraft versions')
with urllib.request.urlopen('https://piston-meta.mojang.com/mc/game/version_manifest_v2.json') as f:
version_manifest = json.load(f)
version_url = None
for ver in version_manifest['versions']:
if ver['id'] == version:
version_url = ver['url']
break
if version_url is None:
print('No such version: ' + version)
return
try:
pathlib.Path(version).mkdir()
except FileExistsError:
print('Version already downloaded: ' + version)
return
with urllib.request.urlopen(version_url) as f:
version_json = json.load(f)
if 'server' not in version_json['downloads']:
print('There is no server for ' + version)
return
release_time = datetime.datetime.fromisoformat(version_json['releaseTime'])
server_url = version_json['downloads']['server']['url']
print('Downloading server for ' + version)
with urllib.request.urlopen(server_url) as fin, open(version + '/server.jar', 'wb') as fout:
shutil.copyfileobj(fin, fout)
print('Finishing up')
with open(version + '/eula.txt', 'w') as f:
f.write('eula=true\n')
with open(version + '/server.properties', 'w') as f:
f.write('enable-command-block=true\n')
f.write('max-players=1\n')
f.write('sync-chunk-writes=false\n')
try:
with open('ops.json') as fin, open(version + '/ops.json', 'w') as fout:
fout.write(fin.read())
except FileNotFoundError:
pass
run_command = 'java'
if date_13w39a <= release_time < date_1_17_pre1:
if release_time < date_17w15a:
log4j_fix_url = 'https://launcher.mojang.com/v1/objects/4bb89a97a66f350bc9f73b3ca8509632682aea2e/log4j2_17-111.xml'
log4j_fix_file = 'log4j2_17-111.xml'
else:
log4j_fix_url = 'https://launcher.mojang.com/v1/objects/02937d122c86ce73319ef9975b58896fc1b491d1/log4j2_112-116.xml'
log4j_fix_file = 'log4j2_112-116.xml'
with urllib.request.urlopen(log4j_fix_url) as fin, open(version + '/' + log4j_fix_file, 'wb') as fout:
shutil.copyfileobj(fin, fout)
run_command += ' -Dlog4j.configurationFile=' + log4j_fix_file
elif date_1_17_pre1 <= release_time < date_1_18_1_rc3:
run_command += ' -Dlog4j2.formatMsgNoLookups=true'
run_command += ' -jar server.jar nogui'
with open(version + '/run_server', 'w') as f:
f.write(run_command + '\n')
pathlib.Path(version + '/run_server').chmod(0o755)
if __name__ == '__main__':
main()
| JWaters02/Hacknotts-23 | testclient/test_code.py | test_code.py | py | 3,249 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.timezone",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.t... |
42243497350 | import numpy as np
import matplotlib.pyplot as plt
from hs_digitizer import *
import glob
import scipy.signal as ss
from scipy.optimize import curve_fit
import re
import matplotlib
#Ns = 500000
#Fs = 200000.
path = "/data/20181030/bead1/high_speed_digitizer/golden_data/amp_ramp_50k_good"
files = glob.glob(path + "/*.h5")
fi_init = 1e5
init_file = 0
final_file = len(files)
n_file = final_file-init_file
sfun = lambda fname: int(re.findall('\d+.h5', fname)[0][:-3])
files.sort(key = sfun)
bw = 2000.
bw_sb = 0.02
obj0 = hsDat(files[init_file])
t0 = obj0.attribs['time']
Ns = obj0.attribs['nsamp']
Fs = obj0.attribs['fsamp']
freqs = np.fft.rfftfreq(Ns, d = 1./Fs)
tarr0 = np.linspace(0, Ns/Fs, Ns)
def line(x, m, b):
return m*x + b
def dec2(arr, fac):
return ss.decimate(ss.decimate(arr, fac), fac)
def sqrt_fun(x, a):
return a*np.sqrt(x)
fc = fi_init
plot_dat = True
matplotlib.rcParams.update({'font.size':12})
f, ax = plt.subplots(dpi = 200)
files = np.array(files)
inds = [0, 100, 200, 300, 400, 499]
files = files[inds]
labels = ["62.5kV/m", "50.0kV/m", "37.5kV/m", "25.0kV/m", "12.5kV/m", "0.0kV/m"]
files = list(files)
p_bool = np.abs(freqs-fc)<bw
freqs /= 1000
fc/=1000
bw/=1000
for i, f in enumerate(files):
print(i)
try:
obj = hsDat(f)
fft = np.fft.rfft(obj.dat[:, 0])
if plot_dat:
ax.plot(freqs, np.abs(fft), label = labels[i])
except:
print("bad file")
ax.set_yscale("log")
ax.set_xlim([fc-bw/2., fc+bw/2.])
plt.xlabel("Frequency[kHz]")
plt.ylabel("Optical Power [arb]")
plt.legend()
plt.tight_layout()
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/spinning/old_scripts/ampt_ramp_spectra_plot.py | ampt_ramp_spectra_plot.py | py | 1,607 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.fft.rfftfreq",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number"... |
38400918265 | # This is a demo of running face recognition on a Raspberry Pi.
# This program will print out the names of anyone it recognizes to the console.
# To run this, you need a Raspberry Pi 2 (or greater) with face_recognition and
# the picamera[array] module installed.
# You can follow this installation instructions to get your RPi set up:
# https://gist.github.com/ageitgey/1ac8dbe8572f3f533df6269dab35df65
import face_recognition
import picamera
import numpy as np
import os
import shutil
from datetime import datetime
# Get a reference to the Raspberry Pi camera.
# If this fails, make sure you have a camera connected to the RPi and that you
# enabled your camera in raspi-config and rebooted first.
camera = picamera.PiCamera()
camera.resolution = (320, 240)
output = np.empty((240, 320, 3), dtype=np.uint8)
# Load a sample picture and learn how to recognize it.
print("Loading known face image(s)")
# Initialize some variables
face_locations = []
face_encodings = []
encoding_array = []
name_array = []
# Directory of training images
directory = "./training_images"
source = './training_images'
destination = './recognized_faces'
files = os.listdir(source)
def main():
def open_files(directory):
if len(os.listdir(directory)) == 0:
print("Directory is empty")
encoding_array = open("face_embeddings.txt", "r").read()
name_array = open("./person_names.txt", "a").read()
else:
print("Directory is not empty")
faces = open("./face_embeddings.txt", "a")
saved_names = open("./person_names.txt", "a")
for filename in os.listdir(directory):
print(filename)
if filename.endswith(".jpg"):
image_data = face_recognition.load_image_file(directory + '/' + filename)
temp_face_encoding = face_recognition.face_encodings(image_data)[0]
encoding_array.append(temp_face_encoding)
name_array.append(filename)
faces.write(encoding_array)
saved_names.write(name_array)
for f in files:
shutil.move(source+f, destination)
# print(os.path.join(directory, filename))
def add_person():
now = datetime.now()
local_time = now.strftime("%I-%M-%S_%Y-%d-%B")
camera.capture(directory+'/'+local_time+'.jpg', format="rgb")
print('New person added')
open_files(directory)
while True:
print("Capturing image.")
# Grab a single frame of video from the RPi camera as a numpy array
camera.capture(output, format="rgb")
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(output)
print("Found {} faces in image.".format(len(face_locations)))
face_encodings = face_recognition.face_encodings(output, face_locations)
match = []
person_name = ''
# Loop over each face found in the frame to see if it's someone we know.
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
match = face_recognition.compare_faces(encoding_array, face_encoding)
name = "<Unknown Person>"
print(match)
for validation in range(len(match)):
if match[validation]:
name = name_array[validation]
person_name = name.split('.')[0]
print("I see someone named {}!".format(person_name))
if __name__ == '__main__':
main() | minakhan01/LanguageLearning | PrototypingFiles/Python Vision Files/raspi_facerec.py | raspi_facerec.py | py | 3,217 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "picamera.PiCamera",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line... |
71249021545 | import json
from math import sqrt
# Returns a distance-based similarity score for person1 and person2
def sim_distance(prefs, person1, person2):
# Get the list of shared_items
si = {}
for item in prefs[person1]:
if item in prefs[person2]: si[item] = 1
# if they have no ratings in common, return 0
if len(si) == 0: return 0
# Add up the squares of all the differences
sum_of_squares = sum([pow(prefs[person1][item] - prefs[person2][item], 2)
for item in prefs[person1] if item in prefs[person2]])
return 1 / (1 + sum_of_squares)
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs, p1, p2):
# Get the list of mutually rated items
si = {}
for item in prefs[p1]:
if item in prefs[p2]:
si[item] = 1
# if they are no ratings in common, return 0
if len(si) == 0:
return 0
# Sum calculations
n = len(si)
# Sums of all the preferences
sum1 = sum([prefs[p1][it] for it in si])
sum2 = sum([prefs[p2][it] for it in si])
# Sums of the squares
sum1Sq = sum([pow(prefs[p1][it], 2) for it in si])
sum2Sq = sum([pow(prefs[p2][it], 2) for it in si])
# Sum of the products
pSum = sum([prefs[p1][it] * prefs[p2][it] for it in si])
# Calculate r (Pearson score)
num = pSum - (sum1 * sum2 / n)
den = sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n))
if den == 0:
return 0
r = num / den
return r
# Returns the best matches for person from the prefs dictionary.
# Number of results and similarity function are optional params.
def top_matches(prefs, person, n=5, similarity=sim_pearson):
scores = [(similarity(prefs, person, other), other)
for other in prefs if other != person]
scores.sort()
scores.reverse()
return scores[0:n]
def calculate_similar_items(prefs, n=10):
# Create a dictionary of items showing which other items they
# are most similar to.
result = {}
# Invert the preference matrix to be item-centric
c = 0
for item in prefs:
# Status updates for large datasets
c += 1
if c % 100 == 0: print("%d / %d" % (c, len(prefs)))
# Find the most similar items to this one
scores = top_matches(prefs, item, n=n, similarity=sim_distance)
result[item] = scores
return result
def get_recommended_items(prefs, item_match, user):
userRatings = prefs[user]
scores = {}
totalSim = {}
# Loop over items rated by this user
for (item, rating) in userRatings.items():
try:
# Loop over items similar to this one
for (similarity, item2) in item_match[item]:
# Ignore if this user has already rated this item
if item2 in userRatings: continue
# Weighted sum of rating times similarity
scores.setdefault(item2, 0)
scores[item2] += similarity * rating
# Sum of all the similarities
totalSim.setdefault(item2, 0)
totalSim[item2] += similarity
except KeyError:
print("Missing Key %s" % (item))
# Divide each total score by total weighting to get an average
# TODO avoid double lookups
rankings = [(score / totalSim[item], item) for item, score in scores.items() if totalSim[item] != 0]
# Return the rankings from highest to lowest
rankings.sort()
rankings.reverse()
return rankings
user_dict = {}
business_dict = {}
with open('/home/vicky/Documents/it/notes/AI/UW/Project/data/review.json') as f:
for line in f:
line = json.loads(line)
user = str(line['user_id'])
business = str(line['business_id'])
rate = line['stars']
if business not in business_dict:
business_dict[business] = {}
business_dict[business][user] = rate
if user not in user_dict:
user_dict[user] = {}
user_dict[user][business] = rate
# for key, value in user_dict.items():
# print("Key : %s, Value: %s"% (key,value))
# for key, values in items_similar.items():
# for i in range(len(values)):
# if values[i][0] > 0.5:
# print("Key : %s, Value : %s"% (values[i][0], values[i][1]))
# for j in range(len(values[i])):
# print(values[i][j])
# bus_6nnI3DfHn-DTd6tWnZu7Jg
users_similar = calculate_similar_items(user_dict)
print(get_recommended_items(business_dict, users_similar, 'bus_F1tOtPzcsQk8PqNOatVsCg'))
# usr_zsZBYWYEmLLs81_f-HHM8w
# buss_similar = calculate_similar_items(business_dict)
# print(get_recommended_items(user_dict, buss_similar, 'usr_zsZBYWYEmLLs81_f-HHM8w'))
| brokencranium/recommender | ItemBasedFiltering.py | ItemBasedFiltering.py | py | 4,740 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.sqrt",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 119,
"usage_type": "call"
}
] |
27517754092 | import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import geojson
import json
import time
def chip_image1(img, chip_size=(300, 300)):
"""
Segment an image into NxWxH chips
Args:
img : Array of image to be chipped
chip_size : A list of (width,height) dimensions for chips
Outputs:
An ndarray of shape (N,W,H,3) where N is the number of chips,
W is the width per chip, and H is the height per chip.
"""
width, height, _ = img.shape
wn, hn = chip_size
images = np.zeros((int(width / wn) * int(height / hn), wn, hn, 3))
k = 0
for i in tqdm(range(int(width / wn))):
for j in range(int(height / hn)):
chip = img[wn * i:wn * (i + 1), hn * j:hn * (j + 1), :3]
images[k] = chip
k = k + 1
return images.astype(np.uint8)
with open(fname) as f:
data = json.load(f)
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels, conv_size, kernel_size=kernel_size, padding=2),
nn.BatchNorm2d(conv_size),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(conv_size, conv_size*2, kernel_size=kernel_size, padding=2),
nn.BatchNorm2d(conv_size*2),
nn.ReLU(),
nn.MaxPool2d(2))
self.fc = nn.Linear(conv_size * in_channels * (conv_size*2), num_classes)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
# -----------------------------------------------------------------------------------
cnn = CNN()
cnn.cuda()
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
#if cuda:
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.benchmark = True
# -----------------------------------------------------------------------------------
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
# -----------------------------------------------------------------------------------
# Train the Model
for epoch in range(num_epochs):
for images, labels in train_loader:
np.shape(images)
np.shape(lables)
images = torchvision.transforms.functional.to_tensor(images)
np.shape(images)
images = Variable(images).cuda()
labels = Variable(labels).cuda()
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = cnn(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
% (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.item()))
# -----------------------------------------------------------------------------------
# Test the Model
cnn.eval() # Change model to 'eval' mode (BN uses moving mean/var).
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images).cuda()
outputs = cnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted.cpu() == labels).sum()
# -----------------------------------------------------------------------------------
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# -----------------------------------------------------------------------------------
# Save the Trained Model
torch.save(cnn.state_dict(), 'cnn.pkl') | catsbergers/Final-Project-Group-2 | jiarong-che-final-project/Code/mywork.py | mywork.py | py | 3,764 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"li... |
72655514343 | import copy
import json
import os
import datetime
from json import dumps
import logging
import uuid
import tweepy
from flask import Flask, render_template, url_for, request, send_from_directory
from flask_pymongo import PyMongo
import folium
from geopy.exc import GeocoderTimedOut
from geopy.geocoders import Nominatim
import pymongo
from flask import Markup
from bson.objectid import ObjectId
from werkzeug.utils import redirect
from dotenv import load_dotenv
from dendritic_cell_algorithm.signal_generator import Signals, remove_urls, remove_user_mentions
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from python_kafka.SignalGenerator import startSignalGenerator
from python_kafka.TweetsLoader import startTweetsLoader
from python_kafka.TweetsLoaderWithParameters import startTweetsLoaderWithParameters
from python_kafka.BotDetector import startBotDetector
import multiprocessing
from confluent_kafka import Producer
load_dotenv()
logging.getLogger().setLevel(logging.INFO)
app = Flask(__name__, template_folder='frontend')
app.static_folder = 'frontend/static'
if int(os.environ['USE_DATABASE_SERVICE']):
print("use db service")
client = pymongo.MongoClient(os.environ['DATABASE_SERVICE'], int(os.environ['DATABASE_PORT']),
username=os.environ['DATABASE_USERNAME'],
password=os.environ['DATABASE_PASSWORD'])
else:
print("don't use db service")
client = pymongo.MongoClient(os.environ['DATABASE_URL'])
try:
db = client["TwitterData"]
col = db["Users1"]
except AttributeError as error:
print(error)
@app.route(os.environ['MS_SG_URL_PATH'] + "generate-signals", methods=['post', 'get'])
def generate_signals():
if request.method == 'POST':
producer_servers = request.form.get("producer_servers")
producer_topic = request.form.get("producer_topic")
consumer_servers = request.form.get("consumer_servers")
consumer_group_id = request.form.get("consumer_group_id")
consumer_offset = request.form.get("consumer_offset")
consumer_topic = request.form.get("consumer_topic")
consumer_key = request.form.get("consumer_key")
consumer_secret = request.form.get("consumer_secret")
access_token = request.form.get("access_token")
access_token_secret = request.form.get("access_token_secret")
bearer = request.form.get("bearer")
use_bearer = int(os.environ['USE_BEARER'])
if bearer is None:
use_bearer = False
if use_bearer:
print("use_bearer")
p2 = multiprocessing.Process(name='p2', target=startSignalGenerator, args=(
consumer_servers, consumer_group_id, consumer_offset, consumer_topic, producer_servers,
producer_topic,
None, None, None, None, bearer,))
else:
print("don't use_bearer")
p2 = multiprocessing.Process(name='p2', target=startSignalGenerator, args=(
consumer_servers, consumer_group_id, consumer_offset, consumer_topic, producer_servers,
producer_topic,
consumer_key, consumer_secret, access_token, access_token_secret, None,))
p2.start()
return "OK"
@app.route(os.environ['MS_SG_URL_PATH'] + "use-new-env-vars", methods=['post', 'get'])
def use_new_env_vars():
if request.method == 'POST':
col1 = db["ApplicationStatus"]
main_parameters = col1.find_one({"name": "MainValues"})
dca_coefficients = col1.find_one(
{"name": "DCACoefficients", "version": main_parameters["coefficients_collection_id"]})
for attr in list(dca_coefficients["coefficients"].keys()):
os.environ[attr] = str(dca_coefficients["coefficients"][attr])
return "SignalGenerator: Ok, DCACoefficients version " + main_parameters["coefficients_collection_id"]
else:
return 404
if __name__ == "__main__":
# app.run()
app.run(host='0.0.0.0')
| rwth-acis/bot-detector | web_application/ms_signal_generator.py | ms_signal_generator.py | py | 4,034 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
... |
25872641550 | __author__ = "Domenico Solazzo"
__version__ = "0.1"
RESPONSE_CODES = {
200: "OK: Success",
202: "Accepted: The request was accepted and the user was queued for processing",
401: "Not Authorized: either you need to provide authentication credentials, or the credentials provided aren't valid.",
403: "Bad Request: Your request is invalid and we'll return and error message that tells you why. This is the status code if you have exceeded the rate limit.",
404: "Not Found: either you are requesting an invalid URI or the resource in question doesn't exist.",
500: "Internal Server Error: we did something wrong.",
502: "Bad Gateway: returned if Klout is down or being upgraded.",
503: "Service Unavailable: the Klout servers are up, but are overloaded with requests. Try again later."
}
class KloutError( Exception ):
def __init__(self, code=0, msg=''):
super(KloutError, self).__init__()
self.code = code
self.msg = msg
def __str__(self):
return repr(self)
def __repr__(self):
return "%i: %s" % (self.code, self.msg)
class Klout( object ):
def __init__(self, key, serviceType="service"):
self._apiKey = key
self.__service = self.__getProxyFactory(serviceType)
def __getProxyFactory(self, serviceType):
service = None
if serviceType == "test":
service = TestKloutService(serviceType)
else:
service = KloutService(self._apiKey)
self.__service = service
return self.__service
def score(self, users):
"""
Retrieve a Klout score
@param: users - List of usernames
@return: A list of tuples in the form (username, klout_score)
"""
if not users:
raise KloutError(0, "No Users")
if not isinstance(users, (list, tuple)):
raise KloutError(0, "Wrong input.")
users = ",".join(users)
query = {"users": users}
result = self.__service.makeCall("score", query)
return result
def show(self, users):
"""
Retrieve a user object
@param: users - List of usernames
@return: A dictionary with the returned data
"""
if not users:
raise KloutError(0, "No Users.")
if not isinstance(users, (list, tuple)):
raise KloutError(0, "Wrong input.")
users = ",".join(users)
query = {"users":users}
result = self.__service.makeCall("user", query)
return result
def topics(self, users):
"""
Returns the top 3 topics objects
@param: users - A list of usernames
@return: A list of dicts in the form [{username:['topic1, topic2, topic3]..}
"""
if not users:
raise KloutError(0, "No Users")
if not isinstance(users, (list, tuple)):
raise KloutError(0, "Wrong Input.")
users = ",".join(users)
query = {"users":users}
result = self.__service.makeCall("topics", query)
return result
def influencerOf(self, users):
"""
Returns up to 5 user score pairs for user that are influencer for the given user
@param: users - A list of usernames
@return: A list of dicts in the form [{username:[(username, score),..}
"""
if not users:
raise KloutError(0, "No Users")
if not isinstance(users, (list, tuple)):
raise KloutError(0, "Wrong Input.")
users = ",".join(users)
query = {"users":users}
result = self.__service.makeCall("influencerOf", query)
return result
def influencedBy(self, users):
"""
Returns up to 5 user score pairs for user that are influenced by the given user
@param: users - A list of usernames
@return: A list of dicts in the form [{username:[(username, score),..}
"""
if not users:
raise KloutError(0, "No Users")
if not isinstance(users, (list, tuple)):
raise KloutError(0, "Wrong Input.")
users = ",".join(users)
query = {"users":users}
result = self.__service.makeCall("influencedBy", query)
return result
class KloutService(object):
def __init__(self, apiKey):
self.apiKey = apiKey
self.VERSION_API = "/1/"
self.API_URL = "api.klout.com"
def getCallUrl(self, callName):
servicePath = ""
if callName == "score":
servicePath = "klout.json"
elif callName == "user":
servicePath = "users/show.json"
elif callName == "topics":
servicePath = "users/topics.json"
elif callName == "influencedBy":
servicePath = "soi/influenced_by.json"
elif callName == "influencerOf":
servicePath = "soi/influencer_of.json"
else:
raise Exception("Url not available")
return self.VERSION_API + servicePath
def _remove_empty_params(self, query):
if not isinstance(query, type({})):
raise Exception("Wrong query in input")
returnedQuery = {}
for key in query:
if not query[key] == None:
returnedQuery[key] = query[key]
return returnedQuery
def makeCall(self, callName, query):
import urllib, httplib, json
url = self.getCallUrl(callName)
query = self._remove_empty_params(query)
if 'key' not in query:
query["key"] = self.apiKey
queryStr = urllib.urlencode(query)
if len(query) > 0:
if url.find("?") == -1:
url = url + "?" + queryStr
else:
url = url + "&" + queryStr
try:
conn = httplib.HTTPConnection(self.API_URL)
conn.request('GET', url)
response = conn.getresponse()
data = response.read()
data = json.loads(data)
except httplib.HTTPException as err:
msg = err.read() or RESPONSE_CODES.get(err.code, err.message)
raise KloutError(err.code, msg)
except ValueError:
msg = "Invalid data: %s" % data
raise KloutError(0, msg)
return data
class TestKloutService(KloutService):
def makeCall(self, callName, query):
if callName == "score":
return {"users":[{"twitter_screen_name":"user1","kscore":23.02}]}
elif callName == "user":
return {"users":[{
"twitter_id": "111111",
"twitter_screen_name":"name",
"score":{
"kscore":10,
"slope":1,
"description":"description",
"kclass_id":1,
"kclass":"Socializer",
"kclass_description":"kclass description",
"network_score":22,
"amplification_score":18,
"true_reach": 10,
"delta_1day": 0.2,
"delta_5day": 0.4
}
}]}
elif callName == "topics":
return {"users":[{"twitter_screen_name":"user1", "topics":["python"]}]}
elif callName == "influencedBy":
return {"users":[
{
"twitter_screen_name":"user1",
"influencers":[{"twitter_screen_name":"user2",
"kscore":10.00
}]
}
]
}
elif callName == "influencerOf":
return {"users":[
{
"twitter_screen_name":"user1",
"influencers":[{"twitter_screen_name":"user2",
"kscore":10.00
}]
}
]
}
elif callName == "history":
return {'dates':[], 'klout_score':[], 'amplification':[],
'retweets':[], 'mentions':[],'network':[],
'followers_following':[], 'followers_count':[], 'mentioners':[],
'retweeters':[],'true_reach':[],'in_out':[]
}
| domenicosolazzo/PythonKlout | pythonklout.py | pythonklout.py | py | 8,616 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "urllib.urlencode",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "httplib.HTTPConnection",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "httplib.HTTPExc... |
42867652572 | from utils import read_input
def age_and_spawn_the_fish(fishes):
baby_age = 8
spawns = determine_num_spawns(fishes)
for i, fish in enumerate(fishes):
fishes[i] = calc_next_age(fish)
for i in range(0, spawns):
fishes.append(baby_age)
return fishes
def calc_next_age(fish):
spawn_time = 6
if fish > 0:
return fish - 1
else:
return spawn_time
def determine_num_spawns(fishes):
return len([i for i in fishes if i == 0])
def p2_spawn_and_age_the_fish(fishes_dict):
new_counts = {}
new_counts[0] = fishes_dict[1]
new_counts[1] = fishes_dict[2]
new_counts[2] = fishes_dict[3]
new_counts[3] = fishes_dict[4]
new_counts[4] = fishes_dict[5]
new_counts[5] = fishes_dict[6]
new_counts[6] = fishes_dict[7] + fishes_dict[0]
new_counts[7] = fishes_dict[8]
new_counts[8] = fishes_dict[0]
return new_counts
if __name__ == "__main__":
fishes = [int(i) for i in read_input("day6_input.txt")[0].split(",")]
days = 256
# PART 1
# for i in range(0, days):
# print(f"DAY {i}")
# fishes = age_and_spawn_the_fish(fishes)
#
# print(f"FINAL: {len(fishes)}")
# PART 2
fishes_dict = {}
for i in range(0, 9):
fishes_dict[i] = len([f for f in fishes if f == i])
for i in range(0, days):
fishes_dict = p2_spawn_and_age_the_fish(fishes_dict)
print(f"PART 2: {sum([fishes_dict[i] for i in fishes_dict])}")
| tthompson691/AdventOfCode | src/2021/Day6/day6_solution.py | day6_solution.py | py | 1,479 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "utils.read_input",
"line_number": 46,
"usage_type": "call"
}
] |
18287559618 | from urllib.request import urlopen
from bs4 import BeautifulSoup
url = input('Enter URL:')
count = int(input('Enter count:'))
position = int(input('Enter position:'))-1
html = urlopen(url).read()
soup = BeautifulSoup(html,"html.parser")
href = soup('a')
#print href
for i in range(count):
link = href[position].get('href', None)
print (href[position].contents[0])
html = urlopen(link).read()
soup = BeautifulSoup(html,"html.parser")
href = soup('a') | Abhishek32971/python_my_code | college/ActivitySet01/problem16.py | problem16.py | py | 473 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.Be... |
7862870875 | #!/usr/bin/env python3
import sys
import re
import glob
import prettytable
import pandas as pd
import argparse
import os
def readFile(filename):
fileContents = list()
with open(filename, "r") as f:
for line in f:
line = line.strip()
fileContents.append(line)
return fileContents
def getStatusLine(fileContents):
totallines = len(fileContents)
statusLine = -1
for i in range(totallines):
result = re.match(r"Resource", fileContents[i])
if result:
statusLine = i - 2
return(statusLine)
def splitStrip(i):
temp = i.split(sep=":")[1]
temp = temp.strip()
return temp
def getJobDetails(fileContents, lineNumber):
lines = fileContents[lineNumber:]
status = lines[0]
for i in lines:
if re.match(r"CPU", i):
cpu_time = splitStrip(i)
if re.match(r"Max Memory", i):
max_mem = splitStrip(i)
if re.match(r"Total Requested Memory", i):
total_mem = splitStrip(i)
if re.match(r"Max Processes", i):
max_proc = splitStrip(i)
if re.match(r"Max Threads", i):
max_threads = splitStrip(i)
if re.match(r"Run time", i):
run_time = splitStrip(i)
x = {'cpu_time':cpu_time,
'status': status,
'max_mem': max_mem,
'total_mem': total_mem,
'max_proc': max_proc,
'max_threads': max_threads,
'run_time': run_time
}
return(x)
def getStartEnd(fileContents):
for i in fileContents:
if re.match(r"Started at", i):
start = i.replace("Started at", "")
if re.match(r"Terminated at", i):
end = i.replace("Terminated at", "")
x = {"start": start, "end":end}
return(x)
def pullOutJobData(fileName):
#print(f"Pulling out job data from ... {fileName}", file = sys.stderr)
fileContents = readFile(fileName) # read file as a list
lineNumber = getStatusLine(fileContents) # get status line
if lineNumber == -1:
job_details = {"status": "running"}
return(job_details)
job_status = fileContents[lineNumber]
job_start_end = getStartEnd(fileContents)
job_details = getJobDetails(fileContents, lineNumber)
if not re.match(r"Successfully completed", job_status):
jminus1=fileContents[lineNumber - 1]
job_status = job_status + " - " + jminus1
job_details.update(job_start_end)
job_details.update({"status": job_status})
return(job_details)
class job:
counter = 0
def __init__(self, fileName):
self.fileName = fileName
temp = pullOutJobData(fileName)
self.status = temp['status']
if self.status == "running":
return
self.cpu_time = temp['cpu_time']
self.max_mem = temp['max_mem']
self.total_mem = temp['total_mem']
self.max_proc = temp['max_proc']
self.run_time = temp['run_time']
self.start = temp['start']
self.end = temp['end']
job.counter += 1
def details(self):
job_details = self.__dict__.items()
if self.counter == 1:
for k,v in job_details:
print("%s" % k, end = "\t")
print()
for k,v in job_details:
print("%s" % v, end = "\t")
print()
def forTable(self, onlyHeader = False):
job_details = self.__dict__.items()
x = list()
if onlyHeader:
for k,v in job_details:
x.append(k)
if not onlyHeader:
for k,v in job_details:
x.append(v)
return(x)
#print(f"{self.fileName}\t{self.status}\t{self.start}\t{self.end}\t{self.cpu_time}\t{self.max_mem}\t{self.total_mem}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--progArgs", default = "pretty", help="output type: [pretty, csv]")
parser.add_argument("--comments", default = "", help="filter for names")
args = parser.parse_args()
if not os.path.isdir(".bsub/"):
print("No farm job log found. See if .bsub exists", file = sys.stderr)
exit(0)
# Search files
search = ".bsub/*" + args.comments + "*.farm"
files = glob.glob(search)
c = 0
lof = list()
for f in files:
j = job(f)
if j.status == "running":
continue
if c == 0:
colnames = j.forTable(onlyHeader = True)
table = prettytable.PrettyTable(colnames)
lof.append(colnames)
l = j.forTable(onlyHeader = False)
table.add_row(l)
lof.append(l)
c += 1
if args.progArgs == "pretty":
print(table)
if args.progArgs == "csv":
df = pd.DataFrame(lof)
print(df.to_csv(index=False, header = False))
| vjbaskar/cscipipe | farm/farmhist.py | farmhist.py | py | 4,130 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 41,
"usa... |
21366953261 | '''
Link: https://www.lintcode.com/problem/shortest-path-in-undirected-graph/description
'''
# Uses bidirectional BFS. I closesly followed the teachings on Jiuzhang.com.
from collections import deque
class Solution:
"""
@param graph: a list of Undirected graph node
@param A: nodeA
@param B: nodeB
@return: the length of the shortest path
"""
def shortestPath(self, graph, A, B):
# Write your code here
length = 0
if A == B:
return length
queue_a, queue_b = deque([A]), deque([B])
a_visited, b_visited = set([A]), set([B])
while len(queue_a) and len(queue_b):
size_queue_a, size_queue_b = len(queue_a), len(queue_b)
if size_queue_a > 0:
length += 1
for _ in range(size_queue_a):
node = queue_a.popleft()
for neib in node.neighbors:
if neib in a_visited:
continue
if neib in b_visited:
return length
queue_a.append(neib)
a_visited.add(neib)
if size_queue_b > 0:
length += 1
for _ in range(size_queue_b):
node = queue_b.popleft()
for neib in node.neighbors:
if neib in b_visited:
continue
if neib in a_visited:
return length
queue_b.append(neib)
b_visited.add(neib)
return -1
| simonfqy/SimonfqyGitHub | lintcode/medium/814_shortest_path_in_undirected_graph.py | 814_shortest_path_in_undirected_graph.py | py | 1,593 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 19,
"usage_type": "call"
}
] |
18050976874 | from django.urls import path
from .views import RegistrationView, CustomLoginView, CustomLogoutView, ProfileView, UserProfileUpdateView, UserEducationalUpdateView
urlpatterns = [
path('register/', RegistrationView.as_view(), name='register'),
path('login/', CustomLoginView.as_view(), name='login'),
path('logout/', CustomLogoutView.as_view(), name='logout'),
path('profile/', ProfileView.as_view(), name='profile'),
path('profile-update/', UserProfileUpdateView.as_view(), name='profile_update'),
path('educational-update/', UserEducationalUpdateView.as_view(), name='educational_update'),
]
| Kamal123-cyber/skillshare | skillshare/skillapp/urls.py | urls.py | py | 618 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.RegistrationView.as_view",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.RegistrationView",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "d... |
72237218663 | """Form definitions."""
from braces.forms import UserKwargModelFormMixin
from crispy_forms.helper import FormHelper, Layout
from crispy_forms.layout import Fieldset, Submit
from django import forms
from django.utils.translation import gettext_lazy as _
from .models import Sheet
class SheetForm(UserKwargModelFormMixin, forms.ModelForm):
"""ModelForm for the Sheet model."""
class Meta: # noqa: D101
model = Sheet
fields = ['exercises']
def __init__(self, *args, **kwargs):
"""Add crispy-forms helper and layout to form."""
super(SheetForm, self).__init__(*args, **kwargs)
# add Crispy Forms foo
self.helper = FormHelper()
self.helper.form_id = 'id-SheetForm'
self.helper.add_input(Submit('continue', 'Save & continue editing'))
self.helper.add_input(Submit('submit', 'Save'))
self.helper.layout = Layout(
Fieldset(
_('sheet form'),
'exercises',
),
)
| FlowFX/unkenmathe.de | src/um/sheets/forms.py | forms.py | py | 1,017 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "braces.forms.UserKwargModelFormMixin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 13,
"usage_type": "name"
},
{
"api... |
69960188586 | from django.contrib.auth.models import AbstractUser, Group
from django.db import models
class User(AbstractUser):
CREATOR = 'CREATOR'
SUBSCRIBER = 'SUBSCRIBER'
ROLE_CHOICES = (
(CREATOR, 'Créateur'),
(SUBSCRIBER, 'Abonné'),
)
profile_photo = models.ImageField(verbose_name='Photo de profil')
role = models.CharField(max_length=30, choices=ROLE_CHOICES, verbose_name='Rôle')
follows = models.ManyToManyField(
'self', # Model en relation: les utilisateurs suivent d'autres utilisateurs. donc le même model
limit_choices_to={'role': CREATOR}, # On ne peut suivre que les créateurs
symmetrical=False, # True si on suit un utilisateur amis.
verbose_name='suit',
)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self.role == self.CREATOR:
group = Group.objects.get(name='creators')
group.user_set.add(self)
elif self.role == self.SUBSCRIBER:
group = Group.objects.get(name='subscribers')
group.user_set.add(self) | TonyQuedeville/fotoblog | authentication/models.py | models.py | py | 1,089 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
... |
6239431595 | from datetime import datetime
import json
from odd_utils import *
VERSION = "1.0"
def shallow_copy(data) -> dict:
if type(data) is list:
return traverse(data)
elif(type(data) is str):
with open(data, "r") as f:
return shallow_copy(json.load(f))
else:
return traverse(data)
def traverse(data) -> dict:
fields = dict()
if(type(data) in odd_primitives):
return odd_primitives[type(data)]
elif(data is None or len(data) < 1):
raise Exception("Data provided is either invalid or empty")
elif(type(data) is list and len(data) > 0):
temp_list = list()
temp_list.append(traverse(data[0]))
return temp_list
elif(type(data) is dict and len(data) > 0):
for key, value in data.items():
d_type = type(value)
if(d_type in odd_primitives):
fields[key] = odd_primitives[d_type]
elif(d_type is dict and len(value) > 0):
fields[key] = traverse(value)
elif(d_type is list and len(value) > 0):
temp_list = list()
temp_list.append(traverse(value[0]))
fields[key] = temp_list
else:
fields[key] = odd.EMPTY.value
else:
fields[key] = odd.EMPTY.value
return fields
| SamuelMiddendorp/OpenDataDocumentor | odd_library.py | odd_library.py | py | 1,332 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
}
] |
21365527624 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from dptb.nnet.mlp import MLP
from dptb.utils.tools import _get_activation_fn
from typing import Optional, Any, Union, Callable
class ResBlock(nn.Module):
def __init__(self, n_in, n_hidden, n_out, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, if_batch_normalized=False, device='cpu', dtype=torch.float32):
super(ResBlock, self).__init__()
self.layer = MLP(n_in, n_hidden, n_out, if_batch_normalized=if_batch_normalized, device=device, dtype=dtype, activation=activation)
self.n_out = n_out
self.n_in = n_in
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
def __setstate__(self, state):
pass
# super(ResBlock, self).__setstate__(state)
def forward(self, x):
out = self.layer(x)
if self.n_in < self.n_out:
out = nn.functional.interpolate(x.unsqueeze(1), size=[self.n_out]).squeeze(1) + out
elif self.n_in == self.n_out:
out = x + out
else:
out = nn.functional.adaptive_avg_pool1d(input=x, output_size=self.n_out) + out
out = self.activation(out)
return out
class ResNet(nn.Module):
def __init__(self, config, activation, if_batch_normalized=False, device='cpu', dtype=torch.float32):
super(ResNet, self).__init__()
self.layers = nn.ModuleList([])
for kk in range(len(config)-1):
self.layers.append(ResBlock(**config[kk], if_batch_normalized=if_batch_normalized, activation=activation, device=device, dtype=dtype))
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
if config[-1].get('n_hidden') is None:
self.out_layer = nn.Linear(in_features=config[-1]['n_in'], out_features=config[-1]['n_out'], device=device, dtype=dtype)
# nn.init.normal_(self.out_layer.weight, mean=0, std=1e-3)
# nn.init.normal_(self.out_layer.bias, mean=0, std=1e-3)
else:
self.out_layer = MLP(**config[-1], if_batch_normalized=False, activation=activation, device=device, dtype=dtype)
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = self.activation(x)
return self.out_layer(x)
if __name__ == '__main__':
config = [
{'n_in': 3, 'n_hidden': 4, 'n_out': 8},
{'n_in': 8, 'n_hidden': 6, 'n_out': 4}
]
net = ResNet(config, activation='relu', if_batch_normalized=True)
a = torch.randn(100, 3)
print(net(a).size()) | deepmodeling/DeePTB | dptb/nnet/resnet.py | resnet.py | py | 2,761 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"lin... |
15290025439 | # -*- coding: utf-8 -*-
from threading import Thread, Event
from yasc.utils import CONFIG, state, ZoneAction, in_production, ControllerMode
from datetime import datetime, timedelta
from time import sleep
import logging
# RPi imports not working
if in_production():
from yasc.pi_controller import get_active_zone, activate_zone, stop_sprinkler
else:
__dev_zone = 0
def activate_zone(zone):
logging.debug('Activation zone {0}.'.format(zone))
global __dev_zone
__dev_zone = zone
state.zone_on(zone)
def get_active_zone():
return __dev_zone
def stop_sprinkler():
logging.debug('Stopping sprinkler zone')
global __dev_zone
if __dev_zone > 0:
logging.debug('Stopping zone {0}.'.format(__dev_zone))
state.zone_off(__dev_zone)
__dev_zone = 0
# FIXME: use thread pool
class ManualRunner(Thread):
def __init__(self, zone, interval):
Thread.__init__(self, name='Zone Run')
self.__interval = interval
self.__zone = zone
self.__stop = Event()
def stop(self):
logging.info('Stop manual run for zone {0}.'.format(self.__zone))
if not self.__stop.is_set():
self.__stop.set()
def run(self):
state.single_zone_on()
start_time = datetime.now()
activate_zone(self.__zone)
while not self.__stop.is_set():
now = datetime.now()
timediff = timedelta(minutes=self.__interval) if in_production() else timedelta(seconds=self.__interval)
if now - start_time > timediff:
self.__stop.set()
sleep(1)
stop_sprinkler()
state.run_off()
logging.info('Manual run for zone {0} end.'.format(self.__zone))
class CycleRunner(Thread):
def __init__(self, interval):
Thread.__init__(self, name='Cycle Run')
self.__interval = interval
self.__stop = Event()
def stop(self):
logging.info('Stop cycle.')
if not self.__stop.is_set():
self.__stop.set()
def __start_zone(self, zone_index):
zone_info = CONFIG.active_zones[zone_index]
activate_zone(zone_info.zone)
interval = getattr(zone_info, "interval", self.__interval)
logging.info('Running zone {0} for {1} min/sec.'.format(zone_info.zone, interval))
return datetime.now(), timedelta(minutes=interval) if in_production() else timedelta(seconds=interval)
def run(self):
logging.info('Starting cycle.')
state.cycle_on()
zone_index = 0
zone_count = len(CONFIG.active_zones)
start_time, interval = self.__start_zone(zone_index)
while not self.__stop.is_set():
now = datetime.now()
if now - start_time > interval:
zone_index += 1
if zone_index < zone_count:
stop_sprinkler()
start_time, interval = self.__start_zone(zone_index)
else:
self.__stop.set()
sleep(1)
stop_sprinkler()
state.run_off()
logging.info('Cycle end.')
class ZoneController(Thread):
def __init__(self):
Thread.__init__(self, name='Zone Controller')
self.__stop = Event()
self.__manual_runner = None
self.__cycle_runner = None
def __stop_cycle_runner(self):
if self.__cycle_runner is not None and self.__cycle_runner.is_alive():
logging.warning('Cycle is running. Terminating...')
self.__cycle_runner.stop()
self.__cycle_runner.join()
self.__cycle_runner = None
def is_cycle_running(self):
return self.__cycle_runner is not None and self.__cycle_runner.is_alive()
def __stop_manual_runner(self):
if self.__manual_runner is not None and self.__manual_runner.is_alive():
logging.warning('Manual runner is acitve. Terminating...')
self.__manual_runner.stop()
self.__manual_runner.join()
self.__manual_runner = None
def is_manual_running(self):
return self.__manual_runner is not None and self.__manual_runner.is_alive()
def get_active_zone(self):
return get_active_zone()
def stop(self):
if not self.__stop.is_set():
self.__stop.set()
self.__stop_manual_runner()
self.__stop_cycle_runner()
state.run_zone_action((ZoneAction.TERMINATE, 0))
self.join()
def control_mode_changed(self):
if state.active_controller_mode() is ControllerMode.OFF:
state.run_zone_action((ZoneAction.STOP, 0))
def __get_zone_index(self, zone):
for index, zone_info in enumerate(CONFIG.active_zones):
if zone_info.zone == zone:
return index
return -1
def __zone_in_active_zones(self, zone):
for zone_info in CONFIG.active_zones:
if zone_info.zone == zone:
return True
return False
def __queue_processor(self, queue):
action_type, event_value = queue.get()
logging.debug('Received action {0} with event value {1}.'.format(action_type, event_value))
self.__stop_manual_runner()
self.__stop_cycle_runner()
if action_type in [ZoneAction.TERMINATE, ZoneAction.STOP]:
# Leave dummy for now
pass
elif action_type == ZoneAction.RUN_CYCLE:
self.__cycle_runner = CycleRunner(CONFIG.default_interval)
self.__cycle_runner.start()
elif action_type == ZoneAction.NEXT:
current_active = get_active_zone()
current_index = self.__get_zone_index(current_active)
next_index = current_index + 1
if -1 < next_index < len(CONFIG.active_zones):
zone = CONFIG.active_zones[next_index].zone
self.__manual_runner = ManualRunner(zone, CONFIG.default_interval)
self.__manual_runner.start()
else:
logging.debug('Next index {0} outside active zone range. Stop yasc.'.format(next_index))
elif action_type == ZoneAction.ZONE:
if self.__zone_in_active_zones(event_value):
self.__manual_runner = ManualRunner(event_value, CONFIG.default_interval)
self.__manual_runner.start()
else:
logging.error('Zone {0} is not an active zone!'.format(event_value))
queue.task_done()
def run(self):
logging.info('Zone Controller started')
while not self.__stop.is_set():
state.process_queue(self.__queue_processor)
logging.info('Zone Controller stopped')
| asmyczek/YASC | yasc/zone_controller.py | zone_controller.py | py | 6,739 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "yasc.utils.in_production",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "yasc.utils.state.zone_on",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "yasc.... |
25677729371 | import cv2
import torch
from PIL import Image
from utils.segmenter import Segmenter
from utils.type_conversion import *
def resize(img, short_size):
w, h = img.size
if w < h:
nw, nh = short_size, int(w * short_size / h)
else:
nw, nh = int(h * short_size / w), short_size
return img.resize((nh, nw))
def test_image(args, model):
if args.detector == 'dlib':
import dlib
elif args.detector == 'faceboxes':
from utils.face_detector import FaceDetectorFaceboxes
model.eval()
device = torch.device("cuda" if args.gpu else "cpu")
image = Image.open(args.image).convert('RGB')
if args.resize > 0:
image = resize(image, args.resize)
detector = None
if args.detector == 'dlib':
detector = dlib.get_frontal_face_detector()
elif args.detector == 'faceboxes':
MODEL_PATH = 'model/faceboxes.pb'
detector = FaceDetectorFaceboxes(MODEL_PATH, gpu_memory_fraction=0.25, visible_device_list='0')
segmenter = Segmenter(model, device, detector, mode=args.detector)
result = segmenter.segment(PIL2opencv(image), args.remove_small_area)
result = opencv2PIL(result)
if args.save:
result.save(args.save)
if not args.unshow:
result.show()
image.show()
def test_video(args, model):
if args.video == '0':
cap = cv2.VideoCapture(0)
else:
cap = cv2.VideoCapture(args.video)
w_win = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h_win = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(w_win, h_win)
if args.resize > 0:
short_size = args.resize
if w_win > h_win:
nw, nh = short_size, int(w_win * short_size / h_win)
else:
nw, nh = int(h_win * short_size / w_win), short_size
else:
nw, nh = w_win, h_win
detector = None
if args.detector == 'dlib':
detector = dlib.get_frontal_face_detector()
elif args.detector == 'faceboxes':
MODEL_PATH = 'model/faceboxes.pb'
detector = FaceDetectorFaceboxes(MODEL_PATH, gpu_memory_fraction=0.25, visible_device_list='0')
device = torch.device("cuda" if args.gpu else "cpu")
segmenter = Segmenter(model, device, detector, mode=args.detector)
if args.save:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(args.save, fourcc, 20, (nh, nw), True)
while True:
frame = cap.read()[1]
if frame is None:
break
frame = cv2.resize(frame, (nh, nw))
result = segmenter.segment(frame, args.remove_small_area)
if args.save:
out.write(result)
if not args.unshow:
cv2.imshow('image', result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if args.save:
out.release()
| MondayYuan/HairSegmentation | scripts/test.py | test.py | py | 2,875 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "dlib.get_frontal_face_detector"... |
21115457088 | """Django FilterSet classes for Nautobot."""
import django_filters
from nautobot.apps.filters import BaseFilterSet, NautobotFilterSet, SearchFilter
from nautobot_chatops.choices import PlatformChoices
from nautobot_chatops.models import CommandLog, AccessGrant, ChatOpsAccountLink, CommandToken
class CommandLogFilterSet(BaseFilterSet):
"""FilterSet for filtering a set of CommandLog objects."""
class Meta:
"""Metaclass attributes of CommandLogFilterSet."""
model = CommandLog
fields = [
"start_time",
"runtime",
"user_name",
"user_id",
"platform",
"command",
"subcommand",
"status",
"details",
]
class AccessGrantFilterSet(BaseFilterSet):
"""FilterSet for filtering a set of AccessGrant objects."""
class Meta:
"""Metaclass attributes of AccessGrantFilterSet."""
model = AccessGrant
fields = ["command", "subcommand", "grant_type", "value"]
class ChatOpsAccountLinkFilterSet(NautobotFilterSet):
"""FilterSet for filtering the ChatOps Account Links."""
q = SearchFilter(
filter_predicates={
"user_id": "icontains",
"platform": "icontains",
}
)
platform = django_filters.MultipleChoiceFilter(choices=PlatformChoices)
class Meta:
"""Metaclass attributes of ChatOpsAccountLinkFilterSet."""
model = ChatOpsAccountLink
fields = "__all__"
class CommandTokenFilterSet(BaseFilterSet):
"""FilterSet for filtering a set of CommandToken objects."""
class Meta:
"""Metaclass attributes of CommandTokenFilterSet."""
model = CommandToken
fields = ["comment", "platform"]
| nautobot/nautobot-plugin-chatops | nautobot_chatops/filters.py | filters.py | py | 1,773 | python | en | code | 47 | github-code | 36 | [
{
"api_name": "nautobot.apps.filters.BaseFilterSet",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "nautobot_chatops.models.CommandLog",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "nautobot.apps.filters.BaseFilterSet",
"line_number": 30,
"usage_ty... |
25450887207 | from django.urls import path, include
from . import views
app_name = "accounts"
urlpatterns = [
# login
path("login/", views.LoginView.as_view(), name="login"),
# logout
path("logout/", views.LogoutView.as_view(), name="logout"),
# signup
path("signup/", views.SignupView.as_view(), name="signup"),
# api
path("api/v1/", include("accounts.api.v1.urls")),
]
| AmirhosseinRafiee/Blog | mysite/accounts/urls.py | urls.py | py | 391 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
5812402236 | import unittest
import warnings
from datetime import date, datetime
from decimal import Decimal
import pytz
from babel import Locale
from fluent.runtime.types import FluentDateType, FluentNumber, fluent_date, fluent_number
class TestFluentNumber(unittest.TestCase):
locale = Locale.parse('en_US')
def setUp(self):
self.cur_pos = fluent_number(123456.78123,
currency='USD',
style='currency')
self.cur_neg = fluent_number(-123456.78123,
currency='USD',
style='currency')
def test_int(self):
i = fluent_number(1)
self.assertTrue(isinstance(i, int))
self.assertTrue(isinstance(i, FluentNumber))
self.assertEqual(i + 1, 2)
def test_float(self):
f = fluent_number(1.1)
self.assertTrue(isinstance(f, float))
self.assertTrue(isinstance(f, FluentNumber))
self.assertEqual(f + 1, 2.1)
def test_decimal(self):
d = Decimal('1.1')
self.assertTrue(isinstance(fluent_number(d), Decimal))
self.assertTrue(isinstance(fluent_number(d), FluentNumber))
self.assertEqual(d + 1, Decimal('2.1'))
def test_disallow_nonexistant_options(self):
self.assertRaises(
TypeError,
fluent_number,
1,
not_a_real_option=True,
)
def test_style_validation(self):
self.assertRaises(ValueError,
fluent_number,
1,
style='xyz')
def test_use_grouping(self):
f1 = fluent_number(123456.78, useGrouping=True)
f2 = fluent_number(123456.78, useGrouping=False)
self.assertEqual(f1.format(self.locale), "123,456.78")
self.assertEqual(f2.format(self.locale), "123456.78")
# ensure we didn't mutate anything when we created the new
# NumberPattern:
self.assertEqual(f1.format(self.locale), "123,456.78")
def test_use_grouping_decimal(self):
d = Decimal('123456.78')
f1 = fluent_number(d, useGrouping=True)
f2 = fluent_number(d, useGrouping=False)
self.assertEqual(f1.format(self.locale), "123,456.78")
self.assertEqual(f2.format(self.locale), "123456.78")
def test_minimum_integer_digits(self):
f = fluent_number(1.23, minimumIntegerDigits=3)
self.assertEqual(f.format(self.locale), "001.23")
def test_minimum_integer_digits_decimal(self):
f = fluent_number(Decimal('1.23'), minimumIntegerDigits=3)
self.assertEqual(f.format(self.locale), "001.23")
def test_minimum_fraction_digits(self):
f = fluent_number(1.2, minimumFractionDigits=3)
self.assertEqual(f.format(self.locale), "1.200")
def test_maximum_fraction_digits(self):
f1 = fluent_number(1.23456)
self.assertEqual(f1.format(self.locale), "1.235")
f2 = fluent_number(1.23456, maximumFractionDigits=5)
self.assertEqual(f2.format(self.locale), "1.23456")
def test_minimum_significant_digits(self):
f1 = fluent_number(123, minimumSignificantDigits=5)
self.assertEqual(f1.format(self.locale), "123.00")
f2 = fluent_number(12.3, minimumSignificantDigits=5)
self.assertEqual(f2.format(self.locale), "12.300")
def test_maximum_significant_digits(self):
f1 = fluent_number(123456, maximumSignificantDigits=3)
self.assertEqual(f1.format(self.locale), "123,000")
f2 = fluent_number(12.3456, maximumSignificantDigits=3)
self.assertEqual(f2.format(self.locale), "12.3")
f3 = fluent_number(12, maximumSignificantDigits=5)
self.assertEqual(f3.format(self.locale), "12")
def test_currency(self):
# This test the default currencyDisplay value
self.assertEqual(self.cur_pos.format(self.locale), "$123,456.78")
def test_currency_display_validation(self):
self.assertRaises(ValueError,
fluent_number,
1234,
currencyDisplay="junk")
def test_currency_display_symbol(self):
cur_pos_sym = fluent_number(self.cur_pos, currencyDisplay="symbol")
cur_neg_sym = fluent_number(self.cur_neg, currencyDisplay="symbol")
self.assertEqual(cur_pos_sym.format(self.locale), "$123,456.78")
self.assertEqual(cur_neg_sym.format(self.locale), "-$123,456.78")
def test_currency_display_code(self):
# Outputs here were determined by comparing with Javascrpt
# Intl.NumberFormat in Firefox.
cur_pos_code = fluent_number(self.cur_pos, currencyDisplay="code")
cur_neg_code = fluent_number(self.cur_neg, currencyDisplay="code")
self.assertEqual(cur_pos_code.format(self.locale), "USD123,456.78")
self.assertEqual(cur_neg_code.format(self.locale), "-USD123,456.78")
@unittest.skip("Babel doesn't provide support for this yet")
def test_currency_display_name(self):
cur_pos_name = fluent_number(self.cur_pos, currencyDisplay="name")
cur_neg_name = fluent_number(self.cur_neg, currencyDisplay="name")
self.assertEqual(cur_pos_name.format(self.locale), "123,456.78 US dollars")
self.assertEqual(cur_neg_name.format(self.locale), "-123,456.78 US dollars")
# Some others locales:
hr_BA = Locale.parse('hr_BA')
self.assertEqual(cur_pos_name.format(hr_BA),
"123.456,78 američkih dolara")
es_GT = Locale.parse('es_GT')
self.assertEqual(cur_pos_name.format(es_GT),
"dólares estadounidenses 123,456.78")
def test_copy_attributes(self):
f1 = fluent_number(123456.78, useGrouping=False)
self.assertEqual(f1.options.useGrouping, False)
# Check we didn't mutate anything
self.assertIs(FluentNumber.default_number_format_options.useGrouping, True)
f2 = fluent_number(f1, style="percent")
self.assertEqual(f2.options.style, "percent")
# Check we copied
self.assertEqual(f2.options.useGrouping, False)
# and didn't mutate anything
self.assertEqual(f1.options.style, "decimal")
self.assertEqual(FluentNumber.default_number_format_options.style, "decimal")
class TestFluentDate(unittest.TestCase):
locale = Locale.parse('en_US')
def setUp(self):
self.a_date = date(2018, 2, 1)
self.a_datetime = datetime(2018, 2, 1, 14, 15, 16, 123456,
tzinfo=pytz.UTC)
def test_date(self):
fd = fluent_date(self.a_date)
self.assertTrue(isinstance(fd, date))
self.assertTrue(isinstance(fd, FluentDateType))
self.assertEqual(fd.year, self.a_date.year)
self.assertEqual(fd.month, self.a_date.month)
self.assertEqual(fd.day, self.a_date.day)
def test_datetime(self):
fd = fluent_date(self.a_datetime)
self.assertTrue(isinstance(fd, datetime))
self.assertTrue(isinstance(fd, FluentDateType))
self.assertEqual(fd.year, self.a_datetime.year)
self.assertEqual(fd.month, self.a_datetime.month)
self.assertEqual(fd.day, self.a_datetime.day)
self.assertEqual(fd.hour, self.a_datetime.hour)
self.assertEqual(fd.minute, self.a_datetime.minute)
self.assertEqual(fd.second, self.a_datetime.second)
self.assertEqual(fd.microsecond, self.a_datetime.microsecond)
self.assertEqual(fd.tzinfo, self.a_datetime.tzinfo)
def test_format_defaults(self):
fd = fluent_date(self.a_date)
en_US = Locale.parse('en_US')
en_GB = Locale.parse('en_GB')
self.assertEqual(fd.format(en_GB), '1 Feb 2018')
self.assertEqual(fd.format(en_US), 'Feb 1, 2018')
def test_dateStyle_date(self):
fd = fluent_date(self.a_date, dateStyle='long')
en_US = Locale.parse('en_US')
en_GB = Locale.parse('en_GB')
self.assertEqual(fd.format(en_GB), '1 February 2018')
self.assertEqual(fd.format(en_US), 'February 1, 2018')
def test_dateStyle_datetime(self):
fd = fluent_date(self.a_datetime, dateStyle='long')
en_US = Locale.parse('en_US')
en_GB = Locale.parse('en_GB')
self.assertEqual(fd.format(en_GB), '1 February 2018')
self.assertEqual(fd.format(en_US), 'February 1, 2018')
def test_timeStyle_datetime(self):
fd = fluent_date(self.a_datetime, timeStyle='short')
en_US = Locale.parse('en_US')
en_GB = Locale.parse('en_GB')
self.assertRegex(fd.format(en_US), '^2:15\\sPM$')
self.assertEqual(fd.format(en_GB), '14:15')
def test_dateStyle_and_timeStyle_datetime(self):
fd = fluent_date(self.a_datetime, timeStyle='short', dateStyle='short')
en_US = Locale.parse('en_US')
en_GB = Locale.parse('en_GB')
self.assertRegex(fd.format(en_US), '^2/1/18, 2:15\\sPM$')
self.assertEqual(fd.format(en_GB), '01/02/2018, 14:15')
def test_validate_dateStyle(self):
self.assertRaises(ValueError,
fluent_date,
self.a_date,
dateStyle="nothing")
def test_validate_timeStyle(self):
self.assertRaises(ValueError,
fluent_date,
self.a_datetime,
timeStyle="nothing")
def test_timeZone(self):
en_GB = Locale.parse('en_GB')
LondonTZ = pytz.timezone('Europe/London')
# 1st July is a date in British Summer Time
# datetime object with tzinfo set to BST
dt1 = datetime(2018, 7, 1, 23, 30, 0, tzinfo=pytz.UTC).astimezone(LondonTZ)
fd1 = fluent_date(dt1, dateStyle='short', timeStyle='short')
self.assertEqual(fd1.format(en_GB), '02/07/2018, 00:30')
fd1b = fluent_date(dt1, dateStyle='full', timeStyle='full')
self.assertRegex(fd1b.format(en_GB), '^Monday, 2 July 2018(,| at) 00:30:00 British Summer Time$')
fd1c = fluent_date(dt1, dateStyle='short')
self.assertEqual(fd1c.format(en_GB), '02/07/2018')
fd1d = fluent_date(dt1, timeStyle='short')
self.assertEqual(fd1d.format(en_GB), '00:30')
# datetime object with no TZ, TZ passed in to fluent_date
dt2 = datetime(2018, 7, 1, 23, 30, 0) # Assumed UTC
fd2 = fluent_date(dt2, dateStyle='short', timeStyle='short',
timeZone='Europe/London')
self.assertEqual(fd2.format(en_GB), '02/07/2018, 00:30')
fd2b = fluent_date(dt2, dateStyle='full', timeStyle='full',
timeZone='Europe/London')
self.assertRegex(fd2b.format(en_GB), '^Monday, 2 July 2018(,| at) 00:30:00 British Summer Time$')
fd2c = fluent_date(dt2, dateStyle='short',
timeZone='Europe/London')
self.assertEqual(fd2c.format(en_GB), '02/07/2018')
fd2d = fluent_date(dt1, timeStyle='short',
timeZone='Europe/London')
self.assertEqual(fd2d.format(en_GB), '00:30')
def test_allow_unsupported_options(self):
# We are just checking that these don't raise exceptions
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fluent_date(self.a_date,
hour12=True,
weekday="narrow",
era="narrow",
year="numeric",
month="numeric",
day="numeric",
hour="numeric",
minute="numeric",
second="numeric",
timeZoneName="short",
)
def test_disallow_nonexistant_options(self):
self.assertRaises(
TypeError,
fluent_date,
self.a_date,
not_a_real_option=True,
)
def test_dont_wrap_unnecessarily(self):
f1 = fluent_date(self.a_date)
f2 = fluent_date(f1)
self.assertIs(f1, f2)
def test_copy_attributes(self):
f1 = fluent_date(self.a_date, dateStyle='long', hour12=False)
self.assertEqual(f1.options.dateStyle, 'long')
f2 = fluent_date(f1, hour12=False)
# Check we copied other attributes:
self.assertEqual(f2.options.dateStyle, "long")
self.assertEqual(f2.options.hour12, False)
# Check we can override
f3 = fluent_date(f2, dateStyle="full")
self.assertEqual(f3.options.dateStyle, "full")
# and didn't mutate anything
self.assertEqual(f1.options.dateStyle, "long")
self.assertEqual(f2.options.dateStyle, "long")
| projectfluent/python-fluent | fluent.runtime/tests/test_types.py | test_types.py | py | 12,837 | python | en | code | 185 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "babel.Locale.parse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "babel.Locale",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "fluent.runtime... |
73198190823 | import json
import re
from typing import Any, Dict, List, Text
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.datacatalog import CloudDataCatalogHook
import google.auth.transport.requests
from google.auth.transport.urllib3 import AuthorizedHttp
from grizzly.config import Config
from grizzly.etl_action import parse_table
from grizzly.grizzly_typing import TGrizzlyOperator
_TPolicyTags = Dict[str, str]
class DataCatalogTag:
"""Perform actions with DataCatalog.
Should be used for Data Catalog Table and column tags.
Assign Column level security throw DataCatalog Taxonomy.
Attributes:
execution_context (GrizzlyOperator): Instance of GrizzlyOperator executed.
column_policy_tags (list[dict]): List of Column level policy (security) tags
to be applied in format
{ 'column_name': 'column_policy_tag_id'}
datacatalog_tags (list[dict]): Content of JSON file defined in
[data_catalog_tags] attribute of task YML file. Content is rendered as
JINJA2 template and loaded as list of dictionaries with definition of
table and column tags to be applied.
authed_http (google.auth.transport.urllib3.AuthorizedHttp): Authorized http
connection for work with Data Catalog Rest API.
base_api_url (string): Base URL for work with DataCatalog Rest API.
dc_hook (CloudDataCatalogHook):
Airflow predefined hooks for work with GCP Data Catalog.
"""
def __init__(self,
execution_context: TGrizzlyOperator,
column_policy_tags: List[_TPolicyTags],
datacatalog_tags: List[Text]) -> None:
"""Set up DataCatalogTag instance.
If [column_policy_tags] or [datacatalog_tags] was defined set up
correspondent class properties.
Args:
execution_context (TGrizzlyOperator): Instance of GrizzlyOperator
executed.
column_policy_tags (list): List of Column level policy (security)
tags to be applied in format
{'column_name': 'taxonomy|tag_hierarchy'}
Contains column level security configuration.
datacatalog_tags (list): Content of JSON file defined in
[data_catalog_tags] attribute of task YML file. Content is rendered as
JINJA2 template and loaded as list of dictionaries with definition of
table and column tags to be applied. Contains Table and column tags.
"""
self.execution_context = execution_context
if column_policy_tags or datacatalog_tags:
self.__setup_datacatalog_connection()
if column_policy_tags:
# Get list of DataCatalog security policy tag mapping
self.column_policy_tags = self.__get_column_policy_tags_mapping(
column_policy_tags)
else:
self.column_policy_tags = None
if datacatalog_tags:
self.datacatalog_tags = datacatalog_tags
else:
self.datacatalog_tags = None
def __get_table_entry_id(self, target_table: Dict[str, str]) -> Any:
"""Get an DataCatalog EntryId by table name."""
target_table = parse_table(target_table)
resource_name = (f'//bigquery.googleapis.com/'
f'projects/{target_table["project_id"]}/'
f'datasets/{target_table["dataset_id"]}/'
f'tables/{target_table["table_id"]}')
table_entry = self.dc_hook.lookup_entry(linked_resource=resource_name)
return table_entry
def __setup_datacatalog_connection(self) -> None:
"""Setup connection credentials for access Data Catalog API."""
scopes = ['https://www.googleapis.com/auth/cloud-platform']
# pylint: disable=unused-variable
credentials, project = google.auth.default(scopes=scopes)
auth_req = google.auth.transport.requests.Request()
credentials.refresh(auth_req)
self.authed_http = AuthorizedHttp(credentials)
access_token = credentials.token
self.base_api_url = (
'https://datacatalog.googleapis.com/v1/{api_call}?access_token='
+ access_token)
# setup datacatalog hooks
self.dc_hook = CloudDataCatalogHook()
def __get_column_policy_tags_mapping(
self,
column_policy_tags: List[_TPolicyTags]
) -> _TPolicyTags:
"""Return a list of all applicable taxonomies for job/table.
Parse user defined format from task YML file and transform it into format
consumable by DataCatalog Rest API.
Method gets all taxonomy list on environment. Then select taxonomy defined
by user and parses taxonomy tag hierarchy to find [column_policy_tag_id]
that matches with taxonomy tag hierarchy defined by user in task YML file
attribute [column_policy_tags].
Args:
column_policy_tags (list[dict]): List of column policy tag definition to
be parsed in format: {'column_name': 'taxonomy|tag_hierarchy'}
Raises:
AirflowException: Raise error in case if Column policy taxonomy as not
defined on target GCP project or if user defined reference to policy tag
that does not exist.
Returns:
(dict): List of column policy tag definition in format
{'column_name': 'column_policy_tag_id'}
"""
column_policy_tags_mapping = {}
# get a set of all applicable taxonomies
# accordingly to job YML configuration [column_policy_tags]
requested_taxonomies = set()
for c in column_policy_tags:
for v in c.values():
# Add taxonomy name to set
requested_taxonomies.add(v.split('|')[0])
# Get list of DataCatalog taxonomies
api_call = Config.DEFAULT_DATACATALOG_TAXONOMY_LOCATION
session_url = self.base_api_url.format(api_call=api_call)
r = self.authed_http.urlopen(method='get', url=session_url)
taxonomy_mapping = {
}
# looks like {'taxonomy_name': 'projects/prj_id/locations/us/taxonomies/64'}
if r.status == 200:
response = json.loads(r.data)
# work only with taxonomies that were requested in YML
taxonomy_mapping = {
i['displayName']: i['name']
for i in response['taxonomies']
if i['displayName'] in requested_taxonomies
}
# extract raw list of tags for each taxonomy
for k, v in taxonomy_mapping.items():
taxonomy_tag_list_raw = self.__get_taxonomy_policy_tags_raw(v)
for t in taxonomy_tag_list_raw:
column_policy_tags_mapping.update(
self.__get_tag_hierarchy(
taxonomy_name=k, raw_data=taxonomy_tag_list_raw, tag=t))
else:
raise AirflowException(
('Could not receive a list of taxonomies for '
f'project {Config.GCP_PROJECT_ID}. Check security configuration '
'for service account.')
)
# iterate requested tags.
# raise Exception if taxonomy does not exist in project
for ct in column_policy_tags:
for column, tag in ct.items():
if tag not in column_policy_tags_mapping:
raise AirflowException(
(f'Check your YML configuration. Column [{column}] : Tag [{tag}] '
'does not exist in GCP Data Catalog.')
)
# transform array column policy mapping into dictionary with correct tag Ids
column_policy_tags_resultset = dict()
for c in column_policy_tags:
for key in c:
column_policy_tags_resultset[key] = column_policy_tags_mapping[c[key]]
return column_policy_tags_resultset
def __get_tag_hierarchy(self,
taxonomy_name: str,
raw_data: Any,
tag: Dict[str, Any],
tag_display_name: str = '',
tag_id: str = '') -> Dict[str, Any]:
"""Get Data Catalog Taxonomy tag hierarchy mapping.
Method performs recursive scan of taxonomy tags hierarchy and creates
mapping between DataCatalog policy tag id and human-readable
representation of this tag in format similar to 'taxonomy|tag_hierarchy'
Args:
taxonomy_name (string): Human readable taxonomy name from
[column_policy_tags] attribute defined in task YML raw_data.
raw_data: Raw json response from DataCatalog Rest API.
tag (dict): Rest API definition of policy tag. More details about format
of dictionary you can find here:
https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.taxonomies.policyTags#PolicyTag
tag_display_name (string): Tag name in human-readable format
'parent_tag_1|parent_tag_1.1|tag'
tag_id (string): Tag id in format supported by Data Catalog Rest API.
projects/{project}/locations/{location}/taxonomies/{taxonomies}/policyTags/{policytag}
Returns:
(dict): List of column policy tag definition in format
{'taxonomy_name|tag_display_name': 'tag_id'}
For example:
{
'proto_column_access_policy|PII|high':
'projects/prj/locations/us/taxonomies/11/policyTags/22'
}
"""
# parse raw taxonomy data and return tag hierarchy
parent_id = tag.get('parentPolicyTag', None)
tag_id = tag_id if tag_id else tag['name']
tag_display_name = '|'.join([tag['displayName'], tag_display_name
]) if tag_display_name else tag['displayName']
# if tag not in a root of hierarchy
if parent_id:
# get parent tag details
parent_tag = list(filter(lambda x: x['name'] == parent_id, raw_data))[0]
return self.__get_tag_hierarchy(
taxonomy_name=taxonomy_name,
raw_data=raw_data,
tag=parent_tag,
tag_display_name=tag['displayName'],
tag_id=tag_id)
else:
return {taxonomy_name + '|' + tag_display_name: tag_id}
def __get_taxonomy_policy_tags_raw(self,
taxonomy_id: str) -> List[Dict[str, Any]]:
"""Get a list of all policy tags inside Data Catalog Policy Tags taxonomy.
Next Rest API call is used
https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.taxonomies.policyTags/list
Args:
taxonomy_id (string): Taxonomy id in format acceptable by Rest API
projects/{project}/locations/{location}/taxonomies/{taxonomies}
Raises:
AirflowException: Raise exception in case if Data Catalog Rest API not
able to retrieve list of tags inside taxonomy.
Returns:
(list(dict)): List of policy tags in format
https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.taxonomies.policyTags#PolicyTag
"""
api_call = f'{taxonomy_id}/policyTags'
session_url = self.base_api_url.format(api_call=api_call)
r = self.authed_http.urlopen(method='GET', url=session_url)
if r.status == 200:
response = json.loads(r.data)
else:
raise AirflowException(
f'Could not receive a tag list for taxonomy {taxonomy_id}.')
return response['policyTags']
def set_column_policy_tags(self, target_table: str) -> None:
"""Update column policy tags on target table.
Assign Column policy tags from [self.column_policy_tags] to table columns on
a base of column level security defined in attribute [column_policy_tags] of
task YML file.
Args:
target_table (string): Name of a table on which you want to set up column
level security.
"""
if self.column_policy_tags:
target_table = parse_table(target_table)
table_schema_definition = self.execution_context.bq_cursor.get_schema(
dataset_id=target_table['dataset_id'],
table_id=target_table['table_id'])['fields']
tagged_column_list = [*self.column_policy_tags
] # get list of tagged columns from dictionary
# filter only columns that tagged
# iterate schema and set policy tags
for i in range(len(table_schema_definition)):
cn = table_schema_definition[i]['name']
if cn in tagged_column_list:
table_schema_definition[i]['policyTags'] = {
'names': [self.column_policy_tags[cn]]
}
# patch target table with updated fields
self.execution_context.bq_cursor.patch_table(
dataset_id=target_table['dataset_id'],
table_id=target_table['table_id'],
schema=table_schema_definition)
return
def set_table_tags(self, target_table: str) -> None:
"""Set DataCatalog tags on a table and table columns.
Apply tags from self.datacatalog_tags. All tags that were not defined in
JSON tag configuration file will be removed.
Args:
target_table (string): Target table for which data catalog tags should
be assigned.
Raises:
Exception: Exception raised in case if Rest API does not return Data
Catalog EntityId for requested table.
AirflowException: Also exception raised in case if application is not
able to delete or create tags due some security restriction or other
issues.
"""
if self.datacatalog_tags:
# get entry_id for target_table
entry_id = self.__get_table_entry_id(target_table)
# parse entry_id
entry_id_parsed = re.match(
(r'^projects/(?P<project_id>.+)/locations/(?P<location>.+)/'
r'entryGroups/(?P<entry_group>.+)/entries/(?P<entry_id>.+)$'),
entry_id.name)
if not entry_id_parsed:
raise AirflowException(
f'Could not extract entity_id for [{target_table}].')
# get a list of tags already assigned to table
existing_table_tags = self.dc_hook.list_tags(
location=entry_id_parsed['location'],
entry_group=entry_id_parsed['entry_group'],
entry=entry_id_parsed['entry_id'],
project_id=entry_id_parsed['project_id'],
page_size=500)
# construct a list of (template, column) for requested tags
requested_tags = [
(t['template'], t.get('column', '')) for t in self.datacatalog_tags
]
# drop existing tags in case of importance
for et in existing_table_tags:
tag_name = et.name
tag_template = et.template
tag_column = getattr(et, 'column', '')
if (tag_template, tag_column) in requested_tags:
# drop existing tag first for avoid ERROR 409
api_call = f'{tag_name}'
session_url = self.base_api_url.format(api_call=api_call)
r = self.authed_http.urlopen(method='DELETE', url=session_url)
if r.status != 200:
raise AirflowException(
(f'Could not delete tag from table table.\n'
f'ERROR: {r.status} - {r.data}')
)
for tag in self.datacatalog_tags:
api_call = f'{entry_id.name}/tags'
session_url = self.base_api_url.format(api_call=api_call)
session_body = json.dumps(tag)
r = self.authed_http.urlopen(
method='POST', url=session_url, body=session_body)
if r.status != 200:
raise AirflowException(
(f'Could not create new tag on target table. {tag} \n'
f'ERROR: {r.status} - {r.data}')
)
return
| google/grizzly | airflow/plugins/grizzly/data_catalog_tag.py | data_catalog_tag.py | py | 15,091 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "grizzly.grizzly_typing.TGrizzlyOperator",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typin... |
5409351564 | import sys
from pathlib import Path
from shutil import copy, copytree, ignore_patterns
# This script initializes new pytorch project with the template files.
# Run `python3 new_project.py ../MyNewProject` then new project named
# MyNewProject will be made
current_dir = Path()
assert (
current_dir / "new_project.py"
).is_file(), "Script should be executed in the pytorch-template directory"
assert (
len(sys.argv) == 2
), "Specify a name for the new project. Example: python3 new_project.py MyNewProject"
project_name = Path(sys.argv[1])
target_dir = current_dir / project_name
package_dir = target_dir / "src"
package_dir.mkdir(parents=True)
ignore = [
".git",
"data",
"saved",
"new_project.py",
"LICENSE",
"README.md",
"__pycache__",
".mypy_cache",
]
copytree(
current_dir / "src",
package_dir / project_name.name,
ignore=ignore_patterns(*ignore),
)
(target_dir / "config").mkdir()
copy(current_dir / "config.json", target_dir / "config")
(target_dir / "datasets").mkdir()
(target_dir / "saved").mkdir()
copy(current_dir / ".gitignore", target_dir / "config")
copy(current_dir / ".flake8", target_dir / "config")
print("New project initialized at", target_dir.absolute().resolve())
| Ttayu/pytorch-template | new_project.py | new_project.py | py | 1,242 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": ... |
3829718910 | from __future__ import print_function
import io
import logging
import logging.handlers
import sys
import threading
import time
try:
import argparse
except ImportError:
sys.stderr.write("""
ntploggps: can't find the Python argparse module
If your Python version is < 2.7, then manual installation is needed:
# pip install argparse
""")
sys.exit(1)
try:
import gps
except ImportError as e:
sys.stderr.write("ntploggps: can't find Python GPSD library.\n")
sys.stderr.write("%s\n" % e)
sys.exit(1)
class logfile_header_class(logging.handlers.TimedRotatingFileHandler):
'A class to modify the file logging handler.'
def doRollover(self):
'function to add header to new file on rotation.'
if str is bytes:
super(logfile_header_class, self).doRollover()
else:
super().doRollover()
self.stream.write('# Time Device TDOP nSat\n')
def logging_setup():
"Create logging object"
logFormat = logging.Formatter('%(message)s')
# Create logger for gpsd
Logger = logging.getLogger()
Logger.setLevel(logging.INFO)
# Create file handler
if args.logfile:
# log to logfile
file = logfile_header_class(
args.logfile[0],
utc=True,
when='midnight',
interval=1)
else:
# log to stdout
file = logging.StreamHandler(sys.stdout)
file.setLevel(logging.INFO)
# Create the formatter and add it to the handler
file.setFormatter(logFormat)
# Add the handler to the logger
Logger.addHandler(file)
return Logger
parser = argparse.ArgumentParser(description="gpsd log file generator",
epilog="""
See the manual page for details.
""")
parser.add_argument('-l', '--logfile',
dest='logfile',
help="append log data to LOGFILE instead of stdout",
nargs=1)
parser.add_argument('-o', '--once',
action="store_true",
dest='once',
help="log one line, then exit")
parser.add_argument('-w', '--wait',
default=[5],
dest='wait',
help="wait WAIT seconds after each log line, default 5",
nargs=1,
type=int)
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help="be verbose")
parser.add_argument('-V', '--version',
action="version",
version="ntploggps ntpsec-@NTPSEC_VERSION_EXTENDED@")
args = parser.parse_args()
if args.verbose:
print("ntploggps: arguments:")
print(args)
if args.logfile:
# log to logfile
try:
out = open(args.logfile[0], mode='a')
except io.UnsupportedOperation as e:
sys.stderr.write("ntploggps: can't open logfile %s\n" % args.logfile)
sys.stderr.write("%s\n" % e)
sys.exit(1)
if args.verbose:
print("ntploggps: opened log file %s" % args.logfile[0])
else:
# log to stdout
out = sys.stdout
class GpsPoller(threading.Thread):
running = False # True when thread is running. Quit when set False
def __init__(self):
threading.Thread.__init__(self)
self.device = None
self.satellites_used = None
self.tdop = None
# start the streaming of gps data
try:
self.gpsd = gps.gps(mode=gps.WATCH_ENABLE)
except BaseException as e:
sys.stderr.write("ntploggps: Can't connect to gpsd, %s\n"
" Is gpsd running?\n" % e)
sys.exit(1)
self.running = True
def run(self):
while gpsp.running:
if self.gpsd.read() == -1:
self.running = False
break
if not hasattr(self.gpsd, "data"):
continue
if self.gpsd.data.get("class", None) != "SKY":
continue
satellite_list = self.gpsd.data.get(
"satellites", None
)
count_used_satellites = None
if satellite_list is not None:
count_used_satellites = sum(
map(lambda x: x.used, satellite_list)
)
time_dilution = self.gpsd.data.get("tdop", None)
device_path = self.gpsd.data.get("device", None)
if count_used_satellites is None:
count_used_satellites = self.gpsd.data.get(
"uSat", None
)
if None not in [
count_used_satellites,
time_dilution,
device_path,
]:
self.satellites_used = count_used_satellites
self.tdop = time_dilution
self.device = device_path
@property
def time(self):
"Return the gpsd time fix"
t = self.gpsd.fix.time
if isinstance(t, int):
return t
if isinstance(t, float):
if not gps.isfinite(t):
return None
return t
return gps.isotime(t)
if __name__ == '__main__':
# this is the main thread
if args.verbose:
print("ntploggps: creating poll thread")
gpsp = GpsPoller() # create the thread
try:
# Create the logger instance
Logger = logging_setup()
# Create data layout
Logger.info("# Time Device TDOP nSat")
gpsp.start() # start it up
last_time = 0
while gpsp.running:
# It may take a second or two to get good data
try:
current_time = gpsp.time
device = gpsp.device
tdop = gpsp.tdop
satellites_used = gpsp.satellites_used
if current_time is not None and \
device is not None and \
satellites_used is not None and \
tdop is not None:
if last_time != current_time:
s = '%i %s %f %d' % (current_time, device, tdop,
satellites_used)
Logger.info(s)
last_time = current_time
if args.once:
# just once
break
except AttributeError as e:
print('parse error\n')
# wait a bit before next log
time.sleep(args.wait[0])
except (KeyboardInterrupt, SystemExit): # when you press ctrl+c
args.once = True # stop the retry loop
if args.verbose:
print("\nKilling Thread...")
else:
# print a blank line to make bash happy
print("")
except Exception as e: # any error, signal
print(e)
# tell the thread to die
gpsp.running = False
# wait for the thread to finish what it's doing
gpsp.join()
if args.verbose:
print("ntploggps: Done -- Exiting.")
| ntpsec/ntpsec | ntpclients/ntploggps.py | ntploggps.py | py | 7,198 | python | en | code | 225 | github-code | 36 | [
{
"api_name": "sys.stderr.write",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"lin... |
14963542759 | import mysql.connector
import socket
import logging
from logging.config import fileConfig
fileConfig('log.ini', defaults={'logfilename': 'bee.log'})
logger = logging.getLogger('database')
mydb = mysql.connector.connect(
host="45.76.113.79",
database="hivekeeper",
user="pi_write",
password=")b*I/j3s,umyp0-8"
)
def upload_wx(wx, verbose=False):
mycursor = mydb.cursor()
sql = "INSERT INTO `weather` (dt, location, wind_deg, wind_gust, wind_speed, temp, temp_min, temp_max, temp_feels_like, humidity, pressure, clouds, sunrise, sunset, visibility, description) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
val = (
wx['calc_time'],
wx['location'],
wx['wind_deg'],
wx['wind_gust'],
wx['wind_speed'],
wx['temp'],
wx['temp_min'],
wx['temp_max'],
wx['temp_feels_like'],
wx['humidity'],
wx['pressure'],
wx['clouds'],
wx['sunrise'],
wx['sunset'],
wx['visibility'],
wx['wx_description'],
)
mycursor.execute(sql, val)
mydb.commit()
if verbose:
logger.debug (str(mycursor.rowcount) + " record inserted.")
return True
def get_host_name():
return socket.gethostname()
def send_data(sensor_id, sensor_value, table=u'raw_data', verbose=False):
mycursor = mydb.cursor()
sql = "INSERT INTO `" + table + "` (host, sensor_id, value) VALUES (%s, %s, %s)"
val = (socket.gethostname(), sensor_id, sensor_value)
mycursor.execute(sql, val)
mydb.commit()
if verbose:
logger.debug (str(mycursor.rowcount) + " record inserted.")
return True
| jenkinsbe/hivekeepers | database.py | database.py | py | 1,702 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.config.fileConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_n... |
6750580086 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QDialog, QTreeWidgetItem, QMenu
from PyQt5.QtCore import pyqtSlot, QPoint
from labrecord.controllers.labrecordscontroller import LabrecordsController
from labrecord.modules.editobservationmodule import EditObservationModule
from labrecord.modules.checkreportmodule import CheckreportModule
from labrecord.views.editsamplerecorddetail import Ui_Dialog
from labrecord.modules.applycheckmodule import ApplycheckModule
from product.controllers.productcontroller import ProductController
import decimal
import user
class EditSampleRecordDetailModule(QDialog, Ui_Dialog):
def __init__(self, autoid, parent=None):
super(EditSampleRecordDetailModule, self).__init__(parent)
self.setupUi(self)
if '50' not in user.powers:
self.close()
if user.powers['10'] == 0:
self.close()
self.power = '{:03b}'.format(user.powers['10'])
if self.power[1] == '0':
self.pushButton_accept.setVisible(False)
self.pushButton_cancel.setVisible(False)
self.autoid = autoid
self.checkitem_id = None
self.ori_detail = object
self.new_detail = {}
self.lr_list = []
self.LC = LabrecordsController()
self.PC = ProductController()
self.get_detail()
self.get_observation_record()
self.get_labrecord_list()
def get_detail(self):
condition = {'autoid': self.autoid}
res = self.LC.get_data(6, False, **condition)
if len(res) != 1:
self.pushButton_accept.setEnabled(False)
self.pushButton_cancel.setEnabled(False)
return
self.ori_detail = res[0]
self.lineEdit_product.setText(
self.ori_detail.ppid.prodid + ' ' + self.ori_detail.ppid.prodname
)
self.lineEdit_commonname.setText(self.ori_detail.ppid.commonname)
self.lineEdit_batchno.setText(self.ori_detail.ppid.batchno)
self.lineEdit_spec.setText(self.ori_detail.ppid.spec)
self.lineEdit_package.setText(self.ori_detail.ppid.package)
self.lineEdit_makedate.setText(str(self.ori_detail.ppid.makedate))
self.lineEdit_samplequantity.setText(str(self.ori_detail.samplequantity))
self.lineEdit_unit.setText(self.ori_detail.unit)
self.comboBox_kind.setCurrentIndex(self.ori_detail.kind)
if self.ori_detail.status != 0:
self.pushButton_accept.setEnabled(False)
self.pushButton_cancel.setEnabled(False)
def get_observation_record(self):
self.treeWidget_observation.clear()
condition = {'srid': self.autoid}
res = self.LC.get_data(7, False, **condition)
if not len(res):
return
lrid_list = res.values_list(*VALUES_TUPLE_LRID, flat=True)
condition_lr={'ciid__in': lrid_list, 'labtype':6}
self.lr_list = self.LC.get_data(
0, False, *VALUES_TUPLE_LR, **(condition_lr)
)
for item in res.values(*VALUES_TUPLE_OB):
qtreeitem = QTreeWidgetItem(self.treeWidget_observation)
qtreeitem.setText(0, str(item['autoid']))
qtreeitem.setText(2, item['obsperiod'])
qtreeitem.setText(3, str(item['obsdate']))
qtreeitem.setText(4, str(item['samplequantity']))
qtreeitem.setText(5, item['unit'])
qtreeitem.setText(6, item['conclusion'])
for it in self.lr_list:
if it['ciid'] == item['autoid']:
qtreeitem.setText(1, str(it['autoid']))
qtreeitem.setText(7, STATUS[it['status']])
break
for i in range(2, 8):
self.treeWidget_observation.resizeColumnToContents(i)
def get_labrecord_list(self):
self.treeWidget_labrecord.clear()
if self.ori_detail is None:
return
for item in self.lr_list:
qtreeitem = QTreeWidgetItem(self.treeWidget_labrecord)
qtreeitem.setText(0, str(item['autoid']))
qtreeitem.setText(1, item['paperno'])
qtreeitem.setText(2, str(item['reportdate']))
qtreeitem.setText(3, STATUS[item['status']])
    @pyqtSlot(QPoint)
    def on_treeWidget_observation_customContextMenuRequested(self, pos):
        """Context menu for the observation tree.

        Offers add / edit / delete, plus submitting or cancelling a check
        request ("请验") for the selected observation. Only available while
        the sample record is still editable (status == 0).
        """
        if self.ori_detail is None:
            return
        if self.ori_detail.status != 0:
            return
        current_item = self.treeWidget_observation.currentItem()
        menu = QMenu()
        action_1 = menu.addAction("增加")    # add observation
        action_2 = menu.addAction("修改")    # edit observation
        action_3 = menu.addAction("删除")    # delete observation
        menu.addSeparator()
        action_4 = menu.addAction("提交请验")  # submit check request
        action_5 = menu.addAction("取消请验")  # cancel check request
        global_pos = self.treeWidget_observation.mapToGlobal(pos)
        action = menu.exec(global_pos)
        if action == action_1:
            unit = self.lineEdit_unit.text()
            detail = EditObservationModule(
                srid=self.autoid, unit=unit, parent=self
            )
            detail.accepted.connect(self.get_observation_record)
            detail.show()
        elif action == action_2:
            if current_item is None:
                return
            id = int(current_item.text(0))
            detail = EditObservationModule(autoid=id, parent=self)
            detail.accepted.connect(self.get_observation_record)
            detail.show()
        elif action == action_3:
            if current_item is None:
                return
            condition = {'autoid': int(current_item.text(0))}
            self.LC.delete_data(7, condition)
            # Column 1 holds the linked lab-record id (empty when none).
            lrid = current_item.text(1)
            if lrid != '':
                self.LC.delete_labrecord_and_detail(int(lrid))
            self.get_observation_record()
        elif action == action_4:
            if self.ori_detail is None or current_item is None:
                return
            if current_item.text(1) == '':
                # No lab record yet: create one from the product's check item.
                if self.checkitem_id is None:
                    prodid = self.ori_detail.ppid.prodid
                    condition = {'prodid': prodid}
                    res = self.PC.get_data(1, True, *VALUES_TUPLE_PD, **condition)
                    if not len(res):
                        return
                    self.checkitem_id = res[0]
                kwargs = {
                    'labtype': 6,
                    'chkid': self.ori_detail.ppid.prodid,
                    'chkname': self.ori_detail.ppid.prodname,
                    'batchno': self.ori_detail.ppid.batchno,
                    'spec': self.ori_detail.ppid.spec,
                    'package': self.ori_detail.ppid.package,
                    'ciid': int(current_item.text(0)),
                    'createdate': user.now_date,
                    'checkamount': self.ori_detail.samplequantity,
                    'caunit': self.ori_detail.unit,
                    'sampleamount': decimal.Decimal(current_item.text(4)),
                    'sampleunit': current_item.text(5),
                }
                lrid = self.LC.create_labrecord(
                    self.checkitem_id, 6, user.now_date, **kwargs
                )
            else:
                lrid = int(current_item.text(1))
            detail = ApplycheckModule(lrid, 6, self)
            # Roll the new lab record back if the apply dialog is rejected.
            detail.rejected.connect(lambda: self.delete_check_report(lrid))
            detail.applyed.connect(detail.accept)
            detail.accepted.connect(self.get_observation_record)
            detail.accepted.connect(self.get_labrecord_list)
            detail.show()
        elif action == action_5:
            if current_item is None:
                return
            lrid = current_item.text(1)
            if lrid != '':
                self.LC.delete_labrecord(int(lrid))
            self.get_observation_record()
        else:
            pass
    def delete_check_report(self, lrid):
        # Roll back a lab record whose check request was rejected, then
        # refresh the observation tree so the UI reflects the deletion.
        self.LC.delete_labrecord(lrid)
        self.get_observation_record()
@pyqtSlot(QTreeWidgetItem, int)
def on_treeWidget_observation_itemDoubleClicked(self, qtreeitem, p_int):
id = int(qtreeitem.text(0))
detail = EditObservationModule(autoid=id, parent=self)
detail.accepted.connect(self.get_observation_record)
detail.show()
@pyqtSlot(QTreeWidgetItem, int)
def on_treeWidget_labrecord_itemDoubleClicked(self, qtreeitem, p_int):
if self.power[1] == '0':
return
id = int(qtreeitem.text(0))
detail = CheckreportModule(id, True, self)
detail.show()
@pyqtSlot(str)
def on_lineEdit_samplequantity_textChanged(self, p_str):
p_data = decimal.Decimal(p_str)
try:
if p_data != self.ori_detail.samplequantity:
self.new_detail['samplequantity'] = p_data
else:
try:
del self.new_detail['samplequantity']
except KeyError:
pass
except ValueError:
self.new_detail['samplequantity'] = p_data
@pyqtSlot(str)
def on_lineEdit_unit_textChanged(self, p_str):
try:
if p_str != self.ori_detail.unit:
self.new_detail['unit'] = p_str
else:
try:
del self.new_detail['unit']
except KeyError:
pass
except ValueError:
self.new_detail['unit'] = p_str
@pyqtSlot(int)
def on_comboBox_kind_currentIndexChanged(self, p_int):
try:
if p_int != self.ori_detail.kind:
self.new_detail['kind'] = p_int
else:
try:
del self.new_detail['kind']
except KeyError:
pass
except ValueError:
self.new_detail['kind'] = p_int
@pyqtSlot()
def on_pushButton_accept_clicked(self):
if not len(self.new_detail):
return
condiition = {'autoid': self.autoid}
self.LC.update_data(6, condiition, **self.new_detail)
self.accept()
    @pyqtSlot()
    def on_pushButton_cancel_clicked(self):
        """Discard any pending edits and close the dialog window."""
        self.close()
# Human-readable labels indexed by lab-record status code.
STATUS = ("待请验", "取样中", "检验中", "合格", "不合格")
# Field tuples passed to the controllers' values()/values_list() queries.
VALUES_TUPLE_LRID = ('autoid',)
VALUES_TUPLE_OB = (
    'autoid', 'obsperiod', 'obsdate', 'samplequantity', 'unit', 'conclusion'
)
VALUES_TUPLE_LR = ('autoid', 'ciid', 'paperno', 'reportdate', 'status')
VALUES_TUPLE_PD = ('autoid', ) | zxcvbnmz0x/gmpsystem | labrecord/modules/editsamplerecorddetailmodule.py | editsamplerecorddetailmodule.py | py | 10,508 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "labrecord.views.editsamplerecorddetail.Ui_Dialog",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "user.powers",
"line_number": 24,
"usage_type": "attribute"
},
... |
34998353566 | """
Created on Thu Mar 17 16:34:46 2022
@author: svein
"""
import speech_recognition as sr
import sounddevice as sd
from scipy.io.wavfile import write
import os
import ffmpeg
from scipy.io import wavfile
import numpy as np
def Speech_to_text():
    """Record ~4 seconds from the default microphone into output.wav and
    return its transcription from the Google Speech API."""
    wav_path = "output.wav"
    # Remove a stale recording so an old take is never transcribed.
    if os.path.isfile(wav_path):
        os.remove(wav_path)
    else:
        print("Error: %s file not found" % wav_path)
    print("recording start")
    sample_rate = 44100  # Hz
    duration = 4         # seconds
    sd.default.dtype = 'int32', 'int32'
    recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2)
    sd.wait()  # block until the recording is complete
    print("recording ended")
    wavfile.write("output.wav", sample_rate, recording)

    def SpeechToText():
        recognizer = sr.Recognizer()
        audio_file = sr.AudioFile("output.wav")
        with audio_file as source:
            print("Wait. Program Starting")
            audio = recognizer.record(source)
            message = recognizer.recognize_google(audio)
            print("Check: "+message)
            return message

    return SpeechToText()
if __name__ == "__main__":
print(Speech_to_text()) | klarahi/Fuzzy_project | voice_recognition.py | voice_recognition.py | py | 1,245 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sounddevice.default",
"line... |
34203743613 | import numpy as np
import torch
import torch.nn as nn
from pytorch_lightning.utilities.rank_zero import _get_rank
import models
from models.base import BaseModel
from models.utils import scale_anything, get_activation, cleanup, chunk_batch
from models.network_utils import get_encoding, get_mlp, get_encoding_with_network
class MarchingCubeHelper(nn.Module):
    """Runs marching cubes on a dense (res, res, res) scalar field whose grid
    spans the unit cube [0, 1]^3.

    use_torch selects the torchmcubes backend (GPU tensors) over the CPU
    `mcubes` package.
    """
    def __init__(self, resolution, use_torch=True):
        super().__init__()
        self.resolution = resolution
        self.use_torch = use_torch
        self.points_range = (0, 1)
        if self.use_torch:
            import torchmcubes
            self.mc_func = torchmcubes.marching_cubes
        else:
            import mcubes
            self.mc_func = mcubes.marching_cubes
        self.verts = None  # cached grid vertices, built lazily
    def grid_vertices(self):
        """Return the (res**3, 3) grid coordinates in [0, 1], cached after the
        first call and moved to this process's device."""
        if self.verts is None:
            x, y, z = torch.linspace(*self.points_range, self.resolution), torch.linspace(*self.points_range, self.resolution), torch.linspace(*self.points_range, self.resolution)
            # NOTE(review): no `indexing=` argument — relies on the legacy
            # 'ij' default, which newer torch versions warn about.
            x, y, z = torch.meshgrid(x, y, z)
            verts = torch.cat([x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], dim=-1).reshape(-1, 3)
            self.verts = verts.to(_get_rank())
        return self.verts
    def forward(self, level, threshold=0.):
        """Extract the `threshold` level set of `level`; returns vertices in
        [0, 1] coordinates and the triangle index tensor."""
        level = level.float().view(self.resolution, self.resolution, self.resolution)
        if self.use_torch:
            verts, faces = self.mc_func(level.to(_get_rank()), threshold)
            verts, faces = verts.cpu(), faces.cpu().long()
        else:
            # mcubes operates on numpy; the sign flip matches the level-set
            # convention used by the torchmcubes path.
            verts, faces = self.mc_func(-level.numpy(), threshold) # transform to numpy
            verts, faces = torch.from_numpy(verts.astype(np.float32)), torch.from_numpy(faces.astype(np.int64)) # transform back to pytorch
        verts = verts / (self.resolution - 1.)  # voxel indices -> [0, 1]
        return {
            'v_pos': verts,
            't_pos_idx': faces
        }
class BaseImplicitGeometry(BaseModel):
    """Base class for implicit geometry fields with marching-cubes mesh
    extraction (subclasses implement forward_level)."""
    def __init__(self, config):
        super().__init__(config)
        if self.config.isosurface is not None:
            assert self.config.isosurface.method in ['mc', 'mc-torch']
            if self.config.isosurface.method == 'mc-torch':
                raise NotImplementedError("Please do not use mc-torch. It currently has some scaling issues I haven't fixed yet.")
            self.helper = MarchingCubeHelper(self.config.isosurface.resolution, use_torch=self.config.isosurface.method=='mc-torch')
    def forward_level(self, points):
        # Subclasses return the scalar level value (e.g. SDF or -density).
        raise NotImplementedError
    def isosurface_(self, vmin, vmax):
        """Extract a mesh inside the axis-aligned box [vmin, vmax]."""
        grid_verts = self.helper.grid_vertices()
        # Map the helper's unit-cube grid into the requested box.
        grid_verts = torch.stack([
            scale_anything(grid_verts[...,0], (0, 1), (vmin[0], vmax[0])),
            scale_anything(grid_verts[...,1], (0, 1), (vmin[1], vmax[1])),
            scale_anything(grid_verts[...,2], (0, 1), (vmin[2], vmax[2]))
        ], dim=-1)
        def batch_func(x):
            # Evaluate the level set in chunks to bound peak GPU memory.
            rv = self.forward_level(x).cpu()
            cleanup()
            return rv
        level = chunk_batch(batch_func, self.config.isosurface.chunk, grid_verts)
        mesh = self.helper(level, threshold=self.config.isosurface.threshold)
        # Map mesh vertices from the unit cube back into the box.
        mesh['v_pos'] = torch.stack([
            scale_anything(mesh['v_pos'][...,0], (0, 1), (vmin[0], vmax[0])),
            scale_anything(mesh['v_pos'][...,1], (0, 1), (vmin[1], vmax[1])),
            scale_anything(mesh['v_pos'][...,2], (0, 1), (vmin[2], vmax[2]))
        ], dim=-1)
        return mesh
    @torch.no_grad()
    def isosurface(self):
        """Extract the mesh in a fixed box slightly inside the scene radius."""
        if self.config.isosurface is None:
            raise NotImplementedError
        # coarse to fine extraction
        # mesh_coarse = self.isosurface_((-self.radius, -self.radius, -self.radius), (self.radius, self.radius, self.radius))
        # if mesh_coarse['v_pos'].shape[0] == 0:
        #     return mesh_coarse
        # vmin, vmax = mesh_coarse['v_pos'].amin(dim=0), mesh_coarse['v_pos'].amax(dim=0)
        # vmin_ = (vmin - (vmax - vmin) * 0.1).clamp(-self.radius, self.radius)
        # vmax_ = (vmax + (vmax - vmin) * 0.1).clamp(-self.radius, self.radius)
        # mesh_fine = self.isosurface_(vmin_, vmax_)
        # extract in a fixed scale
        # mesh_fine = self.isosurface_((-self.radius, -self.radius, -self.radius), (self.radius, self.radius, self.radius))
        # NOTE(review): the 0.2 margin shrinking the extraction box is
        # hard-coded — confirm it suits all scene scales.
        mesh_fine = self.isosurface_((-self.radius + 0.2, -self.radius+ 0.2, -self.radius+ 0.2), (self.radius - 0.2, self.radius - 0.2, self.radius - 0.2))
        return mesh_fine
@models.register('volume-density')
class VolumeDensity(BaseImplicitGeometry):
    """NeRF-style volume density field: encoding+MLP producing a density in
    channel 0 and a feature vector (the full output)."""
    def setup(self):
        self.n_input_dims = self.config.get('n_input_dims', 3)
        self.n_output_dims = self.config.feature_dim
        self.encoding_with_network = get_encoding_with_network(self.n_input_dims, self.n_output_dims, self.config.xyz_encoding_config, self.config.mlp_network_config)
        self.radius = self.config.radius
        self.noises = 0.
        # Std of the raw-density training noise (0 disables it).
        self.raw_noise_std = self.config.get('raw_noise_std', 0.)
    def forward(self, points):
        """Return (density, feature) for world-space `points`."""
        # Normalize from the scene box [-radius, radius] to the unit cube.
        points = scale_anything(points, (-self.radius, self.radius), (0, 1))
        out = self.encoding_with_network(points.view(-1, self.n_input_dims)).view(*points.shape[:-1], self.n_output_dims).float()
        density, feature = out[...,0], out
        if 'density_activation' in self.config:
            if self.raw_noise_std > 0.:
                # Perturb raw density during training (NeRF regularization).
                self.noises = (torch.randn(density.shape) * self.raw_noise_std).to(density)
            density = get_activation(self.config.density_activation)(density + self.noises + float(self.config.density_bias))
        if 'feature_activation' in self.config:
            feature = get_activation(self.config.feature_activation)(feature)
        return density, feature
    def forward_level(self, points):
        """Level-set value for marching cubes (negated so surfaces face out)."""
        points = scale_anything(points, (-self.radius, self.radius), (0, 1))
        density = self.encoding_with_network(points.reshape(-1, self.n_input_dims)).reshape(*points.shape[:-1], self.n_output_dims)[...,0].float()
        if 'density_activation' in self.config:
            density = get_activation(self.config.density_activation)(density + float(self.config.density_bias))
        return -density # caution!!!
    @torch.no_grad()
    def extract_volume(self, res=128):
        """Sample the density on a res^3 grid (in normalized coordinates,
        slightly inset at [0.02, 0.98] to avoid boundary artifacts)."""
        x = torch.linspace(0.02, 0.98, steps=res)
        y = torch.linspace(0.02, 0.98, steps=res)
        z = torch.linspace(0.02, 0.98, steps=res)
        grid_x, grid_y, grid_z = torch.meshgrid(x, y, z, indexing='ij')
        points = torch.cat((grid_x[..., None], grid_y[..., None], grid_z[..., None]), dim=3).to(self.rank) # (res, res, res, 3)
        density = self.encoding_with_network(points.reshape(-1, self.n_input_dims)).reshape(*points.shape[:-1], self.n_output_dims)[...,0].float()
        if 'density_activation' in self.config:
            density = get_activation(self.config.density_activation)(density + float(self.config.density_bias))
        return points, density
@models.register('volume-sdf')
class VolumeSDF(BaseImplicitGeometry):
    """Signed-distance-field geometry (NeuS-style): encoding + MLP whose
    channel 0 is the SDF and whose full output is the feature vector."""
    def setup(self):
        self.n_output_dims = self.config.feature_dim
        encoding = get_encoding(3, self.config.xyz_encoding_config)
        network = get_mlp(encoding.n_output_dims, self.n_output_dims, self.config.mlp_network_config)
        self.encoding, self.network = encoding, network
        self.radius = self.config.radius
        # 'analytic' (autograd) or 'finite_difference' gradient computation.
        self.grad_type = self.config.grad_type
    # NOTE: a previous revision of forward() (see git history) normalized the
    # points first and differentiated w.r.t. the normalized coordinates; the
    # current version keeps `points_` in world scale so gradients are taken
    # w.r.t. world coordinates.
    def forward(self, points, with_grad=True, with_feature=True):
        """Return [sdf(, grad)(, feature)] for world-space `points`.

        Gradients are enabled only when needed (training, or analytic grads
        requested); outputs are detached outside training.
        """
        with torch.inference_mode(torch.is_inference_mode_enabled() and not (with_grad and self.grad_type == 'analytic')):
            with torch.set_grad_enabled(self.training or (with_grad and self.grad_type == 'analytic')):
                if with_grad and self.grad_type == 'analytic':
                    if not self.training:
                        points = points.clone() # points may be in inference mode, get a copy to enable grad
                    points.requires_grad_(True)
                points_ = points # points in the original scale
                points = scale_anything(points_, (-self.radius, self.radius), (0, 1)) # points normalized to (0, 1)
                out = self.network(self.encoding(points.view(-1, 3))).view(*points.shape[:-1], self.n_output_dims).float()
                sdf, feature = out[...,0], out
                if 'sdf_activation' in self.config:
                    sdf = get_activation(self.config.sdf_activation)(sdf + float(self.config.sdf_bias))
                if 'feature_activation' in self.config:
                    feature = get_activation(self.config.feature_activation)(feature)
                if with_grad:
                    if self.grad_type == 'analytic':
                        grad = torch.autograd.grad(
                            sdf, points_, grad_outputs=torch.ones_like(sdf),
                            create_graph=True, retain_graph=True, only_inputs=True
                        )[0]
                    elif self.grad_type == 'finite_difference':
                        eps = 0.001
                        # Six axis-aligned offsets for central differences.
                        # NOTE(review): clamp(0, 1) is applied to WORLD-scale
                        # points here (the old normalized-space code kept the
                        # same clamp) — confirm this is intentional.
                        points_d_ = torch.stack([
                            points_ + torch.as_tensor([eps, 0.0, 0.0]).to(points_),
                            points_ + torch.as_tensor([-eps, 0.0, 0.0]).to(points_),
                            points_ + torch.as_tensor([0.0, eps, 0.0]).to(points_),
                            points_ + torch.as_tensor([0.0, -eps, 0.0]).to(points_),
                            points_ + torch.as_tensor([0.0, 0.0, eps]).to(points_),
                            points_ + torch.as_tensor([0.0, 0.0, -eps]).to(points_)
                        ], dim=0).clamp(0, 1)
                        points_d = scale_anything(points_d_, (-self.radius, self.radius), (0, 1))
                        points_d_sdf = self.network(self.encoding(points_d.view(-1, 3)))[...,0].view(6, *points.shape[:-1]).float()
                        grad = torch.stack([
                            0.5 * (points_d_sdf[0] - points_d_sdf[1]) / eps,
                            0.5 * (points_d_sdf[2] - points_d_sdf[3]) / eps,
                            0.5 * (points_d_sdf[4] - points_d_sdf[5]) / eps,
                        ], dim=-1)
                rv = [sdf]
                if with_grad:
                    rv.append(grad)
                if with_feature:
                    rv.append(feature)
                rv = [v if self.training else v.detach() for v in rv]
                return rv[0] if len(rv) == 1 else rv
    def forward_level(self, points):
        """SDF value used as the marching-cubes level set."""
        points = scale_anything(points, (-self.radius, self.radius), (0, 1))
        sdf = self.network(self.encoding(points.view(-1, 3))).view(*points.shape[:-1], self.n_output_dims)[...,0].float()
        if 'sdf_activation' in self.config:
            sdf = get_activation(self.config.sdf_activation)(sdf + float(self.config.sdf_bias))
        return sdf
| 3dlg-hcvc/paris | models/geometry.py | geometry.py | py | 13,820 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torchmcubes.marching_cubes",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "mcubes.... |
1947036421 | from collections import defaultdict
def solution(genres, plays):
    """Return song ids for a "best album": genres ordered by total plays,
    and within each genre the top two songs by plays (lower id wins ties).

    BUG FIX: the previous version recovered song ids via plays.index(count),
    which returns the FIRST song anywhere with that play count — wrong when
    songs in different genres share a count (its plays[...] = -1 patch only
    handled ties inside one genre). Indices are now carried explicitly.
    """
    by_genre = defaultdict(list)
    for idx, (genre, count) in enumerate(zip(genres, plays)):
        by_genre[genre].append((count, idx))
    answer = []
    # Genres with the largest total play count come first.
    for _, songs in sorted(by_genre.items(),
                           key=lambda kv: -sum(c for c, _ in kv[1])):
        # Most-played first; equal plays broken by the smaller song id.
        songs.sort(key=lambda s: (-s[0], s[1]))
        answer.extend(idx for _, idx in songs[:2])
    return answer
| hellokena/2022 | 프로그래머스/LV2/LV3_베스트앨범(해시).py | LV3_베스트앨범(해시).py | py | 862 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
}
] |
9634671657 | import argparse
# Parse arguments: positional input text and a repetition count (as strings).
parser = argparse.ArgumentParser()
parser.add_argument("text")
parser.add_argument("repetitions")
args = parser.parse_args()
# Convert repetitions to integer; exit with status 1 on bad input.
try:
    text = args.text
    repetitions = int(args.repetitions)
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # `except ValueError:` would be safer here.
    quit(1)
# Create the repeated input text and write it to a file; exit with status 1
# when the count is non-positive or the text is empty.
if repetitions > 0 and len(text) > 0:
    output_text = text * repetitions
    with open("output.txt", "w") as outfile:
        outfile.write(output_text)
else:
quit(1) | jdwijnbergen/CWL_workshop | 3_create-text-file.py | 3_create-text-file.py | py | 518 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
}
] |
23597401890 | from tkinter import *
import mysql.connector
import matplotlib.pyplot as plt
import csv
# Main application window.
root = Tk()
root.title('VINCI FarmDB')
root.geometry("400x700")
root.iconbitmap('Logo.ico')
# Connect to the MySQL Server
mydb = mysql.connector.connect(
    host="localhost",
    user = "", #Enter Your Username
    passwd = "", #Enter Your Password
    database = "warehouse"
)
#FUNCTIONS
#Clear Field
def clear_field():
    """Empty every entry box of the main form."""
    nbox.delete(0,END)
    abox.delete(0,END)
    pbox.delete(0,END)
    qbox.delete(0,END)
    debox.delete(0,END)
    p1box.delete(0,END)
    p2box.delete(0,END)
    dabox.delete(0,END)
    tbox.delete(0,END)
    arbox.delete(0,END)
#Add Data to Database
def add_data():
    """Insert the current form contents as a new row of `master` (using a
    parameterized query), commit, and clear the form."""
    sql_command = "INSERT INTO master (name,aadno,ph,catg,quant,des,plts,plte,date,intt,area) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    values = (nbox.get(), abox.get(), pbox.get(), clicked.get(), qbox.get(), debox.get(), p1box.get(), p2box.get(), dabox.get(), tbox.get(), arbox.get())
    cursor.execute(sql_command, values)
    mydb.commit()
    clear_field()
#View Database
def view_db():
    """Open a window listing every row of `master` with a CSV-export button."""
    view = Tk()
    view.title("List of All Stock In Warehouse")
    view.geometry("800x600")
    view.iconbitmap('Logo.ico')
    cursor.execute("SELECT * FROM master")
    result = cursor.fetchall()
    head = ['Name','AadharNo','PhNo','Type','Quantity','Description','PlotNo(Start)','PlotNo(End)','Date','InTime','Area']
    for col, title in enumerate(head):
        hl = Label(view, text=title, fg="red")
        hl.grid(row=0, column=col)

    def wtocsv(rows):
        # newline='' is required by the csv module so the writer does not
        # emit blank lines on Windows.
        with open('Warehouse.csv', 'a', newline='') as f:
            w = csv.writer(f, dialect='excel')
            for record in rows:
                w.writerow(record)

    for index, record in enumerate(result):
        for col, value in enumerate(record):
            ll = Label(view, text=value)
            ll.grid(row=index + 1, column=col)
    # BUG FIX: the export button used to be (re)created inside the row loop,
    # which crashed with NameError on an empty table (`index` undefined) and
    # pointlessly rebuilt the button per row. Create it once, after the rows.
    csv_b = Button(view, text="Save as Excel", command=lambda: wtocsv(result))
    csv_b.grid(row=len(result) + 1, column=0)
#Search Warehouse Function
def search_db():
    """Open a window for looking up warehouse rows by Aadhar number."""
    search = Tk()
    search.title("List of All Stock In Warehouse")
    search.geometry("800x600")
    search.iconbitmap('Logo.ico')
    def search_now():
        """Run the lookup with the entered number and render the results."""
        ans = searchbox.get()
        sql = "SELECT * FROM master WHERE aadno = %s"
        ano = (ans, )
        result = cursor.execute(sql,ano)
        result = cursor.fetchall()
        if not result:
            result = "No Record Found"
        if result =="No Record Found":
            ansl = Label(search, text=result)
            ansl.grid(row=2,column=0,padx=10)
        else:
            # Header row, then one grid row per matching record.
            n1=0
            head = ['Name','AadharNo','PhNo','Type','Quantity','Description','PlotNo(Start)','PlotNo(End)','Date','InTime','Area']
            for i in head:
                hl = Label(search,text=i,fg="red")
                hl.grid(row=3,column=n1)
                n1+=1
            for index, x in enumerate(result):
                num = 0
                for y in x:
                    ll = Label(search, text = y)
                    ll.grid(row=index+4, column=num)
                    num+=1
    searchbox = Entry(search)
    searchbox.grid(row=0,column=1,padx=10,pady=10)
    slabel = Label(search, text="Enter Aadhar No:")
    slabel.grid(row=0,column=0, padx=10,pady=10)
    sb = Button(search, text="Search Warehouse", command=search_now)
    sb.grid(row=1,column=0,padx=10,pady=10)
#Updating the Database
def update_db():
    """Open a window to load a record by Aadhar number, edit it in a form,
    and write the changes back to `master`."""
    udate = Tk()
    udate.title("Update Warehouse")
    udate.geometry("800x600")
    udate.iconbitmap('Logo.ico')
    def update_now():
        """Fetch the record for the entered Aadhar number and build the
        pre-filled edit form."""
        ans = searchbox.get()
        sql = "SELECT * FROM master WHERE aadno = %s"
        ano = (ans, )
        result = cursor.execute(sql,ano)
        result = cursor.fetchall()
        # Form labels (column 0).
        name = Label(udate,text="Name").grid(row=2,column=0,sticky=W,padx=10)
        aadno = Label(udate,text="Aadhar Number").grid(row=2+1,column=0,sticky=W,padx=10)
        ph = Label(udate,text="Phone Number").grid(row=3+1,column=0,sticky=W,padx=10)
        catg = Label(udate,text="Type").grid(row=4+1,column=0,sticky=W,padx=10)
        quant = Label(udate,text="Quantity").grid(row=5+1,column=0,sticky=W,padx=10)
        des = Label(udate,text="Description").grid(row=6+1,column=0,sticky=W,padx=10)
        plts = Label(udate,text="Plot Number START").grid(row=7+1,column=0,sticky=W,padx=10)
        plte = Label(udate,text="Plot Number END").grid(row=8+1,column=0,sticky=W,padx=10)
        date = Label(udate,text="Date").grid(row=9+1,column=0,sticky=W,padx=10)
        Time = Label(udate,text="Time").grid(row=10+1,column=0,sticky=W,padx=10)
        area = Label(udate,text="Area Occupied").grid(row=11+1,column=0,sticky=W,padx=10)
        #Creating Input Boxes (pre-filled from the fetched record).
        nbox = Entry(udate)
        nbox.grid(row=1+1,column=1)
        nbox.insert(0,result[0][0])
        abox = Entry(udate)
        abox.grid(row=2+1,column=1,pady = 5)
        abox.insert(0,result[0][1])
        pbox = Entry(udate)
        pbox.grid(row=3+1,column=1,pady = 5)
        pbox.insert(0,result[0][2])
        clicked = StringVar()
        clicked.set("Livestock")
        cbox = OptionMenu(udate, clicked, "Livestock", "Grains", "Fruits", "Vegetable", "Fertilizers", "Milk", "Tools")
        cbox.grid(row=4+1,column=1,pady = 5)
        qbox = Entry(udate)
        qbox.grid(row=5+1,column=1,pady = 5)
        qbox.insert(0,result[0][4])
        debox = Entry(udate)
        debox.grid(row=6+1,column=1,pady = 5)
        debox.insert(0,result[0][5])
        p1box = Entry(udate)
        p1box.grid(row=7+1,column=1,pady = 5)
        p1box.insert(0,result[0][6])
        p2box = Entry(udate)
        p2box.grid(row=8+1,column=1,pady = 5)
        p2box.insert(0,result[0][7])
        dabox = Entry(udate)
        dabox.grid(row=9+1,column=1,pady = 5)
        dabox.insert(0,result[0][8])
        tbox = Entry(udate)
        tbox.grid(row=10+1,column=1,pady = 5)
        tbox.insert(0,result[0][9])
        arbox = Entry(udate)
        arbox.grid(row=11+1,column=1,pady = 5)
        arbox.insert(0,result[0][10])
        def update_two():
            """Write the edited values back (keyed by Aadhar no) and close."""
            sql_command = """UPDATE master SET name = %s,ph = %s,catg = %s,quant = %s,des = %s,plts = %s,plte = %s,date = %s,intt = %s,area = %s WHERE aadno = %s"""
            values = (nbox.get(), pbox.get(), clicked.get(), qbox.get(), debox.get(), p1box.get(), p2box.get(), dabox.get(), tbox.get(), arbox.get(),abox.get())
            cursor.execute(sql_command, values)
            mydb.commit()
            udate.destroy()
        up = Button(udate,text="Update Record",command=update_two)
        up.grid(row=13,column=0)
    searchbox = Entry(udate)
    searchbox.grid(row=0,column=1,padx=10,pady=10)
    slabel = Label(udate, text="Enter Aadhar No:")
    slabel.grid(row=0,column=0, padx=10,pady=10)
    sb = Button(udate, text="Update Person With AadharNo", command=update_now)
    sb.grid(row=1,column=0,padx=10,pady=10)
#Plotting Functions
def occupied_graph():
    """Show a pie chart of occupied vs. unoccupied warehouse area (total
    capacity is assumed to be 100 units)."""
    cursor.execute("SELECT SUM(area) FROM master")
    val = cursor.fetchall()
    # BUG FIX: SUM() returns NULL (None) on an empty table, which previously
    # crashed `100 - None`; treat that as zero occupied area.
    val1 = val[0][0] or 0
    val2 = 100 - val1
    label = 'Occupied' , 'Unoccupied'
    sizes = [val1 , val2]
    explode = (0.1,0)  # pull the 'Occupied' slice out slightly
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, explode=explode, labels = label,autopct = '%1.1f%%',shadow=True, startangle = 90)
    ax1.axis('equal')  # equal aspect so the pie is a circle
    plt.title("Occupancy Chart")
    plt.show()
def cateo_chart():
    """Show a pie chart of occupied area per stock category."""
    # BUG FIX: the old query selected only SUM(area) grouped by catg and then
    # zipped the sums with a hard-coded 7-label order; GROUP BY output order
    # is not guaranteed and the code raised IndexError when fewer than 7
    # categories existed. Select the category name alongside its sum instead.
    cursor.execute("SELECT catg, SUM(area) FROM master GROUP BY catg")
    rows = cursor.fetchall()
    label = tuple(r[0] for r in rows)
    sizes = [r[1] for r in rows]
    # Pull only the first slice out, as before.
    explode = tuple(0.1 if i == 0 else 0 for i in range(len(rows)))
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, explode=explode, labels = label,autopct = '%1.1f%%',shadow=True, startangle = 90)
    ax1.axis('equal')
    plt.title("Category Wise Occupancy Chart")
    plt.show()
#Calculate Cost
def cal_cost():
    # TODO: storage-cost calculation is not implemented yet; the button is a
    # placeholder and this stub intentionally does nothing.
    return
#Cursor for MySQL
# Shared cursor used by all the handlers above.
cursor = mydb.cursor()
#Creating Database (one-time setup; kept for reference)
# cursor.execute("CREATE DATABASE warehouse")
#Creating the Table (one-time setup; kept for reference)
# cursor.execute("CREATE TABLE master(name VARCHAR(255),aadno INT(12) PRIMARY KEY,ph INT(10),catg VARCHAR(255),quant INT(10),des TEXT,plts INT(10),plte INT(10),date DATE,intt TIME,area INT(10))")
tlt_label = Label(root, text="VINCI FarmDB",font=("Times","24","bold"))
tlt_label.grid(row=0,column=0,columnspan=2,pady="10")
#Creating the Form
# NOTE(review): Label(...).grid(...) returns None, so these names hold None;
# harmless since they are never reused.
name = Label(root,text="Name").grid(row=1,column=0,sticky=W,padx=10)
aadno = Label(root,text="Aadhar Number").grid(row=2,column=0,sticky=W,padx=10)
ph = Label(root,text="Phone Number").grid(row=3,column=0,sticky=W,padx=10)
catg = Label(root,text="Type").grid(row=4,column=0,sticky=W,padx=10)
quant = Label(root,text="Quantity").grid(row=5,column=0,sticky=W,padx=10)
des = Label(root,text="Description").grid(row=6,column=0,sticky=W,padx=10)
plts = Label(root,text="Plot Number START").grid(row=7,column=0,sticky=W,padx=10)
plte = Label(root,text="Plot Number END").grid(row=8,column=0,sticky=W,padx=10)
date = Label(root,text="Date").grid(row=9,column=0,sticky=W,padx=10)
Time = Label(root,text="Time").grid(row=10,column=0,sticky=W,padx=10)
area = Label(root,text="Area Occupied").grid(row=11,column=0,sticky=W,padx=10)
#Creating Input Boxes (read by add_data/clear_field above)
nbox = Entry(root)
nbox.grid(row=1,column=1)
abox = Entry(root)
abox.grid(row=2,column=1,pady = 5)
pbox = Entry(root)
pbox.grid(row=3,column=1,pady = 5)
clicked = StringVar()
clicked.set("Livestock")
cbox = OptionMenu(root, clicked, "Livestock", "Grains", "Fruits", "Vegetable", "Fertilizers", "Milk", "Tools")
cbox.grid(row=4,column=1,pady = 5)
qbox = Entry(root)
qbox.grid(row=5,column=1,pady = 5)
debox = Entry(root)
debox.grid(row=6,column=1,pady = 5)
p1box = Entry(root)
p1box.grid(row=7,column=1,pady = 5)
p2box = Entry(root)
p2box.grid(row=8,column=1,pady = 5)
dabox = Entry(root)
dabox.grid(row=9,column=1,pady = 5)
tbox = Entry(root)
tbox.grid(row=10,column=1,pady = 5)
arbox = Entry(root)
arbox.grid(row=11,column=1,pady = 5)
#Buttons wiring the handlers defined above
add_b = Button(root, text="Add to Warehouse", command=add_data)
add_b.grid(row=12,column=0,padx=10,pady=10)
clear_b = Button(root, text="Clear Data", command=clear_field)
clear_b.grid(row=12,column=1)
view_b = Button(root, text="View The Entire Warehouse", command=view_db)
view_b.grid(row=13,column=0,sticky=W,padx=10)
search_b = Button(root, text="Search Warehouse", command=search_db)
search_b.grid(row=13,column=1, sticky=W, padx=10)
update_b = Button(root,text="Warehouse Update", command=update_db)
update_b.grid(row=14,column=0,sticky=W,padx=10,pady=10)
plot1 = Label(root,text="Plotting Functions",fg="red")
plot1.grid(row=15,column=0)
occ = Button(root,text="Occupancy Chart",command=occupied_graph)
occ.grid(row=16,column=0,sticky=W,padx=10,pady=10)
cato = Button(root,text="Category Chart",command=cateo_chart)
cato.grid(row=16,column=1,sticky=W,padx=10,pady=10)
plot2 = Label(root,text="Cost Calculator",fg="red")
plot2.grid(row=17,column=0)
cost_b = Button(root,text="Calculate Cost",command=cal_cost)
cost_b.grid(row=18,column=0,sticky=W,padx=10,pady=10)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
| murali22chan/Aatmanirbhar-Bharat-Hackathon | main.py | main.py | py | 10,762 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 11,
"usage_type": "name"
},
{
"... |
21126149841 | """The core of p2pg."""
import logging
from threading import Lock
from .conf import conf, dump_after
__author__ = 'Michael Bradley <michael@sigm.io>'
__copyright__ = 'GNU General Public License V3'
__copy_link__ = 'https://www.gnu.org/licenses/gpl-3.0.txt'
__website__ = 'https://p2pg.sigm.io/'
__support__ = 'https://p2pg.sigm.io/support/'
# Package metadata gathered into one mapping (e.g. for an about screen).
info_form = {
    'author': __author__,
    'copyright': __copyright__,
    'copy-link': __copy_link__,
    'website': __website__,
    'support': __support__
}
# Module-level logger for the core package.
log = logging.getLogger(__name__)
class StopException(Exception):
    """Raised to signal shutdown; carries the triggering reason."""
    def __init__(self, reason):
        super().__init__()
        # Log at raise time so every shutdown cause appears in the log.
        log.info('stop exception raised because of %s', reason)
        self.reason = reason
class StateTracker:
    """Thread-safe holder for a single state value.

    Calling the instance with no arguments reads the current value; calling
    it with one argument stores that value (and returns None).
    """

    def __init__(self, n_state):
        self._val = n_state
        self._lock = Lock()

    def __call__(self, *value):
        with self._lock:
            if not value:
                return self._val
            self._val = value[0]
# variable meant to be changed by main as signal to threads
# Unique sentinel objects: compared by identity, never accidentally equal.
STARTING = object()
RUNNING = object()
STOPPING = object()
# Shared, lock-protected current state; starts unset (None).
state = StateTracker(None)
| TheDocTrier/p2pg | core/__init__.py | __init__.py | py | 1,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "threading.Lock",
"line_number": 37,
"usage_type": "call"
}
] |
39914557124 | from fastapi import APIRouter
from utils import model
from utils.socket import socket_connection
from services.event_service import write_log, write_video_log
from utils.plc_controller import *
from services.camera_service import camera_service
import time
import threading
router = APIRouter(prefix="/event")


@router.post("")
async def post_event(event: model.Event):
    """Handle a camera alert event.

    If the event falls inside the camera's active window, broadcast it on the
    'alert' websocket channel and switch on the camera's PLC-driven warning
    lights in a background thread; returns "success"/"fail" accordingly.
    """
    camera = camera_service.get_by_id(event.camera_id)
    current_time = event.dict()['timestamp']
    if current_time > camera['start_time'] and current_time < camera['end_time']:
        await socket_connection.send_data(
            channel="alert",
            data=event.dict()
        )

        def connect_plc():
            # NOTE(review): this first controller uses a hard-coded IP and
            # modbus address before the per-camera devices below — looks like
            # leftover test code; confirm it is intentional.
            plc_controller_config = PLCControllerConfig(
                plc_ip_address="192.168.1.250",
                plc_port=502,
                plc_address=1,
                modbus_address=8212
            )
            _plc_controller = PLCController(plc_controller_config)
            time.sleep(0.02)
            _plc_controller.turn_on()
            if camera is not None:
                plc_ip = camera['plc']['ip']
                list_config = {}
                # Turn on every light ("Den") device configured for this PLC.
                for i, device in enumerate(camera['plc']['device']):
                    if "Den" in device['device_name']:
                        plc_controller_config = PLCControllerConfig(
                            plc_ip_address=plc_ip,
                            plc_port=502,
                            plc_address=1,
                            modbus_address=device['var']
                        )
                        _plc_controller = PLCController(plc_controller_config)
                        time.sleep(0.02)
                        _plc_controller.turn_on()

        # Run the PLC communication off the event loop.
        background_thread = threading.Thread(target=connect_plc)
        background_thread.start()
        return "success"
    return "fail"
@router.post('/video')
async def save_log(event: model.EventVideo):
print(event)
event = write_video_log(event)
return event
| ngocthien2306/be-cctv | src/router/event_router.py | event_router.py | py | 2,107 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.model.Event",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "utils.model",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "services.camera_... |
33078309482 | from flask import request
from flask.ext.babel import Babel
from tweetmore import app
import re
babel = Babel(app)
# *_LINK_LENGTH constants must be get from help/configuration/short_url_length daily
# last update 14th November 2013
TWITTER_HTTPS_LINK_LENGTH = 23
TWITTER_HTTP_LINK_LENGTH = 22
TWITTER_MEDIA_LINK_LENGTH = 23
CONTINUATION_CHARARCTERS = u'… '
MAX_STATUS_TEXT_LENGTH = 140 - TWITTER_MEDIA_LINK_LENGTH - 1
# RegEx source: http://daringfireball.net/2010/07/improved_regex_for_matching_urls
url_regex_pattern = r"(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'.,<>?«»“”‘’]))"
url_regex = re.compile(url_regex_pattern, re.I | re.M | re.U)
url_regex_pattern_no_protocol = r"(\w+\.(aero|asia|biz|cat|com|coop|edu|gov|info|int|jobs|mil|mobi|museum|name|net|org|pro|tel|travel|xxx){1}(\.(ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|za|zm|zw)){0,1})"
url_regex_no_protocol = re.compile(url_regex_pattern_no_protocol, re.I | re.M | re.U)
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(app.config['LANGUAGES'].keys())
def get_remaining_chars(max_status_length, mentions, urls):
remaining_chars = max_status_length
remaining_chars -= len(' '.join(mentions))
#urls get shortened, and space seperated.
remaining_chars -= sum([get_short_url_length(url)+1 for url in urls])
#for ellipsis and space character
remaining_chars -= len(CONTINUATION_CHARARCTERS)
return remaining_chars
def get_status_text(tweet):
# Twitter also left-strips tweets
tweet = tweet.strip()
#reserve a place for the picture we're going to post
max_status_length=MAX_STATUS_TEXT_LENGTH
if(len(tweet)<(max_status_length)):
return tweet
urls = get_urls(tweet)
mentions = get_mentions_and_hashtags(tweet)
words = tweet.split(' ')
remaining_chars = get_remaining_chars(max_status_length, mentions, urls)
shortened_words = []
#if remaining characters is less than length of the cont. characters, don't bother
if(remaining_chars>len(CONTINUATION_CHARARCTERS)):
overflowed = False
for index, word in enumerate(words):
#length of an url is not len(word), but TWITTER_HTTP(s)_LINK_LENGTH
if (len(word)<remaining_chars or (word in urls and get_short_url_length(word)<remaining_chars)):
if(word in urls):
urls.remove(word)
shortened_words.append(word)
remaining_chars += len(word) - get_short_url_length(word)
elif(word in mentions):
shortened_words.append(word)
mentions.remove(word)
else:
shortened_words.append(word)
remaining_chars -= len(word) +1
else:
remaining_chars+=1 #for the space that doesn't exist (at the end)
overflowed = True
break
#append ellipsis to the last word
# CAUTION! below print causes unsolved encoding errors in (unknown)edge cases! Use in local only, if even necessary.
# print len(words), index, word, remaining_chars
if (len(shortened_words)>0 and overflowed):
shortened_words[-1] += CONTINUATION_CHARARCTERS
status = ' '.join(shortened_words)
# If there is no direct mention let urls appear before mentions
if tweet[0] != '@':
status += ' '.join(wrap_status_elements(urls+mentions))
else:
status += ' '.join(wrap_status_elements(mentions+urls))
# check if tweet is directly targeted to someone<br>
# If tweet is not directly targeted to someone than don't let a mention appear
# at the start of the line
if tweet[0] != '@' and len(mentions) > 0 and len(urls) == 0:
if status[0]=='@':
status = '.' + status
if(len(status)==0):
status = ''
return status
def wrap_status_elements(elements):
"""Discards elements who, when concatenated, would exceed twitter's status length"""
remaining_chars = MAX_STATUS_TEXT_LENGTH
wrapped = []
for element in elements:
if(len(element)<remaining_chars):
wrapped.append(element)
#if element is an url, it will get shortened to TWITTER_HTTP(S)_LINK_LENGTH
element_length = len(element) if element[0]=='#' or element[0]=='@' else get_short_url_length(element)
remaining_chars -= (element_length + 1)
return wrapped
def get_mentions_and_hashtags(tweet):
words = tweet.replace('\n', ' ').split(' ')
return [word for word in words if len(word)>0 and (word[0]=='@' or word[0]=='#')]
def get_urls(tweet):
return list(group[0] for group in url_regex.findall(tweet) ) + list(group[0] for group in url_regex_no_protocol.findall(tweet) )
def get_short_url_length(long_url):
if(long_url.startswith('https://')):
return TWITTER_HTTPS_LINK_LENGTH
return TWITTER_HTTP_LINK_LENGTH # maybe http, ftp or smth. else | dedeler/tweet-more | tweetmore/views/utils.py | utils.py | py | 5,555 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.ext.babel.Babel",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tweetmore.app",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_num... |
8635223611 | import calendar
from datetime import date
from django.contrib.auth import get_user_model
from django.core.cache import cache
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import *
from .models import *
User = get_user_model()
# 한상 식단 리스트 View
class TableListAPI(generics.ListAPIView):
serializer_class = TableSerializer
permission_classes = (IsAuthenticated,)
def get_queryset(self):
queryset = cache.get('table_list')
if not queryset:
tables = Table.objects.all()
if not tables:
return ""
cache.set('table_list', tables)
queryset = cache.get('table_list')
return queryset
# 이번 달 식단 리스트 View
class MonthlyTableListAPI(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
serializer_class = TableSerializer
def get_queryset(self):
queryset = cache.get('monthly_table_list')
if not queryset:
monthrange = calendar.monthrange(date.today().year, date.today().month)
from_date = date.today().replace(day=1)
to_date = date.today().replace(day=monthrange[1])
tables = Table.objects.filter(date__range=[from_date, to_date])
if not tables:
return ""
cache.set('monthly_table_list', tables)
queryset = cache.get('monthly_table_list')
return queryset
# 식단 검색 View
class TableSearchAPI(generics.ListAPIView):
serializer_class = TableSerializer
permission_classes = (IsAuthenticated,)
# Need Additional Parameter
def get_queryset(self):
if self.request.GET.get('keywords'):
keywords = self.request.GET.get('keywords')
queryset = Table.objects.filter(dietary_composition__icontains=keywords)
return queryset
else:
return ""
# 메인페이지 View(Calendar + Table Log for User)
class MainPageAPI(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
# Calendar
cal = calendar.monthrange(date.today().year, date.today().month)
# Table Log
user_monthly_log = TableLog.objects.filter(
user=request.user,
date__range=[date.today().replace(day=1), date.today().replace(day=cal[1])]
)
serializers = TableLogSerializer(user_monthly_log, many=True)
log_data = {
"calendar": cal,
"userLog": serializers.data
}
return Response(log_data, status=status.HTTP_200_OK)
# Add New Table Log View
class MakeTableLogAPI(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request):
serializer = MakeTableLogSerializer(data=request.data)
if serializer.is_valid():
given_pk = serializer.data["table_pk"]
given_meal_time = serializer.data["meal_time"]
try:
table_log = TableLog.objects.get(
user=request.user,
date=date.today(),
time=given_meal_time
)
table_log.table = Table.objects.get(pk=given_pk)
table_log.save()
return Response({
"message": "변경되었습니다.",
"tableLog": TableLogSerializer(table_log).data
}, status=status.HTTP_202_ACCEPTED)
except ObjectDoesNotExist:
table_log = TableLog.objects.create(
table=Table.objects.get(pk=given_pk),
user=request.user,
date=date.today(),
time=given_meal_time
)
return Response(
{
"message": "저장되었습니다.",
"tableLog": TableLogSerializer(table_log).data
},
status=status.HTTP_201_CREATED
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| hanoul1124/healthcare2 | app/tables/apis.py | apis.py | py | 4,279 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 17,
"usage_type": "n... |
41847098946 | from telegram.ext import Updater
from telegram.ext import CommandHandler, CallbackQueryHandler
from telegram.ext import MessageHandler, Filters
import os
import square
import telegram
#initialize updater and dispatcher
updater = Updater(token='TOKEN', use_context=True)
dispatcher = updater.dispatcher
def start(update, context):
''' Replies with a Generic mesage to /start and /help commands'''
context.bot.send_message(chat_id = update.message.chat_id, text = "I'm Square It bot! Send me an image and I'll "
"square it for you!")
def Square_It(update, context):
''' Saves picture locally and asks the user for the color of padding '''
#Download photo
image = context.bot.getFile(update.message.photo[-1].file_id)
FILE_NAME = os.path.join(os.getcwd(), f"{image.file_id}.jpg")
image.download(custom_path = FILE_NAME)
#save path in file
with open("name.txt", 'w') as f:
f.write(FILE_NAME)
#Custom inline keyboard to present an option of black or white padding for
#squared image
custom_keyboard = [[telegram.InlineKeyboardButton('White', callback_data = 'White')],
[telegram.InlineKeyboardButton('Black', callback_data = 'Black')]]
reply_markup = telegram.InlineKeyboardMarkup(custom_keyboard)
context.bot.send_message(chat_id=update.message.chat_id,
text="Please choose the background colour",
reply_markup=reply_markup)
def callback(update, context):
'''
Sends the square image according to the
padding color choice of user.
'''
query = update.callback_query
colour = query.data #selected color as per user input
query.edit_message_text(text=f"Selected option: {colour}")
#get File path
with open("name.txt", 'r') as f:
FILE_NAME = f.read()
FILE_NAME = FILE_NAME.strip()
square.square_image(FILE_NAME, colour)
file = open(FILE_NAME, 'rb')
context.bot.send_photo(caption = "Here you go!", chat_id = query.message.chat_id, photo = file)
file.close()
os.remove(FILE_NAME)
os.remove('name.txt')
#Create Handlers
start_handler = CommandHandler(['start', 'help'], start)
photo_handler = MessageHandler(Filters.photo, Square_It)
callback_handler = CallbackQueryHandler(callback)
#Deploy Handlers
dispatcher.add_handler(start_handler)
dispatcher.add_handler(photo_handler)
dispatcher.add_handler(callback_handler)
#Check For updates
updater.start_polling() | sethiojas/Square_It_Bot | bot.py | bot.py | py | 2,367 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "telegram.ext.Updater",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_n... |
6786801031 | import unittest
from local import EXOLEVER_HOST
import requests
class ChatUserTest(unittest.TestCase):
def do_login(self):
url = '/api/accounts/login/'
prefix = ''
url = EXOLEVER_HOST + prefix + url
data = {
'username': 'gorkaarrizabalaga@example.com',
'password': '.eeepdExO'
}
return requests.post(url, data)
def get_messages(self, token, user_to=None):
url = '/api/conversations/'
prefix = '/conversations'
url = EXOLEVER_HOST + prefix + url
headers = {'Authorization': token}
params = {}
if user_to:
params['user_to'] = user_to
return requests.get(url, params=params, headers=headers)
def get_user_detail(self, token, slug):
url = '/api/profile-public/{}/'.format(slug)
prefix = ''
url = EXOLEVER_HOST + prefix + url
headers = {'Authorization': token}
return requests.get(url, headers=headers)
def get_token(self):
login_data = self.do_login()
user = login_data.json().get('token')
token = 'Bearer ' + user
return token
def test_start_conversation(self):
token = self.get_token()
response = self.get_user_detail(token, 'naina-lavrova')
self.assertEqual(response.status_code, 200)
user_pk = response.json().get('pk')
url = EXOLEVER_HOST + '/api/profile/{}/start-conversation/'.format(user_pk)
data = {'message': 'hello', 'files': []}
response = requests.post(url, data=data, headers={'Authorization': token})
self.assertEqual(response.status_code, 201)
response = self.get_messages(token)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json(), 1))
response = self.get_messages(token, user_to=response.json().get('uuid'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json(), 1))
| tomasgarzon/exo-services | service-exo-broker/tests/test_chat_user.py | test_chat_user.py | py | 1,990 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "local.EXOLEVER_HOST",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "local.EXOLEVE... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.