seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22439203560 | import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from ttkbootstrap.dialogs import Dialog
from gui.realtime_graph import RealTimeGraph
import matplotlib.animation as animation
from gui.animation import Animation, network_traffic_in_filler, network_traffic_out_filler
from models.agents import Agent
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from gui.form import create_entry_with_label, create_form
from models.alert import Alert
# Three form "pages": connection basics, privacy settings, auth settings.
# Each entry maps a field name -> label/default consumed by gui.form.create_form.
create_agent_form = [{
    "host_ip": {"title": "IP", "default": "0.0.0.0"},
    "snmp_version": {"title": "SNMP version", "default": "3"}
}, {
    "security_username": {"title": "Security username", "default": ""},
    "privacy_password": {"title": "Privacy password", "default": ""},
    "privacy_protocol": {"title": "Privacy protocol", "default": ""},
}, {
    "auth_password": {"title": "Auth password", "default": ""},
    "auth_protocol": {"title": "Auth protocol", "default": ""},
}]
# Module-level animations: MainScreen embeds the figures, start() drives the
# refresher callbacks via matplotlib FuncAnimation.
traffic_in_animation = Animation(
    "Network In Traffic", ylabel="Traffic In Rate (MBps)")
traffic_in_refresher = traffic_in_animation.create_animation(
    network_traffic_in_filler)
traffic_out_animation = Animation(
    "Network Out Traffic", ylabel="Traffic Out Rate (MBps)")
traffic_out_refresher = traffic_out_animation.create_animation(
    network_traffic_out_filler)
class CreateAgentDialog(Dialog):
    """Modal dialog that collects SNMP agent settings and stores a new Agent."""

    def __init__(self, parent=None, title='', alert=False):
        super().__init__(parent, title, alert)
        # Filled by create_body(); maps field name -> entry widget.
        self.entries = {}

    def create_body(self, master):
        """Build the form area; returns the frame holding the entry widgets."""
        frame = ttk.Frame(master=master)
        frame.pack(fill=X, ipadx=10, ipady=10, side=TOP)
        self.entries = create_form(frame, create_agent_form)
        return frame

    def create_buttonbox(self, master):
        """Build the button row with the 'Add agent' save action."""
        frame = ttk.Frame(master=master)
        frame.pack(fill=X, pady=1, ipadx=10, ipady=10, side=BOTTOM)

        def on_click_save_agent():
            # Collect the form values into a new Agent row.
            agent = Agent(
                host_ip=self.entries["host_ip"].get(),
                snmp_version=int(self.entries["snmp_version"].get()),
                security_username=self.entries["security_username"].get(),
                privacy_password=self.entries["privacy_password"].get(),
                privacy_protocol=self.entries["privacy_protocol"].get(),
                auth_password=self.entries["auth_password"].get(),
                auth_protocol=self.entries["auth_protocol"].get(),
            )
            engine = create_engine('sqlite:///db.sqlite3')
            # Context-managed session so it is always closed, even if
            # commit() raises (the original leaked the session object).
            with Session(engine) as db:
                db.add(agent)
                db.commit()
            master.destroy()

        btn = ttk.Button(
            master=master, text='Add agent',
            compound=LEFT,
            command=on_click_save_agent
        )
        btn.pack(side=RIGHT, ipadx=5, ipady=5, padx=(0, 15), pady=1)
        return btn
# Single form "page" for alert creation (consumed by gui.form.create_form).
create_alert_form = [{
    "metric": {"title": "Metric", "default": "ifInOctets"},
    "increase_threshold": {"title": "Increase threshold", "default": "30000"}
}]
class CreateAlertDialog(Dialog):
    """Modal dialog that collects alert settings and stores a new Alert."""

    def __init__(self, parent=None, title='', alert=False):
        super().__init__(parent, title, alert)
        # Filled by create_body(); maps field name -> entry widget.
        self.entries = {}

    def create_body(self, master):
        """Build the form area; returns the frame holding the entry widgets."""
        frame = ttk.Frame(master=master)
        frame.pack(fill=X, ipadx=10, ipady=10, side=TOP)
        self.entries = create_form(frame, create_alert_form)
        return frame

    def create_buttonbox(self, master):
        """Build the button row with the 'Add alert' save action."""
        frame = ttk.Frame(master=master)
        frame.pack(fill=X, pady=1, ipadx=10, ipady=10, side=BOTTOM)

        def on_click_save_alert():
            # NOTE(review): increase_threshold is stored as the raw entry
            # string — confirm whether the Alert model expects an int.
            alert = Alert(
                metric=self.entries["metric"].get(),
                increase_threshold=self.entries["increase_threshold"].get(),
            )
            engine = create_engine('sqlite:///db.sqlite3')
            # Context-managed session so it is always closed, even if
            # commit() raises (the original leaked the session object).
            with Session(engine) as db:
                db.add(alert)
                db.commit()
            master.destroy()

        btn = ttk.Button(
            master=master, text='Add alert',
            compound=LEFT,
            command=on_click_save_alert
        )
        btn.pack(side=RIGHT, ipadx=5, ipady=5, padx=(0, 15), pady=1)
        return btn
class MainScreen(ttk.Frame):
    """Top-level frame: a toolbar with dialog buttons plus two live traffic graphs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.pack(fill=BOTH, expand=YES)
        # buttonbar
        toolbar = ttk.Frame(self, style='secondary.TFrame')
        toolbar.pack(fill=X, pady=1, side=TOP)
        # add agent button
        agent_btn = ttk.Button(
            master=toolbar, text='Add agent',
            compound=LEFT,
            style='secondary',
            command=lambda: CreateAgentDialog(
                parent=self, title="Add new agent").show()
        )
        agent_btn.pack(side=LEFT, ipadx=5, ipady=5, padx=(1, 0), pady=1)
        # add alert button
        alert_btn = ttk.Button(
            master=toolbar, text='Add Alert',
            compound=LEFT,
            style='secondary',
            command=lambda: CreateAlertDialog(
                parent=self, title="Add new Alert").show()
        )
        alert_btn.pack(side=LEFT, ipadx=5, ipady=5, padx=(1, 0), pady=1)
        # graphs: embed the module-level animation figures, in then out
        header = ttk.Label(self, text="Traffic Monitor",
                           bootstyle="default", font=("", 20, "bold"))
        header.pack(pady=10, padx=10)
        for fig in (traffic_in_animation.fig, traffic_out_animation.fig):
            graph = RealTimeGraph(self, fig)
            graph.pack(fill=X, pady=1, side=TOP)
def start():
    """Build the main window, attach the traffic animations and run the Tk loop."""
    app = ttk.Window(title="TeutoMonitor",
                     themename="superhero", minsize=(1280, 720))
    MainScreen(app)
    # Bind the FuncAnimation objects to locals for the lifetime of mainloop():
    # matplotlib animations stop refreshing if the object is garbage-collected.
    anim_in = animation.FuncAnimation(
        traffic_in_animation.fig, traffic_in_refresher, interval=1000)
    anim_out = animation.FuncAnimation(
        traffic_out_animation.fig, traffic_out_refresher, interval=1000)
    app.mainloop()
| MatheusWoeffel/TeutoMonitor | src/gui/window.py | window.py | py | 6,033 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "gui.animation.Animation",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "gui.animation.network_traffic_in_filler",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "gui.animation.Animation",
"line_number": 32,
"usage_type": "call"
}... |
36351466606 | from flask import Blueprint, render_template, request
import logging
import functions
loader_blueprint = Blueprint('loader_blueprint', __name__, template_folder="templates")
logging.basicConfig(filename="basic.log")
@loader_blueprint.route("/post")
def post_page():
    """Render the empty post-creation form."""
    return render_template("post_form.html")
@loader_blueprint.route("/uploaded", methods=['POST'])
def uploaded_page():
    """Handle the upload form: validate the picture, save it, persist the post.

    Returns the rendered confirmation page, or a plain error message when the
    file is missing, has a disallowed extension, or cannot be written.
    """
    try:
        content = request.form['content']
        picture = request.files.get("picture")
        # request.files.get() returns None when no file was sent; guard before
        # touching .filename (the original would raise AttributeError here).
        if picture is None or not picture.filename:
            logging.error("Ошибка загрузки")
            return "Файл не отправлен"
        filename = picture.filename
        try:
            functions.check_extension(filename)
        except functions.NotAllowedExtension:
            return "Данный формат файла не поддерживается"
        except IndexError:
            logging.exception("Ошибка загрузки")
            return "Файл не отправлен"
        # NOTE(review): filename comes from the client; consider
        # werkzeug.utils.secure_filename before using it in a filesystem path.
        picture.save(f"./uploads/{filename}")
        pic_path = f"/uploads/{filename}"
        functions.save_data(pic_path, content)
        return render_template("post_uploaded.html", content=content, pic_path=pic_path)
    except PermissionError:
        logging.exception("Ошибка загрузки")
        return "Ошибка загрузки"
| PetrGurev/Lesson_121_homework | loader/views.py | views.py | py | 1,229 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request... |
7256329607 | import random
import sys
from tkinter.messagebox import QUESTION
import inquirer
# Load the word bank once at import time.
# NOTE(review): the file handle is never closed; a `with` block would be safer.
words = open("words.txt", "r")
words_two = words.read()
word_bank = words_two.split()
# Non-empty while "Hell" mode is active (see choose_difficulty / play_game).
hell = []
# NOTE(review): this shadows the builtin `str`; play_game relies on it as the
# separator in `str.join(scoreboard)` when printing the board.
str = ' '
WRONG = []  # one "X" appended per wrong guess; 8 entries means game over
past_guesses = []  # letters already tried this round
# inquirer menu definitions used by choose_difficulty() and play_game().
difficulty = [
    inquirer.List('mode',
    message = "Choose Your Difficulty",
    choices = ['Hell','Hard','Medium','Soft']
    )
]
play_again = [
    inquirer.List('play',
    message= "Wanna Go Again?",
    choices = ['Yeah!','Nah...']
    )
]
def _pick_word(min_len=0, max_len=None):
    """Draw words from the global word_bank until one fits the length bounds."""
    guess_me = random.choice(word_bank)
    while len(guess_me) < min_len or (max_len is not None and len(guess_me) > max_len):
        guess_me = random.choice(word_bank)
    return guess_me


def choose_difficulty():
    """Prompt for a difficulty, pick a matching secret word and start the game.

    Hard: 8+ letters; Medium: 6-8 letters; Soft: at most 6 letters.
    Hell: any word, plus a flag so play_game swaps in a new word after
    every wrong guess. The original repeated the same rejection-sampling
    loop four times; it is factored into _pick_word.
    """
    answers = inquirer.prompt(difficulty)
    if 'Hard' in answers.values():
        guess_me = _pick_word(min_len=8)
        print("Hard Mode Selected")
        return board_maker(guess_me)
    if 'Medium' in answers.values():
        guess_me = _pick_word(min_len=6, max_len=8)
        print("Medium Mode Selected")
        return board_maker(guess_me)
    if 'Soft' in answers.values():
        guess_me = _pick_word(max_len=6)
        print("Soft Mode Selected")
        return board_maker(guess_me)
    if 'Hell' in answers.values():
        guess_me = _pick_word()
        breaker = list(guess_me)
        scoreboard = list("_" * len(breaker))
        hell.append("hell")  # flag checked inside play_game
        print("Heaven or Hell Let's Rock")
        return play_game(scoreboard, breaker)
def board_maker(guess_me):
    """Split the secret word into letters, build a blank board, start playing."""
    letters = list(guess_me)
    blanks = ["_"] * len(letters)
    return play_game(blanks, letters)
def play_game(scoreboard, breaker):
    """Run one guess-cycle of the game; recurses until win or loss.

    Args:
        scoreboard: list of revealed letters / "_" placeholders.
        breaker: the secret word as a list of letters.

    Relies on module globals WRONG, past_guesses, hell and `str`
    (the module-level space string used as join separator).
    NOTE(review): the game loop is implemented via recursion, so a very
    long session could in principle hit Python's recursion limit.
    """
    # Loss: eight wrong guesses accumulated.
    if len(WRONG) == 8:
        print(f"\nLOSER!\nThe Word Was: {''.join(breaker)}\n")
        mulligan = inquirer.prompt(play_again)
        if 'Nah...' in mulligan.values():
            sys.exit(0)
        else:
            # Reset all shared round state before starting over.
            WRONG.clear()
            hell.clear()
            past_guesses.clear()
            choose_difficulty()
    # Win: every placeholder has been replaced.
    if scoreboard == breaker:
        print(f"'\n',{''.join(breaker)}\nYOU WIN!")
        mulligan = inquirer.prompt(play_again)
        if 'Nah...' in mulligan.values():
            sys.exit(0)
        else:
            WRONG.clear()
            hell.clear()
            past_guesses.clear()
            choose_difficulty()
    else:
        # `str` is the module-level space string, so the board prints with
        # its letters separated by spaces.
        print(f"{str.join(scoreboard)} \n GUESS ME!")
        guess = input("Pick A Letter: ").lower()
        # Input validation: letters only, one at a time, no repeats.
        if not guess.isalpha():
            print("Letters only!")
            play_game(scoreboard, breaker)
        elif len(guess) > 1:
            print("One at a time buddy.")
            play_game(scoreboard, breaker)
        elif guess in past_guesses:
            print("You Already Tried That...")
            play_game(scoreboard, breaker)
        elif guess in breaker:
            print("good guess!")
            past_guesses.append(guess)
            # Reveal every position where the guessed letter occurs.
            for correct in range(len(breaker)):
                if breaker[correct] == guess:
                    scoreboard[correct] = breaker[correct]
            play_game(scoreboard, breaker)
        elif guess not in breaker:
            past_guesses.append(guess)
            WRONG.append("X")
            print("\nwrong, dumbass\n","X" * len(WRONG),"\n",f'{8 - len(WRONG)} {"guess" if 8 - len(WRONG) == 1 else "guesses"} left!')
            # Hell mode: swap in a brand-new word after each miss (unless lost).
            if "hell" in hell and len(WRONG) != 8:
                past_guesses.clear()
                hell_game()
            play_game(scoreboard, breaker)
def hell_game():
    """Pick a fresh secret word mid-round (Hell mode) and hand it to play_game."""
    secret = list(random.choice(word_bank))
    fresh_board = ["_"] * len(secret)
    print("\nThe Hell Continues")
    return play_game(fresh_board, secret)
# Entry point: only start the game when run as a script.
if __name__ == "__main__":
    choose_difficulty()
| Momentum-Team-13/python-mystery-word-samrespass | mystery_word.py | mystery_word.py | py | 3,941 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "inquirer.List",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "inquirer.List",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "inquirer.prompt",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line... |
70807032423 | import sys
from collections import deque
sys.stdin = open('input.txt')
def bfs(start):
    """Multi-source BFS from the shoreline cells in `start`.

    Each land cell's value in `visited` is its distance (in steps) from the
    water; the distances are summed into the global `answer` as cells are
    dequeued. Uses module globals: visited, answer, N, M, dr, dc.
    """
    global answer
    q = deque(start)
    while q:
        node = q.popleft()
        answer += visited[node[0]][node[1]]
        for k in range(4):
            y = node[0] + dr[k]
            x = node[1] + dc[k]
            # Expand to unvisited in-bounds neighbours, one step further out.
            if 0 <= y < N and 0 <= x < M and not visited[y][x]:
                visited[y][x] = visited[node[0]][node[1]] + 1
                q.append((y, x))
# Direction offsets: up, right, down, left.
dr = [-1, 0, 1, 0]
dc = [0, 1, 0, -1]
T = int(input())
for tc in range(1, T+1):
    N, M = map(int, input().split())
    beach = [input() for _ in range(N)]
    visited = [[0] * M for _ in range(N)]
    start = []
    answer = 0
    for i in range(N):
        for j in range(M):
            if beach[i][j] == 'W':
                # Water cells are marked -1 so BFS never enters them.
                visited[i][j] = -1
                for k in range(4):
                    y = i + dr[k]
                    x = j + dc[k]
                    # Land cells adjacent to water start at distance 1 and
                    # seed the multi-source BFS.
                    if 0 <= y < N and 0 <= x < M and beach[y][x] == 'L' and not visited[y][x]:
                        visited[y][x] = 1
                        start.append((y, x))
    bfs(start)
    print('#{} {}'.format(tc, answer))
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
41571056432 | import lightgbm as lgb
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
# Read the data
file_dir='E:\\GDBT_LR\\loan\\'
train_data='gbdt_train.csv'
test_data='gdbt_test.csv'
train=pd.read_csv(file_dir+train_data)
test=pd.read_csv(file_dir+test_data)
# Drop the unused index column
del train['Unnamed: 0']
del test['Unnamed: 0']
# Select the working columns
# NOTE(review): `data_list` is never defined anywhere in this file — the next
# two lines raise NameError as written; the column list was presumably lost.
data=train[data_list]
test_data=test[data_list]
# Build the train and test feature/label splits
feature=[x for x in data_list if x!='loan_status']
X_train=data[feature]
y_train=data['loan_status']
X_test=test_data[feature]
y_test=test_data['loan_status']
# Build the LightGBM classifier datasets
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': {'binary_logloss'},
    'num_leaves': 64,
    'num_trees': 100,
    'learning_rate': 0.01,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': 0
}
# Leaves per tree (must match 'num_leaves' in params above)
num_leaf = 64
print('Start training...')
# train
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=100,
                valid_sets=lgb_train)
print('Save model...')
# save model to file
gbm.save_model('model.txt')
print('Start predicting...')
# predict and get data on leaves, training data
y_pred = gbm.predict(X_train, pred_leaf=True)
print(np.array(y_pred).shape)
print(y_pred[0])
# One row per sample, (num_trees * num_leaves) columns
transformed_training_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf],
                                       dtype=np.int64)  # N * num_tress * num_leafs
# One-hot encode the leaf index reached in every tree: `temp` holds the
# flattened column positions, which get incremented per sample.
for i in range(0, len(y_pred)):
    temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])
    transformed_training_matrix[i][temp] += 1
# Apply the same leaf-index encoding to the test predictions
y_pred = gbm.predict(X_test, pred_leaf=True)
print('Writing transformed testing data')
transformed_testing_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf], dtype=np.int64)
for i in range(0, len(y_pred)):
    temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])
    transformed_testing_matrix[i][temp] += 1
lm = LogisticRegression(penalty='l2',C=0.05)  # logistic model construction
lm.fit(transformed_training_matrix,y_train)  # fitting the data
y_pred_test = lm.predict_proba(transformed_testing_matrix)  # Give the probabilty on each label
print(y_pred_test)
# Normalized cross entropy of the positive-class probabilities
NE = (-1) / len(y_pred_test) * sum(((1+y_test)/2 * np.log(y_pred_test[:,1]) + (1-y_test)/2 * np.log(1 - y_pred_test[:,1])))
print("Normalized Cross Entropy " + str(NE))
# Precision and recall at a 0.5 probability threshold
def get_pcr(y_tar, y_pre):
    """Print and return precision/recall at a 0.5 probability threshold.

    Args:
        y_tar: iterable of true binary labels (1 = positive).
        y_pre: iterable of [p_negative, p_positive] probability pairs;
               a sample is predicted positive when p_negative < p_positive.

    Returns:
        (precision, recall) as floats. Returns 0.0 for an empty denominator
        (the original raised ZeroDivisionError when nothing was predicted
        positive or no positive labels existed).
    """
    # Indices of the true positives and of the predicted positives.
    positives = [i for i, label in enumerate(y_tar) if label == 1]
    predicted = [i for i, probs in enumerate(y_pre) if probs[0] < probs[1]]
    hits = set(predicted) & set(positives)
    precision = len(hits) / len(predicted) if predicted else 0.0
    recall = len(hits) / len(positives) if positives else 0.0
    print('查准率为:{}'.format(precision))
    print('查全率为:{}'.format(recall))
    return precision, recall
# Test set; original run printed precision 0.9205776173285198, recall 0.6623376623376623
get_pcr(y_test,y_pred_test)
y_pred_train = lm.predict_proba(transformed_training_matrix)  # Give the probabilty on each label
# Training set; original run printed precision 0.9971139971139971, recall 0.9262734584450402
get_pcr(y_train,y_pred_train)
| hu-minghao/my_program | 贷款违约预测/LGB_LR.py | LGB_LR.py | py | 3,631 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lightgbm.Dataset",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "lightgbm.Dataset",
... |
26620703554 | import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.dates as mdates
import mpl_finance as mpl
from tkinter import *
from yahoo_fin.stock_info import get_data
import pandas as pd
import plotly.graph_objects as go
class AutoPlot:
    """Tiny Tk form asking for a ticker and date range, then showing a candlestick chart."""

    def __init__(self):
        # Builds the form and blocks in mainloop() until the window is closed.
        master = Tk()
        Label(master, text="Stock Ticker").grid(row=0)
        Label(master, text="Range Start").grid(row=1)
        Label(master, text="Range End").grid(row=2)
        # e1 = ticker symbol, e2 = range start date, e3 = range end date
        self.e1 = Entry(master)
        self.e2 = Entry(master)
        self.e3 = Entry(master)
        self.e1.grid(row=0, column=1)
        self.e2.grid(row=1, column=1)
        self.e3.grid(row=2, column=1)
        Button(master, text='Quit', command=master.destroy).grid(row=3, column=0, sticky=W, pady=4)
        Button(master, text='Show', command=self.make_plot).grid(row=3, column=1, sticky=W, pady=4)
        mainloop()

    def make_plot(self):
        """Fetch OHLC data for the entered ticker/range and open a Plotly candlestick chart."""
        # Extracting data via yahoo_fin; dates come straight from the entries.
        df = get_data("{ticker}".format(ticker=self.e1.get()),
                      start_date = self.e2.get(), end_date = self.e3.get())
        df.index = pd.to_datetime(df.index)
        fig = go.Figure(data=[go.Candlestick(x=df.index,
                        open=df['open'],
                        high=df['high'],
                        low=df['low'],
                        close=df['close'])])
        fig.show()
# Construct (and run) the GUI immediately when this module executes.
AutoPlot()
| MihaiGroza/Automated-Candlestick-Chart-Plot | CandleStick_Chart_Building.py | CandleStick_Chart_Building.py | py | 1,397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yahoo_fin.stock_info.get_data",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 40,
"usage_type": "call"
},
{
"api_... |
28919190196 | from PIL import Image
import glob
import random
import os
from collections import defaultdict
#################################
test_percentage = 0.20
def partitionRankings(rawRatings, testPercent):
    """Randomly split *rawRatings* into (train, test) lists.

    Based on: https://stackoverflow.com/questions/23299099/trying-to-split-list-by-percentage
    """
    test_count = int(round(testPercent * len(rawRatings)))
    pool = list(rawRatings)
    random.shuffle(pool)
    return pool[test_count:], pool[:test_count]
#################################################################
#### Make Directories (if needed)
#################################################################
# NOTE(review): `caboodle` is never used after this point — dead state.
caboodle = defaultdict(list)
# First level of sub-directories under ./DATA = the class labels.
categories = [x[1] for x in os.walk('./DATA')][0]
# Skip output folders left over from a previous run.
if 'train' in categories:
    categories.remove('train')
    categories.remove('validate')
for category in categories:
    val_dir = 'DATA/validate/'+str(category)
    train_dir = 'DATA/train/'+str(category)
    subdata = []
    if not os.path.exists(val_dir):
        os.makedirs(val_dir)
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    # Read images (currently either png or jpg format); each image is copied
    # into memory so the file handle can be closed immediately.
    for filename in glob.glob('DATA/'+str(category)+'/*.jpg'):
        im = Image.open(filename)
        keep = im.copy()
        subdata.append(keep)
        im.close()
    for filename in glob.glob('DATA/'+str(category)+'/*.png'):
        im = Image.open(filename)
        keep = im.copy()
        subdata.append(keep)
        im.close()
    random.shuffle(subdata)
    train_sample, test_sample = partitionRankings(subdata, test_percentage)
    # Read images (save shuffled images to new train/validate folders in a jpg format)
    for i in range(len(train_sample)):
        train_sample[i].save(train_dir+'/'+str(category) + str(i) + '.jpg')
    for i in range(len(test_sample)):
        test_sample[i].save(val_dir+'/'+str(category) + str(i) + '.jpg')
#################################################################
#### Make labels.txt
#################################################################
f = open('DATA/labels.txt', 'w')
for category in categories:
    f.write(category+'\n')
f.close()
| melissadale/YouTubeTutorials | TF-Records/DivideData.py | DivideData.py | py | 2,165 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "random.shuffle",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"... |
70447240745 | import os
import shutil
import subprocess
import random
import string
from cdifflib import CSequenceMatcher
from pathlib import Path
from typing import Any
from urllib.request import urlopen
import numpy as np
from rich import print as print
from shapely.geometry import MultiPolygon
from sqlalchemy import text
from src.db.db import Database
from functools import wraps
from src.core.enums import IfExistsType
import polars as pl
import csv
from io import StringIO
import time
from src.core.enums import TableDumpFormat
from src.core.config import settings
def timing(f):
    """Decorator that prints each call's wall-clock duration.

    The unit (seconds / milliseconds / microseconds) is chosen from the
    magnitude of the elapsed time; the wrapped function's result is
    returned unchanged.
    """
    @wraps(f)
    def wrap(*args, **kw):
        started = time.time()
        result = f(*args, **kw)
        elapsed = time.time() - started
        if elapsed > 1:
            label = f"{round(elapsed, 2)} seconds"
        elif elapsed > 0.001:
            label = f"{int(elapsed * 1000)} miliseconds"
        else:
            label = f"{int(elapsed * 1000000)} microseconds"
        print(f"func: {f.__name__} took: {label}")
        return result

    return wrap
def make_dir(dir_path: str):
    """Create *dir_path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of a separate existence check, avoiding
    the check-then-act race of the original implementation.
    """
    os.makedirs(dir_path, exist_ok=True)
def delete_file(file_path: str) -> None:
    """Delete *file_path* from disk, silently ignoring a missing file."""
    try:
        os.remove(file_path)
    except OSError:
        # Best-effort delete: a missing file (or permission issue) is not
        # fatal; the original bound the exception to an unused variable.
        pass
def delete_dir(dir_path: str) -> None:
    """Delete the directory tree at *dir_path*, ignoring a missing directory."""
    try:
        shutil.rmtree(dir_path)
    except OSError:
        # Best-effort delete: a missing directory is not fatal; the original
        # bound the exception to an unused variable.
        pass
def replace_dir(dir_path: str) -> None:
    """Recreate *dir_path* as an empty directory, discarding prior contents."""
    delete_dir(dir_path)  # tolerates a non-existent directory
    os.mkdir(dir_path)
def print_hashtags():
    """Print a full-width hashtag rule used as a visual separator in CLI output."""
    print(
        "#################################################################################################################"
    )
def print_separator_message(message: str):
    """Print *message* framed between two hashtag rules."""
    print_hashtags()
    print_info(message)
    print_hashtags()
def print_info(message: str):
    """Print *message* with a green INFO prefix (rich markup)."""
    prefix = "[bold green]INFO[/bold green]"
    print(f"{prefix}: {message}")
def print_error(message: str):
    """Print *message* with a red ERROR prefix (rich markup)."""
    prefix = "[bold red]ERROR[/bold red]"
    print(f"{prefix}: {message}")
def print_warning(message: str):
    """Print *message* with a WARNING prefix (rich markup)."""
    prefix = "[red magenta]WARNING[/red magenta]"
    print(f"{prefix}: {message}")
def download_link(directory: str, link: str, new_filename: str = None):
    """Download *link* into *directory*, optionally under a new file name.

    When *new_filename* is None the basename of the URL is used.
    """
    filename = new_filename if new_filename is not None else os.path.basename(link)
    download_path = Path(directory) / filename
    with urlopen(link) as remote, download_path.open("wb") as out:
        out.write(remote.read())
    print_info(f"Downloaded ended for {link}")
def check_string_similarity(
    input_value: str, match_values: list[str], target_ratio: float
) -> bool:
    """Return True if *input_value* is similar to any entry of *match_values*.

    "Similar" means substring containment in either direction, or a
    difflib-style ratio of at least *target_ratio*.

    Args:
        input_value (str): Input value to check.
        match_values (list[str]): List of strings to check against.
        target_ratio (float): Target ratio to match.

    Returns:
        bool: True if the input value is similar to one of the match values.
    """
    for candidate in match_values:
        if input_value in candidate or candidate in input_value:
            return True
        if CSequenceMatcher(None, input_value, candidate).ratio() >= target_ratio:
            return True
    return False
def check_string_similarity_bulk(
    input_value: str, match_dict: dict, target_ratio: float
) -> bool:
    """Return True if *input_value* is similar to any list in *match_dict*.

    Args:
        input_value (str): Input value to check (may be None).
        match_dict (dict): Dictionary whose values are lists of strings.
        target_ratio (float): Target ratio to match.

    Returns:
        bool: True if the lower-cased input is similar to any match list.
    """
    if input_value is None:
        return False
    lowered = input_value.lower()
    return any(
        check_string_similarity(
            match_values=values, input_value=lowered, target_ratio=target_ratio
        )
        for values in match_dict.values()
    )
# Element-wise (numpy-vectorised) variant of check_string_similarity_bulk.
vector_check_string_similarity_bulk = np.vectorize(check_string_similarity_bulk)
def create_pgpass(db_config):
    """Create a pgpass file (~/.pgpass_<dbname>) for the specified DB config.

    Writes the file directly instead of shelling out with ``echo`` so the
    password never appears in a process listing and special characters in the
    credentials cannot break (or inject into) a shell command. The original
    also tried to delete "~/.pgpass_<db>" without expanding "~", which never
    removed anything.

    Args:
        db_config: Database configuration (host, port, user, password, path).
    """
    db_name = db_config.path[1:]
    pgpass_path = os.path.expanduser(f"~/.pgpass_{db_name}")
    # Remove any stale file first (the shell redirect used to truncate it).
    try:
        os.remove(pgpass_path)
    except OSError:
        pass
    line = ":".join(
        [
            db_config.host,
            str(db_config.port),
            db_name,
            db_config.user,
            db_config.password,
        ]
    )
    with open(pgpass_path, "w") as f:
        f.write(line + "\n")
    # libpq refuses pgpass files that are group/world readable.
    os.chmod(pgpass_path, 0o600)
def check_table_exists(db, table_name: str, schema: str) -> bool:
    """Return True if `schema.table_name` exists per information_schema.

    Args:
        db: Database wrapper exposing a select() method.
        table_name (str): Table name (without schema).
        schema (str): Schema name.

    Returns:
        bool: True when the table exists.

    NOTE(review): schema/table_name are interpolated directly into the SQL
    string — only call this with trusted, internally-generated identifiers.
    """
    check_if_exists = db.select(
        f"""
        SELECT EXISTS (
            SELECT FROM information_schema.tables
            WHERE table_schema = '{schema}'
            AND table_name = '{table_name}'
        );"""
    )
    return check_if_exists[0][0]
def create_table_dump(
    db_config: dict, schema: str, table_name: str, data_only: bool = False
):
    """Create a custom-format pg_dump of one table into OUTPUT_DATA_DIR.

    Args:
        db_config (str): Database configuration (host/port/user/password/path).
        table_name (str): Table name (without schema).
        schema (str): Schema the table lives in.
        data_only (bool, optional): Dump data only (no DDL). Defaults to False.
    """
    try:
        dir_output = os.path.join(settings.OUTPUT_DATA_DIR, table_name + ".dump")
        # Delete the file if it already exists
        delete_file(dir_output)
        # Pass the password via the environment, as pg_dump expects
        os.environ["PGPASSWORD"] = db_config.password
        # Construct the pg_dump command. subprocess requires every argument
        # to be a string, so the port is converted explicitly (the original
        # passed it through unconverted, matching create_pgpass's str()).
        command = [
            "pg_dump",
            "-h",
            db_config.host,
            "-p",
            str(db_config.port),
            "-U",
            db_config.user,
            "-d",
            db_config.path[1:],
            "-t",
            f"{schema}.{table_name}",
            "-F",
            "c",
            "-f",
            dir_output,
            "--no-owner",
        ]
        # Data-only dumps skip the schema definition
        if data_only:
            command.append("--data-only")
        # Run pg_dump; raises CalledProcessError on a non-zero exit status
        subprocess.check_output(command, stderr=subprocess.STDOUT)
        print_info(f"Successfully dumped {schema}.{table_name} to {dir_output}")
    except Exception as e:
        print_warning(f"The following exception happened when dumping {table_name}: {e}")
def restore_table_dump(
    db_config: dict, schema: str, table_name: str, data_only: bool = False
):
    """Restore a <table_name>.dump file created by create_table_dump.

    Args:
        db_config (dict): Database configuration (host/port/user/password/path).
        schema (str): Schema name (kept for symmetry with create_table_dump).
        table_name (str): Table whose dump file is restored.
        data_only (bool, optional): Restore data only. Defaults to False.

    Raises:
        ValueError: If the dump file is not found.
    """
    # Define the output directory
    dir_output = os.path.join(settings.OUTPUT_DATA_DIR, table_name + ".dump")
    # Check if the file exists
    if not os.path.isfile(dir_output):
        raise ValueError(f"File {dir_output} does not exist")
    try:
        # Pass the password via the environment, as pg_restore expects
        os.environ["PGPASSWORD"] = db_config.password
        # subprocess requires string arguments, so convert the port explicitly
        # (the original passed it through unconverted).
        command = [
            "pg_restore",
            "-h",
            db_config.host,
            "-p",
            str(db_config.port),
            "-U",
            db_config.user,
            "-d",
            db_config.path[1:],
            "--no-owner",
            "--no-privileges",
            dir_output,
        ]
        # Insert the flag ahead of the trailing arguments for data-only restores
        if data_only:
            command.insert(-2, "--data-only")
        # Run pg_restore; raises CalledProcessError on a non-zero exit status
        subprocess.check_output(command, stderr=subprocess.STDOUT)
        print_info(f"Successfully restored {table_name}.dump from {dir_output}")
    except Exception as e:
        print_warning(
            f"The following exception happened when restoring {table_name}: {e}"
        )
def create_table_schema(db: Database, table_full_name: str):
    """Function that creates a table schema from a database dump.

    Args:
        db (Database): Database connection class.
        table_full_name (str): Name with the schema of the table (e.g. basic.poi).
    """
    db_config = db.db_config
    db.perform(query="CREATE SCHEMA IF NOT EXISTS basic;")
    db.perform(query="CREATE SCHEMA IF NOT EXISTS extra;")
    # Drop any previous version so pg_restore can recreate the table cleanly.
    db.perform(query="DROP TABLE IF EXISTS %s" % table_full_name)
    table_name = table_full_name.split(".")[1]
    # Pass the password via the environment, as pg_restore expects.
    os.environ["PGPASSWORD"] = db_config.password
    # NOTE(review): shell=True with interpolated config values — safe only
    # while every part of the command comes from trusted internal settings.
    subprocess.run(
        f'pg_restore -U {db_config.user} --schema-only -h {db_config.host} --no-owner -n basic -d {db_config.path[1:]} -t {table_name} {"/app/src/data/input/dump.tar"}',
        shell=True,
        check=True,
    )
    # # TODO: Temp fix here only to convert poi.id a serial instead of integer
    db.perform(
        f"""
        ALTER TABLE {table_full_name} DROP COLUMN IF EXISTS id;
        ALTER TABLE {table_full_name} ADD COLUMN id SERIAL;
        """
    )
def create_standard_indices(db: Database, table_full_name: str):
    """Create standard indices on the id (primary key) and geometry column.

    Args:
        db (Database): Database connection class.
        table_full_name (str): Schema-qualified table name (e.g. basic.poi).
    """
    db.perform(
        f"""
        ALTER TABLE {table_full_name} ADD PRIMARY KEY (id);
        CREATE INDEX IF NOT EXISTS {table_full_name.replace('.', '_')}_geom_idx ON {table_full_name} USING GIST (geom);
        """
    )
def download_dir(self, prefix, local, bucket, client):
    """Downloads data directory from AWS S3

    Args:
        prefix (str): Path to the directory in S3
        local (str): Path to the local directory
        bucket (str): Name of the S3 bucket
        client (obj): S3 client object

    NOTE(review): defined at module level yet takes an unused `self`
    parameter — callers must pass a placeholder first argument.
    """
    keys = []
    dirs = []
    next_token = ""
    base_kwargs = {
        "Bucket": bucket,
        "Prefix": prefix,
    }
    # Page through list_objects_v2 until no continuation token remains.
    while next_token is not None:
        kwargs = base_kwargs.copy()
        if next_token != "":
            kwargs.update({"ContinuationToken": next_token})
        results = client.list_objects_v2(**kwargs)
        contents = results.get("Contents")
        for i in contents:
            k = i.get("Key")
            # Keys ending in "/" denote folders; everything else is a file.
            if k[-1] != "/":
                keys.append(k)
            else:
                dirs.append(k)
        next_token = results.get("NextContinuationToken")
    # Materialise the folder structure first, then download each file.
    for d in dirs:
        dest_pathname = os.path.join(local, d)
        if not os.path.exists(os.path.dirname(dest_pathname)):
            os.makedirs(os.path.dirname(dest_pathname))
    for k in keys:
        dest_pathname = os.path.join(local, k)
        if not os.path.exists(os.path.dirname(dest_pathname)):
            os.makedirs(os.path.dirname(dest_pathname))
        client.download_file(bucket, k, dest_pathname)
def upload_dir(self, prefix, local, bucket, client):
    """Upload the *local* directory tree to AWS S3 under *prefix*.

    Args:
        prefix (str): Path to the directory in S3
        local (str): Path to the local directory
        bucket (str): Name of the S3 bucket
        client (obj): S3 client object
    """
    for root, dirs, files in os.walk(local):
        for filename in files:
            # construct the full local path
            local_path = os.path.join(root, filename)
            # Mirror the path below `local` into the S3 key space.
            relative_path = os.path.relpath(local_path, local)
            s3_key = os.path.join(prefix, relative_path)
            # NOTE(review): the original loop computed local_path but never
            # uploaded anything; this restores the upload promised by the
            # docstring (boto3: upload_file(Filename, Bucket, Key)).
            client.upload_file(local_path, bucket, s3_key)
def parse_poly(dir):
    """Parse an Osmosis polygon filter file.

    Based on: https://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Python_Parsing

    Args:
        dir (str): Path to the polygon filter file.

    Returns:
        (shapely.geometry.multipolygon): Returns the polygon in the poly format as a shapely multipolygon.
    """
    in_ring = False
    # Each entry is [exterior_ring_coords, [hole_ring_coords, ...]].
    coords = []
    with open(dir, "r") as polyfile:
        for index, line in enumerate(polyfile):
            if index == 0:
                # first line is junk.
                continue
            elif index == 1:
                # second line is the first polygon ring.
                coords.append([[], []])
                ring = coords[-1][0]
                in_ring = True
            elif in_ring and line.strip() == "END":
                # we are at the end of a ring, perhaps with more to come.
                in_ring = False
            elif in_ring:
                # we are in a ring and picking up new coordinates.
                ring.append(list(map(float, line.split())))
            elif not in_ring and line.strip() == "END":
                # we are at the end of the whole polygon.
                break
            elif not in_ring and line.startswith("!"):
                # we are at the start of a polygon part hole.
                coords[-1][1].append([])
                ring = coords[-1][1][-1]
                in_ring = True
            elif not in_ring:
                # we are at the start of a polygon part.
                coords.append([[], []])
                ring = coords[-1][0]
                in_ring = True
    return MultiPolygon(coords)
# Copied from https://pynative.com/python-generate-random-string/
def get_random_string(length):
    """Return a random string of lowercase ASCII letters of the given length."""
    alphabet = string.ascii_lowercase
    return "".join(random.choice(alphabet) for _ in range(length))
def psql_insert_copy(table, conn, keys, data_iter):
    """
    Execute SQL statement inserting data via PostgreSQL COPY (fast bulk load).

    Parameters
    ----------
    table : pandas.io.sql.SQLTable
    conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
    keys : list of str
        Column names
    data_iter : Iterable that iterates the values to be inserted
    """
    # gets a DBAPI connection that can provide a cursor
    dbapi_conn = conn.connection
    with dbapi_conn.cursor() as cur:
        # Serialise all rows to an in-memory CSV buffer for COPY ... FROM STDIN.
        s_buf = StringIO()
        writer = csv.writer(s_buf)
        writer.writerows(data_iter)
        s_buf.seek(0)
        columns = ", ".join(['"{}"'.format(k) for k in keys])
        if table.schema:
            table_name = "{}.{}".format(table.schema, table.name)
        else:
            table_name = table.name
        # NOTE(review): str.replace returns a new string and the result is
        # discarded here, so both branches below are no-ops as written.
        # Simply assigning the result back would not work either: COPY's
        # column list accepts only column names, not expressions like
        # ST_GEOMFROMTEXT(...) — the geom/jsonb conversion needs a different
        # mechanism (e.g. a staging column or post-COPY UPDATE). Left
        # untouched pending that design decision.
        if "this_is_the_geom_column" in keys:
            columns.replace(
                "this_is_the_geom_column", "ST_GEOMFROMTEXT(this_is_the_geom_column)"
            )
        if "this_is_the_jsonb_column" in keys:
            columns.replace(
                "this_is_the_jsonb_column", "this_is_the_jsonb_column::jsonb"
            )
        sql = "COPY {} ({}) FROM STDIN WITH CSV".format(table_name, columns)
        cur.copy_expert(sql=sql, file=s_buf)
# TODO: Finish docstring and add comments. Check error handling
def polars_df_to_postgis(
    engine,
    df: pl.DataFrame,
    table_name: str,
    schema: str = "public",
    if_exists: IfExistsType = "replace",
    geom_column: str = "geom",
    srid: int = 4326,
    create_geom_index: bool = True,
    jsonb_column: str = False,
):
    """Blazing fast method to import a polars DataFrame into a PostGIS database with geometry and JSONB column.
    Avoid using 'this_is_the_geom_column' and 'this_is_the_jsonb_column' as column names in the dataframe as they are reserved for the geometry and JSONB columns during the import.
    Args:
        engine (SQLAlchemy): SQLAlchemy engine
        df (pl.DataFrame): Polars DataFrame
        table_name (str): Name of the table to be created
        schema (str, optional): Schema name. Defaults to "public".
        if_exists (IfExistsType, optional): What should happen if table exist. There are the options: 'fail', 'append', 'replace'. Defaults to "replace".
        geom_column (str, optional): What is the name of the geometry column in the dataframe. The geometry column should be a WKT string. The same name will also be used in the PostGIS table. Defaults to "geom".
        srid (int, optional): What is the SRID of the geom. Defaults to 4326.
        create_geom_index (bool, optional): Should a GIST index be created on the geometry. Defaults to True.
        jsonb_column (str, optional): Add the name of column that should added as JSONB. Defaults to False.
    Raises:
        ValueError: Name of the geometry column is not in the dataframe
        ValueError: Name of the JSONB column is not in the dataframe
        ValueError: If the if_exists parameter is 'fail'
    """
    # make a connection
    df_pd = df.to_pandas()
    db = engine.connect()
    # Check if table should be created or appended
    # NOTE(review): this branch compares ``if_exists`` against the enum's
    # *value* (a plain string), while the ``elif`` branches further down use
    # ``if_exists.value``. Only one of the two forms can work for a given
    # argument type (str vs IfExistsType) — confirm what callers pass.
    if if_exists == IfExistsType.replace.value:
        # Create an empty table with the DataFrame's columns (zero rows),
        # replacing any existing table of the same name.
        df_pd.head(0).to_sql(
            table_name,
            engine,
            method=psql_insert_copy,
            index=False,
            if_exists=IfExistsType.replace.value,
            chunksize=1,
            schema=schema,
        )
        print_info("Table {} will be created in schema {}.".format(table_name, schema))
        # Maps original column names to the reserved placeholder names used
        # while the bulk COPY runs; the columns are renamed back afterwards.
        columns_to_rename = {}
        # Check if geom column exists and if it should be converted to geometry
        if geom_column in df_pd.columns and geom_column is not None:
            # Rename the geometry column to the reserved placeholder and give
            # it a real geometry type + SRID before the data is copied in.
            random_column_name_geom = "this_is_the_geom_column"
            db.execute(
                text(
                    "ALTER TABLE {}.{} RENAME COLUMN {} TO {};".format(
                        schema, table_name, geom_column, random_column_name_geom
                    )
                )
            )
            db.execute(
                text(
                    "ALTER TABLE {}.{} ALTER COLUMN {} TYPE geometry;".format(
                        schema, table_name, random_column_name_geom
                    )
                )
            )
            db.execute(
                text(
                    "SELECT UpdateGeometrySRID('{}','{}','{}', {})".format(
                        schema, table_name, random_column_name_geom, srid
                    )
                )
            )
            columns_to_rename[geom_column] = random_column_name_geom
        elif geom_column not in df_pd.columns and geom_column is not None:
            raise ValueError("Spefified column for Geometry not found in DataFrame")
        # Same placeholder trick for the JSONB column.
        if jsonb_column in df_pd.columns and jsonb_column is not None:
            random_column_name_jsonb = "this_is_the_jsonb_column"
            db.execute(
                text(
                    "ALTER TABLE {}.{} RENAME COLUMN {} TO {};".format(
                        schema, table_name, jsonb_column, random_column_name_jsonb
                    )
                )
            )
            db.execute(
                text(
                    "ALTER TABLE {}.{} ALTER COLUMN {} TYPE JSONB USING {}::jsonb".format(
                        schema,
                        table_name,
                        random_column_name_jsonb,
                        random_column_name_jsonb,
                    )
                )
            )
            columns_to_rename[jsonb_column] = random_column_name_jsonb
        elif jsonb_column not in df_pd.columns and jsonb_column is not None:
            raise ValueError("Spefified column for JSONB not found in DataFrame")
    elif if_exists.value == IfExistsType.append.value:
        print_info("Table {} in schema {} already exists".format(table_name, schema))
    elif if_exists.value == IfExistsType.fail.value:
        raise ValueError(
            "Table {} in schema {} already exists".format(table_name, schema)
        )
    # NOTE(review): ``columns_to_rename`` is only assigned in the "replace"
    # branch above — the append path would raise NameError here. Verify.
    df_pd = df_pd.rename(columns=columns_to_rename)
    # Insert data into table
    df_pd.to_sql(
        table_name,
        engine,
        method=psql_insert_copy,
        index=False,
        if_exists="append",
        chunksize=10000,
        schema=schema,
    )
    # Rename columns back to original names
    if "this_is_the_geom_column" in df_pd.columns:
        db.execute(
            text(
                "ALTER TABLE {}.{} RENAME COLUMN this_is_the_geom_column TO {};".format(
                    schema, table_name, geom_column
                )
            )
        )
    if "this_is_the_jsonb_column" in df_pd.columns:
        db.execute(
            text(
                "ALTER TABLE {}.{} RENAME COLUMN this_is_the_jsonb_column TO {};".format(
                    schema, table_name, jsonb_column
                )
            )
        )
    # Create index on geom column if it does not exist and is desired
    if create_geom_index == True:
        idx = db.execute(
            text(
                "SELECT indexdef FROM pg_indexes WHERE tablename = '{}';".format(
                    table_name
                )
            )
        )
        # NOTE(review): ``idx`` is a SQLAlchemy result object, not a string —
        # the substring membership tests below likely never match as intended;
        # the rows probably need to be fetched and their text inspected.
        if "gist" not in idx and "(geom)" not in idx:
            print_info("Creating index on geom column")
            db.execute(
                text(
                    "CREATE INDEX ON {}.{} USING GIST (geom);".format(
                        schema, table_name
                    )
                )
            )
        else:
            print_info("GIST-Index on geom column already exists")
    # Close connection
    db.close()
def osm_crop_to_polygon(orig_file_path: str, dest_file_path: str, poly_file_path: str):
    """
    Crops OSM data as per polygon file
    Args:
        orig_file_path (str): Path to the input OSM data file
        dest_file_path (str): Path to the output OSM data file (incl. filename with extension ".pbf") where OSM data is to be written
        poly_file_path (str): Path to a polygon filter file (as per the format described here: https://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Format)
    """
    # Pass the command as an argument list with the default shell=False so
    # paths containing spaces or shell metacharacters are not re-interpreted
    # by a shell; check=True still raises CalledProcessError on failure.
    subprocess.run(
        [
            "osmconvert",
            orig_file_path,
            f"-B={poly_file_path}",
            "--complete-ways",
            f"-o={dest_file_path}",
        ],
        check=True,
    )
def osm_generate_polygon(db_rd, geom_query: str, dest_file_path: str):
    """
    Generates a polygon filter file for cropping OSM data
    Args:
        db_rd (Database): A database connection object
        geom_query (str): The query to be run for retrieving geometry data for a region (returned column must be named "geom")
        dest_file_path (str): Path to the output file (incl. filename with extension ".poly") where polygon data is to be written
    """
    # Dump every vertex of the region geometry as (x, y) pairs.
    coordinates = db_rd.select(f"""SELECT ST_x(coord.geom), ST_y(coord.geom)
    FROM (
        SELECT (ST_dumppoints(geom_data.geom)).geom
        FROM (
            {geom_query}
        ) geom_data
    ) coord;"""
    )
    # Assemble the .poly file content and write it in one call: header line,
    # section name, one " x y" line per vertex, then the two END markers.
    vertex_lines = "\n".join(f" {point[0]} {point[1]}" for point in coordinates)
    with open(dest_file_path, "w") as poly_file:
        poly_file.write("1\n" + "polygon\n" + vertex_lines + "\nEND\nEND")
| goat-community/data_preparation | src/utils/utils.py | utils.py | py | 23,625 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 2... |
21120525047 | # coding: utf-8
import torch
import sys
from torch import nn
from TTS.utils.text.symbols import symbols
from TTS.layers.tacotron import Prenet, Encoder, Decoder, PostCBHG
class Tacotron(nn.Module):
    """End-to-end Tacotron text-to-speech model.

    Pipeline: character embedding -> encoder -> attention decoder producing
    ``r`` mel frames per decoder step -> CBHG post-net -> linear projection
    (sigmoid-bounded) to the linear spectrogram.
    """

    def __init__(self,
                 embedding_dim=256,
                 linear_dim=1025,
                 mel_dim=80,
                 r=5,
                 padding_idx=None):
        """Build the sub-modules.

        Args:
            embedding_dim: size of the character embedding vectors.
            linear_dim: number of linear-spectrogram bins produced.
            mel_dim: number of mel-spectrogram bins produced.
            r: number of mel frames emitted per decoder step.
            padding_idx: embedding index whose vector is kept at zero.
        """
        super(Tacotron, self).__init__()
        self.r = r
        self.mel_dim = mel_dim
        self.linear_dim = linear_dim
        # One embedding row per symbol in the TTS character set.
        self.embedding = nn.Embedding(
            len(symbols), embedding_dim, padding_idx=padding_idx)
        #print(" | > Number of characters : {}".format(len(symbols)))
        # Initialize embeddings with N(0, 0.3) weights.
        self.embedding.weight.data.normal_(0, 0.3)
        self.encoder = Encoder(embedding_dim)
        self.decoder = Decoder(256, mel_dim, r)
        self.postnet = PostCBHG(mel_dim)
        # Project the post-net's bidirectional GRU features to linear bins;
        # sigmoid bounds outputs to (0, 1).
        self.last_linear = nn.Sequential(
            nn.Linear(self.postnet.cbhg.gru_features * 2, linear_dim),
            nn.Sigmoid())

    def forward(self, characters, mel_specs=None, mask=None):
        """Run a forward pass.

        Args:
            characters: batch of character-id sequences.
            mel_specs: ground-truth mel frames for teacher forcing (optional).
            mask: attention mask passed through to the decoder (optional).

        Returns:
            Tuple of (mel_outputs, linear_outputs, alignments, stop_tokens).
        """
        B = characters.size(0)
        inputs = self.embedding(characters)
        # batch x time x dim
        encoder_outputs = self.encoder(inputs)
        # batch x time x dim*r
        mel_outputs, alignments, stop_tokens = self.decoder(
            encoder_outputs, mel_specs, mask)
        # Reshape: unfold the r grouped frames back to one frame per step.
        # batch x time x dim
        mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
        linear_outputs = self.postnet(mel_outputs)
        linear_outputs = self.last_linear(linear_outputs)
        return mel_outputs, linear_outputs, alignments, stop_tokens
| JRC1995/Chatbot | TTS/models/tacotron.py | tacotron.py | py | 1,644 | python | en | code | 79 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
#generali
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import loader
from django.template.context_processors import csrf
from django.db.models import Count
from django.contrib import messages
from django.contrib import auth
from django.contrib.auth import get_user_model
from django_email_verification import send_email
#blog
#tag engine
from taggit.models import Tag
#paginator per blog
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
#modelli per gestire i post del blog
from .models import Post, Commenti, slider_1, slider_2, home_gallery, titoli_landing_page, content_chisiamo, content_mappa, candidato, link_footer, banner_lavora_con_noi,content_lavora_con_noi,Servizi,Certificati, img_hero
#autenticazione e registrazione
from django.contrib.auth import authenticate, logout
from django.contrib.auth.forms import UserCreationForm
#form vari
from .forms import nome_utente, password, EmailPostForm, FormCommenti, registrazione, lavoraconnoi
#chat
from django.core.mail import send_mail
import socket
#API REST
#framework
from rest_framework import generics
#serializzatore
from .serializers import post_serializer
#REST API classes
#Post list API
class post_list_api(generics.ListAPIView):
    """Read-only endpoint listing every Post, serialized with post_serializer."""
    # queryset exposed by the endpoint
    queryset = Post.objects.all()
    # serializer applied to the queryset
    serializer_class = post_serializer
#Post detail API
class post_detail_api(generics.RetrieveAPIView):
    """Read-only endpoint returning a single Post by primary key."""
    # queryset exposed by the endpoint
    queryset = Post.objects.all()
    # serializer applied to the retrieved object
    serializer_class = post_serializer
#service list
def serv_list(request,nome=None):
    """List all services, or render one service's detail page when ``nome``
    (a service slug) is given."""
    hero = img_hero.objects.filter(current = True).first()
    servizi = Servizi.objects.all()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    footer = link_footer.objects.filter(current=True).first()
    #service detail
    if nome:
        nome_servizio = get_object_or_404(Servizi, slug=nome)
        # render() loads the template itself, so the unused loader.get_template
        # call was removed; 'hero' is now passed for consistency with the other
        # landing-page views (it was queried but never used before).
        return render(request, 'landing_page/servizio.html', {'nome_servizio':nome_servizio , 'footer':footer , 'titoli':titoli, 'hero': hero})
    return render(request, 'landing_page/serv_list.html', {'servizi':servizi, 'footer':footer, 'titoli':titoli, 'hero': hero})
#certificate list
def cert_list(request,nome_certificato=None):
    """List up to five certificates, or render one certificate's detail page
    when ``nome_certificato`` (a slug) is given."""
    hero = img_hero.objects.filter(current = True).first()
    certificati = Certificati.objects.all()[:5]
    titoli = titoli_landing_page.objects.filter(current=True).first()
    footer = link_footer.objects.filter(current=True).first()
    #certificate detail
    if nome_certificato:
        certificato = get_object_or_404(Certificati, slug=nome_certificato)
        # render() loads the template itself; the unused loader.get_template
        # call was removed.
        return render(request, 'landing_page/certificato.html', {'certificato': certificato , 'footer':footer, 'titoli':titoli, 'hero': hero })
    return render(request, 'landing_page/cert_list.html', {'certificati':certificati, 'footer':footer , 'titoli':titoli, 'hero': hero})
#home page and homepage links
def index(request):
    """Render the landing-page home with sliders, gallery, titles, footer and hero image."""
    hero = img_hero.objects.filter(current = True).first()
    slide1 = slider_1.objects.filter(current=True).first()
    slide2 = slider_2.objects.filter(current=True).first()
    galleria = home_gallery.objects.filter(current=True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    footer = link_footer.objects.filter(current=True).first()
    # render() loads the template itself; the unused loader.get_template call was removed.
    return render(request, 'landing_page/index.html', {'slide1': slide1, 'slide2': slide2, 'galleria' : galleria, 'titoli':titoli, 'footer':footer, 'hero' : hero })
def chisiamo(request):
    """Render the "about us" page; all Post objects feed its gallery."""
    hero = img_hero.objects.filter(current = True).first()
    galleria_chisiamo = Post.objects.all()
    footer = link_footer.objects.filter(current=True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    contenuto = content_chisiamo.objects.filter(current=True).first()
    # render() loads the template itself; the unused loader.get_template call was removed.
    return render(request, 'landing_page/chi_siamo.html', {'galleria_chisiamo' : galleria_chisiamo, 'contenuto' : contenuto, 'footer':footer , 'titoli':titoli, 'hero': hero})
def mappa(request):
    """Render the map page with its currently active content."""
    hero = img_hero.objects.filter(current = True).first()
    footer = link_footer.objects.filter(current=True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    contenuto = content_mappa.objects.filter(current=True).first()
    # render() loads the template itself; the unused loader.get_template call was removed.
    return render(request, 'landing_page/mappa.html', {'contenuto' : contenuto, 'footer':footer , 'titoli':titoli, 'hero': hero})
def registrati(request):
    """Handle user sign-up: show the registration form and, on a valid POST,
    create the account, flash a welcome message and redirect home."""
    hero = img_hero.objects.filter(current = True).first()
    footer = link_footer.objects.filter(current=True).first()
    contenuto = content_mappa.objects.filter(current=True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    form = registrazione()
    if request.method == "POST":
        form = registrazione(request.POST)
        if form.is_valid():
            user = form.save()
            username = form.cleaned_data.get("username")
            #Account-creation confirmation via e-mail#
            #to be completed#
            #-----------------------------------------------------------#
            #email = form.cleaned_data.get("email")
            #user = get_user_model().objects.create(username=usernameT, password=password, email=email)
            #user.is_active = False # Example
            #send_email(user)
            #-----------------------------------------------------------#
            messages.success(request, 'Account creato,benvenuto,' + username + '!')
            return redirect('/landing_page/')
    # Invalid POSTs fall through here with the bound form so errors are shown.
    return render(request, 'landing_page/registrati.html', {'form' : form, 'contenuto' : contenuto, 'footer':footer , 'titoli':titoli, 'hero': hero})
def lavora_con_noi(request):
    """'Work with us' page: show the application form and save valid
    submissions (including uploaded files), then redirect home."""
    hero = img_hero.objects.filter(current = True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    footer = link_footer.objects.filter(current=True).first()
    contenuto = content_lavora_con_noi.objects.filter(current=True).first()
    banner = banner_lavora_con_noi.objects.filter(current=True).first()
    if request.method == 'POST':
        # request.FILES is required so the uploaded attachment reaches the form.
        forml = lavoraconnoi(request.POST, request.FILES)
        if forml.is_valid():
            forml.save()
            return redirect('/landing_page/')
    else:
        forml = lavoraconnoi()
    return render(request, 'landing_page/lavora_con_noi.html', {'forml' : forml, 'footer':footer, 'contenuto':contenuto, 'banner':banner , 'titoli':titoli, 'hero': hero})
#login handling
def login(request):
    """Render the landing page with a CSRF token added to the context.

    Relies on ``django.template.context_processors.csrf`` imported at the top
    of the module; the original body referenced an undefined ``csrf`` name
    and raised NameError when called.
    """
    c = {}
    c.update(csrf(request))
    return render(request, 'landing_page/index.html', c)
def logout(request):
    """Log the current user out and redirect to the home page.

    The original body called ``logout(request)`` — i.e. this very view (the
    ``django.contrib.auth.logout`` import is shadowed by this def), recursing
    forever — and also never returned the redirect.  Use the namespaced
    ``auth.logout`` helper and return a response instead.
    """
    auth.logout(request)
    return redirect('landing_page/index')
#validate the credentials submitted from "login"
def authentication(request):
    """Authenticate POSTed credentials; log the user in on success and
    redirect to the matching account status page.

    ``auth`` and ``HttpResponseRedirect`` are imported at module top — the
    original referenced both without importing them (NameError at runtime).
    """
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    user = auth.authenticate(username = username, password = password)
    if user is not None:
        auth.login(request, user)
        return HttpResponseRedirect('/accounts/loggedin')
    else:
        return HttpResponseRedirect('/accounts/invalid')
#post handling
#post list
#-------------------------------------------------------------------------------------------------------------------------------------------------------#
def post_list(request, tag_slug=None):
    """Paginated list of published posts (3 per page), optionally filtered by
    a tag slug."""
    hero = img_hero.objects.filter(current = True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    footer = link_footer.objects.filter(current=True).first()
    object_list = Post.published.all()
    tag = None
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        object_list = object_list.filter(tags__in=[tag])
    # 3 posts per page
    paginator = Paginator(object_list, 3)
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # page is not an integer: deliver the first page
        posts = paginator.page(1)
    except EmptyPage:
        # page is out of range: deliver the last page
        posts = paginator.page(paginator.num_pages)
    return render(request, 'landing_page/post/list.html', {'page': page, 'posts': posts, 'tag': tag, 'footer':footer , 'titoli':titoli, 'hero': hero})
#-------------------------------------------------------------------------------------------------------------------------------------------------------#
#single post view
def post_detail(request, year, month, day, post):
    """Render one published post with its active comments, the comment form
    and up to four similar posts (ranked by shared tags, then recency).

    Fixes over the original: an invalid POSTed comment form previously fell
    off the end of the function and returned ``None`` (an invalid Django
    response); the similar-posts query was also duplicated in both branches.
    """
    hero = img_hero.objects.filter(current = True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    footer = link_footer.objects.filter(current=True).first()
    post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day)
    # active comments on this post
    commenti = post.commenti.filter(attivo=True)
    nuovo_commento = None
    if request.method == 'POST':
        # a comment has been posted
        form_commento = FormCommenti(data=request.POST)
        if form_commento.is_valid():
            # create the Comment object without saving it to the db yet
            nuovo_commento = form_commento.save(commit=False)
            # attach the current post to the comment
            nuovo_commento.post = post
            # save the comment to the db
            nuovo_commento.save()
        # on an invalid form, fall through and re-render with the bound form
    else:
        form_commento = FormCommenti()
    # list of similar posts
    post_tags_ids = post.tags.values_list('id', flat=True)
    post_simili = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
    post_simili = post_simili.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish')[:4]
    return render(request, 'landing_page/post/detail.html', {'post' : post, 'commenti' : commenti, 'nuovo_commento' : nuovo_commento, 'form_commento' : form_commento, 'post_simili' : post_simili, 'footer':footer , 'titoli':titoli, 'hero': hero})
#share a post via e-mail
def post_share(request, post_id):
    """Share a published post by e-mail through a small contact form."""
    hero = img_hero.objects.filter(current = True).first()
    titoli = titoli_landing_page.objects.filter(current=True).first()
    post = get_object_or_404(Post, id=post_id, status='published')
    sent = False
    if request.method == 'POST':
        form = EmailPostForm(request.POST)
        if form.is_valid():
            # form data passed validation
            cd = form.cleaned_data
            post_url = request.build_absolute_uri(post.get_absolute_url())
            titolo = f"{cd['name']} vorrebbe farti leggere questo: {post.title}"
            messaggio = f"{post_url}\n\n {cd['comments']}"
            # actually send the e-mail
            #-----------------------------------------------------------------#
            # NOTE(review): sender address is hard-coded; consider
            # settings.DEFAULT_FROM_EMAIL.
            send_mail(titolo,messaggio,'espositogerardo94@gmail.com',[cd['to']])
            #-----------------------------------------------------------------#
            sent = True
    else:
        form = EmailPostForm()
    return render(request, 'landing_page/post/share.html',{'post' : post, 'form' : form, 'sent' : sent, 'hero': hero })
| gitsh1t/vetrina_test | landing_page/views.py | views.py | py | 12,116 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "models.Post.objects.all",
"line_number": 35,
"usage_type": "call"
},
... |
5657068183 | import json
import os
import requests
from utils import is_snapshot_week, get_dependency_version, get_latest_tag, get_snapshot_branch, \
get_dependency_version_from_tags
# GitHub API token from the environment; every GitHub request below sends it.
github_token = os.getenv("GITHUB_TOKEN")
headers = {"Authorization": "Bearer " + github_token}
def build_message():
    """Build the Slack status message for the iOS Navigation SDK snapshot.

    Decides from the navigation-ios releases whether a snapshot is due this
    week; if not, returns early with just that line.  Otherwise it also
    reports whether the Maps and Nav Native dependency releases are ready
    and which snapshot branch will be used.
    """
    message = '@navigation-ios '
    releases_url = "https://api.github.com/repos/mapbox/mapbox-navigation-ios/releases"
    releases = requests.get(releases_url, headers=headers).json()
    if is_snapshot_week(releases):
        message += 'Navigation SDK snapshot must be released today (rc or GA release was not released this week).\n'
    else:
        message += 'Navigation SDK snapshot must not be released today (rc or GA release was released this week).\n'
        # No snapshot needed: stop here, the dependency checks are irrelevant.
        return message
    # Maps dependency: look through its GitHub releases for the expected version.
    maps_releases = requests.get(
        'https://api.github.com/repos/mapbox/mapbox-maps-ios/releases',
        headers=headers
    ).json()
    maps_version = get_dependency_version(maps_releases)
    if maps_version:
        message += ':white_check_mark: Maps ' + maps_version + ' is ready.\n'
    else:
        message += ':siren: Expected Maps release was not released.\n'
    # Nav Native publishes tags rather than releases, hence the tags helper.
    nav_native_tags = requests.get(
        'https://api.github.com/repos/mapbox/mapbox-navigation-native-ios/tags',
        headers=headers
    ).json()
    nav_native_version = get_dependency_version_from_tags(nav_native_tags)
    if nav_native_version:
        message += ':white_check_mark: Nav Native ' + nav_native_version + ' is ready.\n'
    else:
        message += ':siren: Expected Nav Native release was not released.\n'
    # Derive the snapshot branch name from the latest navigation-ios tag.
    tags = requests.get('https://api.github.com/repos/mapbox/mapbox-navigation-ios/tags', headers=headers).json()
    latest_tag = get_latest_tag(tags)
    snapshot_branch = get_snapshot_branch(latest_tag)
    message += 'Snapshot branch is *' + snapshot_branch + '*.\n'
    message += '*Release time is today night.*\n'
    return message
def send_message(message):
    """Post ``message`` to the Slack webhook read from SLACK_WEBHOOK.

    ``link_names`` makes Slack resolve @mentions in the text.
    """
    payload = {'text': message, 'link_names': 1}
    slack_url = os.getenv("SLACK_WEBHOOK")
    requests.post(slack_url, data=json.dumps(payload))
# Module entry point: build the status message and post it to Slack.
# NOTE(review): this runs on import as well; consider an
# ``if __name__ == "__main__":`` guard.
message = build_message()
send_message(message)
| mapbox/mapbox-navigation-ios | scripts/snapshot/pre-snapshot-check.py | pre-snapshot-check.py | py | 2,170 | python | en | code | 821 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "utils.is_snapshot_week",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line... |
24842877273 | # -*- coding: utf-8 -*-
from os import path
import os
from wordcloud import WordCloud, STOPWORDS
import requests
import matplotlib.pyplot as plt
# from scipy.misc import imread
import numpy as np
from PIL import Image
import jieba
import jieba.posseg as pseg
import jieba.analyse
def makeCiyun(file_name):
    """Build a word cloud image ("xiaoguo.png") from the UTF-8 text file
    ``file_name`` located next to this script, shaped by the "255fk.jpg" mask.
    """
    d = path.dirname(__file__)
    # Read the whole text (close the file deterministically).
    with open(path.join(d, file_name), encoding="utf8") as fh:
        text = fh.read()
    # jieba.cut returns a generator of tokens, but WordCloud.generate expects
    # a single string — join the tokens with spaces before handing them over
    # (the original passed the generator directly, which fails).
    jieba_info = " ".join(jieba.cut(text, cut_all=True))
    font = os.path.join(os.path.dirname(__file__), "ziti.otf")
    imgmask = "255fk.jpg"
    alice_mask = np.array(Image.open(path.join(d, imgmask)))
    # lower max_font_size
    wordcloud = WordCloud(
        max_font_size=40, font_path=font, mask=alice_mask,
        stopwords=STOPWORDS
    ).generate(jieba_info)
    plt.figure()
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.show()
    wordcloud.to_file(path.join(d, "xiaoguo.png"))
import json
def getInfo(productId, page):
    """Fetch one page of JD.com product comments and return the comment texts
    as a newline-joined string.

    Args:
        productId: JD product id as a string.
        page: zero-based comment page index.
    """
    url = "https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv7667&productId=" + \
        productId + "&score=0&sortType=5&page=" + str(page) + "&pageSize=10&isShadowSku=0&fold=1"
    header = {
        'Host': 'club.jd.com',
        'Referer': "https://item.jd.com/" + productId + ".html"
    }
    content = requests.get(url, headers=header).content
    # Strip the JSONP wrapper "fetchJSON_comment98vv7667(...);" — the prefix
    # length and the trailing two characters are hard-coded, so this breaks
    # if JD ever changes the callback name.
    content = content[len("fetchJSON_comment98vv7667("):-2]
    # print(type(content))
    # open("li.txt", 'w').write(str(content))
    # print(content)
    # NOTE(review): assumes the payload is GBK-encoded — confirm against the
    # response headers.
    content = json.loads((content).decode("GBK"))
    comments = content['comments']
    infos = ""
    for item in comments:
        # print(item['content'])
        # files.write(item['content'] + "\n")
        infos += item['content'] + "\n"
        # break
    return infos
    # print(content)
    # files.close()
def start(productId):
    """Scrape the first 100 comment pages for ``productId`` into
    "jd_<productId>.txt" and then build the word cloud from it."""
    file_name = "jd_" + productId + ".txt"
    # Remove any stale output from a previous run; only the "file does not
    # exist" case is expected, so don't swallow every exception as before.
    try:
        os.remove(file_name)
    except FileNotFoundError:
        pass
    # Context manager guarantees the file is flushed and closed.
    with open(file_name, 'a', encoding="utf8") as files:
        for i in range(100):
            infos = getInfo(productId, i)
            files.write(infos)
            print("finish", i)
        # trailing marker after all pages
        files.write("//*\n")
    makeCiyun(file_name)
# start("4213316")
# Module-level entry point: rebuild the word cloud from a previously scraped
# comment dump.  NOTE(review): runs on import; consider wrapping in an
# ``if __name__ == "__main__":`` guard.
makeCiyun("jd_4213316.txt")
| Montage-LSM/ciyun | index_jieba.py | index_jieba.py | py | 2,267 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,... |
27006538329 | """Pytest fixtures for huesensors tests."""
from copy import deepcopy
from unittest.mock import MagicMock, patch
import pytest
from aiohue import Bridge
from aiohue.sensors import GenericSensor
from homeassistant.components.hue import DOMAIN as HUE_DOMAIN
from homeassistant.components.hue import HueBridge
from homeassistant.components.hue.sensor_base import SensorManager
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util import slugify
from custom_components.huesensor.data_manager import (
BINARY_SENSOR_MODELS,
HueSensorBaseDevice,
HueSensorData,
)
from .sensor_samples import (
MOCK_GEOFENCE,
MOCK_ZLLLightlevel,
MOCK_ZLLPresence,
MOCK_ZLLTemperature,
)
DEV_ID_SENSOR_1 = "SML_00:17:88:01:02:00:af:28-02"
async def entity_test_added_to_hass(
    data_manager: HueSensorData,
    entity: HueSensorBaseDevice,
):
    """Test routine to mock the internals of async_added_to_hass.

    Assigns hass and a platform-appropriate entity_id, runs the real
    ``async_added_to_hass`` hook, and asserts the data manager registered
    the entity.
    """
    entity.hass = data_manager.hass
    # Binary-sensor models get a binary_sensor entity_id; everything else a remote one.
    if entity.unique_id.startswith(BINARY_SENSOR_MODELS):
        entity.entity_id = f"binary_sensor.test_{slugify(entity.name)}"
    else:
        entity.entity_id = f"remote.test_{slugify(entity.name)}"
    await entity.async_added_to_hass()
    # Registration side effects: manager reports available and tracks the entity.
    assert data_manager.available
    assert entity.unique_id in data_manager.sensors
class MockAsyncCounter:
    """
    Call counter for the hue data coordinator.

    Used to mock and count bridge updates done with
    `await bridge.sensor_manager.coordinator.async_request_refresh()`:
    calling the instance bumps the counter and hands back the instance
    itself, which is awaitable, so ``await counter()`` works transparently.
    """

    _counter: int = 0

    def __call__(self, *args, **kwargs):
        """Record one call and return ``self`` so the result can be awaited."""
        self._counter = self._counter + 1
        return self

    def __await__(self):
        """Make the instance awaitable as a no-op coroutine."""
        yield

    @property
    def call_count(self) -> int:
        """Number of times the instance has been called so far."""
        return self._counter
def add_sensor_data_to_bridge(bridge, sensor_key, raw_data):
    """Append a sensor raw data packed to the mocked bridge.

    The raw dict is deep-copied so tests can mutate the stored data without
    corrupting the shared sample fixtures.
    """
    bridge.sensors[sensor_key] = GenericSensor(
        raw_data["uniqueid"], deepcopy(raw_data), None
    )
def _make_mock_bridge(idx_bridge, *sensors):
    """Build a mocked aiohue Bridge preloaded with the given raw sensor dicts.

    ``idx_bridge`` is folded into each sensor key so sensors from different
    mocked bridges never collide.
    """
    bridge = MagicMock(spec=Bridge)
    bridge.sensors = {}
    for i, raw_data in enumerate(sensors):
        add_sensor_data_to_bridge(
            bridge, f"{raw_data['type']}_{idx_bridge}_{i}", raw_data
        )
    return bridge
def _mock_hue_bridges(bridges):
    """Wrap raw mocked bridges in HueBridge mocks, keyed by list index.

    Each HueBridge gets a SensorManager whose coordinator counts refresh
    requests via MockAsyncCounter, mimicking the hue integration layout.
    """
    # mocking HueBridge at homeassistant.components.hue level
    hue_bridges = {}
    for i, bridge in enumerate(bridges):
        coordinator = MagicMock(spec=DataUpdateCoordinator)
        coordinator.async_request_refresh = MockAsyncCounter()
        sensor_manager = MagicMock(spec=SensorManager)
        sensor_manager.coordinator = coordinator
        hue_bridge = MagicMock(spec=HueBridge)
        hue_bridge.api = bridge
        hue_bridge.sensor_manager = sensor_manager
        hue_bridges[i] = hue_bridge
    return hue_bridges
@pytest.fixture
def mock_hass():
    """Mock HA object for tests, including some sensors in hue integration.

    Bridge 0 carries the presence/light-level/temperature samples; bridge 1
    carries only the geofence sample.
    """
    hass = MagicMock(spec=HomeAssistant)
    hass.data = {
        HUE_DOMAIN: _mock_hue_bridges(
            [
                _make_mock_bridge(
                    0,
                    MOCK_ZLLPresence,
                    MOCK_ZLLLightlevel,
                    MOCK_ZLLTemperature,
                ),
                _make_mock_bridge(1, MOCK_GEOFENCE),
            ]
        )
    }
    hass.config = MagicMock()
    hass.states = MagicMock()
    return hass
def patch_async_track_time_interval():
    """Mock hass.async_track_time_interval for tests.

    Patches the name where data_manager looks it up, so the component under
    test never schedules real timers.
    """
    return patch(
        "custom_components.huesensor.data_manager.async_track_time_interval",
        autospec=True,
    )
| robmarkcole/Hue-sensors-HASS | tests/conftest.py | conftest.py | py | 3,842 | python | en | code | 346 | github-code | 36 | [
{
"api_name": "custom_components.huesensor.data_manager.HueSensorData",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "custom_components.huesensor.data_manager.HueSensorBaseDevice",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "custom_components.huesensor.d... |
74791355622 | import constant
from loguru import logger
from managers import AudioManager
from threading import Event, Thread
class Autonomous(object):
    """Background loop that periodically plays a random sound from the
    AUTO_CATEGORY via the given audio manager.

    The internal Event is *set* when idle and *cleared* while the loop runs.
    """

    def __init__(self, audio_manager: AudioManager):
        self.audio_manager = audio_manager
        # Set -> not running; cleared -> the background loop is active.
        self.event: Event = Event()
        self.event.set()
        self.thread: Thread | None = None

    def start(self):
        """Spawn the automation thread unless it is already running."""
        if not self.event.is_set():
            return
        logger.info("Starting Automation")
        self.event.clear()
        # Keep a handle on the worker thread (the original declared
        # ``self.thread`` but discarded the thread it started).
        self.thread = Thread(target=self.run, name="Autonomous Thread")
        self.thread.start()

    def stop(self):
        """Signal the loop to end; the thread exits after its current wait."""
        if self.event.is_set():
            return
        logger.info("Stopping Automation")
        self.event.set()

    def toggle(self):
        """Start automation if stopped, stop it if running."""
        if self.event.is_set():
            self.start()
        else:
            self.stop()

    def run(self):
        """Play one sound immediately, then one every AUTO_INTERVAL seconds
        until the stop event is set."""
        self.audio_manager.play_random_sound(constant.AUTO_CATEGORY)
        while not self.event.wait(constant.AUTO_INTERVAL):
            self.audio_manager.play_random_sound(constant.AUTO_CATEGORY)
| dezil/R2 | autonomous.py | autonomous.py | py | 1,035 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "managers.AudioManager",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "threading.Event",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "loguru.logger.... |
24454040368 | import time
from tqdm.auto import tqdm
def show_info_me():
    """Print the colleague card for Левченко Алексей followed by a separator."""
    about_me = {
        'ФИО': 'Левченко Алексей',
        'Должность': 'Ведущий исследователь данных',
        'Блок': 'Технологии',
        'Делаю': 'рекомендательные системы в HR',
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line; the original's for/else always ran its else branch
    # (no break in the loop), so a plain statement after the loop is equivalent
    print('_' * 30)
def show_info_patrik():
    """Print the colleague card for Патрикеев Михаил followed by a separator."""
    about_me = {
        'ФИО': 'Патрикеев Михаил Алексеевич',
        'Должность': 'Ведущий инженер по разработке',
        'Блок': 'Розничный бизнес',
        'Делаю': 'Тестирую социальные и зарплатные решения',
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def show_info_you_v():
    """Print the colleague card for Валерий (Самара) followed by a separator."""
    about_me = {
        'Имя': 'Валерий',
        'Город': 'Самара',
        'Должность': 'Главный специалист',
        'Блок': 'Операционный центр',
        'Занимаюсь': 'Анализом счетов по банковским картам'
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def show_info_zylkov():
    """Print the colleague card for Зыльков Павел followed by a separator."""
    about_me = {
        'ФИО': 'Зыльков Павел',
        'Должность': 'Инженер по сопровождению',
        'Блок': 'Технологии',
        'Делаю': 'Социальные и зарплатные решения'
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def show_info_me_():
    """Print the colleague card for Шахиев Азамат followed by a separator."""
    about_me = {
        'ФИО': 'Шахиев Азамат Рафикович',
        'Должность': 'Старший специалист отдела безопасности',
        'Блок': 'Сервисы',
        'Делаю': 'Сопровождение технических средств безопасности'
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def info_pro_menya():
    """Print the colleague card for Нетяга Светлана followed by a separator."""
    about_me = {
        'ФИО': 'Нетяга Светлана',
        'Должность': 'Главный аудитор',
        'Подразделение': 'Управление внутреннего аудита',
        'Делаю': 'анализ розничных некредитных операций'
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def show_info_me2():
    """Print the colleague card for Солодова Наталья followed by a separator."""
    about_me = {
        'ФИО': 'Солодова Наталья',
        'Должность': 'клиентский менеджер',
        'Блок': 'ДомКлик'
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def info_pro_menya_ii():
    """Print the colleague card for Исайкина Ирина followed by a separator."""
    about_me = {
        'ФИО': 'Исайкина Ирина',
        'Должность': 'Заместитель руководителя ВСП',
        'Подразделение': 'ВСП',
        'Делаю': 'занимаюсь обслуживанием клиентов и решением их проблем',
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def show_info_me_eb():
    """Print the colleague card for Евгений Бодягин followed by a separator."""
    about_me = {
        'ФИО': 'Евгений Бодягин',
        'Должность': 'Эксперт Центра подбора в инновационные направления бизнеса',
        'Блок': 'HR',
        'Делаю': 'Методологию подбора, в т.ч. и подбор D-people и сбор статистики по подбору'
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def show_info_you_eg():
    """Print the colleague card for Евгений Головачев followed by a separator."""
    about_me = {
        'ФИО': 'Евгений Головачев',
        'Город': 'Самара',
        'Должность': 'клиентский менеджер',
        'Блок': 'ДомКлик',
        'Занимаюсь': 'Помощь клиентам ипотечного кредитования'
    }
    for k, v in about_me.items():
        print(f'{k}:{v}')
    # separator line (the original's for/else always ran its else branch)
    print('_' * 30)
def info_pro_menya_av():
about_me = {
'ФИО': 'Аня Великобратова',
'Должность': 'КМ',
'Подразделение': 'КИБ СРБ'
}
for k, v in about_me.items():
print(f'{k}:{v}')
else:
print('_' * 30)
info_list = [
show_info_me,
info_pro_menya,
show_info_me_,
show_info_me2,
show_info_patrik,
show_info_you_eg,
info_pro_menya_ii,
show_info_you_v,
show_info_zylkov,
show_info_me_eb,
info_pro_menya_av
]
if __name__ == "__main__":
for show_info in info_list:
show_info()
for i in tqdm(range(30)):
time.sleep(1)
print('Спасибо за инфо!')
print('_' * 30)
| kcundel/python_da_course | Lesson1/about.py | about.py | py | 5,203 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "tqdm.auto.tqdm",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 173,
"usage_type": "call"
}
] |
37039563232 | from django.contrib.gis.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Country(models.Model):
"""Class for country info"""
name = models.CharField(max_length=255)
shortname = models.CharField(max_length=3)
geom = models.MultiPolygonField(srid=4326)
class Meta:
ordering = ["name"]
db_table = "country"
def __unicode__(self):
return smart_str(self.name)
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class Project(models.Model):
""""""
name = models.CharField(max_length=100)
start_year = models.IntegerField()
pager_status = models.URLField(null=True, blank=True)
mailing_list = models.URLField(null=True, blank=True)
template_link = models.URLField(null=True, blank=True)
termsofuse_link = models.URLField(null=True, blank=True)
class Meta:
ordering = ["name"]
db_table = "project"
def __unicode__(self):
return smart_str(self.name)
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class Organization(models.Model):
"""This table is useful to save info about organizations """
name = models.CharField(max_length=255)
shortname = models.CharField(max_length=25)
address = models.CharField(max_length=250, null=True, blank=True)
city = models.CharField(max_length=250, null=True, blank=True)
country = models.ForeignKey(
Country, on_delete=models.PROTECT, null=True, blank=True
)
email = models.EmailField(null=True, blank=True)
website = models.URLField(max_length=150, null=True, blank=True)
image = models.ImageField(upload_to="logo/organizations/", null=True, blank=True)
geom = models.PointField(
srid=4326,
null=True,
blank=True,
help_text=_("The position of the organization"),
)
class Meta:
ordering = ["name"]
db_table = "organization"
def __unicode__(self):
if self.country:
return smart_str(
"{na} ({co})".format(na=self.name, co=self.country.shortname)
)
else:
return smart_str("{na}".format(na=self.name))
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class ResearchGroup(models.Model):
""""""
name = models.CharField(max_length=255)
shortname = models.CharField(max_length=25)
organization = models.ForeignKey(Organization, on_delete=models.PROTECT)
email = models.EmailField(null=True, blank=True)
website = models.URLField(max_length=150, null=True, blank=True)
image = models.ImageField(upload_to="logo/organizations/", null=True, blank=True)
projects = models.ManyToManyField(Project, through='ResearchGroupProject')
geom = models.PointField(
srid=4326,
null=True,
blank=True,
help_text=_("The position of the organization"),
)
class Meta:
ordering = ["name"]
db_table = "research_group"
def __unicode__(self):
if self.organization.shortname:
return smart_str(
"{na} ({co})".format(na=self.name, co=self.organization.shortname)
)
if self.organization.name:
return smart_str(
"{na} ({co})".format(na=self.name, co=self.organization.name)
)
else:
return smart_str("{na}".format(na=self.name))
def __str__(self):
return self.__unicode__()
def natural_key(self):
return self.__unicode__()
class ResearchGroupProject(models.Model):
researchgroup = models.ForeignKey(ResearchGroup, on_delete=models.PROTECT)
project = models.ForeignKey(Project, on_delete=models.PROTECT)
year = models.IntegerField()
contact_people = models.TextField()
class User(AbstractUser):
"""Extent the abstract user class"""
bio = models.TextField(max_length=500, null=True, blank=True)
image = models.ImageField(upload_to="users/", null=True, blank=True)
#TODO could a person be connected with more then one group?
research_group = models.ManyToManyField(ResearchGroup)
projects = models.ManyToManyField(Project)
euromammals_username = models.TextField(max_length=500, null=True, blank=True)
| EUROMAMMALS/website | core/models.py | models.py | py | 4,541 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.gis.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.gis.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.contrib.gis.db.models.CharField",
"line_number": 11,
"usage_typ... |
8522405005 | import unittest
from BaseTestCases.BaseTestCase import BaseTestCase, os
from Pages.Deployment_Group import DG_Create
from Pages.LoginPage import LoginPage
from DataSource.read_excel import read_excel
from time import sleep
from ddt import ddt,data,unpack
@ddt
class test_DG_Create (BaseTestCase):
@data(*read_excel.get_data_from_excel(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) + '\Data\Test_Data.xlsx','DG'))
@unpack
def test_Create_DG(self,DGname,DGdesc,DGDB):
self.driver.implicitly_wait(30)
LoginPage.login(self,'Administrator','P@ssw0rd')
sleep(3)
DG_Create.DG_screenlink(self)
sleep(3)
DG_Create.DG_createlink(self)
sleep(3)
DG_Create.DG_DetailsPopup(self,DGname,DGdesc,int(DGDB))
#Actual_Msg = DG_Create.DG_toast
DG_Create.save_close_btn(self)
sleep(3)
self.assertEqual(DG_Create.Toast(self),"Deployment Group " + DGname + " has been created.")
# self.assertTrue(DG_Create.DG_toast(DGname))
#print(DG_Create.DG_toast)
if __name__ == '__main__':
unittest.main()
| EFarag/ACE_Project | TestCases/test_DG_Valid_create.py | test_DG_Valid_create.py | py | 1,130 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "BaseTestCases.BaseTestCase.BaseTestCase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "Pages.LoginPage.LoginPage.login",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Pages.LoginPage.LoginPage",
"line_number": 18,
"usage_type": "nam... |
34627321771 | from pathlib import Path
import os
import datetime
import json
import h5py
import numpy as np
import pandas as pd
import click
import tensorflow as tf
from src.data.tf_data_hdf5 import get_tf_data, RandomStandardization
from src.models.models import unet_model, unetclassif_model
from src.models.losses import CustomLoss, MaskedDiceLoss
from src.models.callbacks import EarlyStopping
from src.models.evaluation import evaluate_pred_volume
DEBUG = False
project_dir = Path(__file__).resolve().parents[2]
splits_path = project_dir / "data/splits.json"
if DEBUG:
EPOCHS = 3
else:
EPOCHS = 400
plot_only_gtvl = False
@click.command()
@click.option("--config", type=click.Path(exists=True))
@click.option("--upsampling-kind", type=click.STRING, default="upsampling")
@click.option("--split", type=click.INT, default=0)
@click.option("--alpha", type=click.FLOAT, default=0.25)
@click.option("--w-gtvl", type=click.FLOAT, default=1.0)
@click.option("--w-gtvt", type=click.FLOAT, default=0.0)
@click.option("--w-lung", type=click.FLOAT, default=0.0)
@click.option("--gpu-id", type=click.STRING, default="0")
@click.option("--random-angle", type=click.FLOAT, default=None)
@click.option("--center-on", type=click.STRING, default="GTVl")
@click.option("--loss-type", type=click.STRING, default="sum_of_dice")
@click.option('--oversample/--no-oversample', default=False)
@click.option('--pretrained/--no-pretrained', default=True)
@click.option('--multitask/--no-multitask', default=False)
@click.option("--random-position/--no-random-position", default=True)
def main(config, upsampling_kind, split, alpha, w_gtvl, w_gtvt, w_lung, gpu_id,
random_angle, center_on, loss_type, oversample, pretrained, multitask,
random_position):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
h5_file = h5py.File(
project_dir / "data/processed/hdf5_2d/data_selected_slices.hdf5", "r")
if not pretrained:
n_channels = 2
else:
n_channels = 3
if oversample:
steps_per_epoch = 40
else:
steps_per_epoch = None
clinical_df = pd.read_csv(
project_dir /
"data/clinical_info_with_lung_info.csv").set_index("patient_id")
with open(splits_path, "r") as f:
splits_list = json.load(f)
ids_train = splits_list[split]["train"]
ids_val = splits_list[split]["val"]
ids_test = splits_list[split]["test"]
preprocessor = RandomStandardization()
preprocessor_nrdm = RandomStandardization(p=0.0)
if multitask:
f = lambda x, y, plc_status, patient: (preprocessor(x),
(y, plc_status))
f_nrdm = lambda x, y, plc_status, patient: (preprocessor_nrdm(x),
(y, plc_status))
else:
f = lambda x, y, plc_status, patient: (preprocessor(x), y)
f_nrdm = lambda x, y, plc_status, patient: (preprocessor_nrdm(x), y)
ds_train = get_tf_data(h5_file,
clinical_df,
patient_list=ids_train,
shuffle=True,
oversample=oversample,
random_angle=random_angle,
random_position=random_position,
center_on=center_on,
n_channels=n_channels).map(f).batch(16)
ds_val = get_tf_data(h5_file,
clinical_df,
patient_list=ids_val,
center_on="GTVl",
random_slice=False,
n_channels=n_channels).map(f_nrdm).batch(4)
ids_val_pos = [p for p in ids_val if clinical_df.loc[p, "plc_status"] == 1]
ids_val_neg = [p for p in ids_val if clinical_df.loc[p, "plc_status"] == 0]
ds_sample = get_tf_data(h5_file,
clinical_df,
patient_list=ids_val_pos[:2] + ids_val_neg[:1],
center_on="GTVt",
random_slice=False,
n_channels=n_channels).map(f_nrdm).batch(3)
if multitask:
sample_images, sample_outputs = next(
ds_sample.take(1).as_numpy_iterator())
sample_seg = sample_outputs[0]
model = unetclassif_model(3,
upsampling_kind=upsampling_kind,
pretrained=pretrained)
else:
sample_images, sample_seg = next(ds_sample.take(1).as_numpy_iterator())
model = unet_model(3,
upsampling_kind=upsampling_kind,
pretrained=pretrained)
sample_seg = np.stack(
[sample_seg[..., 0], sample_seg[..., 1], sample_seg[..., -1]], axis=-1)
if multitask:
losses = [
MaskedDiceLoss(
w_lung=w_lung,
w_gtvt=w_gtvt,
w_gtvl=w_gtvl,
),
tf.keras.losses.BinaryCrossentropy()
]
else:
losses = MaskedDiceLoss(
w_lung=w_lung,
w_gtvt=w_gtvt,
w_gtvl=w_gtvl,
)
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-3),
loss=losses,
run_eagerly=False,
)
dir_name = (
"unet__" +
f"prtrnd_{pretrained}__a_{alpha}__wt_{w_gtvt}__wl_{w_lung}__wgtvl_{w_gtvl}"
f"upsmpl_{upsampling_kind}__" +
f"split_{split}__ovrsmpl_{oversample}__" + f"con_{center_on}" +
f"ltyp_{loss_type}__mltsk_{multitask}__" +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
callbacks = list()
if not DEBUG:
log_dir = str((project_dir / ("logs/fit/" + dir_name)).resolve())
callbacks.append(
tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1))
file_writer_image = tf.summary.create_file_writer(log_dir + '/images')
def log_prediction(epoch, logs):
# Use the model to predict the values from the validation dataset.
if multitask:
sample_pred, sample_pred_pstatus = model.predict(sample_images)
else:
sample_pred = model.predict(sample_images)
if plot_only_gtvl:
sample_pred[..., 0] = 0
sample_pred[..., 2] = 0
# Log the confusion matrix as an image summary.
with file_writer_image.as_default():
tf.summary.image("Validation images",
np.stack(
[
sample_images[..., 0],
sample_images[..., 1],
np.zeros_like(sample_images[..., 0]),
],
axis=-1,
),
step=epoch)
tf.summary.image("Predictions", sample_pred, step=epoch)
tf.summary.image("GTs", sample_seg, step=epoch)
callbacks.extend([
tf.keras.callbacks.LambdaCallback(on_epoch_end=log_prediction),
EarlyStopping(
minimal_num_of_epochs=350,
monitor='val_loss',
patience=20,
verbose=0,
mode='min',
restore_best_weights=True,
)
])
model.fit(
x=ds_train,
epochs=EPOCHS,
validation_data=ds_val,
callbacks=callbacks,
steps_per_epoch=steps_per_epoch,
)
if multitask:
model.trainable = True
callbacks.pop(-1)
callbacks.append(
EarlyStopping(
minimal_num_of_epochs=0,
monitor='val_loss',
patience=20,
verbose=0,
mode='min',
restore_best_weights=True,
))
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss=losses,
run_eagerly=False,
)
model.fit(
x=ds_train,
epochs=EPOCHS,
validation_data=ds_val,
callbacks=callbacks,
steps_per_epoch=steps_per_epoch,
)
model_dir = project_dir / ("models/" + dir_name)
model_dir.mkdir()
model.save(model_dir / "model_weight")
roc_test = evaluate_pred_volume(
model,
ids_test,
h5_file,
clinical_df,
n_channels=n_channels,
multitask=multitask,
preprocessor=preprocessor_nrdm,
)
roc_val = evaluate_pred_volume(
model,
ids_val,
h5_file,
clinical_df,
n_channels=n_channels,
multitask=multitask,
preprocessor=preprocessor_nrdm,
)
print(f"The ROC AUC for the val and "
f"test are {roc_val} and {roc_test} respectively.")
if __name__ == '__main__':
main() | voreille/plc_segmentation | src/models/train_model.py | train_model.py | py | 9,009 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "h5py.File",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_nu... |
24844526241 | from django.contrib import admin
from .models import PrivateChat, Message
# Register your models here.
@admin.register(PrivateChat)
class PrivateChatAdmin(admin.ModelAdmin):
"""Filters, displays and search for django admin"""
list_filter = ('user1', 'user2', )
list_display = ('user1', 'user2')
search_fields = ['user1', 'user2']
@admin.register(Message)
class MessageAdmin(admin.ModelAdmin):
"""Filters, displays and search for django admin"""
list_filter = ('chat', 'sender', )
list_display = ('chat', 'sender', 'message', 'date')
search_fields = ['chat', 'sender'] | lexach91/DateLoc | chat/admin.py | admin.py | py | 605 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 5,
"usage_type": "call"
},
{
... |
20087427863 | from argparse import ArgumentParser
from copy import deepcopy
from pathlib import Path
def build_parser():
parser = ArgumentParser()
parser.add_argument(
'-i', '--input-filename', type=Path,
required=True
)
return parser
def string_to_integer_list(string):
return list(map(int, string))
def get_scenic_score(sequence):
scenic_score = [0] * len(sequence)
for index, element in enumerate(sequence):
left, right = sequence[:index], sequence[index + 1:]
left = left[::-1]
edge = not left or not right
if edge:
continue
score_left = next(
(
index for index, comparison_value in enumerate(left, 1)
if comparison_value >= element
), len(left)
)
score_right = next(
(
index for index, comparison_value in enumerate(right, 1)
if comparison_value >= element
), len(right)
)
scenic_score[index] = score_left * score_right
return scenic_score
def transpose(matrix):
return list(zip(*matrix))
def main():
args = build_parser().parse_args()
with open(args.input_filename) as fd:
data = fd.read()
lines = data.splitlines()
grid = [
string_to_integer_list(line)
for line in lines
]
row_scenic_score = [
get_scenic_score(row)
for row in grid
]
column_scenic_score = transpose([
get_scenic_score(column)
for column in transpose(grid)
])
total_scenic_score = deepcopy(row_scenic_score)
for row_index in range(len(column_scenic_score)):
for col_index in range(len(column_scenic_score)):
total_scenic_score[row_index][col_index] = (
row_scenic_score[row_index][col_index] *
column_scenic_score[row_index][col_index]
)
print(max(max(row) for row in total_scenic_score))
if __name__ == '__main__':
main()
| reynoldscem/aoc2022 | day_08/part2.py | part2.py | py | 2,023 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 73,
"usage_type": "call"
}
] |
33646432106 | import asyncio
import threading
import time
import speech_recognition as sr
r = sr.Recognizer()
# def do(audio):
def srcVoice(n, audio):
for i in range(n, 0, -1):
print('sssssss')
# threading.Thread(target=r.recognize_google, args=(audio))
words = r.recognize_google(audio)
print(words, '$$$$$$$$$$$$')
break
def audioLis(source):
print('running')
try:
audio = r.listen(source, 3, 6)
try:
DN = threading.Thread(target=srcVoice, args=(1, audio))
DN.start()
except Exception as e:pass
except Exception as e:
print(e, '<<<<<<<<<<<<<<<<<<<')
source = 'None'
with sr.Microphone() as source:
while True:
audioLis(source)
# async
# await
| giribabu22/assistant-Nikki-python | thread_voice_src/script.py | script.py | py | 770 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "speech_recognition.Recognizer",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 31,
"usage_type": "call"
}
] |
27653577571 | from pages.courses.register_courses_page import Register_courses_page
import unittest
import pytest
from utilities.teststatus import StatusVerify
@pytest.mark.usefixtures("oneTimeSetUp", "setUp")
class Register_course_tests(unittest.TestCase):
@pytest.fixture(autouse=True)
def classSetup(self, oneTimeSetUp):
self.rcp = Register_courses_page(self.driver)
self.ts = StatusVerify(self.driver)
@pytest.mark.run(order=1)
def test_Invalid_Enrollment(self):
self.rcp.enterCourseToEnroll ("Javascript")
self.rcp.selectCourseToEnroll()
self.rcp.enterCreditCardinformation("4900000000000086", "1218", "123", "560102")
# self.rcp.enterCardNumber("4900000000000086")
# self.rcp.enterCardExp("1218")
# self.rcp.enterCardCvc("123")
# self.rcp.enterpostalcode("560102")
self.rcp.enrollInCourse()
result = self.rcp.captureErrorMsg()
#self.ts.markFinal("test_Invalid_Enrollment", result, "The card was declined.")
assert result == "The card was declined."
| akanksha2306/selenium_python_practice | tests/courses/test_register_courses.py | test_register_courses.py | py | 1,073 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pages.courses.register_courses_page.Register_courses_page",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utilities.teststatus.StatusVerify",
"line_number": 12,
"u... |
1243604019 | from math import cos, pi, sin
import pygame as pg
from constants import consts as c
from id_mapping import id_map
from images import img as i
from ui.game_ui import ui
def move_player(keys_pressed):
if keys_pressed[pg.K_UP] or keys_pressed[pg.K_w]:
c.player_y -= c.player_speed * c.dt
if c.player_y < 0:
c.player_y = 0
if keys_pressed[pg.K_DOWN] or keys_pressed[pg.K_s]:
c.player_y += c.player_speed * c.dt
if c.player_y + c.sh > c.num_cells * c.cell_length:
c.player_y = c.num_cells * c.cell_length - c.sh
if keys_pressed[pg.K_LEFT] or keys_pressed[pg.K_a]:
c.player_x -= c.player_speed * c.dt
if c.player_x < 0:
c.player_x = 0
if keys_pressed[pg.K_RIGHT] or keys_pressed[pg.K_d]:
c.player_x += c.player_speed * c.dt
if c.player_x + c.sw > c.num_cells * c.cell_length:
c.player_x = c.num_cells * c.cell_length - c.sw
def get_pointer_params():
mouse_x, mouse_y = pg.mouse.get_pos()
cell_row = int((mouse_y + c.player_y) / c.cell_length)
cell_col = int((mouse_x + c.player_x) / c.cell_length)
cell_x = cell_col * c.cell_length - c.player_x + 2
cell_y = cell_row * c.cell_length - c.player_y + 2
return cell_row, cell_col, cell_x, cell_y
def draw_action(cell_x, cell_y):
pg.draw.circle(c.screen, c.action_color, (cell_x + c.cell_length // 2, cell_y + c.cell_length // 2), 4 * c.cell_length // 5, 2)
if c.const_state == 1:
c.screen.blit(i.images[id_map["conveyor"]][c.rot_state], (cell_x - 1, cell_y - 1))
ui.render_text("Place Conveyor: (L/R) to rotate")
if c.const_state == 2:
c.screen.blit(i.images[id_map["conveyor_underground"]][c.rot_state], (cell_x - 1, cell_y - 1))
translations = [(0, 1), (1, 0), (0, -1), (-1, 0)]
x = cell_x + translations[c.rot_state][0] * c.ug_state * c.cell_length
y = cell_y - translations[c.rot_state][1] * c.ug_state * c.cell_length
c.screen.blit(i.images[id_map["conveyor_underground"]][c.rot_state + 4], (x, y))
pg.draw.circle(c.screen, c.action_color, (x + c.cell_length // 2, y + c.cell_length // 2), 4 * c.cell_length // 5, 2)
ui.render_text("Place Underground Conveyor: (L/R) to rotate (Shift/Ctrl) to change length")
elif c.const_state == 3:
c.screen.blit(i.images[id_map["splitter"]][c.rot_state], (cell_x - 1, cell_y - 1))
translations = [[(-1, 1), (1, 1)], [(1, -1), (1, 1)], [(1, -1), (-1, -1)], [(-1, 1), (-1, -1)]]
x1 = cell_x + translations[c.rot_state][0][0] * c.cell_length
y1 = cell_y - translations[c.rot_state][0][1] * c.cell_length
x2 = cell_x + translations[c.rot_state][1][0] * c.cell_length
y2 = cell_y - translations[c.rot_state][1][1] * c.cell_length
pg.draw.rect(c.screen, c.target_color, (x1, y1, c.cell_length, c.cell_length), 3)
pg.draw.rect(c.screen, c.target_color, (x2, y2, c.cell_length, c.cell_length), 3)
ui.render_text("Place Splitter: (L/R) to rotate")
elif c.const_state == 4:
c.screen.blit(i.images[id_map["arm"]], (cell_x - 1, cell_y - 1))
angle = ((1 - (c.rot_state + 2) % 4) * pi / 2) % (2 * pi)
start_x = cell_x + c.cell_length // 2
start_y = cell_y + c.cell_length // 2
end_x = start_x + c.cell_length * cos(angle)
end_y = start_y - c.cell_length * sin(angle)
pg.draw.line(c.screen, c.arm_color, (start_x, start_y), (end_x, end_y), 2)
draw_source(cell_x, cell_y, c.rot_state)
draw_target(cell_x, cell_y, c.rot_state)
ui.render_text("Place Arm: (L/R) to rotate")
elif c.const_state == 5:
c.screen.blit(i.images[id_map["mine"]], (cell_x - 1, cell_y - 1))
draw_target(cell_x, cell_y, c.rot_state)
ui.render_text("Place Mine: (L/R) to rotate")
elif c.const_state == 6:
c.screen.blit(i.images[id_map["furnace"]], (cell_x - 1, cell_y - 1))
draw_target(cell_x, cell_y, c.rot_state)
ui.render_text("Place Furnace: (L/R) to rotate")
elif c.const_state == 7:
c.screen.blit(i.images[id_map["factory"]], (cell_x - 1, cell_y - 1))
draw_target(cell_x, cell_y, c.rot_state)
ui.render_text("Place Factory: (L/R) to rotate")
def draw_target(cell_x, cell_y, state):
translations = [(0, -1), (1, 0), (0, 1), (-1, 0)]
x = cell_x + translations[state][0] * c.cell_length
y = cell_y + translations[state][1] * c.cell_length
pg.draw.rect(c.screen, c.target_color, (x, y, c.cell_length, c.cell_length), 3)
def draw_source(source_x, source_y, state):
translations = [(0, 1), (-1, 0), (0, -1), (1, 0)]
x = source_x + translations[state][0] * c.cell_length
y = source_y + translations[state][1] * c.cell_length
pg.draw.rect(c.screen, c.source_color, (x, y, c.cell_length, c.cell_length), 3)
def draw_gridlines():
for x in range(0, c.num_cells * c.cell_length, c.cell_length):
pg.draw.line(c.screen, c.grid_color, (x - c.player_x, 0), (x - c.player_x, c.sh))
for y in range(0, c.num_cells * c.cell_length, c.cell_length):
pg.draw.line(c.screen, c.grid_color, (0, y - c.player_y), (c.sw, y - c.player_y)) | chanrt/py-factory | utils.py | utils.py | py | 5,214 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "pygame.K_UP",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_w",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "constants.consts.player_y",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "const... |
5066258041 | import math
import numpy as np
from queue import Queue, PriorityQueue
import time
import networkx as nx
import pymysql
def read_file(edges, degree, g_dict, connected_fields):
w = []
edge = {}
visit = {}
cnt = 1
sum = 0
n = 0
m = 0
for item in edges:
a = item[0]
b = item[1]
if a not in connected_fields or b not in connected_fields:
continue
m += 1
g_dict[(a, b)] = 1
g_dict[(b, a)] = 1
sum += 1
if a not in degree.keys():
degree[a] = 1
edge[a] = [b]
else:
degree[a] += 1
edge[a].append(b)
if b not in degree.keys():
degree[b] = 1
edge[b] = [a]
else:
degree[b] += 1
edge[b].append(a)
n = len(degree)
return n, m, degree, g_dict, sum, edge
def dfs(pos,id2tag,id2size,id2child):
if len(id2child[pos])==1:
return id2tag[pos],1,id2tag,id2size #返回叶子结点标签和大小
size=0
tag=0
tag2num={}
mx=0
for child in id2child[pos]:
child_tag,child_size,id2tag,id2size=dfs(child,id2tag,id2size,id2child)
if child_tag not in tag2num.keys():
tag2num[child_tag]=1
else:
tag2num[child_tag]+=1
if tag2num[child_tag]>mx:
mx=tag2num[child_tag]
mx_tag=child_tag
tag+=child_tag*child_size
size+=child_size
id2size[pos]=size
#id2tag[pos]=int(tag/size)
id2tag[pos]=mx_tag
return mx_tag,size,id2tag,id2size
def update(index, id2child, id2deep): # 更新qu[index]的所有子节点的深度
if len(id2child[index]) > 1:
for node in id2child[index]:
# qu[node]=(qu[node][0],qu[node][1],qu[node][2],qu[node][3],qu[node][4],qu[node][5],qu[node][6]+1)
id2deep[node] = id2deep[node] + 1
id2deep = update(node, id2child, id2deep)
return id2deep
def get_deep(index, id2child, id2deep):
deep = id2deep[index]
if len(id2child[index]) > 1:
for node in id2child[index]:
deep = max(deep, get_deep(node, id2child, id2deep))
return deep
def structual_entropy(edges, nodes, mx_deep,label,node_tags,dataset):
nodes = np.array(nodes)
edges = np.array(edges)
id2index = {j: i+1 for i, j in enumerate(nodes)} #从1编号
mapped_edge = np.array(list(map(id2index.get, edges.flatten())), dtype=np.int32).reshape(edges.shape)
nodes = [id2index[id] for id in nodes]
edges=list(mapped_edge)
degree = {}
g_dict = {}
n, m, degree, g_dict, sum, edge = read_file(edges, degree, g_dict, nodes)
#print("num of nodes:",n)
h1 = 0
#print(edges,nodes)
for i in range(1, n + 1):
h1 += (-degree[i] / (2.0 * sum) * math.log(degree[i] / (2.0 * sum), 2))
#print(h1)
nums = []
for i in range(1, n + 1):
nums.append(i)
qu = [(0, 2 * sum, [], [], 0)]
id2sister = {}
id2child = {0: nums}
id2deep = {0: 1}
id2fa = {0: -1}
I = {0:0}
for i in range(1, n + 1):
qu.append((degree[i], degree[i])) # 分别表示团的割边数,度的和
id2sister[i] = edge[i]
id2child[i] = [i]
id2deep[i] = 2
id2fa[i] = 0
I[i] = degree[i]
I[0]+=degree[i]
result = 0
cnt = n + 1
flag = True
#flag=False
flag2 = True
#flag2=False
delete_id = []
# print(id2sister)
iter = 1
while (flag or flag2):
# while(flag2):
flag2 = True
# while(flag2):
if flag2:
iter += 1
#print(iter)
mn = 1e9
mx = 1e-6
flag2 = False
for i in range(1, len(qu)):
if i in delete_id:
continue
item1 = qu[i]
g1 = item1[0]
for j in id2sister[i]:
item2 = qu[j]
if len(id2child[id2fa[i]]) <= 2 or j in delete_id:
# print("error")
continue
g2 = item2[0]
# new_edge=item1[3]+item2[3]
v = item1[1] + item2[1]
# new_node=item1[2]+item2[2]
v_fa = qu[id2fa[i]][1]
if (i, j) in g_dict.keys():
g = g1 + g2 - 2 * g_dict[(i, j)]
else:
g = g1 + g2
# 按照combine后熵减小最多的两个团combine
# 深度不能超过max_deep
if (g1 + g2 - g) / (2 * sum) * math.log((v_fa) / v, 2) > mx and get_deep(i, id2child,
id2deep) + 1 <= mx_deep and get_deep(
j, id2child, id2deep) + 1 <= mx_deep:
mx = (g1 + g2 - g) / (2 * sum) * math.log((v_fa) / v, 2)
add = mx
ans = (g, v)
id1 = i
id2 = j
flag2 = True
if flag2:
# print(len(qu),index1,index2)
#print('combine', id1, id2, cnt)
# 更新父节点
id2fa[cnt] = id2fa[id1]
id2fa[id1] = cnt
id2fa[id2] = cnt
# 更新子节点
id2child[cnt] = [id1, id2]
fa_id = id2fa[cnt]
# print('combine',fa_id,id1,id2)
id2child[fa_id].remove(id1)
id2child[fa_id].remove(id2)
id2child[fa_id].append(cnt)
# print(id2child)
# 更新深度
# print(qu[index1][0],qu[index2][0],ans[0])
id2deep[cnt] = id2deep[id1]
id2deep[id1] = id2deep[cnt] + 1
id2deep[id2] = id2deep[cnt] + 1
id2deep = update(id1, id2child, id2deep)
id2deep = update(id2, id2child, id2deep)
# print(mn)
result += add
# print(result)
# 更新g_dict
for i in range(0, len(qu)):
if id2deep[cnt] == id2deep[i] and id2fa[cnt] == id2fa[i] and i not in delete_id:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
# 更新id2sister:
id2sister[id2].remove(id1)
id2sister[id1].remove(id2)
id2sister[cnt] = list(set(id2sister[id1] + id2sister[id2]))
for id in id2sister[id1]:
id2sister[id].remove(id1)
id2sister[id].append(cnt)
for id in id2sister[id2]:
id2sister[id].remove(id2)
if cnt not in id2sister[id]:
id2sister[id].append(cnt)
id2sister[id1] = [id2]
id2sister[id2] = [id1]
# print(id1,id2sister[id1])
# print(id2,id2sister[id2])
# print(cnt,id2sister[cnt])
# print(id2sister)
'''
for i in id2sister[cnt]:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
'''
# 更新I
qu.append(ans)
I[cnt] = qu[id1][0] + qu[id2][0]
I[id2fa[cnt]] = I[id2fa[cnt]] - (qu[id1][0] + qu[id2][0] - qu[cnt][0])
#print(I)
cnt += 1
flag = True
while (flag):
iter += 1
#print(iter)
flag = False
mx = 1e-5
item1 = qu[cnt - 1]
if len(id2child[id2fa[cnt - 1]]) <= 2:
break
v1 = item1[1]
g1 = item1[0]
for j in id2sister[cnt - 1]:
# 计算merge cnt和j的收益
item2 = qu[j]
if j in delete_id:
continue
v2 = item2[1]
g2 = item2[0]
# print(item1[2],item2[2],new_node)
v12 = item1[1] + item2[1]
if (cnt - 1, j) in g_dict.keys():
g12 = g1 + g2 - 2 * g_dict[(cnt - 1, j)]
else:
g12 = g1 + g2
v = item1[1] + item2[1]
# new_node=item1[2]+item2[2]
v_fa = qu[id2fa[cnt - 1]][1]
I1 = I[cnt - 1] - g1
I2 = I[j] - g2
# print(I1, I2)
# dif = (g1+g2-g12)/(2*sum)*math.log(v_fa/v,2) - (I1-g1)/(2*sum)*math.log(v/v1,2) - (I2 - g2)/(2*sum)*math.log(v/v2,2)
dif = (g1 + g2 - g12) / (2 * sum) * math.log(v_fa, 2) + (I1) / (2 * sum) * math.log(v1, 2) \
+ (I2) / (2 * sum) * math.log(v2, 2) - (I[cnt - 1] + I[j] - g12) / (2 * sum) * math.log(v, 2)
# new_node=item1[2]+item2[2]
# 计算merge后的熵
'''
after_merge = -g12 / (2 * sum) * math.log(v12 / v_fa, 2)
for node in id2child[cnt - 1] + id2child[j]:
after_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v12, 2)
# print(after_merge)
before_merge = -g1 / (2 * sum) * math.log(v1 / v_fa, 2) - g2 / (2 * sum) * math.log(v2 / v_fa, 2)
for node in id2child[cnt - 1]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v1, 2)
for node in id2child[j]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v2, 2)
dif = before_merge - after_merge
'''
'''
print(dif, dif2)
if math.fabs(dif-dif2)>1e-3:
print("!!!!!!!!!!!!!!!!!!!!!")
'''
# print(before_merge,after_merge)
if dif > mx:
mx = dif
ans = (g12, v12)
add = dif
id2 = j
flag = True
if flag:
id1 = cnt - 1
if len(id2child[id1]) > 1:
delete_id.append(id1)
if len(id2child[id2]) > 1:
delete_id.append(id2)
#print('merge', id1, id2, cnt)
# 更新父节点
id2fa[cnt] = id2fa[id1]
# 更新父亲id的子节点
id2child[cnt] = id2child[id1] + id2child[id2]
fa_id = id2fa[cnt]
# print('merge',fa_id,id1,id2)
id2child[fa_id].remove(id1)
id2child[fa_id].remove(id2)
id2child[fa_id].append(cnt)
# print(id2child)
# 更新深度和子节点的父节点
id2deep[cnt] = id2deep[id1]
for node in id2child[cnt]:
id2deep[node] = id2deep[cnt] + 1
id2fa[node] = cnt
result += add
'''
for i in range(0, len(qu)):
if id2deep[cnt] == id2deep[i] and id2fa[cnt] == id2fa[i] and i not in delete_id:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
'''
# 更新id2sister
id2sister[id2].remove(id1)
id2sister[id1].remove(id2)
id2sister[cnt] = list(set(id2sister[id1] + id2sister[id2]))
# print(cnt,id2sister[cnt],id2sister[id1],id2sister[id2])
for id in id2sister[id1]:
id2sister[id].remove(id1)
id2sister[id].append(cnt)
for id in id2sister[id2]:
id2sister[id].remove(id2)
if cnt not in id2sister[id]:
id2sister[id].append(cnt)
for sub_id1 in id2child[id1] + id2child[id2]:
id2sister[sub_id1] = []
for sub_id2 in id2child[id1] + id2child[id2]:
if sub_id1 != sub_id2 and (sub_id1, sub_id2) in g_dict.keys():
id2sister[sub_id1].append(sub_id2)
for i in id2sister[cnt]:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
# 更新I
qu.append(ans)
I[cnt] = I[id1] + I[id2]
I[id2fa[cnt]] = I[id2fa[cnt]] - (qu[id1][0] + qu[id2][0] - qu[cnt][0])
#print(I)
cnt += 1
flag = True
while (flag):
iter += 1
#print(iter)
flag = False
mx = 1e-5
item1 = qu[cnt - 1]
if len(id2child[id2fa[cnt - 1]]) <= 2:
break
v1 = item1[1]
g1 = item1[0]
for j in id2sister[cnt - 1]:
# 计算merge cnt和j的收益
item2 = qu[j]
if j in delete_id:
continue
v2 = item2[1]
g2 = item2[0]
# print(item1[2],item2[2],new_node)
v12 = item1[1] + item2[1]
v = item1[1] + item2[1]
if (cnt - 1, j) in g_dict.keys():
g12 = g1 + g2 - 2 * g_dict[(cnt - 1, j)]
else:
g12 = g1 + g2
v_fa = qu[id2fa[cnt - 1]][1]
I1 = I[cnt - 1] - g1
I2 = I[j] - g2
dif = (g1+g2-g12)/(2*sum)*math.log(v_fa/v,2) - (I1)/(2*sum)*math.log(v/v1,2) - (I2)/(2*sum)*math.log(v/v2,2)
#dif = (g1 + g2 - g12) / (2 * sum) * math.log(v_fa, 2) + (I1) / (2 * sum) * math.log(v1, 2) \
#+ (I2) / (2 * sum) * math.log(v2, 2) - (I[cnt - 1] + I[j] - g12) / (2 * sum) * math.log(v, 2)
# new_node=item1[2]+item2[2]
# 计算merge后的熵
'''
after_merge = -g12 / (2 * sum) * math.log(v12 / v_fa, 2)
for node in id2child[cnt - 1] + id2child[j]:
after_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v12, 2)
# print(after_merge)
before_merge = -g1 / (2 * sum) * math.log(v1 / v_fa, 2) - g2 / (2 * sum) * math.log(v2 / v_fa, 2)
for node in id2child[cnt - 1]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v1, 2)
for node in id2child[j]:
before_merge += -qu[node][0] / (2 * sum) * math.log(qu[node][1] / v2, 2)
dif2 = before_merge - after_merge
'''
#print("dif:",dif,dif2)
# print(before_merge,after_merge)
if dif >= mx:
mx = dif
ans = (g12, v12)
add = dif
id2 = j
flag = True
if flag:
id1 = cnt - 1
if len(id2child[id1]) > 1:
delete_id.append(id1)
if len(id2child[id2]) > 1:
delete_id.append(id2)
#print('merge', id1, id2, cnt)
# 更新父节点
id2fa[cnt] = id2fa[id1]
# 更新父亲id的子节点
id2child[cnt] = id2child[id1] + id2child[id2]
fa_id = id2fa[cnt]
# print('merge',fa_id,id1,id2)
id2child[fa_id].remove(id1)
id2child[fa_id].remove(id2)
id2child[fa_id].append(cnt)
# print(id2child)
# 更新深度和子节点的父节点
id2deep[cnt] = id2deep[id1]
for node in id2child[cnt]:
id2deep[node] = id2deep[cnt] + 1
id2fa[node] = cnt
result += add
'''
for i in range(0, len(qu)):
if id2deep[cnt] == id2deep[i] and id2fa[cnt] == id2fa[i] and i not in delete_id:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
'''
# 更新id2sister
id2sister[id2].remove(id1)
id2sister[id1].remove(id2)
id2sister[cnt] = list(set(id2sister[id1] + id2sister[id2]))
# print(cnt,id2sister[cnt],id2sister[id1],id2sister[id2])
for id in id2sister[id1]:
id2sister[id].remove(id1)
id2sister[id].append(cnt)
for id in id2sister[id2]:
id2sister[id].remove(id2)
if cnt not in id2sister[id]:
id2sister[id].append(cnt)
for sub_id1 in id2child[id1] + id2child[id2]:
id2sister[sub_id1] = []
for sub_id2 in id2child[id1] + id2child[id2]:
if sub_id1 != sub_id2 and (sub_id1, sub_id2) in g_dict.keys():
id2sister[sub_id1].append(sub_id2)
for i in id2sister[cnt]:
if (id1, i) in g_dict.keys():
c1 = g_dict[(id1, i)]
else:
c1 = 0
if (id2, i) in g_dict.keys():
c2 = g_dict[(id2, i)]
else:
c2 = 0
c = c1 + c2
if c > 0:
g_dict[(cnt, i)] = g_dict[(i, cnt)] = c
qu.append(ans)
I[cnt] = I[id1] + I[id2]
I[id2fa[cnt]] = I[id2fa[cnt]] - (qu[id1][0] + qu[id2][0]-qu[cnt][0])
cnt += 1
g = nx.Graph()
ids = []
edges = []
id2tag = {}
id2size = {}
id2adj=[]
# 输出树上每个节点的信息
for i, item in enumerate(qu):
if i not in delete_id:
#print(i, id2fa[i], id2deep[i], id2child[i])
ids.append(i)
if len(id2child)>1:
if i==0:
tem=id2child[i]
#tem=[]
else:
tem=[id2fa[i]]+id2child[i]
#tem = [id2fa[i]]
else:
tem = [id2fa[i]]
id2adj.append(tem)
for child in id2child[i]:
#edges.append((i, child))
edges.append((child, i))
for i,tag in enumerate(node_tags):
id2tag[i+1]=tag
id2size[i+1]=1
_,_,id2tag,id2size=dfs(0,id2tag,id2size,id2child)
sort_tag=sorted(id2tag.items(), key=lambda x: x[0])
new_tag=[item[1] for item in sort_tag]
ids=np.array(ids)
edges=np.array(edges)
id2index = {j: i for i, j in enumerate(ids)}
for i,item in enumerate(id2adj):
id2adj[i]=[id2index[adj] for adj in id2adj[i]]
mapped_edge = np.array(list(map(id2index.get, edges.flatten())), dtype=np.int32).reshape(edges.shape)
ids=[id2index[id] for id in ids]
g.add_nodes_from(list(ids))
g.add_edges_from(list(mapped_edge))
g.label=label
g.node_tags=new_tag
'''
if dataset!='':
with open('../../data/' + dataset + '/' + dataset + '_aug_3layer.txt', 'a', encoding='utf8') as f1:
f1.write(str(len(ids)) +' '+str(label)+ '\n')
for i,adj in enumerate(id2adj):
num_adj=len(adj)
adj=[str(item) for item in adj]
adj_str=' '.join(adj)
f1.write(str(new_tag[i]) + ' ' + str(num_adj) +' '+adj_str+ '\n')
'''
#print(h1, h1-result)
return g,result
def graph_augment(g, dataset):
    """Run structural-entropy augmentation on a networkx-style graph.

    ``g`` carries edges, nodes, a ``node_tags`` list and a ``label``
    attribute.  Delegates to ``structual_entropy`` with a fixed coding-tree
    depth of 4 and returns its ``(augmented_graph, entropy_gain)`` result.
    """
    edge_list = list(g.edges())
    node_list = list(g.nodes())
    # Depth budget for the coding tree is fixed at 4 levels.
    return structual_entropy(edge_list, node_list, 4, g.label, g.node_tags, dataset)
| ryy980622/Hi-PART | src/graph_augmentation.py | graph_augmentation.py | py | 22,560 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": ... |
70774463784 | from artist_data import ArtistData
import numpy as np
import igraph
class Network:
    """igraph-based network of related artists built from an ArtistData object."""

    def __init__(self, data):
        self._data = data
        self._graph = igraph.Graph()

    def graph(self):
        """Return the underlying igraph.Graph instance."""
        return self._graph

    def init(self):
        """Populate the graph: one vertex per artist, de-duplicated edges.

        Edges are normalised to (max_id, min_id) tuples so each pair appears
        once.  Vertex attributes come from the artist records; an edge's
        popularity is the mean popularity of its two endpoints.
        """
        self._graph.add_vertices(list(self._data.artists.keys()))
        edges = [(max(key, i), min(key, i)) for key, item in self._data.adjacency.items() for i in item]
        edges = list(set(edges))
        self._graph.add_edges(edges)
        self._graph.vs['name'] = [item['name'] for item in self._data.artists.values()]
        self._graph.vs['followers'] = [item['followers'] for item in self._data.artists.values()]
        self._graph.vs['popularity'] = [item['popularity'] for item in self._data.artists.values()]
        self._graph.es['popularity'] = [(self._data.artists[edge[0]]['popularity'] + self._data.artists[edge[1]]['popularity']) / 2 for edge in edges]

    def draw(self, layout_name='large', file_format='pdf'):
        '''
        Draw created artists graph. Labels are shown only for artists with
        popularity above the 0.95 quantile; the seed artist is always
        labelled and highlighted in green.

        param: layout_name: name of algorithm to use for layout. Available options: see igraph documentation
        type: layout_name: str
        param: file_format: name of format to which graph should be saved
        type: file_format: str
        '''
        visual_style = {}
        visual_style['edge_width'] = [item/50 for item in self._graph.es['popularity']]
        visual_style['vertex_color'] = [[0, 1, 0, 0.9] if item['name'] == self._data.name else [1, 0, 0, 0.4] for item in self._data.artists.values()]
        quantile = np.quantile([item['popularity'] for item in self._data.artists.values()], 0.95)
        visual_style['vertex_label'] = [item['name'] if item['popularity'] > quantile or item['name'] == self._data.name else '' for item in self._data.artists.values()]
        visual_style['vertex_label_size'] = [20 if item['name'] == self._data.name else 7 for item in self._data.artists.values()]
        visual_style['vertex_label_color'] = [[0, 1, 0, 1] if item['name'] == self._data.name else [0, 0, 0, .8] for item in self._data.artists.values()]
        visual_style['vertex_size'] = [item/5 for item in self._graph.vs['popularity']]
        name = self._data.name.replace(' ', '')
        igraph.plot(
            self._graph,
            f'data/{name}/net_l{self._data.depth}.{file_format}',
            **visual_style,
            # BUG FIX: list.reverse() mutates in place and returns None, so the
            # original code always passed order=None. reversed() yields the
            # intended reversed key order.
            order=list(reversed(list(self._data.artists.keys()))),
            vertex_frame_width=.1,
            layout=self._graph.layout(layout_name),
            bbox=(1000,1000),
            autocurve=True)
if __name__ == '__main__':
    # Example run: build and render the network for a single seed artist
    # at crawl depth 3, saved as a PNG.
    data = ArtistData('Giuseppe Verdi', depth=3)
    data.load_adjacency()
    data.load_artists()
    n = Network(data)
    n.init()
    n.draw(file_format='png')
| jakubsob/SpotifyArtistsNetwork | network.py | network.py | py | 2,904 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "igraph.Graph",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.quantile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "igraph.plot",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "artist_data.ArtistData",
"... |
12087063894 | import os
import json
import argparse
from multiprocessing import Pool
import string
import shutil
# external libraries
from numpy import argmax
from rouge import Rouge
from tqdm import tqdm
def ROUGE(hypsumm, refsumm):
    """ROUGE-2 recall of hypothesis sentences against reference sentences.

    Both arguments are lists of sentence strings, joined with newlines
    before scoring.  Returns 0 when either side is shorter than 10
    characters after stripping.
    """
    ref = '\n'.join(refsumm)
    hyp = '\n'.join(hypsumm)
    if len(hyp.strip()) < 10 or len(ref.strip()) < 10:
        return 0
    scorer = Rouge()
    scorer.metrics = ['rouge-2']
    scorer.stats = ['r']
    return scorer.get_scores(hyp, ref, avg=True)['rouge-2']['r']
def AVGROUGE(hypsumm, refsumm):
    """Mean of ROUGE-1, ROUGE-2 and ROUGE-L F-scores between two sentence lists.

    Returns 0 when either joined text is shorter than 10 characters after
    stripping.
    """
    ref = '\n'.join(refsumm)
    hyp = '\n'.join(hypsumm)
    if len(hyp.strip()) < 10 or len(ref.strip()) < 10:
        return 0
    scorer = Rouge()
    scorer.stats = ['f']
    scores = scorer.get_scores(hyp, ref, avg=True)
    total = scores['rouge-1']['f'] + scores['rouge-2']['f'] + scores['rouge-l']['f']
    return total / 3
###########################################################################
# This class selects 'num_sents' sentences from the full text
# for each sentence in the summary wrt the highest average ROUGE scores.
# Ref: Narayan et.al. NAACL 2018. "Ranking sentences for extractive
# summarization with reinforcement learning"
###########################################################################
class AVGROUGEscorer:
    """Sentence labeller following Narayan et al. (NAACL 2018).

    For each summary sentence, marks the ``num_sent`` full-text sentences
    with the highest average-ROUGE score as positive extraction labels.
    """

    def __init__(self, judgesents, summsents):
        # judgesents: full-text sentences to be labelled.
        # summsents: reference summary sentences.
        self.judgesents = judgesents
        self.summsents = summsents
        self.labels = [False for sent in judgesents]

    def getLabels(self, num_sent = 3):
        """Return a boolean label per judgement sentence.

        Each summary sentence contributes its ``num_sent`` best-matching
        judgement sentences (by AVGROUGE) to the positive set.
        """
        # [facets: [support groups: [sent indices]]]
        for sent in self.summsents:
            # get scores with all judgesents
            scores = list(map(lambda x: AVGROUGE([sent], [x]), self.judgesents))
            # mark top labels; setting a picked score to -1 excludes it from
            # later argmax rounds for this summary sentence
            for i in range(num_sent):
                index = int(argmax(scores))
                self.labels[index] = True
                scores[index] = -1
        return self.labels
###########################################################################
# This class selects greedily selects the maximal sentences from full text
# to maximize ROUGE scores wrt the summary.
# Ref: Nallapati et. al. AAAI 2017. "SummaRuNNer: A Recurrent Neural Network
# based Sequence Model for Extractive Summarization of Documents"
###########################################################################
class ROUGEscorer:
    """Greedy sentence labeller following Nallapati et al. (AAAI 2017).

    Greedily adds full-text sentences that maximise ROUGE-2 recall of the
    current selection against the summary, until the score stops improving
    (and at least ``min_labels`` sentences are selected).
    """

    def __init__(self, judgesents, summsents):
        # judgesents: full-text sentences to be labelled.
        # summsents: reference summary sentences.
        self.judgesents = judgesents
        self.summsents = summsents
        # Sentences selected so far, in selection order.
        self.currsents = []
        self.labels = [False for sent in judgesents]

    def score(self, i):
        """ROUGE of the current selection extended with sentence i.

        Returns 0 for already-selected sentences and for sentences with
        fewer than 5 non-punctuation, non-whitespace characters.
        """
        if self.labels[i]: return 0
        t = self.judgesents[i]
        if len(t.translate(t.maketrans('', '', string.punctuation + string.whitespace))) < 5: return 0
        new = self.currsents + [self.judgesents[i]]
        score = ROUGE(new, self.summsents)
        return score

    def getmaxscore(self):
        """Return (index, score) of the best candidate sentence to add next."""
        # with Pool(N_PROCESS) as p:
        #    scores = p.map(self.score, range(len(self.judgesents)))
        #    p.close()
        #    p.terminate()
        scores = list(map(self.score, range(len(self.judgesents))))
        index = argmax(scores)
        return index, scores[index]

    def getLabels(self, min_labels = 10):
        """Return a boolean label per judgement sentence (greedy selection)."""
        currscore = 0.0
        while True:
            # select sent index which maximises ROUGE
            index, maxscore = self.getmaxscore()
            # Stop once the score no longer improves, but only after the
            # minimum number of labels has been reached.
            if maxscore <= currscore and len(self.currsents) >= min_labels: break
            currscore = maxscore
            self.currsents.append(self.judgesents[index])
            self.labels[index] = True
            #print(len(self.currsents), len(self.judgesents))
        return self.labels
def prepare(judgepath, summarypath):
    """Build the labelled record for one (judgement, summary) file pair.

    Reads both files line-by-line, runs the module-level ``MODEL`` scorer
    (set in ``__main__`` from the --method flag) and returns a dict with
    'doc', 'summaries' and newline-joined '0'/'1' 'labels' strings.
    """
    with open(judgepath) as fp:
        judgesents = fp.read().splitlines()
    with open(summarypath) as fp:
        summsents = fp.read().splitlines()

    data = {}
    # prepare doc
    data['doc'] = '\n'.join(judgesents)
    # prepare summ
    data['summaries'] = '\n'.join(summsents)

    scorer = MODEL(judgesents, summsents)
    labels = scorer.getLabels()
    # prepare labels (booleans serialised as "0"/"1", one per sentence)
    data['labels'] = '\n'.join(map(lambda x: str(int(x)), labels))
    return data
def generateData(f):
    """Label one file and dump the result as JSON into ``tmpdir``.

    ``f`` is a filename relative to both JUDGEPATH and SUMMPATH (module
    globals set in ``__main__``).  Errors are printed and swallowed so that
    one bad file does not abort the whole batch.
    """
    #print(f)
    try:
        d = prepare(os.path.join(JUDGEPATH, f), os.path.join(SUMMPATH, f))
        d['file'] = f
        # Sanity check: one label per document sentence.
        assert len(d['doc'].splitlines()) == len(d['labels'].splitlines()), "INCORRECT Number of sentences and labels"
        with open(os.path.join(tmpdir, f), 'w') as fout:
            json.dump(d, fout)
    except Exception as args:
        print("ERROR in", f)
        print(args)
#%% MAIN
if __name__ == '__main__':
    #PARAMS
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("base_dir", type=str, help="base directory where the other files and folders are present")
    parser.add_argument("--method", type=str, choices=['avg_rg', 'm_rg'], default="avg_rg", help="method to use for generating labels.")
    parser.add_argument("--separator", type=str, default="$$$", help="separator used in output docs, to separate between the text and labels.")
    parser.add_argument("--n_process", type=int, default=1, help="number of subprocesses to use (for parallel computation).")
    parser.add_argument("--judgement_dir", type=str, default="judgement", help="subdirectory containing the judgements.")
    parser.add_argument("--summary_dir", type=str, default="summary", help="subdirectory containing the summaries.")
    parser.add_argument("--tmp_dir", type=str, default="tmp", help="temporary directory where the files will be stored. This directory can be deleted after running.")
    parser.add_argument("--out_dir", type=str, default="labelled", help="subdirectory where the output will be stored.")
    parser.add_argument("--out_json", type=str, default="labelled.jsonl", help="json-line file where the output will be stored.")
    parser.add_argument("--remove_tmp", action='store_true', help="if given any existing files inside tmp_dir will be deleted first. Else they will be reused (they won't be calculated again).")
    args = parser.parse_args()

    BASE = args.base_dir
    JUDGEPATH = os.path.join(BASE, args.judgement_dir)
    SUMMPATH = os.path.join(BASE, args.summary_dir)
    OUTPATH_JSON = os.path.join(BASE, args.out_json)
    OUTPATH = os.path.join(BASE, args.out_dir)
    tmpdir = os.path.join(BASE, args.tmp_dir)

    # MODEL is the scorer class used by prepare(); chosen by --method.
    METHOD = args.method
    if METHOD == 'avg_rg': MODEL = AVGROUGEscorer
    elif METHOD == 'm_rg': MODEL = ROUGEscorer

    SEP = args.separator
    KEEP_TMP = not args.remove_tmp
    N_PROCESS = args.n_process

    ###########################################################################
    # CODE STARTS
    if not KEEP_TMP:
        shutil.rmtree(tmpdir)
    if not os.path.exists(tmpdir): os.mkdir(tmpdir)

    # Phase 1: label every judgement file that is not already cached in tmpdir.
    files = set(next(os.walk(JUDGEPATH))[2])
    excludefiles = set(next(os.walk(tmpdir))[2])
    files = [f for f in (files - excludefiles)]# if int(f.split('.')[0]) <= 3500]

    if N_PROCESS > 1:
        with Pool(N_PROCESS) as p:
            list(tqdm(p.imap_unordered(generateData, files), total = len(files)))
    else:
        list(tqdm(map(generateData, files), total = len(files)))

    # Phase 2: merge the cached per-file JSON into one jsonl file plus
    # SEP-annotated per-file text outputs; unparseable cache files are deleted.
    files = next(os.walk(tmpdir))[2]
    if not os.path.exists(OUTPATH): os.mkdir(OUTPATH)

    with open(OUTPATH_JSON, 'w') as fout:
        for f in tqdm(files):
            with open(os.path.join(tmpdir, f)) as fp:
                try: d = json.load(fp)
                except:
                    os.system('rm ' + os.path.join(tmpdir, f))
                    continue
            with open(os.path.join(OUTPATH, f), 'w') as fout2:
                for line, label in zip(d['doc'].split('\n'), d['labels'].split('\n')):
                    print(line, SEP, label, sep = '', file = fout2)
            print(json.dumps(d), file = fout)
| Law-AI/summarization | extractive/abs_to_ext/extractive_labels.py | extractive_labels.py | py | 9,688 | python | en | code | 139 | github-code | 36 | [
{
"api_name": "rouge.Rouge",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rouge.metrics",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "rouge.stats",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "rouge.get_scores",
... |
71591819304 | from django.shortcuts import render
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from .models import Album, Track
from .serializer import AlbumSerializer
# Function based views
@api_view(['GET', 'POST'])
def album_list(request):
    """List albums (GET, optional ?album_name= substring filter) or create one (POST)."""
    if request.method == 'GET':
        queryset = Album.objects.all()
        name_filter = request.GET.get('album_name', None)
        if name_filter is not None:
            queryset = queryset.filter(title__icontains=name_filter)
        serializer = AlbumSerializer(queryset, many=True)
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'POST':
        payload = JSONParser().parse(request)
        serializer = AlbumSerializer(data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['GET', 'PUT', 'DELETE'])
def album_detail(request, pk):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single album by pk."""
    # Find album by id (pk); 404 if it does not exist.
    try:
        album = Album.objects.get(pk=pk)
    except Album.DoesNotExist:
        return JsonResponse({'message': 'El album no existe'}, status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        # BUG FIX: the original assigned `authentication_class = (TokenAutentication,)`
        # here. The name is misspelled (the import is TokenAuthentication), so
        # every GET raised NameError -- and assigning a local variable has no
        # effect in a function-based view anyway. If token auth is required,
        # use the @authentication_classes decorator on the view instead.
        album_serializer = AlbumSerializer(album)
        return JsonResponse(album_serializer.data)

    elif request.method == 'PUT':
        album_data = JSONParser().parse(request)
        album_serializer = AlbumSerializer(album, data=album_data)
        if album_serializer.is_valid():
            album_serializer.save()
            return JsonResponse(album_serializer.data)
        return JsonResponse(album_serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    elif request.method == 'DELETE':
        album.delete()
        return JsonResponse({'message':'El album ha sido borrado'}, status=status.HTTP_204_NO_CONTENT)
{
"api_name": "models.Album.objects.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Album.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "models.Album",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "seria... |
37339853215 | from collections import deque
infinity = float("inf")
def make_graph():
    """Capacity matrix of the worked example network (6 nodes).

    Identical graph as the YouTube video: https://youtu.be/Tl90tNtKvxs
    Entry G[u][v] is the capacity of the directed edge u -> v.
    """
    G = [[0] * 6 for _ in range(6)]
    for u, v, capacity in [
        (0, 1, 10), (0, 3, 10),
        (1, 2, 4), (1, 3, 2), (1, 4, 8),
        (2, 5, 10),
        (3, 4, 9),
        (4, 2, 6), (4, 5, 10),
    ]:
        G[u][v] = capacity
    return G
# find paths from source to sink with breadth-first search
def bfs(G, source, sink, parent):
    """Breadth-first search over positive-capacity edges of matrix G.

    Fills ``parent`` with the predecessor index of every node reached and
    returns True iff ``sink`` is reachable from ``source``.
    """
    seen = [False] * len(G)
    seen[source] = True
    frontier = deque([source])
    while frontier:
        u = frontier.popleft()
        for v, capacity in enumerate(G[u]):
            if capacity > 0 and not seen[v]:
                seen[v] = True
                parent[v] = u
                frontier.append(v)
    return seen[sink]
def ford_fulkerson(G, source, sink):
    """Edmonds-Karp maximum flow on capacity matrix G (mutated into residuals).

    Repeatedly finds a shortest augmenting path with bfs(), pushes the path's
    bottleneck capacity, and returns the total flow from source to sink.
    """
    # bfs() fills this with predecessor indices along the found path.
    parent = [-1] * len(G)
    max_flow = 0
    while bfs(G, source, sink, parent):
        # Bottleneck = smallest residual capacity along the augmenting path.
        bottleneck = infinity
        node = sink
        while node != source:
            bottleneck = min(bottleneck, G[parent[node]][node])
            node = parent[node]
        max_flow += bottleneck
        # Push flow: decrease forward residuals, increase reverse ones.
        node = sink
        while node != source:
            prev = parent[node]
            G[prev][node] -= bottleneck
            G[node][prev] += bottleneck
            node = prev
    return max_flow
def main():
    """Build the demo graph and print its maximum flow from node 0 to node 5."""
    G = make_graph()
    source = 0
    sink = 5
    max_flow = ford_fulkerson(G, source, sink)
    print(f'Maximum flow: {max_flow}')

main()
| msambol/dsa | maximum_flow/ford_fulkerson.py | ford_fulkerson.py | py | 1,683 | python | en | code | 211 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 20,
"usage_type": "call"
}
] |
10204405519 | from flask import Flask,request,jsonify
from flask_mysqldb import MySQL
app = Flask(__name__)
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'library'
mysql = MySQL(app)
def getQuery(sql, params=None):
    """Run a SELECT statement and return all rows.

    ``params`` (optional, backward compatible) is a sequence of values for a
    parameterized query (``%s`` placeholders); pass user input here instead
    of formatting it into ``sql`` to avoid SQL injection.
    """
    cursor = mysql.connection.cursor()
    cursor.execute(sql, params)
    data=cursor.fetchall()
    return data
def Query(sql, params=None):
    """Run a mutating statement (INSERT/UPDATE/DELETE) and commit.

    ``params`` (optional, backward compatible) holds values for a
    parameterized query (``%s`` placeholders); prefer it over string
    formatting to avoid SQL injection.
    """
    cursor = mysql.connection.cursor()
    cursor.execute(sql, params)
    mysql.connection.commit()
@app.route("/")
def index():
return {"code":200,"msg":"READY"}
@app.route("/get-books")
def getBooks():
try:
return {"code":200,"data":getQuery("select * from books")}
except Exception as e:
return {"code":400}
@app.route("/get-book/<book_id>")
def getBook(book_id):
try:
return {"code":200,"data":getQuery("select * from books where book_id={0}".format(book_id))}
except Exception as e:
return {"code":400}
@app.route("/new-book",methods=["POST"])
def addBook():
try:
name=request.form.get("name")
Query("insert into books(name) values('{0}')".format(name))
return {"code":200}
except Exception as e:
return {"code":400,"msg":str(e)}
@app.route("/update-book/<book_id>",methods=["PUT"])
def updateBook(book_id):
try:
name=request.form.get("name")
Query("update books set name='{0}' where book_id={1}".format(name,book_id))
return {"code":200}
except Exception as e:
return {"code":400}
@app.route("/delete-book/<book_id>",methods=["DELETE"])
def deleteBook(book_id):
try:
Query("delete from books where book_id={0}".format(book_id))
return {"code":200}
except Exception as e:
return {"code":400}
if __name__ == '__main__':
    # Development server with debugger and auto-reload; not for production use.
    app.run(debug=True,use_reloader=True)
| EdgarPozas/APILibraryInFlask | app.py | app.py | py | 1,832 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask_mysqldb.MySQL",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.request.f... |
7880901877 | import json
from enum import Enum
from typing import Union
from pyspark.sql import Column
import pyspark.sql.functions as F
class ModelType(Enum):
    """Kind of prediction task implied by a model's loss function."""
    CLASSIFICATION = 1
    REGRESSION = 2
class _Singleton(type):
""" A metaclass that creates a Singleton base class when called. """
_instances = {}
def __call__(cls, *args):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args)
return cls._instances[cls]
class Singleton(_Singleton('SingletonMeta', (object,), {})):
    # Convenience base class: inherit from Singleton to make a class a
    # singleton via the _Singleton metaclass.
    pass
class LossModelTypeMapper(Singleton):
    """
    Mapper for losses -> model type

    Singleton registry resolving Keras loss identifiers to a ModelType;
    extensible at runtime via register_loss.
    """

    def __init__(self):
        self.__mapping = {
            'mean_squared_error': ModelType.REGRESSION,
            'mean_absolute_error': ModelType.REGRESSION,
            'mse': ModelType.REGRESSION,
            'mae': ModelType.REGRESSION,
            'cosine_proximity': ModelType.REGRESSION,
            'mean_absolute_percentage_error': ModelType.REGRESSION,
            'mean_squared_logarithmic_error': ModelType.REGRESSION,
            'logcosh': ModelType.REGRESSION,
            'binary_crossentropy': ModelType.CLASSIFICATION,
            'categorical_crossentropy': ModelType.CLASSIFICATION,
            'sparse_categorical_crossentropy': ModelType.CLASSIFICATION,
        }

    def get_model_type(self, loss):
        """Return the ModelType registered for ``loss`` (None if unknown)."""
        return self.__mapping.get(loss)

    def register_loss(self, loss, model_type):
        """Register a loss (name string or callable) as ``model_type``."""
        key = loss.__name__ if callable(loss) else loss
        self.__mapping.update({key: model_type})
class ModelTypeEncoder(json.JSONEncoder):
    """JSON encoder serialising ModelType members as {"__enum__": "ModelType.X"}."""

    def default(self, obj):
        if isinstance(obj, ModelType):
            return {"__enum__": str(obj)}
        return json.JSONEncoder.default(self, obj)
def as_enum(d):
    """json ``object_hook`` reversing ModelTypeEncoder.

    Turns {"__enum__": "ModelType.X"} back into ModelType.X; any other dict
    is returned unchanged.
    """
    if "__enum__" not in d:
        return d
    _, member = d["__enum__"].split(".")
    return getattr(ModelType, member)
def argmax(col: Union[str, Column]) -> Column:
    """
    returns expression for finding the argmax in an array column
    :param col: array column to find argmax of
    :return: expression which can be used in `select` or `withColumn`
    """
    # NOTE(review): the f-string interpolates `col` as text, which is only
    # meaningful for a column *name* (str); a Column object would embed its
    # repr in the SQL expression -- confirm callers pass strings.
    return F.expr(f'array_position({col}, array_max({col})) - 1')
| maxpumperla/elephas | elephas/utils/model_utils.py | model_utils.py | py | 2,344 | python | en | code | 1,568 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.JSONEncoder",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "json.JSONEncoder.default",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "json.JSONEnco... |
11712750390 | import pyautogui
import pyperclip
import time
import schedule
# Sends the automated message through the KakaoTalk desktop client.
def send_message():
    """Locate the target chat on screen, open it, paste and send the message.

    BUG FIX: the original first line was
    ``threading.Timer(10, send_message).start()``, but ``threading`` is never
    imported, so every scheduled call raised NameError. The 10-second
    repetition is already handled by the ``schedule`` registration at module
    level, so the line is removed.
    """
    # Find KakaoPicture1.png on screen and report its bounding box.
    picPosition = pyautogui.locateOnScreen(r'11. PC_Kakao_Talk_Automation_Using_Automouse\KakaoPicture1.png')
    print(picPosition)

    # Fall back to KakaoPicture2.png if the first image was not found.
    if picPosition is None:
        picPosition = pyautogui.locateOnScreen(r'11. PC_Kakao_Talk_Automation_Using_Automouse\KakaoPicture2.png')
        print(picPosition)

        # Fall back to KakaoPicture3.png if still not found.
        if picPosition is None:
            picPosition = pyautogui.locateOnScreen(r'11. PC_Kakao_Talk_Automation_Using_Automouse\KakaoPicture3.png')
            print(picPosition)

    # Double-click the centre of the matched image to open the chat window.
    ClickPosition = pyautogui.center(picPosition)
    pyautogui.doubleClick(ClickPosition)

    # Copy the message text to the clipboard, paste it, and wait one second.
    pyperclip.copy("이 메세지는 자동으로 보내는 메세지입니다")
    pyautogui.hotkey("ctrl", "v")
    time.sleep(1.0)

    # Press Enter to send, then wait one second.
    pyautogui.write(["enter"])
    time.sleep(1.0)

    # Press Esc to close the chat window, then wait one second.
    pyautogui.write(["escape"])
    time.sleep(1.0)
# Register send_message to run every 10 seconds.
schedule.every(10).seconds.do(send_message)

# schedule.run_pending() runs any registered job whose scheduled time has arrived.
while True:
    schedule.run_pending()
    time.sleep(1)
{
"api_name": "pyautogui.locateOnScreen",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyautogui.locateOnScreen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyautogui.locateOnScreen",
"line_number": 21,
"usage_type": "call"
},
{
"api_na... |
70858172905 | from functools import reduce, wraps
import tensorflow as tf
from tensorflow.keras.layers import Add, BatchNormalization, LeakyReLU, Conv2D, ZeroPadding2D, UpSampling2D
from tensorflow.keras.layers import Concatenate
from keras.layers.merge import add
from tensorflow.keras.regularizers import l2
L2_FACTOR = 1e-5
def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    ``compose(f, g)(x)`` is ``g(f(x))``; the first function may take any
    arguments, each later function receives the previous result.
    Raises ValueError when called with no functions.
    Reference: https://mathieularose.com/function-composition-in-python/
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def composed(*args, **kwargs):
        result = funcs[0](*args, **kwargs)
        for fn in funcs[1:]:
            result = fn(result)
        return result

    return composed
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    # The convolution carries no bias because BatchNormalization supplies the shift.
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    interim_model = compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(epsilon=0.001, trainable=False),
        LeakyReLU(alpha=0.1 ))
    return interim_model
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for YoloConv2D.

    Darknet uses 'valid' padding for stride-2 (downsampling) convolutions and
    'same' padding otherwise.
    NOTE(review): this function is redefined later in this file with an
    identical body; the later definition shadows this one.
    """
    #darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    #darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
    darknet_conv_kwargs = {'padding': 'valid' if kwargs.get('strides')==(2,2) else 'same'}
    darknet_conv_kwargs.update(kwargs)
    return YoloConv2D(*args, **darknet_conv_kwargs)
@wraps(Conv2D)
def YoloConv2D(*args, **kwargs):
    """Wrapper to set Yolo parameters for Conv2D.

    Adds L2 regularization (L2_FACTOR) to both kernel and bias; explicit
    kwargs from the caller override these defaults.
    NOTE(review): redefined later in this file with an identical body.
    """
    yolo_conv_kwargs = {'kernel_regularizer': l2(L2_FACTOR)}
    yolo_conv_kwargs['bias_regularizer'] = l2(L2_FACTOR)
    yolo_conv_kwargs.update(kwargs)
    #yolo_conv_kwargs = kwargs
    return Conv2D(*args, **yolo_conv_kwargs)
def CustomBatchNormalization(*args, **kwargs):
    """Batch-norm factory: SyncBatchNormalization on TF >= 2.2, else BatchNormalization."""
    # NOTE(review): lexicographic comparison of version strings is fragile
    # (e.g. '2.10' >= '2.2' is False) -- confirm the supported TF range.
    if tf.__version__ >= '2.2':
        from tensorflow.keras.layers.experimental import SyncBatchNormalization
        BatchNorm = SyncBatchNormalization
    else:
        BatchNorm = BatchNormalization
    return BatchNorm(*args, **kwargs)
def yolo3_predictions(feature_maps, feature_channel_nums, num_classes):
    """Build the three YOLOv3 prediction heads from backbone feature maps.

    :param feature_maps: (f13, f26, f52) backbone outputs, coarse to fine.
    :param feature_channel_nums: channel counts matching the three maps.
    :param num_classes: number of object classes; each head outputs
        3 * (num_classes + 5) channels (3 anchors x (box + objectness + classes)).
    :return: (y1, y2, y3) prediction tensors for the 13x13, 26x26 and 52x52 scales.
    """
    f13, f26, f52 = feature_maps
    f13_channels, f26_channels, f52_channels = feature_channel_nums

    # feature map 1 head & output (13x13 for 416 input) - starting with 1024 filters
    x, y1 = make_last_layers(f13, f13_channels, 3 * (num_classes + 5), predict_id='1')

    # upsample fpn merge for feature maps 1 and 2
    x = compose(DarknetConv2D_BN_Leaky(f26_channels//2, (1,1)),
                UpSampling2D(2))(x)
    x = Concatenate()([x,f26])
    # NOTE(review): the concatenated tensor above is immediately overwritten --
    # the next head is built from the raw f26 map, so the FPN merge result is
    # unused. In the canonical YOLOv3 the head consumes the merged tensor;
    # confirm whether this is intentional.

    # feature map 2 head & output (26x26 for 416 input) - starting with 512 filters
    x, y2 = make_last_layers(f26, f26_channels, 3 * (num_classes + 5), predict_id='2')

    # upsample fpn merge for feature maps 2 and 3
    x = compose(DarknetConv2D_BN_Leaky(f52_channels//2, (1, 1)),
                UpSampling2D(2))(x)
    x = Concatenate()([x, f52])
    # NOTE(review): same here -- the merge with f52 is discarded; head 3 reads
    # f52 directly.

    # feature map 3 head & output (52x52 for 416 input) - starting with 128 filters
    x, y3 = make_last_layers(f52, f52_channels//2, 3 * (num_classes + 5), predict_id='3')

    return y1, y2, y3
def make_last_layers(x, num_filters, out_filters, predict_filters=None, predict_id='1'):
    '''
    Detection head: five alternating 1x1 / 3x3 Conv-BN-Leaky layers, then a
    3x3 Conv-BN-Leaky and a linear 1x1 prediction convolution.

    Following the pred_yolo1, pred_yolo2 and pred_yolo3 as per exriencor code
    https://github.com/experiencor/keras-yolo3

    :param x: input feature tensor.
    :param num_filters: channel count of the 1x1 layers (the 3x3 layers use 2x).
    :param out_filters: channel count of the final linear prediction conv.
    :param predict_filters: channels of the penultimate 3x3 conv
        (defaults to num_filters * 2).
    :param predict_id: suffix for the prediction layer name.
    :return: (x, y) -- the transformed feature tensor and the prediction tensor.
    '''
    # if predict_id == '1' or predict_id == '2':
    #     # Conv2D_BN_Leaky layers followed by a Conv2D_linear layer
    #     y = compose(
    #             DarknetConv2D_BN_Leaky(num_filters, (3, 3)),
    #             DarknetConv2D(out_filters, (1, 1), name='predict_conv_' + predict_id))(x)
    #
    # if predict_id == '3':
    # 6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer
    # num_filters here 128
    x = compose(
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)

    if predict_filters is None:
        predict_filters = num_filters * 2
    y = compose(
        DarknetConv2D_BN_Leaky(predict_filters, (3, 3)),
        DarknetConv2D(out_filters, (1, 1), name='predict_conv_' + predict_id))(x)
    return x, y
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for YoloConv2D.

    NOTE(review): duplicate of the earlier DarknetConv2D definition in this
    file; this later definition is the one that takes effect.
    """
    # darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    # darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
    darknet_conv_kwargs = {'padding': 'valid' if kwargs.get('strides') == (2, 2) else 'same'}
    darknet_conv_kwargs.update(kwargs)
    return YoloConv2D(*args, **darknet_conv_kwargs)
@wraps(Conv2D)
def YoloConv2D(*args, **kwargs):
    """Wrapper to set Yolo parameters for Conv2D.

    Adds L2 regularization (L2_FACTOR) to kernel and bias.
    NOTE(review): duplicate of the earlier YoloConv2D definition in this
    file; this later definition is the one that takes effect.
    """
    yolo_conv_kwargs = {'kernel_regularizer': l2(L2_FACTOR)}
    yolo_conv_kwargs['bias_regularizer'] = l2(L2_FACTOR)
    yolo_conv_kwargs.update(kwargs)
    #yolo_conv_kwargs = kwargs
    return Conv2D(*args, **yolo_conv_kwargs)
def conv_block(inp, convs, do_skip=True):
    """Stack Conv2D(+BatchNorm +LeakyReLU) layers from `convs` specs over `inp`.

    Each spec dict provides: 'filter', 'kernel', 'stride', 'bnorm', 'leaky'
    and 'layer_idx' (used for layer names). When `do_skip` is True, the input
    to the second-to-last conv is added to the final output (residual link).
    NOTE(review): with do_skip=True and fewer than 2 specs, `skip_connection`
    is never assigned and the final `add` raises UnboundLocalError.
    """
    x = inp
    count = 0

    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1: x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # unlike tensorflow darknet prefer left and top paddings
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   # unlike tensorflow darknet prefer left and top paddings
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, trainable=False,
                                                 name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if do_skip else x
##### | jmajumde/MyMScProj | jmod/onestage/yolov3/models/layers.py | layers.py | py | 6,268 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.BatchNormalization",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.LeakyReLU",
"line_number": 30,
"usage_type": "call"
... |
497207117 | import abc
import six
from dagster_spark.configs_spark import spark_config
from dagster_spark.utils import flatten_dict
from pyspark.sql import SparkSession
from dagster import Field, check, resource
def spark_session_from_config(spark_conf=None):
    """Build (or fetch the shared) SparkSession from a nested conf dict."""
    conf = check.opt_dict_param(spark_conf, 'spark_conf')
    builder = SparkSession.builder
    for key, value in flatten_dict(conf):
        builder = builder.config(key, value)
    return builder.getOrCreate()
class PySparkResourceDefinition(six.with_metaclass(abc.ABCMeta)):
    """Abstract resource wrapping a SparkSession.

    Subclasses decide how solid compute functions are wrapped via
    ``get_compute_fn``.
    """

    def __init__(self, spark_conf):
        # Eagerly create (or reuse) the session from the supplied conf.
        self._spark_session = spark_session_from_config(spark_conf)

    @property
    def spark_session(self):
        """The underlying pyspark SparkSession."""
        return self._spark_session

    @property
    def spark_context(self):
        """SparkContext of the wrapped session."""
        return self.spark_session.sparkContext

    def stop(self):
        # Stops the shared session; further use requires a new instance.
        self._spark_session.stop()

    @abc.abstractmethod
    def get_compute_fn(self, fn, solid_name):
        """Return the compute function to run for ``solid_name``."""
        pass
class SystemPySparkResource(PySparkResourceDefinition):
    """Concrete resource that runs solid compute functions unmodified."""

    def get_compute_fn(self, fn, solid_name):
        # No instrumentation: hand back the solid's own compute function.
        return fn
@resource(
    {
        'spark_conf': spark_config(),
        'stop_session': Field(
            bool,
            is_optional=True,
            default_value=True,
            description='Whether to stop the Spark session on pipeline completion. '
            'Defaults to True.',
        ),
    }
)
def pyspark_resource(init_context):
    """Yield a SystemPySparkResource; optionally stop the session on teardown."""
    pyspark = SystemPySparkResource(init_context.resource_config['spark_conf'])
    try:
        yield pyspark
    finally:
        # Honor the 'stop_session' flag even if the pipeline raised.
        if init_context.resource_config['stop_session']:
            pyspark.stop()
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-pyspark/dagster_pyspark/resources.py | resources.py | py | 1,647 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster.check.opt_dict_param",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dagster.check",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 13,
"usage_type": "attribute"
},
{
"... |
21877547543 | from TeamCloud_Modul.Blockchain import Transaction
import requests
import json
import os
from TeamCloud_Modul.Node import Node
from TeamCloud_Modul.json_parser import Message, JSON_Parser, get_checksum
from cryptography.hazmat.primitives import serialization
from requests.api import request
from create_Keys import create_key
class Agent:
    """Client-side blockchain agent.

    Holds a local copy of the chain (via ``Node``), signs transactions with an
    RSA key pair stored next to this file, and talks to the cloud backend over
    HTTP for registration, quotes, buying, selling and chain synchronisation.
    """

    def __init__(self, name, debug=True):
        self.name = name
        self.url = "https://mastpamarkt.azurewebsites.net/"

        # Init paths
        self.filepath = os.path.dirname(os.path.abspath(__file__))
        self.backup_path = self.filepath + "/backup.txt"
        self.public_key_path = self.filepath + "/public.pem"
        self.private_key_path = self.filepath + "/private.pem"
        self.json_parser = JSON_Parser()

        # Create the RSA key pair on first run (missing or empty PEM files).
        if not (os.path.exists(self.public_key_path) and
                os.path.getsize(self.public_key_path) > 0 and
                os.path.exists(self.private_key_path) and
                os.path.getsize(self.private_key_path) > 0):
            create_key()
            print("Keys being created")

        with open(self.public_key_path, "rb") as key_file:
            self.__public_key = serialization.load_pem_public_key(key_file.read())

        with open(self.private_key_path, "rb") as key_file:
            self.__private_key = serialization.load_pem_private_key(
                key_file.read(),
                password=None,
            )

        self.node = Node(name=name, private_key=self.__private_key, public_key=self.__public_key)

        # Restore the local chain from backup (if any), then sign in.
        self.read_backup()
        self.registration(debug=debug)

    # ------------------------------------------------------------------
    # Convenience wrappers around the local node
    # ------------------------------------------------------------------
    def print_chain(self, pretty=True):
        """Print the locally-held blockchain."""
        self.node.print_chain(pretty=pretty)

    def print_balance(self, all=False):
        """Print this user's balance (or all balances when ``all`` is true)."""
        self.node.print_balance(all=all)

    def print_quotes(self, product):
        """Print the locally-derived quotes for ``product``."""
        self.node.print_quotes(product=product)

    def get_balance(self, all=False):
        """Return this user's balance (or all balances when ``all`` is true)."""
        return self.node.get_balance(all=all)

    def get_quotes(self, product):
        """Return the locally-derived quotes for ``product``."""
        return self.node.get_quotes(product=product)

    # ------------------------------------------------------------------
    # Cloud interaction
    # ------------------------------------------------------------------
    def registration(self, debug=True):
        """Register (or log in) this agent at the cloud and sync the chain."""
        try:
            # The PEM-encoded public key doubles as the "password".
            pem = self.__public_key.public_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PublicFormat.SubjectPublicKeyInfo
            )
            request_msg = Message(sender=self.name, receiver='Cloud', parser_type='type_default', message_type='default', payload={"user": self.name, "password": pem.decode('utf-8')}, checksum='checksum')
            json_request_msg = self.json_parser.parse_message_to_dump(request_msg)
            json_response_msg = requests.post(url=self.url + "/Registration/", json=json_request_msg).json()
            response_msg = self.json_parser.parse_dump_to_message(json_response_msg)

            # Status 0/1 means registered or logged in -> pull missing blocks.
            if response_msg.payload['status'] < 2:
                self.__sync_blockchain()

            info_handler = {
                0: '[Info] Successfully registered',
                1: '[Info] Successfully logged in',
                2: '[Error] Name already exists. Choose another username',
                3: '[Error] Registration failed',
            }
            if debug:
                print(info_handler.get(response_msg.payload['status'], "Error occured"))
        except Exception as e:
            if debug:
                print('[Error] Error occured. Registration call failed.')
                print(e)

    def send_cloud_missing_blocks(self):
        """Upload the blocks the cloud has not seen yet.

        NOTE(review): if the cloud's last hash is not found in the local
        chain, nothing is sent — confirm this matches the intended protocol.
        """
        last_cloud_hash = self._get_last_cloud_hash()
        idx = -1  # guard against an empty local chain (idx was unbound before)
        for idx, block in enumerate(self.node.blockchain.chain):
            if block.hash == last_cloud_hash:
                break
        payload = self.json_parser.parse_chain_to_dump(self.node.blockchain.chain[idx + 1:])
        request_msg = Message(sender=self.name,
                              receiver='receiver',
                              parser_type='type_default',
                              message_type='block_msg',
                              payload=payload,
                              checksum=get_checksum(payload))
        json_message = self.json_parser.parse_message_to_dump(request_msg)
        json_response_msg = requests.put(url=self.url + "/CloudInitialization/", json=json_message).json()
        return json_response_msg

    def _get_last_cloud_hash(self):
        """Ask the cloud for the hash of its newest block."""
        json_response_msg = requests.get(url=self.url + "/CloudInitialization/").json()
        return json_response_msg

    def __sync_blockchain(self):
        """Pull headers and blocks from the cloud, merge them, persist backup."""
        # ---- get_headers -------------------------------------------------
        start_hash, stop_hash = self.node.get_payload_for_get_headers_msg()
        payload = [start_hash, stop_hash]
        request_msg = Message(sender=self.name,
                              receiver='receiver',
                              parser_type='type_default',
                              message_type='get_headers_msg',
                              payload=payload,
                              checksum=get_checksum(payload))
        json_request_msg = self.json_parser.parse_message_to_dump(request_msg)
        json_response_msg = requests.post(url=self.url + "/Blockchain/", json=json_request_msg).json()
        response_msg = self.json_parser.parse_dump_to_message(json_response_msg)
        self.node.handle_incoming_message(response_msg)
        # Deliberate double dispatch (upstream "workaround") — do not remove
        # without verifying Node.handle_incoming_message no longer needs it.
        self.node.handle_incoming_message(response_msg)

        # ---- get_blocks --------------------------------------------------
        payload = self.node.get_payload_for_get_blocks_msg()
        request_msg = Message(sender=self.name,
                              receiver='receiver',
                              parser_type='type_default',
                              message_type='get_blocks_msg',
                              payload=payload,
                              checksum=get_checksum(payload))
        json_request_msg = self.json_parser.parse_message_to_dump(request_msg)
        json_response_msg = requests.post(url=self.url + "/Blockchain/", json=json_request_msg).json()
        response_msg = self.json_parser.parse_dump_to_message(json_response_msg)
        self.node.handle_incoming_message(response_msg)

        # Persist the freshly-synced chain.
        self.write_backup()

    def quote(self, quote_list=None, debug=True):
        """Fetch current quotes for the products in ``quote_list``.

        Returns ``{'Status': bool, 'Response': dict}``.
        """
        # None instead of a mutable [] default avoids cross-call sharing.
        quote_list = [] if quote_list is None else quote_list
        try:
            self.send_cloud_missing_blocks()
            payload = {"quote_list": quote_list}
            request_msg = Message(sender=self.name, receiver='Cloud', parser_type='type_default', message_type='default', payload=payload, checksum='checksum')
            json_request_msg = self.json_parser.parse_message_to_dump(request_msg)
            json_response_msg = requests.post(url=self.url + "/Quote/", json=json_request_msg).json()
            response_msg = self.json_parser.parse_dump_to_message(json_response_msg)

            info_handler = {
                0: '[Info] Successfully Quote Call.',
                1: '[Warning] Quotes List is Empty. Try later again.',
                2: '[Warning] Quote Call failed. Syntax Error.',
            }
            if debug:
                print(info_handler.get(response_msg.payload['status'], "Error occured"))

            if response_msg.payload['status'] == 0:
                response = response_msg.payload['quotes']['List']
                return {'Status': True, 'Response': response}
            # Non-zero status: report failure explicitly (previously the
            # function fell through and returned None here).
            return {'Status': False, 'Response': {}}
        except Exception as e:
            if debug:
                print('[Error] Error occured. Quote call failed.')
                print(e)
            return {'Status': False, 'Response': {}}

    def _trade(self, endpoint, action, product, quantity, debug=True):
        """Shared implementation of buy() and sell().

        ``endpoint`` is "/Buy/" or "/Sell/", ``action`` names the call in log
        output.  Returns ``{'Status': bool, 'Response': None}``.
        """
        try:
            # A trade is only attempted when a fresh quote is available.
            response_quote = self.quote([product], debug=debug)
            if response_quote["Status"] != True:
                # Previously this fell through and returned None, crashing
                # callers that index the result.
                if debug:
                    print('[Warning] %s Call failed caused by Quote.' % action)
                return {'Status': False, 'Response': None}

            payload = {"product": product, "quantity": quantity}
            # Sign the bare payload first, then attach the signature.
            signature = self.node.create_signature(payload)
            payload.update({'signature': signature})

            request_msg = Message(sender=self.name, receiver='Cloud', parser_type='type_default', message_type='default', payload=payload, checksum='checksum')
            json_request_msg = self.json_parser.parse_message_to_dump(request_msg)
            json_response_msg = requests.post(url=self.url + endpoint, json=json_request_msg).json()
            response_msg = self.json_parser.parse_dump_to_message(json_response_msg)

            info_handler = {
                0: '[Info] Transaction successfully.',
                1: '[Warning] %s Call failed caused by Quote.' % action,
                2: '[Warning] %s Call failed. Validity check failed.' % action,
                3: '[Error] Signature comparison faced an issue.',
                4: '[Error] %s Call failed. Syntax Error.' % action,
            }
            if debug:
                print(info_handler.get(response_msg.payload['status'], "Error occured"))

            if response_msg.payload['status'] == 0:
                # Transaction accepted: pull the new block(s).
                self.__sync_blockchain()
                return {'Status': True, 'Response': None}
            if debug:
                print("[Warning] %s Call failed. Validity check failed." % action)
            return {'Status': False, 'Response': None}
        except Exception as e:
            if debug:
                print('[Error] Error occured. %s call failed.' % action)
                print(e)
            return {'Status': False, 'Response': None}

    def buy(self, product, quantity, debug=True):
        """Buy ``quantity`` of ``product``; returns {'Status', 'Response'}."""
        return self._trade("/Buy/", "Buy", product, quantity, debug=debug)

    def sell(self, product, quantity, debug=True):
        """Sell ``quantity`` of ``product``; returns {'Status', 'Response'}."""
        return self._trade("/Sell/", "Sell", product, quantity, debug=debug)

    # ------------------------------------------------------------------
    # Local persistence
    # ------------------------------------------------------------------
    def write_backup(self):
        """Write the current chain (plus user name) to backup.txt."""
        if os.path.exists(self.backup_path) and os.path.getsize(self.backup_path) > 0:
            os.remove(self.backup_path)
        json_obj = {
            "Name": self.name,
            "Blockchain": self.json_parser.parse_chain_to_dump(self.node.blockchain.chain)
        }
        with open(self.backup_path, "w") as f:
            json.dump(json_obj, f, indent=4)

    def read_backup(self):
        """Restore the chain from backup.txt; returns True when one existed."""
        if os.path.exists(self.backup_path) and os.path.getsize(self.backup_path) > 0:
            with open(self.backup_path, "r") as f:
                json_obj = json.loads(f.read())
            self.node.blockchain.chain = self.json_parser.parse_dump_to_chain(json_obj["Blockchain"])
            self.node.create_user_public_key_map()
            return True
        return False
| Marcus11Dev/Blockchain_Lesson_Agent | agent.py | agent.py | py | 13,436 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "TeamCloud_Modul.json_par... |
35319837996 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-18 20:14:05
from __future__ import unicode_literals, division, absolute_import, print_function
import requests
import shutil
import sys
import signal
import os
import traceback
import time
import logging
import bs4
from lxml import html
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Pool
from fake_useragent import UserAgent
from .const import USER_AGENT_WIN, DEFAULT_REQUEST_TIMEOUT
from .compat import urlparse, json, basestring
from .utils import url_to_filename
logger = logging.getLogger('commons')
############################################################
#
# Network Functions
#
############################################################
random_ua = UserAgent()
default_timeout = DEFAULT_REQUEST_TIMEOUT
def get_headers(url):
    """Default request headers: same-origin Referer plus a random Chrome UA."""
    parts = urlparse(url)
    referer = '{0}://{1}/'.format(parts.scheme, parts.netloc)
    return {
        'Referer': referer,
        'User-Agent': '%s' % random_ua.chrome,
    }
def request(method, url, encoding=None, **kwargs):
    """Issue an HTTP request with module defaults; raise IOError on >= 400."""
    resp = requests.request(
        method, url,
        timeout=default_timeout,
        headers=get_headers(url),
        **kwargs)
    resp.encoding = encoding or 'utf-8'
    if resp.status_code >= 400:
        raise IOError("HTTP %s [%s]" % (resp.status_code, resp.url))
    return resp
def get(url, encoding=None, **kwargs):
    """HTTP GET with the module's defaults (timeout, headers, encoding)."""
    return request('get', url, encoding=encoding, **kwargs)
def post(url, encoding=None, **kwargs):
    """HTTP POST with the module's defaults (timeout, headers, encoding)."""
    return request('post', url, encoding=encoding, **kwargs)
def get_stream(url, encoding=None, **kwargs):
    """HTTP GET in streaming mode (response body not pre-loaded)."""
    return request('get', url, encoding=encoding, stream=True, **kwargs)
def clean_html(text, **kwargs):
    """Run lxml's Cleaner over an HTML string.

    NOTE(review): returns an lxml element, not a string — confirm callers
    expect that.
    """
    # page_structure=False keeps <html>/<head>/<body>; style=True drops CSS.
    c = html.clean.Cleaner(page_structure=False, style=True, **kwargs)
    return c.clean_html(html.fromstring(text))
def soup(url, encoding=None, clean=False):
    """Fetch ``url`` and parse the response into a BeautifulSoup document."""
    r = get(url, encoding)
    # NOTE(review): clean_html() returns an lxml element, not a string —
    # verify BeautifulSoup accepts it when clean=True.
    text = clean_html(r.text) if clean else r.text
    return bs4.BeautifulSoup(text, 'html.parser')
def download_file(url, output=None, filename=None, **kwargs):
    """Download ``url`` into ``output``/``filename``; skip if already present.

    Returns the local file path.
    """
    assert isinstance(url, basestring), 'url must be basestring'
    assert not filename or isinstance(filename, basestring), 'filename must be None or basestring'
    assert not output or isinstance(output, basestring), 'output must be None or basestring'
    filename = filename or url_to_filename(url)
    output = output or 'output'
    # Race-safe directory creation (py2-compatible; the old exists/makedirs
    # pair could raise if another worker created the directory in between).
    try:
        os.makedirs(output)
    except OSError:
        if not os.path.isdir(output):
            raise
    filepath = os.path.join(output, filename)
    logger.debug('download_file from=%s, to=%s' % (url, filepath))
    if not os.path.exists(filepath):
        r = get_stream(url, **kwargs)
        with open(filepath, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
        logger.info('download_file saved %s' % url)
    else:
        logger.info('download_file skip %s' % url)
    return filepath
############################################################
#
# Thread and Process Functions
#
############################################################
class ThreadPoolExecutorStackTraced(ThreadPoolExecutor):
    """ThreadPoolExecutor whose futures keep the worker-side traceback.

    Based on https://stackoverflow.com/questions/19309514
    """

    def submit(self, fn, *args, **kwargs):
        """Submits the wrapped function instead of `fn`"""
        return super(ThreadPoolExecutorStackTraced, self).submit(
            self._function_wrapper, fn, *args, **kwargs)

    def _function_wrapper(self, fn, *args, **kwargs):
        """Wraps `fn` in order to preserve the traceback of any kind of
        raised exception
        """
        try:
            return fn(*args, **kwargs)
        except Exception:
            # Re-raise the same exception type whose message is the full
            # formatted traceback captured in the worker thread.
            raise sys.exc_info()[0](traceback.format_exc())
def run_in_thread(func, *args, **kwargs):
    """Start ``func(*args, **kwargs)`` on a daemon thread; return the Thread."""
    from threading import Thread
    worker = Thread(target=func, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    return worker
def run_in_subprocess(func, *args, **kwargs):
    """Start ``func(*args, **kwargs)`` in a daemonized child process."""
    from multiprocessing import Process
    proc = Process(target=func, args=args, kwargs=kwargs)
    proc.daemon = True
    proc.start()
    return proc
def run_in_pool(func, args, pool_size=4, retry_max=0, sleep=60):
    """Map ``func`` over ``args`` in a process pool, retrying on failure.

    Retries up to ``retry_max`` times with an increasing delay derived from
    ``sleep``.  Returns the list of results, or None when the task never
    completed (user interrupt, or the retry loop never ran).
    """
    def _initializer():
        # Workers ignore SIGINT so only the parent handles Ctrl-C.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    results = None
    retry = 0
    while retry <= retry_max:
        pool = Pool(pool_size, _initializer)
        try:
            r = pool.map_async(func, args)
            # Finite (huge) timeout keeps the main thread interruptible.
            results = r.get(999999)
            pool.close()
            logger.info('Task execution completely.')
            break
        except KeyboardInterrupt:
            # Fixed: the old logging.info('...', e) passed a %-arg with no
            # placeholder, which made logging's formatter error out.
            logger.info('Task terminated by user.')
            pool.terminate()
            break
        except Exception as e:
            pool.terminate()
            retry += 1
            traceback.print_exc()
            if retry <= retry_max:
                next_delay = sleep * (retry % 6 + 1)
                logger.info('Task error: {0}, {1} retry in {2}s'.format(
                    e, retry_max - retry, next_delay))
                # Fixed: previously slept sleep * next_delay (sleep squared),
                # contradicting the logged delay.
                time.sleep(next_delay)
        finally:
            pool.join()
    # Fixed: returning r.get() here crashed when the loop never succeeded
    # and re-fetched the already-retrieved result otherwise.
    return results
| mcxiaoke/python-labs | lib/commons.py | commons.py | py | 5,317 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "fake_useragent.UserAgent",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "const.DEFAULT_REQUEST_TIMEOUT",
"line_number": 32,
"usage_type": "name"
},
{
"api_name... |
2671254266 | from typing import TypeAlias, Union
from const import MAX_SLOT_NUM, DiffusionSVCInferenceType, EnumInferenceTypes, EmbedderType, VoiceChangerType
from dataclasses import dataclass, asdict, field
import os
import json
@dataclass
class ModelSlot:
    """Base description of one voice-changer model slot."""
    slotIndex: int = -1
    # Discriminator used by loadSlotInfo() to pick the concrete subclass.
    voiceChangerType: VoiceChangerType | None = None
    name: str = ""
    description: str = ""
    credit: str = ""
    termsOfUseUrl: str = ""
    iconFile: str = ""
    speakers: dict = field(default_factory=lambda: {})
@dataclass
class RVCModelSlot(ModelSlot):
    """Slot parameters for an RVC model."""
    voiceChangerType: VoiceChangerType = "RVC"
    modelFile: str = ""
    indexFile: str = ""
    defaultTune: int = 0
    defaultIndexRatio: int = 0
    defaultProtect: float = 0.5
    isONNX: bool = False
    modelType: str = EnumInferenceTypes.pyTorchRVC.value
    samplingRate: int = -1
    f0: bool = True
    embChannels: int = 256
    embOutputLayer: int = 9
    useFinalProj: bool = True
    deprecated: bool = False
    embedder: EmbedderType = "hubert_base"
    sampleId: str = ""
    speakers: dict = field(default_factory=lambda: {0: "target"})
@dataclass
class MMVCv13ModelSlot(ModelSlot):
    """Slot parameters for an MMVC v1.3 model."""
    voiceChangerType: VoiceChangerType = "MMVCv13"
    modelFile: str = ""
    configFile: str = ""
    srcId: int = 107
    dstId: int = 100
    isONNX: bool = False
    samplingRate: int = 24000
    speakers: dict = field(default_factory=lambda: {107: "user", 100: "zundamon", 101: "sora", 102: "methane", 103: "tsumugi"})
@dataclass
class MMVCv15ModelSlot(ModelSlot):
    """Slot parameters for an MMVC v1.5 model."""
    voiceChangerType: VoiceChangerType = "MMVCv15"
    modelFile: str = ""
    configFile: str = ""
    srcId: int = 0
    dstId: int = 101
    f0Factor: float = 1.0
    isONNX: bool = False
    samplingRate: int = 24000
    speakers: dict = field(default_factory=lambda: {})
    f0: dict = field(default_factory=lambda: {})
@dataclass
class SoVitsSvc40ModelSlot(ModelSlot):
    """Slot parameters for a so-vits-svc 4.0 model."""
    voiceChangerType: VoiceChangerType = "so-vits-svc-40"
    modelFile: str = ""
    configFile: str = ""
    clusterFile: str = ""
    dstId: int = 0
    isONNX: bool = False
    sampleId: str = ""
    defaultTune: int = 0
    defaultClusterInferRatio: float = 0.0
    noiseScale: float = 0.0
    speakers: dict = field(default_factory=lambda: {1: "user"})
@dataclass
class DDSPSVCModelSlot(ModelSlot):
    """Slot parameters for a DDSP-SVC model (plus its diffusion companion)."""
    voiceChangerType: VoiceChangerType = "DDSP-SVC"
    modelFile: str = ""
    configFile: str = ""
    diffModelFile: str = ""
    diffConfigFile: str = ""
    dstId: int = 0
    isONNX: bool = False
    sampleId: str = ""
    defaultTune: int = 0
    enhancer: bool = False
    diffusion: bool = True
    acc: int = 20
    kstep: int = 100
    speakers: dict = field(default_factory=lambda: {1: "user"})
@dataclass
class DiffusionSVCModelSlot(ModelSlot):
    """Slot parameters for a Diffusion-SVC model."""
    voiceChangerType: VoiceChangerType = "Diffusion-SVC"
    modelFile: str = ""
    isONNX: bool = False
    modelType: DiffusionSVCInferenceType = "combo"
    dstId: int = 1
    sampleId: str = ""
    defaultTune: int = 0
    defaultKstep: int = 20
    defaultSpeedup: int = 10
    kStepMax: int = 100
    nLayers: int = 20
    nnLayers: int = 20
    speakers: dict = field(default_factory=lambda: {1: "user"})
    embedder: EmbedderType = "hubert_base"
    samplingRate: int = 44100
    embChannels: int = 768
@dataclass
class BeatriceModelSlot(ModelSlot):
    """Slot parameters for a Beatrice model."""
    voiceChangerType: VoiceChangerType = "Beatrice"
    modelFile: str = ""
    dstId: int = 1
    speakers: dict = field(default_factory=lambda: {1: "user1", 2: "user2"})
ModelSlots: TypeAlias = Union[ModelSlot, RVCModelSlot, MMVCv13ModelSlot, MMVCv15ModelSlot, SoVitsSvc40ModelSlot, DDSPSVCModelSlot, DiffusionSVCModelSlot, BeatriceModelSlot]
def loadSlotInfo(model_dir: str, slotIndex: int) -> ModelSlots:
    """Load <model_dir>/<slotIndex>/params.json into the matching slot class.

    Missing files and unknown voiceChangerType values yield an empty base
    ModelSlot.
    """
    slotDir = os.path.join(model_dir, str(slotIndex))
    jsonFile = os.path.join(slotDir, "params.json")
    if not os.path.exists(jsonFile):
        return ModelSlot()
    # Context manager fixes the previously-leaked file handle.
    with open(jsonFile) as f:
        jsonDict = json.load(f)

    baseKeys = list(ModelSlot.__annotations__.keys())
    slotInfo = ModelSlot(**{k: v for k, v in jsonDict.items() if k in baseKeys})

    # Dispatch table replaces the old if/elif chain.
    slotClasses = {
        "RVC": RVCModelSlot,
        "MMVCv13": MMVCv13ModelSlot,
        "MMVCv15": MMVCv15ModelSlot,
        "so-vits-svc-40": SoVitsSvc40ModelSlot,
        "DDSP-SVC": DDSPSVCModelSlot,
        "Diffusion-SVC": DiffusionSVCModelSlot,
        "Beatrice": BeatriceModelSlot,
    }
    slotClass = slotClasses.get(slotInfo.voiceChangerType)
    if slotClass is None:
        return ModelSlot()
    # Subclass __annotations__ only holds its own fields, so merge with base.
    validKeys = baseKeys + list(slotClass.__annotations__.keys())
    return slotClass(**{k: v for k, v in jsonDict.items() if k in validKeys})
def loadAllSlotInfo(model_dir: str):
    """Load every slot in ``model_dir`` and stamp each with its index."""
    slotInfos: list[ModelSlots] = []
    for idx in range(MAX_SLOT_NUM):
        info = loadSlotInfo(model_dir, idx)
        info.slotIndex = idx  # the index is injected dynamically, never persisted
        slotInfos.append(info)
    return slotInfos
def saveSlotInfo(model_dir: str, slotIndex: int, slotInfo: ModelSlots):
    """Persist ``slotInfo`` to <model_dir>/<slotIndex>/params.json."""
    slotDir = os.path.join(model_dir, str(slotIndex))
    print("SlotInfo:::", slotInfo)
    slotInfoDict = asdict(slotInfo)
    # The slot index is injected dynamically on load, so never persist a real
    # one.  (Previously the object was reset only *after* asdict(), so the
    # real index leaked to disk.)
    slotInfoDict["slotIndex"] = -1
    slotInfo.slotIndex = -1
    with open(os.path.join(slotDir, "params.json"), "w") as f:
        json.dump(slotInfoDict, f, indent=4)
| w-okada/voice-changer | server/data/ModelSlot.py | ModelSlot.py | py | 6,366 | python | en | code | 12,673 | github-code | 36 | [
{
"api_name": "const.VoiceChangerType",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "const.... |
72000657063 | # -*- coding: utf-8 -*-
# French language sounds configuration
from tts import filename, NO_ALTERNATE, PROMPT_SYSTEM_BASE, PROMPT_CUSTOM_BASE
# (spoken text, sound-file name) pairs for built-in and custom prompts.
systemSounds = []
sounds = []

# Prompts 0..99: one sound per number.
for i in range(100):
    systemSounds.append((str(i), filename(PROMPT_SYSTEM_BASE + i)))
# Prompts for the round hundreds 100..1000.
for i in range(10):
    systemSounds.append((str(100 * (i + 1)), filename(PROMPT_SYSTEM_BASE + 100 + i)))
for i, s in enumerate(["une", "onze", "vingt et une", "trente et une", "quarante et une", "cinquante et une", "soixante et une", "soixante et onze", "quatre vingt une"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 110 + i)))
for i, s in enumerate(["virgule", "et", "moins", "minuit", "midi"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 119 + i)))
for i, s in enumerate(["volts", u"ampères", u"milli ampères", u"knots", u"mètres seconde", u"pieds par seconde", u"kilomètre heure", u"miles par heure", u"mètres", "pieds", u"degrés", u"degrés fahrenheit", "pourcents", u"milli ampères / heure", "watt", "db", "tours minute", "g", u"degrés", "millilitres", "onces", "heure", "minute", "seconde"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 125 + i)))
# for i, s in enumerate(["timer", "", "tension", "tension", u"émission", u"réception", "altitude", "moteur",
# "essence", u"température", u"température", "vitesse", "distance", "altitude", u"élément lipo",
# "total lipo", "tension", "courant", "consommation", "puissance", u"accelération X", u"accelération Y", u"accelération Z",
# "orientation", "vario"]):
# systemSounds.append((s, filename(PROMPT_SYSTEM_BASE+146+i)))
for i, s in enumerate(["virgule 0", "virgule 1", "virgule 2", "virgule 3", "virgule 4", "virgule 5", "virgule 6", "virgule 7", "virgule 8", "virgule 9"]):
systemSounds.append((s, filename(PROMPT_SYSTEM_BASE + 180 + i)))
for s, f, a in [(u"Trim centré", "midtrim", 495),
(u"Trim maximum atteint", "endtrim", NO_ALTERNATE),
(u"Batterie radio faible !", "lowbatt", 485),
(u"Radio inactive !", "inactiv", 486),
(u"Alerte manche des gaz", "thralert", 481),
(u"Alerte inters", "swalert", 482),
(u"éprome corrompue", "eebad", 483),
(u"Bienvenue sur Open TI X!", "tada", 480),
(u"vingt secondes", "timer20", 500),
(u"trente secondes", "timer30", 501),
(u"A1,faible", "a1_org", NO_ALTERNATE),
(u"A1,critique", "a1_red", NO_ALTERNATE),
(u"A2,faible", "a2_org", NO_ALTERNATE),
(u"A2,critique", "a2_red", NO_ALTERNATE),
(u"A3,faible", "a3_org", NO_ALTERNATE),
(u"A3,critique", "a3_red", NO_ALTERNATE),
(u"A4,faible", "a4_org", NO_ALTERNATE),
(u"A4,critique", "a4_red", NO_ALTERNATE),
(u"Signal RF, faible", "rssi_org", NO_ALTERNATE),
(u"Signal RF, critique", "rssi_red", NO_ALTERNATE),
(u"Antenne défectueuse", "swr_red", NO_ALTERNATE),
(u"Plus de télémétrie", "telemko", NO_ALTERNATE),
(u"Télémétrie retrouvée", "telemok", NO_ALTERNATE),
(u"Signal écolage perdu", "trainko", NO_ALTERNATE),
(u"Signal écolage retrouvé", "trainok", NO_ALTERNATE),
]:
systemSounds.append((s, filename(f, a)))
for i, (s, f) in enumerate([(u"altitude", "altitude"),
(u"température moteur", "moteur"),
(u"température contrôleur", "cntrleur"),
(u"train rentré", "gearup"),
(u"train sorti", "geardn"),
(u"volets rentrés", "flapup"),
(u"volets sortis", "flapdn"),
(u"atterrissage", "attero"),
(u"écolage", "trnon"),
(u"fin écolage", "trnoff"),
(u"moteur coupé", "engoff"),
]):
sounds.append((s, filename(f, PROMPT_CUSTOM_BASE + i)))
| Ingwie/NextStepRc-2.18 | radio/util/tts_fr.py | tts_fr.py | py | 4,215 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "tts.filename",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tts.PROMPT_SYSTEM_BASE",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "tts.filename",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tts.PROMPT_SYSTEM_BA... |
10829429705 | #!/usr/bin/env python
import os, sys, pkg_resources
import json
from collections import namedtuple
from functools import partial
import html5lib
from ..vendor.pluginbase.pluginbase import PluginBase
Key = namedtuple("Key", ["name","version"])
__all__ = ['plugins_get_mgr', 'plugins_load',
'plugins_show', 'plugins_close']
class PluginManager(object):
    """Discover, register and look up dgit plugins.

    Plugins are grouped by kind (backend, repomanager, metadata, validator,
    transformer, instrumentation, representation) and keyed by
    ``Key(name, version)``.
    """

    def __init__(self, paths=None):
        # `paths` is accepted for backward compatibility but is not used yet;
        # None instead of a mutable [] default avoids cross-call sharing.
        self.order = ['backend', 'repomanager',
                      'metadata',
                      'validator', 'transformer',
                      'instrumentation',
                      'representation']
        self.plugins = {
            'backend': {},
            'instrumentation': {},
            'repomanager': {},
            'metadata': {},
            'validator': {},
            'transformer': {},
            'representation': {}
        }
        self.sources = {}

        thisdir = os.path.abspath(os.path.dirname(__file__))

        def get_path(p):
            # Contrib plugins live alongside this package.
            return os.path.abspath(os.path.join(thisdir, "../contrib", p))

        allplugins = [
            {'package': 'backend', 'base': get_path('backends')},
            {'package': 'instrumentation', 'base': get_path('instrumentations')},
            {'package': 'repomanager', 'base': get_path('repomanagers')},
            {'package': 'metadata', 'base': get_path('metadata')},
            {'package': 'validator', 'base': get_path('validators')},
            {'package': 'transformer', 'base': get_path('transformers')},
            {'package': 'representation', 'base': get_path('representations')},
        ]

        for p in allplugins:
            plugin_base = PluginBase(package=p['package'],
                                     searchpath=[p['base']])
            source = plugin_base.make_plugin_source(
                searchpath=[],
                identifier="Plugin Manager")
            for plugin_name in source.list_plugins():
                plugin = source.load_plugin(plugin_name)
                plugin.setup(self)
            # Keep the source alive; dropping it would unload its plugins.
            self.sources[p['package']] = source

        self.discover_all_plugins()

    def discover_all_plugins(self):
        """
        Load all plugins from dgit extension
        """
        for v in pkg_resources.iter_entry_points('dgit.plugins'):
            m = v.load()
            m.setup(self)

    def register(self, what, obj):
        """
        Registering a plugin

        Params
        ------
        what: Nature of the plugin (backend, instrumentation, repo)
        obj: Instance of the plugin
        """
        name = obj.name
        version = obj.version
        enable = obj.enable
        if enable == 'n':
            # Disabled plugins are silently skipped.
            return
        key = Key(name, version)
        self.plugins[what][key] = obj

    def search(self, what, name=None, version=None):
        """Return {kind: [Key, ...]} of enabled plugins matching the filters.

        ``what`` of None scans every kind; an unknown kind raises.
        """
        filtered = {}
        if what is None:
            whats = list(self.plugins.keys())
        else:
            if what not in self.plugins:
                raise Exception("Unknown class of plugins")
            whats = [what]

        for what in whats:
            if what not in filtered:
                filtered[what] = []
            for key in self.plugins[what].keys():
                (k_name, k_version) = key
                if name is not None and k_name != name:
                    continue
                if version is not None and k_version != version:
                    continue
                if self.plugins[what][key].enable == 'n':
                    continue
                filtered[what].append(key)
        return filtered

    def gather_configs(self):
        """
        Gather configuration requirements of all plugins
        """
        configs = []
        for what in self.order:
            for key in self.plugins[what]:
                mgr = self.plugins[what][key]
                c = mgr.config(what='get')
                if c is not None:
                    c.update({
                        'description': mgr.description
                    })
                    configs.append(c)
        return configs

    def update_configs(self, config):
        """
        Push updated configuration values down to every plugin
        """
        for what in self.plugins:  # backend, repo etc.
            for key in self.plugins[what]:  # s3, filesystem etc.
                self.plugins[what][key].config(what='set', params=config)
        return

    def show(self, what, name, version, details):
        """Print matching plugins grouped by kind; details adds support info."""
        filtered = self.search(what, name, version)
        if len(filtered) > 0:
            for what in self.order:
                # Fixed: when search() was scoped to one kind, the other
                # kinds are absent from `filtered` and indexing raised
                # KeyError here.
                keys = filtered.get(what)
                if keys is None:
                    continue
                print("========")
                print(what)
                print("========")
                if len(keys) == 0:
                    print("None\n")
                    continue
                for k in keys:
                    obj = self.plugins[what][k]
                    print("%s (%s) :" % k,
                          obj.description)
                    if details:
                        print(" Supp:", obj.support)
                    print("")
        else:
            print("No backends found")

    def get_by_key(self, what, key):
        """Return the plugin registered under (kind, Key)."""
        return self.plugins[what][key]

    def get_by_repo(self, username, dataset):
        """Find the repomanager (and its repo key) owning username/dataset."""
        # Fixed: initialize before the loop so an empty repomanager registry
        # no longer raises NameError on return.
        repomanager = None
        repokey = None
        for k in self.plugins['repomanager']:
            try:
                repomanager = self.plugins['repomanager'][k]
                repokey = repomanager.find(username, dataset)
                break
            except Exception:
                # This manager doesn't know the repo; keep probing the rest.
                repomanager = None
                repokey = None
        return (repomanager, repokey)

    def get(self, what, name):
        """Return the first enabled plugin of kind ``what`` named ``name``, or None."""
        filtered = self.search(what, name)
        filtered = filtered[what]
        if len(filtered) > 0:
            return self.plugins[what][filtered[0]]
        else:
            return None

    def shutdown(self):
        """Clean up every plugin source (unloads contrib plugins)."""
        for what in self.sources:
            self.sources[what].cleanup()
# Module-level singleton manager; populated by plugins_load().
pluginmgr = None

def plugins_load():
    """
    Load plugins from various sources:
         - dgit/plugins
         - dgit_extensions package
    """
    global pluginmgr

    # Re-create the manager if a previous one was never shut down.
    if pluginmgr is not None:
        plugins_close()

    pluginmgr = PluginManager([])
def plugins_close():
    """Shut down and discard the global plugin manager."""
    global pluginmgr
    pluginmgr.shutdown()
    pluginmgr = None
def plugins_show(what=None, name=None, version=None, details=False):
    """Show details of the available plugins.

    Parameters
    ----------
    what: Class of plugins e.g., backend
    name: Name of the plugin e.g., s3
    version: Version of the plugin
    details: Should extended details be shown?
    """
    # A `global` declaration is unnecessary for read-only access to the
    # module-level singleton.
    return pluginmgr.show(what, name, version, details)
def plugins_get_mgr():
    """Return the module-level PluginManager singleton (None if not loaded)."""
    # Read-only access; no `global` statement required.
    return pluginmgr
def plugins_get_config():
    """Return the global plugin manager's configuration.

    NOTE(review): PluginManager's ``config()`` method is not visible in
    this chunk; confirm it exists (``gather_configs`` may be intended).
    """
    global pluginmgr
    return pluginmgr.config()
if __name__ == '__main__':
    # Smoke test: load, print, and tear down the plugin registry.
    plugins_load()
    plugins_show()
    plugins_close()
| pingali/dgit | dgitcore/plugins/common.py | common.py | py | 7,921 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",... |
34228583172 | # -*- coding: utf-8 -*-
""" Application Factory
This is the entry point to the entire application.
"""
import os
import json
from flask import Flask, render_template, jsonify, flash, redirect, url_for
def create_app(test_config=None):
    """Create an instance of Wallowa Wildlife Checklists.

    Args:
        test_config: optional mapping that overrides the instance
            configuration (used by the test suite).

    Returns:
        The configured :class:`flask.Flask` application.
    """
    app = Flask(__name__, instance_relative_config=True)

    # Read the Google-login client id with a context manager so the file
    # handle is closed (the original inline open() leaked it).
    with open('wallowawildlife/client_secrets.json', 'r') as secrets_file:
        client_id = json.loads(secrets_file.read())['web']['client_id']

    app.config.from_mapping(
        # This secret will be overriden with the instance config.
        SECRET_KEY='dev',
        # Store the database in the instance folder.
        DATABASE=os.path.join(app.instance_path, 'wallowawildlife.sqlite'),
        # Client id for google login (read above).
        CLIENT_ID=client_id
    )

    if test_config is None:
        # Load the instance config.
        app.config.from_pyfile('config.py', silent=True)
    else:
        # Otherwise, load the test config.
        app.config.update(test_config)

    # Make the instance folder if it doesn't exist.
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # Make the database available.
    from wallowawildlife.db import get_db

    def _creature_dict(c):
        """Map a creature DB row to its JSON-serializable dict."""
        return {'id': c['id'],
                'name_common': c['name_common'],
                'name_latin': c['name_latin'],
                'photo_url': c['photo_url'],
                'photo_attr': c['photo_attr'],
                'wiki_url': c['wiki_url'],
                'type': c['type_id']}

    @app.route('/')
    def index():
        """Handle the index route"""
        db = get_db()
        types = db.execute('SELECT * FROM creature_type').fetchall()
        return render_template('front_page.html', types=types)

    @app.route('/wildlife/<int:creature_id>/JSON')
    def wildlifeCreatureJSON(creature_id):
        """JSON endpoint for a single creature; redirects home if unknown."""
        db = get_db()
        c = db.execute('SELECT * FROM creature WHERE id = ?',
                       (creature_id,)).fetchone()
        if c:
            return jsonify(_creature_dict(c))
        else:
            return redirect(url_for('index'))

    @app.route('/wildlife/<url_text>/JSON')
    def wildlifeTypeJSON(url_text):
        """JSON endpoint for all creatures of one type (type_id == url_text)."""
        db = get_db()
        creatures = db.execute('SELECT * FROM creature \
                                WHERE type_id = ?',
                               (url_text,)).fetchall()
        if creatures:
            return jsonify([_creature_dict(c) for c in creatures])
        else:
            return redirect(url_for('index'))

    @app.route('/wildlife/JSON')
    def wildlifeJSON():
        """JSON endpoint for every creature in the database."""
        db = get_db()
        creatures = db.execute('SELECT * FROM creature').fetchall()
        return jsonify([_creature_dict(c) for c in creatures])

    @app.errorhandler(404)
    def page_not_found(e):
        """Redirect from all unhandled URLs to the index route"""
        return redirect(url_for('index'))

    # Register cli db commands.
    from . import db
    db.init_app(app)

    # Apply blueprints.
    from . import auth
    app.register_blueprint(auth.bp)
    from . import lists
    app.register_blueprint(lists.bp)
    app.add_url_rule('/', endpoint='index')

    return app
| wicker/Wallowa-Wildlife-Checklist-App | wallowawildlife/__init__.py | __init__.py | py | 4,136 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number":... |
37360525865 | import argparse
import glob
import json
import logging
import os
import platform
import re
import traceback
from pathlib import Path
import fitz
# Per-user application directory (~/.pdf_guru) holding the log file and the
# JSON file that reports each command's success/error status to the GUI.
if platform.system() == "Windows":
    logdir = Path(os.environ['USERPROFILE']) / ".pdf_guru"
else:
    logdir = Path(os.environ['HOME']) / ".pdf_guru"
logdir.mkdir(parents=True, exist_ok=True)
logpath = str(logdir / "pdf.log")
cmd_output_path = str(logdir / "cmd_output.json")
def dump_json(path, obj):
    """Serialize `obj` as UTF-8 JSON at `path`, keeping non-ASCII literal."""
    with open(path, "w", encoding="utf-8") as out_file:
        json.dump(obj, out_file, ensure_ascii=False)
def parse_range(page_range: str, page_count: int, is_multi_range: bool = False, is_reverse: bool = False, is_unique: bool = True):
    """Parse a 1-based page-range expression into 0-based page indices.

    Supported syntax: "1-3,5-6,7-10", "1,4-5", "3-N", "even", "odd",
    "all"/"" and negated parts such as "!2" or "!3-N".  "N" stands for
    the last page; a leading "!" selects the complement of the listed
    pages.  Positive and negated parts cannot be mixed.

    Args:
        page_range: range expression (1-based, inclusive bounds).
        page_count: total number of pages in the document.
        is_multi_range: return a list of index lists (one per part)
            instead of a flattened list.
        is_reverse: complement the final selection.
        is_unique: de-duplicate and sort the result.

    Returns:
        list of 0-based page indices, or a list of lists when
        ``is_multi_range`` is True.

    Raises:
        ValueError: on malformed expressions or mixed +/- parts.
    """
    page_range = page_range.strip()
    if page_range in ["all", ""]:
        return list(range(page_count))
    if page_range == "even":
        # 0-based even indices == 1-based odd page numbers.
        return list(range(0, page_count, 2))
    if page_range == "odd":
        return list(range(1, page_count, 2))

    # Raw string fixes the invalid "\d" escape warning of the original.
    part_pattern = re.compile(r"^!?(\d+|N)(-(\d+|N))?$")

    def expand(token: str):
        """Expand 'a', 'N', 'a-b' or 'a-N' into a 0-based index list."""
        bounds = token.split("-")
        if len(bounds) == 1:
            if bounds[0] == "N":
                return [page_count - 1]
            return [int(bounds[0]) - 1]
        upper = page_count if bounds[1] == "N" else int(bounds[1])
        return list(range(int(bounds[0]) - 1, upper))

    roi_indices = []
    parts = page_range.split(",")
    neg_count = sum(p.startswith("!") for p in parts)
    pos_count = len(parts) - neg_count
    if neg_count > 0 and pos_count > 0:
        raise ValueError("页码格式错误:不能同时使用正向选择和反向选择语法")
    if pos_count > 0:
        for part in parts:
            part = part.strip()
            if part_pattern.match(part) is None:
                raise ValueError("页码格式错误!")
            roi_indices.append(expand(part))
        if is_multi_range:
            return roi_indices
        roi_indices = [i for v in roi_indices for i in v]
        if is_unique:
            roi_indices = sorted(set(roi_indices))
    if neg_count > 0:
        for part in parts:
            part = part.strip()
            if part_pattern.match(part) is None:
                raise ValueError("页码格式错误!")
            # Fix: the original crashed on "!N" because its single-token
            # branch never handled "N"; expand() treats it uniformly.
            roi_indices.append(expand(part[1:]))
        if is_multi_range:
            return roi_indices
        roi_indices = [i for v in roi_indices for i in v]
        if is_unique:
            roi_indices = sorted(set(range(page_count)) - set(roi_indices))
    if is_reverse:
        roi_indices = sorted(set(range(page_count)) - set(roi_indices))
    return roi_indices
def batch_process(func):
    """Decorator: expand a glob pattern in `doc_path` into per-file calls.

    When ``kwargs['doc_path']`` contains "*" and the pattern matches files,
    `func` is invoked once per matched path; otherwise `func` is invoked
    once with the original arguments.

    Fixes two defects in the original wrapper: `print.debug(...)` raised
    AttributeError on any glob input, and `func` was unconditionally
    invoked a second time after the if/else, duplicating every conversion.
    """
    def wrapper(*args, **kwargs):
        doc_path = kwargs['doc_path']
        if "*" in doc_path:
            path_list = glob.glob(doc_path)
            if path_list:
                del kwargs['doc_path']
                for path in path_list:
                    func(*args, doc_path=path, **kwargs)
                return
        # No pattern, or a pattern that matched nothing: single call.
        func(*args, **kwargs)
    return wrapper
@batch_process
def convert_docx2pdf(doc_path: str, output_path: str = None):
    """Convert a .docx file to PDF via the docx2pdf package.

    Args:
        doc_path: input .docx path (glob patterns expanded by batch_process).
        output_path: target PDF path; defaults to the input name with .pdf.

    Writes a success/error status record to ``cmd_output_path``.
    """
    try:
        from docx2pdf import convert
        if output_path is None:
            p = Path(doc_path)
            output_path = str(p.parent / f"{p.stem}.pdf")
        convert(doc_path, output_path)
        dump_json(cmd_output_path, {"status": "success", "message": ""})
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    except:
        logging.error(traceback.format_exc())
        dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
@batch_process
def convert_pdf2docx(doc_path: str, page_range: str = "all", output_path: str = None):
    """Convert (a page range of) a PDF to .docx via the pdf2docx package.

    Args:
        doc_path: input PDF path (glob patterns expanded by batch_process).
        page_range: page selection string understood by parse_range().
        output_path: target .docx path; defaults to the input name with .docx.

    Writes a success/error status record to ``cmd_output_path``.
    """
    try:
        from pdf2docx import Converter
        # Opened only to obtain the page count for range validation.
        # NOTE(review): neither `doc` nor the Converter on the error path is
        # explicitly closed -- confirm whether a try/finally is warranted.
        doc = fitz.open(doc_path)
        roi_indices = parse_range(page_range, doc.page_count)
        cv = Converter(doc_path)
        if output_path is None:
            p = Path(doc_path)
            output_path = str(p.parent / f"{p.stem}.docx")
        cv.convert(output_path, pages=roi_indices)
        cv.close()
        dump_json(cmd_output_path, {"status": "success", "message": ""})
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    except:
        logging.error(traceback.format_exc())
        dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
def main():
    """CLI entry point: parse arguments and dispatch the conversion."""
    parser = argparse.ArgumentParser(description="Convert functions")
    parser.add_argument("input_path", type=str, help="pdf文件路径")
    parser.add_argument("--source-type", type=str, choices=["pdf", 'png', "jpg", "svg", "docx"], default="pdf", help="源类型")
    parser.add_argument("--target-type", type=str, choices=['png', "svg", "docx"], default="png", help="目标类型")
    # NOTE(review): underscore style here vs dashes above -- the flag is
    # literally "--page_range"; confirm callers before normalizing.
    parser.add_argument("--page_range", type=str, default="all", help="页码范围")
    parser.add_argument("-o", "--output", type=str, help="输出文件路径")
    args = parser.parse_args()
    # Only the pdf -> docx combination is implemented; others are no-ops.
    if args.source_type == "pdf":
        if args.target_type == "docx":
            convert_pdf2docx(doc_path=args.input_path, page_range=args.page_range, output_path=args.output)
if __name__ == '__main__':
main() | kevin2li/PDF-Guru | thirdparty/convert_external.py | convert_external.py | py | 5,838 | python | en | code | 941 | github-code | 36 | [
{
"api_name": "platform.system",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line... |
28299895707 | from train import get_model
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
import torch
import os
from torchvision.models import resnet18, ResNet18_Weights
import torch.nn as nn
import numpy as np
class Make_Test(nn.Module):
    """Inference wrapper: load trained weights and classify one image file.

    ``forward()`` takes an image path and returns the predicted label
    string ('cat' or 'dog').
    """
    def __init__(self, weight_path):
        super(Make_Test, self).__init__()
        # Standard ImageNet preprocessing (resize + normalize).
        self.transform = transforms.Compose([
            transforms.Resize(size=(224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        self.model = get_model()
        self.model.load_state_dict(torch.load(weight_path))
        # Class index -> human-readable label.
        self.d = {
            0: 'cat',
            1: 'dog'
        }

    def forward(self, image_path):
        image = plt.imread(image_path)
        image = Image.fromarray(image)
        image = self.transform(image).unsqueeze(0)
        self.model.eval()
        # NOTE(review): the 0.5 threshold assumes the model emits a single
        # probability-like scalar (sigmoid output) -- confirm get_model().
        out = 0 if self.model(image).squeeze(-1).item() < 0.5 else 1
        return self.d[out]
def visuzalize_loss():
    """Plot the stored loss and accuracy curves.

    NOTE(review): the function name has a typo (kept for compatibility);
    the local names say "train" but the files hold test metrics -- confirm
    which split these arrays actually contain.
    """
    train_loss = np.load('Accuracy/test_losses.npy')
    train_acc = np.load('test1/test_accs.npy')
    plt.plot(train_loss)
    plt.plot(train_acc)
    plt.show()
def visualize(mk_test, data_path):
    """Show a 4x4 grid of test images titled with their predicted labels.

    Args:
        mk_test: callable Make_Test instance mapping image path -> label.
        data_path: list of at least 16 file names inside the "test1" folder.
    """
    fig = plt.figure(figsize=(9, 9))
    rows, cols = 4, 4
    for i in range(1, rows * cols + 1):
        img = plt.imread(os.path.join("test1", data_path[i - 1]))
        label = mk_test(os.path.join("test1", data_path[i - 1]))
        fig.add_subplot(rows, cols, i)
        plt.imshow(img)
        plt.title(label)
        plt.axis(False)
    plt.show()
if __name__ == '__main__':
    folder = "test1"
    cnt = 0  # NOTE(review): unused -- confirm before removing
    mk_test = Make_Test(r"E:\Python test Work\Hieu\Weight\model.pt")
    # Visualize predictions for images 17..32 of the test folder.
    data_path = os.listdir(folder)[16:32]
    visualize(mk_test, data_path)
| kienptitit/Dog_Cat_Classification | image_test.py | image_test.py | py | 1,821 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvis... |
1763129188 | """
Implements a mixin for remote communication.
"""
import re
import json
import socket
# Precompiled whitespace matcher; appears unused in this module -- confirm
# whether importers rely on it before removing.
whitespace_re = re.compile(r"\s+")
class RemoteActor:
    """Mixin wrapping a socket with JSON message send/receive.

    Incoming bytes accumulate in a text buffer until a complete JSON value
    can be decoded.  Numbers need special care: a number that reaches the
    end of the buffer may still be a prefix of a longer number in flight,
    so it is only reported as "complete" on an explicit timeout.

    The stray debug ``print`` calls of the original (which polluted stdout
    on every parse/receive) have been removed.
    """

    ENCODING = "utf-8"
    DECODER = json.JSONDecoder()

    def __init__(self, socket):
        """ Creates a new remote actor able to send and receive from the given socket

        :param socket: socket connection
        """
        self.socket = socket
        self.buffer = ""

    def parse(self, clear_on_partial=False):
        """Try to decode one JSON value from the front of the buffer.

        :param clear_on_partial: consume a trailing number from the buffer
            even though further digits could still arrive
        :return: (found, complete, value); (False, False, False) when the
            buffer holds no decodable JSON yet.  `complete` is False for a
            number that reaches the end of the buffer.
        """
        try:
            decoded, end = self.DECODER.raw_decode(self.buffer)
        except ValueError:
            return False, False, False
        trailing_number = (
            not isinstance(decoded, bool)
            and (isinstance(decoded, int) or isinstance(decoded, float))
            and end == len(self.buffer)
        )
        if trailing_number:
            if clear_on_partial:
                self.buffer = self.buffer[end:]
            return True, False, decoded
        self.buffer = self.buffer[end:]
        return True, True, decoded

    def send(self, data):
        """ Sends the given JSON object to the socket

        :param data: JSON object
        """
        encoded = json.dumps(data).encode(self.ENCODING)
        self.socket.send(encoded)

    def receive(self):
        """ Continuously receives bytes until a JSON object can be deserialized, at which point
        the deserialized object is returned. It is up to the caller to restrict the execution time.

        :return: deserialized JSON object
        """
        data = bytes()
        while True:
            try:
                data += self.socket.recv(1)
            except socket.timeout:
                # On timeout, accept a trailing (possibly partial) number if
                # one is buffered; otherwise propagate the timeout.
                something, complete, decoded = self.parse(True)
                if something:
                    return decoded
                raise
            try:
                text = data.decode(self.ENCODING)
                self.buffer += text
                data = bytes()
            except UnicodeDecodeError:
                # Partial multi-byte character: wait for more bytes.
                continue
            something, complete, decoded = self.parse()
            if something and complete:
                return decoded

    def receive_iterator(self):
        """ Continuously receives data and deserializes JSON objects as they come in """
        while True:
            yield self.receive()
| lukasberger/evolution-game | evolution/common/remote_actor_2.py | remote_actor_2.py | py | 2,413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.JSONDecoder",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "socket.timeout",
"line_numb... |
26255667961 | """
Here I will read access tokens from txt file for safety
"""
import json
class Token:
    """Read VK access tokens for the community and user accounts.

    Tokens are kept out of source control in a local ``tokens.json`` file
    whose first line is a JSON object containing ``comm_token`` and
    ``usr_token``.
    """
    def __init__(self):
        with open('tokens.json', 'r') as token_file:
            credentials = json.loads(token_file.readline())
        self.community = credentials['comm_token']
        self.user = credentials['usr_token']
        # Numeric VK identifiers of the community and the user account.
        self.comm_id = -167621445
        self.usr_id = 491551942
# Smoke test: instantiating Token reads tokens.json from the CWD.
if __name__=='__main__':
t = Token() | maxikfu/community | auth.py | auth.py | py | 393 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
}
] |
25418445648 | #!/usr/bin/env python
from utils.analysis import AbsMovingAvg, Threshold, Derivative
from utils.chaser import Chaser
import os
import sys
# Make the repository's parent directory importable so sibling checkouts
# (mediautils, compositionutils) can be imported below.
parentDir = os.path.dirname(os.getcwd())
sys.path.append(parentDir)
def checkImport(lib):
    """Abort with instructions if sibling library `lib` is not checked out.

    Args:
        lib: directory name of the required sibling project (expected next
            to this repository, i.e. inside parentDir).
    """
    if not os.path.exists(os.path.join(parentDir, lib)):
        print("%s library not found." % lib)
        print("please clone github.com/andrewbooker/%s.git into %s" % (lib, parentDir))
        # The original used exit() with no argument, which terminates with
        # status 0 -- a failed dependency check must report failure.
        sys.exit(1)
checkImport("mediautils")
from mediautils.mididevices import UsbMidiDevices, MidiOut

## ===== composer =====
checkImport("compositionutils")
from compositionutils.scale import Scale, Modes

# Key/mode of the generated material and the playable note range.
tonic = "C"
mode = "aeolian"
print(tonic, mode)
noteSpan = 15
scale = Scale(noteSpan, tonic, Modes.named(mode))
class Consumer():
    """Maps detected signal levels to MIDI note on/off events.

    Uses the module-level ``scale`` and ``noteSpan`` to pick a pitch from
    the incoming velocity value.
    """
    def __init__(self, midiOut):
        self.midiOut = midiOut
        self.note = 0  # last note started, so off() can release it

    def on(self, velocity):
        # NOTE(review): assumes 0 <= velocity <= 1 (a MIDI velocity must
        # stay below 128) -- confirm against the Chaser thresholds.
        self.note = scale.noteFrom(int(velocity * 100) % noteSpan)
        self.midiOut.note_on(self.note, int(26 + (velocity * 100)), 0)

    def off(self):
        self.midiOut.note_off(self.note, 0, 0)
## =====
# Wire up MIDI output and the level-chaser that drives the Consumer.
midiDevices = UsbMidiDevices()
midiOut = MidiOut(midiDevices)
consumer = Consumer(midiOut.io)
chaser = Chaser(consumer, 0.6, 0.2)

import soundfile as sf

# Stream every sample of the WAV file given on the command line through
# the chaser, which emits MIDI notes as levels cross its thresholds.
infile = sys.argv[1]
workingDir = os.path.dirname(infile)  # NOTE(review): unused -- confirm
print("loading %s" % infile)
(data, sampleRate) = sf.read(infile, dtype="float32")
for s in range(len(data)):
    chaser.add(data[s])

# Release MIDI resources explicitly before exiting.
del midiOut
del midiDevices
print("done")
| andrewbooker/audiotomidi | scanWavFile.py | scanWavFile.py | py | 1,495 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_numbe... |
72170355625 | import pandas as pd
import pickle
from pathlib import Path
def preprocess_test_df(test_clin_df, test_prot_df, test_pep_df, save_data=False):
    """Join clinical rows with wide protein/peptide abundance features.

    Pivots proteins (NPX) and peptides (PeptideAbundance) into one column
    per UniProt id / peptide, joins them on visit_id, fills missing
    abundances with 0, inner-merges onto the clinical table and shuffles
    the rows.

    Note: mutates `test_pep_df` in place by adding a 'peptide_uniprot'
    column (matching the original behavior); `save_data` is accepted for
    interface compatibility but unused.
    """
    clin = test_clin_df
    if 'upd23b_clinical_state_on_medication' in clin.columns:
        # Medication status is not used as a feature.
        clin = clin.drop(columns=['upd23b_clinical_state_on_medication'])

    # Composite key keeps identical peptide sequences from different
    # proteins apart.
    test_pep_df['peptide_uniprot'] = test_pep_df['Peptide'] + '_' + test_pep_df['UniProt']

    # visit_id as index, one feature column per protein / peptide.
    protein_wide = test_prot_df.pivot(index='visit_id', values='NPX', columns='UniProt')
    peptide_wide = test_pep_df.pivot(index='visit_id', values='PeptideAbundance', columns='peptide_uniprot')
    abundance = protein_wide.join(peptide_wide).fillna(0)

    merged = clin.merge(abundance, how='inner', left_on='visit_id', right_on='visit_id')
    return merged.sample(frac=1).reset_index(drop=True)
# Load the raw competition test tables (pandas expands '~' itself).
test_df = pd.read_csv('~/parkinsons_proj_1/parkinsons_project/parkinsons_1/data/raw/test.csv')
prot_test_df = pd.read_csv('~/parkinsons_proj_1/parkinsons_project/parkinsons_1/data/raw/test_proteins.csv')
pep_test_df = pd.read_csv('~/parkinsons_proj_1/parkinsons_project/parkinsons_1/data/raw/test_peptides.csv')

full_test_df = preprocess_test_df(test_df, prot_test_df, pep_test_df, save_data=False)

updr = 'updrs_1'
month = 0

for updr in ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']:
    updr_df = full_test_df[full_test_df['updrs_test'] == updr]
    info_cols = ['visit_id', 'visit_month', 'patient_id', 'updrs_test', 'row_id', 'group_key']
    updr_info = updr_df[info_cols]
    model_df = updr_df.drop(columns=info_cols)
    for month in [0, 6, 12, 24]:
        # Load the saved model from file.
        # expanduser() is required: unlike pandas, plain open() does not
        # expand '~' (the original path would never resolve).
        # NOTE(review): the filename is hard-coded to updrs_1 / month 0 for
        # every (updr, month) combination -- presumably it should be
        # f'model_rf_reg_{updr}_{month}.pkl'; confirm before relying on it.
        model_path = Path('~/parkinsons_proj_1/parkinsons_project/parkinsons_1/models/model_rf_reg_updrs_1_0.pkl').expanduser()
        with open(model_path, 'rb') as f:
            rf_reg = pickle.load(f)
        # Use the imported model to make predictions.
        # Fixed: the original referenced `rfc`, an undefined name.
        y_pred = rf_reg.predict(model_df)

target = 'updrs_4'
train_df = pd.read_csv(f'~/parkinsons_proj_1/parkinsons_project/parkinsons_1/data/processed/train_{target}.csv')
train_df.head() | dagartga/Boosted-Models-for-Parkinsons-Prediction | src/data/pred_pipeline.py | pred_pipeline.py | py | 2,509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"l... |
40306775358 | import numpy as np
import pandas as pd
from collections import OrderedDict
import matplotlib as mlt
import matplotlib.pyplot as plt
from scipy import optimize
def get_data():
    """Build the toy amount-spent / send-discount dataset.

    Returns:
        tuple: (X, y) numpy float arrays -- amount spent and whether a
        discount was sent (1.0) or not (0.0).
    """
    raw = OrderedDict(
        amount_spent=[50, 10, 20, 5, 65, 70, 80, 81, 1],
        send_discount=[0, 1, 1, 1, 0, 0, 0, 0, 1],
    )
    frame = pd.DataFrame.from_dict(raw)  # tabular view of the raw lists
    amounts = frame['amount_spent'].astype('float').values
    labels = frame['send_discount'].astype('float').values
    return (amounts, labels)
def get_theta(costFunction, X, y, iter=400):
    """Minimize `costFunction` over theta with scipy's TNC optimizer.

    Args:
        costFunction: callable(theta, X, y) returning (cost, gradient).
        X: feature matrix of shape (m, n); theta starts at zeros(n).
        y: target vector of length m.
        iter: maximum number of optimizer iterations.

    Returns:
        tuple: (cost at the optimum, optimizing theta).
    """
    _, n_features = X.shape
    result = optimize.minimize(
        costFunction,
        np.zeros(n_features),
        (X, y),
        jac=True,
        method='TNC',
        options={'maxiter': iter},
    )
    # `fun` is the cost at the optimum; `x` the optimizing theta.
    return (result.fun, result.x)
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + exp(-z)).

    Accepts scalars or array-likes; always evaluates via numpy.
    """
    z = np.array(z)
    return 1 / (1 + np.exp(-z))


def costFunction(theta, X, y):
    """Logistic-regression cost and gradient.

    Args:
        theta: parameter vector of length n.
        X: feature matrix (m x n), bias column included.
        y: 0/1 label vector of length m.

    Returns:
        tuple: (scalar cost J, gradient vector of length n).
    """
    m = y.size  # number of training examples
    h = sigmoid(X.dot(theta.T))  # predicted probabilities
    # Cross-entropy loss averaged over the m examples.
    J = np.sum(-y.dot(np.log(h)) - (1 - y).dot(np.log(1 - h))) / m
    grad = (h - y).dot(X) / m
    return J, grad
def load_data(url):
    """Load a headerless CSV and split it into features and labels.

    Args:
        url: path (or URL) of the CSV file; the last column is the label.

    Returns:
        tuple: (features DataFrame, labels Series).
    """
    frame = pd.read_csv(url, header=None)
    features = frame.iloc[:, :-1]
    labels = frame.iloc[:, -1]
    return (features, labels)
def run():
    """Train a logistic-regression boundary on marks.txt and plot it."""
    X , y = load_data('./marks.txt')
    ones = X[y==1] # features X where y == 1
    zeros = X[y==0] # features X where y == 0
    #X,y = get_data()
    row , col = X.shape
    # Add intercept term to X
    X = np.concatenate([np.ones((row, 1)), X], axis=1)
    (cost,theta)=get_theta(costFunction , X , y )
    print('cost => {} , theta => {}'.format(cost,theta) )
    #print(' x ',X[:,1:3]) # prints col 0 , 1
    # calculate min of X - 2 , max of X + 2
    x_treme = np.array([ np.min(X[:,1]) - 2 , np.max(X[:,1]) + 2 ])
    # calculate y extreme
    #y_treme = (-1. / theta[2]) * ( theta[1] * x_treme + theta[0] )
    # Decision boundary: theta0 + theta1*x + theta2*y = 0, solved for y.
    y_treme = - (( np.dot(theta[1] ,x_treme) ) + theta[0] ) / theta[2]
    plt.plot(x_treme , y_treme)
    plt.scatter(ones[0],ones[1] , label="1's ")
    plt.scatter(zeros[0],zeros[1], label="0's ")
    plt.legend(loc="upper right")
    plt.show()

if __name__ == "__main__":
    run()
| guruprasaad123/ml_for_life | from_scratch/logistic_regression/Newtons method/optimize.py | optimize.py | py | 2,692 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name"... |
86340516810 | from openpyxl import load_workbook
# from openpyxl.cell import Cell
if __name__ == '__main__':
    # Load the dialogue workbook; one worksheet per conversation.
    wb = load_workbook('data/FINODAYS_Доп. материал для Почта Банк_Диалоги.xlsx')
    for sn in wb.sheetnames:
        print(sn)
        marks = []
        for row in wb[sn]:
            # Column B holds the speaker role, column C the message text.
            if row[1].value == 'CLIENT':
                print(row[2].value)
            # NOTE(review): assumes row[2].value is always a string; a blank
            # cell (None) would raise AttributeError here -- confirm data.
            elif row[2].value.startswith('Оценка'):
                marks.append(row[2].value)
        print('Оценки:', *marks, end='\n' + '-' * 50 + '\n')
| eivankin/finodays-2nd-stage | get_user_messages.py | get_user_messages.py | py | 546 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 5,
"usage_type": "call"
}
] |
432875678 | import numpy as np
from torch.utils import data
import torch as t
import matplotlib.pyplot as plt
import h5py
from .utils.utils import mat2gray_nocrop, plot_img_with_labels
import os
import random
import ipdb
from .visualize_predictions import draw_label_img
from scipy.ndimage import gaussian_filter
import monai
def torch_randint(max_v):
    """Draw one uniform random integer in [0, max_v) using torch's RNG."""
    sample = t.randint(max_v, (1, 1))
    return sample.view(-1).numpy()[0]
def torch_rand(size=1):
    """Draw `size` uniform random floats in [0, 1) as a numpy array."""
    samples = t.rand(size)
    return samples.numpy()
def augmentation(img, mask):
    """Apply one random flip/rotation augmentation to an image/mask pair.

    The same geometric transform is applied to both tensors so the mask
    stays aligned with the image.  (The large blocks of commented-out
    intensity-jitter experiments from the original have been removed.)

    Args:
        img: torch tensor with spatial dims first (H x W x C layout here).
        mask: torch tensor with matching spatial dims.

    Returns:
        tuple of torch tensors: (augmented img, augmented mask).
    """
    img = img.numpy()
    mask = mask.numpy()
    # r = [horizontal flip?, vertical flip?, number of 90-degree rotations]
    r = [torch_randint(2), torch_randint(2), torch_randint(4)]
    if r[0]:
        img = np.fliplr(img)
        mask = np.fliplr(mask)
    if r[1]:
        img = np.flipud(img)
        mask = np.flipud(mask)
    img = np.rot90(img, k=r[2])
    mask = np.rot90(mask, k=r[2])
    # .copy() is required: fliplr/flipud/rot90 return views with negative
    # strides, which torch.from_numpy cannot wrap.
    return t.from_numpy(img.copy()), t.from_numpy(mask.copy())
class FociDataset(data.Dataset):
    """Map-style dataset of image/label pairs stored in one HDF5 file.

    Keys are "<name>_image" / "<name>_label".  Non-test items are randomly
    cropped to `crop_size`; train items are additionally flip/rotation
    augmented.
    """
    def __init__(
        self,
        *,
        hdf5_filename: str,
        filenames: tuple[str, ...],
        split: str,
        crop_size: tuple[int, int],
        out_len: int,
    ):
        self.hdf5_filename = hdf5_filename
        self.split = split  # "train", "test", or other (validation-like)
        self.crop_size = crop_size
        self.filenames = filenames
        self.out_len = out_len  # NOTE(review): unused in this class -- confirm
        # Opened lazily in __getitem__ so each DataLoader worker gets its
        # own h5py handle.
        self.h5data = None

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        if self.h5data is None:
            self.h5data = h5py.File(self.hdf5_filename, "r")
        filename = self.filenames[idx]
        # Move the leading axis (presumably channels) last for the
        # crop/augment code below.
        img = t.from_numpy(self.h5data[filename + "_image"][...]).permute(
            1, 2, 0
        )
        label = t.from_numpy(self.h5data[filename + "_label"][...]).permute(
            1, 2, 0
        )
        in_size = img.shape
        out_size = self.crop_size
        if not self.split == "test":
            # Random top-left corner for the crop.
            r = [
                t.randint(in_size[i] - out_size[i], (1,))[0] for i in range(2)
            ]
            img = img[
                r[0] : r[0] + out_size[0], r[1] : r[1] + out_size[1], :,
            ]
            label = label[
                r[0] : r[0] + out_size[0],
                r[1] : r[1] + out_size[1],
                :,  # TODO: check this. Why do I need to swap the order of the two indices???
            ]
            if self.split == "train":
                img, label = augmentation(img, label)
        # Back to channels-first float tensors for the model.
        img = img.permute(2, 0, 1).float()
        label = label.permute(2, 0, 1).float()
        return img, label
| SalamanderXing/dna_foci_detection | dna_foci_detection/data_loaders/foci/dataset.py | dataset.py | py | 4,646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.fliplr",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.fliplr",
"line_number... |
20871041067 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import logic
# Figure size (inches), board offset, and cell colors (RGBA) indexed by
# cell state: 0 = dead, 1 = alive.
UI_BOARD_SIZE = (8, 6)
UI_BOARD_OFFSET = 0.25
UI_BOARD_CELL_COLORS = [(0,0,0,0.4), (0,0.9,1,0.7)]

'''
UI front-end implementation
'''
class Board:
    """Matplotlib view of the Game of Life board.

    Renders the cell grid, a generation counter and Next/Reset buttons,
    and forwards clicks to the logic-layer handler registered via
    set_click_event_handler().
    """

    def __init__(self):
        # Create figure and axes
        self.fig, self.ax = plt.subplots(figsize=UI_BOARD_SIZE)
        # Board background color
        self.ax.set_facecolor((0,0,0,0.15))
        # Hiding axes
        self.ax.get_xaxis().set_visible(False)
        self.ax.get_yaxis().set_visible(False)
        # Scale axes
        self.ax.set_xlim([0,1])
        self.ax.set_ylim([0,1])
        self.ax.set_aspect('equal', adjustable='box')
        # Board cells
        self.patches = []
        # Last board state - needed for differential update of board
        self.last_state = None
        # Connect to UI for mouse click event
        connection_id = self.fig.canvas.mpl_connect('button_press_event', self.on_click_cell)
        self.on_click_event_handler = None
        # Generation label
        self.lbl_generation = None
        # Next generation button
        axnext = plt.axes([0.45, 0.9, 0.2, 0.075])
        self.bnext = plt.Button(axnext, 'Next', color=(0, 1, 0.7, 0.7), hovercolor=(0, 1, 0.7, 1))
        self.bnext.label.set_fontsize(16)
        self.bnext.on_clicked(self.on_click_btn_next)
        # Reset button
        axreset = plt.axes([0.25, 0.9, 0.1, 0.075])
        self.breset = plt.Button(axreset, 'Reset', color=(1, 0.2, 0, 0.7), hovercolor=(1, 0.2, 0, 1))
        self.breset.label.set_fontsize(16)
        self.breset.on_clicked(self.on_click_btn_reset)

    def on_click_btn_next(self, event):
        """Forward the Next-button click to the logic layer."""
        if self.on_click_event_handler is None:
            raise ValueError
        self.on_click_event_handler(logic.EVENT_NEXT_CLICK)

    def on_click_btn_reset(self, event):
        """Forward the Reset-button click to the logic layer."""
        if self.on_click_event_handler is None:
            raise ValueError
        self.on_click_event_handler(logic.EVENT_RESET_CLICK)

    def on_click_cell(self, event):
        """Translate a left-click inside the axes into cell coordinates."""
        if not event.inaxes == self.ax:
            return
        # Left mouse button click to change cell state
        if event.button == 1:
            x = int(np.floor((event.xdata - self.cell_margin[0]) / self.cell_width))
            y = int(np.floor((event.ydata - self.cell_margin[1]) / self.cell_height))
            if self.on_click_event_handler is None:
                raise ValueError
            self.on_click_event_handler(logic.EVENT_CELL_CLICK, data=(x, y))

    def set_click_event_handler(self, handler):
        """Register the callback receiving (event, data=...) UI events."""
        self.on_click_event_handler = handler

    def redraw_board(self, state):
        """Redraw every cell from scratch (first draw or size change)."""
        # Update cell size and margin
        self.cell_width = 1. / state.shape[0]
        self.cell_height = 1. / state.shape[1]
        self.cell_margin = (self.cell_width * 0.05, self.cell_height * 0.05)
        # Remove all previously drawn patches
        [p.remove() for p in reversed(self.ax.patches)]
        # Add new patches
        for x in range(state.shape[0]):
            for y in range(state.shape[1]):
                rect = patches.Rectangle((x*self.cell_height + self.cell_margin[1], y*self.cell_width + self.cell_margin[0]),
                            self.cell_width*0.9, self.cell_height*0.9,
                            fill=True, facecolor=UI_BOARD_CELL_COLORS[state[x, y]])
                self.ax.add_patch(rect)
        self.patches = self.ax.patches
        # Update last state
        self.last_state = state.copy()
        plt.show()

    def redraw_ui_elements(self, generation=None):
        """Refresh the generation counter annotation."""
        # UI elements, status, buttons
        if generation is not None:
            if self.lbl_generation is not None:
                self.lbl_generation.remove()
            self.lbl_generation = self.ax.annotate('%d' % generation,
                    color='k', weight='bold', fontsize=16, ha='center', va='center',
                    xy=(0.95, 1.08), xycoords=self.ax.transAxes, annotation_clip=False)

    def redraw(self, state, generation=None):
        """Full redraw of the board and (optionally) the UI elements."""
        # Redraw game board
        self.redraw_board(state)
        # UI elements, status, buttons
        if generation is not None:
            self.redraw_ui_elements(generation)

    def update(self, state, generation=None):
        """Differential update: repaint only cells that changed."""
        # Redraw entire board if state dimension has changed
        if self.last_state is None or state.size != self.last_state.size:
            self.redraw(state, generation)
            # Fix: the original fell through here and needlessly diffed and
            # re-annotated right after the full redraw.
            return
        # Update only those cells, that have changed since last state
        diff = np.subtract(state, self.last_state)
        changed_idx = list(zip(*diff.nonzero()))
        for xy in changed_idx:
            # NOTE(review): this index assumes a square board
            # (x*shape[0]+y); for non-square grids x*state.shape[1]+y looks
            # intended -- confirm against the patch-creation order above.
            self.patches[xy[0] * state.shape[0] + xy[1]].set_facecolor(UI_BOARD_CELL_COLORS[state[xy[0], xy[1]]])
        # Update last state
        self.last_state = state.copy()
        # UI elements, status, buttons
        if generation is not None:
            self.redraw_ui_elements(generation)
        plt.show()
| manu-ho/game_of_life | board.py | board.py | py | 5,007 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "m... |
31063851875 |
from ..utils import Object
class MessageReplyInfo(Object):
    """
    Contains information about replies to a message

    Attributes:
        ID (:obj:`str`): ``MessageReplyInfo``

    Args:
        reply_count (:obj:`int`):
            Number of times the message was directly or indirectly replied
        recent_replier_ids (List of :class:`telegram.api.types.MessageSender`):
            Identifiers of at most 3 recent repliers to the message; available in channels with a discussion supergroupThe users and chats are expected to be inaccessible: only their photo and name will be available
        last_read_inbox_message_id (:obj:`int`):
            Identifier of the last read incoming reply to the message
        last_read_outbox_message_id (:obj:`int`):
            Identifier of the last read outgoing reply to the message
        last_message_id (:obj:`int`):
            Identifier of the last reply to the message

    Returns:
        MessageReplyInfo

    Raises:
        :class:`telegram.Error`
    """
    ID = "messageReplyInfo"

    def __init__(self, reply_count, recent_replier_ids, last_read_inbox_message_id, last_read_outbox_message_id, last_message_id, **kwargs):
        self.reply_count = reply_count  # int
        self.recent_replier_ids = recent_replier_ids  # list of MessageSender
        self.last_read_inbox_message_id = last_read_inbox_message_id  # int
        self.last_read_outbox_message_id = last_read_outbox_message_id  # int
        self.last_message_id = last_message_id  # int

    @staticmethod
    def read(q: dict, *args) -> "MessageReplyInfo":
        # Deserialize each recent replier through the generic Object reader.
        repliers = [Object.read(item) for item in q.get('recent_replier_ids', [])]
        return MessageReplyInfo(
            q.get('reply_count'),
            repliers,
            q.get('last_read_inbox_message_id'),
            q.get('last_read_outbox_message_id'),
            q.get('last_message_id'),
        )
| iTeam-co/pytglib | pytglib/api/types/message_reply_info.py | message_reply_info.py | py | 2,077 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 44,
"usage_type": "name"
}
] |
# drawing the Earth on equirectangular projection
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import numpy  # NOTE(review): imported but unused in this script
import matplotlib.ticker as mticker
# 64x32 inch frameless canvas; with dpi=8192/64 below this renders an 8192x4096 px image.
fig = plt.figure(figsize=(64,32), frameon=False)
# Equirectangular (plate carree) projection centred on longitude 180 (Pacific-centred map).
ax = fig.add_subplot(1,1,1, projection=ccrs.PlateCarree(central_longitude=180))
ax.set_global()
#ax.stock_img()
#ax.coastlines(resolution='50m')
# Natural Earth 50m-resolution layers: ocean, land, lakes, rivers and country borders.
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'ocean', '50m', edgecolor='face', facecolor=cfeature.COLORS['water']))
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'land', '50m', edgecolor='face', facecolor=cfeature.COLORS['land']))
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'lakes', '50m', edgecolor='face', facecolor=cfeature.COLORS['water']))
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'rivers_lake_centerlines', '50m', edgecolor=cfeature.COLORS['water'], facecolor='none'))
ax.add_feature(cfeature.NaturalEarthFeature('cultural', 'admin_0_countries', '50m', edgecolor='gray', facecolor='none'))
# Unlabelled graticule: every 60 degrees of longitude, 30 degrees of latitude.
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=False, linewidth=1, alpha=0.8)
gl.xlocator = mticker.FixedLocator(list(range(0,361,60)))
gl.ylocator = mticker.FixedLocator(list(range(-90,91,30)))
# Remove all margins so the map fills the entire output image.
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
plt.savefig("earth-equirectanguler3.png", dpi=8192/64)
| shuyo/xr | earth.py | earth.py | py | 1,349 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cart... |
import torch
from gms_loss import *
from PIL import Image
from torchvision import transforms
from gms_loss import MSGMS_Loss  # NOTE(review): redundant with the star import above

# Compare two super-resolution outputs with the multi-scale GMS loss,
# computed on the luminance (Y) channel only. Paths are hard-coded test images.
image_path_1= './lj_test_image/1118_visdon_HR_downsampling_2loss_visstyle/0_SR_x_1105_4.png'
image_path_2 = './lj_test_image/1116_tcl_bright/6_SR_x_1105_4.png'
# Convert both images to YCbCr and keep only the Y (luma) plane.
img_ycbcr_1 = Image.open(image_path_1).convert('YCbCr')
img_y_1, img_cb_1, img_cr_1 = img_ycbcr_1.split()
img_ycbcr_2 = Image.open(image_path_2).convert('YCbCr')
img_y_2, img_cb_2, img_cr_2 = img_ycbcr_2.split()
# Crop the same 100x200 region from both images so the tensors have equal size.
img_y_1 = img_y_1.crop((0,0, 100, 200))
img_y_2 = img_y_2.crop((0,0, 100, 200))
transform = transforms.ToTensor()
# unsqueeze(0) adds a batch dimension — presumably the (N, C, H, W) layout
# MSGMS_Loss expects; confirm against gms_loss.
Ir = transform(img_y_1).unsqueeze(0)
Ii = transform(img_y_2).unsqueeze(0)
print(Ir.size())
# print(Ir.size())
loss = MSGMS_Loss()
y = loss.forward(Ii, Ir)
print(y)
| JOY2020-Mh/SR_2.0 | gsmd_LOSS/image_calculate_gmsd.py | image_calculate_gmsd.py | py | 795 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number"... |
13168287871 | import numpy as np
import matplotlib
from matplotlib import pyplot as plt
plt.switch_backend('agg')
import matplotlib.patches
from scipy import stats
import pandas as pd
import math
from mpi4py import MPI
import sys
import itertools
import glob
import os
plt.ioff()
# Command-line argument selects the experimental design directory (e.g. CMIP_curtailment).
design = str(sys.argv[1])
all_IDs = ['3600687', '7000550', '7200799', '7200645', '3704614', '7202003']#np.genfromtxt('../structures_files/metrics_structures.txt', dtype='str').tolist()
nStructures = len(all_IDs)
percentiles = np.arange(0, 100)
# Enumerate the scenario directories for this design; CMIP3_070 is excluded.
os.chdir('../' + design)
directories = glob.glob('CMIP*_*')
directories.remove('CMIP3_070')
os.chdir('../output_analysis')
scenarios = len(directories)
# 27 states of the world (SOW) per scenario exist only for the curtailment design.
if design == 'CMIP_curtailment':
    sow = 27
else:
    sow = 1
idx = np.arange(2, sow*2+2, 2)  # every-other columns holding the shortage values in the info files
historical = pd.read_csv('../structures_files/shortages.csv', index_col=0)
def alpha(i, base=0.2):
    """Return the cumulative opacity of ``i + 1`` stacked layers of ``base`` alpha.

    Each additional layer is composited over the previous one with
    a_next = a + base - a * base.
    """
    def composite(a):
        return a + base - a * base

    value = composite(0)
    for _ in range(i):
        value = composite(value)
    return value
def shortage_duration(sequence, threshold):
    """Return the lengths of consecutive runs where values exceed ``threshold``."""
    exceedances = (value > threshold for value in sequence)
    return [len(list(run)) for is_short, run in itertools.groupby(exceedances) if is_short]
def plotSDC(synthetic, histData, structure_name):
    """Plot historical vs. synthetic annual shortage percentile curves.

    Args:
        synthetic: 2-D array [months x (scenarios * sow)] of monthly shortages.
        histData: 1-D array of monthly historical shortages (length divisible by 12).
        structure_name: structure ID used in the title and output filenames.

    Saves .svg and .png figures under ../<design>/ShortagePercentileCurves/.
    Relies on module-level globals: scenarios, sow, design, and alpha().
    """
    n = 12
    # Reshape historic data to a [no. years x no. months] matrix
    f_hist = np.reshape(histData, (int(np.size(histData) / n), n))
    # Reshape to annual totals
    f_hist_totals = np.sum(f_hist, 1)
    # Calculate historical shortage duration curves
    F_hist = np.sort(f_hist_totals) # for inverse sorting add this at the end [::-1]
    # Reshape synthetic data
    # Create matrix of [no. years x no. months x no. samples]
    synthetic_global = np.zeros([int(len(histData) / n), n, scenarios * sow])
    # Loop through every SOW and reshape to [no. years x no. months]
    for j in range(scenarios * sow):
        synthetic_global[:, :, j] = np.reshape(synthetic[:, j], (int(np.size(synthetic[:, j]) / n), n))
    # Reshape to annual totals
    synthetic_global_totals = np.sum(synthetic_global, 1)
    p = np.arange(100, -10, -10)
    # Calculate synthetic shortage duration curves
    F_syn = np.empty([int(len(histData) / n), scenarios * sow])
    # NOTE(review): np.NaN is removed in NumPy 2.0; np.nan is the portable spelling.
    F_syn[:] = np.NaN
    for j in range(scenarios * sow):
        F_syn[:, j] = np.sort(synthetic_global_totals[:, j])
    # For each percentile of magnitude, calculate the percentile among the experiments ran
    # NOTE(review): perc_scores is computed but never used below.
    perc_scores = np.zeros_like(F_syn)
    for m in range(int(len(histData) / n)):
        perc_scores[m, :] = [stats.percentileofscore(F_syn[m, :], j, 'rank') for j in F_syn[m, :]]
    P = np.arange(1., len(histData)/12 + 1) * 100 / (len(histData)/12)
    ylimit = np.max(F_syn)
    fig, (ax1) = plt.subplots(1, 1, figsize=(14.5, 8))
    # ax1: shaded bands for each frequency decile of the synthetic ensemble.
    handles = []
    labels = []
    color = '#000292'
    for i in range(len(p)):
        ax1.fill_between(P, np.min(F_syn[:, :], 1), np.percentile(F_syn[:, :], p[i], axis=1), color=color, alpha=0.1)
        ax1.plot(P, np.percentile(F_syn[:, :], p[i], axis=1), linewidth=0.5, color=color, alpha=0.3)
        # Legend swatch whose opacity matches the cumulative band alpha (see alpha()).
        handle = matplotlib.patches.Rectangle((0, 0), 1, 1, color=color, alpha=alpha(i, base=0.1))
        handles.append(handle)
        label = "{:.0f} %".format(100 - p[i])
        labels.append(label)
    #Plot 50th percentile line separately
    ax1.plot(P, np.percentile(F_syn[:, :], p[5], axis=1), linewidth=0.7, color=color, alpha=0.7, linestyle='dashed')
    ax1.plot(P, F_hist, c='black', linewidth=2, label='Historical record')
    ax1.set_ylim(0, ylimit)
    ax1.set_xlim(0, 100)
    ax1.legend(handles=handles, labels=labels, framealpha=1, fontsize=8, loc='upper left',
               title='Frequency in experiment', ncol=2)
    ax1.set_xlabel('Shortage magnitude percentile', fontsize=20)
    ax1.set_ylabel('Annual shortage (Million $m^3$)', fontsize=20)
    fig.suptitle('Shortage magnitudes for ' + structure_name, fontsize=16)
    plt.subplots_adjust(bottom=0.2)
    fig.savefig('../' + design + '/ShortagePercentileCurves/' + structure_name + '_' + design + '.svg')
    fig.savefig('../' + design + '/ShortagePercentileCurves/' + structure_name + '_' + design + '.png')
    fig.clf()
# Begin parallel simulation
comm = MPI.COMM_WORLD
# Get the number of processors and the rank of this processor
rank = comm.rank
nprocs = comm.size
# Determine the chunk of structures each processor will need to do
count = int(math.floor(nStructures / nprocs))
remainder = nStructures % nprocs
# The first `remainder` ranks take one extra structure each.
if rank < remainder:
    start = rank * (count + 1)
    stop = start + count + 1
else:
    start = remainder * (count + 1) + (rank - remainder) * count
    stop = start + count
for i in range(start, stop):
    # Last 768 months (64 years); the 1233.4818/1e6 factor presumably converts
    # acre-feet to million cubic metres — confirm against the data files.
    histData = historical.loc[all_IDs[i]].values[-768:] * 1233.4818 / 1000000
    synthetic = np.zeros([len(histData), scenarios * sow])
    for j in range(scenarios):
        path = '../' + design + '/Infofiles/' + all_IDs[i] + '/' + all_IDs[i] + '_info_' + directories[j] + '.txt'
        data = np.loadtxt(path)
        synthetic[:, j * sow:j * sow + sow] = data[:, idx] * 1233.4818 / 1000000
    plotSDC(synthetic, histData, all_IDs[i])
| antonia-had/rival_framings_demand | output_analysis/shortage_duration_curves.py | shortage_duration_curves.py | py | 5,340 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.switch_backend",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ioff",
"line_number": 15,
"usage_type": "call"
},
{
"api_name"... |
31352733665 | # Import all the modules to determine the cofusion matrix
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import os
# This function calculates the confusion matrix and visualizes it
def plot_confusion_matrix(y_test, y_pred, file_path,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Calculates and plots a confusion matrix from
    the given labels

    Parameters
    ----------
    y_test: list
        Already given labels
    y_pred:
        Predictions made by the model
    file_path: str
        Name of the of the file where the results should be stored,
        together with a path (".png" is appended)
    normalize: bool
        Whether the confusion matrix should be normalized row-wise
    title: str
        Whether the plot should have any special title
    cmap: plt.cm.*
        What color scheme should be used for plotting

    Returns
    -------
    An image of the confusion matrix saved to "<file_path>.png"
    """
    cm = confusion_matrix(y_test, y_pred)
    # NOTE(review): mutates numpy's global print options as a side effect;
    # kept for backward compatibility, though nothing is printed here.
    np.set_printoptions(precision=2)
    # (Removed dead code: classes_pred/classes_test/classes and thresh were
    # computed but never used — the tick labels below are explicitly empty.)
    if normalize:
        row_sums = cm.sum(axis=1)[:, np.newaxis]
        # Guard against division by zero for classes with no true samples.
        for row in row_sums:
            if row[0] == 0:
                row[0] = 1
        cm = cm.astype('float') / row_sums
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.tight_layout()
    plt.xticks([], [])
    plt.yticks([], [])
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig("{}.png".format(file_path))
    plt.close()
| martinferianc/PatternRecognition-EIE4 | Coursework 2/post_process.py | post_process.py | py | 1,935 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 38,
"usage_type": "call"
},
{
"api_... |
30600682231 | from django.dispatch import receiver
from django.db.models.signals import post_save
from expensense.models import Expense, ApprovalConditions
from django.utils import timezone
@receiver(post_save, sender=Expense)
def auto_approve_expense(sender, instance, **kwargs):
    """Auto-approve an expense after save when it falls under the configured limits.

    Runs on every Expense post_save. A pending expense below the manager's
    max_amount (with signature similarity > 80%) is auto-approved to
    manager_approved; a manager-approved expense below the admin's max_amount
    is then auto-approved to admin_approved on the next save.

    NOTE(review): instance.save() inside a post_save handler re-fires this
    signal; the status checks appear to prevent infinite recursion — confirm.
    """
    # print('Auto Approved called')
    try:
        #query to get the manager and admin condition for the employee's team and company
        # NOTE(review): if only the admin condition is missing, DoesNotExist is
        # raised here and manager auto-approval is skipped too — confirm intended.
        manager_condition = ApprovalConditions.objects.get(user__role = 'MNG',
                                            team = instance.user_id.team,
                                            company = instance.user_id.company)
        admin_condition = ApprovalConditions.objects.get(user__role = 'ADM',
                                            team = instance.user_id.team,
                                            company = instance.user_id.company)
        # approve the request if the expense amount is lesser than in approval
        # condition and is pending and the signature similarity is more than 80%
        if (manager_condition and (float(instance.amount) <= manager_condition.max_amount)
            and (int(instance.status)==Expense.pending) and (float(instance.similarity)>80)):
            # Auto approve for manager
            instance.manager_approved_at = timezone.now()
            instance.manager_auto_approved = True
            instance.status = Expense.manager_approved
            instance.save()
            manager_condition = None
        # approve the request if the expense amount is lesser than in approval
        # condition and is manager approved and the signature similarity is more than 80%
        elif (admin_condition and (int(instance.status) == Expense.manager_approved)
              and (float(instance.amount) <= admin_condition.max_amount) and
              float(instance.similarity)>80):
            # Auto approve for admin
            instance.admin_approved_at = timezone.now()
            instance.admin_auto_approved = True
            instance.status = Expense.admin_approved
            instance.save()
            admin_condition=None
    except ApprovalConditions.DoesNotExist:
        pass
| praharsh05/ExpenSense | expensense_main/expensense/signals.py | signals.py | py | 2,303 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "expensense.models.ApprovalConditions.objects.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "expensense.models.ApprovalConditions.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "expensense.models.ApprovalConditions",
"li... |
20219614528 | # match close atoms in two moleculas by maximum weighted bipartite matching
import numpy as np
import logging
# weights - numpy 2-dimensional array
def wbm(weights):
    """Minimum-weight bipartite matching between rows and columns of `weights`.

    Solves a 0/1 integer program with PuLP: choose edges (u, v) minimising the
    total weight, with the smaller side fully matched and the larger side
    matched at most once.

    Args:
        weights: 2-D numpy array of edge weights (m rows x n columns).

    Returns:
        (resultInd, resultIndExtra): resultInd[u] is the column matched to
        row u (-1 if unmatched); resultIndExtra additionally appends the
        unmatched column indices at the end.
    """
    import pulp
    pulp.LpSolverDefault.msg = False
    prob = pulp.LpProblem("WBM_Problem", pulp.LpMinimize)
    m,n = weights.shape
    # print(m,n)
    from_nodes = np.arange(m); to_nodes = np.arange(n)
    # Create The Decision variables: one binary e[u][v] per candidate edge.
    choices = pulp.LpVariable.dicts("e",(from_nodes, to_nodes), 0, 1, pulp.LpInteger)
    # Add the objective function
    prob += pulp.lpSum([weights[u][v] * choices[u][v]
                        for u in from_nodes
                        for v in to_nodes]), "Total weights of selected edges"
    # Constraint set ensuring that the total from/to each node
    # is less than its capacity (= 1)
    ind1 = np.argsort(weights[:,0].reshape(-1))
    # print(ind1)
    ind2 = np.argsort(weights[0,:].reshape(-1))
    if from_nodes.size >= to_nodes.size:
        # More rows than columns: every column must be matched exactly once.
        for v in to_nodes: prob += pulp.lpSum([choices[u][v] for u in from_nodes]) == 1, ""
        for i in range(m):
            #if i < n//2:
            # NOTE(review): `i < 0` is never true, so this branch is dead; the
            # commented `i < n//2` above looks like the experimental variant.
            if i < 0:
                prob += pulp.lpSum([choices[from_nodes[ind1[i]]][v] for v in to_nodes]) == 1, ""
            else: prob += pulp.lpSum([choices[from_nodes[ind1[i]]][v] for v in to_nodes]) <= 1, ""
    else:
        # More columns than rows: every row must be matched exactly once.
        for u in from_nodes: prob += pulp.lpSum([choices[u][v] for v in to_nodes]) == 1, ""
        for i in range(n):
            #if i < m//2:
            # NOTE(review): dead branch, see above.
            if i < 0:
                prob += pulp.lpSum([choices[u][to_nodes[ind2[i]]] for u in from_nodes]) == 1, ""
            else: prob += pulp.lpSum([choices[u][to_nodes[ind2[i]]] for u in from_nodes]) <= 1, ""
    # The problem is solved using PuLP's choice of Solver
    prob.solve()
    # The status of the solution is printed to the screen
    # print( "Status:", pulp.LpStatus[prob.status])
    # Each of the variables is printed with it's resolved optimum value
    # for v in prob.variables():
    #     if v.varValue > 1e-3:
    #         print(f'{v.name} = {v.varValue}')
    # print(f"Sum of wts of selected edges = {round(pulp.value(prob.objective), 4)}")
    # Recover selected edges from the variable names "e_u_v".
    selected_from = [v.name.split("_")[1] for v in prob.variables() if v.value() > 1e-3]
    selected_to = [v.name.split("_")[2] for v in prob.variables() if v.value() > 1e-3]
    selected_edges = []
    resultInd = np.zeros(m, dtype='int32')-1
    for su, sv in list(zip(selected_from, selected_to)):
        resultInd[int(su)] = int(sv)
        selected_edges.append((su, sv))
    resultIndExtra = np.copy(resultInd)
    # Append columns that were never selected so the caller can account for them.
    forget = np.setdiff1d(np.arange(n), selected_to)
    if forget.size>0: resultIndExtra = np.concatenate([resultIndExtra,forget])
    return resultInd, resultIndExtra
| gudasergey/pyFitIt | pyfitit/wbm.py | wbm.py | py | 2,829 | python | en | code | 28 | github-code | 36 | [
{
"api_name": "pulp.LpSolverDefault",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pulp.LpProblem",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pulp.LpMinimize",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.ar... |
# Draw a complete graph on 8 nodes using a circular layout.
import matplotlib.pyplot as plt  # import the submodule explicitly: `import matplotlib`
                                 # alone does not make `matplotlib.pyplot` available
import networkx as nx
import itertools

G = nx.Graph()

# Add the nodes.
L = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
G.add_nodes_from(L)

# Add an edge between every pair of nodes (complete graph K8).
G.add_edges_from(itertools.combinations(L, 2))

# Display the figure. draw_circular places nodes deterministically on a circle,
# so the layout (unlike spring layouts) is not random between runs.
nx.draw_circular(G, with_labels=True, edge_color='b')
plt.show()
| dionesiusap/matplotlib-networkx-example | graph.py | graph.py | py | 515 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "networkx.Graph",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "networkx.draw_circular",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotli... |
18896618824 | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^johnjot/', include('johnjot.foo.urls')),
(r'^api/', include('core.api.urls')),
(r'^admin/', include(admin.site.urls)),
)
| maraca/JohnJot | core/urls.py | urls.py | py | 329 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
23048227631 | #!/usr/bin/env python
# coding: utf-8
# # 積み上げ棒グラフを作成する
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import pyplot as plt
import numpy as np

# Arbitrary sample values.
bar1 = [100, 50, 200] # first tier of the stacked bar chart
bar2 = [100, 200, 50] # second tier of the stacked bar chart
bar3 = [100, 250, 100] # third tier of the stacked bar chart
bar3_st = np.add(bar1, bar2).tolist() # baseline (bottom) position for stacking bar3
sample_labels = ['SampleA', 'SampleB', 'SampleC'] # data labels
x = [0, 1, 2] # x-axis positions of the bars
barwidth = 0.7 # bar width, chosen with the x positions in mind
plt.figure() # create the Figure object
plt.bar(x, bar1, width=barwidth, label='class1')
plt.bar(x, bar2, bottom=bar1, width=barwidth, label='class2') # `bottom` stacks tier 2 on tier 1
plt.bar(x, bar3, bottom=bar3_st, width=barwidth, label='class3') # `bottom` stacks tier 3 on tiers 1+2
plt.xticks(x, sample_labels, fontweight='bold') # set the x-axis tick labels
# The code below adds value labels inside the bars.
ax = plt.gca() # gca() = get current axis
handles, labels = ax.get_legend_handles_labels() # handles: artists; labels: legend texts
plt.legend(handles[::-1], labels[::-1], loc='upper left', bbox_to_anchor=(1,1)) # reverse so legend order matches stack order
for i in range(len(bar1)):
    ax.annotate(str(bar1[i]), xy=(x[i] - 0.1, (bar1[i] / 3)), color='white', fontweight='bold')
for i in range(len(bar2)):
    ax.annotate(str(bar2[i]), xy=(x[i] - 0.1, (bar2[i] / 3) + bar1[i]), color='white', fontweight='bold')
for i in range(len(bar3)):
    ax.annotate(str(bar3[i]), xy=(x[i] - 0.1, (bar3[i] / 3) + bar3_st[i]), color='white', fontweight='bold')
plt.subplots_adjust(right=0.8) # widen the right margin for the legend (default right=0.9)
plt.title('BarPlot Test')
plt.xlabel('Sample Name')
plt.ylabel('count')
plt.show()
#plt.savefig('barplot.pdf') # to export as PDF
#plt.savefig('barplot.svg',format='svg') # to export as vector graphics
plt.close()
# **numpy**モジュール **add( )**
#
# ・配列の要素を足し算する。
#
# <br>
#
# **numpy**モジュール **tolist( )**
#
# ・Numpy配列をリスト型に変換する。
#
# <br>
#
# **pyplot**モジュール **gca( )**
#
# ・現在のAxesオブジェクトを取得する。
#
# <br>
#
# **get_legend_handles_labels( )**
#
# ・handlerとlabelを取得する。handlerは線やマーカーを含んだオブジェクト。labelsは凡例に表示されるラベル(リスト型)。
#
# ・Axesオブジェクト用
#
# <br>
#
# **annotate(s, xy)**
#
# ・xyで指定した位置にsで指定した文字を出力する。
#
# ・Axesオブジェクト用
#
# <br>
#
# ---
# ### 積み上げる段が多い場合に対応できるように、for文で処理する
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import pyplot as plt
import numpy as np

# Arbitrary sample values.
bar1 = [100, 50, 200] # first tier of the stacked bar chart
bar2 = [100, 200, 50] # second tier of the stacked bar chart
bar3 = [100, 250, 100] # third tier of the stacked bar chart
bar_data = [bar1, bar2, bar3]
sample_labels = ['SampleA', 'SampleB', 'SampleC'] # data labels
group_labels = ['class1', 'class2', 'class3']
x = [0, 1, 2] # x-axis positions of the bars
barwidth = 0.7 # bar width, chosen with the x positions in mind
fig, ax = plt.subplots() # create Figure and Axes objects
# Running baseline for stacking.
# NOTE(review): sized with len(bar_data) but indexed per x position below;
# this only works because len(bar_data) == len(x) == 3 here.
bottom_position = np.zeros(len(bar_data))
for i in range(len(bar_data)): # add one tier per iteration
    ax.bar(x, bar_data[i], width=barwidth, bottom=bottom_position, label=group_labels[i])
    for j in range(len(bar_data[i])): # annotate() needs one call per x position
        ax.annotate(str(bar_data[i][j]), xy=(x[j] - 0.1, (bar_data[i][j] / 3) + bottom_position.tolist()[j]), color='white', fontweight='bold')
    bottom_position = np.add(bar_data[i], bottom_position)
ax.set_xticks(x)
ax.set_xticklabels(sample_labels, fontweight='bold') # set the x-axis tick labels
handles, labels = ax.get_legend_handles_labels() # handles: artists; labels: legend texts
ax.legend(handles[::-1], labels[::-1], loc='upper left', bbox_to_anchor=(1,1)) # reverse so legend order matches stack order
fig.subplots_adjust(right=0.8) # widen the right margin for the legend (default right=0.9)
ax.set_title('BarPlot Test')
ax.set_xlabel('Sample Name')
ax.set_ylabel('Count')
plt.show()
#fig.savefig('barplot.pdf')
#fig.savefig('barplot.svg',format='svg')
plt.close()
# **numpy**モジュール **zeros(shape)**
#
# ・要素0の配列を生成する。第一引数に配列のshapeを指定できる。
| workskt/book | _build/jupyter_execute/python_plot_cumulativebar.py | python_plot_cumulativebar.py | py | 5,462 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "numpy.add",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplo... |
12212813324 | #!/usr/bin/env python
''' This is the parallel recursive solution to the Tower of Hanoi and is copied
from the code written in the parallel/rebuilding-the-tower-of-hanoi/ page
of www.drdobbs.com.
The solution has been modified from drdobbs' solution to work with my limited
knowledge of mpi4py. If you use the sleep() functionality to add some dead
time into the loops - even a second will do - you'll start to see the
computation time decrease as the number of processes increases.
Currently the dead time is set to 2 seconds. The solution times for this dead
time are:
1 proc 30.50s
2 procs 17.03s
4 procs 11.14s
8 procs 9.28s
It's very hard to solve the problem in less than 12 seconds. I haven't been
able to do it!
'''
from mpi4py import MPI
import sys
import time
import math
import pickle
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
name = MPI.Get_processor_name()
def tower(src, dest, temp, idx, offset, noofdiscs, plan):
    """Recursively fill `plan` with Tower of Hanoi moves, split across MPI ranks.

    Args:
        src, dest, temp: pole indices for this sub-problem.
        idx: 1-based index in `plan` of this sub-problem's middle move.
        offset: half-width of the sub-problem; halves at every recursion level.
        noofdiscs: total number of discs.
        plan: list of [src, dest] pairs (one per move), filled in place.

    Uses the module-level `rank` and `size` so each MPI process only descends
    into its own share of the recursion tree.
    """
    if (offset > 0):
        # Defines the level of recursion that we are at. It runs from 0 to
        # noofdiscs-1 throughout the calculation.
        level = noofdiscs - 2 - int(math.log(offset, 2))
        # This if statement splits the processes in half at each level of
        # recursion until only one process is evaluating each 'branch'.
        # From there it evaluates all subsequent sub-branches and moves.
        if (rank % 2**(level+1) < 2**(level) and 2**(level) < size):
            # Left branch: SUBTRACT offset from idx.
            # Bug fix: offset // 2 instead of offset / 2 — under Python 3 true
            # division produced float indices and crashed on plan[idx-1].
            tower(src, temp, dest, idx-offset, offset//2, noofdiscs, plan)
            # Add some dead time here (makes the parallel speed-up measurable).
            time.sleep(2)
            # Record the src and dest poles of this move in the plan.
            plan[idx-1][0] = src
            plan[idx-1][1] = dest
        elif (rank % 2**(level+1) >= 2**(level) and 2**(level) < size):
            # Add some dead time here.
            time.sleep(2)
            # Record the src and dest poles of this move in the plan.
            plan[idx-1][0] = src
            plan[idx-1][1] = dest
            # Right branch: ADD offset to idx.
            tower(temp, dest, src, idx+offset, offset//2, noofdiscs, plan)
        else:
            # Only one process remains on this branch: it evaluates both halves.
            tower(src, temp, dest, idx-offset, offset//2, noofdiscs, plan)
            # Add some dead time here.
            time.sleep(2)
            plan[idx-1][0] = src
            plan[idx-1][1] = dest
            tower(temp, dest, src, idx+offset, offset//2, noofdiscs, plan)
    else:
        # Add some dead time here.
        time.sleep(2)
        # Leaf of the recursion: record this single move.
        plan[idx-1][0] = src
        plan[idx-1][1] = dest
    return plan
def main():
    """Build the Tower of Hanoi move plan in parallel and pickle it to "moves"."""
    # Initialise the number of discs and the list for containing plan.
    # Initially it is populated with pairs of zeroes [0, 0], s.t. the number
    # of pairs is equal to the number of moves (2**noofdiscs - 1).
    #print "The number of processes is", size, "and this is process", rank
    noofdiscs = int(sys.argv[1])
    plan_init = []
    for i in range(0,2**noofdiscs-1):
        plan_init.append([0, 0])
    # These two variables are used to keep track of the level of recursion
    # of the method.
    idx = 2**(noofdiscs - 1)
    offset = 2**(noofdiscs-2)
    # The plan - the set of moves that solves the tower of Hanoi problem -
    # is obtained by initialising the tower function, which recursively calls
    # ifself until the full solution is found. The solution will be
    # distributed across the processes used in the calculation.
    plan = tower(0, 2, 1, idx, offset, noofdiscs, plan_init)
    # Process 0 now gathers all the modified elements of data together into a
    # new list called allplans.
    allplans = comm.gather(plan,root=0)
    #print 'allplans:',allplans
    # The command gather has stuffed a bunch of mostly empty data lists into a
    # list. The first command essentially picks out all the non-trivial data
    # from each list returned from the processes and bundles it all into one
    # list, the solution. (max per position works because unvisited slots stay [0, 0].)
    if rank == 0:
        plan=[max(i) for i in zip(*allplans)]
        #print 'master:',plan
        # We use pickle to make a moves file which we write the
        # plan list to; a separate reader unpickles it later.
        # NOTE(review): outfile is never explicitly closed — consider `with open(...)`.
        outfile=open( "moves", "wb" )
        pickle.dump(plan, outfile)
main()
| icluster/demos | hanoi/src/hanoi_soln_par.py | hanoi_soln_par.py | py | 4,912 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.Get_processor_name",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "m... |
20707090879 | import os, time
from Crypto.Random import get_random_bytes, random
from lib.logger import *
log = Logger()
'''
This class handles the BLE Beacon Transmission (TX).
Because after some time of BLE advertising, a restart of the BLE stack (hciconfig hci0 down / up) might be required,
and because the pybleno class can't be shut down and restarted properly, the actual BLE handling has been placed
in a separate python script "en_beacon.py".
'''
class ENTxService:
    """Manages the BLE Exposure Notification beacon: random address rotation
    and (re)starting the external advertising script en_beacon.py."""

    def __init__(self, bdaddr_rotation_interval_min_minutes, bdaddr_rotation_interval_max_minutes):
        self.random_bdaddr = bytes([0x00] * 6)
        # Convert the rotation window to seconds, nudging the bounds inwards by
        # one second so rotation never lands exactly on the minute boundaries.
        self.bdaddr_rotation_interval_min_seconds = bdaddr_rotation_interval_min_minutes * 60 + 1
        self.bdaddr_rotation_interval_max_seconds = bdaddr_rotation_interval_max_minutes * 60 - 1
        if self.bdaddr_rotation_interval_max_seconds < self.bdaddr_rotation_interval_min_seconds:
            self.bdaddr_rotation_interval_max_seconds = self.bdaddr_rotation_interval_min_seconds
        self.bdaddr_next_rotation_seconds = 0  # 0 forces a rotation on first check

    @staticmethod
    def get_current_unix_epoch_time_seconds():
        """Return the current Unix time in whole seconds."""
        return int(time.time())

    @staticmethod
    def get_advertising_tx_power_level():
        """Return the advertised TX power level (dBm)."""
        return 12 # in real life, this info should come from the BLE transmitter

    def roll_random_bdaddr(self):
        """Generate a new BLE non-resolvable private address and schedule the next rotation."""
        # Create a BLE random "Non-Resolvable Private Address", i.e. the two MSBs must be 0, and not all bits 0 or 1
        while True:
            self.random_bdaddr = bytearray(get_random_bytes(6))
            self.random_bdaddr[0] = self.random_bdaddr[0] & 0b00111111  # clear the two most significant bits
            self.random_bdaddr = bytes(self.random_bdaddr)
            if (self.random_bdaddr.hex() != "000000000000") and (self.random_bdaddr.hex() != "3fffffffffff"):
                break
        # Pick the next rotation time uniformly inside the configured window.
        self.bdaddr_next_rotation_seconds = (self.get_current_unix_epoch_time_seconds()
                                             + random.randint(self.bdaddr_rotation_interval_min_seconds,
                                                              self.bdaddr_rotation_interval_max_seconds))

    def bdaddr_should_roll(self):
        """Return True once the scheduled rotation time has passed."""
        return self.get_current_unix_epoch_time_seconds() >= self.bdaddr_next_rotation_seconds

    def start_beacon(self, rpi, aem):
        """Start advertising rpi/aem via en_beacon.py, resetting the BLE stack until it succeeds."""
        while True:
            if os.system("python3 en_beacon.py %s %s %s" % (rpi.hex(), aem.hex(), self.random_bdaddr.hex())) == 0:
                # return code 0 means: ok, advertising started.
                break
            log.log()
            log.log("ERROR: Could not start advertising! Timestamp: %s" % time.strftime("%H:%M:%S", time.localtime()))
            log.log()
            # try to recover by restarting the BLE stack:
            os.system("sudo hciconfig hci0 down; sudo hciconfig hci0 up")
            time.sleep(1)

    @staticmethod
    def stop_beacon():
        """Stop advertising by restarting the BLE stack."""
        os.system("sudo hciconfig hci0 down; sudo hciconfig hci0 up")
| mh-/exposure-notification-ble-python | lib/en_tx_service.py | en_tx_service.py | py | 2,883 | python | en | code | 28 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "Crypto.Random.get_random_bytes",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "Crypto.Random.random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": ... |
70744489704 | import fileinput
import glob
import os
import random
import time
import re
from urllib.error import HTTPError
from arghandler import ArgumentHandler, subcmd
from google import search
from subprocess import call
from procurer import ultimate_guitar, lastfm, postulate_url
from rules import rules, clean
from songbook_converter import SongBook, wrap, header
from writer import TexWriter, PdfWriter, FileWriter
def url(keyword):
    """Return the first Ultimate Guitar chords URL found via Google for ``keyword``.

    Raises:
        LookupError: if no suitable (non-search) result is found.
        (Previously raised ArithmeticError, which misrepresented the failure;
        the generic Exception handler in addsongs() still catches this.)
    """
    searchterm = "site:ultimate-guitar.com chords " + keyword
    results = search(searchterm, stop=10)
    for result in results:
        # Skip Ultimate Guitar's own search-result pages.
        if 'search' not in result:
            return result
    raise LookupError("no chords page found for: " + keyword)
import codecs
def filter_existing(lines):
    """Return the subset of ``lines`` with no matching file on disk; report the rest."""
    missing = set()
    for line in lines:
        if not find_file_for_keyword(line):
            missing.add(line)
    already_present = lines - missing
    if already_present:
        print("Not adding some files ( use --force to override):")
        for item in already_present:
            print(item, find_file_for_keyword(item))
        print("Still looking for")
        for item in missing:
            print(item, find_file_for_keyword(item))
    return missing
def addsongs(keywords):
    """Fetch and store the chords for each keyword; return the failed ones.

    Stops querying Google after the first HTTPError (rate limiting) and marks
    every remaining keyword as still outstanding.

    Returns:
        list: keywords that were NOT successfully written to raw/.
    """
    written = []  # keywords still outstanding (despite the name, not the successes)
    working = 1   # cleared once Google starts rejecting requests
    for line in keywords:
        if not working:
            written.append(line)
            continue
        try:
            source = url(line)
            artist, title, blob = ultimate_guitar(source)
            FileWriter(artist, title, blob, directory='raw/', extension='txt').write()
        except HTTPError:
            working = 0
            print("Google said fuck you")
            # Bug fix: the keyword that triggered the HTTPError also failed,
            # so keep it in the retry list instead of silently dropping it.
            written.append(line)
        except Exception as e:
            print("Couldn't add " + line)
            print(e)
            written.append(line)
    return written
def find_file_for_keyword(keyword):
    """Return True if a file under reviewed/ matches ``keyword``
    (case-insensitive substring match in either direction), else False."""
    # Bug fix: os.path.basename is portable; splitting on "\\" only worked on Windows.
    names = (os.path.basename(path)[:-5] for path in glob.glob('reviewed/**'))
    keyword_lower = keyword.lower()
    for name in names:
        name_lower = name.lower()
        if keyword_lower in name_lower or name_lower in keyword_lower:
            return True
    return False
@subcmd
def edit(parser, context, args):
    """Open the single reviewed file matching the keyword in texmaker.

    Bug fix: find_file_for_keyword returns a bool, so calling len() on and
    indexing its result raised TypeError. Collect the matching paths directly.
    """
    keyword = "".join(args).lower()
    matching_files = [path for path in glob.glob('reviewed/**')
                      if keyword in os.path.basename(path).lower()]
    if len(matching_files) == 1:
        call(["texmaker", matching_files[0]])
@subcmd
def add(parser, context, args):
    """Search for a song by keyword, scrape it, and store the raw chords file.

    Bug fix: ``url`` expects a string keyword; passing the raw args list made
    the string concatenation inside ``url`` raise TypeError.
    """
    source = url(" ".join(args))
    artist, title, blob = ultimate_guitar(source)
    FileWriter(artist, title, blob, directory='raw/', extension='txt').write()
@subcmd
def addfile(parser, context, args):
    """Add every song listed (one keyword per line) in the given file, then clean raw/.

    Keywords that could not be fetched are saved to written.txt for retrying.
    """
    lines = set(line.strip() for line in open(args[0]))
    # NOTE(review): filter_existing() is not applied here, so songs already on
    # disk are re-fetched — confirm whether that is intended.
    keywords = lines
    added = addsongs(keywords)
    with open("written.txt", "w") as f:
        f.write("\n".join(added))
    cleanraw(parser,context,())
def files(directory):
    """Yield (artist, title, contents) for every "Artist - Title.ext" file in ``directory``."""
    for path in glob.glob(directory + "*"):
        with codecs.open(path, encoding='utf-8') as handle:
            # Strip the 4-character extension, then split on the " - " separator.
            artist, title = os.path.split(path)[-1][:-4].split(" - ")
            yield artist, title, handle.read()
@subcmd
def maketex(parser, context, args=('clean',)):
    """Convert every song file in the given directory (default clean/) to LaTeX in library/."""
    # An empty args tuple also falls back to the default directory.
    if not len(args):
        args=('clean',)
    for artist, title, blob in files(args[0] + "/"):
        converter = SongBook(artist, title, blob)
        latex = converter.produce_song()
        TexWriter(artist, title, latex, directory="library/").write()
@subcmd
def cleanraw(parser, context, args=('raw',)):
    """Normalise chord sheets from <dir>/ (default 'raw') into clean/.

    Sheets without a '[' section marker get an [Instrumental] header
    prepended. A second pass re-runs the identical cleanup over everything
    already in clean/, so earlier output is normalised too.
    """
    if not len(args):
        args = ('raw',)
    for artist, title, blob in files(args[0] + "/"):
        blob = clean(blob)
        if not "[" in blob:
            blob = "[Instrumental]\n" + blob
        FileWriter(artist, title, blob, directory='clean/', extension='txt').write()
    # Second pass: re-clean the clean/ directory itself.
    for artist, title, blob in files("clean/"):
        blob = clean(blob)
        if not "[" in blob:
            blob = "[Instrumental]\n" + blob
        FileWriter(artist, title, blob, directory='clean/', extension='txt').write()
@subcmd
def makepdf(parser, context, args=('library',)):
    """Concatenate every .tex song in <dir> (default 'library') into one PDF songbook."""
    if not len(args):
        args=('library',)
    latex = (line for line in fileinput.input(glob.glob(args[0] + '/*.tex'), openhook=fileinput.hook_encoded("utf-8")))
    latex = header() + wrap("document", "\n".join(latex))
    # Debug aid: surface exotic (high-codepoint) characters before LaTeX runs.
    print([(sym,ord(sym)) for sym in latex if ord(sym)>1000])
    PdfWriter("Sebastian", "Songbook", latex).write()
@subcmd
def reviewtopdf(parser, context, args):
    """Rebuild the songbook PDF from the reviewed/ directory in one go."""
    print("Making tex")
    maketex(parser,context,('reviewed',))
    print("Making pdf")
    makepdf(parser,context,args=('library',))
if __name__ == "__main__":
    # Dispatch the sub-commands registered above with @subcmd.
    handler = ArgumentHandler()
    handler.run()
| arpheno/songbook | main.py | main.py | py | 4,443 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "google.search",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "procurer.ultimate_guitar",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "writer.FileWriter",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "urllib.error... |
70913641705 | import cv2
import numpy as np
import os
import path
import face_recognition
import getopt, sys
def getOriginalData(file):
    """Parse an open Wavefront .obj file handle into numpy arrays.

    Returns (vertices float32 Nx3, face vertex indices int32 Mx3,
    face texture indices int32 Mx3, texture coordinates float32 Kx2).
    Face indices are kept 1-based, as stored in the .obj 'f' records.
    Each parsed list is also dumped to a text file as a side effect
    (Original_Vertices.txt, Face_Indices.txt, Texture_Coordinates.txt,
    Texture_Indices.txt).
    """
    count_vertices = 0
    count_faces = 0
    original_coordinates = []
    faces_indices = []
    texture_coordinates = []
    texture_indices = []
    oc_file = open("Original_Vertices.txt", "w")
    fi_file = open("Face_Indices.txt", "w")
    tc_file = open("Texture_Coordinates.txt", "w")
    ti_file = open("Texture_Indices.txt", "w")
    for line in file.readlines():
        content = line.split(" ")
        # Vertex record: "v x y z"
        if content[0] == "v":
            count_vertices += 1
            coordinate = []
            for i in range(1, 4):
                num = float(content[i].replace("\n", ""))
                coordinate.append(num)
            original_coordinates.append(coordinate)
            oc_file.write(str(coordinate) + "\n")
        # Face record: "f v/vt v/vt v/vt" (1-based indices)
        if content[0] == "f":
            count_faces += 1
            vertex_indices = []
            face_texture = []
            for i in range(1, 4):
                a = int(content[i].split("/")[0])
                b = int(content[i].split("/")[1])
                vertex_indices.append(a)
                face_texture.append(b)
            faces_indices.append(vertex_indices)
            texture_indices.append(face_texture)
            fi_file.write(str(vertex_indices) + "\n")
            ti_file.write(str(face_texture) + "\n")
        # Texture record: "vt u v"
        if content[0] == "vt":
            coordinate = [float(content[1]), float(content[2].replace("\n", ""))]
            tc_file.write(str(coordinate) + "\n")
            texture_coordinates.append(coordinate)
    print("共有三角网格顶点 " + str(count_vertices) + " 个")
    print("共有三角网格面片 " + str(count_faces) + " 个")
    oc_file.close()
    fi_file.close()
    tc_file.close()
    ti_file.close()
    return np.array(original_coordinates, dtype=np.float32),\
        np.array(faces_indices, dtype=np.int32), \
        np.array(texture_indices, dtype=np.int32), \
        np.array(texture_coordinates, dtype=np.float32)
def getRoundingCoordinates(coordinates):
    """Scale float vertex coordinates to integers: round to 4 decimals, then x10000.

    Also dumps every rounded vertex to Rounding_Vertices.txt as a side effect.
    """
    rounded = np.zeros(coordinates.shape, dtype=np.int32)
    rows, cols = coordinates.shape
    for r in range(rows):
        for c in range(cols):
            rounded[r][c] = int(round(coordinates[r][c], 4) * 10000)
    with open("Rounding_Vertices.txt", "w") as log_file:
        for vertex in rounded:
            log_file.write(str(vertex) + "\n")
    return rounded
def getAdjustedCoordinates(coordinates, x_min, y_min):
    """Translate integer vertices so the bounding box starts near the origin.

    x and y are shifted by (min + 1); z is kept unchanged. The adjusted
    vertices are also dumped to Adjusted_Vertices.txt.
    """
    adjusted = np.zeros(coordinates.shape, dtype=np.int32)
    print("偏移量 x : " + str(x_min) + "\ty : " + str(y_min))
    for idx in range(coordinates.shape[0]):
        x, y, z = coordinates[idx][0], coordinates[idx][1], coordinates[idx][2]
        adjusted[idx] = (x - x_min - 1, y - y_min - 1, z)
    with open("Adjusted_Vertices.txt", "w") as log_file:
        for vertex in adjusted:
            log_file.write(str(vertex) + "\n")
    return adjusted
def renderTexture(texture_coordinates, vertices_coordinates, vertices_indices,
                  texture_indices, texture_file, image):
    '''
    Paint *image* in place from the mesh and its texture map.

    For each triangle face, resolve the vertex/texture index pairs, sample the
    BGR color at each vertex from the texture, split the triangle into three
    quadrilaterals (vertex, the two adjacent edge midpoints, centroid) and fill
    each quadrilateral with its vertex's sampled color.

    :param texture_coordinates: texture-map coordinates (u, v in [0, 1])
    :param vertices_coordinates: vertex pixel coordinates
    :param vertices_indices: triangle-face vertex indices (1-based)
    :param texture_indices: triangle-face texture indices (1-based)
    :param texture_file: path of the texture image
    :param image: output BGR image, modified in place; also saved as Textured.jpg
    :return: None
    '''
    texture = cv2.imread(texture_file, cv2.IMREAD_COLOR)
    # Texture image size; note cv2 images are (height, width, channels).
    height, width, channels = texture.shape
    print("纹理贴图尺寸: " + str(height) + " , " + str(width) + " , " + str(channels))
    # Iterate over the faces back-to-front.
    # NOTE(review): range(n - 1, 0, -1) never visits face 0 — confirm whether
    # skipping the first face is intentional.
    for i in range(vertices_indices.shape[0] - 1, 0, -1):
        # Vertex indices of the current face (1-based in the .obj data).
        index_va = vertices_indices[i][0] - 1
        index_vb = vertices_indices[i][1] - 1
        index_vc = vertices_indices[i][2] - 1
        # Texture indices of the current face.
        index_ta = texture_indices[i][0] - 1
        index_tb = texture_indices[i][1] - 1
        index_tc = texture_indices[i][2] - 1
        # Vertex coordinates of the current face.
        va = vertices_coordinates[index_va]
        vb = vertices_coordinates[index_vb]
        vc = vertices_coordinates[index_vc]
        # Texture coordinates of the current face.
        ta = texture_coordinates[index_ta]
        tb = texture_coordinates[index_tb]
        tc = texture_coordinates[index_tc]
        # Sample BGR values; v is flipped (1 - v) because image rows grow downwards.
        ca = texture[getTexturePosition(height, 1 - ta[1]), getTexturePosition(width, ta[0])]
        cb = texture[getTexturePosition(height, 1 - tb[1]), getTexturePosition(width, tb[0])]
        cc = texture[getTexturePosition(height, 1 - tc[1]), getTexturePosition(width, tc[0])]
        # Centroid of the triangle.
        gravity_centre = []
        for j in range(3):
            gravity_centre.append(int((va[j] + vb[j] + vc[j]) / 3))
        # Midpoints of the three edges (z is dropped for 2-D drawing).
        ab = [int((va[0] + vb[0]) / 2), int((va[1] + vb[1]) / 2)]
        ac = [int((va[0] + vc[0]) / 2), int((va[1] + vc[1]) / 2)]
        bc = [int((vc[0] + vb[0]) / 2), int((vc[1] + vb[1]) / 2)]
        cv2.fillConvexPoly(image, np.array([[va[0], va[1]], ab, [gravity_centre[0],
                           gravity_centre[1]], ac], dtype=np.int32), ca.tolist())
        cv2.fillConvexPoly(image, np.array([[vb[0], vb[1]], ab, [gravity_centre[0],
                           gravity_centre[1]], bc], dtype=np.int32), cb.tolist())
        cv2.fillConvexPoly(image, np.array([[vc[0], vc[1]], bc, [gravity_centre[0],
                           gravity_centre[1]], ac], dtype=np.int32), cc.tolist())
    # Bug fix: save the image that was actually painted instead of the
    # module-level global `color_image` (identical only by accident in the
    # current script, wrong for any other caller).
    cv2.imwrite("Textured.jpg", image)
    return
def renderDepth(vertices_coordinates, vertices_indices, depth_image, min_depth, max_depth):
    """Render a per-face grey-scale depth map into *depth_image*, saved as Depth.jpg.

    Two temporary renderings are produced — one iterating the faces backwards
    (skipping face 0), one forwards over all faces — and the per-pixel maximum
    of the two greys is kept as the visible ("front") surface.

    NOTE(review): the script calls this as renderDepth(..., z_max, z_min), so
    `min_depth` actually receives the larger value and the normalisation below
    inverts the grey scale — confirm this inversion is intended.
    """
    # Two scratch renderings of the depth image.
    temp_depth_a = np.zeros(depth_image.shape, np.uint8)
    temp_depth_b = np.zeros(depth_image.shape, np.uint8)
    # Pass 1: faces in reverse order (indices n-1 .. 1; face 0 is skipped).
    for i in range(vertices_indices.shape[0] - 1, 0, -1):
        # Vertex indices of the current face (1-based in the .obj data).
        index_va = vertices_indices[i][0] - 1
        index_vb = vertices_indices[i][1] - 1
        index_vc = vertices_indices[i][2] - 1
        # Vertex coordinates of the current face.
        va = vertices_coordinates[index_va]
        vb = vertices_coordinates[index_vb]
        vc = vertices_coordinates[index_vc]
        # Mean depth of the face.
        mean_depth = (va[2] + vb[2] + vc[2]) / 3
        # Normalise the depth to a 0-255 grey level.
        scale = int((mean_depth - min_depth) / (max_depth - min_depth) * 255)
        grey_scale = [scale, scale, scale]
        cv2.fillConvexPoly(temp_depth_a, np.array([[va[0], va[1]], [vb[0], vb[1]], [vc[0], vc[1]]], dtype=np.int32), grey_scale)
    # Pass 2: all faces in forward order.
    for i in range(vertices_indices.shape[0]):
        # Vertex indices of the current face.
        index_va = vertices_indices[i][0] - 1
        index_vb = vertices_indices[i][1] - 1
        index_vc = vertices_indices[i][2] - 1
        # Vertex coordinates of the current face.
        va = vertices_coordinates[index_va]
        vb = vertices_coordinates[index_vb]
        vc = vertices_coordinates[index_vc]
        # Mean depth of the face.
        mean_depth = (va[2] + vb[2] + vc[2]) / 3
        # Normalise the depth to a 0-255 grey level.
        scale = int((mean_depth - min_depth) / (max_depth - min_depth) * 255)
        grey_scale = [scale, scale, scale]
        cv2.fillConvexPoly(temp_depth_b, np.array([[va[0], va[1]], [vb[0], vb[1]], [vc[0], vc[1]]], dtype=np.int32), grey_scale)
    # Keep, per pixel, the larger grey of the two passes.
    for row in range(depth_image.shape[0]):
        for col in range(depth_image.shape[1]):
            front = 0
            grey_a = temp_depth_a[row][col][0]
            grey_b = temp_depth_b[row][col][0]
            if grey_a <= grey_b:
                front = grey_b
            else:
                front = grey_a
            depth_image[row][col] = front
    cv2.imwrite("Depth.jpg", depth_image)
    return
def drawTriangularMesh(vertices_indices, coordinates, image, color):
    """Draw the wireframe of every triangle face onto *image* in *color*,
    then mark each vertex with a single pixel.

    NOTE(review): the vertex pixels use the module-level global `black`, not
    the `color` parameter — confirm that is intentional.
    """
    for faces_index in vertices_indices:
        # Resolve the three 1-based vertex indices to coordinates.
        # print("三角网格索引 " + str(faces_index))
        vertex_a = coordinates[faces_index[0] - 1]
        vertex_b = coordinates[faces_index[1] - 1]
        vertex_c = coordinates[faces_index[2] - 1]
        # print("三角面片顶点坐标为 " + str(vertex_a) + "\t" + str(vertex_b) + "\t" + str(vertex_c))
        cv2.line(image, (vertex_a[0], vertex_a[1]), (vertex_b[0], vertex_b[1]), color)
        cv2.line(image, (vertex_c[0], vertex_c[1]), (vertex_b[0], vertex_b[1]), color)
        cv2.line(image, (vertex_a[0], vertex_a[1]), (vertex_c[0], vertex_c[1]), color)
    for coordinate in coordinates:
        # Image indexing is (row=y, col=x).
        image[int(coordinate[1]), int(coordinate[0])] = black
    return
def getTexturePosition(length, ratio):
    """Map a texture coordinate ratio in [0, 1] to a pixel index in [0, length - 1]."""
    position = int(np.floor(length * ratio))
    # Clamp the upper bound (ratio == 1.0 would index one past the end).
    return min(position, length - 1)
def getFaceFeatures():
    """Detect facial landmarks on ./Textured.jpg with face_recognition.

    Prints every feature's point list, logs them to Alignment.txt, and returns
    the landmark dict of the first detected face — or None when no face is
    found.
    """
    # Use face_recognition to obtain the facial landmark coordinates.
    image = face_recognition.load_image_file("./Textured.jpg")
    face_landmarks_list = face_recognition.face_landmarks(image)
    alignment_image = cv2.imread("./Textured.jpg", cv2.IMREAD_COLOR)
    alignment_coordinates = open("Alignment.txt", "w")
    if len(face_landmarks_list) >= 1:
        print("成功检测面部")
    else:
        print("未检测到面部,请核查输入文件!")
        # NOTE(review): this early return leaves Alignment.txt open.
        return
    for face_landmarks in face_landmarks_list:
        # Print the location of each facial feature found in this image.
        facial_features = [
            'chin',
            'left_eyebrow',
            'right_eyebrow',
            'nose_bridge',
            'nose_tip',
            'left_eye',
            'right_eye',
            'top_lip',
            'bottom_lip'
        ]
        for facial_feature in facial_features:
            print("The {} in this face has the following points: {}"
                  .format(facial_feature, face_landmarks[facial_feature]))
        for facial_feature in facial_features:
            #alignment_image[][] = face_landmarks[facial_feature]
            alignment_coordinates.write(facial_feature + ": " + str(face_landmarks[facial_feature]) + "\n")
    alignment_coordinates.close()
    return face_landmarks_list[0]
def drawLandmarks(face_landmarks, image):
    """Mark every facial landmark with a filled red dot; save FaceLandMarked.jpg."""
    dot_color = (0, 0, 255)  # BGR red
    for feature_points in face_landmarks.values():
        for point in feature_points:
            # radius 5, thickness -1 -> filled circle
            cv2.circle(image, point, 5, dot_color, -1)
    cv2.imwrite("FaceLandMarked.jpg", image)
def landmarksDictToList(face_landmarks):
    """Flatten the landmark dict {feature: [(x, y), ...]} into one list of points."""
    flattened = []
    for feature_points in face_landmarks.values():
        flattened.extend(feature_points)
    return flattened
def getSurroundFaces(adjusted_coordinates, vertices_indices, all_coordinates):
    """Map every 2-D landmark to the mesh triangles that geometrically contain it.

    A landmark may sit inside several depth-stacked triangles; the candidate
    lists are logged to Landmark_Triangles.txt and then narrowed to the nearest
    triangle via refineTriangleFaces().
    """
    landmarks_dict = {}  # NOTE(review): only ever .update({})'d below — effectively unused
    faces_dict = {}
    landmark_triangles = open("Landmark_Triangles.txt", "w")
    for coordinate in all_coordinates:
        landmarks_dict.update({})
        faces_dict.update({str(coordinate): []})
    for vertices_index in vertices_indices:
        # Resolve the three 1-based vertex indices to adjusted coordinates.
        index_a = vertices_index[0] - 1
        index_b = vertices_index[1] - 1
        index_c = vertices_index[2] - 1
        va = adjusted_coordinates[index_a]
        vb = adjusted_coordinates[index_b]
        vc = adjusted_coordinates[index_c]
        for coordinate in all_coordinates:
            # pointPolygonTest >= 0 means inside or exactly on an edge.
            if cv2.pointPolygonTest(np.array([[va[0], va[1]], [vb[0], vb[1]], [vc[0], vc[1]]], dtype=np.int32),
                                    coordinate, False) >= 0:
                faces_dict[str(coordinate)].append([va, vb, vc])
    for landmark, triangle in faces_dict.items():
        landmark_triangles.write(str(landmark) + ":\t" + str(triangle) + "\n")
    landmark_triangles.close()
    refined = refineTriangleFaces(faces_dict)
    return refined
def refineTriangleFaces(faces_dict):
    """For each landmark keep only the nearest candidate triangle.

    Candidates are ranked by the summed z of their three vertices (smaller
    sum wins). A landmark with no candidates maps to []. The result is also
    dumped to refined.txt.
    """
    refined = {}
    for landmark, triangles in faces_dict.items():
        if not triangles:
            refined[str(landmark)] = []
        elif len(triangles) == 1:
            refined[str(landmark)] = triangles[0]
        else:
            depth_sums = [tri[0][2] + tri[1][2] + tri[2][2] for tri in triangles]
            refined[str(landmark)] = triangles[np.argmin(depth_sums)]
    with open("refined.txt", "w") as refined_file:
        for key, triangle in refined.items():
            refined_file.write(str(key) + ":\t" + str(triangle) + "\n")
    return refined
def getDistance(feature_a, index_a, feature_b, index_b, landmark_triangles, feature_landmarks, xy = True):
    """Placeholder for a landmark-to-landmark distance computation.

    Not implemented yet ("to be continue" in the original); always returns 0.
    """
    # to be continue
    distance = 0
    return distance
def getGlassesDistanceInformation(face_landmarks):
    """Derive eight distance parameters for glasses fitting from the landmarks.

    Raw pixel distances are divided by 10 (presumably a px-to-mm scale — TODO
    confirm); g and h are derived by fixed factors 0.7 and 2.2. Returns the
    list [a, b, c, d, e, f, g, h] and also writes it to manInformation.txt.
    """
    chin = face_landmarks['chin']
    left_eye = face_landmarks['left_eye']
    right_eye = face_landmarks['right_eye']
    right_eyebrow = face_landmarks['right_eyebrow']
    nose_bridge = face_landmarks['nose_bridge']
    a = (chin[16][0] - chin[0][0]) / 10
    b = (right_eye[3][0] - left_eye[0][0]) / 10
    c = (right_eye[1][0] + right_eye[2][0] - left_eye[1][0] - left_eye[2][0]) / 20
    d = (right_eye[1][0] - left_eye[3][0]) / 10
    h = (right_eye[2][1] - right_eyebrow[2][1]) / 10
    f = (nose_bridge[3][1] - nose_bridge[0][1]) / 10
    g = round(0.7 * h, 1)
    e = round(2.2 * f, 1)
    distances = [a, b, c, d, e, f, g, h]
    print("配镜所需参数依次为...")
    with open("manInformation.txt", "w") as information_file:
        for distance in distances:
            information_file.write(str(distance) + "\n")
            print(str(distance) + " (mm)")
    return distances
def cutPointCloud(old_obj, threshold):
    """Write cut_obj.obj: a copy of the .obj in *old_obj* with far geometry removed.

    (Translated from the original Chinese notes.) A z threshold filters out
    vertices with duplicated depth. An .obj file holds vertices (v), texture
    coordinates (vt) and faces (f, which index both). Removing a vertex must
    also remove every face referencing it, and removing a face removes the
    texture coordinates it referenced. The pass therefore: reads the three
    record lists, finds the vertex indices to drop, marks every face (and its
    texture indices) that references a dropped vertex, then writes the new
    file. Removed v/vt records are replaced by dummy placeholder lines so the
    remaining faces' 1-based indices stay valid.
    """
    cut_obj = open("cut_obj.obj", "w")
    original_vertices = []
    original_texture = []
    original_faces = []
    remove_vertices_indices = set()
    remove_face_indices = set()
    remove_texture_indices = set()
    for line in old_obj.readlines():
        content = line.split(" ")
        # Vertex record
        if content[0] == "v":
            original_vertices.append(line)
        # Face record
        if content[0] == "f":
            original_faces.append(line)
        # Texture record
        if content[0] == "vt":
            original_texture.append(line)
    old_obj.close()
    print("未裁剪文件 顶点:\t" + str(len(original_vertices)) +
          "\t纹理:\t" + str(len(original_texture)) + "\t三角面:\t" + str(len(original_faces)))
    # Mark vertices whose z exceeds the threshold.
    for index, line in enumerate(original_vertices):
        content = line.split(" ")
        if float(content[3]) > threshold:
            remove_vertices_indices.add(index)
        else:
            continue
    # Mark faces (and their texture indices) that touch a removed vertex.
    for index, line in enumerate(original_faces):
        content = line.split(" ")
        for i in range(1, 4):
            v = int(content[i].split("/")[0])
            vt = int(content[i].split("/")[1])
            if v - 1 in remove_vertices_indices:
                remove_face_indices.add(index)
                remove_texture_indices.add(vt - 1)
    print("需去除点:\t" + str(len(remove_vertices_indices)) + "\t去除纹理:\t" +
          str(len(remove_texture_indices)) + "\t去除网格:\t" + str(len(remove_face_indices)))
    # Note: 'f' records store 1-based indices, while the sets above are 0-based.
    for index, line in enumerate(original_vertices):
        if index not in remove_vertices_indices:
            cut_obj.write(line)
        else:
            # Dummy vertex: keeps the 1-based numbering of later records valid.
            cut_obj.write("v 0.042966 -0.094774 0.43439\n")
    for index, line in enumerate(original_texture):
        if index not in remove_texture_indices:
            cut_obj.write(line)
        else:
            cut_obj.write("vt 0.14193 0.20604\n")
    for index, line in enumerate(original_faces):
        if index not in remove_face_indices:
            cut_obj.write(line)
    cut_obj.close()
# Command line: -o <obj file> -t <texture file>; hard-coded defaults below.
opts, args = getopt.getopt(sys.argv[1:], "o:t:")
obj_file_path = r"C:\Users\liyanxiang\Desktop\head\resUnWarpMesh.obj"
texture_file_path = r"C:\Users\liyanxiang\Desktop\head\clonedBlur.png"
for opt, value in opts:
    print("输入文件 : " + value)
    if opt == "-o":
        print("obj 文件路径为 : " + value)
        obj_file_path = value
    if opt == "-t":
        print("texture 文件路径为 : " + value)
        texture_file_path = value
# Clip far geometry beyond z = 0.45, then re-read the trimmed mesh.
obj_file = open(obj_file_path, "r")
threshold = 0.45
cutPointCloud(obj_file, threshold)
cut_obj = open("cut_obj.obj", "r")
# original_coordinates: raw vertex coordinates
# vertices_indices: triangle-face vertex indices
# texture_indices: triangle-face texture indices
# texture_coordinates: texture-map coordinates
original_coordinates, vertices_indices, texture_indices, texture_coordinates = getOriginalData(cut_obj)
cut_obj.close()
rounding_coordinates = getRoundingCoordinates(original_coordinates)
# Bounding box of the integer-scaled mesh.
x_max = np.max(rounding_coordinates[:, 0])
x_min = np.min(rounding_coordinates[:, 0])
y_max = np.max(rounding_coordinates[:, 1])
y_min = np.min(rounding_coordinates[:, 1])
z_max = np.max(rounding_coordinates[:, 2])
z_min = np.min(rounding_coordinates[:, 2])
print("X max: " + str(x_max) + "\t\tX min: " + str(x_min) + "\nY max: " + str(y_max) +
      "\t\tY min: " + str(y_min) + "\nZ max: " + str(z_max) + "\t\tZ min: " + str(z_min))
height = int(y_max - y_min)
width = int(x_max - x_min)
depth = int(z_max - z_min)
print("图片高度为: " + str(height))
print("图片宽度为: " + str(width))
adjusted_coordinates = getAdjustedCoordinates(rounding_coordinates, x_min, y_min)
# White canvases for the textured render and the depth map.
color_image = np.zeros((height, width, 3), np.uint8)
depth_image = np.zeros((height, width, 3), np.uint8)
white = (255, 255, 255)
green = (0, 255, 0)
black = (0, 0, 0)
color_image[:, :] = white
depth_image[:, :] = white
faces_coordinates_file = open("Faces_Coordinates.txt", "w")
'''
for coordinate in rounding_coordinates:
    blank_image[int(coordinate[1] - y_min - 1)][int(coordinate[0] - x_min - 1)] = black
'''
drawTriangularMesh(vertices_indices, adjusted_coordinates, color_image, green)
cv2.imwrite("Triangular.jpg", color_image)
renderTexture(texture_coordinates, adjusted_coordinates, vertices_indices, texture_indices, texture_file_path, color_image)
drawTriangularMesh(vertices_indices, adjusted_coordinates, color_image, green)
cv2.imwrite("TextureCombineTriangle.jpg", color_image)
# NOTE(review): arguments are (z_max, z_min) but renderDepth's signature is
# (min_depth, max_depth) — confirm the inversion is intended.
renderDepth(adjusted_coordinates, vertices_indices, depth_image, z_max, z_min)
face_landmarks = getFaceFeatures()
drawLandmarks(face_landmarks, color_image)
all_coordinates = landmarksDictToList(face_landmarks)
landmarks_faces = getSurroundFaces(adjusted_coordinates, vertices_indices, all_coordinates)
distances = getGlassesDistanceInformation(face_landmarks)
faces_coordinates_file.close()
cv2.imshow("Created", color_image)
cv2.imwrite("Created.jpg", color_image)
print("image saved!")
#cv2.waitKey(0)
#cv2.destroyWindow()
| liyanxiangable/3DFaceAlignment | FaceAlignment.py | FaceAlignment.py | py | 20,504 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_nu... |
35872181243 | __author__ = 'Dennis Qiu'
from PIL import Image
def de_steg(encrypted_file):
    """Recover an image hidden in the low nibble of each RGB channel.

    The hidden image is rebuilt by promoting the 4 least-significant bits of
    every channel to the high nibble, then saved as '<stem>hiddenImage.png'.
    Both images are also displayed.
    """
    # rsplit tolerates extra dots in the path; the old `split('.')` raised
    # "too many values to unpack" for any name containing more than one dot.
    f = encrypted_file.rsplit('.', 1)[0]
    steg = Image.open(encrypted_file)
    out = Image.new('RGB', (steg.width, steg.height))
    for x in range(steg.width):
        for y in range(steg.height):
            r, g, b = steg.getpixel((x, y))
            # Keep the low nibble of each channel and shift it into the high nibble.
            rh, gh, bh = (r & 0x0F) << 4, (g & 0x0F) << 4, (b & 0x0F) << 4
            out.putpixel((x, y), (rh, gh, bh))
    out.save(f + "hiddenImage.png")
    steg.show()
    out.show()
def im_histogram(im='lowContrastBW.png'):
    """Histogram-equalise the greyscale image *im* and display before/after.

    Builds a 256-bin histogram, derives the equalisation lookup table via
    im_lut(), and shows both the original and the remapped copy.
    """
    default = Image.open(im)
    # 256-bin histogram of pixel values.
    h = []
    for i in range(256):
        h.append(0)
    for x in range (default.width):
        for y in range (default.height):
            p = default.getpixel((x, y))
            h[p] += 1
    default.show()
    print('List h:\n{}'.format(h))
    default_copy2 = default.copy()
    size = default.width * default.height
    Lut = im_lut(h, size)
    # Remap every pixel through the equalisation table.
    for x in range (default.width):
        for y in range (default.height):
            p = default.getpixel((x, y))
            default_copy2.putpixel((x, y), Lut[p])
    default_copy2.show()
def im_lut(list_h, size_n):
    """Build a 256-entry histogram-equalisation lookup table.

    Entry i is int((255 / N) * CDF(i)), where CDF is the running sum of the
    histogram *list_h* and N is the total pixel count *size_n*.
    """
    lut = []
    cumulative = 0
    for level in range(256):
        cumulative += list_h[level]
        lut.append(int((255 / size_n) * cumulative))
    print('List lut:\n{}'.format(lut))
    return lut
if __name__ == '__main__':
    # Decode each sample stego image, then run the equalisation demo on the
    # default low-contrast image.
    encrypted = 'encrypted4bits.png encrypted4bits1.png encrypted4bits2.png encrypted4bits3.png'.split()
    for e in encrypted:
        de_steg(e)
    im_histogram()
| denqiu/Python-ImageProcessing | image_steg.py | image_steg.py | py | 1,570 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 7,... |
2820524990 | from django.contrib import admin
from .models import CalendarEvent, CalendarEventAttendee, UserCalendar
class CalendarEventAttendeeInline(admin.TabularInline):
    """Inline editor for the attendees of a calendar event."""
    model = CalendarEventAttendee
    extra = 0
    # Autocomplete the user foreign key instead of rendering a full dropdown.
    autocomplete_fields = (
        'user',
    )
class UserCalendarInline(admin.TabularInline):
    """Inline editor for per-user calendars."""
    model = UserCalendar
    extra = 0
    # Show the generated UUID but never allow editing it.
    readonly_fields = (
        "uuid",
    )
@admin.register(CalendarEvent)
class CalendarEventAdmin(admin.ModelAdmin):
    """Admin for CalendarEvent with its attendees edited inline.

    Renamed from ``CalendarEvent``: the original class name shadowed the
    imported model of the same name for the rest of this module.
    """
    inlines = (
        CalendarEventAttendeeInline,
    )
    autocomplete_fields = (
        'organizer',
    )
| rimvydaszilinskas/organize-it | apps/calendars/admin.py | admin.py | py | 584 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.TabularInline",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.CalendarEventAttendee",
"line_number": 7,
"usage_type": "name"
},
{... |
3903968215 | #!/usr/bin/env python
from __future__ import with_statement
import logging
import logging.handlers
# Rotated log file path.
LOG_FILE_HDL = '/tmp/logging_example.out'
# Logger wired to both the console and a timed rotating file.
mylogger = logging.getLogger("MyLogger")
mylogger.setLevel(logging.DEBUG)
ch_handler = logging.StreamHandler()
# DEBUG+1 keeps plain debug() records off the console while still passing
# the custom-level log() calls below.
ch_handler.setLevel(logging.DEBUG+1)
mylogger.addHandler(ch_handler)
# Rotate every minute, keeping at most 6 backups.
handler = logging.handlers.TimedRotatingFileHandler(
    LOG_FILE_HDL, 'M', 1, backupCount=6)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter("%(asctime)s--%(levelname)s--%(message)s"))
mylogger.addHandler(handler)
mylogger.log(logging.DEBUG+1, "begin")
for i in range(20):
    mylogger.debug('count i = %d' % i)
    #handler.doRollover()
mylogger.log(logging.INFO, "rolled")
logging.shutdown()
| bondgeek/pythonhacks | recipes/logger_example.py | logger_example.py | py | 756 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.DEB... |
33759274918 | import json
import codecs
import sys
# Expect exactly two positional arguments: input JSON path and output CSV path.
if len(sys.argv) != 3:
    print('Usage: ' + sys.argv[0] + " <input json path> <output csv path>")
    exit()
infilename = sys.argv[1]
outfilename = sys.argv[2]
# Pipe-separated output.
sep = "|"
out = open(outfilename, 'w')
def processSource(sourceStr):
    """Classify a tweet's raw 'source' string into a coarse client category.

    Checks run in a fixed order: android, Apple devices, TweetDeck, the web
    client, social-platform crossposters, automation tools, other mobile,
    then 'other' as the fallback.
    """
    source = sourceStr.lower()
    apple_markers = ["iphone", "ipad", "for ios", "for mac", "os x", "apple.com"]
    automation_markers = ["ifttt", "dlvr.it", "hootsuite", "twitterfeed", "tweetbot",
                          "twittbot", "roundteam", "hubspot", "socialoomph", "smqueue",
                          "linkis.com", "tweet jukebox", "tweetsuite", "bufferapp",
                          "thousandtweets", "postplanner", "manageflitter", "crowdfire"]
    social_markers = ["facebook", "linkedin", "tumblr", "wordpress",
                      "instagram", "pinterest"]
    mobile_markers = ["windows phone", "mobile web", "for blackberry"]
    if "android" in source:
        return "android"
    if any(marker in source for marker in apple_markers):
        return "appledevice"
    if "tweetdeck" in source:
        return "tweetdeck"
    if "twitter web client" in source:
        return "webclient"
    if any(marker in source for marker in social_markers):
        return "socialsite"
    if any(marker in source for marker in automation_markers):
        return "automated"
    if any(marker in source for marker in mobile_markers):
        return "othermobile"
    return "other"
def isNiceRetweet(tweet):
    """Return True when the tweet is a retweet whose original has every exported field."""
    rts = tweet.get('retweeted_status')
    if rts is None:
        return False
    required = ('favorite_count', 'retweet_count', 'created_at', 'source', 'user')
    if any(rts.get(field) is None for field in required):
        return False
    return rts['user'].get('followers_count') is not None
def getRetweetedTweetId(tweet, isRetweet):
    """Return the original tweet's id for retweets, else None."""
    return tweet['retweeted_status']['id'] if isRetweet else None
def getRetweetedTweetTime(tweet, isRetweet):
    """Return the original tweet's creation timestamp for retweets, else None."""
    return tweet['retweeted_status']['created_at'] if isRetweet else None
def getRetweetedTweetLikesNum(tweet, isRetweet):
    """Return the original tweet's like count (as int) for retweets, else 0."""
    if not isRetweet:
        return 0
    return int(tweet['retweeted_status']['favorite_count'])
def getRetweetedTweetRTNum(tweet, isRetweet):
    """Return the original tweet's retweet count (as int) for retweets, else 0."""
    if not isRetweet:
        return 0
    return int(tweet['retweeted_status']['retweet_count'])
def getRetweetedTweetSource(tweet, isRetweet):
    """Return the original tweet's client category (via processSource) for retweets, else None."""
    if not isRetweet:
        return None
    return processSource(tweet['retweeted_status']['source'])
def getRetweetedTweetAuthorFollowerCount(tweet, isRetweet):
    """Return the original author's follower count for retweets, else 0."""
    if not isRetweet:
        return 0
    return tweet['retweeted_status']['user']['followers_count']
def getLang(tweet):
    """Return the tweet's 'lang' field, or None when it is absent."""
    return tweet.get('lang')
# Stream the input (one JSON tweet per line) and emit one pipe-separated row
# per tweet that carries a 'source' field.
# NOTE(review): `unicode` makes this Python 2 only.
with open(infilename, 'r') as f:
    for line in f:
        tweet = json.loads(unicode(line.encode('utf-8'), 'utf-8'))
        if "source" in tweet.keys():
            out.write(str(tweet['id']) + sep)
            out.write(str(tweet['created_at']) + sep)
            out.write(str(processSource(tweet['source'])) + sep)
            out.write(str(getLang(tweet)) + sep)
            # Retweet-derived columns default to None/0 for plain tweets.
            isRetweet = isNiceRetweet(tweet)
            out.write(str(isRetweet) + sep)
            out.write(str(getRetweetedTweetId(tweet, isRetweet)) + sep)
            out.write(str(getRetweetedTweetTime(tweet, isRetweet)) + sep)
            out.write(str(getRetweetedTweetLikesNum(tweet, isRetweet)) + sep)
            out.write(str(getRetweetedTweetRTNum(tweet, isRetweet)) + sep)
            out.write(str(getRetweetedTweetSource(tweet, isRetweet)) + sep)
            out.write(str(getRetweetedTweetAuthorFollowerCount(tweet, isRetweet)) + sep)
            # repr() escapes newlines so the row stays on a single line.
            out.write(repr(str(tweet['text'].encode('utf-8'))))
            out.write("\n")
| ador/trial | scripts/twitterJsonToCsv.py | twitterJsonToCsv.py | py | 4,212 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number":... |
72738108584 | #!/bin/python3
import os
import sys
import pathlib
from amp_database import download_DRAMP
def check_samplelist(samplelist, tools, path):
    """Return the sample names, inferring them from directory names when none were given.

    When *samplelist* is empty, every sub-directory anywhere under *path*
    whose name is not a tool name is treated as a sample (deduplicated).
    An explicitly supplied list is returned unchanged.
    """
    if samplelist != []:
        return samplelist
    print('<--sample-list> was not given, sample names will be inferred from directory names')
    inferred = []
    for dirpath, subdirs, files in os.walk(path):
        for subdir in subdirs:
            if subdir not in tools:
                inferred.append(subdir)
    return list(set(inferred))
def check_pathlist(filepaths, samplelist, fileending, path):
    """Return per-sample lists of AMP result files, inferring them from *path* when empty.

    For each sample, collects every file under *path* whose directory path
    mentions the sample name and whose file name carries one of the expected
    endings. A non-empty *filepaths* is returned unchanged.
    """
    if filepaths != []:
        return filepaths
    print('<--path-list> was not given, paths to AMP-results-files will be inferred')
    for sample in samplelist:
        sample_paths = []
        for dirpath, subdirs, files in os.walk(path):
            for filename in files:
                if sample in dirpath and any(filename.endswith(end) for end in fileending):
                    sample_paths.append(dirpath + '/' + filename)
        filepaths.append(sample_paths)
    return filepaths
def check_faa_path(faa_path, samplename):
    """Resolve the .faa file for *samplename*.

    *faa_path* may be the file itself or a directory that is searched
    recursively for '*<samplename>*.faa'; exactly one match is required.
    Exits with an explanatory message on any other outcome.
    """
    if os.path.isfile(faa_path):
        return faa_path
    if os.path.isdir(faa_path):
        candidates = list(pathlib.Path(faa_path).rglob(f"*{samplename}*.faa"))
        if len(candidates) > 1:
            sys.exit(f'AMPcombi interrupted: There is more than one .faa file for {samplename} in the folder given with --faa_path')
        if not candidates:
            sys.exit(f'AMPcombi interrupted: There is no .faa file containing {samplename} in the folder given with --faa_path')
        return candidates[0]
    sys.exit(f'AMPcombi interrupted: The input given with --faa_path does not seem to be a valid directory or file. Please check.')
def check_ref_database(database):
    """Resolve the reference AMP database directory, downloading DRAMP when needed.

    Returns the path of a usable database folder; exits when a user-supplied
    path does not exist.
    """
    if((database==None) and (not os.path.exists('amp_ref_database'))):
        # No database given and nothing cached: download DRAMP.
        print('<--AMP_database> was not given, the current DRAMP general-AMP database will be downloaded and used')
        database = 'amp_ref_database'
        os.makedirs(database, exist_ok=True)
        db = database
        download_DRAMP(db)
        return db
    elif ((not database==None)):
        if (os.path.exists(database)):
            db = database
            # NOTE(review): the literal '$' inside this f-string looks like a
            # shell-syntax typo — the message renders as '= $<path>'.
            print(f'<--AMP_database> = ${db} is found and will be used')
            return db
        if (not os.path.exists(database)):
            sys.exit(f'Reference amp database path {database} does not exist, please check the path.')
    elif((database==None) and (os.path.exists('amp_ref_database'))):
        # Reuse a previous download.
        print('<--AMP_database> = DRAMP is already downloaded and will be reused')
        database = 'amp_ref_database'
        db = database
        return db
def check_path(path):
    """Return True when *path* exists on disk, else False."""
    return os.path.exists(path)
def check_directory_tree(path, tools, samplelist):
    """Validate that *path* is laid out as <path>/<tool>/<sample>/...

    Exits with a message when no first-level directory matches a tool name or
    no second-level directory matches a sample name.
    """
    print(f'Checking directory tree {path} for sub-directories \n ')
    # First level: at least one sub-directory must be named after a tool.
    subdirs_1 = [x for x in os.listdir(path) if x in tools]
    if (not subdirs_1):
        sys.exit(f'AMPcombi interrupted: First level sub-directories in {path} are not named by tool-names. Please check the directories names and the keys given in "--tooldict". \n ')
    else:
        print('First level sub-directories passed check.')
    # Second level: at least one sub-directory must be named after a sample.
    subdirs_2 = []
    for dir in subdirs_1:
        subdirs = [x for x in os.listdir(path+dir) if x in samplelist]
        if (subdirs):
            subdirs_2.append(subdirs)
    if (not subdirs_2):
        sys.exit(f'AMPcombi interrupted: Second level sub-directories in {path} are not named by sample-names. Please check the directories names and the names given as "--sample_list" \n ')
    else:
        print('Second level sub-directories passed check')
    print('Finished directory check')
def check_input_complete(path, samplelist, filepaths, tools):
    """Validate the combination of --amp_results (*path*), --sample_list and --path_list.

    Exits with an explanatory message on any unusable combination. The
    original branch conditions contradicted their own comments: case 3 tested
    `not filepaths and not samplelist` (making the path validation dead code)
    and case 4 tested `not samplelist` (so the tree check always ran with an
    empty sample list and failed); both now match the documented intent.
    """
    # 1. Neither a head folder nor explicit file paths were provided.
    if (not check_path(path)) and (not filepaths):
        sys.exit('AMPcombi interrupted: Please provide the correct path to either the folder containing all amp files to be summarized (--amp_results) or the list of paths to the files (--path_list)')
    # 2. No head folder; file paths given but sample names missing.
    elif (not check_path(path)) and filepaths and (not samplelist):
        sys.exit('AMPcombi interrupted: Please provide a list of sample-names (--sample_list) in addition to --path_list')
    # 3. No head folder; both file paths and sample names given: validate the paths.
    elif (not check_path(path)) and filepaths and samplelist:
        for file in filepaths:
            # 3.1. Every provided path must exist.
            if not check_path(file):
                sys.exit(f'AMPcombi interrupted: The path {file} does not exist. Please check the --path_list input.')
            # 3.2. Every path must be attributable to one of the given samples.
            if not any(n in file for n in samplelist):
                sys.exit(f'AMPcombi interrupted: The path {file} does not contain any of the sample-names given in --sample_list')
    # 4. Head folder given together with a sample list: validate the directory tree.
    elif check_path(path) and samplelist:
        check_directory_tree(path, tools, samplelist)
| Darcy220606/AMPcombi | ampcombi/check_input.py | check_input.py | py | 5,496 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"u... |
31064957465 |
from ..utils import Object
class ThemeParameters(Object):
    """
    Contains parameters of the application theme.

    Every attribute is a color expressed in the RGB24 format: the background,
    a secondary background, text, hints, links, buttons and button text.

    Attributes:
        ID (:obj:`str`): ``ThemeParameters``

    Returns:
        ThemeParameters

    Raises:
        :class:`telegram.Error`
    """

    ID = "themeParameters"

    # Color field names in constructor order; also used by read().
    _FIELDS = ("background_color", "secondary_background_color", "text_color",
               "hint_color", "link_color", "button_color", "button_text_color")

    def __init__(self, background_color, secondary_background_color, text_color, hint_color, link_color, button_color, button_text_color, **kwargs):
        self.background_color = background_color  # int
        self.secondary_background_color = secondary_background_color  # int
        self.text_color = text_color  # int
        self.hint_color = hint_color  # int
        self.link_color = link_color  # int
        self.button_color = button_color  # int
        self.button_text_color = button_text_color  # int

    @staticmethod
    def read(q: dict, *args) -> "ThemeParameters":
        # Pull the seven color fields out of the raw dict in declaration
        # order; missing keys become None, exactly as q.get() did before.
        return ThemeParameters(*(q.get(field) for field in ThemeParameters._FIELDS))
| iTeam-co/pytglib | pytglib/api/types/theme_parameters.py | theme_parameters.py | py | 2,062 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
}
] |
41165253893 | # -*- coding: utf-8 -*-
'''
This file is part of Habitam.
Habitam is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Habitam is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Habitam. If not, see
<http://www.gnu.org/licenses/>.
Created on Apr 21, 2013
@author: Stefan Guna
'''
from django import forms
from django.db.models.query_utils import Q
from habitam.entities.models import AccountLink
from habitam.financial.models import Account
from habitam.ui.forms.generic import NewDocPaymentForm
from django.forms.util import ErrorDict
from django.utils.translation import ugettext as _
from django.forms.forms import NON_FIELD_ERRORS
MONEY_TYPES = (
('cash', _('bani lichizi')),
('bank', _(u'bancă'))
)
TYPES = (
('std', _('standard')),
('repairs', _('repairs')),
('rulment', _('rulment')),
('special', _('special')),
)
class EditAccountForm(forms.ModelForm):
money_type = forms.ChoiceField(label=_('Tip bani'), choices=MONEY_TYPES)
type = forms.ChoiceField(label=_('Tip'), choices=TYPES)
class Meta:
model = Account
fields = ('name', 'type', 'money_type')
def __init__(self, *args, **kwargs):
if 'building' in kwargs.keys():
self._building = kwargs['building']
del kwargs['building']
else:
self._building = None
del kwargs['user']
super(EditAccountForm, self).__init__(*args, **kwargs)
if self.instance.type == 'penalties':
del self.fields['type']
if self.instance.online_payments:
del self.fields['money_type']
def save(self, commit=True):
instance = super(EditAccountForm, self).save(commit=False)
if commit:
instance.save()
if self._building != None:
al = AccountLink.objects.create(holder=self._building,
account=instance)
al.save()
return instance
def add_form_error(self, error_message):
if not self._errors:
self._errors = ErrorDict()
if not NON_FIELD_ERRORS in self._errors:
self._errors[NON_FIELD_ERRORS] = self.error_class()
self._errors[NON_FIELD_ERRORS].append(error_message)
class NewFundTransfer(NewDocPaymentForm):
dest_account = forms.ModelChoiceField(label=_(u'Destinație'),
queryset=Account.objects.all())
def __init__(self, *args, **kwargs):
building = kwargs['building']
account = kwargs['account']
del kwargs['building']
del kwargs['account']
del kwargs['user']
super(NewFundTransfer, self).__init__(*args, **kwargs)
qdirect = Q(accountlink__holder=building)
qparent = Q(accountlink__holder__parent=building)
qbuilding_accounts = Q(qdirect | qparent)
qbilled_direct = Q(collectingfund__billed=building)
qbilled_parent = Q(collectingfund__billed__parent=building)
qbilled = Q(qbilled_direct | qbilled_parent)
qnotarchived = Q(~Q(collectingfund__archived=True) & qbilled)
queryset = Account.objects.filter(Q(qbuilding_accounts | qnotarchived))
queryset = queryset.exclude(pk=account.id).exclude(type='penalties')
self.fields['dest_account'].queryset = queryset
| habitam/habitam-core | habitam/ui/forms/fund.py | fund.py | py | 3,821 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.utils.translation.ugettext",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext",
"line_number": 38,
"usage_type": ... |
12366447292 | import glob
import os
import shutil
import tempfile
import unittest
from ample import constants
from ample.testing import test_funcs
from ample.util import ample_util, spicker
@unittest.skip("unreliable test cases")
@unittest.skipUnless(test_funcs.found_exe("spicker" + ample_util.EXE_EXT), "spicker exec missing")
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thisd = os.path.abspath(os.path.dirname(__file__))
cls.ample_share = constants.SHARE_DIR
cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')
cls.tests_dir = tempfile.gettempdir()
cls.spicker_exe = ample_util.find_exe('spicker' + ample_util.EXE_EXT)
def test_spicker(self):
mdir = os.path.join(self.testfiles_dir, "models")
models = glob.glob(mdir + os.sep + "*.pdb")
work_dir = os.path.join(self.tests_dir, "spicker")
if os.path.isdir(work_dir):
shutil.rmtree(work_dir)
os.mkdir(work_dir)
spickerer = spicker.Spickerer(spicker_exe=self.spicker_exe)
spickerer.cluster(models, run_dir=work_dir)
# This with spicker from ccp4 6.5.010 on osx 10.9.5
names = sorted([os.path.basename(m) for m in spickerer.results[0].models])
ref = [
'5_S_00000005.pdb',
'4_S_00000005.pdb',
'5_S_00000004.pdb',
'4_S_00000002.pdb',
'4_S_00000003.pdb',
'3_S_00000006.pdb',
'3_S_00000004.pdb',
'2_S_00000005.pdb',
'2_S_00000001.pdb',
'3_S_00000003.pdb',
'1_S_00000005.pdb',
'1_S_00000002.pdb',
'1_S_00000004.pdb',
]
self.assertEqual(names, sorted(ref)) # seem to get different results on osx
self.assertEqual(len(names), len(ref))
# Centroid of third cluster
self.assertEqual(
os.path.basename(spickerer.results[2].centroid),
'5_S_00000006.pdb',
"WARNING: Spicker might run differently on different operating systems",
)
shutil.rmtree(work_dir)
if __name__ == "__main__":
unittest.main()
| rigdenlab/ample | ample/util/tests/test_spicker.py | test_spicker.py | py | 2,160 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",... |
39400543792 | import numpy as np;
import cv2;
#load image from file
#cv2.imwrite('imageName.png', img);
rgb_red_pos = 2;
rgb_blue_pos = 0;
rgb_green_pos = 1;
img_1 = cv2.imread('red1.png',1);
##img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY);
##extract the red component image 1
red_only1 = np.int16( np.matrix(img_1[:,:,rgb_red_pos])) - np.int16( np.matrix(img_1[:,:,rgb_blue_pos])) - np.int16( np.matrix(img_1[:,:,rgb_green_pos]));
red_only1 = np.uint8(red_only1);
red_only1[red_only1 < 0] = 0;
red_only1[red_only1 > 255] = 0;
cv2.imshow('Image 1',red_only1);
img_2 = cv2.imread('red1.png',1);
##img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY);
##extract the red component of image 2
red_only2 = np.int16( np.matrix(img_2[:,:,rgb_red_pos])) - np.int16( np.matrix(img_2[:,:,rgb_blue_pos])) - np.int16( np.matrix(img_2[:,:,rgb_green_pos]))
red_only2 = np.uint8(red_only2);
red_only2[red_only2 < 0] = 0;
red_only2[red_only2 > 255] = 0;
cv2.imshow('Image 2',red_only2);
##differences between two frames
subtracted = np.int16(red_only1) - np.int16(red_only2);
subtracted = np.uint8(subtracted);
subtracted[subtracted < 0] = 0;
subtracted[subtracted > 255] = 0;
cv2.imshow('subtracted',subtracted);
def calculateCenterOfMass(subtracted) :
rows = np.shape(np.matrix(subtracted))[0];
cols = np.shape(np.matrix(subtracted))[1];
##calculate the center of mass
#np.sum() #0 for columns 1 for rows
column_sums = np.matrix(np.sum(subtracted,0));
column_numbers = np.matrix(np.arange(cols));
column_mult = np.multiply(column_sums, column_numbers);
total = np.sum(column_mult);
#sum the total of the image matrix
all_total = np.sum(np.sum(subtracted));
print('the column total is'+str(total));
print('the column all total is'+str(all_total));
#column location
#col_location = total / all_total;
return 0 if all_total == 0 else total / all_total;
cofm = calculateCenterOfMass(subtracted);
if(cofm == 0):
print('no object detected ');
else:
print(' object detected ');
if cv2.waitKey(1) & 0xFF == ord('q'): break
# When everything done, release the capture cap.release() cv2.destroyAllWindows()
| botchway44/computer-Vision | image diffrencing.py | image diffrencing.py | py | 2,160 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": ... |
35397910028 | from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import glob
import os
from textwrap import dedent
import xml.dom.minidom as DOM
import coverage
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.test_builder import PythonTestBuilder
from pants.base.build_file_aliases import BuildFileAliases
from pants.util.contextutil import pushd, environment_as
from pants_test.base_test import BaseTest
class PythonTestBuilderTestBase(BaseTest):
def run_tests(self, targets, args=None, fast=True, debug=False):
test_builder = PythonTestBuilder(targets, args or [], fast=fast, debug=debug)
with pushd(self.build_root):
return test_builder.run()
class PythonTestBuilderTestEmpty(PythonTestBuilderTestBase):
def test_empty(self):
self.assertEqual(0, self.run_tests(targets=[]))
class PythonTestBuilderTest(PythonTestBuilderTestBase):
@property
def alias_groups(self):
return BuildFileAliases.create(
targets={
'python_library': PythonLibrary,
'python_tests': PythonTests
})
def setUp(self):
super(PythonTestBuilderTest, self).setUp()
self.create_file(
'lib/core.py',
dedent('''
def one(): # line 1
return 1 # line 2
# line 3
# line 4
def two(): # line 5
return 2 # line 6
''').strip())
self.add_to_build_file(
'lib',
dedent('''
python_library(
name='core',
sources=[
'core.py'
]
)
'''))
self.create_file(
'tests/test_core_green.py',
dedent('''
import unittest2 as unittest
import core
class CoreGreenTest(unittest.TestCase):
def test_one(self):
self.assertEqual(1, core.one())
'''))
self.create_file(
'tests/test_core_red.py',
dedent('''
import core
def test_two():
assert 1 == core.two()
'''))
self.add_to_build_file(
'tests',
dedent('''
python_tests(
name='green',
sources=[
'test_core_green.py'
],
dependencies=[
'lib:core'
],
coverage=[
'core'
]
)
python_tests(
name='red',
sources=[
'test_core_red.py'
],
dependencies=[
'lib:core'
],
coverage=[
'core'
]
)
python_tests(
name='all',
sources=[
'test_core_green.py',
'test_core_red.py'
],
dependencies=[
'lib:core'
]
)
python_tests(
name='all-with-coverage',
sources=[
'test_core_green.py',
'test_core_red.py'
],
dependencies=[
'lib:core'
],
coverage=[
'core'
]
)
'''))
self.green = self.target('tests:green')
self.red = self.target('tests:red')
self.all = self.target('tests:all')
self.all_with_coverage = self.target('tests:all-with-coverage')
def test_green(self):
self.assertEqual(0, self.run_tests(targets=[self.green]))
def test_red(self):
self.assertEqual(1, self.run_tests(targets=[self.red]))
def test_mixed(self):
self.assertEqual(1, self.run_tests(targets=[self.green, self.red]))
def test_junit_xml(self):
# We expect xml of the following form:
# <testsuite errors=[Ne] failures=[Nf] skips=[Ns] tests=[Nt] ...>
# <testcase classname="..." name="..." .../>
# <testcase classname="..." name="..." ...>
# <failure ...>...</failure>
# </testcase>
# </testsuite>
report_basedir = os.path.join(self.build_root, 'dist', 'junit')
with environment_as(JUNIT_XML_BASE=report_basedir):
self.assertEqual(1, self.run_tests(targets=[self.red, self.green]))
files = glob.glob(os.path.join(report_basedir, '*.xml'))
self.assertEqual(1, len(files))
junit_xml = files[0]
with open(junit_xml) as fp:
print(fp.read())
root = DOM.parse(junit_xml).documentElement
self.assertEqual(2, len(root.childNodes))
self.assertEqual(2, int(root.getAttribute('tests')))
self.assertEqual(1, int(root.getAttribute('failures')))
self.assertEqual(0, int(root.getAttribute('errors')))
self.assertEqual(0, int(root.getAttribute('skips')))
children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)
self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))
self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))
self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)
def coverage_data_file(self):
return os.path.join(self.build_root, '.coverage')
def load_coverage_data(self, path):
data_file = self.coverage_data_file()
self.assertTrue(os.path.isfile(data_file))
coverage_data = coverage.coverage(data_file=data_file)
coverage_data.load()
_, all_statements, not_run_statements, _ = coverage_data.analysis(path)
return all_statements, not_run_statements
def test_coverage_simple(self):
self.assertFalse(os.path.isfile(self.coverage_data_file()))
covered_file = os.path.join(self.build_root, 'lib', 'core.py')
with environment_as(PANTS_PY_COVERAGE='1'):
self.assertEqual(0, self.run_tests(targets=[self.green]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([6], not_run_statements)
self.assertEqual(1, self.run_tests(targets=[self.red]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([2], not_run_statements)
self.assertEqual(1, self.run_tests(targets=[self.green, self.red]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([], not_run_statements)
# The all target has no coverage attribute and the code under test does not follow the
# auto-discover pattern so we should get no coverage.
self.assertEqual(1, self.run_tests(targets=[self.all]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([1, 2, 5, 6], not_run_statements)
self.assertEqual(1, self.run_tests(targets=[self.all_with_coverage]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([], not_run_statements)
def test_coverage_modules(self):
self.assertFalse(os.path.isfile(self.coverage_data_file()))
covered_file = os.path.join(self.build_root, 'lib', 'core.py')
with environment_as(PANTS_PY_COVERAGE='modules:does_not_exist,nor_does_this'):
# modules: should trump .coverage
self.assertEqual(1, self.run_tests(targets=[self.green, self.red]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([1, 2, 5, 6], not_run_statements)
with environment_as(PANTS_PY_COVERAGE='modules:core'):
self.assertEqual(1, self.run_tests(targets=[self.all]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([], not_run_statements)
def test_coverage_paths(self):
self.assertFalse(os.path.isfile(self.coverage_data_file()))
covered_file = os.path.join(self.build_root, 'lib', 'core.py')
with environment_as(PANTS_PY_COVERAGE='paths:does_not_exist/,nor_does_this/'):
# paths: should trump .coverage
self.assertEqual(1, self.run_tests(targets=[self.green, self.red]))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([1, 2, 5, 6], not_run_statements)
with environment_as(PANTS_PY_COVERAGE='paths:core.py'):
self.assertEqual(1, self.run_tests(targets=[self.all], debug=True))
all_statements, not_run_statements = self.load_coverage_data(covered_file)
self.assertEqual([1, 2, 5, 6], all_statements)
self.assertEqual([], not_run_statements)
| fakeNetflix/square-repo-pants | tests/python/pants_test/backend/python/test_test_builder.py | test_test_builder.py | py | 8,998 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pants_test.base_test.BaseTest",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.test_builder.PythonTestBuilder",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pants.util.contextutil.pushd",
"line_number": 22,
"usag... |
417814461 | import os
from collections import defaultdict
def file_statistics(parent_dir):
files_dict = defaultdict(list)
for root, dirs, files in os.walk(parent_dir):
for file in files:
stat = os.stat(os.path.join(root, file))
if stat.st_size <= 100:
files_dict[100].append(file)
elif stat.st_size <= 1000:
files_dict[1000].append(file)
elif stat.st_size <= 10000:
files_dict[10000].append(file)
else:
files_dict[100000].append(file)
result = {}
for key, val in sorted(files_dict.items()):
result[key] = len(val)
return result
if __name__ == '__main__':
print(file_statistics('some_data'))
| Shorokhov-A/practical_tasks | Shorokhov_Andreiy_dz_7/task_7_4.py | task_7_4.py | py | 747 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.stat",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
25084775362 | # This Golf class will be responsible for scraping the latest
# Trump golf outing located on trumpgolfcount.com
from bs4 import BeautifulSoup
import requests
import json
import twitter
import lxml
import pyrebase
def main():
get_latest_outing()
def push_db(data):
# db.child("time").push(data)
db.child("time").child("-LCM0jw1YhB_MxPrN5RS").update({"timez" : data})
print("database has been updated: ", data)
def get_latest_outing():
url = 'http://trumpgolfcount.com/displayoutings#tablecaption'
req = requests.get(url)
soup = BeautifulSoup(req.text, 'lxml')
last_outing = soup.find_all('tr')[1]
golf_info = []
for text in last_outing:
if text.string == '\n':
continue
elif text.string == None:
golf_info.append(text.a.string)
golf_info.append(text.string)
# make total time in hours and minutes
# time = golf_info[11].split(":")
# total_time = time[0] + " hours and " + time[1] + "minutes"
print("============== template ==============")
tweet = "Trump went golfing!" + "\n" + "Where: " + str(golf_info[3]) + "\n" + "When: " + str(golf_info[0]) + "- " + str(golf_info[1])+ "\n" + "Total visits to date: " + str(golf_info[9])
print(golf_info)
print("======================================")
is_new(str(golf_info[0]), tweet)
def is_new(new, tweet):
# we need the key to access the table
print("accessing db . . .")
oldkey = list(db.child("time").get().val())[0]
print("db accessed. success!")
old = db.child("time").get().val()[oldkey]['timez']
print("old: ", old)
print("new: ", new)
if old == new:
print("Trump has not gone golfing yet.")
else:
print("Trump went golfing, tweet!")
post_tweet(new, tweet)
def post_tweet(new, text):
print("posting tweet . . .")
push_db(new)
api.VerifyCredentials()
api.PostUpdate(text)
print(api.VerifyCredentials())
print("Tweet has been posted.")
if __name__ == "__main__": main()
| navonf/isTrumpGolfing | Golf.py | Golf.py | py | 2,033 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 22,
"usage_type": "call"
}
] |
71252887144 | from datetime import datetime
import glob
import os
import time
import anim
import threading
print(datetime.timestamp(datetime.now()))
class User:
def __init__(self, name: str):
self.name = name
class Chat:
def __init__(self, username: str, text: str, score: int = 0):
self.author = User(username)
self.body = text
self.score = score
# most_common = ['a', 'b', 'c']
# characters = anim.get_characters(most_common)
# chats = [
# Chat('a', '안녕'),
# Chat('b', '반가워'),
# Chat('c', '안녕하세요.', score=-1)
# ]
# anim.comments_to_scene(chats, characters, output_filename="hello.mp4")
from flask import Flask, request, jsonify, send_file
app = Flask(__name__)
@app.post('/generate')
def generate():
chats = []
most_common = []
print(request.json)
for c in request.json:
chats.append(Chat(c['nickname'], c['content']))
# if not c['nickname'] in most_common:
most_common.append(c['nickname'])
characters = anim.get_characters(most_common)
filename = f"outputs/{datetime.timestamp(datetime.now())}.mp4"
anim.comments_to_scene(chats, characters, output_filename=filename)
print("success")
return send_file(filename, mimetype='video/mp4')
def delete_every_10_min():
for f in glob.glob("outputs/*.mp4"):
os.remove(f)
time.sleep(600)
delete_every_10_min()
threading.Thread(target=delete_every_10_min)
app.run('0.0.0.0', 5050) | ij5/ace-ainize | app.py | app.py | py | 1,475 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.timestamp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flas... |
15775777498 | from enum import Enum
from dataclasses import dataclass
class TokenType(Enum):
NUMBER = 0
PLUS = 1
MINUS = 2
ASTERISK = 3
SLASH = 4
LPAR = 5
RPAR = 6
@dataclass
class Token:
type: TokenType
value: str
def __repr__(self) -> str:
return f"({self.type.name}, '{self.value}')"
| ricdip/py-math-interpreter | src/model/token.py | token.py | py | 326 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 15,
"usage_type": "name"
}
] |
36955207429 | import random, string
import wiredtiger, wttest
from helper import copy_wiredtiger_home
from wtdataset import SimpleDataSet
from wtscenario import filter_scenarios, make_scenarios
# test_cursor12.py
# Test cursor modify call
class test_cursor12(wttest.WiredTigerTestCase):
keyfmt = [
('recno', dict(keyfmt='r')),
('string', dict(keyfmt='S')),
]
valuefmt = [
('item', dict(valuefmt='u')),
('string', dict(valuefmt='S')),
]
types = [
('file', dict(uri='file:modify')),
('lsm', dict(uri='lsm:modify')),
('table', dict(uri='table:modify')),
]
# Skip record number keys with LSM.
scenarios = filter_scenarios(make_scenarios(types, keyfmt, valuefmt),
lambda name, d: not ('lsm' in d['uri'] and d['keyfmt'] == 'r'))
# List with original value, final value, and modifications to get
# there.
list = [
{
'o' : 'ABCDEFGH', # no operation
'f' : 'ABCDEFGH',
'mods' : [['', 0, 0]]
},{
'o' : 'ABCDEFGH', # no operation with offset
'f' : 'ABCDEFGH',
'mods' : [['', 4, 0]]
},{
'o' : 'ABCDEFGH', # rewrite beginning
'f' : '--CDEFGH',
'mods' : [['--', 0, 2]]
},{
'o' : 'ABCDEFGH', # rewrite end
'f' : 'ABCDEF--',
'mods' : [['--', 6, 2]]
},{
'o' : 'ABCDEFGH', # append
'f' : 'ABCDEFGH--',
'mods' : [['--', 8, 2]]
},{
'o' : 'ABCDEFGH', # append with gap
'f' : 'ABCDEFGH --',
'mods' : [['--', 10, 2]]
},{
'o' : 'ABCDEFGH', # multiple replacements
'f' : 'A-C-E-G-',
'mods' : [['-', 1, 1], ['-', 3, 1], ['-', 5, 1], ['-', 7, 1]]
},{
'o' : 'ABCDEFGH', # multiple overlapping replacements
'f' : 'A-CDEFGH',
'mods' : [['+', 1, 1], ['+', 1, 1], ['+', 1, 1], ['-', 1, 1]]
},{
'o' : 'ABCDEFGH', # multiple overlapping gap replacements
'f' : 'ABCDEFGH --',
'mods' : [['+', 10, 1], ['+', 10, 1], ['+', 10, 1], ['--', 10, 2]]
},{
'o' : 'ABCDEFGH', # shrink beginning
'f' : '--EFGH',
'mods' : [['--', 0, 4]]
},{
'o' : 'ABCDEFGH', # shrink middle
'f' : 'AB--GH',
'mods' : [['--', 2, 4]]
},{
'o' : 'ABCDEFGH', # shrink end
'f' : 'ABCD--',
'mods' : [['--', 4, 4]]
},{
'o' : 'ABCDEFGH', # grow beginning
'f' : '--ABCDEFGH',
'mods' : [['--', 0, 0]]
},{
'o' : 'ABCDEFGH', # grow middle
'f' : 'ABCD--EFGH',
'mods' : [['--', 4, 0]]
},{
'o' : 'ABCDEFGH', # grow end
'f' : 'ABCDEFGH--',
'mods' : [['--', 8, 0]]
},{
'o' : 'ABCDEFGH', # discard beginning
'f' : 'EFGH',
'mods' : [['', 0, 4]]
},{
'o' : 'ABCDEFGH', # discard middle
'f' : 'ABGH',
'mods' : [['', 2, 4]]
},{
'o' : 'ABCDEFGH', # discard end
'f' : 'ABCD',
'mods' : [['', 4, 4]]
},{
'o' : 'ABCDEFGH', # discard everything
'f' : '',
'mods' : [['', 0, 8]]
},{
'o' : 'ABCDEFGH', # overlap the end and append
'f' : 'ABCDEF--XX',
'mods' : [['--XX', 6, 2]]
},{
'o' : 'ABCDEFGH', # overlap the end with incorrect size
'f' : 'ABCDEFG01234567',
'mods' : [['01234567', 7, 2000]]
},{ # many updates
'o' : '-ABCDEFGHIJKLMNOPQRSTUVWXYZ-',
'f' : '-eeeeeeeeeeeeeeeeeeeeeeeeee-',
'mods' : [['a', 1, 1], ['a', 2, 1], ['a', 3, 1], ['a', 4, 1],
['a', 5, 1], ['a', 6, 1], ['a', 7, 1], ['a', 8, 1],
['a', 9, 1], ['a', 10, 1], ['a', 11, 1], ['a', 12, 1],
['a', 13, 1], ['a', 14, 1], ['a', 15, 1], ['a', 16, 1],
['a', 17, 1], ['a', 18, 1], ['a', 19, 1], ['a', 20, 1],
['a', 21, 1], ['a', 22, 1], ['a', 23, 1], ['a', 24, 1],
['a', 25, 1], ['a', 26, 1],
['b', 1, 1], ['b', 2, 1], ['b', 3, 1], ['b', 4, 1],
['b', 5, 1], ['b', 6, 1], ['b', 7, 1], ['b', 8, 1],
['b', 9, 1], ['b', 10, 1], ['b', 11, 1], ['b', 12, 1],
['b', 13, 1], ['b', 14, 1], ['b', 15, 1], ['b', 16, 1],
['b', 17, 1], ['b', 18, 1], ['b', 19, 1], ['b', 20, 1],
['b', 21, 1], ['b', 22, 1], ['b', 23, 1], ['b', 24, 1],
['b', 25, 1], ['b', 26, 1],
['c', 1, 1], ['c', 2, 1], ['c', 3, 1], ['c', 4, 1],
['c', 5, 1], ['c', 6, 1], ['c', 7, 1], ['c', 8, 1],
['c', 9, 1], ['c', 10, 1], ['c', 11, 1], ['c', 12, 1],
['c', 13, 1], ['c', 14, 1], ['c', 15, 1], ['c', 16, 1],
['c', 17, 1], ['c', 18, 1], ['c', 19, 1], ['c', 20, 1],
['c', 21, 1], ['c', 22, 1], ['c', 23, 1], ['c', 24, 1],
['c', 25, 1], ['c', 26, 1],
['d', 1, 1], ['d', 2, 1], ['d', 3, 1], ['d', 4, 1],
['d', 5, 1], ['d', 6, 1], ['d', 7, 1], ['d', 8, 1],
['d', 9, 1], ['d', 10, 1], ['d', 11, 1], ['d', 12, 1],
['d', 13, 1], ['d', 14, 1], ['d', 15, 1], ['d', 16, 1],
['d', 17, 1], ['d', 18, 1], ['d', 19, 1], ['d', 20, 1],
['d', 21, 1], ['d', 22, 1], ['d', 23, 1], ['d', 24, 1],
['d', 25, 1], ['d', 26, 1],
['e', 1, 1], ['e', 2, 1], ['e', 3, 1], ['e', 4, 1],
['e', 5, 1], ['e', 6, 1], ['e', 7, 1], ['e', 8, 1],
['e', 9, 1], ['e', 10, 1], ['e', 11, 1], ['e', 12, 1],
['e', 13, 1], ['e', 14, 1], ['e', 15, 1], ['e', 16, 1],
['e', 17, 1], ['e', 18, 1], ['e', 19, 1], ['e', 20, 1],
['e', 21, 1], ['e', 22, 1], ['e', 23, 1], ['e', 24, 1],
['e', 25, 1], ['e', 26, 1]]
}
]
def nulls_to_spaces(self, bytes_or_str):
if self.valuefmt == 'u':
# The value is binary
return bytes_or_str.replace(b'\x00', b' ')
else:
# The value is a string
return bytes_or_str.replace('\x00', ' ')
# Convert a string to the correct type for the value.
def make_value(self, s):
if self.valuefmt == 'u':
return bytes(s.encode())
else:
return s
def fix_mods(self, mods):
if bytes != str and self.valuefmt == 'u':
# In Python3, bytes and strings are independent types, and
# the WiredTiger API needs bytes when the format calls for bytes.
newmods = []
for mod in mods:
# We need to check because we may converted some of the Modify
# records already.
if type(mod.data) == str:
newmods.append(wiredtiger.Modify(
self.make_value(mod.data), mod.offset, mod.size))
else:
newmods.append(mod)
mods = newmods
return mods
# Create a set of modified records and verify in-memory reads.
def modify_load(self, ds, single):
# For each test in the list:
# set the original value,
# apply modifications in order,
# confirm the final state
row = 10
c = ds.open_cursor()
for i in self.list:
c.set_key(ds.key(row))
c.set_value(self.make_value(i['o']))
self.assertEquals(c.update(), 0)
c.reset()
self.session.begin_transaction("isolation=snapshot")
c.set_key(ds.key(row))
mods = []
for j in i['mods']:
mod = wiredtiger.Modify(j[0], j[1], j[2])
mods.append(mod)
mods = self.fix_mods(mods)
self.assertEquals(c.modify(mods), 0)
self.session.commit_transaction()
c.reset()
c.set_key(ds.key(row))
self.assertEquals(c.search(), 0)
v = c.get_value()
expect = self.make_value(i['f'])
self.assertEquals(self.nulls_to_spaces(v), expect)
if not single:
row = row + 1
c.close()
# Confirm the modified records are correct.
def modify_confirm(self, ds, single):
# For each test in the list:
# confirm the final state is there.
row = 10
c = ds.open_cursor()
for i in self.list:
c.set_key(ds.key(row))
self.assertEquals(c.search(), 0)
v = c.get_value()
expect = self.make_value(i['f'])
self.assertEquals(self.nulls_to_spaces(v), expect)
if not single:
row = row + 1
c.close()
# Smoke-test the modify API, anything other than an snapshot isolation fails.
def test_modify_txn_api(self):
ds = SimpleDataSet(self, self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
ds.populate()
c = ds.open_cursor()
c.set_key(ds.key(10))
msg = '/not supported/'
self.session.begin_transaction("isolation=read-uncommitted")
mods = []
mods.append(wiredtiger.Modify('-', 1, 1))
self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda: c.modify(mods), msg)
self.session.rollback_transaction()
self.session.begin_transaction("isolation=read-committed")
mods = []
mods.append(wiredtiger.Modify('-', 1, 1))
self.assertRaisesWithMessage(wiredtiger.WiredTigerError, lambda: c.modify(mods), msg)
self.session.rollback_transaction()
# Smoke-test the modify API, operating on a group of records.
def test_modify_smoke(self):
ds = SimpleDataSet(self,
self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
ds.populate()
self.modify_load(ds, False)
# Smoke-test the modify API, operating on a single record
def test_modify_smoke_single(self):
ds = SimpleDataSet(self,
self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
ds.populate()
self.modify_load(ds, True)
# Smoke-test the modify API, closing and re-opening the database.
def test_modify_smoke_reopen(self):
ds = SimpleDataSet(self,
self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
ds.populate()
self.modify_load(ds, False)
# Flush to disk, forcing reconciliation.
self.reopen_conn()
self.modify_confirm(ds, False)
# Smoke-test the modify API, recovering the database.
def test_modify_smoke_recover(self):
# Close the original database.
self.conn.close()
# Open a new database with logging configured.
self.conn_config = \
'log=(enabled=true),transaction_sync=(method=dsync,enabled)'
self.conn = self.setUpConnectionOpen(".")
self.session = self.setUpSessionOpen(self.conn)
# Populate a database, and checkpoint it so it exists after recovery.
ds = SimpleDataSet(self,
self.uri, 100, key_format=self.keyfmt, value_format=self.valuefmt)
ds.populate()
self.session.checkpoint()
self.modify_load(ds, False)
# Crash and recover in a new directory.
newdir = 'RESTART'
copy_wiredtiger_home(self, '.', newdir)
self.conn.close()
self.conn = self.setUpConnectionOpen(newdir)
self.session = self.setUpSessionOpen(self.conn)
self.session.verify(self.uri)
self.modify_confirm(ds, False)
# Check that we can perform a large number of modifications to a record.
@wttest.skip_for_hook("timestamp", "crashes on commit_transaction or connection close") # FIXME-WT-9809
def test_modify_many(self):
    """Apply 50,000 chained modifies to one value inside a single
    transaction and verify the final value after commit."""
    ds = SimpleDataSet(self,
        self.uri, 20, key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()
    c = ds.open_cursor()
    self.session.begin_transaction("isolation=snapshot")
    c.set_key(ds.key(10))
    orig = self.make_value('abcdefghijklmnopqrstuvwxyz')
    c.set_value(orig)
    self.assertEquals(c.update(), 0)
    for i in range(0, 50000):
        # Splice 5 random digits into a local copy of the value, mirroring
        # the modify applied to the store, so we can compare at the end.
        new = self.make_value("".join([random.choice(string.digits) \
            for i in range(5)]))
        orig = orig[:10] + new + orig[15:]
        mods = []
        mod = wiredtiger.Modify(new, 10, 5)
        mods.append(mod)
        mods = self.fix_mods(mods)
        self.assertEquals(c.modify(mods), 0)
    self.session.commit_transaction()
    c.set_key(ds.key(10))
    self.assertEquals(c.search(), 0)
    self.assertEquals(c.get_value(), orig)
# Check that modify returns not-found after a delete.
def test_modify_delete(self):
    """Remove a record, then verify a modify of that key reports
    WT_NOTFOUND."""
    ds = SimpleDataSet(self,
        self.uri, 20, key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()
    c = ds.open_cursor()
    c.set_key(ds.key(10))
    self.assertEquals(c.remove(), 0)

    self.session.begin_transaction("isolation=snapshot")
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    mods = self.fix_mods(mods)
    c.set_key(ds.key(10))
    self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
    self.session.commit_transaction()
# Check that modify returns not-found when an insert is not yet committed
# and after it's aborted.
def test_modify_abort(self):
    """Verify modify visibility of an uncommitted insert: the inserting
    transaction can modify it, other transactions see WT_NOTFOUND, and
    after rollback the record is not modifiable at all."""
    ds = SimpleDataSet(self,
        self.uri, 20, key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()

    # Start a transaction.
    self.session.begin_transaction("isolation=snapshot")

    # Insert a new record.
    c = ds.open_cursor()
    c.set_key(ds.key(30))
    c.set_value(ds.value(30))
    self.assertEquals(c.insert(), 0)

    # Test that we can successfully modify our own record.
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    c.set_key(ds.key(30))
    mods = self.fix_mods(mods)
    self.assertEqual(c.modify(mods), 0)

    # Test that another transaction cannot modify our uncommitted record.
    xs = self.conn.open_session()
    xc = ds.open_cursor(session = xs)
    xs.begin_transaction("isolation=snapshot")
    xc.set_key(ds.key(30))
    xc.set_value(ds.value(30))
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    mods = self.fix_mods(mods)
    xc.set_key(ds.key(30))
    self.assertEqual(xc.modify(mods), wiredtiger.WT_NOTFOUND)
    xs.rollback_transaction()

    # Rollback our transaction.
    self.session.rollback_transaction()

    # Test that we can't modify our aborted insert.
    self.session.begin_transaction("isolation=snapshot")
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    mods = self.fix_mods(mods)
    c.set_key(ds.key(30))
    self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
    self.session.rollback_transaction()
# Allow running this test file directly.
if __name__ == '__main__':
    wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_cursor12.py | test_cursor12.py | py | 15,173 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "wttest.WiredTigerTestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "wtscenario.filter_scenarios",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "wtscenario.make_scenarios",
"line_number": 24,
"usage_type": "call"
},
{
... |
4738433123 | from operator import attrgetter
from django.contrib.auth import get_user_model
from django.db.models import (
CASCADE, SET_NULL, BooleanField, CharField, CheckConstraint, DateTimeField,
F, ForeignKey, IntegerChoices, IntegerField, JSONField, ManyToManyField,
Model, Q, SlugField, TextField, UniqueConstraint)
from django.db.models.fields import URLField
from django.db.models.fields.related import OneToOneField
from django.urls import reverse
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from mdanchors import AnchorConverter
from drugcombinator.managers import DrugManager, InteractionManager
from drugcombinator.modelfields import ListField
from drugcombinator.tasks import ping_webarchive
from drugcombinator.utils import get_libravatar_url, markdown_allowed
class LastModifiedModel(Model):
    """Abstract base model adding an auto-updated last-modification
    timestamp to its subclasses."""

    # auto_now: refreshed on every save()
    last_modified = DateTimeField(
        auto_now=True,
        verbose_name=_("last modification")
    )

    class Meta:
        abstract = True
class Drug(LastModifiedModel):
    """A substance, with its descriptive texts, aliases, category and the
    symmetrical many-to-many web of Interaction rows linking it to other
    substances."""

    name = CharField(
        max_length=128,
        verbose_name=_("name")
    )
    slug = SlugField(
        unique=True,
        verbose_name=_("identifier")
    )
    description = TextField(
        default='',
        blank=True,
        verbose_name=_("description"),
        help_text=markdown_allowed()
    )
    risks = TextField(
        default='',
        blank=True,
        verbose_name=_("general risks"),
        help_text=format_lazy(
            '{text}<br/>{notice}',
            text=_(
                "Risks specific to combinations involving this substance "
                "that do not depend on a specific interaction."),
            notice=markdown_allowed()
        )
    )
    effects = TextField(
        default='',
        blank=True,
        verbose_name=_("general effects"),
        help_text=format_lazy(
            '{text}<br/>{notice}',
            text=_(
                "Effects specific to combinations involving this "
                "substance that do not depend on a specific "
                "interaction."),
            notice=markdown_allowed()
        )
    )
    aliases = ListField(
        verbose_name=_("aliases"),
        help_text=_("One alias per line. No need to duplicate case.")
    )
    # Symmetrical M2M through the Interaction model; see all_interactants
    # below for the accessor workaround.
    interactants = ManyToManyField(
        'self',
        symmetrical=True,
        through='Interaction',
        verbose_name=_("interactants")
    )
    category = ForeignKey(
        'Category',
        SET_NULL,
        null=True,
        blank=True,
        related_name='drugs',
        verbose_name=_("category")
    )
    common = BooleanField(
        default=True,
        verbose_name=_("common"),
        help_text=_(
            "Common substances are displayed as buttons in the app.")
    )

    # History manager will be added through simple_history's register
    # function in translation.py, after the translated fields are
    # added by modeltranslation
    objects = DrugManager()

    def __str__(self):
        return self.name

    @property
    def interactions(self):
        """All Interaction rows where this drug is either interactant."""
        return Interaction.objects.filter(
            Q(from_drug=self) | Q(to_drug=self)
        )

    # In Django 3.1.0, the Drug.interactants field accessor only returns
    # Drug objects from the Interaction.to_drug field, but misses ones
    # from the Interaction.from_drug field. This property is a
    # workaround, as this limitation may be removed at framework level
    # one day.
    @property
    def all_interactants(self):
        """Every drug linked to this one, from both FK directions."""
        return (
            self.interactants.all()
            | Drug.objects.filter(
                interactions_from__in=self.interactions_to.all()
            )
        )

    def get_absolute_url(self, namespace=None):
        """Return this drug's detail URL, optionally URL-namespaced."""
        name = 'drug'
        if namespace:
            name = f"{namespace}:{name}"
        return reverse(name, kwargs={'slug': self.slug})

    class Meta:
        verbose_name = _("substance")
        ordering = ('slug',)
class Interaction(LastModifiedModel):
    """Combination data between two Drug records (risk, synergy, texts).

    The two interactants are kept sorted by slug on save, so each
    unordered pair of drugs maps to exactly one row; the Meta constraints
    enforce inequality and uniqueness of the pair.
    """

    class Synergy(IntegerChoices):
        # Stored integer values are historical and intentionally not in
        # declaration order.
        UNKNOWN = (0, pgettext_lazy("synergy", "Unknown"))
        NEUTRAL = (1, pgettext_lazy("synergy", "Neutral"))
        ADDITIVE = (5, _("Additive"))
        DECREASE = (2, _("Decrease"))
        INCREASE = (3, _("Increase"))
        MIXED = (4, _("Mixed"))

    class Risk(IntegerChoices):
        UNKNOWN = (0, pgettext_lazy("risk", "Unknown"))
        NEUTRAL = (1, pgettext_lazy("risk", "Neutral"))
        CAUTION = (2, _("Caution"))
        UNSAFE = (3, _("Unsafe"))
        DANGEROUS = (4, _("Dangerous"))

    class Reliability(IntegerChoices):
        UNKNOWN = (0, pgettext_lazy("reliability", "Unknown"))
        HYPOTHETICAL = (1, _("Hypothetical"))
        INFERRED = (2, _("Inferred"))
        PROVEN = (3, _("Proven"))

    from_drug = ForeignKey(
        'Drug',
        CASCADE,
        related_name='interactions_from',
        verbose_name=_("first interactant")
    )
    to_drug = ForeignKey(
        'Drug',
        CASCADE,
        related_name='interactions_to',
        verbose_name=_("second interactant")
    )
    names = ListField(
        verbose_name=_("slang names"),
        help_text=_(
            "One name per line. The first one can be emphasized in the "
            "app.")
    )
    risk = IntegerField(
        choices=Risk.choices,
        default=Risk.UNKNOWN,
        verbose_name=_("risks")
    )
    synergy = IntegerField(
        choices=Synergy.choices,
        default=Synergy.UNKNOWN,
        verbose_name=_("synergy")
    )
    risk_reliability = IntegerField(
        choices=Reliability.choices,
        default=Reliability.UNKNOWN,
        verbose_name=_("risks reliability")
    )
    effects_reliability = IntegerField(
        choices=Reliability.choices,
        default=Reliability.UNKNOWN,
        verbose_name=_("synergy and effects reliability")
    )
    risk_description = TextField(
        default='',
        blank=True,
        verbose_name=_("risks description"),
        help_text=markdown_allowed()
    )
    effect_description = TextField(
        default='',
        blank=True,
        verbose_name=_("effects description"),
        help_text=markdown_allowed()
    )
    notes = TextField(
        default='',
        blank=True,
        verbose_name=_("notes"),
        help_text=_(
            "This field is only displayed on this admin site and is "
            "shared between all users and languages.")
    )
    is_draft = BooleanField(
        default=True,
        verbose_name=_("draft"),
        help_text=_(
            "In case of work-in-progress, uncertain or incomplete "
            "data.")
    )
    uris = JSONField(
        default=dict,
        editable=False,
        verbose_name=_("URIs"),
        help_text=_(
            "URIs extracted from these interaction data texts, mapped "
            "to their last Wayback Machine snapshot date.")
    )

    # History manager will be added through simple_history's register
    # function in translation.py, after the translated fields are
    # added by modeltranslation
    objects = InteractionManager()

    def __str__(self):
        return f"{self.from_drug.name} + {self.to_drug.name}"

    def get_absolute_url(self, namespace=None):
        """Return the combination page URL, optionally URL-namespaced."""
        name = 'combine'
        if namespace:
            name = f"{namespace}:{name}"
        return reverse(name, kwargs={
            'slugs': (self.from_drug.slug, self.to_drug.slug)
        })

    def other_interactant(self, drug):
        """Given one interactant of this pair, return the other one."""
        index = self.interactants.index(drug)
        return self.interactants[not index]

    @property
    def slug(self):
        return f"{self.from_drug.slug}_{self.to_drug.slug}"

    @property
    def interactants(self):
        return (self.from_drug, self.to_drug)

    @interactants.setter
    def interactants(self, interactants):
        # Canonical slug order so (A, B) and (B, A) are the same row.
        interactants = sorted(interactants, key=attrgetter('slug'))
        self.from_drug, self.to_drug = interactants

    def sort_interactants(self):
        # The interactants property setter will handle interactants
        # reordering
        self.interactants = self.interactants

    def extract_uris(self):
        """Extract URIs from this model `risk_description` and
        `effect_description` text fields."""
        return set().union(*map(
            lambda field: AnchorConverter(field).uris,
            (self.risk_description, self.effect_description)
        ))

    def update_uris(self):
        """Update stored URIs according to this model text fields.
        If a URI was already extracted, its stored snapshot date is kept.
        Unused URIs will be removed.
        New URIs will be added with a `None` value.
        """
        # Bug fix: self.uris is a dict, so previously-stored snapshot
        # dates must be looked up with dict.get(); the original
        # getattr(self.uris, uri, None) call always returned None,
        # silently dropping every stored Wayback Machine date on save.
        self.uris = {
            uri: self.uris.get(uri)
            for uri in self.extract_uris()
        }

    def schedule_webarchive_ping(self):
        """Queue the webarchive ping task for this interaction's URIs."""
        ping_webarchive(self.id, self.uris)()

    def save(self, process_uris=True, *args, **kwargs):
        """Persist the row, normalizing interactant order and, unless
        disabled, refreshing extracted URIs and pinging the webarchive."""
        self.sort_interactants()
        if process_uris:
            self.update_uris()
        super().save(*args, **kwargs)
        if process_uris:
            self.schedule_webarchive_ping()

    @classmethod
    def get_dummy_risks(cls):
        """Unsaved instances, one per Risk value."""
        return [cls(risk=risk) for risk in cls.Risk.values]

    @classmethod
    def get_dummy_synergies(cls):
        """Unsaved instances, one per Synergy value."""
        return [cls(synergy=synergy) for synergy in cls.Synergy.values]

    class Meta:
        constraints = (
            CheckConstraint(
                check=~Q(from_drug=F('to_drug')),
                name='interactants_inequals'
            ),
            UniqueConstraint(
                fields=('from_drug', 'to_drug'),
                name='interactants_unique_together'
            )
        )
        verbose_name = _("interaction")
class Category(LastModifiedModel):
    """A grouping of Drug records (see the Drug.category foreign key)."""

    name = CharField(
        max_length=128,
        verbose_name=_("name")
    )
    slug = SlugField(
        unique=True,
        verbose_name=_("identifier")
    )
    description = TextField(
        default='',
        blank=True,
        verbose_name=_("description")
    )

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _("category")
        verbose_name_plural = _("categories")
class Note(LastModifiedModel):
    """Admin-only free-form note, optionally linked to substances."""

    title = CharField(
        max_length=128,
        default=_("Untitled note"),
        verbose_name=_("title")
    )
    content = TextField(
        default='',
        blank=True,
        verbose_name=_("content"),
        help_text=_(
            "Notes are only displayed on this admin site and are shared "
            "between all users and languages.")
    )
    related_drugs = ManyToManyField(
        'Drug',
        related_name='notes',
        blank=True,
        verbose_name=_("involved substances"),
        help_text=_(
            "If this note involves specific substances, you can "
            "optionally set them here.")
    )

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = _("note")
class Contributor(Model):
    """Public contributor profile, a one-to-one extension of the auth user."""

    user = OneToOneField(
        get_user_model(),
        CASCADE,
        related_name='profile',
        verbose_name=_("user")
    )
    page = URLField(
        default='',
        blank=True,
        max_length=128,
        verbose_name=_("personal page"),
        help_text=_(
            "This link may be used in public contributors lists.")
    )
    display = BooleanField(
        default=False,
        verbose_name=_("show publicly"),
        help_text=_("Show this profile in public contributors lists.")
    )

    @property
    def avatar_url(self):
        """Libravatar URL derived from the user's email address."""
        return get_libravatar_url(
            email=self.user.email,
            https=True,
            size=150,
            default='identicon'
        )

    def __str__(self):
        return self.user.username

    class Meta:
        verbose_name = _("contributor profile")
        verbose_name_plural = _("contributor profiles")
| x-yzt/mixtures | drugcombinator/models.py | models.py | py | 12,000 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 25,
"usage_type": "call"
}... |
8368222279 | from itertools import groupby
def checkgroup(word):
    """Return a truthy run count if *word* is a "group word", else 0.

    A group word is one where every character's occurrences are
    consecutive (e.g. "happy" qualifies; "aba" does not).  On success the
    number of character runs is returned (>= 1 for non-empty input),
    preserving the original contract where callers only test truthiness;
    the empty string yields 0, as before.
    """
    # Collapse the word into its sequence of run keys; the word is a
    # group word iff no key appears in more than one run.  This replaces
    # the original index-gap scan, which recomputed all occurrence
    # positions for every run.
    keys = [key for key, _ in groupby(word)]
    return len(keys) if len(keys) == len(set(keys)) else 0
# Driver: read N, then N words from stdin, and print how many of them
# are group words (checkgroup returns a truthy value for those).
count = int(input())
result = 0
for _ in range(count):
    if checkgroup(input()):
        result += 1
print(result)
| hyelimchoi1223/Algorithm-Study | 백준/[백준]1316 그룹 단어 체커/python.py | python.py | py | 789 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "itertools.groupby",
"line_number": 3,
"usage_type": "call"
}
] |
8342129346 | from django.http import request
from django.http.response import HttpResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from core.models import Medico, Hora, Cita, Paciente
from core.forms import PacienteForm, HoraForm, MedicoForm, DisponibilidadForm, CitaForm
from core.decorators import usuarios_permitiado, usuario_identificado
from datetime import *
# Create your views here.
def home_page(request):
    """Render the landing page."""
    return render(request, 'pages/home.html', {})
def toma_hora_page(request):
    """Specialty picker: on POST, redirect to the doctors list for the
    chosen specialty; on GET, render the selection form."""
    context = {}
    if request.method == "POST":
        b = request.POST['especialidad']
        return redirect('doctores_pages', pk=b)
    else:
        # Plain GET: fall through and render the form.
        print("error")
    return render(request,'pages/tomar_hora.html', context)
def doctores(request, pk):
    """List doctors of specialty *pk* with their available slots over the
    next 16 days, grouped per day; on POST, forward the chosen Hora id to
    the confirmation page."""
    context = {}
    try:
        doctores = Medico.objects.filter(especialidad=pk)
        doc_list = []
        # Only offer slots within a rolling 16-day booking window.
        startdate = date.today()
        enddate = startdate + timedelta(days=16)
        for doctor in doctores:
            horas = Hora.objects.filter(medico=doctor, disponible=True).filter(fecha__range=[startdate, enddate])
            # Group this doctor's free slots by date for the template.
            grouped = dict()
            for hora in horas:
                grouped.setdefault(hora.fecha, []).append(hora)
            obj = {"doctor": doctor, "horas": grouped}
            doc_list.append(obj)
        context['doctores'] = doc_list
    except:
        # NOTE(review): bare except hides real errors; consider narrowing.
        context['doctores'] = "sin doctores"
        context["horas"] = "sin horas"
    if request.method == "POST":
        pk = request.POST["hora"]
        return redirect('confirmacion_page', pk=pk)
    else:
        print("error")
    return render(request,'pages/docts.html', context)
def confirmacion(request, pk):
    """Confirm booking of time slot (Hora) *pk*.

    POST: if a Paciente with the submitted rut already exists, book the
    slot for them; otherwise validate the PacienteForm, create the
    patient, then book.  GET: render the confirmation form.
    """
    context = {}
    form = PacienteForm()
    hora = Hora.objects.get(id=pk)
    if request.method == 'POST':
        form = PacienteForm(request.POST)
        try:
            # Existing patient: reuse the record keyed by rut.
            rut = request.POST["rut"]
            paciente = Paciente.objects.get(rut=rut)
            hora.disponible = False
            hora.save()
            Cita.objects.create(
                paciente = paciente,
                hora = hora
            )
            return redirect('home_page')
        except:
            # Unknown patient (lookup failed): create one from the form.
            # NOTE(review): bare except also swallows form/save errors.
            if form.is_valid():
                paciente = form.save()
                hora.disponible = False
                hora.save()
                Cita.objects.create(
                    paciente = paciente,
                    hora = hora
                )
                return redirect('home_page')
            else:
                print("error")
    else:
        print("error")
    context["form"] = form
    context["hora"] = hora
    return render(request, 'pages/conf.html', context)
def cancelar_page(request):
    """List a patient's active appointments so one can be cancelled.

    POST expects a 'rut'; the patient's active (habilitada) Citas are
    added to the context.  On GET the template receives no 'citas' key.
    """
    context = {}
    if request.method == 'POST':
        rut = request.POST['rut']
        paciente = Paciente.objects.get(rut=rut)
        citas = Cita.objects.filter(paciente=paciente, habilitada=True)
        context["citas"] = citas
    else:
        print("error")
    return render(request, 'pages/cancel.html', context)
def confirm_cancelar(request, pk):
    """Confirm cancellation of appointment (Cita) *pk*.

    POST disables the appointment and releases its time slot.
    NOTE(review): no redirect after POST, so a refresh re-submits;
    the operation is idempotent, but worth confirming the intent.
    """
    context = {}
    cita = Cita.objects.get(id=pk)
    context["cita"] = cita
    if request.method == 'POST':
        cita.habilitada = False
        cita.save()
        # Free the slot so it can be booked again.
        hora = Hora.objects.get(id=cita.hora.id)
        hora.disponible = True
        hora.save()
    else:
        print("error")
    return render(request, 'pages/conf_cancel.html', context)
def login_page(request):
    """Authenticate the posted credentials and start a session; on
    success redirect home, otherwise re-render the login form."""
    context = {}
    if request.method == 'POST':
        username = request.POST.get("username")
        password = request.POST.get("password")
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('home_page')
        else:
            # Bad credentials: fall through and render the form again.
            print("error al indentificar")
    else:
        print("error")
    return render(request, 'pages/login.html', context)
def logout_user(request):
    """Terminate the current session and return to the login screen."""
    logout(request)
    return redirect('login_page')
@login_required(login_url="login_page")
@usuarios_permitiado(roles_permitidos=['secretaria'])
def secretaria_page(request):
    """Secretary dashboard (requires login and the 'secretaria' role)."""
    context = {}
    return render(request, 'pages/secretaria.html', context)
@login_required(login_url="login_page")
@usuarios_permitiado(roles_permitidos=['secretaria'])
def agregar_hora_page(request):
    """Secretary view: create a new Hora (time slot).

    GET renders an empty HoraForm; POST validates and saves it, then
    redirects to the dashboard (post/redirect/get, consistent with
    quitar_hora_page, avoiding duplicate slots on refresh).  On
    validation failure the bound form is re-rendered so its field errors
    are visible -- previously a fresh empty form was shown instead.
    """
    form = HoraForm()
    if request.method == 'POST':
        form = HoraForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('secretaria_page')
    # Bind the (possibly invalid) form after POST handling so errors show.
    context = {"form": form}
    return render(request, 'pages/secretaria/agregar_hora.html', context)
@login_required(login_url="login_page")
@usuarios_permitiado(roles_permitidos=['secretaria'])
def quitar_hora_page(request):
    """Secretary view: list every doctor's free slots over the next 16
    days (grouped per day) and mark the chosen one unavailable on POST."""
    context = {}
    try:
        doctores = Medico.objects.all()
        # Same rolling 16-day window as the public doctores view.
        startdate = date.today()
        enddate = startdate + timedelta(days=16)
        doc_list = []
        for doctor in doctores:
            horas = Hora.objects.filter(medico=doctor, disponible=True).filter(fecha__range=[startdate, enddate])
            # Group this doctor's free slots by date for the template.
            grouped = dict()
            for hora in horas:
                grouped.setdefault(hora.fecha, []).append(hora)
            obj = {"doctor": doctor, "horas": grouped}
            doc_list.append(obj)
        context['doctores'] = doc_list
    except:
        # NOTE(review): bare except hides real errors; consider narrowing.
        context['doctores'] = "sin doctores"
        context["horas"] = "sin horas"
    if request.method == 'POST':
        pk = request.POST["hora"]
        hora = Hora.objects.get(id=pk)
        hora.disponible = False
        hora.save()
        return redirect('secretaria_page')
    else:
        print("error")
    return render(request, 'pages/secretaria/quitar_hora.html', context)
@login_required(login_url="login_page")
@usuarios_permitiado(roles_permitidos=['secretaria'])
def agregar_medico_page(request):
    """Secretary view: register a new Medico (doctor).

    GET renders an empty MedicoForm; POST validates and saves it, then
    redirects to the dashboard (post/redirect/get, consistent with
    quitar_hora_page, avoiding duplicate records on refresh).  On
    validation failure the bound form is re-rendered so its field errors
    are visible -- previously a fresh empty form was shown instead.
    """
    form = MedicoForm()
    if request.method == 'POST':
        form = MedicoForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('secretaria_page')
    # Bind the (possibly invalid) form after POST handling so errors show.
    context = {"form": form}
    return render(request, 'pages/secretaria/agregar_medico.html', context)
@login_required(login_url="login_page")
@usuarios_permitiado(roles_permitidos=['secretaria'])
def agregar_disponibilidad_page(request):
    """Secretary view: register a doctor's availability.

    GET renders an empty DisponibilidadForm; POST validates and saves it,
    then redirects to the dashboard (post/redirect/get, consistent with
    quitar_hora_page, avoiding duplicate records on refresh).  On
    validation failure the bound form is re-rendered so its field errors
    are visible -- previously a fresh empty form was shown instead.
    """
    form = DisponibilidadForm()
    if request.method == 'POST':
        form = DisponibilidadForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('secretaria_page')
    # Bind the (possibly invalid) form after POST handling so errors show.
    context = {"form": form}
    return render(request, 'pages/secretaria/agregar_disponibilidad.html', context)
@login_required(login_url="login_page")
@usuarios_permitiado(roles_permitidos=['secretaria'])
def modificar_cita_page(request):
    """List all active appointments so the secretary can pick one to edit."""
    context = {}
    citas = Cita.objects.filter(habilitada=True)
    context["citas"] = citas
    return render(request, 'pages/secretaria/modificar_hora.html', context)
@login_required(login_url="login_page")
@usuarios_permitiado(roles_permitidos=['secretaria'])
def update_cita_page(request, pk):
    """Secretary view: edit existing appointment (Cita) *pk*.

    GET renders a CitaForm bound to the appointment; POST validates and
    saves the changes, then redirects to the dashboard.  The POST form is
    constructed with ``instance=cita`` so the existing row is updated --
    the original code omitted it, which inserted a brand-new Cita instead
    of modifying appointment *pk*.
    """
    cita = Cita.objects.get(id=pk)
    form = CitaForm(instance=cita)
    if request.method == 'POST':
        # instance=cita makes this an update rather than an insert.
        form = CitaForm(request.POST, instance=cita)
        if form.is_valid():
            form.save()
            return redirect('secretaria_page')
    # Bind the (possibly invalid) form after POST handling so errors show.
    context = {"form": form}
    return render(request, 'pages/secretaria/update_cita.html', context)
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.http.request",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "django.http.request.method",
"line_number": 20,
"usage_type": "attribute"
},
{
"a... |
34981855639 | from flask import Flask, request, render_template
from googlesearch import search
app = Flask(__name__)
def search_pdfs(query, num_results=5):
    """Return up to *num_results* URLs of PDF files matching *query*.

    A "filetype:pdf" filter is appended to the Google query; on any
    failure the error is printed and an empty list is returned.
    """
    try:
        return list(search(query + " filetype:pdf", num_results=num_results))
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return []
@app.route("/", methods=["GET", "POST"])
def index():
    """Search page: on POST run the PDF search for the submitted query;
    on GET render with an empty result list."""
    search_results = []
    if request.method == "POST":
        search_query = request.form.get("query")
        search_results = search_pdfs(search_query, num_results=5)
    return render_template("index.html", search_results=search_results)
# Run Flask's development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
| suryagowda/booksearcherr | booksearcher/app.py | app.py | py | 764 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "googlesearch.search",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.reques... |
39260413778 | # import system modules
import traceback
import os
import sys
import errno
import subprocess
import time
import signal
import functools
# import I/O modules
import RPi.GPIO as GPIO
import smbus2
import spidev
# import utility modules
import math
import numpy as np
import scipy.constants as const
from dataclasses import dataclass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
    """Decorator factory: abort the wrapped call with TimeoutError after
    *seconds* of wall-clock time.

    SIGALRM-based, so it only works on Unix and in the main thread.

    Parameters
    ----------
    seconds : int
        Wall-clock budget for one call of the wrapped function.
    error_message : str
        Message carried by the raised TimeoutError.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Remember the previous SIGALRM handler so other users of the
            # signal are not clobbered once this call finishes (the
            # original left our handler installed forever).
            previous_handler = signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm and restore the handler.
                signal.alarm(0)
                signal.signal(signal.SIGALRM, previous_handler)
            return result

        return wrapper

    return decorator
@dataclass
class LidarConfig:
    """Sensor configuration: frame geometry and timing parameters, plus
    the translation into the sensor's I2C register map."""

    # dimensions
    height: int = 80
    width: int = 104  # not including header pixel
    Ndata: int = 2
    Nlight: int = 12000
    # timing
    T0_pulse: int = 8
    Light_pulse: int = 7
    VTX3_pulse: int = 28
    ADC_delay: int = 1
    light_offset: float = 0.5
    extrst_pulse: int = 16384
    frame_blank: int = 255

    def generate_reg_map(self):
        """Translate this configuration into the ordered sequence of
        (register address, byte value) pairs to be written over I2C,
        ending with the start-up sequence on register 0x00."""
        def hi(value):
            return (value >> 8) & 0xFF

        def lo(value):
            return value & 0xFF

        # The half-cycle flag and the whole-cycle count of the light
        # pulse offset are programmed into separate registers.
        offset_whole = math.ceil(self.light_offset) & 0xFF
        offset_has_half = self.light_offset % 1 > 0

        reg_map = [
            (0x00, 0b11100011),  # stop operation
            (0x07, 0b11000000),  # unknown?
            (0x08, hi(self.extrst_pulse)),  # ext_reset
            (0x09, lo(self.extrst_pulse)),
            (0x0A, hi(self.width)),  # H_pixel_num
            (0x0B, lo(self.width)),
            (0x0C, hi(self.height)),  # V_pixel_num
            (0x0D, lo(self.height)),
            (0x0E, 0x25),  # HST_offset
            (0x0F, 0b10110111),  # light_pattern
            (0x10, lo(self.frame_blank)),  # frame blanking
            (0x11, hi(self.frame_blank)),
            (0x12, self.ADC_delay & 0x1F),  # ADC_delay_cfg
            (0x13, (0b0100 << 4) | ((self.Nlight >> 16) & 0x0F)),  # LV_delay, Nlight
            (0x14, hi(self.Nlight)),
            (0x15, lo(self.Nlight)),
            (0x16, lo(self.Ndata)),  # Ndata (must be >1, otherwise only reset value is read and VTX won't trigger)
            (0x17, lo(self.T0_pulse)),  # VTX1
            (0x18, lo(self.T0_pulse)),  # VTX2
            (0x19, hi(self.VTX3_pulse)),  # VTX3
            (0x1A, lo(self.VTX3_pulse)),
            (0x1B, lo(self.Light_pulse)),  # light_pulse_width
            (0x1D, offset_whole),  # light_pulse_offset
            (0x1F, (self.T0_pulse >> 1) & 0x7F),  # P4_half_delay, P4_delay
            (0x20, (0b0 << 7) | ((offset_has_half << 6) & 0x40) | (0b1001)),  # L/A, Light_pulse_half_delay, H_pixel_blanking
            # (0x21, 0x00), # T1 (linear only)
            # (0x22, 0x00), # PHIS (linear only)
            # (0x23, 0x00), # T2 (linear only)
            (0x24, 0b00001111),  # timing signal enable: light/VTX1/VTX2/VTX3
            (0x00, 0b11000011),  # start clock divider
            (0x00, 0b10000011),  # start clock
            (0x00, 0b00000011),  # start timing gen
        ]
        return tuple(reg_map)
class LidarControl:
    """Driver for the LiDAR sensor and its companion RP2040 readout MCU.

    Sensor registers are programmed over I2C, frame data is pulled from
    the MCU over SPI, and two GPIO lines drive the reset pins.
    """

    # physical
    width: int = int(104)  # not including header pixel
    height: int = int(80)
    Ndata: int = int(2)
    T_0: float = 8 / 60 * 1e-6
    # I/O (placeholders until the connect_* methods assign real handles)
    i2c_dev = []
    i2c_channel = 1
    i2c_address_lidar = 0x2A
    spi_dev = []
    spi_channel = 0
    spi_device_MCU = 0
    pin_sensor_rst_P = 4
    pin_mcu_rst_N = 23

    def __init__(self, config=LidarConfig()):
        """Copy the geometry/timing values this driver needs out of *config*."""
        # NOTE(review): the default LidarConfig() is created once at class
        # definition time and shared between calls; harmless here since it
        # is only read, but worth confirming.
        self.width = config.width
        self.height = config.height
        self.Ndata = config.Ndata
        self.T_0 = config.T0_pulse / 60 * 1e-6
        self.config = config

    def __del__(self):
        """Best-effort release of the SPI handle and all GPIO lines."""
        print("LiDAR clean up called.")
        try:
            self.spi_dev.close()
            GPIO.cleanup()
        except Exception as err:
            print("Fail to clean GPIO.")
            print(err)

    def connect_GPIO(self):
        """Select BCM pin numbering for all subsequent GPIO calls."""
        GPIO.setmode(GPIO.BCM)

    def connect_sensor(self, i2c_ch=1, i2c_addr=0x2A, pin_sensor_rst=4):
        """Claim the sensor reset pin and open the I2C bus to the sensor.

        Raises RuntimeError if the GPIO pin or the I2C bus is unavailable.
        """
        self.i2c_channel = i2c_ch
        self.i2c_address_lidar = i2c_addr
        self.pin_sensor_rst_P = pin_sensor_rst
        try:
            GPIO.setup(self.pin_sensor_rst_P, GPIO.OUT, initial=0)  # sensor reset (P)
        except Exception as err:
            print("Error:", err)
            print("Sensor rst pin initialization failed!")
            raise RuntimeError("GPIO not available!")
        else:
            print("Sensor rst pin initialized.")
        try:
            i2c_sensor = smbus2.SMBus(self.i2c_channel)
        except FileNotFoundError as err:
            print("FileNotFoundError", err)
            if err.errno == 2:
                # Missing /dev/i2c-N device node.
                print("I2C not enabled. Check raspi-config.")
            raise RuntimeError("I2C bus not available!")
        except Exception as err:
            print("Error:", err)
            print("I2C initialization failed!")
            raise RuntimeError("I2C bus init failed!")
        else:
            print("I2C initialized.")
        self.i2c_dev = i2c_sensor

    def connect_MCU(self, spi_ch=0, spi_num=0, pin_mcu_rst=23):
        """Claim the MCU reset pin and open the SPI link to the MCU.

        Raises RuntimeError if the GPIO pin or the SPI bus is unavailable.
        """
        self.spi_channel = spi_ch
        self.spi_device_MCU = spi_num
        self.pin_mcu_rst_N = pin_mcu_rst
        try:
            GPIO.setup(self.pin_mcu_rst_N, GPIO.OUT, initial=1)  # MCU reset (N)
        except Exception as err:
            print("Error:", err)
            print("MCU rst pin initialization failed!")
            raise RuntimeError("GPIO not available!")
        else:
            print("MCU rst pin initialized.")
        try:
            spi_mcu = spidev.SpiDev()
            spi_mcu.open(self.spi_channel, self.spi_device_MCU)
        except Exception as err:
            print("Error:", err)
            print("SPI initialization failed!")
            raise RuntimeError("SPI bus init failed!")
        else:
            # SPI mode 0b11 (CPOL=1, CPHA=1), MSB first, 8-bit words, 5 MHz.
            spi_mcu.max_speed_hz = 5000000
            spi_mcu.mode = 0b11
            spi_mcu.bits_per_word = 8
            spi_mcu.lsbfirst = False
            print(f"SPI initialized at {spi_mcu.max_speed_hz}Hz.")
        self.spi_dev = spi_mcu

    def reset_device(self, sensor=True, mcu=False):
        """Pulse the selected reset lines for ~10 ms each (the sensor
        reset is active-high, the MCU reset active-low)."""
        if sensor:
            GPIO.output(self.pin_sensor_rst_P, 1)
        if mcu:
            GPIO.output(self.pin_mcu_rst_N, 0)
        time.sleep(0.01)
        if sensor:
            GPIO.output(self.pin_sensor_rst_P, 0)
        if mcu:
            GPIO.output(self.pin_mcu_rst_N, 1)
        time.sleep(0.01)
        print("Devices reset.")

    @timeout(5)
    def load_MCU(self, binary_path="dvp2proc2spi_lnk.elf"):
        """Flash *binary_path* onto the RP2040 via openocd over SWD.

        Aborts with TimeoutError after 5 s; raises RuntimeError on failure.
        """
        try:
            # specifically ask for dual-core reset-halt-run in openocd
            # otherwise core 1 will crash after boot (bug in openocd?)
            openocd_cmd = f"program {binary_path} verify; " + \
                "reset halt; " + \
                "rp2040.core1 arp_reset assert 0; " + \
                "rp2040.core0 arp_reset assert 0; " + \
                "exit"
            load_cmd = ["openocd",
                        "-f", "interface/raspberrypi-swd.cfg",
                        "-f", "target/rp2040.cfg",
                        "-c", openocd_cmd]
            subprocess.run(load_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        except Exception as err:
            print("Error:", err)
            print("Load binary failed!")
            raise RuntimeError("MCU binary loading failed!")
        else:
            print("MCU binary loaded.")

    def setup_sensor(self):
        """Write the configuration register map to the sensor over I2C,
        reading each register back to verify the write took effect."""
        try:
            lidar_reg_map = self.config.generate_reg_map()
            # write regs and check step-by-step
            for instruction in lidar_reg_map:
                self.i2c_dev.write_byte_data(self.i2c_address_lidar, instruction[0], instruction[1])
                if instruction[1] != self.i2c_dev.read_byte_data(self.i2c_address_lidar, instruction[0]):
                    raise Exception(f"Register validation failed! @{instruction}")
        except OSError as err:
            print("OSError", err)
            if err.errno == 121:
                # Remote I/O error: no ACK from the sensor on the bus.
                print("I2C: No response from device! Check wiring on GPIO2/3.")
            raise RuntimeError("I2C device not connected!")
        except Exception as err:
            print("Error:", err)
            print("I2C unknown error!")
            raise RuntimeError("I2C unknown error!")
        else:
            print("I2C data sent.")

    @timeout(10)
    def acquire_data(self):
        """Trigger one capture on the MCU and read the frame back over SPI.

        Returns an int16 array indexed as [F1..F4][VTX1,VTX2][Y][X].
        Aborts with TimeoutError after 10 s.
        """
        # [F1..F4] [VTX1,VTX2] [Y] [X]
        data = np.zeros((4, 2, self.height, self.width), dtype=np.int16)
        # progress info
        print(f" - Trigger Frame capture and SPI read.")
        # command MCU to start frame capturing
        time.sleep(0.01)  # wait for MCU to flush FIFO
        self.spi_dev.writebytes([0x01])
        # query frame state
        timeout_counter = 0
        while True:
            frame_state = self.spi_dev.readbytes(1)
            time.sleep(0.01)  # wait for MCU to flush FIFO
            if frame_state[0] == (0x11):
                # MCU reports the frame is ready for transfer.
                break
            else:
                timeout_counter += 1
                # re-trigger if there is a timeout (SPI command lost)
                if (timeout_counter > 250):
                    timeout_counter = 0
                    self.spi_dev.writebytes([0x01])
                    print(f" - Re-trigger Frame capture.")
        # data transfering
        data_stream = np.zeros((4, self.height, 2 * (self.width + 1)), dtype=np.int16)
        for subframe in range(0, 4):
            for line in range(0, self.height):
                temp = self.spi_dev.readbytes(4 * (self.width + 1))
                temp = np.array(temp, dtype=np.int16)
                # Merge byte pairs into little-endian 16-bit words.
                data_stream[subframe, line, :] = (temp[1::2] << 8) | temp[0::2]
        # De-interleave VTX1/VTX2 samples, skipping the 2-word header pixel.
        data[:, 0, :, :] = data_stream[:, :, 2::2]
        data[:, 1, :, :] = data_stream[:, :, 3::2]
        # Swap subframes at indices 0 and 2.
        data[[0, 2], :, :, :] = data[[2, 0], :, :, :]
        return data
| ExplodingONC/Flash_LiDAR_Microscan | LidarControl.py | LidarControl.py | py | 10,164 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.strerror",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "errno.ETIME",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "signal.SIGALRM",
"line... |
5259209115 | # import libraries
# Airflow DAG: spin up an EMR cluster, run the Spark preprocessing /
# fact-and-dimension steps for each dataset, then terminate the cluster.
import datetime
from airflow import DAG
from airflow.contrib.operators.emr_create_job_flow_operator import EmrCreateJobFlowOperator
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.contrib.sensors.emr_step_sensor import EmrStepSensor
from airflow.contrib.operators.emr_terminate_job_flow_operator import EmrTerminateJobFlowOperator
################
# CONFIGURATIONS
################
# name of s3 bucket with scripts
s3_bucket = "s3://dendcapstoneproject/"
# initialize dag
# NOTE(review): a dynamic start_date (datetime.now() - 1 day) is an Airflow
# anti-pattern -- the docs recommend a fixed start_date; confirm this is
# intentional for the "@once" schedule.
dag = DAG(
    "prepare-data-for-redshift",
    start_date=datetime.datetime.now()-datetime.timedelta(days=1),
    schedule_interval="@once"
)
####################
# CREATE EMR CLUSTER
####################
# Cluster spec: 1 master + 2 core m5.xlarge nodes, EMR 6.5.0 with
# Hadoop + Spark, kept alive between steps and torn down explicitly below.
JOB_FLOW_OVERRIDES = {
    "Name": "capstone-emr",
    "LogUri": "s3://aws-logs-576946247943-us-west-2/elasticmapreduce/",
    "ReleaseLabel": "emr-6.5.0",
    "Applications": [{"Name": "Hadoop"}, {"Name": "Spark"}],
    "Configurations": [
        {
            "Classification": "spark-env",
            "Configurations": [
                {
                    "Classification": "export",
                    "Properties": {"PYSPARK_PYTHON": "/usr/bin/python3"},
                }
            ],
        }
    ],
    "Instances": {
        "InstanceGroups": [
            {
                "Name": "Master node",
                "Market": "ON_DEMAND",
                "InstanceRole": "MASTER",
                "InstanceType": "m5.xlarge",
                "InstanceCount": 1,
            },
            {
                "Name": "Core - 2",
                "Market": "ON_DEMAND",
                "InstanceRole": "CORE",
                "InstanceType": "m5.xlarge",
                "InstanceCount": 2,
            },
        ],
        "KeepJobFlowAliveWhenNoSteps": True,
        "TerminationProtected": False,
    },
    "JobFlowRole": "EMR_EC2_DefaultRole",
    "ServiceRole": "EMR_DefaultRole",
}
create_emr_cluster = EmrCreateJobFlowOperator(
    task_id="create_emr_cluster",
    job_flow_overrides=JOB_FLOW_OVERRIDES,
    aws_conn_id="aws_credentials",
    emr_conn_id="emr_default",
    dag=dag
)
############################
# IMMIGRATION DATA HANDLING
############################
# preprocess the immigration data prior to create fact and dimension tables
preprocess_immigration_data = EmrAddStepsOperator(
    task_id="preprocess_immigration_data",
    job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
    aws_conn_id="aws_credentials",
    steps=[{
        "Name": "preprocess_immigration_data",
        "ActionOnFailure": "TERMINATE_CLUSTER",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--master",
                "yarn",
                "--packages",
                "saurfang:spark-sas7bdat:3.0.0-s_2.12",
                "--py-files",
                f"{s3_bucket}scripts/shared_spark_vars.py",
                f"{s3_bucket}scripts/immigration-data-preprocessing.py"
            ]
        }
    }],
    dag=dag
)
# create the fact and dimension tables
create_immigration_fact_dims = EmrAddStepsOperator(
    task_id="create_immigration_fact_dims",
    job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
    aws_conn_id="aws_credentials",
    steps=[{
        "Name": "create_immigration_fact_dims",
        "ActionOnFailure": "TERMINATE_CLUSTER",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--master",
                "yarn",
                "--py-files",
                f"{s3_bucket}scripts/shared_spark_vars.py",
                f"{s3_bucket}scripts/immigration-fact-and-dimension-creation.py"
            ]
        }
    }],
    dag=dag
)
# watch the immigration data handling process
# NOTE(review): only the create_immigration_fact_dims step is watched; the
# preprocess_immigration_data step has no sensor of its own -- confirm the
# EMR steps run sequentially so this single sensor is sufficient.
watch_immigration_data_handling = EmrStepSensor(
    task_id="watch_immigration_data_handling",
    job_flow_id="{{ task_instance.xcom_pull('create_emr_cluster', key='return_value') }}",
    step_id="{{ task_instance.xcom_pull(task_ids='create_immigration_fact_dims', key='return_value')[0] }}",
    aws_conn_id="aws_credentials",
    dag=dag
)
############################
# DEMOGRAPHIC DATA HANDLING
############################
# preprocess the demographic data and create fact and dimension tables using it
process_demographic_data = EmrAddStepsOperator(
    task_id="process_demographic_data",
    job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
    aws_conn_id="aws_credentials",
    steps=[{
        "Name": "process_demographic_data",
        "ActionOnFailure": "TERMINATE_CLUSTER",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--master",
                "yarn",
                "--py-files",
                f"{s3_bucket}scripts/shared_spark_vars.py",
                f"{s3_bucket}scripts/demographics-data-processing.py"
            ]
        }
    }],
    dag=dag
)
# watch the demographic data handling process
watch_demographic_data_handling = EmrStepSensor(
    task_id="watch_demographic_data_handling",
    job_flow_id="{{ task_instance.xcom_pull('create_emr_cluster', key='return_value') }}",
    step_id="{{ task_instance.xcom_pull(task_ids='process_demographic_data', key='return_value')[0] }}",
    aws_conn_id="aws_credentials",
    dag=dag
)
#############################
# AIRPORT CODES DATA HANDLING
#############################
# preprocess the airport data and create fact and dimension tables using it
process_airport_data = EmrAddStepsOperator(
    task_id="process_airport_data",
    job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
    aws_conn_id="aws_credentials",
    steps=[{
        "Name": "process_airport_data",
        "ActionOnFailure": "TERMINATE_CLUSTER",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--master",
                "yarn",
                "--py-files",
                f"{s3_bucket}scripts/shared_spark_vars.py",
                f"{s3_bucket}scripts/airport-codes-processing.py"
            ]
        }
    }],
    dag=dag
)
# watch the airport data handling process
watch_airport_data_handling = EmrStepSensor(
    task_id="watch_airport_data_handling",
    job_flow_id="{{ task_instance.xcom_pull('create_emr_cluster', key='return_value') }}",
    step_id="{{ task_instance.xcom_pull(task_ids='process_airport_data', key='return_value')[0] }}",
    aws_conn_id="aws_credentials",
    dag=dag
)
###########################
# TEMPERATURE DATA HANDLING
###########################
# preprocess the temperature data and create fact and dimension tables using it
process_temperature_data = EmrAddStepsOperator(
    task_id="process_temperature_data",
    job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
    aws_conn_id="aws_credentials",
    steps=[{
        "Name": "process_temperature_data",
        "ActionOnFailure": "TERMINATE_CLUSTER",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--master",
                "yarn",
                "--py-files",
                f"{s3_bucket}scripts/shared_spark_vars.py",
                f"{s3_bucket}scripts/temperature-data-processing.py"
            ]
        }
    }],
    dag=dag
)
# watch the temperature data handling process
watch_temperature_data_handling = EmrStepSensor(
    task_id="watch_temperature_data_handling",
    job_flow_id="{{ task_instance.xcom_pull('create_emr_cluster', key='return_value') }}",
    step_id="{{ task_instance.xcom_pull(task_ids='process_temperature_data', key='return_value')[0] }}",
    aws_conn_id="aws_credentials",
    dag=dag
)
#####################
# TERMINATE CLUSTER
####################
# terminate the EMR cluster
# NOTE(review): this operator uses aws_conn_id="aws_default" while every
# other task uses "aws_credentials" -- verify this is intentional, otherwise
# termination may run with the wrong (or missing) AWS connection.
terminate_emr_cluster = EmrTerminateJobFlowOperator(
    task_id="terminate_emr_cluster",
    job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
    aws_conn_id="aws_default",
    dag=dag,
)
###########
# JOB FLOW
###########
create_emr_cluster >> preprocess_immigration_data >> create_immigration_fact_dims >> watch_immigration_data_handling
create_emr_cluster >> process_airport_data >> watch_airport_data_handling
create_emr_cluster >> process_demographic_data >> watch_demographic_data_handling
[watch_airport_data_handling, watch_demographic_data_handling] >> process_temperature_data >> watch_temperature_data_handling
[watch_immigration_data_handling, watch_temperature_data_handling] >> terminate_emr_cluster | stefanjaro/data-engineering-nanodegree-capstone-project | airflow/dags/prepare-data-for-redshift.py | prepare-data-for-redshift.py | py | 8,900 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "airflow.DAG",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "datetime.tim... |
74087426982 | import requests, datetime, csv
from flask import Flask
from flask import request, render_template
# Fetch the current NBP table C (buy/sell) exchange rates once at import time.
response = requests.get("http://api.nbp.pl/api/exchangerates/tables/C?format=json")
data_as_json= response.json()
app = Flask(__name__)
# The API returns a list of table objects; keep the rates and the effective
# date of the last one (in practice the response holds a single table).
for item in data_as_json:
    only_rates = item.get('rates')
    current_date = item.get('effectiveDate')
codes_list = []
# Collect the currency codes used to populate the form in calculator.html.
for rate in only_rates:
    cc = rate['code']
    codes_list.append(cc)
# Dump the rate table to names.csv (semicolon-separated) as a local snapshot.
with open('names.csv', 'w', encoding="utf-8", newline='') as csvfile:
    fieldnames = ['currency','code','bid','ask']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter = ';')
    writer.writeheader()
    for rate in only_rates:
        writer.writerow({'currency':rate.get('currency'),'code':rate.get('code'),
        'bid':rate.get('bid'),'ask':rate.get('ask')})
@app.route('/calculator', methods=['GET', 'POST'])
def rates_calculator():
    """Render the currency calculator (GET) or convert an amount to PLN (POST).

    On POST, the cached NBP rate table is refreshed first when its effective
    date is no longer today's date; the selected currency's ask rate then
    prices the requested quantity in PLN.
    """
    # The rate cache lives at module level so a refresh survives the request.
    global only_rates, current_date
    if request.method == 'GET':
        print("We received GET")
        return render_template("calculator.html", codes_list=codes_list)
    elif request.method == 'POST':
        print("We received POST")
        # BUG FIX: the original compared the ISO date *string* from the API
        # against a datetime.date object -- never equal -- so the table was
        # re-fetched on every POST and the module-level cache never updated.
        # str(date.today()) yields "YYYY-MM-DD", matching the API format.
        if current_date != str(datetime.date.today()):
            response = requests.get("http://api.nbp.pl/api/exchangerates/tables/C?format=json")
            data_as_json = response.json()
            for item in data_as_json:
                only_rates = item.get('rates')
                current_date = item.get('effectiveDate')
        d = request.form
        quantity_form = d.get('quantity')
        curr_selected_form = d.get('currencies')
        for rate in only_rates:
            if curr_selected_form == rate.get('code'):
                result = float(rate.get('ask')) * float(quantity_form)
                print(result)
                return f'{quantity_form} {curr_selected_form} cost {result:0.2f} PLN.'
if __name__ == "__main__":
app.run(debug=True) | gorkamarlena/currency_calculator | app.py | app.py | py | 1,803 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"lin... |
73335571623 | import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from tpk.hypervalidation.hyperparameter_search import (
run_model_cmd_parallel,
run_study,
)
from tpk.torch import TSMixerModel
@pytest.mark.asyncio
async def test_num_workers() -> None:
    # "echo 1" is executed 3 times; each run's stdout is parsed into a float.
    results = await run_model_cmd_parallel("echo 1", num_executions=3)
    assert results == [1.0, 1.0, 1.0]
@pytest.mark.asyncio
async def test_malformed_return_value() -> None:
    """A command whose output cannot be parsed as a float must raise ValueError."""
    # Idiom fix: use pytest's raises context manager instead of instantiating
    # a throwaway unittest.TestCase just for assertRaises.
    with pytest.raises(ValueError):
        await run_model_cmd_parallel("echo hi", num_executions=3)
@pytest.mark.slow
def test_run_study() -> None:
    # Run a minimal one-trial hyperparameter study into a temporary
    # directory and check that the study's journal file was written.
    with TemporaryDirectory() as dir:
        study_journal_path = Path(dir)
        run_study(
            model_cls=TSMixerModel,
            study_journal_path=study_journal_path,
            data_path=Path("data/m5"),
            study_name="test_study",
            n_trials=1,
            tests_per_trial=1,
        )
        assert (study_journal_path / "journal.log").exists()
| airtai/temporal-data-kit | tests/hypervalidation/test_hyperparameter_search.py | test_hyperparameter_search.py | py | 1,039 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "tpk.hypervalidation.hyperparameter_search.run_model_cmd_parallel",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 23,
"usage_type": "c... |
32417088655 | import json
from typing import Dict
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
import pandas as pd
import logging
class InfluxDB:
    """Thin wrapper around influxdb_client for reading/writing OHLCV candles."""
    def __init__(self, local) -> None:
        """Load config.json, connect, and set up write/query/delete APIs.

        local: when truthy, connect to a local InfluxDB instance instead of
            the InfluxData cloud endpoint.
        """
        # Create a config.json file and store your INFLUX token as a key value pair
        with open('config.json', 'r') as f:
            self.config = json.load(f)
        self.client = self.get_influxdb_client(local)
        self.write_api = self.client.write_api(write_options=SYNCHRONOUS)
        self.query_api = self.client.query_api()
        self.delete_api = self.client.delete_api()
    def get_influxdb_client(self, local=False):
        """Build an InfluxDBClient for the local or cloud endpoint (org 'pepe')."""
        return InfluxDBClient(
            url="http://localhost:8086" if local else "https://us-east-1-1.aws.cloud2.influxdata.com",
            token=self.config['INFLUXDB_TOKEN_LOCAL'] if local else self.config['INFLUXDB'],
            org="pepe"
        )
    def write_candles_to_influxdb(
        self,
        exchange,
        symbol: str,
        timeframe: str,
        candles: pd.DataFrame,
        bucket: str = "candles",
    ) -> None:
        """Write one candle Point per DataFrame row; no-op on an empty frame.

        Expects columns dates/opens/highs/lows/closes/volumes; '/' in the
        symbol is replaced with '_' so it is a valid tag value.
        """
        if candles.empty:
            logging.warning(f"Skipping write to InfluxDB for {exchange} {symbol} {timeframe} as the DataFrame is empty.")
            return
        symbol = symbol.replace("/", "_")
        points = []
        for record in candles.to_records():
            point = Point("candle") \
                .tag("exchange", exchange) \
                .tag("symbol", symbol) \
                .tag("timeframe", timeframe) \
                .field("opens", record.opens) \
                .field("highs", record.highs) \
                .field("lows", record.lows) \
                .field("closes", record.closes) \
                .field("volumes", record.volumes) \
                .time(record.dates, WritePrecision.MS)
            points.append(point)
        logging.info(f"Writing {len(candles['dates'])} candles to bucket: {bucket}, organization: 'pepe'")
        self.write_api.write(bucket, 'pepe', points)
    def read_candles_from_influxdb(
        self, exchange: str, symbol: str, timeframe: str, bucket="candles") -> Dict:
        """Query up to 1000 days of candles and return them as a DataFrame.

        Returns an empty frame with the canonical columns when no rows match.
        NOTE(review): exchange/symbol/timeframe are interpolated into the
        Flux query string -- ensure these values come from trusted input.
        """
        symbol = symbol.replace("/", "_")
        query = f"""
        from(bucket: "{bucket}")
            |> range(start: -1000d)
            |> filter(fn: (r) => r["_measurement"] == "candle")
            |> filter(fn: (r) => r["exchange"] == "{exchange}")
            |> filter(fn: (r) => r["symbol"] == "{symbol}")
            |> filter(fn: (r) => r["timeframe"] == "{timeframe}")
            |> filter(fn: (r) => r["_field"] == "closes" or r["_field"] == "highs" or r["_field"] == "lows" or r["_field"] == "opens" or r["_field"] == "volumes")
            |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
            |> drop(columns: ["_start", "_stop"])
        """
        result = self.query_api.query_data_frame(query, 'pepe')
        logging.info(f"Found {len(result)} candles from bucket: {bucket}, organization: 'pepe', {exchange}, {symbol}, {timeframe}:")
        if result.empty:
            return pd.DataFrame(columns=["dates", "opens", "highs", "lows", "closes", "volumes"])
        else:
            result = result.rename(columns={"_time": "dates"})
            result = result.reindex(columns=["dates", "opens", "highs", "lows", "closes", "volumes"])
return result | pattty847/Crypto-Market-Watch | app/api/influx.py | influx.py | py | 3,453 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "influxdb_client.client.write_api.SYNCHRONOUS",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "influxdb_client.InfluxDBClient",
"line_number": 19,
"usage_type": "call"
},
{
... |
18394317715 | import sys
import numpy as np
import tiledb
# Name of the array to create.
array_name = "reading_dense_layouts"
def create_array():
    """Create an empty 4x4 dense int32 TileDB array on disk at `array_name`."""
    # The array will be 4x4 with dimensions "rows" and "cols", with domain [1,4].
    dom = tiledb.Domain(
        tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.int32),
        tiledb.Dim(name="cols", domain=(1, 4), tile=2, dtype=np.int32),
    )
    # The array will be dense with a single attribute "a" so each (i,j) cell can store an integer.
    schema = tiledb.ArraySchema(
        domain=dom, sparse=False, attrs=[tiledb.Attr(name="a", dtype=np.int32)]
    )
    # Create the (empty) array on disk.
    tiledb.DenseArray.create(array_name, schema)
def write_array():
    """Populate the whole 4x4 array with values 1..16 in one row-major write."""
    # Open the array and write to it.
    with tiledb.DenseArray(array_name, mode="w") as A:
        # NOTE: global writes are not currently supported in the Python API.
        # The following code will produce the same array as the corresponding
        # C++ example in the docs (which wrote in global order)
        data = np.array(([1, 2, 5, 6], [3, 4, 7, 8], [9, 10, 13, 14], [11, 12, 15, 16]))
        A[:] = data
def read_array(order):
    """Read a slice of the array in the given cell order and print each cell.

    order: "C" (row-major), "F" (col-major) or "G" (global order).
    """
    # Open the array and read from it.
    with tiledb.DenseArray(array_name, mode="r") as A:
        # Get non-empty domain
        print("Non-empty domain: {}".format(A.nonempty_domain()))
        # Slice only rows 1, 2 and cols 2, 3, 4.
        # NOTE: The `query` syntax is required to get the coordinates for
        # dense arrays and specify an order other than the default row-major
        data = A.query(attrs=["a"], order=order, coords=True)[1:3, 2:5]
        a_vals = data["a"]
        coords = np.asarray(list(zip(data["rows"], data["cols"])))
        if order != "G" and a_vals.flags["F_CONTIGUOUS"]:
            print("NOTE: The following result array has col-major layout internally")
        if order != "G":
            for i in range(coords.shape[0]):
                for j in range(coords.shape[1]):
                    print(
                        "Cell {} has data {}".format(
                            str(coords[i, j]), str(a_vals[i, j])
                        )
                    )
        else:
            # When reading in global order, TileDB always returns a vector (1D array)
            for i in range(coords.shape[0]):
                print("Cell {} has data {}".format(str(coords[i]), str(a_vals[i])))
# Script entry: create/populate the array on first run, then read it back in
# the layout selected by the optional CLI argument ("col", "global", or
# anything else for the default row-major order).
# Check if the array already exists.
if tiledb.object_type(array_name) != "array":
    create_array()
    write_array()
layout = ""
if len(sys.argv) > 1:
    layout = sys.argv[1]
order = "C"
if layout == "col":
    order = "F"
elif layout == "global":
    order = "G"
else:
    order = "C"
read_array(order)
| TileDB-Inc/TileDB-Py | examples/reading_dense_layouts.py | reading_dense_layouts.py | py | 2,729 | python | en | code | 165 | github-code | 36 | [
{
"api_name": "tiledb.Domain",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tiledb.Dim",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tiledb.Dim",
"line_numb... |
32072082706 | from flask import Flask, request, jsonify
from sklearn.ensemble import GradientBoostingRegressor
import pickle
import matplotlib
import joblib
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from load_data import (
get_binance_dataframe,
get_bingx_dataframe,
get_bitget_dataframe,
get_tapbit_dataframe,
)
# Load the pre-trained gradient boosting regressor from disk at import time.
modelGB = joblib.load("gradient_boosting_model.pkl")
def predict():
    """Predict closing prices per exchange and return the merged results.

    For each exchange the latest market snapshot is loaded, the pre-trained
    model predicts the close from [time, volume, high, low, open, symbol],
    and predictions are merged with the actual closes (plus a diff column),
    indexed by symbol, into one combined DataFrame.

    Returns:
        dict: the combined result DataFrame as a plain dict (symbol-keyed).
    """
    exchanges = ["binance", "bitget", "bingx", "tapbit"]
    # Dispatch table replaces the original if/elif chain of loader calls.
    loaders = {
        "binance": get_binance_dataframe,
        "bitget": get_bitget_dataframe,
        "bingx": get_bingx_dataframe,
        "tapbit": get_tapbit_dataframe,
    }
    # Hoisted out of the loop: display option is process-global and invariant.
    pd.options.display.float_format = "{:.4f}".format
    predictions = {}
    for exchange in exchanges:
        print(exchange)
        exchange_df = loaders[exchange]()
        X = exchange_df[["time", "volume", "high", "low", "open", "symbol"]]
        actual = exchange_df[["symbol", "close"]]
        print("actual data:", actual)
        y_pred = modelGB.predict(X)
        labels = exchange_df["symbol"].values.tolist()
        organized_predictions = {label: price for label, price in zip(labels, y_pred)}
        predictions[exchange] = organized_predictions
        merged_predictions = pd.DataFrame.from_dict(
            predictions[exchange], orient="index", columns=[f"predicted_{exchange}"]
        )
        # Merge actual data with predictions based on symbol
        merged_predictions = merged_predictions.merge(
            actual, left_index=True, right_on="symbol", how="inner"
        )
        merged_predictions.rename(columns={"close": f"actual_{exchange}"}, inplace=True)
        merged_predictions.set_index("symbol", inplace=True)
        # Calculate difference between predicted and actual values
        merged_predictions[f"diff_{exchange}"] = (
            merged_predictions[f"predicted_{exchange}"]
            - merged_predictions[f"actual_{exchange}"]
        )
        predictions[exchange] = merged_predictions
    result_df = pd.concat(predictions.values(), axis=1)
    print(result_df)
    # BUG FIX: the original returned jsonify(...), but this module never
    # instantiates a Flask app, so jsonify() raised "Working outside of
    # application context" when predict() was called below as a plain
    # function. Return the plain dict instead; a route handler can wrap it
    # in jsonify() itself.
    return result_df.to_dict()
# Run one prediction pass immediately when the script is executed/imported.
predict()
| PhatcharaNarinrat/adamas-arbitrage | prediction.py | prediction.py | py | 2,279 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "joblib.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "load_data.get_binance_dataframe",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "load_data.get_bitget_dataframe",
"line_number": 28,
"usage_type": "call"
},
{
"api_na... |
36613114839 | import os
import enum
# Logging output directory for the interphone project.
LOG_DIR = "src_backend/Repport/"
# Path of the error-trace file.
ERROR_TRACE_FILE_PATH = os.path.join(LOG_DIR, 'Error.trace')
# File name for the general application log.
LOG_FILENAME = "APP_Window.log"
# Log record layout.
LOG_FORMAT = "%(asctime)s [%(levelname)s] - %(message)s" # log entry format
# Rotation settings: max size per file and number of rotated files to keep.
MAX_BYTES = 1024*1024
LOG_MAX_FILES = 4
class LogLevel(enum.Enum):
    """Supported logging severities for the application logger."""
    INFO = "INFO"
    DEBUG = "DEBUG"
    ERROR = "ERROR"
# Default logging level.
# Replace "DEBUG" with the level of your choice.
DEFAULT_LOG_LEVEL = LogLevel.DEBUG
# Cleanup/reset policy.
MAX_AGE_DAYS = 2 # maximum number of days to keep log files
| ClemGRob/InterPhoneVisiaScan | src_backend/constants_log.py | constants_log.py | py | 745 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "enum.Enum",
"line_number": 18,
"usage_type": "attribute"
}
] |
73424753064 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import re
import json
from importlib import import_module
from inspect import stack
from traceback import print_exc
from urllib.parse import unquote
from utils import *
from config import *
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def provider_metadata(metafile='metadata.json'):
    """Load the provider metadata JSON shipped with the VPN-providers repo.

    The git repo is (re)fetched first so the metadata is current. On any
    read/parse failure an empty dict is returned instead of raising.

    metafile: name of the metadata file inside VPN_PROFILES.
    """
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    try:
        metadata = json.loads(
            open(
                '{}/{}'.format(
                    VPN_PROFILES,
                    metafile
                )
            ).read()
        )
    except Exception as e:
        print(repr(e))
        if DEBUG: print_exc()
        metadata = dict()
    return metadata
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def load_provider_groups():
    """Return the 'provider_groups' entry from the provider metadata.

    Falls back to ['default'] when the key is missing or the metadata
    cannot be loaded.
    """
    result = ['default']
    try:
        result = provider_metadata()['provider_groups']
    except Exception as err:
        print(repr(err))
        if DEBUG: print_exc()
    return result
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def load_affiliate_links():
    """Return the 'affiliate_links' entry from the provider metadata.

    Falls back to an empty list when the key is missing or the metadata
    cannot be loaded.
    """
    result = []
    try:
        result = provider_metadata()['affiliate_links']
    except Exception as err:
        print(repr(err))
        if DEBUG: print_exc()
    return result
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def affiliate_link(provider=None):
    """Return the affiliate URL registered for *provider* (case-insensitive).

    The git repo is refreshed first. Falls back to the flashrouters.com
    homepage if the provider has no registered link (or on any error).
    """
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    links = load_affiliate_links()
    try:
        link = [
            el['link']
            for el in links
            if el['provider'].lower() == provider.lower()
        ][0]
    except Exception as e:
        print(repr(e))
        if DEBUG: print_exc()
        link = 'https://flashrouters.com'
    return link
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def provider_groups():
    """Return the sorted list of provider group names from the metadata.

    The git repo is refreshed first. Falls back to ['default'] (matching
    load_provider_groups' fallback) when the metadata cannot be read.
    """
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    try:
        groups = [
            pg['name']
            for pg in load_provider_groups()
        ]
    except Exception as e:
        print(repr(e))
        if DEBUG: print_exc()
        # BUG FIX: 'groups' was previously left unbound on this path, so any
        # metadata error turned into a NameError at groups.sort() below.
        groups = ['default']
    groups.sort()
    return groups
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def providers_by_group(group='default'):
    """Return the sorted provider names belonging to *group*.

    A group value of '*' in the metadata, a missing group, or any metadata
    error all fall back to every provider directory found under
    VPN_PROFILES (excluding '.git').

    group: URL-encoded group name; it is unquoted before matching.
    """
    group = unquote(group)
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    default_providers = [
        d for d in next(os.walk(VPN_PROFILES))[1]
        if d not in ['.git']
    ]
    try:
        providers = [
            pg['value']
            for pg in load_provider_groups()
            if group == pg['name']
        ][0]
        if '*' in providers: providers = default_providers
    except Exception as e:
        print(repr(e))
        if DEBUG: print_exc()
        providers = default_providers
        pass
    providers.sort()
    return providers
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def location_groups_by_provider(provider='VPNArea', metafile='METADATA.txt'):
    """Return the sorted location-group names for *provider*.

    Resolution order: (1) a provider-specific Python module exposing
    Provider.get_location_groups(); (2) 'LOCATIONS*' lines parsed from the
    provider's METADATA.txt; (3) ['default'] as last resort.
    """
    provider = unquote(provider)
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    try:
        mod = import_module(provider.lower())
        p = mod.Provider()
        if '__disabled__' in dir(p): assert p.__disabled__ == False
        assert mod and 'Provider' in dir(mod) and 'get_location_groups' in dir(mod.Provider)
        location_groups = p.get_location_groups()
        assert location_groups
        return location_groups
    except:
        try:
            metadata = open(
                '{}/{}/{}'.format(
                    VPN_PROFILES,
                    provider,
                    metafile
                )
            ).read()
        except Exception as e:
            print(repr(e))
            if DEBUG: print_exc()
        # NOTE(review): if the open() above failed, 'metadata' is unbound and
        # the comprehension below raises NameError -- which the except branch
        # then (accidentally) turns into the ['default'] fallback.
        try:
            location_groups = [
                ' '.join(x.split('.')[0].split()[1:])
                for x in metadata.split('\n')
                if x.startswith('LOCATIONS')
            ]
            assert ''.join(location_groups)
        except Exception as e:
            print(repr(e))
            if DEBUG: print_exc()
            location_groups = ['default']
    location_groups.sort()
    return location_groups
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def locations_by_provider(
    provider='VPNArea',
    group='default',
    sort=None,
    lat=None,
    lon=None
):
    """Return the server locations for *provider* within *group*.

    Resolution order: (1) a provider-specific Python module exposing
    Provider.get_locations(); (2) the group's 'LOCATIONS*.txt' CSV file
    parsed into dicts with keys name/ipaddr/proto/port/extra (plus 'value');
    (3) the provider's .ovpn file names as name/value pairs.

    sort/lat/lon are passed through to the provider module only.
    Returns a list of dicts sorted by 'name'.
    """
    provider = unquote(provider)
    group = unquote(group)
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    try:
        mod = import_module(provider.lower())
        p = mod.Provider()
        if '__disabled__' in dir(p): assert p.__disabled__ == False
        assert mod and 'Provider' in dir(mod) and 'get_locations' in dir(mod.Provider)
        locations = p.get_locations(group=group, sort=sort, lat=lat, lon=lon)
        assert locations
        if DEBUG: print("'locations='{}'".format(locations))
        return locations
    except Exception as e:
        if DEBUG: print_exc()
        if group == 'default':
            locfile = 'LOCATIONS.txt'
        else:
            locfile = 'LOCATIONS {}.txt'.format(group)
        try:
            locdata = open(
                '{}/{}/{}'.format(
                    VPN_PROFILES,
                    provider,
                    locfile
                )
            ).read()
            locations = [
                dict(
                    zip(
                        [
                            'name',
                            'ipaddr',
                            'proto',
                            'port',
                            'extra'
                        ],
                        l.strip().split(',')
                    )
                ) for l in locdata.split('\n') if l
            ]
            for loc in locations:
                loc['value'] = loc['name']
        except:
            # Last resort: list the provider's .ovpn profile files.
            locations = [
                dict(zip(['name', 'value'], [f, f]))
                for f in next(
                    os.walk(
                        '{}/{}'.format(
                            VPN_PROFILES,
                            provider
                        )
                    )
                )[2]
                if f.split('.')[-1] == 'ovpn']
    locations = sorted(locations, key=lambda k: k['name'])
    return locations
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def client_cert_required(
    provider='VPNArea',
    metafile='METADATA.txt',
    tmplfile='TEMPLATE.txt'
):
    """Return True when the user must supply a client certificate/key.

    True only if the provider's OpenVPN template references USERCERT or
    USERKEY and the user cert or key file is not already bundled with the
    provider profile. Any error yields False (no client cert required).
    """
    provider = unquote(provider)
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    regex = re.compile('USERCERT|USERKEY')
    required = False
    try:
        metadata = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                metafile
            )
        ).read()
        # The METADATA file names the actual template file to inspect.
        tmplfile = [
            x for x in metadata.split('\n')
            if x.startswith('TEMPLATE')
        ][0]
        tmpl = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                tmplfile
            )
        ).read()
        cert = get_user_cert_contents(
            metadata=metadata,
            provider=provider
        )
        key = get_user_key_contents(
            metadata=metadata,
            provider=provider
        )
        assert (not cert or not key) and bool(regex.search(tmpl))
        required = True
    except Exception as e:
        print(repr(e))
        if DEBUG: print_exc()
    return required
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def get_user_cert_contents(metadata=None, provider=None):
    """Return the user certificate text for *provider*, or None.

    The certificate filename is the first METADATA line that starts with
    'user' and ends with 'crt'; any lookup or read failure yields None.
    """
    try:
        provider = unquote(provider)
        candidates = [
            line for line in metadata.split('\n')
            if line.startswith('user') and line.endswith('crt')
        ]
        path = '{}/{}/{}'.format(VPN_PROFILES, provider, candidates[0])
        return open(path).read()
    except:
        return None
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def get_user_key_contents(metadata=None, provider=None):
    """Return the user private-key text for *provider*, or None.

    Mirrors get_user_cert_contents(): the key filename is the first
    METADATA line starting with 'user' and ending with 'key'; any lookup
    or read failure yields None.
    """
    # Consistency fix: dropped the redundant 'key = None' pre-initialization
    # (the except branch already guarantees the fallback), matching the
    # structure of the sibling get_user_cert_contents().
    try:
        provider = unquote(provider)
        keyfile = [
            x for x in metadata.split('\n')
            if x.startswith('user')
            and x.endswith('key')
        ][0]
        key = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                keyfile
            )
        ).read()
    except:
        key = None
    return key
@retry(Exception, cdata='method={}'.format(stack()[0][3]))
def generate_ovpn_profile(
    provider='VPNArea',
    metafile='METADATA.txt',
    tmplfile='TEMPLATE.txt',
    group='default',
    name='USA - Los Angeles (UDP)'
):
    """Render a complete, self-contained .ovpn profile for one location.

    Loads the provider's template plus any bundled ca/cert/key/tls-auth/crl
    files, substitutes the #SERVER/#PORT/#PROTO placeholders for the chosen
    location, applies per-location '#REMOVE' directives and extra key=value
    substitutions, and in-lines all referenced certificate/key files as
    <tag>...</tag> blocks so the resulting profile has no file dependencies.

    All parameters are URL-encoded strings and are unquoted on entry.
    Returns the profile text with empty lines stripped.
    """
    provider = unquote(provider)
    group = unquote(group)
    name = unquote(name)
    if DEBUG: print("provider='{}' group='{}' name='{}'".format(
        provider,
        group,
        name
    ))
    fetch_git_repo(
        dir=VPN_PROVIDERS_GIT_DIR,
        url=VPN_PROVIDERS_GIT_URL,
        tag=VPN_PROVIDERS_GIT_TAG
    )
    # Best-effort loads: each artifact independently falls back to None.
    try:
        metadata = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                metafile
            )
        ).read()
    except:
        metadata = None
    try:
        tmplfile = [
            x for x in metadata.split('\n') if x.startswith('TEMPLATE')
        ][0]
        tmpl = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                tmplfile
            )
        ).read()
    except:
        tmpl = None
    try:
        cafile = [
            x for x in metadata.split('\n')
            if x.startswith('ca') and x.endswith('crt')
        ][0]
        ca = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                cafile
            )
        ).read()
    except:
        ca = None
    try:
        cert = get_user_cert_contents(
            metadata=metadata,
            provider=provider
        )
    except:
        cert = None
    try:
        key = get_user_key_contents(
            metadata=metadata,
            provider=provider
        )
    except:
        key = None
    try:
        tafile = [
            x for x in metadata.split('\n') if x.startswith('ta') and x.endswith('key')
        ][0]
        ta = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                tafile
            )
        ).read()
    except:
        ta = None
    try:
        crlfile = [
            x for x in metadata.split('\n') if x.startswith('crl') and x.endswith('pem')
        ][0]
        crl = open(
            '{}/{}/{}'.format(
                VPN_PROFILES,
                provider,
                crlfile
            )
        ).read()
    except:
        crl = None
    # Resolve the requested location (server address, protocol, port and
    # optional per-location 'extra' key=value substitutions).
    try:
        location = [
            loc for loc in locations_by_provider(
                group=group,
                provider=provider
            )
            if loc['name'] == name
        ][0]
        ipaddr = location['ipaddr'].strip()
        proto = location['proto'].strip()
        port = location['port'].strip()
        try:
            extras = [
                dict(
                    zip(
                        ['key', 'value'],
                        l
                    )
                ) for l in [
                    el.split('=') for el in location['extra'].split()
                ]
            ]
            if DEBUG: print('extras: {}'.format(extras))
        except:
            extras = None
    except:
        if DEBUG: print_exc()
        # provider with .ovpn profiles (e.g. NordVPN and LimeVPN)
        # NOTE(review): if the location lookup itself raised, 'location' is
        # unbound here and location.keys() raises NameError -- confirm this
        # path is only reached for locations lacking an 'ipaddr' key.
        if 'ipaddr' not in location.keys():
            try:
                tmpl = open(
                    '{}/{}/{}'.format(
                        VPN_PROFILES,
                        provider,
                        location['name']
                    )
                ).read()
            except:
                if DEBUG: print_exc()
    try:
        tmpl = tmpl.replace('#PROTO', proto)
        tmpl = tmpl.replace('#SERVPROT', proto)
        tmpl = tmpl.replace('#SERVER', ipaddr)
        tmpl = tmpl.replace('#PORT', port)
    except:
        if DEBUG: print_exc()
    # remove directives flagged by '#REMOVE' extras, then apply the
    # remaining key=value substitutions line by line
    tmpl = tmpl.splitlines()
    try:
        for extra in extras:
            if extra['key'] == '#REMOVE':
                for val in [i for i in extra['value']]:
                    tmpl = [
                        line for line in tmpl if not bool(
                            re.search('^#REMOVE{}'.format(val), line)
                        )]
                extras.remove(extra)
        for extra in extras:
            tmpl = [line.replace(extra['key'], extra['value']) for line in tmpl]
    except:
        if DEBUG: print_exc()
    tmpl = '\n'.join(tmpl)
    tmpl = tmpl.replace('#PATHuser.crt', '#USERCERT')
    tmpl = tmpl.replace('#PATHuser.key', '#USERKEY')
    tmpl = tmpl.replace('#PASS', '')
    if cert: tmpl = tmpl.replace(
        'cert #USERCERT', '<cert>\n{}\n</cert>\n'.format(
            cert
        )
    )
    if key: tmpl = tmpl.replace(
        'key #USERKEY', '<key>\n{}\n</key>\n'.format(
            key
        )
    )
    tmpl = tmpl.splitlines()
    # remove remaining tags
    regex = re.compile('^(#REMOVE\d{1})(.*)$')
    temp = list()
    for line in tmpl:
        if regex.search(line):
            temp.append(regex.search(line).groups()[1])
        else:
            temp.append(line)
    tmpl = temp
    # de-compress tls-auth and key-direction
    regex = re.compile('^tls-auth #TLSKEY (\d{1})$')
    temp = list()
    for line in tmpl:
        if regex.search(line):
            temp.append('<tls-auth>\n{}\n</tls-auth>\n'.format(ta))
            temp.append(
                'key-direction {}\n'.format(
                    regex.search(line).groups()[0]
                )
            )
        else:
            temp.append(line)
    tmpl = temp
    # in-line tls-key
    regex = re.compile('^tls-auth #TLSKEY$')
    temp = list()
    for line in tmpl:
        if regex.search(line):
            temp.append('<tls-auth>\n{}\n</tls-auth>\n'.format(ta))
        else:
            temp.append(line)
    tmpl = temp
    # in-line all other keys: replace 'ca foo.crt'-style directives with the
    # referenced file's contents wrapped in the matching <tag> block
    temp = list()
    for line in tmpl:
        if line.split(' ')[0] in [
            'ca',
            'crl-verify',
            'tls-auth',
            'key',
            'cert'
        ]:
            fdata = None
            try:
                fdata = open(
                    '{}/{}/{}'.format(
                        VPN_PROFILES,
                        provider,
                        line.split(' ')[1].replace('"', '').replace("'", '')
                    )
                ).read()
            except Exception as e:
                if DEBUG: print_exc()
                temp.append(line)
            if fdata:
                temp.append(
                    '<{}>\n{}\n</{}>\n'.format(
                        line.split(' ')[0],
                        fdata,
                        line.split(' ')[0]
                    )
                )
        else:
            temp.append(line)
    # remove superfluous directives
    # NOTE(review): the filtered list is assigned to 'tmpl' but the very next
    # line joins 'temp', so this 'dev tun' filter result is discarded --
    # confirm whether that is intentional.
    for regex in ['^dev tun[\d]+']:
        tmpl = [line for line in tmpl if not bool(re.search(regex, line))]
    tmpl = '\n'.join(temp)
    # final sweep for providers with only one ca cert
    if ca: tmpl = tmpl.replace(
        'ca #CERT', '<ca>\n{}\n</ca>\n'.format(
            ca
        )
    )
    if crl: tmpl = tmpl.replace(
        'crl-verify #CRLVERIFY',
        '<crl-verify>\n{}\n</crl-verify>\n'.format(crl))
    # drop empty lines and normalise the line separator for the platform
    return '{}\n'.format(
        os.linesep.join(
            [
                s for s in tmpl.splitlines() if s
            ]
        )
    )
| belodetek/unzoner-api | src/vpns.py | vpns.py | py | 16,340 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "inspect.stack",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
... |
11612639350 | # -*- coding:utf-8 -*-
# ==========================================
# author: ZiChen
# mail: 1538185121@qq.com
# time: 2021/05/03
# 歌词下载脚本
# ==========================================
# 请求及数据处理库
import re
from urllib import request
import json
import traceback
import os
# 本地API
import QQMusicAPI # 本地QQ音乐API
# 输出格式设置
from datetime import datetime
version = '0.2.0'
# 更新日志
# 2021/06/20 🔧更改程序架构,优化程序执行顺序,
# 2021/06/19 🎵增加对QQ音乐单曲歌词下载支持
def urlProcessing(songUrl):
    '''
    Process the given song link and dispatch to the matching handler.

    songUrl: a song/playlist URL string (NetEase Cloud Music or QQ Music),
    or a list of [author, title, id] triples when batch-downloading the
    lyrics of a playlist.

    NOTE(review): on errors every branch retries by recursing into
    urlProcessing with no depth limit — confirm this retry design.
    '''
    Log = '[{levelname}] - {funcName} - '.format(levelname='DEBUG',
                                                funcName='urlProcessing')
    Log_ERROR = '[{levelname}] - {funcName} - '.format(levelname='ERROR',
                                                      funcName='urlProcessing')
    if type(songUrl) == list:  # a list means playlist songs to batch-download
        Type = 'PlayList_download'
    # Identify the song/playlist source by analysing the link
    elif type(songUrl) == str:
        # 2021/06/20 first decide between single-song and playlist handling
        Type = 'Song_PlayListCheck'
        if re.search(r'music.163.com/song', songUrl) != None:  # NetEase single
            Type = Type + '|Netease_Song'
        elif re.search(r'music.163.com/#/playlist|music.163.com/playlist', songUrl) != None:  # NetEase playlist
            Type = Type + '|Netease_PlayList_check'
        elif re.search(r'y.qq.com/n/ryqq/songDetail', songUrl) != None:  # 2021/06/19 QQ Music single
            Type = Type + '|QQ_Music_Song'
    if Type.split('|')[0] == 'Song_PlayListCheck':  # 2021/06/20 confirmed song fetch
        # Keep only the platform tag for the follow-up handling
        Type = Type.split('|')[-1]
        if Type == 'QQ_Music_Song':  # QQ Music: use the local API for lyrics and song info
            print(str(datetime.today()).split(' ')[
                1].split('.')[0]+Log+'识别到QQ音乐歌曲')
            print(str(datetime.today()).split(' ')[
                1].split('.')[0]+Log+'正在处理链接...')
            # 2021/06/19 for QQ Music singles the mid is the last URL segment
            songID = songUrl.split('/')[-1]
            print(str(datetime.today()).split(' ')[
                1].split('.')[0]+Log+'已获取歌曲id:%s' % songID)
            try:
                # Get "artist - title", used when naming the lyric file
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在获取歌曲信息...')
                myjson_detail = QQMusicAPI.Details_GET(songID)
                # Pull the song name and performer(s) from the dict
                songName = myjson_detail['name']
                # Multiple performers come back as one '/'-separated string
                songAuthor = myjson_detail['ar']
                # '/' in performer names would break the file name, so it is
                # replaced with ','; this may stop players matching the lyric
                # file, so the user is told to rename it manually
                if bool(re.search(r'[/]', songAuthor)) == True:
                    print(str(datetime.today()).split(' ')[1].split(
                        '.')[0]+Log_ERROR+'%s 【歌曲名称错误!下载歌词文件后请自行更改歌词文件名!】' % songAuthor)
                songAuthor = songAuthor.replace('/', ',')
                songDetail = '%s - %s' % (songAuthor, songName)
                print(str(datetime.today()).split(' ')[1].split(
                    '.')[0]+Log+'已获取歌曲信息: %s\n' % songDetail)
                # Fetch the lyric text
                print(str(datetime.today()).split(' ')
                      [1].split('.')[0]+Log+'发送请求中...')
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在处理接受的数据...')
                # Pull the lyric text from the dict
                lyrics = QQMusicAPI.Lyrics_GET(songID)['lyric']
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'数据处理完毕,已取得歌词文本√\n')
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在将歌词写入文件...')
                with open('./%s.lrc' % songDetail, 'w', encoding='utf-8') as f:
                    f.write(lyrics)
                print(str(datetime.today()).split(' ')
                      [1].split('.')[0]+Log+'已保存歌词文件√\n')
                # Return an arbitrary truthy value
                return True
            except:
                traceback.print_exc()
                print(str(datetime.today()).split(' ')
                      [1].split('.')[0]+Log+'错误!正在重试...\n')
                urlProcessing(songUrl)
        else:  # QQ Music can't be fetched via plain GET; NetEase below uses HTTP
            if Type == 'Netease_Song':  # NetEase Cloud
                patternID = re.compile(r'[id=]\d+[&]')  # find the numeric id
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'识别到网易云音乐歌曲')
                songID = re.sub(r'[=]|[&]', '', patternID.findall(songUrl)[0])
                # NetEase Cloud Music lyric api
                neteaseApiUrl_lyric = 'https://zichen-cloud-music-api.vercel.app/lyric?id=%s&realIP=116.25.146.177' % songID
                # NetEase Cloud Music song-detail api
                neteaseApiUrl_detail = 'https://zichen-cloud-music-api.vercel.app/song/detail?ids=%s' % songID
            try:
                # Get "artist - title", used when naming the lyric file
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在获取歌曲信息...')
                if Type == 'Netease_Song':  # NetEase Cloud
                    req_detail = request.Request(url=neteaseApiUrl_detail)
                    res_detail = request.urlopen(req_detail)
                    # Get the response's JSON string
                    str_json_detail = res_detail.read().decode('utf-8')
                    # Convert the JSON into a dict
                    myjson_detail = json.loads(str_json_detail)
                # Pull the song name and performer(s) from the dict
                if Type == 'Netease_Song':  # NetEase Cloud
                    songName = myjson_detail['songs'][0]['name']
                    # Multiple performers: collect in a list, join at the end
                    songAuthorLst = []
                    for i in myjson_detail['songs'][0]['ar']:
                        songAuthorLst.append(i['name'])
                        # '/' in performer names would break the file name, so
                        # it is replaced with ','; this may stop players
                        # matching the lyric file, so the user is warned
                        if bool(re.search(r'[/]', i['name'])) == True:
                            print(str(datetime.today()).split(' ')[1].split(
                                '.')[0]+Log_ERROR+'%s 【歌曲名称错误!下载歌词文件后请自行更改歌词文件名!】' % i['name'])
                    songAuthor = re.sub(
                        r'[/]', ',', ','.join(songAuthorLst))
                songDetail = '%s - %s' % (songAuthor, songName)
                print(str(datetime.today()).split(' ')[1].split(
                    '.')[0]+Log+'已获取歌曲信息: %s\n' % songDetail)
                # Fetch the lyric text
                print(str(datetime.today()).split(' ')
                      [1].split('.')[0]+Log+'发送请求中...')
                if Type == 'Netease_Song':  # NetEase Cloud
                    req_lyric = request.Request(url=neteaseApiUrl_lyric)
                    res_lyric = request.urlopen(req_lyric)
                    print(str(datetime.today()).split(' ')
                          [1].split('.')[0]+Log+'已接收数据√')
                    # Get the response's JSON string
                    str_json_lyric = res_lyric.read().decode('utf-8')
                    # Convert the JSON into a dict
                    myjson_lyric = json.loads(str_json_lyric)
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在处理接受的数据...')
                # Pull the lyric text from the dict
                if Type == 'Netease_Song':  # NetEase Cloud
                    lyrics = myjson_lyric['lrc']['lyric']
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'数据处理完毕,已取得歌词文本√\n')
                # print(lyrics+'\n')
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在将歌词写入文件...')
                with open('./%s.lrc' % songDetail, 'w', encoding='utf-8') as f:
                    f.write(lyrics)
                print(str(datetime.today()).split(' ')
                      [1].split('.')[0]+Log+'已保存歌词文件√\n')
                # Return an arbitrary truthy value
                return True
            except:
                traceback.print_exc()
                print(str(datetime.today()).split(' ')
                      [1].split('.')[0]+Log+'错误!正在重试...\n')
                urlProcessing(songUrl)
    # NOTE(review): after the split above, Type is 'Netease_PlayList_check',
    # never 'PlayList_check', and the body below is additionally guarded by
    # `if Type == 'Netease_Song'` — this playlist-inspection branch looks
    # unreachable as written; confirm intent.
    elif Type == 'PlayList_check':
        # Inspect the playlist and return its track details
        try:
            if Type == 'Netease_Song':  # NetEase Cloud
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'识别到网易云音乐歌单')
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在处理链接...')
                patternID = re.compile(r'[id=]\d+[&]')  # find the numeric id
                playListID = re.sub(
                    r'[=]|[&]', '', patternID.findall(songUrl)[0])
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'已获取歌单id:%s' % playListID)
                limit = 1001  # cap on playlist song-info fetches (NOTE(review): currently unused)
                # NetEase Cloud Music playlist-detail api
                neteaseApiUrl_playList = 'https://zichen-cloud-music-api.vercel.app/playlist/detail?id=%s' % playListID
                print(str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'正在向:[%s] 获取歌单信息...' % neteaseApiUrl_playList)
                # Add a request header
                header = {
                    "User-Agent": "mozilla/4.0 (compatible; MSIE 5.5; Windows NT)",
                }
                req_playList = request.Request(
                    url=neteaseApiUrl_playList, headers=header)
                res_playList = request.urlopen(req_playList)
                # Get the response's JSON string
                str_json_playList = res_playList.read().decode('utf-8')
                # Convert the JSON into a dict
                myjson_playList = json.loads(str_json_playList)
                # Collect every song's title and performer(s) one by one
                songList = []
                # Counter used to display progress
                start_num = 0
                total_num = len(
                    myjson_playList["playlist"]["trackIds"])  # total songs in the playlist
                # While logged out the full track list is not returned, but
                # trackIds is complete, so each id is requested individually
                # (slow but reliable)
                for songTotal in myjson_playList["playlist"]["trackIds"]:
                    songID = songTotal['id']  # the song id
                    # NetEase Cloud Music lyric api
                    neteaseApiUrl_lyric = 'https://zichen-cloud-music-api.vercel.app/lyric?id=%s&realIP=116.25.146.177' % songID
                    # NetEase Cloud Music song-detail api
                    neteaseApiUrl_detail = 'https://zichen-cloud-music-api.vercel.app/song/detail?ids=%s' % songID
                    req_detail = request.Request(url=neteaseApiUrl_detail)
                    res_detail = request.urlopen(req_detail)
                    # Get the response's JSON string
                    str_json_detail = res_detail.read().decode('utf-8')
                    # Convert the JSON into a dict
                    myjson_detail = json.loads(str_json_detail)
                    # Pull the song name and performer(s) from the dict
                    # Tip: names may contain \xa0 non-breaking spaces, so
                    # join+split is used to strip them out
                    songName = "" .join(
                        myjson_detail['songs'][0]['name'].split())
                    # Multiple performers: collect in a list, join at the end
                    songAuthorLst = []
                    for i in myjson_detail['songs'][0]['ar']:
                        songAuthorLst.append(i['name'])
                        # '/' in performer names would break the file name, so
                        # it is replaced with ','; the user is warned to rename
                        if bool(re.search(r'[/]', i['name'])) == True:
                            print(str(datetime.today()).split(' ')[1].split(
                                '.')[0]+Log_ERROR+'%s 【歌曲名称错误!下载歌词文件后请自行更改歌词文件名!】' % i['name'])
                    songAuthor = re.sub(
                        r'[/]', ',', ','.join(songAuthorLst))
                    # Store each song as [author, title, id], giving:
                    # [(song 1), (song 2), ...]
                    songList.append([songAuthor, songName, str(songID)])
                    # Show progress, overwriting the previous console line
                    start_num += 1
                    print('\r歌单歌曲读取已完成(%s/%s)' %
                          (start_num, total_num), end='')
                print('\n'+str(datetime.today()).split(' ')[
                    1].split('.')[0]+Log+'已获取歌单信息√\n')
                for i in songList:
                    print('%s - %s - ID:%s' % (i[0], i[1], i[2]))
                print('\n'+'-'*15)
                return songList
        except:  # retry on error
            traceback.print_exc()
            print(str(datetime.today()).split(' ')
                  [1].split('.')[0]+Log+'错误!正在重试...\n')
            urlProcessing(songUrl)
    elif Type == 'PlayList_download':
        # Batch download of playlist lyrics; songUrl holds the song entries
        print(str(datetime.today()).split(' ')[
            1].split('.')[0]+Log+'正在启动批量下载模块...')
        # Counter used to display progress
        start_num = 0
        total_num = len(songUrl)  # total songs in the playlist
        # Unpack each entry first
        for songLst in songUrl:
            songDetail = '%s - %s' % (songLst[0], songLst[1])
            songID = songLst[2]
            # print('songID:%s' % songID)
            # print('songLst=%s\n' % songLst)
            start_num += 1
            # Start the download
            # NetEase Cloud Music lyric api
            neteaseApiUrl_lyric = 'https://zichen-cloud-music-api.vercel.app/lyric?id=%s&realIP=116.25.146.177' % songID
            # print(neteaseApiUrl_lyric)
            # After an error the loop restarts; skip files already saved to
            # improve efficiency and avoid repeated requests
            if os.path.exists('./%s.lrc' % songDetail) == True:
                pass
            else:
                try:
                    # Fetch the lyric text
                    req_lyric = request.Request(url=neteaseApiUrl_lyric)
                    res_lyric = request.urlopen(req_lyric)
                    # Get the response's JSON string
                    str_json_lyric = res_lyric.read().decode('utf-8')
                    # Convert the JSON into a dict
                    myjson_lyric = json.loads(str_json_lyric)
                    # Pull the lyric text from the dict
                    lyrics = myjson_lyric['lrc']['lyric']
                    with open('./%s.lrc' % songDetail, 'w', encoding='utf-8') as f:
                        f.write(lyrics)
                    print('\r已下载(%s/%s)' % (start_num, total_num), end='')
                    if start_num == total_num:  # announce completion
                        print('\n'+str(datetime.today()).split(' ')
                              [1].split('.')[0]+Log+'歌单歌曲歌词下载完毕√')
                except:
                    # traceback.print_exc()
                    print(str(datetime.today()).split(' ')
                          [1].split('.')[0]+Log+'{%s}下载错误!\n已跳过出错的歌曲链接\n' % songDetail)
                    # Drop the entry that failed
                    # print('songUrl=%s\n' % songUrl)
                    del songUrl[start_num-1]
                    # print('songUrl=%s\n' % songUrl)
                    # print(type(songUrl))
            # NOTE(review): this check sits INSIDE the for loop, so every
            # non-final iteration recurses into urlProcessing — confirm this
            # is the intended retry mechanism.
            if start_num == total_num:  # announce completion
                print('\n'+str(datetime.today()).split(' ')
                      [1].split('.')[0]+Log+'歌单歌曲歌词下载完毕√')
            else:
                urlProcessing(songUrl)
| Zichen3317/demo18-lyricsDownloader | fc_lyricsDownloader.py | fc_lyricsDownloader.py | py | 17,698 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.search",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_num... |
23210851418 | from config import Config
import requests, json
from app.models import news_article, news_source
MOVIE_API_KEY = Config.API_KEY
News_Article = news_article.Article
News_Source = news_source.Source
def configure_request(app):
    """Cache the Flask app's API key in a module-level global.

    NOTE(review): the ``api_key`` global set here is not read by the
    functions in this module, which use ``MOVIE_API_KEY`` instead —
    confirm whether this hook is still needed.
    """
    global api_key
    api_key = app.config['API_KEY']
def get_news():
    """Fetch all articles from the NewsAPI 'everything' endpoint.

    Returns:
        list: one News_Article per entry in the response's 'articles' list.

    Raises:
        KeyError: if the response has no 'articles' key (e.g. an API error
            payload), matching the previous behaviour.
    """
    # Pass the query and key via params so requests URL-encodes them,
    # instead of interpolating them raw into the query string.
    response = requests.get(
        'https://newsapi.org/v2/everything',
        params={'q': 'all', 'apiKey': MOVIE_API_KEY},
    ).json()
    return [
        News_Article(item['source'], item['author'], item['title'],
                     item['description'], item['urlToImage'],
                     item['url'], item['publishedAt'])
        for item in response['articles']
    ]
def get_news_sources():
    """Fetch the list of news sources from the NewsAPI sources endpoint.

    Returns:
        list: one News_Source per entry in the response's 'sources' list.

    Raises:
        KeyError: if the response has no 'sources' key (e.g. an API error
            payload), matching the previous behaviour.
    """
    # params= URL-encodes the API key instead of raw string interpolation.
    response = requests.get(
        'https://newsapi.org/v2/top-headlines/sources',
        params={'apiKey': MOVIE_API_KEY},
    ).json()
    return [News_Source(source['id'], source['name'])
            for source in response['sources']]
def get_news_from_source(source):
    """Fetch articles matching *source* from the NewsAPI 'everything' endpoint.

    Args:
        source: search term (source id/name) to query for.

    Returns:
        list: one News_Article per entry in the response's 'articles' list.

    Raises:
        KeyError: if the response has no 'articles' key, matching the
            previous behaviour.
    """
    # params= URL-encodes *source* (spaces, '&', etc.), which the previous
    # raw string interpolation did not.
    response = requests.get(
        'https://newsapi.org/v2/everything',
        params={'q': source, 'apiKey': MOVIE_API_KEY},
    ).json()
    return [
        News_Article(item['source'], item['author'], item['title'],
                     item['description'], item['urlToImage'],
                     item['url'], item['publishedAt'])
        for item in response['articles']
    ]
| Joshua-Barawa/news-app | app/requests.py | requests.py | py | 1,570 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.Config.API_KEY",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "config.Config",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "app.models.news_article.Article",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_na... |
70891405863 | from flask import Flask, render_template, request
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
import torch
from PIL import Image
import io
import base64
app = Flask(__name__)
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the upload form; on POST, caption the uploaded image.

    Returns the template with either an error message or the base64-encoded
    image plus its generated captions.
    """
    if request.method == 'POST':
        # Check if a file was uploaded
        if 'image' not in request.files:
            return render_template('index.html', error='No image uploaded')

        file = request.files['image']

        # Check if the file has a valid extension
        if file.filename == '':
            return render_template('index.html', error='No image selected')

        if file and allowed_file(file.filename):
            import os
            # basename() guards against path traversal through a crafted
            # client-supplied filename (e.g. "../../x.png").
            safe_name = os.path.basename(file.filename)
            # Ensure the upload directory exists before saving.
            os.makedirs('uploads', exist_ok=True)
            file_path = 'uploads/' + safe_name
            file.save(file_path)

            # Generate captions using the uploaded image
            captions = predict_step([file_path])

            # Re-encode the image as base64 PNG so the template can inline it.
            image = Image.open(file_path)
            image_data = io.BytesIO()
            image.save(image_data, format='PNG')
            image_base64 = base64.b64encode(image_data.getvalue()).decode('utf-8')

            return render_template('index.html', image=image_base64, captions=captions)
        else:
            return render_template('index.html', error='Invalid file type')

    return render_template('index.html')
def allowed_file(filename):
    """Return True when *filename* carries an accepted image extension."""
    permitted = {'png', 'jpg', 'jpeg'}
    parts = filename.rsplit('.', 1)
    return len(parts) == 2 and parts[1].lower() in permitted
def predict_step(image_paths):
    """Generate caption candidates for each image path with the ViT-GPT2 model."""
    rgb_images = []
    for path in image_paths:
        img = Image.open(path)
        # The feature extractor expects RGB input.
        rgb_images.append(img if img.mode == "RGB" else img.convert(mode="RGB"))

    pixel_values = feature_extractor(images=rgb_images, return_tensors="pt").pixel_values
    pixel_values = pixel_values.to(device)

    output_ids = model.generate(pixel_values, num_return_sequences=3, **gen_kwargs)

    decoded = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    return [caption.strip() for caption in decoded]
if __name__ == '__main__':
app.run(debug=True) | AtchayaPraba/Listed-Inc-image-captioning | app.py | app.py | py | 2,759 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "transformers.VisionEncoderDecoderModel.from_pretrained",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "transformers.VisionEncoderDecoderModel",
"line_number": 10,
"usage_type... |
15589484398 | import requests
from lxml import etree
import os
'''if __name__=='__main__':
try:
url='https://pic.netbian.com/4kmeinv/'
headers={'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'}
response=requests.get(url=url,headers=headers)
response.raise_for_status()
response.encoding=response.apparent_encoding
response_text=response.text
print(response_text)
except:
print('网络连接异常')'''
import requests
from lxml import etree
import os
if __name__=='__main__':
    # Landing page of the 4K gallery category being scraped.
    url='https://pic.netbian.com/4kmeinv/'
    headers={'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'}
    response=requests.get(url=url,headers=headers)
    # The response encoding can be overridden manually if needed:
    #response.encoding='gbk'
    page_text=response.text
    print(page_text)
    tree=etree.HTML(page_text)
    list_li=tree.xpath('//div[@class="slist"]//li')
    print(list_li)
    # Create the output folder
    if not os.path.exists('./piclibs'):
        os.mkdir('./piclibs')
    for li in list_li:
        img_src='https://pic.netbian.com'+li.xpath('./a/img/@src')[0]
        img_name=li.xpath('./a/img/@alt')[0]+'.jpg'
        # Generic fix for garbled Chinese text (note the reassignment):
        # re-encode the mis-decoded latin-1 bytes and decode them as GBK.
        img_name=img_name.encode('iso-8859-1').decode('gbk')
        #print(img_name,img_src)
        # Request the image bytes and persist them to disk
        img_data=requests.get(url=img_src,headers=headers).content
        img_path='piclibs/'+img_name
        with open(img_path,'wb') as fp:
            fp.write(img_data)
        print(img_name,'下载成功!')
| BrotherIsHere/pythonProject | 7.xpath解析案例-下载图片数据.py | 7.xpath解析案例-下载图片数据.py | py | 1,746 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_nu... |
28231150156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import netCDF4
from utils import *
def write_jules_overbank_props_1d(overbank_fn, overbank_maps, grid_dim_name):
nco = netCDF4.Dataset(overbank_fn, 'w', format='NETCDF4')
mask = LAND_FRAC > 0.
nland = mask.sum()
for key, value in overbank_maps.items():
overbank_maps[key] = value.transpose()[mask.transpose()]
nco.createDimension(grid_dim_name, nland)
var = nco.createVariable(
'logn_mean', 'f8', (grid_dim_name,), fill_value=F8_FILLVAL
)
var.units = 'ln(m)'
var.standard_name = 'logn_mean'
var[:] = overbank_maps['logn_mean']
var = nco.createVariable(
'logn_stdev', 'f8', (grid_dim_name,), fill_value=F8_FILLVAL
)
var.units = 'ln(m)'
var.standard_name = 'logn_stdev'
var[:] = overbank_maps['logn_stdev']
nco.close()
def write_jules_overbank_props_2d(overbank_fn, overbank_maps, x_dim_name, y_dim_name):
nco = netCDF4.Dataset(overbank_fn, 'w', format='NETCDF4')
nco = add_lat_lon_dims_2d(nco, x_dim_name, y_dim_name)
var = nco.createVariable(
'logn_mean', 'f8', (y_dim_name, x_dim_name),
fill_value=F8_FILLVAL
)
var.units = 'ln(m)'
var.standard_name = 'logn_mean'
var.grid_mapping = 'latitude_longitude'
var[:] = overbank_maps['logn_mean']
var = nco.createVariable(
'logn_stdev', 'f8', (y_dim_name, x_dim_name),
fill_value=F8_FILLVAL
)
var.units = 'ln(m)'
var.standard_name = 'logn_stdev'
var.grid_mapping = 'latitude_longitude'
var[:] = overbank_maps['logn_stdev']
nco.close()
# def write_jules_overbank_props(overbank_fn, one_d=False):
# # Read overbank properties:
# logn_mean_ds = rasterio.open(os.environ['LOGN_MEAN_FN'])
# logn_stdev_ds = rasterio.open(os.environ['LOGN_STDEV_FN'])
# overbank_maps = {}
# overbank_maps['logn_mean'] = logn_mean_ds.read(1, masked=False).squeeze()
# overbank_maps['logn_stdev'] = logn_stdev_ds.read(1, masked=False).squeeze()
# for var in overbank_maps.keys():
# arr = overbank_maps[var]
# arr = np.ma.masked_array(
# arr,
# mask=np.broadcast_to(
# np.logical_not(LAND_FRAC),
# arr.shape
# ),
# dtype=np.float64,
# fill_value=F8_FILLVAL
# )
# overbank_maps[var] = arr
# # Write netCDF:
# if one_d:
# write_jules_overbank_props_1d(overbank_fn, overbank_maps)
# else:
# write_jules_overbank_props_2d(overbank_fn, overbank_maps)
| simonmoulds/jamr | src/python/write_jules_overbank_props.py | write_jules_overbank_props.py | py | 2,611 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 32,
"usage_type": "call"
}
] |
33540666683 | """HTTP Archive dataflow pipeline for generating HAR data on BigQuery."""
from __future__ import absolute_import
import json
import logging
from copy import deepcopy
from hashlib import sha256
import apache_beam as beam
from modules import utils, constants, transformation
# BigQuery can handle rows up to 100 MB.
MAX_CONTENT_SIZE = 2 * 1024 * 1024
# Number of times to partition the requests tables.
NUM_PARTITIONS = 4
def get_page(har):
    """Extract the HAR's first page as a single-element list of row dicts."""
    if not har:
        return None

    page = har.get("log").get("pages")[0]
    metadata = get_metadata(har)
    url = page.get("_URL")
    if metadata:
        # The page URL from metadata is more accurate.
        # See https://github.com/HTTPArchive/data-pipeline/issues/48
        url = metadata.get("tested_url", url)

    try:
        serialized = to_json(trim_page(page))
    except Exception:
        logging.warning(
            'Skipping pages payload for "%s": unable to stringify as JSON.' % url
        )
        return None

    if len(serialized) > MAX_CONTENT_SIZE:
        logging.warning(
            'Skipping pages payload for "%s": payload size (%s) exceeds the maximum content size of %s bytes.'
            % (url, len(serialized), MAX_CONTENT_SIZE)
        )
        return None

    return [
        {
            "url": url,
            "payload": serialized,
            "date": har["date"],
            "client": har["client"],
            "metadata": metadata,
        }
    ]
def get_page_url(har):
    """Return the URL of the HAR's page, or None when unavailable."""
    pages = get_page(har)
    if pages:
        return pages[0].get("url")
    logging.warning("Unable to get URL from page (see preceding warning).")
    return None
def get_metadata(har):
    """Return the custom ``_metadata`` object of the HAR's first page."""
    first_page = har.get("log").get("pages")[0]
    return first_page.get("_metadata")
def is_home_page(mapped_har):
    """Return True when the mapped record represents a root (home) page."""
    if not mapped_har:
        return False
    metadata = mapped_har.get("metadata") or {}
    if "crawl_depth" not in metadata:
        # Legacy default: records without a crawl depth are home pages.
        return True
    # Only home pages have a crawl depth of 0.
    return metadata["crawl_depth"] == 0
def partition_step(har, num_partitions):
    """Return a partition index derived from the hashed HAR page URL.

    Partition 0 is reserved for records that cannot be partitioned.
    """
    if not har:
        logging.warning("Unable to partition step, null HAR.")
        return 0

    page_url = get_page_url(har)
    if not page_url:
        logging.warning("Skipping HAR: unable to get page URL (see preceding warning).")
        return 0

    # Shift by one so the zero-th partition only ever contains errors.
    return 1 + hash_url(page_url) % (num_partitions - 1)
def get_requests(har):
    """Extract one row dict per request entry in the HAR."""
    if not har:
        return None

    page_url = get_page_url(har)
    if not page_url:
        # The page_url field indirectly depends on the get_page function.
        # If the page data is unavailable for whatever reason, skip its requests.
        logging.warning(
            "Skipping requests payload: unable to get page URL (see preceding warning)."
        )
        return None

    metadata = get_metadata(har)
    rows = []
    for entry in har.get("log").get("entries"):
        request_url = entry.get("_full_url")
        if not request_url:
            logging.warning('Skipping empty request URL for "%s"', page_url)
            continue

        try:
            payload = to_json(trim_request(entry))
        except Exception:
            logging.warning(
                'Skipping requests payload for "%s": unable to stringify as JSON.'
                % request_url
            )
            continue

        if len(payload) > MAX_CONTENT_SIZE:
            logging.warning(
                'Skipping requests payload for "%s": payload size (%s) exceeded maximum content size of %s bytes.'
                % (request_url, len(payload), MAX_CONTENT_SIZE)
            )
            continue

        rows.append(
            {
                "page": page_url,
                "url": request_url,
                "payload": payload,
                "date": har["date"],
                "client": har["client"],
                "metadata": metadata,
            }
        )

    return rows
def trim_request(request):
    """Return a copy of *request* without the response body text."""
    # Deep-copy first so the caller's response body stays usable later.
    pruned = deepcopy(request)
    pruned.get("response").get("content").pop("text", None)
    return pruned
def trim_page(page):
    """Return a copy of *page* without the parsed-CSS payload."""
    if not page:
        return None
    # Deep-copy first so the caller's parsed CSS stays usable later.
    slim = deepcopy(page)
    slim.pop("_parsed_css", None)
    return slim
def hash_url(url):
    """Hash *url* to a process-stable integer (unlike builtin ``hash``)."""
    digest = sha256(url.encode("utf-8")).hexdigest()
    return int(digest, 16)
def get_response_bodies(har):
    """Extract one row dict per request that carries a response body."""
    page_url = get_page_url(har)
    metadata = get_metadata(har)

    rows = []
    for entry in har.get("log").get("entries"):
        request_url = entry.get("_full_url")

        response = entry.get("response")
        content = response.get("content") if response else None
        body = content.get("text", None) if content else None
        if body is None:
            continue

        truncated = len(body) > MAX_CONTENT_SIZE
        if truncated:
            logging.warning(
                'Truncating response body for "%s". Response body size %s exceeds limit %s.'
                % (request_url, len(body), MAX_CONTENT_SIZE)
            )

        rows.append(
            {
                "page": page_url,
                "url": request_url,
                "body": body[:MAX_CONTENT_SIZE],
                "truncated": truncated,
                "date": har["date"],
                "client": har["client"],
                "metadata": metadata,
            }
        )

    return rows
def get_technologies(har):
    """Extract one row dict per detected technology on the HAR's page."""
    if not har:
        return None

    page = har.get("log").get("pages")[0]
    page_url = page.get("_URL")
    detected_apps = page.get("_detected_apps", {})
    detected_categories = page.get("_detected", {})
    metadata = get_metadata(har)

    # When there are no detected apps, it appears as an empty array.
    if isinstance(detected_apps, list):
        detected_apps = {}
        detected_categories = {}

    # Map each "<app> <info>" identifier back to its bare app name.
    id_to_app = {}
    for app, info_list in detected_apps.items():
        if not info_list:
            continue
        # There may be multiple info values. Add each to the map.
        for info in info_list.split(","):
            app_id = "%s %s" % (app, info) if len(info) > 0 else app
            id_to_app[app_id] = app

    technologies = []
    for category, apps in detected_categories.items():
        for app_id in apps.split(","):
            app = id_to_app.get(app_id)
            if app is None:
                app, info = app_id, ""
            else:
                info = app_id[len(app):].strip()
            technologies.append(
                {
                    "url": page_url,
                    "category": category,
                    "app": app,
                    "info": info,
                    "date": har["date"],
                    "client": har["client"],
                    "metadata": metadata,
                }
            )

    return technologies
def get_lighthouse_reports(har):
    """Extract the Lighthouse report from a HAR as a single-element list."""
    report = har.get("_lighthouse") if har else None
    if not report:
        return None

    page_url = get_page_url(har)
    if not page_url:
        logging.warning(
            "Skipping lighthouse report: unable to get page URL (see preceding warning)."
        )
        return None

    # Omit large UGC.
    report.get("audits").get("screenshot-thumbnails", {}).get("details", {}).pop(
        "items", None
    )

    try:
        serialized = to_json(report)
    except Exception:
        logging.warning(
            'Skipping Lighthouse report for "%s": unable to stringify as JSON.'
            % page_url
        )
        return None

    if len(serialized) > MAX_CONTENT_SIZE:
        logging.warning(
            'Skipping Lighthouse report for "%s": Report size (%s) exceeded maximum content size of %s bytes.'
            % (page_url, len(serialized), MAX_CONTENT_SIZE)
        )
        return None

    return [
        {
            "url": page_url,
            "report": serialized,
            "date": har["date"],
            "client": har["client"],
            "metadata": get_metadata(har),
        }
    ]
def get_parsed_css(har):
    """Extract the parsed-CSS custom metric rows from the HAR."""
    if not har:
        return None

    page = har.get("log").get("pages")[0]
    page_url = get_page_url(har)
    if not page_url:
        logging.warning("Skipping parsed CSS, no page URL")
        return None

    metadata = get_metadata(har)
    is_root_page = True
    if metadata:
        page_url = metadata.get("tested_url", page_url)
        is_root_page = metadata.get("crawl_depth") == 0

    custom_metric = page.get("_parsed_css")
    if not custom_metric:
        logging.warning("No parsed CSS data for page %s", page_url)
        return None

    rows = []
    for entry in custom_metric:
        stylesheet_url = entry.get("url")
        if stylesheet_url == 'inline':
            # Skip inline styles for now. They're special.
            continue

        try:
            ast_json = to_json(entry.get("ast"))
        except Exception:
            logging.warning(
                'Unable to stringify parsed CSS to JSON for "%s".'
                % page_url
            )
            continue

        rows.append({
            "date": har["date"],
            "client": har["client"],
            "page": page_url,
            "is_root_page": is_root_page,
            "url": stylesheet_url,
            "css": ast_json
        })

    return rows
def to_json(obj):
    """Serialize *obj* to a compact JSON string.

    Mirrors the legacy Java Dataflow pipeline's output: no whitespace
    between tokens and non-ASCII characters left unescaped (UTF-8 is
    preserved). Long floats may serialize differently from Java's
    scientific notation, which is considered an improvement.

    Raises:
        ValueError: if *obj* is falsy (None, empty container, etc.).
    """
    if obj:
        return json.dumps(obj, separators=(",", ":"), ensure_ascii=False)
    raise ValueError
def from_json(file_name, element):
    """Parse *element* as JSON; return [(file_name, obj)] or None on failure."""
    try:
        parsed = json.loads(element)
    except Exception as e:
        logging.error('Unable to parse file %s into JSON object "%s...": %s' % (file_name, element[:50], e))
        return None
    return [(file_name, parsed)]
def add_date_and_client(element):
    """Add ``date`` and ``client`` attributes for BigQuery table routing."""
    if element is None:
        logging.error('Element is empty, skipping adding date and time')
        return None

    try:
        file_name, har = element
        date, client = utils.date_and_client_from_file_name(file_name)
        metadata = har.get("log").get("pages")[0].get("_metadata", {})
        # The metadata layout (if present) overrides the file-name client.
        har["date"] = "{:%Y_%m_%d}".format(date)
        har["client"] = metadata.get("layout", client).lower()
        return har
    except Exception as e:
        logging.error('Unable to add date and client "%s...": %s' % (element[:50], e))
        return None
class WriteNonSummaryToBigQuery(beam.PTransform):
    """Partitions HAR records and writes each derived non-summary table
    (pages, technologies, lighthouse, requests, response bodies, parsed
    CSS) to its BigQuery destination."""

    def __init__(
        self,
        partitions,
        dataset_pages,
        dataset_technologies,
        dataset_lighthouse,
        dataset_requests,
        dataset_response_bodies,
        dataset_parsed_css,
        dataset_pages_home_only,
        dataset_technologies_home_only,
        dataset_lighthouse_home_only,
        dataset_requests_home_only,
        dataset_response_bodies_home_only,
        dataset_parsed_css_home_only,
        label=None,
        **kwargs,  # NOTE(review): accepted but unused — confirm intent
    ):
        # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
        # super().__init__(label)
        beam.PTransform.__init__(self)
        self.label = label

        self.partitions = partitions
        # Destinations for every crawled page.
        self.dataset_pages = dataset_pages
        self.dataset_technologies = dataset_technologies
        self.dataset_lighthouse = dataset_lighthouse
        self.dataset_requests = dataset_requests
        self.dataset_response_bodies = dataset_response_bodies
        self.dataset_parsed_css = dataset_parsed_css
        # Destinations restricted to home (crawl-depth-0) pages.
        self.dataset_pages_home = dataset_pages_home_only
        self.dataset_technologies_home = dataset_technologies_home_only
        self.dataset_lighthouse_home = dataset_lighthouse_home_only
        self.dataset_requests_home = dataset_requests_home_only
        self.dataset_response_bodies_home = dataset_response_bodies_home_only
        self.dataset_parsed_css_home = dataset_parsed_css_home_only

    def _transform_and_write_partition(
        self, pcoll, name, index, fn, table_all, table_home, schema
    ):
        """Map one partition through *fn* and write home-page rows to BigQuery.

        NOTE(review): ``table_all`` is accepted but never used — ``all_rows``
        is only filtered down to home pages and written to ``table_home``.
        Confirm whether the write of the unfiltered rows was intentionally
        removed.
        """
        formatted_name = utils.title_case_beam_transform_name(name)

        all_rows = pcoll | f"Map{formatted_name}{index}" >> beam.FlatMap(fn)
        home_only_rows = all_rows | f"Filter{formatted_name}{index}" >> beam.Filter(is_home_page)

        home_only_rows | f"Write{formatted_name}Home{index}" >> transformation.WriteBigQuery(
            table=lambda row: utils.format_table_name(row, table_home),
            schema=schema,
        )

    def expand(self, hars):
        """Partition *hars* and fan each partition out to the table writers."""
        # Add one to the number of partitions to use the zero-th partition for failures
        partitions = hars | beam.Partition(partition_step, self.partitions + 1)

        # log 0th elements (failures)
        partitions[0] | "LogPartitionFailures" >> beam.FlatMap(
            lambda e: logging.warning(f"Unable to partition record: {e}")
        )

        # enumerate starting from 1
        for idx in range(1, self.partitions + 1):
            self._transform_and_write_partition(
                pcoll=partitions[idx],
                name="pages",
                index=idx,
                fn=get_page,
                table_all=self.dataset_pages,
                table_home=self.dataset_pages_home,
                schema=constants.BIGQUERY["schemas"]["pages"],
            )

            self._transform_and_write_partition(
                pcoll=partitions[idx],
                name="technologies",
                index=idx,
                fn=get_technologies,
                table_all=self.dataset_technologies,
                table_home=self.dataset_technologies_home,
                schema=constants.BIGQUERY["schemas"]["technologies"],
            )

            self._transform_and_write_partition(
                pcoll=partitions[idx],
                name="lighthouse",
                index=idx,
                fn=get_lighthouse_reports,
                table_all=self.dataset_lighthouse,
                table_home=self.dataset_lighthouse_home,
                schema=constants.BIGQUERY["schemas"]["lighthouse"],
            )

            self._transform_and_write_partition(
                pcoll=partitions[idx],
                name="requests",
                index=idx,
                fn=get_requests,
                table_all=self.dataset_requests,
                table_home=self.dataset_requests_home,
                schema=constants.BIGQUERY["schemas"]["requests"],
            )

            self._transform_and_write_partition(
                pcoll=partitions[idx],
                name="response_bodies",
                index=idx,
                fn=get_response_bodies,
                table_all=self.dataset_response_bodies,
                table_home=self.dataset_response_bodies_home,
                schema=constants.BIGQUERY["schemas"]["response_bodies"],
            )

            self._transform_and_write_partition(
                pcoll=partitions[idx],
                name="parsed_css",
                index=idx,
                fn=get_parsed_css,
                table_all=self.dataset_parsed_css,
                table_home=self.dataset_parsed_css_home,
                schema=constants.BIGQUERY["schemas"]["parsed_css"],
            )
| HTTPArchive/data-pipeline | modules/non_summary_pipeline.py | non_summary_pipeline.py | py | 17,297 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.warning",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "logging.warning",
... |
7813600766 | """add region column for sample
Create Date: 2021-04-05 17:09:26.078925
"""
import enumtables # noqa: F401
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "20210405_170924"
down_revision = "20210401_211915"
branch_labels = None
depends_on = None
def upgrade():
    """Add a required ``region`` (continent) column to ``aspen.samples``.

    Steps:
      1. Create the ``region_types`` enum table and seed it with continents.
      2. Add ``samples.region`` as nullable, backfill existing rows with
         'North America', then tighten it to NOT NULL.
      3. Add the FK from ``samples.region`` to ``region_types.item_id``.
    """
    # Enum-as-table pattern (enumtables): one row per allowed value.
    op.create_table(
        "region_types",
        sa.Column("item_id", sa.String(), nullable=False),
        sa.PrimaryKeyConstraint("item_id", name=op.f("pk_region_types")),
        schema="aspen",
    )
    op.enum_insert(
        "region_types",
        [
            "North America",
            "Oceania",
            "Asia",
            "Europe",
            "South America",
            "Africa",
        ],
        schema="aspen",
    )
    # Added nullable first so existing rows don't violate the constraint
    # before the backfill below runs.
    op.add_column(
        "samples",
        sa.Column(
            "region",
            sa.String(),
            nullable=True,
            comment="This is the continent this sample was collected from.",
        ),
        schema="aspen",
    )
    # Backfill: all pre-existing samples are assumed North American.
    op.execute("""UPDATE aspen.samples SET region='North America'""")
    # Now that every row has a value, make the column mandatory.
    op.alter_column(
        "samples",
        "region",
        existing_type=sa.VARCHAR(),
        nullable=False,
        existing_comment="This is the continent this sample was collected from.",
        schema="aspen",
    )
    # Constrain region values to the seeded enum table.
    op.create_foreign_key(
        op.f("fk_samples_region_region_types"),
        "samples",
        "region_types",
        ["region"],
        ["item_id"],
        source_schema="aspen",
        referent_schema="aspen",
    )
def downgrade():
    """Revert the migration: drop the FK, then the column, then the enum table.

    Order matters — the constraint must go before the column it references,
    and ``region_types`` can only be dropped once nothing points at it.
    """
    op.drop_constraint(
        op.f("fk_samples_region_region_types"),
        "samples",
        schema="aspen",
        type_="foreignkey",
    )
    op.drop_column("samples", "region", schema="aspen")
    op.drop_table("region_types", schema="aspen")
| chanzuckerberg/czgenepi | src/backend/database_migrations/versions/20210405_170924_add_region_column_for_sample.py | 20210405_170924_add_region_column_for_sample.py | py | 1,818 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Strin... |
26424981939 | #coding: utf-8
#
# example 11.4
#
# Fits a leaky-aquifer (Hantush-style) drawdown model to pumping-test data
# and plots measured vs. fitted drawdown against 1/u on log-log axes.
import numpy as np
from geothermal_md import *
from matplotlib.pyplot import *
from scipy.optimize import curve_fit
#
# problem data
#
gam = 0.5772157  # Euler-Mascheroni constant (defined but not used below)
M = np.loadtxt("..\\data\\pumping_test2.txt")
t = M[:,0] # time in minutes
sf = M[:,1] # drawndown in meters
nt = len(sf)
qo = 17 # heat transfer per metre
rw = 12.2  # radius used in u = rw^2/(4*alpha*t) — presumably well/borehole radius; confirm
#
#
#
# first approach
#
Ti = 2.4 # m2/min  (initial guess for transmissivity T)
Si = 0.004  # NOTE(review): overwritten to 0.003 two lines below
rbi = 0.01  # NOTE(review): overwritten to 0.03 before the fit
# Vectorize the leaky well function (from geothermal_md) for array inputs.
G_vect = np.vectorize(leaky_function)
Si = 0.003
def s_theo(t,Tnew,Snew,rb):
    """Theoretical drawdown for the leaky-aquifer model.

    t: time(s); Tnew: transmissivity; Snew: storativity; rb: r/b leakage
    parameter. Uses module globals rw, qo, G_vect and pi (pi comes from a
    wildcard import — presumably geothermal_md; confirm).
    """
    al = Tnew/Snew
    u = rw**2/(4*al*t)
    s = qo/(4*pi*Tnew)*G_vect(u,rb)
    return s
#
#
rbi = 0.03
po = [Ti,Si,rbi]  # initial parameter vector [T, S, r/b] for curve_fit
ni = 3  # skip the first 3 samples (early-time data excluded from the fit)
tn =t[ni:nt]
sn = sf[ni:nt]
params,resn = curve_fit(s_theo,tn,sn,po)
Tn = params[0]
Sn = params[1]
rbn = params[2]
print ('T = ',Tn,'m2/min')
print('S = ',Sn)
print('r/b = ',rbn)
# Evaluate the model with the initial guesses and with the fitted parameters.
s1 = s_theo(t,Ti,Si,rbi)
s2 = s_theo(t,Tn,Sn,rbn)
alh = Tn/Sn  # fitted hydraulic diffusivity T/S
u = rw**2/(4*alh*t)
un = rw**2/(4*alh*tn)
x = 1/u  # conventional abscissa for type-curve plots
p1 = loglog(x, sf, label='Measured',color = 'black')
p2 = loglog(x, s2, label='Curve fit',color = 'black', linestyle='none', marker='o')
ll = legend()
sizeOfFont = 14
fontProperties = {'weight' : 'bold', 'size' : sizeOfFont}
a = gca()
gx = xlabel('1/u')
gy = ylabel('')
setp(gx,'fontsize',15,'fontweight','bold')
setp(gy,'fontsize',15,'fontweight','bold')
setp(a,'yscale','log')
setp(a,'xscale','log')
nx = len(x)
show() | LouisLamarche/Fundamentals-of-Geothermal-Heat-Pump-Systems | chapter11/Example11_4.py | Example11_4.py | py | 1,363 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.vectorize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.curve_fit",
"line_number": 41,
"usage_type": "call"
}
] |
15019927918 | # This is a sample Python script.
import pandas as pd
import csv
from datetime import datetime
import json
import paho.mqtt.client as mqtt
from itertools import count
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Press Mayús+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
field_names = ['timestamp', 'id', 'heart', 'accelerometer']
def write_on_csv(json_missatge):
    """Append one received message to the CSV database file.

    Prepends the current Unix timestamp (integer seconds) to
    *json_missatge* (a dict whose keys match the module-level
    ``field_names``) and writes it as a single CSV row.
    """
    # newline='' is required by the csv module: without it every row is
    # followed by a blank line on Windows. Explicit encoding for portability.
    with open('base_dades_didac.csv', 'a', newline='', encoding='utf-8') as csv_file:
        dict_object = csv.DictWriter(csv_file, fieldnames=field_names)
        ts = datetime.now().timestamp()  # seconds since the epoch
        new_entry = {'timestamp': int(ts)}
        new_entry.update(json_missatge)
        print("\nEl missatge rebut es:", new_entry)
        dict_object.writerow(new_entry)
def on_message(client, userdata, message):
    """MQTT callback: decode the incoming payload as UTF-8 and print it."""
    decoded = message.payload.decode("utf-8")
    print("el missatge es:", decoded)
    # Persisting to CSV is currently disabled:
    #missatge = json.loads(missatge_deco)
    #write_on_csv(missatge)
    #print("message received ", str(message.payload.decode("utf-8")))
def subscribe_MQTT():
    """Connect to the public Mosquitto broker and listen forever.

    Registers ``on_message`` as the callback, subscribes to the sensor
    topic, and blocks processing incoming messages until interrupted.
    """
    subscriber = mqtt.Client('SoftwareLazo')
    # Register the callback before connecting so no message is missed.
    subscriber.on_message = on_message
    subscriber.connect('test.mosquitto.org')
    subscriber.subscribe('SensorDidacLazo')
    subscriber.loop_forever()  # blocking network loop
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Entry point: start the blocking MQTT subscribe loop.
    subscribe_MQTT()
| JordiLazo/embedded_and_ubiquitous_systems_103056 | ReceiverMQTT/main.py | main.py | py | 1,463 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.DictWriter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datet... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.