seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21998859226 | from typing import List
class Solution:
def deleteAndEarn(self, nums: List[int]) -> int:
max_val = max(nums)
total = [0] * (max_val + 1)
for val in nums:
total[val] += val
def rob(nums):
first = nums[0]
second = max(nums[0], nums[1])
for i in range(2, len(nums)):
first, second = second, max(second, first + nums[i])
return second
return rob(total)
| hangwudy/leetcode | 700-799/740. 删除并获得点数.py | 740. 删除并获得点数.py | py | 475 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
73813844989 | import io
import numpy as np
import sys
from gym.envs.toy_test import discrete
from copy import deepcopy as dc
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class GridworldEnv(discrete.DiscreteEnv):
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, shape = [4, 4]):
if not isinstance(shape, (list, tuple)) or not len(shape) == 2:
raise ValueError('shape argument must be a list/tuple of length 2')
self.shape = shape
nS = np.prod(shape) #np.prod : array 내부 element들의 곱
nA = 4
MAX_Y = shape[0]
MAX_X = shape[1]
P = {}
grid = np.arange(nS).reshape(shape) # np.arange(x) 0부터 x까지 [0, 1, ..., x]
it = np.nditer(grid, flags=['multi_index']) # iterator
while not it.finished:
s = it.iterindex
y, x = it.multi_index # 왜 y,x 순서? => (row, column)가 (y, x) 에 대응
P[s] = {a: [] for a in range(nA)} # a = 0, ..,3 돌면서 [] 생성 (s = iterindex 이고 state)
# P[s][a] = (prob, next_state, reward, is_done)
def is_done(s): # terminal or not
return s == 0 or s == (nS - 1)
reward = 0.0 if is_done(s) else -1.0 # reward는 현재 state와 action 기준 (여기서는 action 종류 관계없이 동일)
if is_done(s):
P[s][UP] = [(1.0, s, reward, True)] # 왜 [ ]?
P[s][RIGHT] = [(1.0, s, reward, True)]
P[s][DOWN] = [(1.0, s, reward, True)]
P[s][LEFT] = [(1.0, s, reward, True)]
else:
ns_up = s if y == 0 else s - MAX_X # 맨 윗줄이면 그대로, 아니면 MAX_X 만큼 빼기 (s는 1차원 배열이니까)
ns_right = s if x == (MAX_X - 1) else s + 1
ns_down = s if y == (MAX_Y -1) else s + MAX_X
ns_left = s if x == 0 else s - 1
P[s][UP] = [(1.0, ns_up, reward, is_done(ns_up))]
P[s][RIGHT] = [(1.0, ns_right, reward, is_done(ns_right))]
P[s][DOWN] = [(1.0, ns_down, reward, is_done(ns_down))]
P[s][LEFT] = [(1.0, ns_left, reward, is_done(ns_left))]
it.iternext() | hyeonahkimm/RLfrombasic | src/common/gridworld.py | gridworld.py | py | 2,222 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gym.envs.toy_test.discrete.DiscreteEnv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "gym.envs.toy_test.discrete",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.prod",
"line_number": 23,
"usage_type": "call"
},
{
... |
37600184033 | import torch
from safetensors.torch import save_file
import argparse
from pathlib import Path
def main(args):
input_path = Path(args.input_path).resolve()
output_path = args.output_path
overwrite = args.overwrite
if input_path.suffix == ".safetensors":
raise ValueError(
f"{input_path} is already a safetensors file. / {input_path} は既に safetensors ファイルです。"
)
if output_path is None:
output_path = input_path.parent / f"{input_path.stem}.safetensors"
else:
output_path = Path(output_path).resolve()
if output_path.exists() and not overwrite:
raise FileExistsError(
f"{output_path.name} already exists. Use '--overwrite' or '-w' to overwite. / {output_path.name} は既に存在します。'--overwrite' か '-w' を指定すると上書きします。"
)
print(f"Loading...")
model = torch.load(input_path, map_location="cpu")
save_file(model, output_path)
print("Done!")
print(f"Saved to {output_path} /\n {output_path} に保存しました。")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"input_path",
type=str,
help="input path",
)
parser.add_argument(
"--output_path",
"-o",
type=str,
help="output path",
)
parser.add_argument(
"--overwrite",
"-w",
action="store_true",
help="overwrite output file",
)
args = parser.parse_args()
main(args)
| p1atdev/sd_ti_merge | to_safetensors.py | to_safetensors.py | py | 1,560 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "safetensors.torch.save_file",
... |
40458088335 | # general imports for EMISSOR and the BRAIN
from cltl import brain
from emissor.representation.scenario import ImageSignal
# specific imports
from datetime import datetime
import time
import cv2
import pathlib
import emissor_api
#### The next utils are needed for the interaction and creating triples and capsules
import chatbots.util.driver_util as d_util
import chatbots.util.capsule_util as c_util
import chatbots.util.face_util as f_util
def get_next_image(camera, imagefolder):
what_is_seen = None
success, frame = camera.read()
if success:
current_time = int(time.time() * 1e3)
imagepath = f"{imagefolder}/{current_time}.png"
image_bbox = (0, 0, frame.shape[1], frame.shape[0])
cv2.imwrite(imagepath, frame)
print(imagepath)
what_is_seen = f_util.detect_objects(imagepath)
return what_is_seen, current_time, image_bbox
def create_imageSignal_and_annotations_in_emissor (results, image_time, image_bbox, scenario_ctrl):
#### We create an imageSignal
imageSignal = d_util.create_image_signal(scenario_ctrl, f"{image_time}.png", image_bbox, image_time)
scenario_ctrl.append_signal(imageSignal)
what_is_seen = []
## The next for loop creates a capsule for each object detected in the image and posts a perceivedIn property for the object in the signal
## The "front_camera" is the source of the signal
for result in results:
current_time = int(time.time() * 1e3)
bbox = [int(num) for num in result['yolo_bbox']]
object_type = result['label_string']
object_prob = result['det_score']
what_is_seen.append(object_type)
mention = f_util.create_object_mention(imageSignal, "front_camera", current_time, bbox, object_type,
object_prob)
imageSignal.mentions.append(mention)
return what_is_seen, imageSignal
def add_perception_to_episodic_memory (imageSignal: ImageSignal, object_list, my_brain, scenario_ctrl, location, place_id):
response_list = []
for object in object_list:
### We created a perceivedBy triple for this experience,
### @TODO we need to include the bouding box somehow in the object
#print(object)
capsule = c_util.scenario_image_triple_to_capsule(scenario_ctrl,
imageSignal,
location,
place_id,
"front_camera",
object,
"perceivedIn",
imageSignal.id)
#print(capsule)
# Create the response from the system and store this as a new signal
# We use the throughts to respond
response = my_brain.update(capsule, reason_types=True, create_label=True)
response_list.append(response)
return response_list
def watch_and_remember(scenario_ctrl,
camera,
imagefolder,
my_brain,
location,
place_id):
t1 = datetime.now()
while (datetime.now()-t1).seconds <= 60:
###### Getting the next input signals
what_did_i_see, current_time, image_bbox = get_next_image(camera, imagefolder)
object_list, imageSignal = create_imageSignal_and_annotations_in_emissor(what_did_i_see, current_time, image_bbox, scenario_ctrl)
response = add_perception_to_episodic_memory(imageSignal, object_list, my_brain, scenario_ctrl, location, place_id)
print(response)
reply = "\nI saw: "
if len(object_list) > 1:
for index, object in enumerate(object_list):
if index == len(object_list) - 1:
reply += " and"
reply += " a " + object
elif len(object_list) == 1:
reply += " a " + object_list[0]
else:
reply = "\nI cannot see! Something wrong with my camera."
print(reply + "\n")
def main():
### Link your camera
camera = cv2.VideoCapture(0)
# Initialise the brain in GraphDB
##### Setting the agents
AGENT = "Leolani2"
HUMAN_NAME = "Stranger"
HUMAN_ID = "stranger"
scenarioStorage, scenario_ctrl, imagefolder, rdffolder, location, place_id = emissor_api.start_a_scenario(AGENT, HUMAN_ID, HUMAN_NAME)
log_path = pathlib.Path(rdffolder)
my_brain = brain.LongTermMemory(address="http://localhost:7200/repositories/sandbox",
log_dir=log_path,
clear_all=True)
watch_and_remember(scenario_ctrl, camera, imagefolder, my_brain, location, place_id)
scenario_ctrl.scenario.ruler.end = int(time.time() * 1e3)
scenarioStorage.save_scenario(scenario_ctrl)
camera.release()
if __name__ == '__main__':
main()
| leolani/cltl-chatbots | src/chatbots/bots/episodic_image_memory.py | episodic_image_memory.py | py | 5,114 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "chatbots.util.face_util.detect_objects",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "chatbots... |
8926064474 | import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time
import requests
import shutil
import os.path
import docx2txt
from webdriver_manager.chrome import ChromeDriverManager
from datetime import datetime
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(ChromeDriverManager().install())
action = ActionChains(driver)
url_path = pd.read_csv("urls_data.csv")
url_list = list(url_path['0'])
base_dir = './ctcfp.org'
for i in url_list:
title1 = ''
title = ''
transcript = ''
audio_path = ''
audio = ''
post_date = ''
file_name = ''
try:
print(i)
driver.get(i)
time.sleep(5)
CHECK=driver.find_element_by_xpath('//li[@class="jupiterx-post-meta-categories list-inline-item"]/a')
CHECK=CHECK.text
if CHECK=="Podcasts":
title1 = driver.find_element_by_xpath('//div[@class="container-fluid"]/h1')
title = title1.text
print(title)
# transcript = driver.find_element_by_xpath('//div[@class="jet-button__container"]/a')
# transcript = transcript.get_attribute('href')
date_ = driver.find_element_by_xpath('//li[@class="jupiterx-post-meta-date list-inline-item"]/time')
date_ = date_.text
from dateutil.parser import parse
date_ = parse(date_, fuzzy=True)
print(date_, 'parse')
post_date = datetime.strptime(str(date_), '%Y-%m-%d %H:%M:%S').strftime('%m/%d/%Y')
print(post_date, "post_date")
file_name = title.replace(" ", "_")
if os.path.exists('./ctcfp.org/' + file_name):
pass
else:
time.sleep(10)
try:
try:
try:
audio_path = driver.find_element_by_xpath('//div[@class="jet-button__container"]/a')
except:
audio_path = driver.find_element_by_xpath('//div[@class ="jupiterx-content"]/article/div/div[1]/ul/li[1]/a')
except:
audio_path = driver.find_element_by_xpath('//div[@class="jupiterx-post-content clearfix"]/div/div[1]/a')
link = audio_path.get_attribute('href')
print(link, "audio_link")
text = "audio_file"
params = {
"ie": "UTF-8",
"client": "tw-ob",
"q": text,
"tl": "en",
"total": "1",
"idx": "0",
"textlen": str(len(text))
}
response = requests.get(link, params=params)
response.raise_for_status()
assert response.headers["Content-Type"] == "audio/mpeg"
with open("output.mp3", "wb") as file:
file.write(response.content)
print("Done.")
os.rename("output.mp3", file_name + ".mp3")
path = os.path.join(base_dir, file_name)
os.mkdir(path)
try:
try:
driver.find_element_by_xpath('//div[@class="elementor-container elementor-column-gap-default"]/div[2]/div/div/div/div/div/a/div[4]/span').click()
except:
driver.find_element_by_xpath('//div[@class ="jupiterx-content"]/article/div/div[1]/ul/li[2]/a').click()
except:
try:
driver.find_element_by_xpath('//div[@class="jupiterx-post-content clearfix"]/ul/li[1]/a').click()
except:
driver.find_element_by_xpath('//div[@class="jupiterx-post-content clearfix"]/div/div[2]/a').click()
time.sleep(20)
filepath = '/home/webtunixi5/Downloads'
filename = max([filepath + "/" + f for f in os.listdir(filepath)], key=os.path.getctime)
print(filename)
time.sleep(10)
shutil.move(os.path.join('.', filename), file_name + '_orig.docx')
text = docx2txt.process(file_name + '_orig.docx')
time.sleep(5)
with open(file_name + '_orig.txt', 'w') as f:
for line in text:
f.write(line)
with open(file_name + '.txt', 'w') as f:
for line in title:
f.write(line)
with open(file_name + '_info.txt', 'w') as f:
f.write(i + '\n')
f.write(post_date)
print("Scraped transcript data")
shutil.move(file_name + ".mp3", path + "/" + file_name + ".mp3")
print('audio moved successful')
shutil.move(file_name + '_orig.txt', path + '/' + file_name + '_orig.txt')
shutil.move(file_name + '.txt', path + '/' + file_name + '.txt')
shutil.move(file_name + '_info.txt', path + '/' + file_name + '_info.txt')
print("Done.")
if os.path.exists('./'+file_name + '_orig.docx'):
os.remove('./'+file_name + '_orig.docx')
except Exception as e:
print(e)
pass
else:
print("Not a podcast.")
pass
except Exception as e:
print("++++++++++++++++++")
pass | priyankathakur6321/WebScraping-Automation | ctcfp/main.py | main.py | py | 5,904 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 15,
"usage_type": "call"
},
... |
22175885434 | # Author:Zhang Yuan
from MyPackage import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
import statsmodels.api as sm
from scipy import stats
# ------------------------------------------------------------
__mypath__ = MyPath.MyClass_Path("") # 路径类
mylogging = MyDefault.MyClass_Default_Logging(activate=False) # 日志记录类,需要放在上面才行
myfile = MyFile.MyClass_File() # 文件操作类
myword = MyFile.MyClass_Word() # word生成类
myexcel = MyFile.MyClass_Excel() # excel生成类
myini = MyFile.MyClass_INI() # ini文件操作类
mytime = MyTime.MyClass_Time() # 时间类
myparallel = MyTools.MyClass_ParallelCal() # 并行运算类
myplt = MyPlot.MyClass_Plot() # 直接绘图类(单个图窗)
mypltpro = MyPlot.MyClass_PlotPro() # Plot高级图系列
myfig = MyPlot.MyClass_Figure(AddFigure=False) # 对象式绘图类(可多个图窗)
myfigpro = MyPlot.MyClass_FigurePro(AddFigure=False) # Figure高级图系列
myplthtml = MyPlot.MyClass_PlotHTML() # 画可以交互的html格式的图
mypltly = MyPlot.MyClass_Plotly() # plotly画图相关
mynp = MyArray.MyClass_NumPy() # 多维数组类(整合Numpy)
mypd = MyArray.MyClass_Pandas() # 矩阵数组类(整合Pandas)
mypdpro = MyArray.MyClass_PandasPro() # 高级矩阵数组类
myDA = MyDataAnalysis.MyClass_DataAnalysis() # 数据分析类
myDefault = MyDefault.MyClass_Default_Matplotlib() # 画图恢复默认设置类
# myMql = MyMql.MyClass_MqlBackups() # Mql备份类
# myBaidu = MyWebCrawler.MyClass_BaiduPan() # Baidu网盘交互类
# myImage = MyImage.MyClass_ImageProcess() # 图片处理类
myBT = MyBackTest.MyClass_BackTestEvent() # 事件驱动型回测类
myBTV = MyBackTest.MyClass_BackTestVector() # 向量型回测类
myML = MyMachineLearning.MyClass_MachineLearning() # 机器学习综合类
mySQL = MyDataBase.MyClass_MySQL(connect=False) # MySQL类
mySQLAPP = MyDataBase.MyClass_SQL_APPIntegration() # 数据库应用整合
myWebQD = MyWebCrawler.MyClass_QuotesDownload(tushare=False) # 金融行情下载类
myWebR = MyWebCrawler.MyClass_Requests() # Requests爬虫类
myWebS = MyWebCrawler.MyClass_Selenium(openChrome=False) # Selenium模拟浏览器类
myWebAPP = MyWebCrawler.MyClass_Web_APPIntegration() # 爬虫整合应用类
myEmail = MyWebCrawler.MyClass_Email() # 邮箱交互类
myReportA = MyQuant.MyClass_ReportAnalysis() # 研报分析类
myFactorD = MyQuant.MyClass_Factor_Detection() # 因子检测类
myKeras = MyDeepLearning.MyClass_tfKeras() # tfKeras综合类
myTensor = MyDeepLearning.MyClass_TensorFlow() # Tensorflow综合类
myMT5 = MyMql.MyClass_ConnectMT5(connect=False) # Python链接MetaTrader5客户端类
myMT5Pro = MyMql.MyClass_ConnectMT5Pro(connect=False) # Python链接MT5高级类
myMT5Indi = MyMql.MyClass_MT5Indicator() # MT5指标Python版
myMT5Report = MyMT5Report.MyClass_StratTestReport(AddFigure=False) # MT5策略报告类
myMT5Analy = MyMT5Analysis.MyClass_ForwardAnalysis() # MT5分析类
myMT5Lots_Fix = MyMql.MyClass_Lots_FixedLever(connect=False) # 固定杠杆仓位类
myMT5Lots_Dy = MyMql.MyClass_Lots_DyLever(connect=False) # 浮动杠杆仓位类
myMT5run = MyMql.MyClass_RunningMT5() # Python运行MT5
myMT5code = MyMql.MyClass_CodeMql5() # Python生成MT5代码
myMoneyM = MyTrade.MyClass_MoneyManage() # 资金管理类
myDefault.set_backend_default("Pycharm") # Pycharm下需要plt.show()才显示图
# ------------------------------------------------------------
# Jupyter Notebook 控制台显示必须加上:%matplotlib inline ,弹出窗显示必须加上:%matplotlib auto
# %matplotlib inline
# import warnings
# warnings.filterwarnings('ignore')
# %%
# 简介
# A CNN-LSTM-Based Model to Forecast Stock Prices (Wenjie Lu, Jiazheng Li, Yifan Li, Aijun Sun, Jingyang Wang, Complexity magazine, vol. 2020, Article ID 6622927, 10 pages, 2020) 一文的作者比较了各种股票价格预测模型:
# 股票价格数据具有时间序列的特点。
# 同时,基于机器学习长短期记忆(LSTM)具有通过记忆功能分析时间序列数据之间关系的优势,我们提出了一种基于CNN-LSTM的股票价格预测方法。
# 同时,我们使用MLP、CNN、RNN、LSTM、CNN-RNN等预测模型逐一对股票价格进行了预测。此外,还对这些模型的预测结果进行了分析和比较。
# 本研究利用的数据涉及1991年7月1日至2020年8月31日的每日股票价格,包括7127个交易日。
# 在历史数据方面,我们选择了八个特征,包括开盘价、最高价、最低价、收盘价、成交量、成交额、涨跌幅和变化。
# 首先,我们采用CNN从数据中有效提取特征,即前10天的项目。然后,我们采用LSTM,用提取的特征数据来预测股票价格。
# 根据实验结果,CNN-LSTM可以提供可靠的股票价格预测,并且预测精度最高。
# 这种预测方法不仅为股票价格预测提供了一种新的研究思路,也为学者们研究金融时间序列数据提供了实践经验。
# 在所有考虑的模型中,CNN-LSTM模型在实验中产生了最好的结果。在这篇文章中,我们将考虑如何创建这样一个模型来预测金融时间序列,以及如何在MQL5专家顾问中使用创建的ONNX模型。
#%%
#Python libraries
import matplotlib.pyplot as plt
import MetaTrader5 as mt5
import tensorflow as tf
import numpy as np
import pandas as pd
import tf2onnx
from sklearn.model_selection import train_test_split
from sys import argv
#check tensorflow version
print(tf.__version__)
#check GPU support
print(len(tf.config.list_physical_devices('GPU'))>0)
#initialize MetaTrader5 for history data
if not mt5.initialize():
print("initialize() failed, error code =",mt5.last_error())
quit()
#show terminal info
terminal_info=mt5.terminal_info()
print(terminal_info)
#show file path
file_path=terminal_info.data_path+"\\MQL5\\Files\\"
print(file_path)
#data path to save the model
# data_path=argv[0]
# last_index=data_path.rfind("\\")+1
# data_path=data_path[0:last_index]
data_path = __mypath__.get_desktop_path() + "\\"
print("data path to save onnx model",data_path)
#set start and end dates for history data
from datetime import timedelta,datetime
end_date = datetime.now()
start_date = end_date - timedelta(days=120)
#print start and end dates
print("data start date=",start_date)
print("data end date=",end_date)
#get EURUSD rates (H1) from start_date to end_date
eurusd_rates = mt5.copy_rates_range("EURUSD", mt5.TIMEFRAME_H1, start_date, end_date)
#create dataframe
df = pd.DataFrame(eurusd_rates)
df.head()
df.shape
#prepare close prices only
data = df.filter(['close']).values
#show close prices
plt.figure(figsize = (18,10))
plt.plot(data,'b',label = 'Original')
plt.xlabel("Hours")
plt.ylabel("Price")
plt.title("EURUSD_H1")
plt.legend()
plt.show()
#%%
#scale data using MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(data)
#training size is 80% of the data
training_size = int(len(scaled_data)*0.80)
print("training size:",training_size)
#create train data and check size
train_data_initial = scaled_data[0:training_size,:]
print(len(train_data_initial))
#create test data and check size
test_data_initial= scaled_data[training_size:,:1]
print(len(test_data_initial))
#split a univariate sequence into samples
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
#find the end of this pattern
end_ix = i + n_steps
#check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
#gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
#split into samples
time_step = 120
x_train, y_train = split_sequence(train_data_initial, time_step)
x_test, y_test = split_sequence(test_data_initial, time_step)
#reshape input to be [samples, time steps, features] which is required for LSTM
x_train =x_train.reshape(x_train.shape[0],x_train.shape[1],1)
x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],1)
#%%
#import keras libraries for the model
import math
from keras.models import Sequential
from keras.layers import Dense,Activation,Conv1D,MaxPooling1D,Dropout
from keras.layers import LSTM
from keras.utils.vis_utils import plot_model
from keras.metrics import RootMeanSquaredError as rmse
from keras import optimizers
#define the model
model = Sequential()
model.add(Conv1D(filters=256, kernel_size=2,activation='relu',padding = 'same',input_shape=(120,1)))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100, return_sequences = True))
model.add(Dropout(0.3))
model.add(LSTM(100, return_sequences = False))
model.add(Dropout(0.3))
model.add(Dense(units=1, activation = 'sigmoid'))
model.compile(optimizer='adam', loss= 'mse' , metrics = [rmse()])
#show model
model.summary()
#measure time
import time
time_calc_start = time.time()
#fit model with 300 epochs
history=model.fit(x_train,y_train,epochs=300,validation_data=(x_test,y_test),batch_size=32,verbose=1)
#calculate time
fit_time_seconds = time.time() - time_calc_start
print("fit time =",fit_time_seconds," seconds.")
#show training history keys
history.history.keys()
#show iteration-loss graph for training and validation
plt.figure(figsize = (18,10))
plt.plot(history.history['loss'],label='Training Loss',color='b')
plt.plot(history.history['val_loss'],label='Validation-loss',color='g')
plt.xlabel("Iteration")
plt.ylabel("Loss")
plt.title("LOSS")
plt.legend()
plt.show()
#show iteration-rmse graph for training and validation
plt.figure(figsize = (18,10))
plt.plot(history.history['root_mean_squared_error'],label='Training RMSE',color='b')
plt.plot(history.history['val_root_mean_squared_error'],label='Validation-RMSE',color='g')
plt.xlabel("Iteration")
plt.ylabel("RMSE")
plt.title("RMSE")
plt.legend()
plt.show()
#evaluate training data
model.evaluate(x_train, y_train, batch_size = 32)
#evaluate testing data
model.evaluate(x_test, y_test, batch_size = 32)
#prediction using training data
train_predict = model.predict(x_train)
plot_y_train = y_train.reshape(-1,1)
#show actual vs predicted (training) graph
plt.figure(figsize=(18,10))
plt.plot(scaler.inverse_transform(plot_y_train),color = 'b', label = 'Original')
plt.plot(scaler.inverse_transform(train_predict),color='red', label = 'Predicted')
plt.title("Prediction Graph Using Training Data")
plt.xlabel("Hours")
plt.ylabel("Price")
plt.legend()
plt.show()
#prediction using testing data
test_predict = model.predict(x_test)
plot_y_test = y_test.reshape(-1,1)
#%%
# 为了计算度量,我们需要将数据从区间[0,1]转换过来。同样,我们使用MinMaxScaler。
#calculate metrics
from sklearn import metrics
from sklearn.metrics import r2_score
#transform data to real values
value1=scaler.inverse_transform(plot_y_test)
value2=scaler.inverse_transform(test_predict)
#calc score
score = np.sqrt(metrics.mean_squared_error(value1,value2))
print("RMSE : {}".format(score))
print("MSE :", metrics.mean_squared_error(value1,value2))
print("R2 score :",metrics.r2_score(value1,value2))
#show actual vs predicted (testing) graph
plt.figure(figsize=(18,10))
plt.plot(scaler.inverse_transform(plot_y_test),color = 'b', label = 'Original')
plt.plot(scaler.inverse_transform(test_predict),color='g', label = 'Predicted')
plt.title("Prediction Graph Using Testing Data")
plt.xlabel("Hours")
plt.ylabel("Price")
plt.legend()
plt.show()
# save model to ONNX
output_path = data_path+"model.eurusd.H1.120.onnx"
onnx_model = tf2onnx.convert.from_keras(model, output_path=output_path)
print(f"model saved to {output_path}")
# 保存到MQL5的Files中
output_path = file_path+"model.eurusd.H1.120.onnx"
onnx_model = tf2onnx.convert.from_keras(model, output_path=output_path)
print(f"saved model to {output_path}")
# finish
mt5.shutdown()
# Python脚本的完整代码附在文章的Jupyter笔记本中。
# 在《基于CNN-LSTM的模型预测股票价格》一文中,采用CNN-LSTM架构的模型获得了R^2=0.9646的最佳结果。在我们的例子中,CNN-LSTM网络产生的最佳结果是R^2=0.9684。根据这些结果,这种类型的模型在解决预测问题时可以很有效率。
# 我们考虑了一个Python脚本的例子,它建立和训练CNN-LSTM模型来预测金融时间序列。
#%% Using the Model in MetaTrader 5
# 2.1. 在你开始之前要知道的好事
# 有两种方法来创建一个模型: 你可以使用OnnxCreate从onnx文件创建模型,或者使用OnnxCreateFromBuffer从数据阵列创建模型。
# 如果ONNX模型被用作EA中的资源,你需要在每次改变模型时重新编译EA。
# 并非所有模型都有完全定义的尺寸输入和/或输出张量。这通常是负责包尺寸的第一个维度。在运行一个模型之前,你必须使用OnnxSetInputShape和OnnxSetOutputShape函数明确指定尺寸。模型的输入数据应该以训练模型时的相同方式准备。
# 对于输入和输出数据,我们建议使用模型中使用的相同类型的数组、矩阵和/或向量。在这种情况下,你将不必在运行模型时转换数据。如果数据不能以所需类型表示,数据将被自动转换。
# 使用OnnxRun来推理(运行)你的模型。请注意,一个模型可以被多次运行。使用模型后,使用 OnnxRelease 函数释放它。
# 2.2. 读取onnx文件并获得输入和输出的信息
# 为了使用我们的模型,我们需要知道模型的位置、输入数据类型和形状,以及输出数据类型和形状。根据之前创建的脚本,model.eurusd.H1.120.onnx与生成onnx文件的Python脚本位于同一个文件夹中。输入是float32,120个归一化的收盘价(用于在批量大小等于1的情况下工作);输出是float32,这是一个由模型预测的归一化价格。
# 我们还在MQL5\Files文件夹中创建了onnx文件,以便使用MQL5脚本获得模型输入和输出数据。
# 参见MT5
| MuSaCN/PythonProjects2023-02-14 | Project_Papers文章调试/4.如何在MQL5中使用ONNX模型.py | 4.如何在MQL5中使用ONNX模型.py | py | 14,235 | python | zh | code | 1 | github-code | 6 | [
{
"api_name": "tensorflow.__version__",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.list_physical_devices",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 94,
"usage_type": "attribute"
},... |
25503087204 | from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from .models import ListingComment, Listing, Bid, Category
from .forms import CreateListingForm
import os
import boto3
def home(request):
listings = Listing.objects.filter(is_active=True)
categories = Category.objects.all()
return render(request, "auction/home.html", {
'listings': listings,
'categories': categories
})
@login_required
def createListing(request):
if request.method == 'POST':
form = CreateListingForm(request.POST , request.FILES)
if form.is_valid():
listing = form.save(commit=False)
listing.owner = request.user
price = form.cleaned_data['price'] or 1
bid = Bid(bid=price, bid_owner=request.user)
bid.save()
listing.bid_price = bid
if request.FILES['image']:
image_file = request.FILES['image']
# Connect to S3
s3 = boto3.client('s3')
# Upload the image file to S3
s3.upload_fileobj(image_file, os.getenv('AWS_STORAGE_BUCKET_NAME'), 'static/auction_images/' + image_file.name)
# Get the URL of the uploaded image
url = f"https://s3.amazonaws.com/{os.getenv('AWS_STORAGE_BUCKET_NAME')}/{'static/auction_images/' + image_file.name}"
listing.image_url = url
listing.save()
return redirect(reverse('auction:home'))
else:
print(form.errors)
form = CreateListingForm()
return render(request, 'auction/createListing.html', {
'form': form
})
def category(request):
if request.method == 'POST':
category = request.POST['category']
category_object = Category.objects.get(category_name=category)
categories = Category.objects.exclude(category_name=category)
listings = Listing.objects.filter(is_active=True, category=category_object)
return render(request, 'auction/category.html', {
'listings': listings,
'categories': categories,
'category': category
})
@login_required
def listing(request, listing_id):
listing = Listing.objects.get(id=listing_id)
comments = ListingComment.objects.filter(listing=listing)
if listing in request.user.user_watchlists.all():
watchlist = True
else:
watchlist = False
return render(request, 'auction/listing.html', {
'listing': listing,
'watchlist': watchlist,
'comments': comments
})
@login_required
def addWatchlist(request):
if request.method == 'POST':
listing = Listing.objects.get(id=request.POST['listing_id'])
listing.watchlist.add(request.user)
listing.save()
return redirect(reverse('auction:listing', args = (listing.id,)))
@login_required
def removeWatchlist(request):
if request.method == 'POST':
listing = Listing.objects.get(id=request.POST['listing_id'])
listing.watchlist.remove(request.user)
listing.save()
return redirect(reverse('auction:listing', args = (listing.id,)))
@login_required
def watchlist(request):
watchlists = request.user.user_watchlists.all()
return render(request, 'auction/watchlist.html', {
'watchlists': watchlists
})
@login_required
def addComment(request):
if request.method == 'POST':
id = request.POST['listing_id']
listing = Listing.objects.get(id=id)
content = request.POST['comment']
comment = ListingComment(content=content, listing=listing, author=request.user)
comment.save()
return redirect(reverse('auction:listing', args = (listing.id,)))
@login_required
def addBid(request):
if request.method == 'POST':
id = request.POST['listing_id']
listing = Listing.objects.get(id=id)
bid = float(request.POST['bid'])
current_bid = listing.bid_price.bid
comments = ListingComment.objects.filter(listing=listing)
if listing in request.user.user_watchlists.all():
watchlist = True
else:
watchlist = False
if bid > current_bid:
newBid = Bid(bid=bid, bid_owner=request.user)
newBid.save()
listing.bid_price = newBid
listing.save()
return render(request, 'auction/listing.html', {
'listing': listing,
'comments': comments,
'update': True,
'watchlist': watchlist
})
else:
return render(request, 'auction/listing.html', {
'listing': listing,
'comments': comments,
'update': False,
'watchlist': watchlist
})
@login_required
def removeListing(request, listing_id):
if request.method == 'POST':
listing = Listing.objects.get(id=listing_id)
listing.delete()
return redirect(reverse('auction:home'))
@login_required
def sellListing(request, listing_id):
if request.method == 'POST':
listing = Listing.objects.get(id=listing_id)
listing.is_active = False
listing.save()
buyer = listing.bid_price.bid_owner
comments = ListingComment.objects.filter(listing=listing)
return render(request, 'auction/listing.html', {
'listing': listing,
'comments': comments,
'message': f'Sold to {buyer} for ${listing.bid_price.bid}'
})
| samyarsworld/social-network | auction/views.py | views.py | py | 5,652 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Listing.objects.filter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Listing.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Listing",
"line_number": 12,
"usage_type": "name"
},
{
"api_name... |
40080606131 | import os
import connexion
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_bcrypt import Bcrypt
basedir = os.path.abspath(os.path.dirname(__file__))
# Create the Connexion application instance
connex_app = connexion.App(__name__, specification_dir=basedir)
# Get the underlying Flask app instance
app = connex_app.app
bcrypt = Bcrypt(app)
# Configure the SQLAlchemy part of the app instance
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////' + os.path.join(basedir, 'database.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config["DEBUG"] = True
# Create the SQLAlchemy db instance
db = SQLAlchemy(app)
# Initialize Marshmallow
ma = Marshmallow(app)
SECRET_KEY="\xb3\x88e\x0e\xab\xa93\x01x\x82\xd1\xe0\x1b\xb6f;\x1a\x91d\x91\xc1-I\x00"
TIME_FOR_TOKEN_DAYS = 0
TIME_FOR_TOKEN_SECONDS = 600
BCRYPT_LOG_ROUNDS = 13 | tuvetula/ApiRestFlask_videos | config.py | config.py | py | 950 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "connexion.App",
"line_n... |
10711040761 | import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
import sys
sys.path.append(os.getcwd())
from utils.prepareReviewDataset import intToWord, return_processed_data_and_labels
def decode_review(text):
return " ".join([intToWord.get(i, "?") for i in text])
train_data, train_labels, test_data, test_labels = return_processed_data_and_labels(250)
model = keras.Sequential()
model.add(keras.layers.Embedding(88000, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation="relu"))
model.add(keras.layers.Dense(1, activation="sigmoid"))
model.summary() # prints a summary of the model
model.compile(optimizer="adam", loss="binary_crossentropy", metrics = ["accuracy"])
x_val = train_data[:10000]
x_train = train_data[10000:]
y_val = train_labels[:10000]
y_train = train_labels[10000:]
fitModel = model.fit(x_train, y_train, epochs=40, batch_size=512, validation_data=(x_val, y_val), verbose=1)
def saveTheModel():
model.save("model.h5")
def printModelEvaluation():
results = model.evaluate(test_data, test_labels)
print(results)
def testModelOnTestData():
test_review = test_data[0]
predict = model.predict([test_review])
print("Review: ")
print(decode_review(test_review))
print("Prediction: " + str(predict[0]))
print("Actual: " + str(test_labels[0]))
| tung2389/Deep-Learning-projects | Text Classification/trainModel.py | trainModel.py | py | 1,347 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "utils.prepareReviewDataset.intTo... |
24046293426 | # Autor: João PauLo Falcão
# Github: https://github.com/jplfalcao
# Data de criação: 09/10/2023
# Data de modificação:
# Versão: 1.0
# Importando a biblioteca
import yt_dlp
# Endereço do vídeo a ser baixado
url = input("Digite a url do vídeo: ")
# Especificando o formato '.mp4' para o vídeo
ydl_opts = {
'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4'
}
# Usando a classe YoutubeDL para baixar o vídeo
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
print("Vídeo baixado com sucesso!")
| jplfalcao/python | youtube_video_download/ytvd.py | ytvd.py | py | 532 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "yt_dlp.YoutubeDL",
"line_number": 20,
"usage_type": "call"
}
] |
32623837320 | from base_factor import BaseFactor
from data.data_module import DataModule
class PEFactor(BaseFactor):
def __init__(self):
BaseFactor.__init__(self,'pe')
def compute(self,begin_date,end_date):
print(self.name,flush=True)
dm =DataModule()
df_daily = dm.get_k_data()
print(df_daily)
if __name__ == '__main__':
pe = PEFactor()
pe.compute(None,None) | bowenzz/Quant-Trading-System | factor/pe_factor.py | pe_factor.py | py | 403 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "base_factor.BaseFactor",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "base_factor.BaseFactor.__init__",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "base_factor.BaseFactor",
"line_number": 6,
"usage_type": "name"
},
{
"api_na... |
35416294037 |
import rdflib
from rdflib import Graph
from scipy.special import comb, perm
from itertools import combinations
g = Graph()
g.parse(r'/Users/shenghua/Desktop/ontology/ontology.owl')
deleted_str=r"http://www.semanticweb.org/zhou/ontologies/2020/3/untitled-ontology-19#"
len_deleted_st=len(deleted_str)
query = """
SELECT * WHERE {
?s rdfs:range ?o .
}
"""
query_class = """
SELECT ?o WHERE {
?s rdfs:subClassOf ?o .
}
"""
query1 = """SELECT ?downp ?downq ?action WHERE {
?action rdfs:domain ?dp.
?action rdfs:range ?rq.
?dcp rdfs:subClassOf ?dp.
?rcq rdfs:subClassOf ?rq.
?downp rdfs:subClassOf ?dcp.
?downq rdfs:subClassOf ?rcq.
}
"""
query2 = """SELECT ?dp ?rq ?action WHERE {
?action rdfs:domain ?dcp.
?action rdfs:range ?rcq.
?dp rdfs:subClassOf ?dcp.
?rq rdfs:subClassOf ?rcq.
}
"""
query3 = """SELECT ?dcp ?downq ?action WHERE {
?action rdfs:domain ?dp.
?action rdfs:range ?rq.
?dcp rdfs:subClassOf ?dp.
?rcq rdfs:subClassOf ?rq.
?downq rdfs:subClassOf ?rcq.
}
"""
query4 = """SELECT ?downp ?rcq ?action WHERE {
?action rdfs:domain ?dp.
?action rdfs:range ?rq.
?dcp rdfs:subClassOf ?dp.
?rcq rdfs:subClassOf ?rq.
?downp rdfs:subClassOf ?dcp.
}
"""
#print (g.subject_objects(predicate=None))
a=[]
for row in g.query(query):
for i in range(0,len(row)):
if (str(row[0])[len_deleted_st:])=='detect':
#print(str(row[1])[len_deleted_st:])
a.append(str(row[1])[len_deleted_st:])
#print (set(a))
detected_elements=set(a)
print ("detected_elements:")
print (detected_elements)
allclass=[]
for row in g.query(query_class):
allclass.append(str(row[0])[len_deleted_st:])
all_high_level_class=set(allclass)
print (all_high_level_class)
track=[]
for row in g.query(query):
for i in range(0,len(row)):
if (str(row[0])[len_deleted_st:])=='track':
#print(str(row[1])[len_deleted_st:])
track.append(str(row[1])[len_deleted_st:])
#print (set(a))
tracked_elements=set(track)
print ("tracked_elements:")
print (tracked_elements)
detected_or_tracked_elements=tracked_elements.union(detected_elements)
d=[]
for row in g.query(query1): #3-3
for i in range(0,len(row)):
if ((str(row[2])[len_deleted_st:len_deleted_st+6])=='affect')and ((str(row[0])[len_deleted_st:]) !=(str(row[1])[len_deleted_st:]) )and ((str(row[0])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[1])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[0])[len_deleted_st:]) not in all_high_level_class) and ((str(row[1])[len_deleted_st:]) not in all_high_level_class):
#print(str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:])
d.append((str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:]))
#print(len(d))
affected_elements_3_3=set(d)
print("affected_elements_3_3")
print(affected_elements_3_3)
d=[]
for row in g.query(query2): #2-2
print (row)
for i in range(0,len(row)):
if ((str(row[2])[len_deleted_st:len_deleted_st+6])=='affect')and ((str(row[0])[len_deleted_st:]) !=(str(row[1])[len_deleted_st:]) )and ((str(row[0])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[1])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[0])[len_deleted_st:]) not in all_high_level_class) and ((str(row[1])[len_deleted_st:]) not in all_high_level_class):
#print(str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:])
d.append((str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:]))
print(d)
#print(len(d))
affected_elements_2_2=set(d)
print("affected_elements_2_2")
print(affected_elements_2_2)
d=[]
for row in g.query(query3): #2-3
for i in range(0,len(row)):
if ((str(row[2])[len_deleted_st:len_deleted_st+6])=='affect')and ((str(row[0])[len_deleted_st:]) !=(str(row[1])[len_deleted_st:]) )and ((str(row[0])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[1])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[0])[len_deleted_st:]) not in all_high_level_class) and ((str(row[1])[len_deleted_st:]) not in all_high_level_class):
#print(str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:])
d.append((str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:]))
#print(len(d))
affected_elements_2_3=set(d)
print("affected_elements_2_3")
print(affected_elements_2_3)
d=[]
for row in g.query(query4): #3-2
for i in range(0,len(row)):
if ((str(row[2])[len_deleted_st:len_deleted_st+6])=='affect')and ((str(row[0])[len_deleted_st:]) !=(str(row[1])[len_deleted_st:]) )and ((str(row[0])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[1])[len_deleted_st:]) in (detected_or_tracked_elements)) and ((str(row[0])[len_deleted_st:]) not in all_high_level_class) and ((str(row[1])[len_deleted_st:]) not in all_high_level_class):
#print(str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:])
d.append((str(row[0])[len_deleted_st:],str(row[1])[len_deleted_st:]))
#print(len(d))
affected_elements_3_2=set(d)
print("affected_elements_3_2")
print(affected_elements_3_2)
affected_elements=((affected_elements_3_3.union(affected_elements_2_2)).union(affected_elements_3_2)).union(affected_elements_2_3)
set(affected_elements)
print ("affected_elements")
for i in affected_elements:
print(i)
print (affected_elements)
print (len(affected_elements))
potential_applications=[]
number_of_potential_applications=0
for j in range(1, len(affected_elements)+1):
number_of_potential_applications=number_of_potential_applications+comb(len(affected_elements), i)
print (number_of_potential_applications)
for p in list(combinations(affected_elements, 3)):
potential_applications.append(p)
| 0AnonymousSite0/Data-and-Codes-for-Integrating-Computer-Vision-and-Traffic-Modelling | 3. Shared codes/Codes for SPARQL query in the CV-TM ontology/Query of CV-TM Ontology.py | Query of CV-TM Ontology.py | py | 5,900 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "rdflib.Graph",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.special.comb",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 169,
"usage_type": "call"
}
] |
36025283136 | from ..Model import BootQModel
from Agent import Agent
import random
from chainer import cuda
try:
import cupy
except:
pass
import numpy as np
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class BootQAgent(Agent):
"""
Deep Exploration via Bootstrapped DQN
Args:
_shard (class): necessary, shared part of q func
_head (class): necessary, head part of q func
_env (Env): necessary, env to learn, should be rewritten from Env
_is_train (bool): default True
_optimizer (chainer.optimizers): not necessary, if not then func won't be updated
_replay (Replay): necessary for training
_K (int): how many heads to use
_mask_p (float): p to be passed when train for each head
_gpu (bool): whether to use gpu
_gamma (float): reward decay
_batch_size (int): how much tuples to pull from replay
_epsilon (float): init epsilon, p for choosing randomly
_epsilon_decay (float): epsilon *= epsilon_decay
_epsilon_underline (float): epsilon = max(epsilon_underline, epsilon)
_grad_clip (float): clip grad, 0 is no clip
"""
def __init__(self, _shared, _head, _env, _is_train=True,
_optimizer=None, _replay=None,
_K=10, _mask_p=0.5,
_gpu=False, _gamma=0.99, _batch_size=32,
_epsilon=0.5, _epsilon_decay=0.995, _epsilon_underline=0.01,
_grad_clip=1.):
super(BootQAgent, self).__init__()
self.is_train = _is_train
self.q_func = BootQModel(_shared, _head, _K)
if _gpu:
self.q_func.to_gpu()
self.env = _env
if self.is_train:
self.target_q_func = BootQModel(_shared, _head, _K)
if _gpu:
self.target_q_func.to_gpu()
self.target_q_func.copyparams(self.q_func)
if _optimizer:
self.q_opt = _optimizer
self.q_opt.setup(self.q_func)
self.replay = _replay
self.config.K = _K
self.config.mask_p = _mask_p
self.config.gpu = _gpu
self.config.gamma = _gamma
self.config.batch_size = _batch_size
self.config.epsilon = _epsilon
self.config.epsilon_decay = _epsilon_decay
self.config.epsilon_underline = _epsilon_underline
self.config.grad_clip = _grad_clip
def startNewGame(self):
super(BootQAgent, self).startNewGame()
# randomly choose head
self.use_head = random.randint(0, self.config.K - 1)
logger.info('Use head: ' + str(self.use_head))
def step(self):
if not self.env.in_game:
return False
# get current state
cur_state = self.env.getState()
# choose action in step
action = self.chooseAction(self.q_func, cur_state)
# do action and get reward
reward = self.env.doAction(action)
logger.info('Action: ' + str(action) + '; Reward: %.3f' % (reward))
if self.is_train:
# get new state
next_state = self.env.getState()
# store replay_tuple into memory pool
self.replay.push(
cur_state, action, reward, next_state,
np.random.binomial(1, self.config.mask_p,
(self.config.K)).tolist()
)
return self.env.in_game
def forward(self, _cur_x, _next_x, _state_list):
# get cur outputs
cur_output = self.func(self.q_func, _cur_x, True)
# get next outputs, NOT target
next_output = self.func(self.q_func, _next_x, False)
# choose next action for each output
next_action = [
self.env.getBestAction(
o.data,
_state_list
) for o in next_output # for each head in Model
]
# get next outputs, target
next_output = self.func(self.target_q_func, _next_x, False)
return cur_output, next_output, next_action
def grad(self, _cur_output, _next_output, _next_action,
_batch_tuples, _err_list, _err_count, _k):
# alloc
if self.config.gpu:
_cur_output.grad = cupy.zeros_like(_cur_output.data)
else:
_cur_output.grad = np.zeros_like(_cur_output.data)
# compute grad from each tuples
for i in range(len(_batch_tuples)):
# if use bootstrap and masked
if not _batch_tuples[i].mask[_k]:
continue
cur_action_value = \
_cur_output.data[i][_batch_tuples[i].action].tolist()
reward = _batch_tuples[i].reward
target_value = reward
# if not empty position, not terminal state
if _batch_tuples[i].next_state.in_game:
next_action_value = \
_next_output.data[i][_next_action[i]].tolist()
target_value += self.config.gamma * next_action_value
loss = cur_action_value - target_value
_cur_output.grad[i][_batch_tuples[i].action] = 2 * loss
_err_list[i] += abs(loss)
_err_count[i] += 1
def doTrain(self, _batch_tuples, _weights):
cur_x = self.getCurInputs(_batch_tuples)
next_x = self.getNextInputs(_batch_tuples)
# if bootstrap, they are all list for heads
cur_output, next_output, next_action = self.forward(
cur_x, next_x, [t.next_state for t in _batch_tuples])
# compute grad for each head
err_list = [0.] * len(_batch_tuples)
err_count = [0.] * len(_batch_tuples)
for k in range(self.config.K):
self.grad(cur_output[k], next_output[k], next_action[k],
_batch_tuples, err_list, err_count, k)
if _weights is not None:
if self.config.gpu:
_weights = cuda.to_gpu(_weights)
self.gradWeight(cur_output[k], _weights)
if self.config.grad_clip:
self.gradClip(cur_output[k], self.config.grad_clip)
# backward
cur_output[k].backward()
# adjust grads of shared
for param in self.q_func.shared.params():
param.grad /= self.config.K
# avg err
for i in range(len(err_list)):
if err_count[i] > 0:
err_list[i] /= err_count[i]
else:
err_list[i] = None
return err_list
def chooseAction(self, _model, _state):
if self.is_train:
# update epsilon
self.updateEpsilon()
random_value = random.random()
if random_value < self.config.epsilon:
# randomly choose
return self.env.getRandomAction(_state)
else:
# use model to choose
x_data = self.env.getX(_state)
output = self.func(_model, x_data, False)
output = output[self.use_head]
logger.info(str(output.data))
return self.env.getBestAction(output.data, [_state])[0]
else:
x_data = self.env.getX(_state)
output = self.func(_model, x_data, False)
action_dict = {}
for o in output:
action = self.env.getBestAction(o.data, [_state])[0]
if action not in action_dict.keys():
action_dict[action] = 1
else:
action_dict[action] += 1
logger.info(str(action_dict))
max_k = -1
max_v = 0
for k, v in zip(action_dict.keys(), action_dict.values()):
if v > max_v:
max_k = k
max_v = v
return max_k
| ppaanngggg/DeepRL | DeepRL/Agent/BootQAgent.py | BootQAgent.py | py | 7,856 | python | en | code | 29 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "Agent.Agent",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "Model.BootQModel",
... |
10233643835 | from typing import List, Optional
from twitchio import PartialUser, Client, Channel, CustomReward, parse_timestamp
__all__ = (
"PoolError",
"PoolFull",
"PubSubMessage",
"PubSubBitsMessage",
"PubSubBitsBadgeMessage",
"PubSubChatMessage",
"PubSubBadgeEntitlement",
"PubSubChannelPointsMessage",
"PubSubModerationAction",
"PubSubModerationActionModeratorAdd",
"PubSubModerationActionBanRequest",
"PubSubModerationActionChannelTerms",
"PubSubChannelSubscribe",
)
class PubSubError(Exception):
pass
class ConnectionFailure(PubSubError):
pass
class PoolError(PubSubError):
pass
class PoolFull(PoolError):
pass
class PubSubChatMessage:
"""
A message received from twitch.
Attributes
-----------
content: :class:`str`
The content received
id: :class:`str`
The id of the payload
type: :class:`str`
The payload type
"""
__slots__ = "content", "id", "type"
def __init__(self, content: str, id: str, type: str):
self.content = content
self.id = id
self.type = type
class PubSubBadgeEntitlement:
"""
A badge entitlement
Attributes
-----------
new: :class:`int`
The new badge
old: :class:`int`
The old badge
"""
__slots__ = "new", "old"
def __init__(self, new: int, old: int):
self.new = new
self.old = old
class PubSubMessage:
"""
A message from the pubsub websocket
Attributes
-----------
topic: :class:`str`
The topic subscribed to
"""
__slots__ = "topic", "_data"
def __init__(self, client: Client, topic: Optional[str], data: dict):
self.topic = topic
self._data = data
class PubSubBitsMessage(PubSubMessage):
"""
A Bits message
Attributes
-----------
message: :class:`PubSubChatMessage`
The message sent along with the bits.
badge_entitlement: Optional[:class:`PubSubBadgeEntitlement`]
The badges received, if any.
bits_used: :class:`int`
The amount of bits used.
channel_id: :class:`int`
The channel the bits were given to.
user: Optional[:class:`twitchio.PartialUser`]
The user giving the bits. Can be None if anonymous.
version: :class:`str`
The event version.
"""
__slots__ = "badge_entitlement", "bits_used", "channel_id", "context", "anonymous", "message", "user", "version"
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
data = data["message"]
self.message = PubSubChatMessage(data["data"]["chat_message"], data["message_id"], data["message_type"])
self.badge_entitlement = (
PubSubBadgeEntitlement(
data["data"]["badge_entitlement"]["new_version"], data["data"]["badge_entitlement"]["old_version"]
)
if data["data"]["badge_entitlement"]
else None
)
self.bits_used: int = data["data"]["bits_used"]
self.channel_id: int = int(data["data"]["channel_id"])
self.user = (
PartialUser(client._http, data["data"]["user_id"], data["data"]["user_name"])
if data["data"]["user_id"]
else None
)
self.version: str = data["version"]
class PubSubBitsBadgeMessage(PubSubMessage):
"""
A Badge message
Attributes
-----------
user: :class:`twitchio.PartialUser`
The user receiving the badge.
channel: :class:`twitchio.Channel`
The channel the user received the badge on.
badge_tier: :class:`int`
The tier of the badge
message: :class:`str`
The message sent in chat.
timestamp: :class:`datetime.datetime`
The time the event happened
"""
__slots__ = "user", "channel", "badge_tier", "message", "timestamp"
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
data = data["message"]
self.user = PartialUser(client._http, data["user_id"], data["user_name"])
self.channel: Channel = client.get_channel(data["channel_name"]) or Channel(
name=data["channel_name"], websocket=client._connection
)
self.badge_tier: int = data["badge_tier"]
self.message: str = data["chat_message"]
self.timestamp = parse_timestamp(data["time"])
class PubSubChannelPointsMessage(PubSubMessage):
"""
A Channel points redemption
Attributes
-----------
timestamp: :class:`datetime.datetime`
The timestamp the event happened.
channel_id: :class:`int`
The channel the reward was redeemed on.
id: :class:`str`
The id of the reward redemption.
user: :class:`twitchio.PartialUser`
The user redeeming the reward.
reward: :class:`twitchio.CustomReward`
The reward being redeemed.
input: Optional[:class:`str`]
The input the user gave, if any.
status: :class:`str`
The status of the reward.
"""
__slots__ = "timestamp", "channel_id", "user", "id", "reward", "input", "status"
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
redemption = data["message"]["data"]["redemption"]
self.timestamp = parse_timestamp(redemption["redeemed_at"])
self.channel_id: int = int(redemption["channel_id"])
self.id: str = redemption["id"]
self.user = PartialUser(client._http, redemption["user"]["id"], redemption["user"]["display_name"])
self.reward = CustomReward(client._http, redemption["reward"], PartialUser(client._http, self.channel_id, None))
self.input: Optional[str] = redemption.get("user_input")
self.status: str = redemption["status"]
class PubSubModerationAction(PubSubMessage):
"""
A basic moderation action.
Attributes
-----------
action: :class:`str`
The action taken.
args: List[:class:`str`]
The arguments given to the command.
created_by: :class:`twitchio.PartialUser`
The user that created the action.
message_id: Optional[:class:`str`]
The id of the message that created this action.
target: :class:`twitchio.PartialUser`
The target of this action.
from_automod: :class:`bool`
Whether this action was done automatically or not.
"""
__slots__ = "action", "args", "created_by", "message_id", "target", "from_automod"
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
self.action: str = data["message"]["data"]["moderation_action"]
self.args: List[str] = data["message"]["data"]["args"]
self.created_by = PartialUser(
client._http, data["message"]["data"]["created_by_user_id"], data["message"]["data"]["created_by"]
)
self.message_id: Optional[str] = data["message"]["data"].get("msg_id")
self.target = (
PartialUser(
client._http, data["message"]["data"]["target_user_id"], data["message"]["data"]["target_user_login"]
)
if data["message"]["data"]["target_user_id"]
else None
)
self.from_automod: bool = data["message"]["data"].get("from_automod", False)
class PubSubModerationActionBanRequest(PubSubMessage):
"""
A Ban/Unban event
Attributes
-----------
action: :class:`str`
The action taken.
args: List[:class:`str`]
The arguments given to the command.
created_by: :class:`twitchio.PartialUser`
The user that created the action.
target: :class:`twitchio.PartialUser`
The target of this action.
"""
__slots__ = "action", "args", "created_by", "message_id", "target"
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
self.action: str = data["message"]["data"]["moderation_action"]
self.args: List[str] = data["message"]["data"]["moderator_message"]
self.created_by = PartialUser(
client._http, data["message"]["data"]["created_by_id"], data["message"]["data"]["created_by_login"]
)
self.target = (
PartialUser(
client._http, data["message"]["data"]["target_user_id"], data["message"]["data"]["target_user_login"]
)
if data["message"]["data"]["target_user_id"]
else None
)
class PubSubModerationActionChannelTerms(PubSubMessage):
"""
A channel Terms update.
Attributes
-----------
type: :class:`str`
The type of action taken.
channel_id: :class:`int`
The channel id the action occurred on.
id: :class:`str`
The id of the Term.
text: :class:`str`
The text of the modified Term.
requester: :class:`twitchio.PartialUser`
The requester of this Term.
"""
__slots__ = "type", "channel_id", "id", "text", "requester", "expires_at", "updated_at"
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
self.type: str = data["message"]["data"]["type"]
self.channel_id = int(data["message"]["data"]["channel_id"])
self.id: str = data["message"]["data"]["id"]
self.text: str = data["message"]["data"]["text"]
self.requester = PartialUser(
client._http, data["message"]["data"]["requester_id"], data["message"]["data"]["requester_login"]
)
self.expires_at = (
parse_timestamp(data["message"]["data"]["expires_at"]) if data["message"]["data"]["expires_at"] else None
)
self.updated_at = (
parse_timestamp(data["message"]["data"]["updated_at"]) if data["message"]["data"]["updated_at"] else None
)
class PubSubChannelSubscribe(PubSubMessage):
"""
Channel subscription
Attributes
-----------
channel: :class:`twitchio.Channel`
Channel that has been subscribed or subgifted.
context: :class:`str`
Event type associated with the subscription product.
user: Optional[:class:`twitchio.PartialUser`]
The person who subscribed or sent a gift subscription. Can be None if anonymous.
message: :class:`str`
Message sent with the sub/resub.
emotes: Optional[List[:class:`dict`]]
Emotes sent with the sub/resub.
is_gift: :class:`bool`
If this sub message was caused by a gift subscription.
recipient: Optional[:class:`twitchio.PartialUser`]
The person the who received the gift subscription.
sub_plan: :class:`str`
Subscription Plan ID.
sub_plan_name: :class:`str`
Channel Specific Subscription Plan Name.
time: :class:`datetime.datetime`
Time when the subscription or gift was completed. RFC 3339 format.
cumulative_months: :class:`int`
Cumulative number of tenure months of the subscription.
streak_months: Optional[:class:`int`]
Denotes the user's most recent (and contiguous) subscription tenure streak in the channel.
multi_month_duration: Optional[:class:`int`]
Number of months gifted as part of a single, multi-month gift OR number of months purchased as part of a multi-month subscription.
"""
__slots__ = (
"channel",
"context",
"user",
"message",
"emotes",
"is_gift",
"recipient",
"sub_plan",
"sub_plan_name",
"time",
"cumulative_months",
"streak_months",
"multi_month_duration",
)
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
subscription = data["message"]
self.channel: Channel = client.get_channel(subscription["channel_name"]) or Channel(
name=subscription["channel_name"], websocket=client._connection
)
self.context: str = subscription["context"]
try:
self.user = PartialUser(client._http, int(subscription["user_id"]), subscription["user_name"])
except KeyError:
self.user = None
self.message: str = subscription["sub_message"]["message"]
try:
self.emotes = subscription["sub_message"]["emotes"]
except KeyError:
self.emotes = None
self.is_gift: bool = subscription["is_gift"]
try:
self.recipient = PartialUser(
client._http, int(subscription["recipient_id"]), subscription["recipient_user_name"]
)
except KeyError:
self.recipient = None
self.sub_plan: str = subscription["sub_plan"]
self.sub_plan_name: str = subscription["sub_plan_name"]
self.time = parse_timestamp(subscription["time"])
try:
self.cumulative_months = int(subscription["cumulative_months"])
except KeyError:
self.cumulative_months = None
try:
self.streak_months = int(subscription["streak_months"])
except KeyError:
self.streak_months = None
try:
self.multi_month_duration = int(subscription["multi_month_duration"])
except KeyError:
self.multi_month_duration = None
class PubSubModerationActionModeratorAdd(PubSubMessage):
"""
A moderator add event.
Attributes
-----------
channel_id: :class:`int`
The channel id the moderator was added to.
moderation_action: :class:`str`
Redundant.
target: :class:`twitchio.PartialUser`
The person who was added as a mod.
created_by: :class:`twitchio.PartialUser`
The person who added the mod.
"""
__slots__ = "channel_id", "target", "moderation_action", "created_by"
def __init__(self, client: Client, topic: str, data: dict):
super().__init__(client, topic, data)
self.channel_id = int(data["message"]["data"]["channel_id"])
self.moderation_action: str = data["message"]["data"]["moderation_action"]
self.target = PartialUser(
client._http, data["message"]["data"]["target_user_id"], data["message"]["data"]["target_user_login"]
)
self.created_by = PartialUser(
client._http, data["message"]["data"]["created_by_user_id"], data["message"]["data"]["created_by"]
)
_mod_actions = {
"approve_unban_request": PubSubModerationActionBanRequest,
"deny_unban_request": PubSubModerationActionBanRequest,
"channel_terms_action": PubSubModerationActionChannelTerms,
"moderator_added": PubSubModerationActionModeratorAdd,
"moderation_action": PubSubModerationAction,
}
def _find_mod_action(client: Client, topic: str, data: dict):
typ = data["message"]["type"]
if typ in _mod_actions:
return _mod_actions[typ](client, topic, data)
else:
raise ValueError(f"unknown pubsub moderation action '{typ}'")
_mapping = {
"channel-bits-events-v2": ("pubsub_bits", PubSubBitsMessage),
"channel-bits-badge-unlocks": ("pubsub_bits_badge", PubSubBitsBadgeMessage),
"channel-subscribe-events-v1": ("pubsub_subscription", PubSubChannelSubscribe),
"chat_moderator_actions": ("pubsub_moderation", _find_mod_action),
"channel-points-channel-v1": ("pubsub_channel_points", PubSubChannelPointsMessage),
"whispers": ("pubsub_whisper", None),
}
def create_message(client, msg: dict):
topic = msg["data"]["topic"].split(".")[0]
r = _mapping[topic]
return r[0], r[1](client, topic, msg["data"])
| PythonistaGuild/TwitchIO | twitchio/ext/pubsub/models.py | models.py | py | 15,690 | python | en | code | 714 | github-code | 6 | [
{
"api_name": "twitchio.Client",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "twitchio.Client",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "twitchio.PartialUser... |
6814941665 | from urllib.request import urlopen
from pdfminer.high_level import extract_text
def pdf_to_text(data):
with urlopen(data) as wFile:
text = extract_text(wFile)
return text
docUrl = 'https://diavgeia.gov.gr/doc/ΩΕΚ64653ΠΓ-2ΞΡ'
print(pdf_to_text(docUrl))
| IsVeneti/greek-gov-nlp | Preprocessing/ConvertPdf.py | ConvertPdf.py | py | 280 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pdfminer.high_level.extract_text",
"line_number": 8,
"usage_type": "call"
}
] |
70159895227 | __all__ = [
'points_to_morton',
'morton_to_points',
'points_to_corners',
'coords_to_trilinear',
'unbatched_points_to_octree',
'quantize_points'
]
import torch
from kaolin import _C
def quantize_points(x, level):
r"""Quantize :math:`[-1, 1]` float coordinates in to
:math:`[0, (2^{level})-1]` integer coords.
If a point is out of the range :math:`[-1, 1]` it will be clipped to it.
Args:
x (torch.FloatTensor): Floating point coordinates,
must be of last dimension 3.
level (int): Level of the grid
Returns
(torch.ShortTensor): Quantized 3D points, of same shape than x.
"""
res = 2 ** level
qpts = torch.floor(torch.clamp(res * (x + 1.0) / 2.0, 0, res - 1.)).short()
return qpts
def unbatched_points_to_octree(points, level, sorted=False):
r"""Convert (quantized) 3D points to an octree.
This function assumes that the points are all in the same frame of reference
of :math:`[0, 2^level]`. Note that SPC.points does not satisfy this constraint.
Args:
points (torch.ShortTensor):
The Quantized 3d points. This is not exactly like SPC points hierarchies
as this is only the data for a specific level.
level (int): Max level of octree, and the level of the points.
sorted (bool): True if the points are unique and sorted in morton order.
Returns:
(torch.ByteTensor):
the generated octree,
of shape :math:`(2^\text{level}, 2^\text{level}, 2^\text{level})`.
"""
if not sorted:
unique = torch.unique(points.contiguous(), dim=0).contiguous()
morton = torch.sort(points_to_morton(unique).contiguous())[0]
points = morton_to_points(morton.contiguous())
return _C.ops.spc.points_to_octree(points.contiguous(), level)
def points_to_morton(points):
r"""Convert (quantized) 3D points to morton codes.
Args:
points (torch.ShortTensor):
Quantized 3D points. This is not exactly like SPC points hierarchies
as this is only the data for a specific level,
of shape :math:`(\text{num_points}, 3)`.
Returns:
(torch.LongTensor):
The morton code of the points,
of shape :math:`(\text{num_points})`
Examples:
>>> inputs = torch.tensor([
... [0, 0, 0],
... [0, 0, 1],
... [0, 0, 2],
... [0, 0, 3],
... [0, 1, 0]], device='cuda', dtype=torch.int16)
>>> points_to_morton(inputs)
tensor([0, 1, 8, 9, 2], device='cuda:0')
"""
shape = list(points.shape)[:-1]
points = points.reshape(-1, 3)
return _C.ops.spc.points_to_morton_cuda(points.contiguous()).reshape(*shape)
def morton_to_points(morton):
r"""Convert morton codes to points.
Args:
morton (torch.LongTensor): The morton codes of quantized 3D points,
of shape :math:`(\text{num_points})`.
Returns:
(torch.ShortInt):
The points quantized coordinates,
of shape :math:`(\text{num_points}, 3)`.
Examples:
>>> inputs = torch.tensor([0, 1, 8, 9, 2], device='cuda')
>>> morton_to_points(inputs)
tensor([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 0, 3],
[0, 1, 0]], device='cuda:0', dtype=torch.int16)
"""
shape = list(morton.shape)
shape.append(3)
morton = morton.reshape(-1)
return _C.ops.spc.morton_to_points_cuda(morton.contiguous()).reshape(*shape)
def points_to_corners(points):
r"""Calculates the corners of the points assuming each point is the 0th bit corner.
Args:
points (torch.ShortTensor): Quantized 3D points,
of shape :math:`(\text{num_points}, 3)`.
Returns:
(torch.ShortTensor):
Quantized 3D new points,
of shape :math:`(\text{num_points}, 8, 3)`.
Examples:
>>> inputs = torch.tensor([
... [0, 0, 0],
... [0, 2, 0]], device='cuda', dtype=torch.int16)
>>> points_to_corners(inputs)
tensor([[[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]],
<BLANKLINE>
[[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[0, 3, 1],
[1, 2, 0],
[1, 2, 1],
[1, 3, 0],
[1, 3, 1]]], device='cuda:0', dtype=torch.int16)
"""
shape = list(points.shape)
shape.insert(-1, 8)
return _C.ops.spc.points_to_corners_cuda(points.contiguous()).reshape(*shape)
def coords_to_trilinear(coords, points):
r"""Calculates the coefficients for trilinear interpolation.
To interpolate with the coefficients, do:
``torch.sum(features * coeffs, dim=-1)``
with ``features`` of shape :math:`(\text{num_points}, 8)`
Args:
coords (torch.FloatTensor): Floating point 3D points,
of shape :math:`(\text{num_points}, 3)`.
points (torch.ShortTensor): Quantized 3D points (the 0th bit of the voxel x is in),
of shape :math:`(\text{num_points}, 3)`.
Returns:
(torch.FloatTensor):
The trilinear interpolation coefficients,
of shape :math:`(\text{num_points}, 8)`.
"""
shape = list(points.shape)
shape[-1] = 8
points = points.reshape(-1, 3)
coords = coords.reshape(-1, 3)
return _C.ops.spc.coords_to_trilinear_cuda(coords.contiguous(), points.contiguous()).reshape(*shape)
| silence394/GraphicsSamples | Nvida Samples/kaolin/kaolin/ops/spc/points.py | points.py | py | 5,820 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.floor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.sort",
"line_number": ... |
18718175573 | from django.shortcuts import render, HttpResponse, redirect
from .models import Note
from django.urls import reverse
# Create your views here.
def index(request):
context = {
"notes": Note.objects.all(),
}
return render(request, 'notes/index.html', context)
def create(request):
if request.method == 'POST':
title = request.POST['title']
description = request.POST['description']
Note.objects.create(title=title, description=description)
context = {
'notes': Note.objects.all(),
}
return render(request, 'notes/notes_index.html', context)
def destroy(request, note_id):
if request.method == 'POST':
Note.objects.get(id=int(note_id)).delete()
context = {
'notes': Note.objects.all()
}
return render(request, 'notes/notes_index.html', context) | mtjhartley/codingdojo | dojoassignments/python/django/full_stack_django/ajax_notes/apps/notes/views.py | views.py | py | 846 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "models.Note.objects.all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Note.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.Note",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.shor... |
16104264799 | import gatt
import numpy as np
import time
import datetime
class BLE(gatt.Device):
def __init__(self, Age, Height, Gender, Write, manager,mac_address):
super().__init__(manager = manager , mac_address = mac_address)
self.Age = Age
self.Height = Height
self.Gender = Gender
self.Write = Write
self.values = []
self.ReadStatus = False
self.Read = True
self.count = 0
self.manager = manager
def Write_Bytes(self, c):
#Write bytes to initiate communication
magicBytes = [0xac, 0x02, 0xf7, 0x00, 0x00, 0x00, 0xcc, 0xc3]
c.write_value(magicBytes)
#write 2nd bytes to server
magicBytes = [0xac, 0x02, 0xfa, 0x00, 0x00, 0x00, 0xcc, 0xc6]
c.write_value(magicBytes)
#Calculate offset for error checking bits
Offset = self.Age + self.Height - 56
#Sending Age and height with offset to server
magicBytes = [0xac, 0x02, 0xfb, 0x01, self.Age, self.Height, 0xcc, Offset]
c.write_value(magicBytes)
#time.sleep(1)
#Calculate offset for Present date
now = datetime.datetime.now()
#Write present date to server(scale)
Offset = (now.year-1799) + now.month + now.day
magicBytes = [0xac, 0x02, 0xfd, (now.year - 2000), now.month, now.day, 0xcc, Offset]
c.write_value(magicBytes)
#time.sleep(1)
#Calculate offset for time
now = datetime.datetime.now()
Offset = now.hour + now.minute
magicBytes = [0xac, 0x02, 0xfc, now.hour, now.minute, 56, 0xcc, Offset]
c.write_value(magicBytes)
#time.sleep(1)
magicBytes = [0xac, 0x02, 0xfe, 0x06, 0x00, 0x00, 0xcc, 0xd0]
c.write_value(magicBytes)
#time.sleep(0.6)
def body_composition(self):
#Weight of person shifting higher bit by 8bit position to left and or with lower bit to get 16bit value
weight = float(((self.values[0][12] << 8) | self.values[0][13]) / 10) #. kg
Fat = float (((self.values[0][18] << 8 ) | self.values[0][19])/ 10) #.%
Calorie = int((self.values[1][5] << 8) | self.values[1][6] ) #. kcal
bone_mass = float(((self.values[1][7] << 8 ) | self.values[1][8]) / 10 ) #. kg
water = float(((self.values[1][9] << 8) | self.values[1][10]) / 10) #.% body composition
MetabolicAge = int(self.values[1][11]) #In years
print ("Weight ===================>" + str(weight) +".Kg")
print ("Fat ======================>" + str(Fat) + "%")
print ("Calorie ==================>" + str(Calorie) + "Kcal")
print ("Bone_mass ================>" + str(bone_mass) + "Kg")
print ("Water ====================>" + str(water) + "%")
print ("MetabolicAge =============>" + str(MetabolicAge) + "years")
return {"Weight" : weight,
"Fat" : Fat,
"Calorie" : Calorie,
"BoneMass": bone_mass,
"Water" : water ,
"MAge" : MetabolicAge }
def services_resolved(self):
super().services_resolved()
for s in self.services:
if s.uuid == '0000ffb0-0000-1000-8000-00805f9b34fb': #services 0016
for c in s.characteristics:
if (c.uuid == '0000ffb1-0000-1000-8000-00805f9b34fb') and (self.Write == True): #char 0017
self.Write_Bytes (c)
self.Write = False
print(c)
#c.enable_notifications()
if c.uuid == '0000ffb2-0000-1000-8000-00805f9b34fb' and self.Read == True:
c.read_value()
c.enable_notifications()
print(c)
self.Read = False
def characteristic_value_updated(self, characteristic, value):
print("Firmware version:", value)
check = value[0] + value[1]
if check == 0:
self.ReadStatus = True
if len(value) == 20 and self.ReadStatus == True :
if self.count == 0:
self.values.append(value)
self.count = self.count + 1
else:
if value != self.values[0]:
self.values.append(value)
self.manager.stop()
| sanbuddhacharyas/MEDICAL_CARE | source_code/BLE.py | BLE.py | py | 4,518 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gatt.Device",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "datetime... |
30455799471 | import pandas as pd
import tensorflow as tf
import argparse
from . import data
TRAIN_URL = data.TRAIN_URL
TEST_URL = data.TEST_URL
CSV_COLUMN_NAMES = data.CSV_COLUMN_NAMES
LABELS = data.LABELS
def maybe_download():
train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
return train_path, test_path
def load_data(y_name='Labels'):
"""Returns the dataset as (train_x, train_y), (test_x, test_y)."""
train_path, test_path = maybe_download()
train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
train_x, train_y = train, train.pop(y_name)
test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
test_x, test_y = test, test.pop(y_name)
return (train_x, train_y), (test_x, test_y)
def train_input_fn(features, labels, batch_size):
"""An input function for training"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle, repeat, and batch the examples.
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
# Return the dataset.
return dataset
def eval_input_fn(features, labels, batch_size):
"""An input function for evaluation"""
features = dict(features)
if labels is None:
# No labels, use only features.
inputs = features
else:
inputs = (features, labels)
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(inputs)
# Batch the examples
assert batch_size is not None, "batch_size must not be None"
dataset = dataset.batch(batch_size)
# Return the dataset.
return dataset
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000,
type=int, help='number of training steps')
globalClassifier = None
globalArgs = None
def main(argv):
args = parser.parse_args(argv[1:])
# Fetch the data
(train_x, train_y), (test_x, test_y) = load_data()
# Feature columns describe how to use the input.
my_feature_columns = []
for key in train_x.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
# Build 2 hidden layer DNN with 10, 10 units respectively.
classifier = tf.estimator.DNNClassifier(
feature_columns=my_feature_columns, hidden_units=[10, 10], n_classes=25)
# Train the Model.
classifier.train(input_fn=lambda: train_input_fn(
train_x, train_y, args.batch_size), steps=args.train_steps)
# Evaluate the model.
eval_result = classifier.evaluate(
input_fn=lambda: eval_input_fn(test_x, test_y, args.batch_size))
# Set global variables
global globalClassifier
global globalArgs
globalClassifier = classifier
globalArgs = args
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
def getModelData():
return globalClassifier, globalArgs
| RajithaKumara/Best-Fit-Job-ML | classifier/estimator/model.py | model.py | py | 3,077 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tensorflow.keras.utils.get_file",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.get_file",
"line_number": 16,
"usage_type": "call"
},
{
... |
42924345016 | import sys
import os
import time
import re
import csv
import cv2
import tensorflow as tf
import numpy as np
#import pandas as pd
from PIL import Image
from matplotlib import pyplot as plt
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# if len(sys.argv) < 0:
# print('Usage: python {} test_image_path checkpoint_path'.format(sys.argv[0]))
# exit()
def name_path_files(file_dir):
# 文件名及文件路径列表
path_files = []
name_files = []
for roots, dirs, files in os.walk(file_dir):
for f in files:
tmp = os.path.join(roots, f)
if ('.jpg' in tmp):
path_files.append(tmp)
name_files.append(f)
try:
# user enters in the filename of the csv file to convert
# in_filename = argv[1:]
print("files received list :" + str(path_files))
except (IndexError, IOError) as e:
print("invalid file detected...")
exit(1)
# print(path_filename)
# print(only_filename)
path_files_name = np.ravel(path_files)
only_file_name = np.ravel(name_files)
# print(path_files)
# print('#####' * 10)
# print(name_files)
return path_files, name_files
PATH_TO_CKPT = sys.argv[1]
PATH_TO_LABELS = 'annotations/label_map.pbtxt'
NUM_CLASSES = 4
IMAGE_SIZE = (48, 32)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with detection_graph.as_default():
with tf.Session(graph=detection_graph, config=config) as sess:
path_files, name_files = name_path_files('./images/verification/')
writer_lists = []
for path_f in path_files:
start_time = time.time()
print(time.ctime())
image = Image.open(path_f)
image_np = np.array(image).astype(np.uint8)
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# print(classes)
# print(num_detections)
eval_dicts = {'boxes':boxes, 'scores':scores, 'classes':classes, 'num_detections':num_detections}
use_time = time.time() - start_time
vis_util.visualize_boxes_and_labels_on_image_array(
image_np, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores),
category_index, use_normalized_coordinates=True, min_score_thresh=0.5, line_thickness=2)
#vis_util.VisualizeSingleFrameDetections.images_from_evaluation_dict(image_np,eval_dict=eval_dicts)
#categories_glob = []
print(category_index)
f_name = re.split('/',path_f)
#print(category_index.get(value))
for index, value in enumerate(classes[0]):
if scores[0, index] > 0.5:
score = scores[0, index]
categories_glob = category_index.get(value)
writer_list = [f_name[-1], categories_glob['id'], categories_glob['name'], score, use_time]
writer_lists.append(writer_list)
# print(writer_list)
# print(index, '---', categories_glob,'---', score )
print(boxes)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
#plt.savefig('./test_result/predicted_' + f_name[-1])
cv2.imwrite('./test_result/predicted_' + f_name[-1] + ".jpg", image_np)
#writer_lists.append(writer_list)
#print('Image:{} Num: {} classes:{} scores:{} Time: {:.3f}s'.format(f_name[-1], num_detections, 'np.squeeze(classes[:2])', np.max(np.squeeze(scores)), use_time))
with open('./test_result/test_result.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['test file', 'id', 'classes', 'scores', 'time/s'])
writer.writerows(writer_lists)
| ppalantir/axjingWorks | AcademicAN/TwoStage/test_batch.py | test_batch.py | py | 5,072 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.ravel",
"line_number": 38... |
3547587015 | import numpy as np
import tensorflow as tf
from structure_vgg import CNN
from datetime import datetime
import os
from tfrecord_reader import tfrecord_read
import config
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('dataset', 'dset1', 'Choose dset1 or dset2 for training, default dset1.')
tf.flags.DEFINE_string('checkpoint', None,
'Whether use a pre-trained checkpoint to continue training, default None.')
def main():
checkpoint_dir = 'checkpoints'
if FLAGS.checkpoint is not None:
checkpoint_path = os.path.join(checkpoint_dir, FLAGS.checkpoint.lstrip('checkpoints/'))
else:
current_time = datetime.now().strftime('%Y%m%d-%H%M')
checkpoint_path = os.path.join(checkpoint_dir, '{}'.format(current_time))
try:
os.makedirs(checkpoint_path)
except os.error:
print('Unable to make checkpoints direction: %s' % checkpoint_path)
model_save_path = os.path.join(checkpoint_path, 'model.ckpt')
cnn = CNN()
read_for_train = tfrecord_read(
FLAGS.dataset, config.batch_size, config.num_epochs, config.train_slice, training=True)
read_for_val = tfrecord_read(
FLAGS.dataset, config.batch_size, config.num_epochs, config.train_slice, training=False)
saver = tf.train.Saver()
print('Build session.')
tfconfig = tf.ConfigProto()
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
if FLAGS.checkpoint is not None:
print('Restore from pre-trained model.')
checkpoint = tf.train.get_checkpoint_state(checkpoint_path)
meta_graph_path = checkpoint.model_checkpoint_path + '.meta'
restore = tf.train.import_meta_graph(meta_graph_path)
restore.restore(sess, tf.train.latest_checkpoint(checkpoint_path))
step = int(meta_graph_path.split('-')[2].split('.')[0])
else:
print('Initialize.')
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
step = 0
epoch_pre = step * config.batch_size // config.file_num[FLAGS.dataset]
loss_list = []
accuracy_list = []
val_epoch_accuracies = []
# train_writer = tf.summary.FileWriter('log', sess.graph)
# summary_op = tf.summary.merge_all()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
print('Start training:')
while not coord.should_stop():
X_train_batch, y_train_batch = sess.run([read_for_train.X_batch, read_for_train.y_batch])
loss, train_batch_accuracy, _, lr = sess.run([cnn.loss, cnn.batch_accuracy, cnn.optimizer, cnn.learning_rate],
{cnn.X_inputs: X_train_batch, cnn.y_inputs: y_train_batch,
cnn.keep_prob: config.keep_prob, cnn.training: True})
loss_list.append(loss)
X_val_batch, y_val_batch = sess.run([read_for_val.X_batch, read_for_val.y_batch])
correct_pre_num, val_batch_accuracy = sess.run([cnn.correct_pre_num, cnn.batch_accuracy],
{cnn.X_inputs: X_val_batch, cnn.y_inputs: y_val_batch,
cnn.keep_prob: 1.0, cnn.training: False})
val_epoch_accuracies.append(val_batch_accuracy)
# train_writer.add_summary(summary, step)
# train_writer.flush(
epoch_cur = step * config.batch_size // config.file_num[FLAGS.dataset]
if epoch_cur > epoch_pre:
# val_epoch_accuracy = np.sum(correct_pre_nums) / ((step + 1) * config.batch_size)
val_epoch_accuracy = np.mean(val_epoch_accuracies)
accuracy_list.append(val_epoch_accuracy)
print('For epoch %i: val_epoch_accuracy = %.3f%%\n' %
(epoch_pre, val_epoch_accuracy * 100))
epoch_pre = epoch_cur
val_epoch_accuracies = []
if step % 10 == 0 and step > 0:
print('>> At step %i: loss = %.3f, train_batch_accuracy = %.3f%%' %
(step, loss, train_batch_accuracy * 100))
print(lr)
if step % 1000 == 0 and step > 0:
save_path = saver.save(sess, model_save_path, global_step=step)
print('Model saved in file: %s' % save_path)
step += 1
except KeyboardInterrupt:
print('Interrupted')
coord.request_stop()
except Exception as e:
coord.request_stop(e)
finally:
save_path = saver.save(sess, model_save_path, global_step=step)
print('Model saved in file: %s' % save_path)
coord.request_stop()
coord.join(threads)
sess.close()
if __name__ == '__main__':
main()
| yikaiw/DL-hw2-CNN | main.py | main.py | py | 4,806 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.flags",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.flags.DEFINE_string",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "t... |
43200145217 | """empty message
Revision ID: 5b1f1d56cb45
Revises: 934b5daacc67
Create Date: 2019-06-03 19:02:22.711720
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '5b1f1d56cb45'
down_revision = '934b5daacc67'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('post', 'date_posted',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('post', 'date_posted',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
# ### end Alembic commands ###
| tgalvinjr/blog-ip | migrations/versions/5b1f1d56cb45_.py | 5b1f1d56cb45_.py | py | 830 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "alembic.op.alter_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.dialects.postgresql.TIMESTAMP",
"line_number": 22,
"usage_type": "call"
},
{
"api_n... |
709779467 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import string
import pandas as pd
from gensim.models import KeyedVectors
import time
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
#x=find_department('Mortage requirements specified are incorrect', False)
def find_department(single_query,only_department):
#Load model----------------------------------------------------------------------
st = time.time()
wordmodelfile = '~/Documents/STUDY/Hackathon/NLP/GoogleNews-vectors-negative300.bin'
wordmodel = KeyedVectors.load_word2vec_format(wordmodelfile, binary = True, limit=200000)
et = time.time()
s = 'Word embedding loaded in %f secs.' % (et-st)
print(s)
#Preprocessing----------------------------------------------------------------------
single_query_cleaned = clean_set([single_query])[0]
if(len(single_query_cleaned)==0):
return False
data = pd.read_csv("~/Documents/STUDY/Hackathon/NLP/dataset/resolved.csv")
if(only_department == False):
queries = data['query']
_list = queries.values.tolist()
#Cleaned data
newDataset = clean_set(_list)
x=return_key(3,single_query_cleaned,newDataset,wordmodel)
if(x!=0):
x=_list[newDataset.index(x)]
return fetch_query_details(x,0,'resolved')
#print('here 2')
#departments = pd.unique(data['Product']) Sample departments
keys = ['security', 'loans', 'accounts', 'insurance', 'investments',
'fundstransfer', 'cards']
#For each element in newDataset (Query) we find the most similar key (Department) mode=0
department=return_key(5,single_query_cleaned,keys,wordmodel)
#Returning depart
q_id = log_query(max(data['query_id'])+1,single_query,department)
return department,q_id
def change_department(q_id, new_department):
data = pd.read_csv("~/Documents/STUDY/Hackathon/NLP/dataset/unresolved.csv")
i=data[data['query_id']==q_id].index.values[0]
#print(i)
data.set_value(i,"department", new_department)
data.to_csv('~/Documents/STUDY/Hackathon/NLP/dataset/unresolved.csv', encoding='utf-8', index=False)
def clean_set(_list):
newDataset=[]
for response in _list:
#Lower, remove punctuations and strip white-spaces and split by spaces
response_words=response.lower().translate(str.maketrans('', '', string.punctuation)).strip().split()
response=''
for word in response_words:
if word not in ENGLISH_STOP_WORDS:
response+=word+' '
newDataset.append(response[:-1])
return newDataset
#resolve_query(62,521,'What to do eh?')
def resolve_query(query_id,employee_id,solution):
from datetime import date
today = date.today().strftime("%d/%m/%Y")
d = fetch_query_details('',query_id,'unresolved')
query_date = d[0][3]
d[0][3] = solution
d[0] = d[0] + [query_date,today,employee_id]
unresolved_data = pd.read_csv("~/Documents/STUDY/Hackathon/NLP/dataset/unresolved.csv")
unresolved_data = unresolved_data[unresolved_data.query_id != query_id]
unresolved_data.to_csv('~/Documents/STUDY/Hackathon/NLP/dataset/unresolved.csv', encoding='utf-8', index=False)
new_data = pd.DataFrame(d, columns = ['query_id','query','department','solution','query_date','date_solved','employee_id'])
data = pd.read_csv("~/Documents/STUDY/Hackathon/NLP/dataset/resolved.csv")
data = pd.concat([data, new_data])
data.to_csv('~/Documents/STUDY/Hackathon/NLP/dataset/resolved.csv', encoding='utf-8', index=False)
#new_data = pd.DataFrame([d], columns = ['query_id','query','department','query_date'])
def log_query(query_id, query, department):
from datetime import date
today = date.today().strftime("%d/%m/%Y")
d=[query_id,query,department,today]
new_data = pd.DataFrame([d], columns = ['query_id','query','department','query_date'])
try:
data = pd.read_csv("~/Documents/STUDY/Hackathon/NLP/dataset/unresolved.csv")
if(len(data)>0):
test = True
else:
test = False
except:
test = False
if(test):
new_data.at[0, 'query_id'] = max(max(data['query_id'])+1,query_id)
data = pd.concat([data, new_data])
else:
data = new_data
data.to_csv('~/Documents/STUDY/Hackathon/NLP/dataset/unresolved.csv', encoding='utf-8', index=False)
return data.loc[data['query'] == query].values.tolist()[0]
#----------------------------------------------------------------------
def fetch_query_details(text,query_id,file_name):
data = pd.read_csv("~/Documents/STUDY/Hackathon/NLP/dataset/"+file_name+".csv")
if(text == 'all'):
return data.values.tolist()
elif(query_id==0):
return data.loc[data['query'] == text].values.tolist()
else:
return data.loc[data['query_id'] == query_id].values.tolist()
def return_key(threshold,sentence_a,keys,wordmodel):
sentence_a = sentence_a.lower().split()
distance_list = []
for key in keys:
sentence_b = key.lower().split()
distance_list.append(wordmodel.wmdistance(sentence_a, sentence_b))
#print(min(distance_list))
if(min(distance_list)>threshold):
return 0
return(keys[distance_list.index(min(distance_list))])
'''
data = pd.read_csv("~/Documents/STUDY/Hackathon/NLP/Consumer_Complaints.csv",nrows=500)
#218 queries
xtrain = data.loc[data['Consumer complaint narrative'].notnull(), ['Consumer complaint narrative','Product','Company public response']]
xtrain = xtrain.loc[xtrain['Company public response'].notnull(), ['Consumer complaint narrative','Product','Company public response']]
xtrain.to_csv('./dataset/resolved.csv', encoding='utf-8', index=False)
'''
#print(find_department('credit repair services'))
#SAVING----------------------------------------------------------------------
| ankitd3/Assist-ANS | NLP/distance.py | distance.py | py | 6,026 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gensim.models.KeyedVectors.load_word2vec_format",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gensim.models.KeyedVectors",
"line_number": 15,
"usage_type": "name"
},
{
... |
39252790870 | import time
import logging
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin import helpers
from django.urls import reverse
from django.db import transaction
from django.db.models import Count
from django.template.response import TemplateResponse
from django.utils.html import format_html, format_html_join
from mangaki.models import (
Work, TaggedWork, WorkTitle, Genre, Track, Tag, Artist, Studio, Editor, Rating, Page,
Suggestion, Evidence, Announcement, Recommendation, Pairing, Reference, Top, Ranking,
Role, Staff, FAQTheme,
FAQEntry, Trope, Language,
ExtLanguage, WorkCluster,
UserBackgroundTask,
ActionType,
get_field_changeset
)
from mangaki.utils.anidb import AniDBTag, client, diff_between_anidb_and_local_tags
from mangaki.utils.db import get_potential_posters
from collections import defaultdict
from enum import Enum
from mangaki.utils.work_merge import WorkClusterMergeHandler
ActionTypeColors = {
ActionType.DO_NOTHING: 'black', # INFO_ONLY
ActionType.JUST_CONFIRM: 'green',
ActionType.CHOICE_REQUIRED: 'red'
}
class MergeErrors(Enum):
NO_ID = 'no ID'
FIELDS_MISSING = 'missing fields'
NOT_ENOUGH_WORKS = 'not enough works'
def handle_merge_errors(response, request, final_work, nb_merged,
message_user):
if response == MergeErrors.NO_ID:
message_user(request,
"Aucun ID n'a été fourni pour la fusion.",
level=messages.ERROR)
if response == MergeErrors.FIELDS_MISSING:
message_user(request,
"""Un ou plusieurs des champs requis n'ont pas été remplis.
(Détails: {})""".format(", ".join(final_work)),
level=messages.ERROR)
if response == MergeErrors.NOT_ENOUGH_WORKS:
message_user(request,
"Veuillez sélectionner au moins 2 œuvres à fusionner.",
level=messages.WARNING)
if response is None: # Confirmed
message_user(request,
format_html('La fusion de {:d} œuvres vers <a href="{:s}">{:s}</a> a bien été effectuée.'
.format(nb_merged, final_work.get_absolute_url(), final_work.title)))
def create_merge_form(works_to_merge_qs):
work_dicts_to_merge = list(works_to_merge_qs.values())
field_changeset = get_field_changeset(work_dicts_to_merge)
fields_to_choose = []
fields_required = []
template_rows = []
suggestions = {}
for field, choices, action, suggested, _ in field_changeset:
suggestions[field] = suggested
template_rows.append({
'field': field,
'choices': choices,
'action_type': action,
'suggested': suggested,
'color': ActionTypeColors[action],
})
if field != 'id' and action != ActionType.DO_NOTHING:
fields_to_choose.append(field)
if action == ActionType.CHOICE_REQUIRED:
fields_required.append(field)
template_rows.sort(key=lambda row: int(row['action_type']), reverse=True)
rating_samples = [(Rating.objects.filter(work_id=work_dict['id']).count(),
Rating.objects.filter(work_id=work_dict['id'])[:10]) for work_dict in work_dicts_to_merge] # FIXME: too many queries
return fields_to_choose, fields_required, template_rows, rating_samples, suggestions
@transaction.atomic # In case trouble happens
def merge_works(request, selected_queryset, force=False, extra=None):
user = request.user if request else None
if selected_queryset.model == WorkCluster: # Author is reviewing an existing WorkCluster
from_cluster = True
cluster = selected_queryset.first()
works_to_merge_qs = cluster.works.order_by('id').prefetch_related('rating_set', 'genre')
else: # Author is merging those works from a Work queryset
from_cluster = False
works_to_merge_qs = selected_queryset.order_by('id').prefetch_related('rating_set', 'genre')
nb_works_to_merge = works_to_merge_qs.count()
if request and request.POST.get('confirm'): # Merge has been confirmed
rich_context = request.POST
else:
fields_to_choose, fields_required, template_rows, rating_samples, suggestions = create_merge_form(works_to_merge_qs)
context = {
'fields_to_choose': ','.join(fields_to_choose),
'fields_required': ','.join(fields_required),
'template_rows': template_rows,
'rating_samples': rating_samples,
'queryset': selected_queryset,
'opts': Work._meta if not from_cluster else WorkCluster._meta,
'action': 'merge' if not from_cluster else 'trigger_merge',
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME
}
if all(field in suggestions for field in fields_required):
rich_context = dict(context)
for field in suggestions:
rich_context[field] = suggestions[field]
if extra is not None:
for field in extra:
rich_context[field] = extra[field]
if force:
rich_context['confirm'] = True
if rich_context.get('confirm'): # Merge has been confirmed
works_to_merge = list(works_to_merge_qs)
if not from_cluster:
cluster = WorkCluster(user=user, checker=user)
cluster.save() # Otherwise we cannot add works
cluster.works.add(*works_to_merge)
# Happens when no ID was provided
if not rich_context.get('id'):
return None, None, MergeErrors.NO_ID
final_id = int(rich_context.get('id'))
final_work = Work.objects.get(id=final_id)
# Notice how `cluster` always exist in this scope.
# noinspection PyUnboundLocalVariable
merge_handler = WorkClusterMergeHandler(cluster,
works_to_merge,
final_work)
missing_required_fields = merge_handler.overwrite_fields(
set(filter(None, rich_context.get('fields_to_choose').split(','))),
set(filter(None, rich_context.get('fields_required').split(','))),
rich_context)
# Happens when a required field was left empty
if missing_required_fields:
return None, missing_required_fields, MergeErrors.FIELDS_MISSING
merge_handler.perform_redirections()
merge_handler.accept_cluster(user)
return len(works_to_merge), merge_handler.target_work, None
# Just show a warning if only one work was checked
if nb_works_to_merge < 2:
return None, None, MergeErrors.NOT_ENOUGH_WORKS
return nb_works_to_merge, None, TemplateResponse(request, 'admin/merge_selected_confirmation.html', context)
logger = logging.getLogger(__name__)
class TaggedWorkInline(admin.TabularInline):
    """Inline editor for the tag/weight rows attached to a work."""
    model = TaggedWork
    fields = ('work', 'tag', 'weight')

    def get_queryset(self, request):
        # Join the related work and tag up front to avoid N+1 queries
        # when rendering the inline rows.
        return super().get_queryset(request).select_related('work', 'tag')
class StaffInline(admin.TabularInline):
    """Inline editor for a work's staff credits (role + artist)."""
    model = Staff
    fields = ('role', 'artist')
    # Artist table is large: use a raw ID input instead of a select widget.
    raw_id_fields = ('artist',)
class WorkTitleInline(admin.TabularInline):
    """Inline editor for a work's alternative titles."""
    model = WorkTitle
    fields = ('title', 'language', 'type')
class ReferenceInline(admin.TabularInline):
    """Inline editor for a work's external references (source + identifier)."""
    model = Reference
    fields = ('source', 'identifier')
class AniDBaidListFilter(admin.SimpleListFilter):
    """Changelist filter on whether a work has an AniDB identifier set."""
    title = 'AniDB aid'
    parameter_name = 'AniDB aid'

    def lookups(self, request, model_admin):
        # Pairs of (query value, human-readable label).
        return ('Vrai', 'Oui'), ('Faux', 'Non')

    def queryset(self, request, queryset):
        selected = self.value()
        if selected == 'Faux':
            # anidb_aid == 0 means "no AniDB identifier".
            return queryset.filter(anidb_aid=0)
        if selected == 'Vrai':
            return queryset.exclude(anidb_aid=0)
        return queryset
@admin.register(FAQTheme)
class FAQAdmin(admin.ModelAdmin):
    """Admin for FAQ themes, ordered by their display position."""
    ordering = ('order',)
    search_fields = ('theme',)
    list_display = ('theme', 'order')
@admin.register(Work)
class WorkAdmin(admin.ModelAdmin):
    """Admin for works, with bulk actions for NSFW flags, AniDB refreshes,
    merging duplicates, poster refresh and default-title changes."""
    search_fields = ('id', 'title', 'worktitle__title')
    list_display = ('id', 'category', 'title', 'nsfw')
    list_filter = ('category', 'nsfw', AniDBaidListFilter)
    raw_id_fields = ('redirect',)
    actions = ['make_nsfw', 'make_sfw', 'refresh_work_from_anidb', 'merge',
               'refresh_work', 'update_tags_via_anidb', 'change_title']
    inlines = [StaffInline, WorkTitleInline, ReferenceInline, TaggedWorkInline]
    # Aggregated rating counters are maintained elsewhere; never hand-edited.
    readonly_fields = (
        'sum_ratings',
        'nb_ratings',
        'nb_likes',
        'nb_dislikes',
        'controversy',
    )

    def make_nsfw(self, request, queryset):
        """Bulk action: flag the selected works as NSFW."""
        rows_updated = queryset.update(nsfw=True)
        if rows_updated == 1:
            message_bit = "1 œuvre est"
        else:
            message_bit = "%s œuvres sont" % rows_updated
        self.message_user(request, "%s désormais NSFW." % message_bit)
    make_nsfw.short_description = "Rendre NSFW les œuvres sélectionnées"

    def update_tags_via_anidb(self, request, queryset):
        """Bulk action: show a tag diff against AniDB, then apply the
        confirmed tag updates (two-step confirmation form)."""
        works = queryset.all()
        if request.POST.get('confirm'):  # Updating tags has been confirmed
            to_update_work_ids = set(map(int, request.POST.getlist('to_update_work_ids')))
            nb_updates = len(to_update_work_ids)
            work_ids = list(map(int, request.POST.getlist('work_ids')))
            tag_titles = request.POST.getlist('tag_titles')
            tag_weights = list(map(int, request.POST.getlist('weights')))
            tag_anidb_tag_ids = list(map(int, request.POST.getlist('anidb_tag_ids')))
            tags = list(map(AniDBTag, tag_titles, tag_weights, tag_anidb_tag_ids))
            # Checkboxes to know which tags have to be kept regardless of their pending status
            tag_checkboxes = request.POST.getlist('tag_checkboxes')
            # Each checkbox value is "work_id:anidb_tag_id".
            tags_to_process = set(tuple(map(int, tag_checkbox.split(':'))) for tag_checkbox in tag_checkboxes)
            # Make a dict with work_id -> tags to keep
            tags_final = {}
            for index, work_id in enumerate(work_ids):
                if work_id not in to_update_work_ids:
                    continue
                if work_id not in tags_final:
                    tags_final[work_id] = []
                if (work_id, tags[index].anidb_tag_id) in tags_to_process:
                    tags_final[work_id].append(tags[index])
            # Process selected tags for works that have been selected
            for work in works:
                if work.id in to_update_work_ids:
                    client.update_tags(work, tags_final[work.id])
            if nb_updates == 0:
                self.message_user(request,
                                  "Aucune oeuvre n'a été marquée comme devant être mise à jour.",
                                  level=messages.WARNING)
            elif nb_updates == 1:
                self.message_user(request,
                                  "Mise à jour des tags effectuée pour une œuvre.")
            else:
                self.message_user(request,
                                  "Mise à jour des tags effectuée pour {} œuvres.".format(nb_updates))
            return None
        # Check for works with missing AniDB AID
        if not all(work.anidb_aid for work in works):
            self.message_user(request,
                              """Certains de vos choix ne possèdent pas d'identifiant AniDB.
                              Le rafraichissement de leurs tags a été omis. (Détails: {})"""
                              .format(", ".join(map(lambda w: w.title,
                                                    filter(lambda w: not w.anidb_aid, works)))),
                              level=messages.WARNING)
        # Retrieve and send tags information to the appropriate form
        all_information = {}
        for index, work in enumerate(works, start=1):
            if work.anidb_aid:
                # Throttle AniDB requests: pause every 25 works.
                if index % 25 == 0:
                    logger.info('(AniDB refresh): Sleeping...')
                    time.sleep(1)  # Don't spam AniDB.
                anidb_tags = client.get_tags(anidb_aid=work.anidb_aid)
                tags_diff = diff_between_anidb_and_local_tags(work, anidb_tags)
                tags_count = 0
                for tags_info in tags_diff.values():
                    tags_count += len(tags_info)
                # Only show works that actually have a pending tag change.
                if tags_count > 0:
                    all_information[work.id] = {
                        'title': work.title,
                        'deleted_tags': tags_diff["deleted_tags"],
                        'added_tags': tags_diff["added_tags"],
                        'updated_tags': tags_diff["updated_tags"],
                        'kept_tags': tags_diff["kept_tags"]
                    }
        if all_information:
            context = {
                'all_information': all_information.items(),
                'queryset': queryset,
                'opts': TaggedWork._meta,
                'action': 'update_tags_via_anidb',
                'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME
            }
            return TemplateResponse(request, "admin/update_tags_via_anidb.html", context)
        else:
            self.message_user(request,
                              "Aucune des œuvres sélectionnées n'a subit de mise à jour des tags chez AniDB.",
                              level=messages.WARNING)
            return None
    update_tags_via_anidb.short_description = "Mettre à jour les tags des œuvres depuis AniDB"

    def make_sfw(self, request, queryset):
        """Bulk action: clear the NSFW flag on the selected works."""
        rows_updated = queryset.update(nsfw=False)
        if rows_updated == 1:
            message_bit = "1 œuvre n'est"
        else:
            message_bit = "%s œuvres ne sont" % rows_updated
        self.message_user(request, "%s désormais plus NSFW." % message_bit)
    make_sfw.short_description = "Rendre SFW les œuvres sélectionnées"

    @transaction.atomic
    def refresh_work_from_anidb(self, request, queryset):
        """Bulk action: re-fetch metadata from AniDB for the selected works,
        skipping works with no AniDB id or with a duplicated AniDB id."""
        works = queryset.all()
        # Check for works with missing AniDB AID
        offending_works = []
        if not all(work.anidb_aid for work in works):
            offending_works = [work for work in works if not work.anidb_aid]
            self.message_user(request,
                              "Certains de vos choix ne possèdent pas d'identifiant AniDB. "
                              "Leur rafraichissement a été omis. (Détails: {})"
                              .format(", ".join(map(lambda w: w.title, offending_works))),
                              level=messages.WARNING)
        # Check for works that have a duplicate AniDB AID
        aids_with_works = defaultdict(list)
        for work in works:
            if work.anidb_aid:
                aids_with_works[work.anidb_aid].append(work)
        # Look across the whole table, not just the selection, for clashes.
        aids_with_potdupe_works = defaultdict(list)
        for work in Work.objects.filter(anidb_aid__in=aids_with_works.keys()):
            aids_with_potdupe_works[work.anidb_aid].append(work)
        works_with_conflicting_anidb_aid = []
        for anidb_aid, potdupe_works in aids_with_potdupe_works.items():
            if len(potdupe_works) > 1:
                works_with_conflicting_anidb_aid.extend(aids_with_works[anidb_aid])
                # Alert the user for each work he selected that has a duplicate AniDB ID
                self.message_user(
                    request,
                    """Le rafraichissement de {} a été omis car d'autres œuvres possèdent
                    le même identifiant AniDB #{}. (Œuvres en conflit : {})"""
                    .format(
                        ", ".join(map(lambda w: w.title, aids_with_works[anidb_aid])),
                        anidb_aid,
                        ", ".join(map(lambda w: w.title, aids_with_potdupe_works[anidb_aid]))
                    ),
                    level=messages.WARNING
                )
        # Refresh works from AniDB
        refreshed = 0
        for index, work in enumerate(works, start=1):
            if work.anidb_aid and work not in works_with_conflicting_anidb_aid:
                logger.info('Refreshing {} from AniDB.'.format(work))
                if client.get_or_update_work(work.anidb_aid) is not None:
                    refreshed += 1
                # Throttle AniDB requests: pause every 25 works.
                if index % 25 == 0:
                    logger.info('(AniDB refresh): Sleeping...')
                    time.sleep(1)  # Don't spam AniDB.
        if refreshed > 0:
            self.message_user(request,
                              "Le rafraichissement de {} œuvre(s) a été effectué avec succès."
                              .format(refreshed))
    refresh_work_from_anidb.short_description = "Rafraîchir les œuvres depuis AniDB"

    def merge(self, request, queryset):
        """Bulk action: merge the selected works (delegates to merge_works)."""
        nb_merged, final_work, response = merge_works(request, queryset)
        handle_merge_errors(response, request, final_work, nb_merged,
                            self.message_user)
        return response
    merge.short_description = "Fusionner les œuvres sélectionnées"

    def refresh_work(self, request, queryset):
        """Bulk action: propose candidate posters, then download the ones
        chosen in the confirmation form."""
        if request.POST.get('confirm'):  # Confirmed
            downloaded_titles = []
            for obj in queryset:
                chosen_poster = request.POST.get('chosen_poster_{:d}'.format(obj.id))
                if not chosen_poster:
                    continue
                if obj.retrieve_poster(chosen_poster):
                    downloaded_titles.append(obj.title)
            if downloaded_titles:
                self.message_user(
                    request,
                    "Des posters ont été trouvés pour les anime suivants : %s." % ', '.join(downloaded_titles))
            else:
                self.message_user(request, "Aucun poster n'a été trouvé, essayez de changer le titre.")
            return None
        # First pass: gather candidate posters for the confirmation page.
        bundle = []
        for work in queryset:
            bundle.append((work.id, work.title, get_potential_posters(work)))
        context = {
            'queryset': queryset,
            'bundle': bundle,
            'opts': self.model._meta,
            'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME
        }
        return TemplateResponse(request, 'admin/refresh_poster_confirmation.html', context)
    refresh_work.short_description = "Mettre à jour la fiche de l'anime (poster)"

    @transaction.atomic
    def change_title(self, request, queryset):
        """Bulk action: pick one of a work's alternative titles as the new
        default title (two-step confirmation form)."""
        if request.POST.get('confirm'):  # Changing default title has been confirmed
            work_ids = request.POST.getlist('work_ids')
            titles_ids = request.POST.getlist('title_ids')
            titles = WorkTitle.objects.filter(
                pk__in=titles_ids, work__id__in=work_ids
            ).values_list('title', 'work__title', 'work__id')
            for new_title, current_title, work_id in titles:
                if new_title != current_title:
                    Work.objects.filter(pk=work_id).update(title=new_title)
            self.message_user(request, 'Les titres ont bien été changés pour les œuvres sélectionnées.')
            return None
        work_titles = WorkTitle.objects.filter(work__in=queryset.values_list('pk', flat=True))
        full_infos = work_titles.values(
            'pk', 'title', 'language__code', 'type', 'work_id', 'work__title'
        ).order_by('title').distinct('title')
        # Group candidate titles per work id for the confirmation template.
        titles = {}
        for infos in full_infos:
            if infos['work_id'] not in titles:
                titles[infos['work_id']] = {}
            titles[infos['work_id']].update({
                infos['pk']: {
                    'title': infos['title'],
                    'language': infos['language__code'] if infos['language__code'] else 'inconnu',
                    'type': infos['type'] if infos['title'] != infos['work__title'] else 'current'
                }
            })
        if titles:
            context = {
                'work_titles': titles,
                'queryset': queryset,
                'opts': Work._meta,
                'action': 'change_title',
                'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME
            }
            return TemplateResponse(request, 'admin/change_default_work_title.html', context)
        else:
            self.message_user(request,
                              'Aucune des œuvres sélectionnées ne possèdent de titre alternatif.',
                              level=messages.WARNING)
            return None
    change_title.short_description = "Changer le titre par défaut"
@admin.register(Artist)
class ArtistAdmin(admin.ModelAdmin):
    """Admin for artists; searchable by id and name."""
    search_fields = ('id', 'name')
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    """Admin for tags, showing how many works each tag is linked to."""
    list_display = ("title",)
    readonly_fields = ("nb_works_linked",)

    def get_queryset(self, request):
        # Annotate the count once per queryset instead of querying per row.
        qs = super().get_queryset(request)
        return qs.annotate(works_linked=Count('work'))

    def nb_works_linked(self, obj):
        """Number of works linked to this tag (from the annotation above)."""
        return obj.works_linked
    nb_works_linked.short_description = 'Nombre d\'œuvres liées au tag'
@admin.register(TaggedWork)
class TaggedWorkAdmin(admin.ModelAdmin):
    """Admin for work/tag associations; searchable by work and tag titles."""
    search_fields = ('work__title', 'tag__title')
@admin.register(WorkCluster)
class WorkClusterAdmin(admin.ModelAdmin):
    """Admin for duplicate-work clusters, with merge/reject bulk actions."""
    list_display = ('user', 'get_work_titles', 'resulting_work', 'reported_on', 'merged_on', 'checker', 'status', 'difficulty')
    list_filter = ('status',)
    list_select_related = ('user', 'resulting_work', 'checker')
    raw_id_fields = ('user', 'works', 'checker', 'resulting_work', 'origin')
    search_fields = ('id',)
    actions = ('trigger_merge', 'reject')

    def get_queryset(self, request):
        # get_work_titles iterates cluster works; prefetch to avoid N+1.
        qs = super().get_queryset(request)
        return qs.prefetch_related('works')

    def trigger_merge(self, request, queryset):
        """Bulk action: merge the works of the selected cluster(s)."""
        nb_merged, final_work, response = merge_works(request, queryset)
        handle_merge_errors(response, request, final_work, nb_merged,
                            self.message_user)
        return response
    trigger_merge.short_description = "Fusionner les œuvres de ce cluster"

    def reject(self, request, queryset):
        """Bulk action: mark the selected clusters as rejected."""
        rows_updated = queryset.update(status='rejected')
        if rows_updated == 1:
            message_bit = "1 cluster"
        else:
            message_bit = "%s clusters" % rows_updated
        self.message_user(request, "Le rejet de %s a été réalisé avec succès." % message_bit)
    reject.short_description = "Rejeter les clusters sélectionnés"

    def get_work_titles(self, obj):
        """Render the cluster's works as an HTML list with admin links."""
        cluster_works = obj.works.all()  # Does not include redirected works
        if cluster_works:
            def get_admin_url(work):
                # Redirected works have no meaningful change page.
                if work.redirect is None:
                    return reverse('admin:mangaki_work_change', args=(work.id,))
                else:
                    return '#'
            return (
                '<ul>' +
                format_html_join('', '<li>{} ({}<a href="{}">{}</a>)</li>',
                                 ((work.title, 'was ' if work.redirect is not None else '',
                                   get_admin_url(work), work.id) for work in cluster_works)) +
                '</ul>'
            )
        else:
            return '(all deleted)'
    get_work_titles.allow_tags = True
@admin.register(Suggestion)
class SuggestionAdmin(admin.ModelAdmin):
    """Admin for user suggestions, with bulk check/uncheck actions."""
    list_display = ('work', 'problem', 'date', 'user', 'is_checked')
    list_filter = ('problem',)
    actions = ['check_suggestions', 'uncheck_suggestions']
    raw_id_fields = ('work',)
    search_fields = ('work__title', 'user__username')

    def view_on_site(self, obj):
        """'View on site' points to the suggested work's public page."""
        return obj.work.get_absolute_url()

    def check_suggestions(self, request, queryset):
        """Bulk action: mark suggestions checked; materialize 'ref'
        suggestions into Reference rows."""
        rows_updated = queryset.update(is_checked=True)
        for suggestion in queryset:
            if suggestion.problem == 'ref':  # Reference suggestion
                reference, created = Reference.objects.get_or_create(work=suggestion.work, url=suggestion.message)
                reference.suggestions.add(suggestion)
        if rows_updated == 1:
            message_bit = "1 suggestion"
        else:
            message_bit = "%s suggestions" % rows_updated
        self.message_user(request, "La validation de %s a été réalisé avec succès." % message_bit)
    check_suggestions.short_description = "Valider les suggestions sélectionnées"

    def uncheck_suggestions(self, request, queryset):
        """Bulk action: mark the selected suggestions as unchecked."""
        rows_updated = queryset.update(is_checked=False)
        if rows_updated == 1:
            message_bit = "1 suggestion"
        else:
            message_bit = "%s suggestions" % rows_updated
        self.message_user(request, "L'invalidation de %s a été réalisé avec succès." % message_bit)
    uncheck_suggestions.short_description = "Invalider les suggestions sélectionnées"
@admin.register(Announcement)
class AnnouncementAdmin(admin.ModelAdmin):
    """Admin for announcements; the title field is managed elsewhere."""
    exclude = ('title',)
@admin.register(Pairing)
class PairingAdmin(admin.ModelAdmin):
    """Admin for artist/work pairing suggestions.

    Each action accepts the selected pairings for one role by creating the
    corresponding Staff rows; the shared logic lives in _validate_pairings.
    """
    list_display = ('artist', 'work', 'date', 'user', 'is_checked')
    actions = ['make_director', 'make_composer', 'make_author']

    def _validate_pairings(self, request, queryset, role_slug, singular, plural):
        """Create the missing Staff rows for the selected pairings.

        :param role_slug: slug of the Role to assign (e.g. 'director').
        :param singular: message fragment used when exactly one row was created.
        :param plural: '%s'-style fragment used for any other count.
        """
        rows_updated = 0
        role = Role.objects.get(slug=role_slug)
        for pairing in queryset:
            # get_or_create keeps the action idempotent: an already-existing
            # Staff row is left alone and does not count as an update.
            _, created = Staff.objects.get_or_create(work_id=pairing.work_id,
                                                     artist_id=pairing.artist_id,
                                                     role=role)
            if created:
                pairing.is_checked = True
                pairing.save()
                rows_updated += 1
        if rows_updated == 1:
            message_bit = singular
        else:
            message_bit = plural % rows_updated
        self.message_user(request, "%s été mis à jour." % message_bit)

    def make_director(self, request, queryset):
        """Bulk action: accept the selected pairings as director credits."""
        self._validate_pairings(request, queryset, 'director',
                                "1 réalisateur a", "%s réalisateurs ont")
    make_director.short_description = "Valider les appariements sélectionnés pour réalisation"

    def make_composer(self, request, queryset):
        """Bulk action: accept the selected pairings as composer credits."""
        self._validate_pairings(request, queryset, 'composer',
                                "1 compositeur a", "%s compositeurs ont")
    make_composer.short_description = "Valider les appariements sélectionnés pour composition"

    def make_author(self, request, queryset):
        """Bulk action: accept the selected pairings as author credits."""
        self._validate_pairings(request, queryset, 'author',
                                "1 auteur a", "%s auteurs ont")
    make_author.short_description = "Valider les appariements sélectionnés pour écriture"
@admin.register(Rating)
class RatingAdmin(admin.ModelAdmin):
    """Admin for ratings; user and work tables are large, so use raw IDs."""
    raw_id_fields = ('user', 'work')
@admin.register(Reference)
class ReferenceAdmin(admin.ModelAdmin):
    """Admin for external references attached to works."""
    list_display = ['work', 'url']
    raw_id_fields = ('work', 'suggestions')
class RankingInline(admin.TabularInline):
    """Inline list of rankings inside a Top, with a readable object name."""
    model = Ranking
    fields = ('content_type', 'object_id', 'name', 'score', 'nb_ratings', 'nb_stars',)
    readonly_fields = ('name',)

    def name(self, instance):
        """Human-readable label for the generic content_object."""
        return str(instance.content_object)
@admin.register(Top)
class TopAdmin(admin.ModelAdmin):
    """Read-mostly admin for generated tops; tops cannot be added by hand."""
    inlines = [
        RankingInline,
    ]
    readonly_fields = ('category', 'date',)

    def has_add_permission(self, request):
        # Tops are generated programmatically, never created in the admin.
        return False
@admin.register(Role)
class RoleAdmin(admin.ModelAdmin):
    """Admin for staff roles; the slug is prefilled from the name."""
    model = Role
    prepopulated_fields = {'slug': ('name',)}
@admin.register(Evidence)
class EvidenceAdmin(admin.ModelAdmin):
    """Admin for evidence attached to suggestions."""
    list_display = ['user', 'suggestion', 'agrees', 'needs_help']
# Simple models that only need the default ModelAdmin behaviour.
admin.site.register(Genre)
admin.site.register(Track)
admin.site.register(Studio)
admin.site.register(Editor)
admin.site.register(Page)
admin.site.register(FAQEntry)
admin.site.register(Recommendation)
admin.site.register(Trope)
admin.site.register(Language)
admin.site.register(ExtLanguage)
admin.site.register(UserBackgroundTask)
# Header shown at the top of every admin page.
admin.site.site_header = "Administration Mangaki"
| mangaki/mangaki | mangaki/mangaki/admin.py | admin.py | py | 28,860 | python | en | code | 137 | github-code | 6 | [
{
"api_name": "mangaki.models.ActionType.DO_NOTHING",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.ActionType",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "mangaki.models.ActionType.JUST_CONFIRM",
"line_number": 33,
"usage_ty... |
39760240581 | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
# Route table for CiscoDxUketsukeApp.
# NOTE(review): patterns() with dotted-string view names is Django <= 1.7
# syntax (removed in Django 1.10) -- confirm the project's Django version.
urlpatterns = patterns('CiscoDxUketsukeApp.views',
    # url(r'^$', 'CiscoDxUketsuke.views.home', name='home'),
    # url(r'^getData/' 'CiscoDxUketsuke.views.getData'),
    # TSV / JSON export endpoints for each dataset.
    url(r'^member_tsv/$','member_tsv'),
    url(r'^member_json/$','member_json'),
    url(r'^room_tsv/$','room_tsv'),
    url(r'^room_json/$','room_json'),
    url(r'^folder_tsv/$','folder_tsv'),
    url(r'^folder_json/$','folder_json'),
    url(r'^favorite_tsv/$','favorite_tsv'),
    url(r'^favorite_json/$','favorite_json'),
    # Test / scratch pages.
    url(r'^test/$','test'),
    url(r'^test2/$','test2'),
    url(r'^pad1/$','pad1'),
    url(r'^pad2/$','pad2'),
    # Main screens.
    url(r'^top/$','top'),
    url(r'^member/$','member'),
    url(r'^room/$','room'),
    # 'folder' also accepts optional device id and folder id captures.
    url(r'^folder/$','folder'),
    url(r'^folder/(?P<dxId>\d+)/$','folder'),
    url(r'^folder/(?P<dxId>\d+)/(?P<folderId>\d+)/$','folder'),
    url(r'^home/$','home'),
    url(r'^fav/$','fav'),
    url(r'^index/$','index'),
    url(r'^list/$','list'),
    # Creation / edition forms.
    url(r'^add_dx/$','add_dx'),
    url(r'^edit_dx/$','edit_dx'),
    url(r'^add_member/$','add_member'),
    url(r'^add_room/$','add_room'),
    url(r'^add_folder/$','add_folder'),
    url(r'^add1/$','add_member_room_db'),
) | fjunya/dxApp | src/CiscoDxUketsukeApp/urls.py | urls.py | py | 1,254 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "djan... |
73968831228 | """Analyzes the MCTS explanations output by run_mcts.py in terms of stress and context entropy."""
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import wilcoxon
def _plot_metric_histograms(original: np.ndarray,
                            dependent: np.ndarray,
                            independent: np.ndarray,
                            bins: np.ndarray,
                            xlabel: str,
                            title: str,
                            save_path: Path) -> None:
    """Plot overlaid histograms of one metric for the original text and both explanation types."""
    plt.clf()
    plt.figure(figsize=(12, 8))
    plt.hist(original, bins, alpha=0.5, label='Original')
    plt.hist(dependent, bins, alpha=0.5, label='Context-Dependent')
    plt.hist(independent, bins, alpha=0.5, label='Context-Independent')
    plt.legend(fontsize=20)
    plt.ylabel('Count', fontsize=20)
    plt.yticks(fontsize=16)
    plt.xlabel(xlabel, fontsize=20)
    plt.xticks(fontsize=16)
    plt.title(title, fontsize=24)
    plt.savefig(save_path, bbox_inches='tight')


def _print_mean_std(metric: str, label: str, values: np.ndarray) -> None:
    """Print the mean +/- standard deviation of *values*."""
    print(f'Average {metric} ({label}) = '
          f'{np.mean(values):.3f} +/- {np.std(values):.3f}')


def _print_diff(metric: str, label_a: str, label_b: str,
                values_a: np.ndarray, values_b: np.ndarray) -> None:
    """Print mean +/- std of (values_a - values_b) with a Wilcoxon signed-rank p-value."""
    diff = values_a - values_b
    print(f'Average difference in {metric} ({label_a} - {label_b}) = '
          f'{np.mean(diff):.3f} +/- {np.std(diff):.3f} '
          f'(p = {wilcoxon(values_a, values_b).pvalue:.4e})')


def analyze_mcts_explanations(explanations_path: Path,
                              save_dir: Path) -> None:
    """Analyzes the MCTS explanations output by run_mcts.py in terms of stress and context entropy.

    :param explanations_path: Path to a pickle file containing the explanations from run_mcts.py.
    :param save_dir: Path to a directory where analysis plots will be saved.
    """
    # Load MCTS results
    with open(explanations_path, 'rb') as f:
        results = pickle.load(f)

    # Create save_dir
    save_dir.mkdir(parents=True, exist_ok=True)

    # Extract MCTS results
    # NOTE(review): assumes each value is an array with one entry per example,
    # all aligned -- confirm against run_mcts.py.
    original_stress = results['original_stress']
    masked_stress_dependent = results['masked_stress_dependent']
    masked_stress_independent = results['masked_stress_independent']
    original_entropy = results['original_entropy']
    masked_entropy_dependent = results['masked_entropy_dependent']
    masked_entropy_independent = results['masked_entropy_independent']

    # Plot stress (scores lie in [0, 1])
    _plot_metric_histograms(original_stress, masked_stress_dependent, masked_stress_independent,
                            np.linspace(0, 1, 20), 'Stress Score',
                            'Stress Score for Original Text and Explanations',
                            save_dir / 'stress.pdf')

    # Plot entropy; the maximum is the entropy of a uniform distribution over 3 classes
    max_entropy = -np.log2(1 / 3)
    _plot_metric_histograms(original_entropy, masked_entropy_dependent, masked_entropy_independent,
                            np.linspace(0, max_entropy, 20), 'Context Entropy',
                            'Context Entropy for Original Text and Explanations',
                            save_dir / 'entropy.pdf')

    # Print stress and entropy summary statistics
    _print_mean_std('stress', 'original', original_stress)
    _print_mean_std('stress', 'dependent', masked_stress_dependent)
    _print_mean_std('stress', 'independent', masked_stress_independent)
    print()
    _print_mean_std('entropy', 'original', original_entropy)
    _print_mean_std('entropy', 'dependent', masked_entropy_dependent)
    _print_mean_std('entropy', 'independent', masked_entropy_independent)

    # Print pairwise differences with significance tests
    _print_diff('stress', 'dependent', 'original', masked_stress_dependent, original_stress)
    _print_diff('stress', 'independent', 'original', masked_stress_independent, original_stress)
    _print_diff('stress', 'dependent', 'independent', masked_stress_dependent, masked_stress_independent)
    print()
    _print_diff('entropy', 'dependent', 'original', masked_entropy_dependent, original_entropy)
    _print_diff('entropy', 'independent', 'original', masked_entropy_independent, original_entropy)
    _print_diff('entropy', 'dependent', 'independent', masked_entropy_dependent, masked_entropy_independent)
if __name__ == '__main__':
    from tap import Tap

    class Args(Tap):
        # Tap (typed-argument-parser) turns the annotated attributes into CLI
        # arguments and uses the bare strings below as their help text.
        explanations_path: Path
        """Path to a pickle file containing the explanations from run_mcts.py."""
        save_dir: Path
        """Path to a directory where analysis plots will be saved."""

    analyze_mcts_explanations(**Args().parse_args().as_dict())
| swansonk14/MCTS_Interpretability | analyze_mcts_explanations.py | analyze_mcts_explanations.py | py | 6,053 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_numb... |
4206104345 | # pylint: disable=no-member, no-name-in-module, import-error
from __future__ import absolute_import
import glob
import distutils.command.sdist
import distutils.log
import subprocess
from setuptools import Command, setup
import setuptools.command.sdist
# Patch setuptools' sdist behaviour with distutils' sdist behaviour
# (forces the plain distutils file-collection logic for source builds).
setuptools.command.sdist.sdist.run = distutils.command.sdist.sdist.run
class LintCommand(Command):
    """
    Custom setuptools command for running lint
    """
    description = 'run lint against project source files'
    user_options = []  # this command takes no command-line options

    def initialize_options(self):
        # Required by the Command interface; nothing to initialize.
        pass

    def finalize_options(self):
        # Required by the Command interface; nothing to finalize.
        pass

    def run(self):
        """Run pylint over every top-level Python file in the project."""
        self.announce("Running pylint", level=distutils.log.INFO)
        # check_call raises CalledProcessError on a non-zero pylint exit.
        subprocess.check_call(["pylint"] + glob.glob("*.py"))
setup(
    # Package name:
    name="dxlmisc",
    # Version number:
    version="0.0.1",
    # Requirements
    # NOTE(review): pylint is a development tool; it would normally live in
    # extras_require rather than install_requires -- confirm intent.
    install_requires=[
        "pylint"
    ],
    description="Misc OpenDXL Tools",
    python_requires='>=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6"
    ],
    # Exposes "python setup.py lint" via the custom command above.
    cmdclass={
        "lint": LintCommand
    }
)
| jbarlow-mcafee/opendxl-misc | setup.py | setup.py | py | 1,654 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "setuptools.command",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.sdist.command",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "distutils.command.sdist",
"line_number": 12,
"usage_type": "name"
},
{
... |
21738440212 | import cv2
# Problem 4.
# Rescale the video vid1.jpg by 0.5 and display the original video and the rescaled one in separate windows.
def rescaleFrame(frame, scale):
    """Return a copy of *frame* resized by the given scale factor.

    Uses INTER_AREA interpolation, which is well suited to downscaling.
    """
    height, width = frame.shape[:2]
    target_size = (int(width * scale), int(height * scale))
    return cv2.resize(frame, target_size, interpolation=cv2.INTER_AREA)
# Play vid1.mp4 alongside a half-size copy until the video ends or the
# user presses 'd'.
capture = cv2.VideoCapture('vid1.mp4')

while True:
    frame_loaded, frame = capture.read()
    # capture.read() returns (False, None) once the stream is exhausted;
    # treat that as the normal end of the video instead of exiting the
    # process with an error status and leaking the capture handle.
    if not frame_loaded or frame is None:
        break
    frame_rescaled = rescaleFrame(frame, 0.5)
    cv2.imshow('Video', frame)
    cv2.imshow('Video_rescaled', frame_rescaled)
    # waitKey(20) paces playback (~50 fps max) and polls the keyboard.
    if cv2.waitKey(20) & 0xFF == ord('d'):
        break

capture.release()
cv2.destroyAllWindows()
cv2.waitKey(0) | markhamazaspyan/Python_2_ASDS | opencvHW1/problem4.py | problem4.py | py | 799 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.resize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"lin... |
33628818675 | # -*- coding: utf-8 -*-
""" Created by Safa Arıman on 12.12.2018 """
import base64
import json
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import urllib.parse
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.action.DoNothingAction import DoNothingAction
from ulauncher.api.shared.action.OpenUrlAction import OpenUrlAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
__author__ = 'safaariman'
class ExtensionKeywordListener(EventListener):
    """Handles Ulauncher keyword queries by searching Jira's quicksearch API."""

    def __init__(self, icon_file):
        # Path of the icon shown next to every result item.
        self.icon_file = icon_file

    def on_event(self, event, extension):
        """Query Jira for the typed text and return a list of result items."""
        query = event.get_argument()
        results = []
        workspace_url = extension.preferences.get('url')
        user = extension.preferences.get('username')
        password = extension.preferences.get('password')
        # HTTP Basic auth token built from the configured credentials.
        token = base64.b64encode(str('%s:%s' % (user, password)).encode()).decode()
        url = urllib.parse.urljoin(workspace_url, 'rest/internal/2/productsearch/search')
        get_url = "%s?%s" % (url, urllib.parse.urlencode({'q': query}))
        req = urllib.request.Request(get_url, headers={'Authorization': 'Basic %s' % token})
        result_types = []
        try:
            response = urllib.request.urlopen(req)
            result_types = json.loads(response.read())
        except urllib.error.HTTPError as e:
            if e.code == 401:
                results.append(
                    ExtensionResultItem(
                        name='Authentication failed.',
                        description='Please check your username/e-mail and password.',
                        icon=self.icon_file,
                        on_enter=DoNothingAction()
                    )
                )
                return RenderResultListAction(results)
            # NOTE(review): non-401 HTTP errors fall through and render the
            # "No results" item below -- confirm this is intended.
        except urllib.error.URLError as e:
            # Network-level failure (DNS, refused connection, bad URL...).
            results.append(
                ExtensionResultItem(
                    name='Could not connect to Jira.',
                    description='Please check your workspace url and make sure you are connected to the internet.',
                    icon=self.icon_file,
                    on_enter=DoNothingAction()
                )
            )
            return RenderResultListAction(results)
        # Flatten the grouped response into result items; 'subtitle' holds
        # the issue key when present.
        for rtype in result_types:
            for item in rtype.get('items', []):
                key = item.get('subtitle')
                title = item.get('title')
                url = item.get('url')
                results.append(
                    ExtensionResultItem(
                        name=title if not key else '%s - %s' % (key, title),
                        description=key,
                        icon=self.icon_file,
                        on_enter=OpenUrlAction(url=url),
                        on_alt_enter=CopyToClipboardAction(url),
                    )
                )
        if not results:
            results.append(
                ExtensionResultItem(
                    name="Search '%s'" % query,
                    description='No results. Try searching something else :)',
                    icon=self.icon_file,
                    on_enter=DoNothingAction()
                )
            )
        return RenderResultListAction(results)
| safaariman/ulauncher-jira | jira/listeners/extension_keyword.py | extension_keyword.py | py | 3,484 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "ulauncher.api.client.EventListener.EventListener",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "base64.b64encode",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "urllib.request.parse.urljoin",
"line_number": 32,
"usage_type": "call"
... |
43213705575 | #!/usr/bin/env python3
"""
Program to decode the first sprite of a CTHG 2 file.
Mainly intended as a test for the checking the encoder, but also a
demonstration of how to decode.
"""
_license = """
Copyright (c) 2013 Alberth "Alberth" Hofkamp
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PIL import Image
class Infile:
    """Reader for CTHG version 2 sprite files.

    Opens the file, validates the 6-byte magic header ("CTHG", version
    2.0), and exposes little-endian byte/word/long accessors.
    Fix: the original never closed the file handle; a ``close()`` method
    and context-manager support are added. EOF now raises an explicit
    ``EOFError`` instead of an obscure ``IndexError`` from ``read(1)[0]``.
    """

    # Expected file header: "CTHG" magic followed by version bytes 2, 0.
    HEADER = bytes([ord('C'), ord('T'), ord('H'), ord('G'), 2, 0])

    def __init__(self, fname):
        self.fname = fname
        self.handle = open(self.fname, "rb")
        # Read and verify the header one byte at a time.
        for h in self.HEADER:
            v = self.getByte()
            assert v == h, "not a CTHG 2 file: %r" % self.fname

    def close(self):
        """Close the underlying file handle (fixes the handle leak)."""
        self.handle.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.close()
        return False

    def getByte(self):
        """Return the next byte as an int; raise EOFError at end of file."""
        b = self.handle.read(1)
        if not b:
            raise EOFError("unexpected end of file in %r" % self.fname)
        return b[0]

    def getWord(self):
        """Return the next 16-bit little-endian unsigned integer."""
        b = self.getByte()
        return b | (self.getByte() << 8)

    def getLong(self):
        """Return the next 32-bit little-endian unsigned integer."""
        w = self.getWord()
        return w | (self.getWord() << 16)

    def getData(self, size):
        """Return the next *size* bytes as a list of ints."""
        return [self.getByte() for _ in range(size)]
def decode_xy(pix_idx, w, h):
    """Convert a linear pixel index into (x, y) coordinates of a w*h image."""
    row, col = divmod(pix_idx, w)
    assert 0 <= col < w
    assert 0 <= row < h
    return col, row
def get_colour(table, idx):
    """Map a recolour-table number and intensity index to an RGBA tuple."""
    # Per-table (R, G, B) channel multipliers; table 0 is always black.
    channel_masks = ((0, 0, 0), (1, 0, 0), (0, 1, 0),
                     (0, 0, 1), (0, 1, 1), (1, 0, 1))
    assert 0 <= table < len(channel_masks)
    r, g, b = channel_masks[table]
    return (r * idx, g * idx, b * idx, 255)
class Sprite:
    """One sprite decoded from a CTHG 2 stream.

    Record layout: 32-bit total size, then sprite number, width and
    height (16-bit words), followed by run-length-encoded pixel data.
    Each run starts with an opcode byte whose top two bits select the
    run type and whose low six bits give the run length in pixels.
    """
    def __init__(self, infile):
        # The stored size includes the three header words read below.
        size = infile.getLong() - 2 - 2 - 2
        self.number = infile.getWord()
        self.width = infile.getWord()
        self.height = infile.getWord()
        self.data = infile.getData(size)
        print("Sprite number {}".format(self.number))
        print("Width {}".format(self.width))
        print("Height {}".format(self.height))
        print("Size {}".format(size))
        print("Data size {}".format(len(self.data)))

    def get_data(self, idx):
        """Return (byte at *idx*, idx advanced by one)."""
        return self.data[idx], idx + 1

    def save(self):
        """Decode the RLE pixel data and write it as sprite_<number>.png."""
        # Start from a fully transparent canvas so transparent runs need no work.
        im = Image.new("RGBA", (self.width, self.height), (0, 0, 0, 0))
        pix = im.load()
        idx = 0      # read position in self.data
        pix_idx = 0  # linear index of the next pixel to write
        while idx < len(self.data):
            length, idx = self.get_data(idx)
            if length <= 63: # Fixed non-transparent 32bpp pixels (RGB)
                length = length & 63
                x, y = decode_xy(pix_idx, self.width, self.height)
                for i in range(length):
                    d = (self.data[idx], self.data[idx+1], self.data[idx+2], 255)
                    pix[x, y] = d
                    idx = idx + 3
                    pix_idx = pix_idx + 1
                    x = x + 1
                    if x == self.width:
                        x = 0
                        y = y + 1
                continue
            elif length <= 64+63: # Partially transparent 32bpp pixels (RGB)
                length = length & 63
                # One shared opacity byte precedes the RGB triples.
                opacity, idx = self.get_data(idx)
                x, y = decode_xy(pix_idx, self.width, self.height)
                for i in range(length):
                    d = (self.data[idx], self.data[idx+1], self.data[idx+2], opacity)
                    pix[x, y] = d
                    idx = idx + 3
                    pix_idx = pix_idx + 1
                    x = x + 1
                    if x == self.width:
                        x = 0
                        y = y + 1
                continue
            elif length <= 128+63: # Fixed fully transparent pixels
                length = length & 63
                # Nothing to draw: the canvas is already transparent.
                pix_idx = pix_idx + length
                continue
            else: # Recolour layer.
                length = length & 63
                table, idx = self.get_data(idx)
                # NOTE(review): opacity is read but not passed to get_colour,
                # which always returns alpha 255 — confirm this is intended.
                opacity, idx = self.get_data(idx)
                x, y = decode_xy(pix_idx, self.width, self.height)
                for i in range(length):
                    col, idx = self.get_data(idx)
                    pix[x, y] = get_colour(table, col)
                    pix_idx = pix_idx + 1
                    x = x + 1
                    if x == self.width:
                        x = 0
                        y = y + 1
                continue
        im.save("sprite_{}.png".format(self.number))
# Decode the first sprite of the encoder's output file and save it as PNG.
inf = Infile("x.out")
spr = Sprite(inf)
spr.save()
| CorsixTH/CorsixTH | SpriteEncoder/decode.py | decode.py | py | 5,314 | python | en | code | 2,834 | github-code | 6 | [
{
"api_name": "PIL.Image.new",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 104,
"usage_type": "name"
}
] |
30546132474 | # %%
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import seaborn as sns
from src import consts as const
from src.processing import attribute_builder as ab
from src.processing import plotting, refactor
# Exploratory data analysis of flight-delay data (Jupyter-style cells).
sns.set(palette="Set2")

# Output Configurations
pd.set_option('display.max_rows', 60)
pd.set_option('display.max_columns', 60)
plt.style.use('classic')

# Read Dataset: parse all timestamp columns up front.
date_cols = ['flight_date', 'scheduled_departure_date', 'off_block_date', 'take_off_date',
             'landing_date', 'on_block_date', 'scheduled_arrival_date', 'registered_delay_date']
df = pd.read_csv(const.PROCESSED_DATA_DIR / 'full_info.csv',
                 sep='\t', parse_dates=date_cols)

# %% [markdown]
# ## Overview

df.head(5)

# %%
### PRELIMINARY SETUP ###
# NOTE(review): drops a column literally named ',' — presumably a parsing
# artifact of the source csv; confirm against the raw file.
df.drop(',', axis=1, inplace=True)
df.rename(columns={'size_code': 'fleet'}, inplace=True)

print("The dataset size is: {}".format(df.shape))

# %%
# Per-column summary: dtype, non-null count, unique values, missing ratio.
types = df.dtypes
counts = df.apply(lambda x: x.count())
uniques = df.apply(lambda x: [x.unique()])
distincts = df.apply(lambda x: x.unique().shape[0])
missing_ratio = (df.isnull().sum() / df.shape[0]) * 100

cols = ['types', 'counts', 'uniques', 'distincts', 'missing_ratio']
desc = pd.concat([types, counts, uniques, distincts,
                  missing_ratio], axis=1, sort=False)
desc.columns = cols

# %%
### DELETE BASED ON FILLING FACTOR ###
df = refactor.remove_cols_nan_based(df, .7)  # remove cols with > 70% nan

# %%
# Bar chart of delayed vs non-delayed flight counts.
delayed = df[df['delay_code'].notna()].shape[0]
not_delayed = df.shape[0] - delayed
plt.subplots()
sns.barplot(x=['delayed', 'not delayed'], y=[delayed, not_delayed])
plt.savefig('num_delayed.png', bbox_inches='tight')

# %%
# using only delayed flights
df.drop(df.loc[df['delay_code'].isna()].index, axis=0, inplace=True)

# %%
# Build the airport-connection graph from origin/destination pairs.
edges = df[['origin_airport', 'destination_airport']].values
g = nx.from_edgelist(edges)
print('There are {} different airports and {} connections'.format(
    len(g.nodes()), len(g.edges())))

# %%
plotting.connections_map(df)

# %%
plotting.freq_connections(edges, save=True)

# %%
plotting.absolute_flt_pie(df, save=True)

# %%
plotting.time_distribution(df, save=True)

# %%
plotting.simple_bar(df, 'fleet', save=True)

# %%
### ADJUST FLEET ###
df = refactor.adjust_fleets(df)

# %%
plotting.airc_model_fleet(df, save=True)

# %%
plotting.fleet_time_flt(df, save=True)

# %%
plotting.tail_fleet(df, save=True)

# %%
plotting.delay_daily_pie(df, save=True)

# %%
plotting.delay_sample(df, save=True)

# %%
### REMOVING BADLY FORMED RECORDS ###
# Drop records that are logically impossible or irrelevant:
# same origin/destination, never took off or landed, training flights.
same_ori_dest = df.loc[df['origin_airport'] ==
                       df['destination_airport']]
print(
    f"# of records with same origin and destination airports: {same_ori_dest.shape[0]}")
df.drop(same_ori_dest.index, axis=0, inplace=True)

not_take_off = df.loc[df['take_off_date'].isna()]
print(
    f"# of planes that did not take off after same ori-dest instances removed: {not_take_off.shape[0]}")
df.drop(not_take_off.index, axis=0, inplace=True)

not_landing = df.loc[df['landing_date'].isna()]
print(
    f"# of planes that did not land after same ori-dest instances removed: {not_landing.shape[0]}")
df.drop(not_landing.index, axis=0, inplace=True)

training_flt = df.loc[df['service_type'] == 'K']
print(f"# of training flights: {training_flt.shape[0]}")
df.drop(training_flt.index, axis=0, inplace=True)

nan_takeoff = len(df.loc[df['take_off_date'].isna()])
nan_landing = len(df.loc[df['landing_date'].isna()])
nan_offblock = len(df.loc[df['off_block_date'].isna()])
nan_onblock = len(df.loc[df['on_block_date'].isna()])
print(f"Null take-off: {nan_takeoff}")
print(f"Null landing: {nan_landing}")
print(f"Null off-block: {nan_offblock}")
print(f"Null on-block: {nan_onblock}")

# Enforce the chronological order off-block <= take-off < landing <= on-block.
offblock_takeoff = df.loc[df['off_block_date'] > df['take_off_date']]
print(f"off-block > take-off: {len(offblock_takeoff)}")
df.drop(offblock_takeoff.index, axis=0, inplace=True)

takeoff_landing = df.loc[df['take_off_date'] >= df['landing_date']]
print(f"take-off >= landing: {len(takeoff_landing)}")
df.drop(takeoff_landing.index, axis=0, inplace=True)

landing_onblock = df.loc[df['landing_date'] > df['on_block_date']]
print(f"landing > on-block: {len(landing_onblock)}")
df.drop(landing_onblock.index, axis=0, inplace=True)

print("\nThe dataset size is: {}".format(df.shape))

# %%
# plotting.delay_month_weekday(df)

# %%
plotting.proportion_delay_type(df, save=True)

# %%
# Build delay codes
df = refactor.build_delay_codes(df)

# %%
plotting.cloud_coverage_dist(df, save=True)

# %%
df = refactor.fix_cloud_data(df)
df = refactor.remove_cols_nan_based(df, .7)  # remove cols with > 70% nan

# %%
# Only cloud level 1 survives the nan filter; drop the level suffix.
df.rename(
    columns={'origin_cloud_coverage_lvl_1': 'origin_cloud_coverage',
             'origin_cloud_height_lvl_1': 'origin_cloud_height',
             'destination_cloud_coverage_lvl_1': 'destination_cloud_coverage',
             'destination_cloud_height_lvl_1': 'destination_cloud_height'}, inplace=True)

# %%
plotting.weather_distributions(df, save=True)

# %%
plotting.cloud_distribution(df, save=True)

# %%
# Save data
df.to_csv(const.PROCESSED_DATA_DIR / 'basic_eda.csv',
          sep='\t', encoding='utf-8', index=False)
# %%
| ajmcastro/flight-time-prediction | src/processing/eda.py | eda.py | py | 5,177 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "seaborn.set",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.sty... |
72536666107 | from dataclasses import asdict, dataclass
from functools import cached_property
from time import sleep
from typing import Any, Dict, List, Optional, Union
from airflow import AirflowException
from airflow.models.taskinstance import Context
from airflow.providers.http.hooks.http import HttpHook
from constants import CRYPTO_COMPARE_HTTP_CONN_ID
from hooks.wrappers.http_stream import HttpStreamHook
from kafka import KafkaProducer
from operators.cryptocurrency.price.base import CryptocurrencyBaseOperator
from utils.exception import raise_airflow_exception
from utils.kafka import kafka_producer_context
from utils.request import get_request_json
@dataclass
class CryptocurrencyMultiPriceApiData:
    """Query parameters for CryptoCompare's ``pricemulti`` endpoint."""
    # Comma-separated "from" symbols, e.g. "BTC,ETH".
    fsyms: str
    # Comma-separated "to" currencies, e.g. "USD".
    tsyms: str
class CryptocurrencyPriceSourcingStreamOperator(CryptocurrencyBaseOperator):
    """Streams multi-symbol USD prices from CryptoCompare into Kafka.

    ``execute`` loops forever: fetch ``pricemulti`` for all configured
    symbols, flatten the response, and publish one message per symbol to
    the ``cryptocurrency`` topic, sleeping 10 s between polls.
    """

    cryptocurrency_http_conn_id: str = CRYPTO_COMPARE_HTTP_CONN_ID

    def __init__(
        self,
        symbol_list: List[str],
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.symbol_list = symbol_list

    def read(
        self,
        endpoint: str,
        data: Optional[Dict[str, Any]] = None,
    ):
        """Fetch JSON from the API endpoint with retry handling."""
        return self.try_to_get_request_json(
            http_hook=self.http_hook,
            endpoint=endpoint,
            data=data,
        )

    @cached_property
    def kafka_topic_name(self) -> str:
        # Destination Kafka topic for all price messages.
        return "cryptocurrency"

    @cached_property
    def standard_currency(self) -> str:
        # Quote currency used for every request.
        return "USD"

    @cached_property
    def sleep_second(self) -> float:
        # NOTE(review): computed but not referenced in this class —
        # execute() uses a hard-coded sleep(10); confirm which is intended.
        return 1 / len(self.symbol_list)

    @property
    def api_endpoint(self):
        # CryptoCompare endpoint returning prices for many symbols at once.
        return "pricemulti"

    def try_to_get_request_json(
        self,
        http_hook: Union[HttpHook, HttpStreamHook],
        endpoint: str,
        data: Optional[Dict[str, Any]] = None,
        retry_count: int = 5,
        err_msg: str = "",
    ) -> Dict[str, Any]:
        """GET *endpoint*, retrying recursively on Airflow errors or
        API-level "Error" responses; gives up after *retry_count* tries.
        """
        if retry_count <= 0:
            # Out of retries: surface the accumulated error messages.
            raise_airflow_exception(
                error_msg=err_msg,
                logger=self.log,
            )
        try:
            response_json = get_request_json(
                http_hook=http_hook,
                endpoint=endpoint,
                data=data,
                headers=self.api_header,
                back_off_cap=self.back_off_cap,
                back_off_base=self.back_off_base,
                proxies=self.proxies,
            )
        except AirflowException as e:
            # Transport-level failure: wait, then retry with one fewer attempt.
            self.log.info(f"raise AirflowException err_msg: {e}")
            sleep(10)
            return self.try_to_get_request_json(
                http_hook=http_hook,
                endpoint=endpoint,
                data=data,
                retry_count=retry_count - 1,
                err_msg=f"{err_msg} retry_count : {retry_count}\nerr_msg : {e} \n\n",
            )
        response_status = response_json.get("Response")
        if response_status == "Error":
            response_message = response_json.get("Message")
            if (
                response_message
                == "You are over your rate limit please upgrade your account!"
            ):
                # Rotate to the next proxy before retrying.
                # NOTE(review): PROXY_IP_IDX is presumably defined on the
                # base class — verify it exists there.
                self.PROXY_IP_IDX += 1
                self.log.info(
                    f"{response_message}, raise PROXY_IP_IDX to {self.PROXY_IP_IDX}"
                )
            return self.try_to_get_request_json(
                http_hook=http_hook,
                endpoint=endpoint,
                data=data,
                retry_count=retry_count - 1,
                err_msg=f"{err_msg} retry_count : {retry_count}\nerr_msg : {response_message} \n\n",
            )
        return response_json

    def write(
        self,
        json_data: List[Dict[str, Any]],
        kafka_producer: KafkaProducer,
    ) -> None:
        """Publish one message per entry and flush the producer."""
        for data in json_data:
            kafka_producer.send(self.kafka_topic_name, value=data)
        kafka_producer.flush()

    @cached_property
    def api_data(
        self,
    ) -> CryptocurrencyMultiPriceApiData:
        """Request parameters: all symbols quoted in the standard currency."""
        return CryptocurrencyMultiPriceApiData(
            fsyms=",".join(self.symbol_list),
            tsyms=self.standard_currency,
        )

    @staticmethod
    def transform(data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Flatten {symbol: {"USD": price}} into [{"symbol", "close"}] rows."""
        return [
            {"symbol": symbol, "close": usd.get("USD")} for symbol, usd in data.items()
        ]

    def execute(self, context: Context) -> None:
        """Endless read -> transform -> write loop (one poll every 10 s)."""
        # pass
        with kafka_producer_context() as kafka_producer:
            while 1:
                json_data = self.read(
                    endpoint=self.api_endpoint,
                    data=asdict(self.api_data),
                )
                transformed_data = self.transform(data=json_data)
                self.write(
                    json_data=transformed_data,
                    kafka_producer=kafka_producer,
                )
                sleep(10)
| ksh24865/cryptocurrency-data-pipeline | Airflow/dags/operators/cryptocurrency/price/sourcing_stream.py | sourcing_stream.py | py | 4,859 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "operators.cryptocurrency.price.base.CryptocurrencyBaseOperator",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "constants.CRYPTO_COMPARE_HTTP_CONN_ID",
"line_number": 2... |
9324466807 | from flask import Blueprint, render_template, url_for
# Flask blueprint for the "Lonely" exhibit; templates and static assets
# live alongside this module (hence the './' folders).
lonely = Blueprint('lonely',
                   __name__,
                   template_folder='./',
                   static_folder='./',
                   static_url_path='/')
# Custom attributes — presumably read by the site's exhibit index; verify.
lonely.display_name = 'Lonely'
lonely.published = True
lonely.description = "An interactive visualization of original music."


@lonely.route('/')
def _lonely():
    """Render the exhibit's landing page."""
    return render_template('lonely.html')
| connerxyz/exhibits | cxyz/exhibits/lonely/lonely.py | lonely.py | py | 437 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
}
] |
18536492857 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 15:05:36 2019
@author: Ashley
"""
# Manuscript Malezieux, Kees, Mulle submitted to Cell Reports
# Figure S3 - Complex spikes
# Description: changes in complex spikes with theta and LIA, plotted separately
# %% import modules
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
from itertools import compress
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import Divider, Size
from mpl_toolkits.axes_grid1.mpl_axes import Axes
# %% definitions
# bootstrap: one-factor ANOVA-like (for any number of groups):
# is between-group variance bigger than within-group?
def calculate_F(groups_list):
    """Median-based F-like statistic for any number of groups.

    Ratio of size-weighted between-group spread (group medians around the
    grand median) to within-group spread (absolute deviations around each
    group's own median). NaNs are ignored by the nan-aware reductions.
    """
    grand_median = np.nanmedian(np.concatenate(groups_list))
    group_medians = np.array([np.nanmedian(g) for g in groups_list])
    group_sizes = np.array([g.size for g in groups_list])
    within = np.array([np.nansum(np.abs(g - np.nanmedian(g)))
                       for g in groups_list])
    between = np.sum(group_sizes * np.abs(grand_median - group_medians))
    return between / np.sum(within)
# one-way anova for many groups: resampling from big box; only when variances are the same
# returns real_F, p_boot
def boot_anova(groups_list, num_b):
    """Bootstrap one-factor ANOVA-like test on the median-based F statistic.

    Builds a null distribution of F with *num_b* resamples and returns
    (real_F, p_boot), the observed statistic and the fraction of null
    statistics exceeding it. The resampling scheme depends on whether the
    group variances (MADAM) are comparable.
    """
    # compute the real F
    real_F = calculate_F(groups_list)
    faux_F = np.zeros(num_b)
    # compute size and variance of groups
    groups_size = [np.nan]*len(groups_list)
    groups_var = [np.nan]*len(groups_list)
    for g in np.arange(len(groups_list)):
        groups_size[g] = groups_list[g].size
        groups_var[g] = MADAM(groups_list[g], np.nanmedian(groups_list[g]))
    # if the largest variance is more than 2x the smallest, resample within groups:
    # demean each group and sample with replacement
    if max(groups_var)/min(groups_var) > 2:
        # subtract the median from each group before resampling
        dm_groups_list = [np.nan] * len(groups_list)
        for g in np.arange(len(groups_list)):
            dm_groups_list[g] = groups_list[g] - np.nanmedian(groups_list[g])
        # shuffle and deal from each group with replacement
        for b in np.arange(num_b):
            # deal into faux groups, each one the same size as in real data
            f_groups_list = [None] * len(groups_list)
            for g in np.arange(len(groups_list)):
                group = dm_groups_list[g]
                resample = group[np.random.randint(0, group.size, size=group.size)]
                f_groups_list[g] = resample
            faux_F[b] = calculate_F(f_groups_list)
        p_boot = np.sum(faux_F > real_F)/num_b
    # if the variances are mostly the same, resample from the big box without replacement
    else:
        box = np.concatenate(groups_list)
        for b in np.arange(num_b):
            np.random.shuffle(box)
            box1 = np.copy(box)
            # deal into faux groups, each one the same size as in real data
            f_groups_list = list()
            for g in np.arange(len(groups_list)):
                f_groups_list.append(box1[0:int(groups_size[g])])
                box1 = box1[int(groups_size[g]):]
            faux_F[b] = calculate_F(f_groups_list)
        p_boot = np.sum(faux_F > real_F)/num_b
    return real_F, p_boot
# definition for self_calculated variance (called MADAM??)
# VERSION: accounts for nans when dividing by number of samples
def MADAM(data_pts, descriptor):
    """Mean absolute deviation of *data_pts* around *descriptor*.

    NaN entries contribute nothing to the sum and are excluded from the
    sample count in the denominator.
    """
    deviations = np.abs(data_pts - descriptor)
    valid_count = np.count_nonzero(~np.isnan(data_pts))
    return np.nansum(deviations) / valid_count
def boot_t(t_g0, t_g1, num_b):
    """Bootstrap test for a difference in medians of two independent groups.

    Pools both groups (null hypothesis of no difference), resamples each
    group with replacement *num_b* times, and returns the observed median
    difference and a two-tailed bootstrap p-value.
    """
    real_d = np.nanmedian(t_g1) - np.nanmedian(t_g0)
    faux_d = np.zeros(num_b)
    # pool both groups under the null hypothesis
    box = np.append(t_g0, t_g1)
    for b in np.arange(num_b):
        f_g0 = box[np.random.randint(0, box.size, size=t_g0.size)]
        f_g1 = box[np.random.randint(0, box.size, size=t_g1.size)]
        faux_d[b] = np.nanmedian(f_g1) - np.nanmedian(f_g0)
    # two-tailed: fraction of null |differences| exceeding the observed one
    p = np.sum(np.abs(faux_d) > np.abs(real_d))/num_b
    return real_d, p
def boot_pair_t(diff, num_b):
    """Bootstrap sign-flip test for paired differences.

    Randomly flips the sign of each difference *num_b* times to build the
    null distribution of the mean; returns the observed mean difference
    and a one-tailed p-value (fraction of null means below the observed).
    """
    real_d = np.mean(diff)
    faux_d = np.zeros(num_b)
    for b in np.arange(num_b):
        sample = np.random.choice([-1, 1], size = diff.size, replace=True)
        faux_d[b] = np.mean(diff*sample)
    p = np.sum(faux_d<real_d)/num_b
    return real_d, p
# definiton for finding 95% confidence intervals for each bin in histogram
# Version: for a **mean** histogram of **several** histograms
# H_array must be arranged [samples, bins]
def CI_avg_hist(H_array, num_b, CI_perc):
    """Bootstrap confidence band for the mean of several histograms.

    *H_array* is arranged [samples, bins]. Resamples histograms (rows)
    with replacement *num_b* times and returns (mean histogram, upper CI
    bound, lower CI bound), each one value per bin at *CI_perc* coverage.
    """
    real_H = np.nanmean(H_array, axis=0)
    faux_H = np.full([H_array.shape[1], num_b], np.nan)
    for b in np.arange(num_b):
        # resample whole histograms (rows) with replacement
        samp = np.random.randint(0, H_array.shape[0], H_array.shape[0])
        faux_H[:, b] = np.nanmean(H_array[samp, :], axis=0)
    CI_low, CI_high = np.nanpercentile(faux_H, [(100-CI_perc)/2, 100-((100-CI_perc)/2)],
                                       axis=1)
    return real_H, CI_high, CI_low
# eta = event triggered averages. CHANGE: nans instead of removing events
def prepare_eta(signal, ts, event_times, win):
    """Event-triggered traces of *signal* around each event time.

    Returns (et_signal, et_ts): et_ts is the window time axis relative to
    the event, and et_signal stacks one window per event along the last
    axis. Windows that extend past the recording are filled with NaN
    rather than dropped. Handles 1-D or 2-D signals (time is the last axis).
    """
    # number of samples spanned by the pre- and post-event window halves
    win_npts = [ts[ts < ts[0] + np.abs(win[0])].size,
                ts[ts < ts[0] + np.abs(win[1])].size]
    et_ts = ts[0:np.sum(win_npts)] - ts[0] + win[0]
    et_signal = np.empty(0)
    if event_times.size > 0:
        if signal.ndim == 1:
            et_signal = np.zeros((et_ts.size, event_times.size))
            for i in np.arange(event_times.size):
                if np.logical_or((event_times[i]+win[0]<ts[0]), (event_times[i]+win[1]>ts[-1])):
                    # window falls outside the recording: keep the event, NaN-fill
                    et_signal[:, i] = np.nan*np.ones(et_ts.size)
                else:
                    # find index of closest timestamp to the event time
                    ind = np.argmin(np.abs(ts-event_times[i]))
                    et_signal[:, i] = signal[(ind - win_npts[0]): (ind + win_npts[1])]
        elif signal.ndim == 2:
            et_signal = np.zeros((signal.shape[0], et_ts.size, event_times.size))
            for i in np.arange(event_times.size):
                if np.logical_or((event_times[i]+win[0]<ts[0]), (event_times[i]+win[1]>ts[-1])):
                    et_signal[:, :, i] = np.nan*np.ones([signal.shape[0], et_ts.size])
                else:
                    # find index of closest timestamp to the event time
                    ind = np.argmin(np.abs(ts-event_times[i]))
                    et_signal[:, :, i] = signal[:, (ind - win_npts[0]):
                                                (ind + win_npts[1])]
    return et_signal, et_ts
# eta = event triggered averages
# this code is for point processes, but times instead of inds
def prepare_eta_times(pt_times, event_times, win):
    """Event-triggered point-process times.

    For every event, collect the point times falling strictly inside
    (event + win[0], event + win[1]) and re-reference them to the event.
    Returns a list with one array per event; when either input is empty,
    the list holds one empty array per event.
    """
    if pt_times.size == 0 or event_times.size == 0:
        return [np.empty(0) for _ in np.arange(event_times.size)]
    windows = []
    for ev in event_times:
        in_win = (pt_times > ev + win[0]) & (pt_times < ev + win[1])
        windows.append(pt_times[in_win] - ev)
    return windows
# eta = event triggered averages: Version: skip events too close to edge
def prepare_eta_skip(signal, ts, event_times, win):
    """Event-triggered traces; version that drops edge events.

    Same as prepare_eta, but events whose window would extend past the
    start or end of the recording are removed instead of NaN-filled, so
    et_signal may have fewer columns than there were events.
    """
    # number of samples spanned by the pre- and post-event window halves
    win_npts = [ts[ts < ts[0] + np.abs(win[0])].size,
                ts[ts < ts[0] + np.abs(win[1])].size]
    et_ts = ts[0:np.sum(win_npts)] - ts[0] + win[0]
    et_signal = np.empty(0)
    if event_times.size > 0:
        # remove any events that are too close to the beginning or end of recording
        if event_times[0]+win[0] < ts[0]:
            event_times = event_times[1:]
        if event_times[-1]+win[1] > ts[-1]:
            event_times = event_times[:-1]
        if signal.ndim == 1:
            et_signal = np.zeros((et_ts.size, event_times.size))
            for i in np.arange(event_times.size):
                # find index of closest timestamp to the event time
                ind = np.argmin(np.abs(ts-event_times[i]))
                et_signal[:, i] = signal[(ind - win_npts[0]): (ind + win_npts[1])]
        elif signal.ndim == 2:
            et_signal = np.zeros((signal.shape[0], et_ts.size, event_times.size))
            for i in np.arange(event_times.size):
                # find index of closest timestamp to the event time
                ind = np.argmin(np.abs(ts-event_times[i]))
                et_signal[:, :, i] = signal[:, (ind - win_npts[0]):
                                            (ind + win_npts[1])]
    return et_signal, et_ts
# %% load data
dataset_folder = (r'C:\Users\akees\Documents\Ashley\Papers\MIND 1\Cell Reports\Dryad upload\Dataset')
cell_files = os.listdir(dataset_folder)
data = [{} for k in np.arange(len(cell_files))]
for i in np.arange(len(cell_files)):
full_file = os.path.join(dataset_folder, cell_files[i])
data[i] = np.load(full_file, allow_pickle=True).item()
states = [{'id':'theta', 'bef':-2.5, 'aft':0.5, 'samp_time':2, 't_win':[-3, 3]},
{'id':'LIA', 'bef':-4, 'aft':-1, 'samp_time':2, 't_win':[-4, 2]}]
ntl = ['nost', 'theta', 'LIA']
# %% process data - for dVm vs dFR analysis
# for each cell, find start and stop times for unlabeled times
for i in np.arange(len(data)):
state_start = np.concatenate([data[i]['theta_start'], data[i]['LIA_start']])
state_start = np.sort(state_start)
state_stop = np.concatenate([data[i]['theta_stop'], data[i]['LIA_stop']])
state_stop = np.sort(state_stop)
data[i]['nost_start'] = np.append(data[i]['Vm_ds_ts'][0], state_stop)
data[i]['nost_stop'] = np.append(state_start, data[i]['Vm_ds_ts'][-1])
# for each cell, make a new spike_times for specifically non-spikelets
for i in np.arange(len(data)):
data[i]['spike_times'] = np.delete(data[i]['sp_times'],
data[i]['spikelets_ind'])
# for each cell, calculate the isi (inter-spike-interval)
# for true spikes only
for i in np.arange(len(data)):
if data[i]['spike_times'].size > 0:
isi0 = data[i]['spike_times'][0] - data[i]['Vm_ds_ts'][0]
data[i]['isi'] = np.ediff1d(data[i]['spike_times'], to_begin=isi0)
else:
data[i]['isi'] = np.empty(0)
# find the (true) spikes that are within bursts
burst_isi = 0.006 # seconds (Mizuseki 2012 0.006)
for i in np.arange(len(data)):
if data[i]['spike_times'].size > 0:
burst_bool = 1*(data[i]['isi'] < burst_isi)
burst_sp = np.where(data[i]['isi'] < burst_isi)[0]
burst_sp0 = np.where(np.ediff1d(burst_bool) == 1)[0]
bursts = [None]*len(burst_sp0)
if burst_sp0.size > 0:
for j in np.arange(len(burst_sp0)-1):
inds = np.append(burst_sp0[j], burst_sp[np.logical_and(burst_sp > burst_sp0[j],
burst_sp < burst_sp0[j+1])])
bursts[j] = data[i]['spike_times'][inds]
# special case for the last burst:
j = len(burst_sp0)-1
inds = np.append(burst_sp0[j], burst_sp[burst_sp > burst_sp0[j]])
bursts[j] = data[i]['spike_times'][inds]
data[i]['bursts'] = bursts
else:
data[i]['bursts'] = [None]*0
# add windows triggered by start of some brain states
# collect relative times for (true) spikes, singles, doublets, bursts, and CS
for l in np.arange(len(states)):
for i in np.arange(len(data)):
t_Vm, t_ts = prepare_eta(data[i]['Vm_s_ds'], data[i]['Vm_ds_ts'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_sp = prepare_eta_times(data[i]['sp_times'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_spike = prepare_eta_times(data[i]['spike_times'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
spikelet_times = data[i]['sp_times'][data[i]['spikelets_ind'].astype(int)]
t_spikelet = prepare_eta_times(spikelet_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
single_times = data[i]['sp_times'][data[i]['singles_ind'].astype(int)]
t_single = prepare_eta_times(single_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
if data[i]['doublets_ind'].size > 0:
doublet_times = data[i]['sp_times'][data[i]['doublets_ind'][0]]
else:
doublet_times = np.empty(0)
t_doublet = prepare_eta_times(doublet_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
burst_times = np.array([d[0] for d in data[i]['bursts']])
t_burst = prepare_eta_times(burst_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_CS = prepare_eta_times(data[i]['CS_start'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
data[i][states[l]['id']+'_Vm'] = t_Vm
data[i][states[l]['id']+'_sp'] = t_sp # all spikes and spikelets
data[i][states[l]['id']+'_spike'] = t_spike # all spikes (no spikelets)
data[i][states[l]['id']+'_spikelet'] = t_spikelet
data[i][states[l]['id']+'_single'] = t_single
data[i][states[l]['id']+'_doublet'] = t_doublet
data[i][states[l]['id']+'_burst'] = t_burst
data[i][states[l]['id']+'_CS'] = t_CS
states[l]['t_ts'] = t_ts
# add windows triggered by start of some brain states
# collect relative times for (true) spikes, singles, doublets, bursts, and CS
for l in np.arange(len(states)):
for i in np.arange(len(data)):
single_times = data[i]['sp_times'][data[i]['singles_ind'].astype(int)]
if data[i]['doublets_ind'].size > 0:
doublet_times = np.concatenate(data[i]['sp_times'][data[i]['doublets_ind']])
else:
doublet_times = np.empty(0)
nonCS_times = np.sort(np.concatenate((single_times, doublet_times)))
t_nonCS = prepare_eta_times(nonCS_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
if len(data[i]['CS_ind']) > 0:
CS_times = data[i]['sp_times'][np.concatenate(data[i]['CS_ind'])]
else:
CS_times = np.empty(0)
t_CS = prepare_eta_times(CS_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
data[i][states[l]['id']+'_CS_spikes'] = t_CS
data[i][states[l]['id']+'_nonCS_spikes'] = t_nonCS
# for each event in each cell, calculate the CS rate and CS index
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_rate = np.full(data[i][ntl[l]+'_start'].size, np.nan)
CS_perc = np.full(data[i][ntl[l]+'_start'].size, np.nan)
for j in np.arange(data[i][ntl[l]+'_start'].size):
start = data[i][ntl[l]+'_start'][j]
stop = data[i][ntl[l]+'_stop'][j]
num_spikes = np.sum(np.logical_and(data[i]['spike_times'] > start,
data[i]['spike_times'] < stop))
if len(data[i]['CS_ind']) > 0:
CS_spike_times = data[i]['sp_times'][np.concatenate(data[i]['CS_ind'])]
num_CS_spikes = np.sum(np.logical_and(CS_spike_times > start,
CS_spike_times < stop))
num_CS = np.sum(np.logical_and(data[i]['CS_start'] > start,
data[i]['CS_start'] < stop))
else:
num_CS_spikes = 0
num_CS = 0
CS_perc[j] = num_CS_spikes/num_spikes
CS_rate[j] = num_CS/(stop-start)
data[i][ntl[l]+'_CS_rate'] = CS_rate
data[i][ntl[l]+'_CS_perc'] = CS_perc
# %% event-based organization for dVm vs dFR
# make a dictionary to hold values collapsed over all cells
events = [{} for k in np.arange(len(states))]
# find Vm0, dVm and significance for each run, excluding when Ih is changed
for l in np.arange(len(states)):
all_c_p = np.empty(0)
all_Ih = np.empty(0)
all_Vm0 = np.empty(0)
all_dVm = np.empty(0)
all_dVm_p = np.empty(0)
for i in np.arange(len(data)):
samp_freq = 1/(data[i]['Vm_ds_ts'][1] - data[i]['Vm_ds_ts'][0])
num_ind = int(states[l]['samp_time']*samp_freq)
# find index of dIh_times
dIh_ind = data[i]['dIh_times']*samp_freq
dIh_ind = dIh_ind.astype(int)
c_p = np.zeros(data[i][states[l]['id']+'_start'].size)
Ih = np.zeros(data[i][states[l]['id']+'_start'].size)
Vm0 = np.zeros(data[i][states[l]['id']+'_start'].size)
dVm = np.zeros(data[i][states[l]['id']+'_start'].size)
dVm_p = np.zeros(data[i][states[l]['id']+'_start'].size)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
# find indices
bef_ind = int(np.sum(data[i]['Vm_ds_ts'] <
(data[i][states[l]['id']+'_start'][j] + states[l]['bef'])))
aft_ind = int(np.sum(data[i]['Vm_ds_ts'] <
(data[i][states[l]['id']+'_start'][j] + states[l]['aft'])))
# put nan if times are straddling a time when dIh is changed
dIh_true = np.where((dIh_ind > bef_ind) & (dIh_ind < aft_ind + num_ind))[0]
if dIh_true.size > 0:
Ih[j] = np.nan
Vm0[j] = np.nan
dVm[j] = np.nan
dVm_p[j] = np.nan
else:
if np.logical_or(l==0, l==1):
c_p[j] = data[i][states[l]['id']+'_cell_p']
else:
c_p[j] = data[i]['theta_cell_p']
Ih_ind = np.searchsorted(data[i]['Vm_Ih_ts'],
data[i][states[l]['id']+'_start'][j])
Ih[j] = data[i]['Vm_Ih'][Ih_ind]
# test whether Vm values are significantly different
# Welch's t-test: normal, unequal variances, independent samp
t, p = stats.ttest_ind(data[i]['Vm_ds'][bef_ind:bef_ind+num_ind],
data[i]['Vm_ds'][aft_ind:aft_ind+num_ind],
equal_var=False, nan_policy='omit')
dVm_p[j] = p
if (np.nanmean(data[i]['Vm_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmean(data[i]['Vm_ds'][bef_ind:bef_ind+num_ind])) > 0:
Vm0[j] = np.nanmin(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind])
dVm[j] = (np.nanmax(data[i]['Vm_s_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmin(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind]))
else:
Vm0[j] = np.nanmax(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind])
dVm[j] = (np.nanmin(data[i]['Vm_s_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmax(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind]))
data[i][states[l]['id']+'_c_p'] = c_p
data[i][states[l]['id']+'_Ih'] = Ih
data[i][states[l]['id']+'_Vm0'] = Vm0
data[i][states[l]['id']+'_dVm'] = dVm
data[i][states[l]['id']+'_dVm_p'] = dVm_p
all_c_p = np.append(all_c_p, c_p)
all_Ih = np.append(all_Ih, Ih)
all_Vm0 = np.append(all_Vm0, Vm0)
all_dVm = np.append(all_dVm, dVm)
all_dVm_p = np.append(all_dVm_p, dVm_p)
events[l]['c_p'] = all_c_p
events[l]['Ih'] = all_Ih
events[l]['Vm0'] = all_Vm0
events[l]['dVm'] = all_dVm
events[l]['dVm_p'] = all_dVm_p
# add windows triggered by start of some brain states
for l in np.arange(len(states)):
for i in np.arange(len(data)):
t_Vm, t_ts = prepare_eta(data[i]['Vm_s_ds'], data[i]['Vm_ds_ts'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_sp = prepare_eta_times(data[i]['sp_times'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
data[i][states[l]['id']+'_Vm'] = t_Vm
data[i][states[l]['id']+'_sp'] = t_sp
states[l]['t_ts'] = t_ts
# add triggered windows to event dictionary
for l in np.arange(len(events)):
raster_sp = []
psth_sp = np.empty(0)
Vm = np.empty((states[l]['t_ts'].shape[0], 0))
duration = np.empty(0)
cell_id = np.empty(0)
for i in np.arange(len(data)):
cell_psth_sp = np.empty(0)
if data[i][states[l]['id']+'_start'].size > 0:
Vm = np.append(Vm, data[i][states[l]['id']+'_Vm'], axis=1)
duration = np.append(duration, (data[i][states[l]['id']+'_stop'] -
data[i][states[l]['id']+'_start']))
if isinstance(data[i]['cell_id'], str):
ind = data[i]['cell_id'].find('_')
cell_int = int(data[i]['cell_id'][:ind])*np.ones(data[i][states[l]['id']+'_start'].size)
cell_id = np.append(cell_id, cell_int)
else:
cell_int = data[i]['cell_id']*np.ones(data[i][states[l]['id']+'_start'].size)
cell_id = np.append(cell_id, cell_int)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
psth_sp = np.append(psth_sp, data[i][states[l]['id']+'_sp'][j])
cell_psth_sp = np.append(cell_psth_sp, data[i][states[l]['id']+'_sp'][j])
raster_sp.append(data[i][states[l]['id']+'_sp'][j])
data[i][states[l]['id']+'_psth_sp'] = cell_psth_sp
# remove nans
no_nan = np.logical_and([~np.isnan(Vm).any(axis=0)],
[~np.isnan(events[l]['Vm0'])]).flatten()
events[l]['Vm'] = Vm[:, no_nan]
events[l]['cell_id'] = cell_id[no_nan]
events[l]['duration'] = duration[no_nan]
events[l]['raster_sp'] = list(compress(raster_sp, no_nan))
events[l]['c_p'] = events[l]['c_p'][no_nan]
events[l]['Ih'] = events[l]['Ih'][no_nan]
events[l]['Vm0'] = events[l]['Vm0'][no_nan]
events[l]['dVm'] = events[l]['dVm'][no_nan]
events[l]['dVm_p'] = events[l]['dVm_p'][no_nan]
# %% process data - for CS/burst analysis
# for each (true) spike, determine which state it occurs in (and those in no state)
# Version: all spikes, not just those used for spike threshold analysis
for i in np.arange(len(data)):
nost_sp = np.ones(data[i]['spike_times'].size, dtype=bool)
for l in np.arange(len(states)):
state_sp = np.zeros(data[i]['spike_times'].size, dtype=bool)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
# find the spikes that occur in that event
temp_bool = np.all((data[i]['spike_times'] > data[i][states[l]['id']+'_start'][j],
data[i]['spike_times'] < data[i][states[l]['id']+'_stop'][j]),
axis=0)
state_sp = state_sp + temp_bool
data[i][states[l]['id']+'_spike_bool'] = np.squeeze(state_sp)
nost_sp = nost_sp*[state_sp == False]
data[i]['nost_spike_bool'] = np.squeeze(nost_sp)
# for each burst, determine which state it occurs in (and those in no state)
for i in np.arange(len(data)):
burst_start = np.array([d[0] for d in data[i]['bursts']])
nost_bst = np.ones(burst_start.size, dtype=bool)
for l in np.arange(len(states)):
state_bst = np.zeros(burst_start.size, dtype=bool)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
# find the bursts that start during that event
temp_bool = np.all((burst_start > data[i][states[l]['id']+'_start'][j],
burst_start < data[i][states[l]['id']+'_stop'][j]),
axis=0)
state_bst = state_bst + temp_bool
data[i][states[l]['id']+'_bst_bool'] = np.squeeze(state_bst)
nost_bst = nost_bst*[state_bst == False]
data[i]['nost_bst_bool'] = np.squeeze(nost_bst)
# for each cell, determine the % of spikes in bursts for theta, LIA, nost
ntl = ['nost', 'theta', 'LIA']
for i in np.arange(len(data)):
burst_perc = np.full(3, np.nan)
sp_times = data[i]['spike_times']
if len(data[i]['bursts']) > 0:
burst_times = np.concatenate(data[i]['bursts'])
else:
burst_times = 0
for l in np.arange(len(ntl)):
total_spikes = 0
burst_spikes = 0
for j in np.arange(data[i][ntl[l]+'_start'].size):
start = data[i][ntl[l]+'_start'][j]
stop = data[i][ntl[l]+'_stop'][j]
spikes = np.sum(np.logical_and(sp_times > start, sp_times < stop))
bursts = np.sum(np.logical_and(burst_times > start, burst_times < stop))
total_spikes = total_spikes + spikes
burst_spikes = burst_spikes + bursts
if total_spikes != 0:
burst_perc[l] = burst_spikes/total_spikes
data[i]['burst_perc'] = burst_perc
# for each CS, determine which state it occurs in (and those in no state)
for i in np.arange(len(data)):
nost_CS = np.ones(data[i]['CS_start'].size, dtype=bool)
for l in np.arange(len(states)):
state_CS = np.zeros(data[i]['CS_start'].size, dtype=bool)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
# find the bursts that start during that event
temp_bool = np.all((data[i]['CS_start'] > data[i][states[l]['id']+'_start'][j],
data[i]['CS_start'] < data[i][states[l]['id']+'_stop'][j]),
axis=0)
state_CS = state_CS + temp_bool
data[i][states[l]['id']+'_CS_bool'] = np.squeeze(state_CS)
nost_CS = nost_CS*[state_CS == False]
data[i]['nost_CS_bool'] = np.squeeze(nost_CS)
# collect the CS features divided by state
keep_cells = np.where([isinstance(d['cell_id'], int) for d in data])[0]
CS_ntl = [{} for l in np.arange(len(ntl))]
for l in np.arange(len(ntl)):
num_sp = np.empty(0)
CS_dur = np.empty(0)
CS_height_Vm = np.empty(0)
CS_rel_ahp_Vm = np.empty(0)
for k in np.arange(keep_cells.size):
i = keep_cells[k]
num_sp = np.append(num_sp, np.array([d.size for d in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']])
CS_dur = np.append(CS_dur, (data[i]['CS_stop'] - data[i]['CS_start'])[data[i][ntl[l]+'_CS_bool']])
CS_height_Vm = np.append(CS_height_Vm, (data[i]['CS_max_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']]))
CS_rel_ahp_Vm = np.append(CS_rel_ahp_Vm, (data[i]['CS_stop_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']]))
CS_ntl[l]['num_sp'] = num_sp
CS_ntl[l]['CS_dur'] = CS_dur
CS_ntl[l]['CS_height_Vm'] = CS_height_Vm
CS_ntl[l]['CS_rel_ahp_Vm'] = CS_rel_ahp_Vm
# %% set figure parameters
# set colors
# states
c_run_theta = [0.398, 0.668, 0.547]
c_nonrun_theta = [0.777, 0.844, 0.773]
c_LIA = [0.863, 0.734, 0.582]
# response type
c_hyp = [0.184, 0.285, 0.430]
c_dep = [0.629, 0.121, 0.047]
c_no = [1, 1, 1]
c_lhyp = [0.62, 0.71, 0.84]
c_ldep = [0.97, 0.71, 0.67]
# dependent variables
c_sp = [0.398, 0.461, 0.703]
c_Vm = [0.398, 0.461, 0.703]
# other
c_lgry = [0.75, 0.75, 0.75]
c_mgry = [0.5, 0.5, 0.5]
c_dgry = [0.25, 0.25, 0.25]
c_wht = [1, 1, 1]
c_blk = [0, 0, 0]
c_bwn = [0.340, 0.242, 0.125]
c_lbwn = [0.645, 0.484, 0.394]
c_grn = [0.148, 0.360, 0.000]
c_dVm = [c_hyp, c_mgry, c_dep]
c_state = [c_mgry, c_run_theta, c_lbwn]
c_state_dark = [c_dgry, c_grn, c_bwn]
c_tl = [c_run_theta, c_lbwn]
c_tnl = [c_run_theta, c_blk, c_lbwn]
# set style defaults
mpl.rcParams['font.size'] = 8
mpl.rcParams['savefig.dpi'] = 1200
mpl.rcParams['lines.linewidth'] = 1.5
mpl.rcParams['font.sans-serif'] = "Arial"
mpl.rcParams['font.family'] = "sans-serif"
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.linewidth'] = 1
mpl.rcParams['xtick.major.size'] = 4
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['ytick.major.size'] = 4
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['boxplot.whiskerprops.linestyle'] = '-'
mpl.rcParams['patch.force_edgecolor'] = True
mpl.rcParams['patch.facecolor'] = 'b'
# set figure output folder
fig_folder = r'C:\Users\akees\Documents\Ashley\Figures\2020-05_Paper_MIND1\FigS3'
# set which states to plot
## all states
#d_l = [0, 1, 2]
# theta only
d_l = [0, 1]
## LIA only
#d_l = [0, 2]
# %% make hist isi figure
keep_cells = [isinstance(d['cell_id'], int) for d in data]
theta_cell_p = np.array([d['theta_cell_p'] for d in data])[keep_cells]
LIA_cell_p = np.array([d['LIA_cell_p'] for d in data])[keep_cells]
c_state_hist = [c_mgry, c_grn, c_bwn]
c_state_fill = [c_lgry, c_run_theta, c_lbwn]
# prep numbers for mean hist isi - divided between states
bins = np.arange(0, 200, 1)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
H[i, :, l] = np.histogram(1000*data[i]['isi'][data[i][ntl[l]+'_spike_bool']],
bins=bins, density=True)[0]
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
fig, ax = plt.subplots(1, figsize=[4.5, 2.2])
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
#ax.axvline(6, color=c_blk, linestyle='--')
#ax.set_xlim([0, 200])
ax.set_ylim([0, 0.27])
ax.set_yticks([0, 0.05, 0.1, 0.15, 0.2, 0.25])
ax.set_yticklabels([0, '', 0.1, '', 0.2, ''])
ax.set_ylabel('proportion')
ax.set_xscale('log')
ax.set_xlim([1, 100])
ax.set_xlabel('inter-spike interval (ms)')
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_hist_isi.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
isi = 1000*data[i]['isi'][data[i][ntl[l]+'_spike_bool']]
if isi.size > 10:
S[i, l] = np.nanmedian(isi)
# remove extra recordings from cells
S = S[keep_cells, :]
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - number of spikes per CS
keep_cells = [isinstance(d['cell_id'], int) for d in data]
theta_cell_p = np.array([d['theta_cell_p'] for d in data])[keep_cells]
LIA_cell_p = np.array([d['LIA_cell_p'] for d in data])[keep_cells]
c_state_hist = [c_mgry, c_grn, c_bwn]
c_state_fill = [c_lgry, c_run_theta, c_lbwn]
# prep numbers for mean hist of # spikes in CS - divided between states
bins = np.arange(0.5, 51.5, 1)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_sp = np.array([d.size for d in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']]
H[i, :, l] = np.histogram(num_sp, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# sizes are in inch.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
#for l in np.arange(len(ntl)):
for l in d_l:
ax.plot(np.arange(1, bins.size), H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(np.arange(1, bins.size), H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
ax.set_ylim([0, 0.4])
ax.set_yticks([0, 0.2, 0.4])
ax.set_yticklabels([0, '', 0.4])
ax.set_xlim([3, 12])
ax.set_xticks([3, 6, 9, 12])
ax.set_xlabel('number of spikes')
ax.set_ylabel('proportion')
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_num_spikes.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_sp = np.array([d.size for d in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']]
if num_sp.size > 0:
#S[i, l] = stats.mode(num_sp)[0]
S[i, l] = np.nanmedian(num_sp)
# remove extra recordings from cells
S = S[keep_cells, :]
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - CS duration
# prep numbers for mean hist CS duration - divided between states
bins = np.arange(0, 0.5, 0.02)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_dur = (data[i]['CS_stop'] - data[i]['CS_start'])[data[i][ntl[l]+'_CS_bool']]
H[i, :, l] = np.histogram(CS_dur, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist CS duration
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# sizes are in inch.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
ax.set_xlim([0, 0.2])
ax.set_ylim([0, 0.5])
ax.set_xticks([0, 0.1, 0.2])
ax.set_yticks([0, 0.25, 0.5])
ax.set_yticklabels([0, '', 0.5])
ax.set_xlabel('duration (ms)')
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_dur.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_dur = 1000*(data[i]['CS_stop'] - data[i]['CS_start'])[data[i][ntl[l]+'_CS_bool']]
if CS_dur.size > 0:
S[i, l] = np.nanmedian(CS_dur)
# remove extra recordings from cells
S = S[keep_cells, :]
## do the kruskall-wallace
#H, p_kw = stats.kruskal(S[:, 0], S[:, 1], S[:, 2], nan_policy='omit')
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA nondep cells
dif = S[:, 2][LIA_cell_p < 0.95] - S[:, 0][LIA_cell_p < 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - subthreshold depolarization during CS
# prep numbers for mean hist CS max-start Vm - divided between states
bins = np.arange(0, 40, 2)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_height_Vm = (data[i]['CS_max_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
H[i, :, l] = np.histogram(CS_height_Vm, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# sizes are in inch.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
#ax.set_xlabel('CS height (mV)')
#ax.set_ylabel('proportion of CS')
ax.set_xlim([0, 35])
ax.set_xticks([0, 10, 20, 30])
ax.set_xlabel('subthreshold depolarization (mV)')
ax.set_ylim([0, 0.3])
ax.set_yticks([0, 0.1, 0.2, 0.3])
ax.set_yticklabels([0, '', '', 0.3])
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_height.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_height_Vm = (data[i]['CS_max_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
if CS_height_Vm.size > 0:
S[i, l] = np.nanmedian(CS_height_Vm)
# remove extra recordings from cells
S = S[keep_cells, :]
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
## LIA nondep cells
#dif = S[:, 2][LIA_cell_p < 0.95] - S[:, 0][LIA_cell_p < 0.95]
## remove nans
#dif = dif[~np.isnan(dif)]
#d, p = boot_pair_t(dif, num_b)
#print(dif.size)
#print(d)
#print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - after-CS hyperpolarization
# prep numbers for mean hist CS relative ahp - divided between states
bins = np.arange(-25, 10, 2)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_rel_ahp_Vm = (data[i]['CS_stop_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
H[i, :, l] = np.histogram(CS_rel_ahp_Vm, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# sizes are in inch.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
#ax.set_xlabel('CS relative afterhyperpolarization (mV)')
#ax.set_ylabel('proportion of CS')
ax.set_xlim([-20, 3])
ax.set_xticks([-20, -10, 0])
ax.set_xlabel('hyperpolarization (mV)')
ax.set_ylim([0, 0.5])
ax.set_yticks([0, 0.25, 0.5])
ax.set_yticklabels([0, '', 0.5])
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_ahp.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_rel_ahp_Vm = (data[i]['CS_stop_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
if CS_rel_ahp_Vm.size > 0:
S[i, l] = np.nanmedian(CS_rel_ahp_Vm)
# remove extra recordings from cells
S = S[keep_cells, :]
## do the friedman test (nonparametric repeated measures anova)
## remove cells that have any nans
#S_nonan = S[np.all(~np.isnan(S), axis=1), :]
#X2, p_fried = stats.friedmanchisquare(S_nonan[:, 0], S_nonan[:, 1], S_nonan[:, 2])
#X2, p_fried = stats.friedmanchisquare(S[:, 0], S[:, 1], S[:, 2])
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# theta nonhyp cells
dif = dif = S[:, 1][theta_cell_p > 0.05] - S[:, 0][theta_cell_p > 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
#%% CS-based stats for the average histograms
measure = 'num_sp'
measure = 'CS_dur'
measure = 'CS_height_Vm'
measure = 'CS_rel_ahp_Vm'
num_b = 1000
g0 = CS_ntl[0][measure]
g1 = CS_ntl[1][measure]
g2 = CS_ntl[2][measure]
groups_list = [g0, g1, g2]
real_F, p_boot = boot_anova(groups_list, num_b)
# try the stats test again with a kruskal-wallace (nonparametric 1-way anova)
H, p_kw = stats.kruskal(g0, g1, g2, nan_policy='omit')
# do the pairwise t-tests
boot_t(g0, g1, 1000)
boot_t(g0, g2, 1000)
boot_t(g1, g2, 1000)
# do the 2-sample Kolmogorov–Smirnov test (good for bimodal distributions?)
stats.ks_2samp(g0, g1)
stats.ks_2samp(g0, g2)
stats.ks_2samp(g1, g2)
# some numbers from the histogram
l = 1
CS_ntl[l][measure].size
np.nanmedian(CS_ntl[l][measure])
np.nanstd(CS_ntl[l][measure])
MADAM(CS_ntl[l][measure], np.nanmedian(CS_ntl[l][measure]))
# %% make CS rate and index figures
keep_cells = [isinstance(d['cell_id'], int) for d in data]
theta_cell_p = np.array([d['theta_cell_p'] for d in data])[keep_cells]
LIA_cell_p = np.array([d['LIA_cell_p'] for d in data])[keep_cells]
## find which cells have a significant change - boot
#anova_cells = np.full(len(data), np.nan)
#t_boot_cells = np.full([len(data), len(states)], np.nan)
#real_d_cells = np.full([len(data), len(states)], np.nan)
#num_b = 1000
#for i in np.arange(len(data)):
# groups_list = [data[i]['nost_CS_rate'], data[i]['theta_CS_rate'],
# data[i]['LIA_CS_rate']]
# real_F, anova_cells[i] = boot_anova(groups_list, num_b)
# # if the anova is significant, do the adhoc stats
# if anova_cells[i] < 0.05:
# for l in np.arange(len(states)):
# real_d_cells[i, l], t_boot_cells[i, l] = boot_t(groups_list[0], groups_list[l+1], num_b)
## remove extra recordings
#anova_cells = anova_cells[keep_cells]
#t_boot_cells = t_boot_cells[keep_cells, :]
#real_d_cells = real_d_cells[keep_cells, :]
# find which cells have a significant change - nonparametric stats
p_kw = np.full(len(data), np.nan)
p_mw = np.full([len(data), len(states)], np.nan)
num_b = 1000
for i in np.arange(len(data)):
groups_list = [data[i]['nost_CS_rate'], data[i]['theta_CS_rate'],
data[i]['LIA_CS_rate']]
try:
H, p_kw[i] = stats.kruskal(groups_list[0], groups_list[1], groups_list[2],
nan_policy='omit')
except ValueError:
p_kw[i] = np.nan
# if the anova is significant, do the adhoc stats
if p_kw[i] < 0.05:
for l in np.arange(len(states)):
U, p_mw[i, l] = stats.mannwhitneyu(groups_list[0], groups_list[l+1],
alternative='two-sided')
# remove extra recordings
p_kw = p_kw[keep_cells]
p_mw = p_mw[keep_cells, :]
# each cells' average frequency of CS during theta, LIA, and no state
# Version: theta and LIA separate
# prep numbers
# only take first recording from each cell
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_CS = np.sum([data[i][ntl[l]+'_CS_bool']])
total_time = np.sum(data[i][ntl[l]+'_stop'] - data[i][ntl[l]+'_start'])
S[i, l] = num_CS/total_time
# remove extra recordings from cells
S = S[keep_cells, :]
# plot the stack plot for cell values for each state
fig, ax = plt.subplots(1, figsize=[2.3, 2])
line_x = np.array([1.75, 3.25])
bar_x = np.array([1, 4])
y = S[:, d_l]
for i in np.arange(y.shape[0]):
ax.plot(line_x, y[i, :], color=c_lgry, zorder=1)
if d_l == [0, 1]:
if theta_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if theta_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
elif d_l == [0, 2]:
if LIA_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if LIA_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
for l in np.arange(y.shape[1]):
# remove nans
no_nan = y[:, l]
no_nan = no_nan[~np.isnan(no_nan)]
bp = ax.boxplot(no_nan, sym='', patch_artist=True,
whis=[5, 95], widths=0.75, positions=[bar_x[l]])
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
plt.setp(bp[element], color=c_state[d_l[l]], linewidth=1.5)
for patch in bp['boxes']:
patch.set(facecolor=c_wht)
ax.set_xticks(bar_x)
ax.xaxis.set_tick_params(length=0)
ax.set_xticklabels(['unlabeled', 'theta'])
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8])
ax.set_yticklabels([0, '', 0.4, '', 0.8])
ax.set_ylabel('Cs rate (Hz)')
ax.set_xlim([0, bar_x[1]+1])
ax.spines['bottom'].set_visible(False)
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_rate.png'), transparent=True)
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# find which cells have a significant change - nonparametric stats
p_kw = np.full(len(data), np.nan)
p_mw = np.full([len(data), len(states)], np.nan)
num_b = 1000
for i in np.arange(len(data)):
groups_list = [data[i]['nost_CS_perc'], data[i]['theta_CS_perc'],
data[i]['LIA_CS_perc']]
# do the kruskall-wallace if not all the CS_perc values are nan
if ~np.all(np.isnan(np.concatenate(groups_list))):
try:
H, p_kw[i] = stats.kruskal(groups_list[0], groups_list[1], groups_list[2],
nan_policy='omit')
except ValueError:
p_kw[i] = np.nan
# if the anova is significant, do the adhoc stats
if p_kw[i] < 0.05:
for l in np.arange(len(states)):
# remove nans before running the test
g0 = groups_list[0]
g0 = g0[~np.isnan(g0)]
g1 = groups_list[l+1]
g1 = g1[~np.isnan(g1)]
U, p_mw[i, l] = stats.mannwhitneyu(g0, g1,
alternative='two-sided')
# remove extra recordings
p_kw = p_kw[keep_cells]
p_mw = p_mw[keep_cells, :]
# %% CS index
# each cells' CS index during theta, LIA, and no state
# Version: theta and LIA separate
# prep numbers
# only take first recording from each cell
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_CS_spikes = np.sum(np.array([c.size for c in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']])
total_spikes = np.sum([data[i][ntl[l]+'_spike_bool']])
CS_perc = num_CS_spikes/total_spikes
if CS_perc > 1:
CS_perc = 1
S[i, l] = CS_perc
# remove extra recordings from cells
S = S[keep_cells, :]
# plot the stack plot for cell values for each state
fig, ax = plt.subplots(1, figsize=[2.3, 2])
line_x = np.array([1.75, 3.25])
bar_x = np.array([1, 4])
y = S[:, d_l]
for i in np.arange(y.shape[0]):
ax.plot(line_x, y[i, :], color=c_lgry, zorder=1)
if d_l == [0, 1]:
if theta_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if theta_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
elif d_l == [0, 2]:
if LIA_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if LIA_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
for l in np.arange(y.shape[1]):
# remove nans
no_nan = y[:, l]
no_nan = no_nan[~np.isnan(no_nan)]
bp = ax.boxplot(no_nan, sym='', patch_artist=True,
whis=[5, 95], widths=0.75, positions=[bar_x[l]])
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
plt.setp(bp[element], color=c_state[d_l[l]], linewidth=1.5)
for patch in bp['boxes']:
patch.set(facecolor=c_wht)
ax.set_xticks(bar_x)
ax.xaxis.set_tick_params(length=0)
ax.set_xticklabels(['unlabeled', 'theta'])
ax.set_yticks([0, 0.25, 0.5, 0.75, 1])
ax.set_yticklabels([0, '', 0.5, '', 1])
ax.set_ylabel('Cs index')
ax.set_xlim([0, bar_x[1]+1])
ax.spines['bottom'].set_visible(False)
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_index.png'), transparent=True)
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make figures - dCSI vs dVm
l = 0
state='theta'
# Event-based correlation between dVm and change in CS index
unique_cells = [isinstance(d['cell_id'], int) for d in data]
fig, ax = plt.subplots(1, figsize=[2.25, 2.25])
n = 0
for i in np.arange(len(data)):
for j in np.arange(data[i][state+'_start'].size):
x = data[i][state+'_dVm'][j]
z = data[i][state+'_dVm_p'][j]
# calculate the CS index in the before window
CS_bef = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['bef'],
data[i][state+'_CS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
CS_bef = np.sum(CS_bef)
nonCS_bef = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['bef'],
data[i][state+'_nonCS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
nonCS_bef = np.sum(nonCS_bef)
CSindex_bef = CS_bef/(CS_bef+nonCS_bef)
# calculate the CS index in the after window
CS_aft = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['aft'],
data[i][state+'_CS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
CS_aft = np.sum(CS_aft)
nonCS_aft = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['aft'],
data[i][state+'_nonCS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
nonCS_aft = np.sum(nonCS_aft)
CSindex_aft = CS_aft/(CS_aft+nonCS_aft)
if np.logical_and(CS_bef+nonCS_bef == 0, CS_aft+nonCS_aft == 0):
y = np.nan
else:
y = CSindex_aft-CSindex_bef
if np.isnan(y) == False:
n = n+1
if z > 0.05:
ax.scatter(x, y, s=5, facecolors='none', edgecolors=c_mgry, alpha=1, zorder=1)
elif x < 0:
ax.scatter(x, y, s=5, facecolors=c_lhyp, edgecolors=c_lhyp, alpha=1, zorder=2)
elif x > 0:
ax.scatter(x, y, s=5, facecolors=c_ldep, edgecolors=c_ldep, alpha=1, zorder=2)
ax.axhline(0, linestyle='--', color=c_blk, zorder=1)
ax.axvline(0, linestyle='--', color=c_blk, zorder=1)
ax.set_ylim([-1.1, 1.1])
ax.set_xlim([-18, 18])
# cell-based dVm vs change in CS index
# prep numbers for dVm
all_dVm = np.array([d[state+'_mean_dVm'] for d in data])[[isinstance(d['cell_id'], int) for d in data]]
all_cell_p = np.array([d[state+'_cell_p'] for d in data])[[isinstance(d['cell_id'], int) for d in data]]
keep_cells = np.logical_or(np.isnan(all_dVm), np.isnan(all_cell_p))==0
all_dVm = all_dVm[keep_cells]
all_cell_p = all_cell_p[keep_cells]
cell_hyp_sig = all_dVm[all_cell_p < 0.05]
cell_hyp_no = all_dVm[(all_dVm < 0) & (all_cell_p >= 0.05)]
cell_dep_sig = all_dVm[all_cell_p > 0.95]
cell_dep_no = all_dVm[(all_dVm > 0) & (all_cell_p <= 0.95)]
# prep number for CS index
dCSI = np.full(len(data), np.nan)
for i in np.arange(len(data)):
dCSI_cell = np.full(data[i][state+'_start'].size, np.nan)
for j in np.arange(data[i][state+'_start'].size):
# calculate the CS index in the before window
CS_bef = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['bef'],
data[i][state+'_CS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
CS_bef = np.sum(CS_bef)
nonCS_bef = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['bef'],
data[i][state+'_nonCS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
nonCS_bef = np.sum(nonCS_bef)
CSindex_bef = CS_bef/(CS_bef+nonCS_bef)
# calculate the CS index in the after window
CS_aft = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['aft'],
data[i][state+'_CS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
CS_aft = np.sum(CS_aft)
nonCS_aft = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['aft'],
data[i][state+'_nonCS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
nonCS_aft = np.sum(nonCS_aft)
CSindex_aft = CS_aft/(CS_aft+nonCS_aft)
if np.logical_and(CS_bef+nonCS_bef == 0, CS_aft+nonCS_aft == 0):
dCSI_cell[j] = np.nan
else:
dCSI_cell[j] = CSindex_aft-CSindex_bef
dCSI[i] = np.nanmean(dCSI_cell)
dCSI = dCSI[unique_cells]
dCSI = dCSI[keep_cells]
cell_hyp_sig_dCSI = dCSI[all_cell_p < 0.05]
cell_hyp_no_dCSI = dCSI[(all_dVm < 0) & (all_cell_p >= 0.05)]
cell_dep_sig_dCSI = dCSI[all_cell_p > 0.95]
cell_dep_no_dCSI = dCSI[(all_dVm > 0) & (all_cell_p <= 0.95)]
# add the cell dots on top
s_cell = 20
ax.scatter(cell_hyp_sig, cell_hyp_sig_dCSI, s=s_cell, facecolors=c_hyp,
edgecolors=c_blk, zorder=3, alpha=1)
ax.scatter(cell_hyp_no, cell_hyp_no_dCSI, s=s_cell, facecolors='none',
edgecolors=c_blk, zorder=3, alpha=1)
ax.scatter(cell_dep_sig, cell_dep_sig_dCSI, s=s_cell, facecolors=rgb2hex(c_dep),
edgecolors=rgb2hex(c_blk), zorder=3, alpha=1)
ax.scatter(cell_dep_no, cell_dep_no_dCSI, s=s_cell, facecolors='none',
edgecolors=c_blk, zorder=3, alpha=1)
ax.set_xlabel(r'$\Delta$'+' Vm (mV)')
ax.set_ylabel(r'$\Delta$'+' CS index')
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, 'dCSI_vs_dVm_'+state+'.png'), transparent=True)
| Ashkees/Malezieux_CellRep_2020 | figure_scripts/Malezieux_CellRep_FigS3.py | Malezieux_CellRep_FigS3.py | py | 63,803 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.concatenate",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.nanmedian",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line... |
28965388899 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
import time
import json
import os
from course import Course
# Web Driver configuration
# NOTE(review): backslashes in this Windows path are not escaped; they happen to
# survive because none form a recognized escape sequence — a raw string would be safer.
PATH = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(PATH)
coursesList = []
# getting pages URLs
f = open("URL.txt", "r")
URLs = []
for x in f:
    URLs.append(x)
f.close()
# searching through each page from file and through each subpage (< 1 2 3 ... 7 >)
for URL in URLs:
    emptyPage = False  # means that the page number is out of range and there is no more content on this page
    subpageCounter = 1
    while not emptyPage:
        print(URL+'&p='+str(subpageCounter))
        driver.get(URL+'&p='+str(subpageCounter))
        subpageCounter += 1
        try:  # element with this class name is a big container for all smaller divs. If it is not present then there is no content on the page
            # The CSS class names below are Udemy's build-generated (hashed) names
            # and will break whenever the site is redeployed.
            WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'course-list--container--3zXPS')))
            container = driver.find_element_by_class_name('course-list--container--3zXPS')
            coursesBiggerDivs = container.find_elements_by_class_name('browse-course-card--link--3KIkQ')
            courses = container.find_elements_by_class_name('course-card--container--3w8Zm')
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            counter = 0
            for course in courses:  # each course we convert into an object of 'Course' class (data extraction)
                title = course.find_element_by_class_name('udlite-heading-md').text
                desc = course.find_element_by_class_name('udlite-text-sm').text
                author = course.find_element_by_class_name('udlite-text-xs').text
                try:
                    spanElement = course.find_element_by_css_selector('span.star-rating--rating-number--3lVe8')
                except NoSuchElementException:
                    ratings = 'Brak ocen'  # Polish: "no ratings"
                else:
                    ratings = spanElement.text
                try:
                    details = course.find_elements_by_css_selector('span.course-card--row--1OMjg')
                    courseLength = details[0].text
                    courseLevel = details[len(details)-1].text
                except NoSuchElementException:
                    print("Brak dodatkowych informacji")
                    # fallback placeholders, Polish: "no information"
                    courseLength = 'Brak informacji'
                    courseLevel = 'Brak informacji'
                try:
                    image = course.find_element_by_class_name('course-card--course-image--2sjYP')
                    # hovering forces the lazy-loaded image so 'src' is populated
                    ActionChains(driver).move_to_element(image).perform()
                    imageSourceURL = image.get_attribute('src')
                except NoSuchElementException:
                    print("Brak zdjęcia")
                    imageSourceURL = 'https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.smarthome.com.au%2Faeotec-z-wave-plug-in-smart-switch-6.html&psig=AOvVaw33Vx1wP6a3B3QAn_6WPe4A&ust=1602514347326000&source=images&cd=vfe&ved=0CAIQjRxqFwoTCNitsanlrOwCFQAAAAAdAAAAABAE'
                try:
                    priceDiv = course.find_element_by_css_selector('div.price-text--price-part--Tu6MH')
                    ActionChains(driver).move_to_element(priceDiv).perform()
                    spans = priceDiv.find_elements_by_tag_name('span')
                    price = spans[len(spans) - 1].text
                except NoSuchElementException:
                    price = 'Brak ceny'  # Polish: "no price"
                try:
                    # coursesBiggerDivs is assumed to line up 1:1 with courses — TODO confirm
                    courseLink = coursesBiggerDivs[counter].get_attribute('href')
                except NoSuchElementException:
                    courseLink = None
                counter += 1
                c = Course(title, desc, author, ratings, price, imageSourceURL, courseLength, courseLevel, courseLink)
                coursesList.append(c)
        except TimeoutException:
            print('[INFO] Ostatnia podstrona adresu URL')
            emptyPage = True
# start the output file fresh; raises if it does not exist yet — TODO confirm intended
os.remove('objectsInJSON.txt')
for course in coursesList:  # search through each course page and get some more specific information
    driver.get(course.URL)
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'topic-menu')))
    topicDiv = driver.find_element_by_class_name('topic-menu')
    elements = topicDiv.find_elements_by_class_name('udlite-heading-sm')
    course.setCategory(elements[0].text)
    course.setSubcategory(elements[1].text)
    courseDescription = driver.find_element_by_class_name('styles--description--3y4KY')
    course.setExtendedDescription(courseDescription.get_attribute('innerHTML'))
    # write converted course object into output file
    string = course.makeJSON()
    with open('objectsInJSON.txt','a',encoding='utf-8') as file:
        json.dump(string, file, ensure_ascii=False)
        file.write("\n")
driver.quit()
# NOTE(review): the `with` block already closed the file; this close is a redundant no-op
file.close()
| krzysztofzajaczkowski/newdemy | utils/WebCrawler/main.py | main.py | py | 5,352 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 37,
"usage_type": "call"
},
... |
21881174301 | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import os
try:
    # Open the exercise page and fill every required text input with an answer.
    link = "http://suninjuly.github.io/file_input.html"
    browser = webdriver.Chrome()
    browser.get(link)
    elements = browser.find_elements(By.CSS_SELECTOR, ".form-control")
    for element in elements:
        # only inputs carrying the `required` attribute get filled
        if element.get_attribute('required') != None:
            element.send_keys("Мой ответ")
    print(os.getcwd())
    # build an absolute path to file.txt next to this script for the upload field
    current_dir = os.path.realpath(os.path.dirname(__file__))
    file_path = os.path.join(current_dir, 'file.txt')
    print(file_path)
    element = browser.find_element(By.CSS_SELECTOR, "#file")
    element.send_keys(file_path)
    button = browser.find_element(By.CSS_SELECTOR, "button.btn")
    button.click()
finally:
    # pause so the result of the script can be inspected visually
    time.sleep(10)
    # close the browser after all interactions
    browser.quit()
| Mayurityan/stepik_auto_tests_course | lesson 2.2 send file form.py | lesson 2.2 send file form.py | py | 1,034 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 12,
"usage_type": "attribute"
... |
26257812486 | # Imports
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
import users
from datetime import datetime
"""
Модуль для поиска атлетов по парамтрам пользователя
"""
# Variables
Base = declarative_base()
# Class definitions
class Athlette(Base):
    """SQLAlchemy ORM model for one Olympic athlete record.

    Maps to the "Athelete" table (table-name spelling kept as in the
    existing database file).
    """
    __tablename__ = "Athelete"
    id = sa.Column(sa.INTEGER, primary_key=True)
    age = sa.Column(sa.INTEGER)
    birthdate = sa.Column(sa.TEXT)  # stored as text; parsed as '%Y-%m-%d' elsewhere
    gender = sa.Column(sa.TEXT)
    height = sa.Column(sa.REAL)  # meters, per the message in height_compare
    name = sa.Column(sa.TEXT)
    weight = sa.Column(sa.INTEGER)
    gold_medals = sa.Column(sa.INTEGER)
    silver_medals = sa.Column(sa.INTEGER)
    bronze_medals = sa.Column(sa.INTEGER)
    total_medals = sa.Column(sa.INTEGER)
    sport = sa.Column(sa.TEXT)
    country = sa.Column(sa.TEXT)
# Function definitions
def search_id(id, session):
    """Look up a single athlete by primary key and return its string form.

    Returns the default repr of the matching Athlette row, or "None"
    (as text) when no row has that id.
    """
    record = session.query(Athlette).filter(Athlette.id == id).first()
    return f"{record}"
def height_compare(id, session, bcolors):
    """
    Compare athletes' heights with the user's height.

    Looks up the user by ``id``, then lists every athlete whose height
    matches exactly. Returns the formatted listing, or an empty string
    (after printing a colored error) when there is no match.
    """
    # fetch the user's height from the database
    usr_query = session.query(users.User).filter(users.User.id == id).first()
    usr_height = usr_query.height
    # find athletes with exactly the user's height
    ath_query = session.query(Athlette).filter(
        Athlette.height == usr_height)
    ath_count = ath_query.count()
    ath_found = ath_query.all()
    # format the Athlette rows, if any were found
    res = ""
    if ath_found:
        for ath in ath_found:
            res += f" {ath.name}, {ath.sport} \n"
        # `ath` here is the last loop item; all matches share the same height,
        # so using it for the summary line is safe
        res = f"{res}\n Всего атлетов с ростом {ath.height} метра: {ath_count}"
    else:
        print(bcolors.FAIL +
              f"\nERROR: Атлет с ростом {usr_height}m не найден" + bcolors.ENDC)
    return res
def bday_compare(id, session):
    """
    Find the athlete(s) whose birthdate is closest to the user's.

    Loads the user's birthdate, scans all athletes for the minimal absolute
    date difference, then returns a formatted listing of every athlete born
    on that closest date.
    """
    dt_format = '%Y-%m-%d'
    usr_query = session.query(users.User).filter(users.User.id == id).first()
    user_bday = datetime.strptime(usr_query.birthdate, dt_format)
    # pick the closest birthdate directly with min(key=...) instead of
    # indexing into a parallel list via min(range(len(...)))
    athletes = session.query(Athlette).all()
    closest_bday = min(
        (datetime.strptime(ath.birthdate, dt_format) for ath in athletes),
        key=lambda bd: abs(bd - user_bday),
    )
    # select every athlete sharing that closest birthdate
    closest_bday_str = closest_bday.strftime(dt_format)
    matches = session.query(Athlette).filter(
        Athlette.birthdate == closest_bday_str).all()
    # build the result listing
    res = ""
    for ath in matches:
        res = f"{res}\n {ath.name}, д.р.: {ath.birthdate}, {ath.sport}"
    return res
if __name__ == "__main__":
print("ERROR: Запуск скрипта через выполнение модуля start.py \n")
# DEBUG
# print('Info: Module find_athlete.py - imported')
| vsixtynine/sf-sql-task | find_athlete.py | find_athlete.py | py | 3,609 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.INTEGER",
"line_number": 18,
"usage_type": "attribute"
},
{
... |
25178584436 | from summarize import *
from rbm_dae.deepAE import *
import rouge
def summarize_sentence_vectors(df, vector_set):
    """
    Function applying the summarization function to get the ranked sentences.

    Parameters:
        df: dataframe containing the data to summarize
        vector_set: the column name of the vector set to rank on
    Returns the ranked sentences
    """
    print('summarizing sentence vectors..')
    sentence_vectors = df[vector_set].tolist()
    sentence_vectors = np.array(sentence_vectors)
    # Create a list of ranked sentences.
    # NOTE(review): only row 0 is used — callers appear to pass a
    # single-row dataframe (see evaluate_rankings) — confirm.
    ranked_sentences = summarize_emails(df, sentence_vectors[0])
    # display_summary(df, ranked_sentences)
    return ranked_sentences
def summarize_autoencoder_vectors(df, net, vector_set):
    """
    Function applying the autoencoder to the df vectors and then applying the
    summarization function to get the ranked sentences.

    Parameters:
        df: dataframe containing the data to summarize
        net: trained autoencoder
        vector_set: the column name of the vector set to rank on
    Returns the ranked sentences
    """
    print('summarizing autoencoder sentence vectors..')
    sentence_vectors = df[vector_set].tolist()
    # run the first row's sentence vectors through the trained autoencoder
    torch_vectors = torch.tensor(sentence_vectors[0], dtype=torch.float32)
    output_vectors = net(torch_vectors)
    # Create a list of ranked sentences (third arg flags torch-tensor input).
    ranked_sentences = summarize_emails(df, output_vectors, True)
    # display_summary(df, ranked_sentences)
    return ranked_sentences
def evaluate_rankings(df_train, df_test, target, sum_lens, corpus_ae=True, vector_set='sentence_vectors'):
    """
    Function to evaluate the returned summaries. The summaries are created based
    on the raw sentence vectors and the autoencoder vectors.

    Parameters:
        df_train: dataframe with the training data
        df_test: dataframe with the test data
        target: string containing the column that should be used as the summary reference
        sum_lens: an array holding the numbers of sentences to include in the summary
        corpus_ae: whether to train the autoencoder on the entire corpus or on each document
        vector_set: column name of the sentence vectors (glove vectors or tf-idf vectors)
    Returns: two score matrices of shape (len(sum_lens), 3 rouge types, 3 metrics, n_rows)
             for the raw vectors and the autoencoder vectors respectively.
    """
    evaluator = rouge.Rouge()
    # create and train the autoencoder (see autoencoder module)
    net = None
    if corpus_ae:
        net = train_autoencoder(df_train, vector_set)
    print('evaluating summaries..')
    df_len = int(df_test.shape[0])
    sum_scores = np.zeros((len(sum_lens), 3, 3, df_len))
    ae_sum_scores = np.zeros((len(sum_lens), 3, 3, df_len))
    # axis layout shared by both matrices
    rouge_keys = ['rouge-1', 'rouge-2', 'rouge-l']
    metric_keys = ['f', 'p', 'r']
    curr_row = 0
    for index, row in df_test.iterrows():
        print('iteration: ', index)
        df_c = pd.DataFrame([row])
        df_c['body'].iloc[0]  # kept from original: fails fast if 'body' is missing
        # only proceed if the vectors of the current row are non-empty
        if len(df_c[vector_set].tolist()[0]) > 0:
            # train the AE on the current document only
            if not corpus_ae:
                net = train_autoencoder(df_c, vector_set)
            reference = df_c[target].iloc[0]  # text we score against (summary or subject)
            print('reference: ', reference)
            # ranked sentences for the original and the AE-modified sentence vectors
            ranked_sentences = summarize_sentence_vectors(df_c, vector_set)
            ranked_ae_sentences = summarize_autoencoder_vectors(df_c, net, vector_set)
            # collect the scores for each requested summary length
            for s_len in sum_lens:
                print('s_len: ', s_len)
                if len(ranked_sentences) >= s_len:
                    # join the top-ranked sentences into one summary string
                    # (renamed from `sum`, which shadowed the builtin)
                    summary = ' '.join(ranked_sentences[i][2] for i in range(s_len))
                    summary_ae = ' '.join(ranked_ae_sentences[i][2] for i in range(s_len))
                    print('summary: ', summary)
                    print('ae summary: ', summary_ae)
                    # ROUGE scores for both summaries, stored via one loop
                    # instead of 18 copy-pasted assignments
                    sum_score = evaluator.get_scores(summary, reference)
                    sum_ae_score = evaluator.get_scores(summary_ae, reference)
                    for r_idx, r_key in enumerate(rouge_keys):
                        for m_idx, m_key in enumerate(metric_keys):
                            sum_scores[s_len-1, r_idx, m_idx, curr_row] = sum_score[0][r_key][m_key]
                            ae_sum_scores[s_len-1, r_idx, m_idx, curr_row] = sum_ae_score[0][r_key][m_key]
            curr_row += 1
    # trim the unused trailing columns (rows skipped for empty vectors)
    sum_scores = sum_scores[:, :, :, 0:curr_row]
    ae_sum_scores = ae_sum_scores[:, :, :, 0:curr_row]
    return sum_scores, ae_sum_scores
# calculating averages
def analyze_and_plot_rouge_scores(sum_scores, ae_sum_scores, metric, dataset_name, summary_len):
    """Print the mean of the raw-vector and DAE-vector scores for one ROUGE metric."""
    raw_avg = np.mean(sum_scores)
    dae_avg = np.mean(ae_sum_scores)
    print(dataset_name)
    print('Summary length: ', summary_len)
    print('Mean ' + metric + ' for raw vectors: ' + str(round(raw_avg, 3)))
    print('Mean ' + metric + ' for DAE vectors: ' + str(round(dae_avg, 3)))
    # Per-sentence plotting was disabled in the original; kept below for reference.
    """
    x = np.arange(len(sum_scores)).tolist()
    label_1 = "Raw " + metric
    label_2 = "AE vector " + metric
    plt.plot(x, sum_scores.tolist(), label = label_1)
    plt.plot(x, ae_sum_scores.tolist(), label = label_2)
    plt.xlabel('Sentence')
    plt.ylabel('ROUGE score')
    title = "ROUGE " +metric + " for raw (mean: " + str(round(avg_scores, 3)) +") and AE (mean: "+str(round(avg_scores_ae, 3)) +") for " + dataset_name
    plt.title(title)
    plt.legend()
    plt.show()
    """
def evaluate_bc3():
    """
    Base function to run and plot the ROUGE scores for the BC3 e-mail dataset.

    Loads the pickled dataframe, splits off the last 10 rows as the test set,
    evaluates single-sentence summaries against the human summaries and
    prints/plots the resulting scores.
    """
    BC3_PICKLE_LOC = "./final_data/BC3_127.pkl"
    BC3_df = pd.read_pickle(BC3_PICKLE_LOC)
    # df contains 127 rows that all have df_vectors representation!
    # Split into training and test set
    BC3_df_train = BC3_df.iloc[:117]
    BC3_df_test = BC3_df.iloc[117:]
    # evaluate on 'summary' or 'subject'
    target = 'summary'
    summary_len = [1]  # number of sentences per summary
    # can set to use the df vectors ('df_vectors') or the glove vectors ('sentence_vectors')
    corpus_ae = True
    vector_set = 'sentence_vectors' #df_vectors
    sum_scores, ae_sum_scores = evaluate_rankings(BC3_df_train, BC3_df_test, target, summary_len, corpus_ae, vector_set)
    plot_all_scores(sum_scores, ae_sum_scores, 'bc3 dataset', summary_len[0])
def evaluate_spotify():
    """
    Base function to run and plot the ROUGE scores for the Spotify podcast dataset.

    Uses the pre-split train/test pickles; single-sentence summaries are scored
    against the episode description.
    """
    SPOTIFY_PICKLE_TRAIN_LOC = "./final_data/spotify_train_422.pkl"
    SPOTIFY_PICKLE_TEST_LOC = "./final_data/spotify_test_45.pkl"
    df_train = pd.read_pickle(SPOTIFY_PICKLE_TRAIN_LOC)
    df_test = pd.read_pickle(SPOTIFY_PICKLE_TEST_LOC)
    # section to get the summary for a specific episode
    # df_sent = df_train.loc[df_train['episode_id'] == '7DoDuJE4sCBu2jJlOgCrwA']
    # df_test = df_sent
    target = 'episode_desc'
    summary_len = [1]  # number of sentences per summary
    corpus_ae = True  # if false, the autoencoder is only trained on the sentences in the current document
    # can set to use the df vectors (tf-idf) ('df_vectors') or the glove vectors ('sentence_vectors')
    vector_set = 'sentence_vectors'
    sum_scores, ae_sum_scores = evaluate_rankings(df_train, df_test, target, summary_len, corpus_ae, vector_set)
    plot_all_scores(sum_scores, ae_sum_scores, 'spotify dataset', summary_len[0])
def plot_all_scores(sum_scores, ae_sum_scores, dataset, summary_len):
    """
    Report all nine ROUGE metrics (rouge-1/2/l x f-score/precision/recall).

    Parameters:
        sum_scores: score matrix for the raw vectors
        ae_sum_scores: score matrix for the vectors produced by the autoencoder
        dataset: dataset name used in the printed report
        summary_len: summary length used in the printed report
    """
    # Replace nine copy-pasted calls with one nested loop; index order matches
    # the (rouge type, metric) layout written by evaluate_rankings.
    rouge_names = ['rouge-1', 'rouge-2', 'rouge-l']
    metric_names = ['f-score', 'precision', 'recall']
    for r_idx, rouge_name in enumerate(rouge_names):
        for m_idx, metric_name in enumerate(metric_names):
            analyze_and_plot_rouge_scores(
                sum_scores[0][r_idx][m_idx],
                ae_sum_scores[0][r_idx][m_idx],
                rouge_name + ' ' + metric_name,
                dataset,
                summary_len,
            )
def get_mean(sum_scores, ae_sum_scores):
    """Return the arithmetic means of the two score collections as a pair."""
    return np.mean(sum_scores), np.mean(ae_sum_scores)
def evaluate_sentence_length_performance(df_train, df_test, target, summary_len, corpus_ae, vector_set, dataset):
    """
    Function to compute the mean rouge scores for a range of summary lengths.

    Returns four (2, summary_len-1) arrays: precision and recall averages for
    the raw vectors and the autoencoder vectors, rows = rouge-1 / rouge-2.
    """
    # row 0 of each array is never written and is dropped before returning
    averages_p = np.zeros((summary_len, 2))
    averages_ae_p = np.zeros((summary_len, 2))
    averages_r = np.zeros((summary_len, 2))
    averages_ae_r = np.zeros((summary_len, 2))
    # NOTE(review): lengths are hard-coded to 1..6; presumably summary_len is
    # expected to be 7 so the loop below lines up — confirm at call sites.
    summary_lengths = [1, 2, 3, 4, 5, 6]
    sum_scores, ae_sum_scores = evaluate_rankings(df_train, df_test, target, summary_lengths, corpus_ae, vector_set)
    for i in range(1, summary_len):
        print('evaluating rankings for # sentences: ', i)
        for j in range(2):  # for rouge-1 and rouge-2
            # metric index 1 = precision, 2 = recall (0 would be f-score)
            avg_score_p, avg_score_ae_p = get_mean(sum_scores[i-1][j][1], ae_sum_scores[i-1][j][1])
            avg_score_r, avg_score_ae_r = get_mean(sum_scores[i-1][j][2], ae_sum_scores[i-1][j][2])
            averages_p[i, j] = avg_score_p
            averages_ae_p[i, j] = avg_score_ae_p
            averages_r[i, j] = avg_score_r
            averages_ae_r[i, j] = avg_score_ae_r
    print('averages: ', averages_p)
    print('averages ae: ', averages_ae_p)
    # drop the unused first row and transpose to (rouge type, length)
    averages_p = averages_p[1:].transpose()
    averages_ae_p = averages_ae_p[1:].transpose()
    averages_r = averages_r[1:].transpose()
    averages_ae_r = averages_ae_r[1:].transpose()
    return averages_p, averages_ae_p, averages_r, averages_ae_r
def plot_sentences(glove_averages, glove_averages_ae, df_averages, df_averages_ae, title, dataset):
    """
    Function to plot the mean scores vs sentence lengths for the different sentence encodings.

    Each argument is a sequence of 6 mean scores (summary lengths 1..6) for one
    encoding: raw glove, glove+DAE, raw tf-idf, tf-idf+DAE.
    """
    # x axis: summary lengths 1..6 (must match summary_lengths in
    # evaluate_sentence_length_performance)
    x = np.arange(1,7).tolist()
    plt.plot(x, glove_averages.tolist(), label = "Glove vector")
    plt.plot(x, glove_averages_ae.tolist(), label = "Glove DAE vector")
    plt.plot(x, df_averages.tolist(), label = 'tf-idf vector')
    plt.plot(x, df_averages_ae.tolist(), label = 'tf-idf DAE vector')
    plt.xlabel('Number of sentences')
    plt.ylabel(title)
    t = title + ' for ' + dataset
    plt.title(t)
    plt.legend()
    plt.show()
def run_sentence_length_evaluation():
    """
    Main function to compute the mean scores for each summary length for the two datasets.

    Runs the sweep over summary lengths 1..6 on BC3 and Spotify, for both the
    glove and tf-idf sentence encodings, then plots precision and recall for
    rouge-1 and rouge-2.
    """
    BC3_PICKLE_LOC = "./final_data/BC3_127.pkl"
    BC3_df = pd.read_pickle(BC3_PICKLE_LOC)
    # df contains 127 rows that all have df_vectors representation!
    # Split into training and test set
    bc3_df_train = BC3_df.iloc[:117]
    bc3_df_test = BC3_df.iloc[117:]
    bc3_target = 'summary'
    SPOTIFY_PICKLE_TRAIN_LOC = "./final_data/spotify_train_422.pkl"
    SPOTIFY_PICKLE_TEST_LOC = "./final_data/spotify_test_45.pkl"
    s_df_train = pd.read_pickle(SPOTIFY_PICKLE_TRAIN_LOC)
    s_df_test = pd.read_pickle(SPOTIFY_PICKLE_TEST_LOC)
    s_target = 'episode_desc'
    # summary_len of 7 pairs with the hard-coded lengths 1..6 in
    # evaluate_sentence_length_performance
    summary_len = 7
    corpus_ae = True
    vector_set = 'sentence_vectors'
    df_vector_set = 'df_vectors'
    # metric = 0 # 0 = f-score, 1 = precision, 2 = recall
    bc3_glove_p, bc3_glove_ae_p, bc3_glove_r, bc3_glove_ae_r = evaluate_sentence_length_performance(bc3_df_train, bc3_df_test, bc3_target, summary_len, corpus_ae, vector_set, 'bc3 dataset')
    bc3_df_p, bc3_df_ae_p, bc3_df_r, bc3_df_ae_r = evaluate_sentence_length_performance(bc3_df_train, bc3_df_test, bc3_target, summary_len, corpus_ae, df_vector_set, 'bc3 dataset')
    # BC3: rows 0/1 of each array are rouge-1/rouge-2
    plot_sentences(bc3_glove_p[0], bc3_glove_ae_p[0], bc3_df_p[0], bc3_df_ae_p[0], 'ROUGE-1 scores precision', 'BC3 dataset')
    plot_sentences(bc3_glove_p[1], bc3_glove_ae_p[1], bc3_df_p[1], bc3_df_ae_p[1], 'ROUGE-2 scores precision', 'BC3 dataset')
    plot_sentences(bc3_glove_r[0], bc3_glove_ae_r[0], bc3_df_r[0], bc3_df_ae_r[0], 'ROUGE-1 scores recall', 'BC3 dataset')
    plot_sentences(bc3_glove_r[1], bc3_glove_ae_r[1], bc3_df_r[1], bc3_df_ae_r[1], 'ROUGE-2 scores recall', 'BC3 dataset')
    s_glove_p, s_glove_ae_p, s_glove_r, s_glove_ae_r = evaluate_sentence_length_performance(s_df_train, s_df_test, s_target, summary_len, corpus_ae, vector_set, 'Spotify dataset')
    s_df_p, s_df_ae_p, s_df_r, s_df_ae_r = evaluate_sentence_length_performance(s_df_train, s_df_test, s_target, summary_len, corpus_ae, df_vector_set, 'Spotify dataset')
    plot_sentences(s_glove_p[0], s_glove_ae_p[0], s_df_p[0], s_df_ae_p[0], 'ROUGE-1 scores precision', 'Spotify dataset')
    plot_sentences(s_glove_p[1], s_glove_ae_p[1], s_df_p[1], s_df_ae_p[1], 'ROUGE-2 scores precision', 'Spotify dataset')
    plot_sentences(s_glove_r[0], s_glove_ae_r[0], s_df_r[0], s_df_ae_r[0], 'ROUGE-1 scores recall', 'Spotify dataset')
    plot_sentences(s_glove_r[1], s_glove_ae_r[1], s_df_r[1], s_df_ae_r[1], 'ROUGE-2 scores recall', 'Spotify dataset')
# evaluate_bc3()
evaluate_spotify()
# run_sentence_length_evaluation() | MikaelTornwall/dd2424_project | evaluate.py | evaluate.py | py | 15,618 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rouge.Rouge",
"line_number": 54,
"usage_type": "call"
}
] |
27483903677 | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import User
from .forms import RegisterForm, LoginForm
from .utils import require_login
def login_page(request):
    """Render the landing page carrying both the registration and login forms."""
    forms = {
        "reg_form": RegisterForm(),
        "login_form": LoginForm(),
    }
    return render(request, "users/login.html", forms)
def login(request):
    # Authenticate with the POSTed credentials; user_manager.login returns the
    # matching user object, or a falsy value when authentication fails.
    curr_user = User.user_manager.login(request.POST["email_address"], request.POST["password"])
    if not curr_user:
        messages.error(request, "E-mail or password incorrect")
        return redirect("login_page")
    else:
        # remember the authenticated user's id in the session
        request.session["curr_user"] = curr_user.id
        return redirect("dashboard")
def register(request):
    """Create a user from the POSTed form; on failure flash each validation error."""
    registered, user_or_errors = User.user_manager.register(request.POST)
    if not registered:
        # user_or_errors is a list of validation error messages here
        for error in user_or_errors:
            messages.error(request, error)
        return redirect("login_page")
    else:
        # user_or_errors is the newly created user; log them in via the session
        request.session["curr_user"] = user_or_errors.id
        return redirect("dashboard")
def log_off(request):
    # Drop all session data (including "curr_user") and return to the login page.
    request.session.clear()
    return redirect("login_page")
@require_login
def dashboard(request, curr_user):
    # Main landing page after login: lists every registered user.
    # NOTE(review): require_login presumably resolves curr_user from the
    # session before calling the view — confirm in users/utils.py.
    context = {
        "curr_user": curr_user,
        "users": User.user_manager.all(),
    }
    return render(request, "users/dashboard.html", context)
@require_login
def show(request, curr_user, id):
    """Display the profile page for the user with the given id."""
    # Removed leftover debug print("show page").
    context = {
        "curr_user": curr_user,
        "user": User.user_manager.get(id=id),
    }
    return render(request, "users/show.html", context)
@require_login
def edit(request, curr_user, id):
    """Render the profile-edit form for the user identified by `id`."""
    target_user = User.user_manager.get(id=id)
    context = {"curr_user": curr_user, "user": target_user}
    return render(request, "users/edit.html", context)
@require_login
def update(request, curr_user, id):
    """Apply POSTed profile changes; only an admin or the profile's owner may update."""
    # Authorization check: admins may edit anyone, users only themselves.
    # (Debug prints removed; redirect now uses the named route like the
    # other views instead of the hard-coded "/dashboard" path.)
    if not (curr_user.admin or curr_user.id == int(id)):
        return redirect("dashboard")
    # Perform the update; a truthy return value is a list of error messages.
    errors = User.user_manager.update(id, request.POST)
    if errors:
        for error in errors:
            messages.error(request, error)
        return redirect("edit", id=id)
    else:
        return redirect("show", id=id)
| madjaqk/django_user_dashboard | apps/users/views.py | views.py | py | 2,061 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "forms.RegisterForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "forms.LoginForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.User.u... |
70713803708 | import re
import os
import sys
import nltk
import json
import wandb
import joblib
import datasets
import numpy as np
import pandas as pd
from time import process_time
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
np.random.seed(42)
class LemmaTokenizer:
    """Callable tokenizer that word-tokenizes a document and lemmatizes each
    token with WordNet, skipping punctuation tokens; intended for use as the
    ``tokenizer`` argument of TfidfVectorizer."""
    # punctuation tokens produced by nltk's tokenizer that carry no signal
    ignore_tokens = [',', '.', ';', ':', '"', '``', "''", '`']
    def __init__(self):
        self.wnl = WordNetLemmatizer()
    def __call__(self, doc):
        return [self.wnl.lemmatize(t) for t in word_tokenize(doc) if t not in self.ignore_tokens]
def prepare_dataset(data_folder, label2id, data_types, max_length):
    """Load the saved HuggingFace dataset and build a single "text" column.

    The requested fields (e.g. title, body) are concatenated, truncated to
    ``max_length`` characters, and the "category" column is encoded and
    aligned to ``label2id`` then renamed to "label".
    """
    def combine_data(example):
        # concatenate the configured fields, space-separated, then truncate
        temp_text = ""
        for data_type in data_types:
            temp_text += example[data_type] + " "
        example["text"] = temp_text[:max_length]
        return example
    dataset = datasets.load_from_disk(data_folder + "dataset/")
    dataset = dataset["train"]
    # turn the string category into a ClassLabel, aligned with label2id ids
    dataset_encoded = dataset.class_encode_column("category")
    dataset_aligned = dataset_encoded.align_labels_with_mapping(label2id, "category")
    dataset_cleaned = dataset_aligned.map(combine_data)
    dataset = dataset_cleaned.remove_columns(["title", "body"])
    dataset = dataset.rename_column("category", "label")
    return dataset
def main():
    """Train a TF-IDF + LinearSVC classifier on the DMOZ dataset and log the
    run (timing + model artifact) to Weights & Biases."""
    hps = {
        "data_types": ["title", "body"],
        "loss_function": "squared_hinge",
        "ngram_range": 3,
        "max_length": 512,
    }
    wandb_id = wandb.util.generate_id()
    run = wandb.init(
        project="DMOZ-classification",
        config=hps,
        job_type="training",
        name="SVM_DMOZ_" + str(wandb_id),
        tags=["SVM", "DMOZ"],
    )
    data_folder = "/ceph/csedu-scratch/other/jbrons/thesis-web-classification/"
    id2label = {0: "Arts", 1: "Business", 2: "Computers", 3: "Health", 4: "Home", 5: "News", 6: "Recreation", 7: "Reference", 8: "Science", 9: "Shopping", 10: "Society", 11: "Sports", 12: "Games"}
    label2id = {v: k for k, v in id2label.items()}
    dataset = prepare_dataset(data_folder, label2id, hps["data_types"], hps["max_length"])
    X_train, y_train = dataset["text"], dataset["label"]
    # TF-IDF over 1..n-grams with lemmatizing tokenizer, followed by a linear SVM
    tokenizer = LemmaTokenizer()
    pipeline = make_pipeline(
        TfidfVectorizer(
            ngram_range=(1, hps["ngram_range"]),
            tokenizer=tokenizer,
            token_pattern=None
        ),
        LinearSVC(loss=hps["loss_function"])
    )
    t0 = process_time()
    pipeline.fit(X_train, y_train)
    training_time = process_time() - t0
    print("Training time {:5.2f}s for {:0d} samples.".format(training_time, len(y_train)))
    run.summary["training_time"] = training_time
    # persist the fitted pipeline, then upload the same file as a W&B artifact
    filename = data_folder + "models/SVM/model.pkl"
    joblib.dump(pipeline, filename, compress=3)
    model_artifact = wandb.Artifact(
        name="model_SVM_DMOZ",
        type="model"
    )
    # BUG FIX: original referenced the undefined name `thesis_folder` here,
    # which raised NameError after training; upload the file written above.
    model_artifact.add_file(filename)
    run.log_artifact(model_artifact)
if __name__ == "__main__":
main()
| JesseBrons/Webpageclassification | training/train_model_SVM.py | train_model_SVM.py | py | 3,173 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "nltk.... |
30367917171 | """Tutorial 8. Putting two plots on the screen
This tutorial sets up for showing how Chaco allows easily opening multiple
views into a single dataspace, which is demonstrated in later tutorials.
"""
from scipy import arange
from scipy.special import jn
from enable.api import ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, View
from chaco.api import create_line_plot, HPlotContainer
from chaco.tools.api import PanTool
class PlotExample(HasTraits):
    """Traits UI window showing two Bessel-function line plots side by side."""

    # Container holding both plots; built lazily by _container_default
    # the first time `container` is accessed.
    container = Instance(HPlotContainer)

    traits_view = View(
        Item(
            "container",
            editor=ComponentEditor(),
            show_label=False,
            width=800,
            height=600,
        ),
        title="Chaco Tutorial",
    )

    def _container_default(self):
        """Build the default HPlotContainer with the two line plots."""
        # 100 sample points across [-5, 15)
        x = arange(-5.0, 15.0, 20.0 / 100)
        y = jn(0, x)  # Bessel function of the first kind, order 0
        left_plot = create_line_plot(
            (x, y), bgcolor="white", add_grid=True, add_axis=True
        )
        left_plot.tools.append(PanTool(left_plot))
        self.left_plot = left_plot

        y = jn(1, x)  # order-1 Bessel function for the right-hand plot
        right_plot = create_line_plot(
            (x, y), bgcolor="white", add_grid=True, add_axis=True
        )
        right_plot.tools.append(PanTool(right_plot))
        right_plot.y_axis.orientation = "right"  # put its axis on the right edge
        self.right_plot = right_plot

        # Tone down the colors on the grids
        right_plot.hgrid.line_color = (0.3, 0.3, 0.3, 0.5)
        right_plot.vgrid.line_color = (0.3, 0.3, 0.3, 0.5)
        left_plot.hgrid.line_color = (0.3, 0.3, 0.3, 0.5)
        left_plot.vgrid.line_color = (0.3, 0.3, 0.3, 0.5)

        container = HPlotContainer(spacing=20, padding=50, bgcolor="lightgray")
        container.add(left_plot)
        container.add(right_plot)
        return container
demo = PlotExample()
if __name__ == "__main__":
demo.configure_traits()
| enthought/chaco | examples/tutorials/tutorial8.py | tutorial8.py | py | 1,873 | python | en | code | 286 | github-code | 6 | [
{
"api_name": "traits.api.HasTraits",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "chaco.api.HPlotContainer",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": ... |
72531840829 | """Adds column to use scicrunch alternative
Revision ID: b60363fe438f
Revises: 39fa67f45cc0
Create Date: 2020-12-15 18:26:25.552123+00:00
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "b60363fe438f"
down_revision = "39fa67f45cc0"
branch_labels = None
depends_on = None
def upgrade():
    """Add the non-nullable boolean column group_classifiers.uses_scicrunch."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the column as nullable first, with a server-side default of false,
    # so existing rows are accepted by the ALTER.
    op.add_column(
        "group_classifiers",
        sa.Column("uses_scicrunch", sa.Boolean(), nullable=True, server_default="0"),
    )
    # ### end Alembic commands ###
    # Applies the default to all existing rows
    query = 'UPDATE "group_classifiers" SET uses_scicrunch=false;'
    op.execute(query)
    # makes the column non nullable, equivalent to:
    # 'ALTER TABLE "group_classifiers" ALTER "uses_scicrunch" SET NOT NULL;'
    op.alter_column("group_classifiers", "uses_scicrunch", nullable=False)
def downgrade():
    """Drop the ``uses_scicrunch`` column added by :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("group_classifiers", "uses_scicrunch")
    # ### end Alembic commands ###
| ITISFoundation/osparc-simcore | packages/postgres-database/src/simcore_postgres_database/migration/versions/b60363fe438f_adds_column_to_use_scicrunch_alternative.py | b60363fe438f_adds_column_to_use_scicrunch_alternative.py | py | 1,066 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "alembic.op.add_column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean... |
21247913444 | import unittest
from unittest.mock import patch
import os
from typing import Optional
from dataclasses import dataclass
from io import StringIO
from ml_project.train_pipeline import run_train_pipeline
from ml_project.predict_pipeline import run_predict_pipeline
from sklearn.preprocessing import StandardScaler
from ml_project.entities import (
TrainingPipelineParams,
SplitParams,
FeatureParams,
TrainingParams
)
numerical_features = [
"age",
"sex",
"cp",
"trestbps",
"chol",
"fbs",
"restecg",
"thalach",
"exang",
"oldpeak",
"slope",
"ca",
"thal",
]
@dataclass
class TestTrainingPipelineParams:
    """Training-pipeline parameters used by the end-to-end test.

    Mirrors the production ``TrainingPipelineParams`` but points outputs at
    ``tests/tmp``.

    NOTE(review): the ``SplitParams``/``FeatureParams``/``TrainingParams``
    defaults are single instances created at class-definition time and
    shared by every ``TestTrainingPipelineParams`` instance. Fine for
    read-only test config; switch to ``field(default_factory=...)`` if
    these objects are ever mutated.
    """
    input_data_path: str = "data/raw/heart_cleveland_upload.csv"
    output_model_path: str = "tests/tmp/test_model.pkl"
    metric_path: str = "tests/tmp/test_metrics.json"
    split_params: SplitParams = SplitParams(
        test_size=0.25,
        random_state=5
    )
    feature_params: FeatureParams = FeatureParams(
        numerical_features=numerical_features,
        target_col="condition"
    )
    train_params: TrainingParams = TrainingParams(
        model_type="RandomForestClassifier",
    )
    # Optional extras consumed by the pipeline; None means "not used".
    train_dataframe_path: Optional[str] = "data/raw/predict_dataset.csv"
    scaler: Optional[str] = None
@dataclass
class TestPredictPipelineParams:
    """Prediction-pipeline parameters for the end-to-end predict test."""
    input_data_path: str = "data/raw/predict_dataset.csv"
    input_model_path: str = "models/model.pkl"
    output_data_path: str = "tests/tmp/test_model_predicts.csv"
class TestEnd2End(unittest.TestCase):
    """End-to-end smoke tests for the train and predict pipelines."""

    # Shared, effectively read-only parameter objects for both tests.
    test_train_piplein_params = TestTrainingPipelineParams()
    test_test_piplein_params = TestPredictPipelineParams()

    @unittest.mock.patch("ml_project.train_pipeline.logger")
    def test_train_end2end(self, mock_log):
        """Train on the fixture data; check the model file and f1 quality."""
        # Suppress pipeline prints; the pipeline logger is mocked out above.
        with patch("sys.stdout", new=StringIO()):
            path_to_model, metrics = run_train_pipeline(self.test_train_piplein_params)
        self.assertTrue(os.path.exists(path_to_model))
        # Both classes must clear a minimal f1-score on the held-out split.
        self.assertTrue(metrics["0"]["f1-score"] > 0.6)
        self.assertTrue(metrics["1"]["f1-score"] > 0.6)

    @unittest.mock.patch("ml_project.train_pipeline.logger")
    def test_predict_end2end(self, mock_log):
        """Run batch prediction; check the output CSV gets written."""
        with patch("sys.stdout", new=StringIO()):
            run_predict_pipeline(self.test_test_piplein_params)
        self.assertTrue(os.path.exists(self.test_test_piplein_params.output_data_path))
| made-mlops-2022/alexey_sklyannyy | tests/test_end2end_training.py | test_end2end_training.py | py | 2,397 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ml_project.entities.SplitParams",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "ml_project.entities.FeatureParams",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "ml_project.entities.TrainingParams",
"line_number": 49,
"usage_type": "... |
34958665792 | # -*- coding: utf-8 -*-
import numpy as np
import os
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.datasets as dset
import torch.nn.functional as F
import json
from attack_methods import pgd
from models.wrn import WideResNet
from option import BaseOptions
class PrivateOptions(BaseOptions):
    """Command-line options: BaseOptions plus WideResNet architecture flags."""

    def initialize(self):
        BaseOptions.initialize(self)
        # WRN Architecture
        self.parser.add_argument('--layers', default=28, type=int, help='total number of layers')
        self.parser.add_argument('--widen-factor', default=10, type=int, help='widen factor')
        self.parser.add_argument('--droprate', default=0.0, type=float, help='dropout probability')
# /////////////// Training ///////////////
def train():
    """Run one epoch of PGD adversarial training over ``train_loader``.

    Uses the module-level ``net``, ``train_loader``, ``optimizer``,
    ``adversary_train`` and ``state``. Trains on adversarial examples
    only and stores a smoothed loss in ``state['train_loss']``.
    """
    net.train()  # enter train mode
    loss_avg = 0.0
    for bx, by in train_loader:
        bx, by = bx.cuda(), by.cuda()
        # Craft PGD adversarial examples from the clean batch.
        adv_bx = adversary_train(net, bx, by)
        # forward
        logits = net(adv_bx)
        # backward
        # scheduler.step()
        optimizer.zero_grad()
        loss = F.cross_entropy(logits, by)
        loss.backward()
        optimizer.step()
        # exponential moving average of the batch loss (alpha = 0.2)
        loss_avg = loss_avg * 0.8 + float(loss) * 0.2
    state['train_loss'] = loss_avg
# test function
def test():
    """Evaluate ``net`` on adversarial test examples only.

    Writes ``state['test_loss']`` and ``state['test_accuracy']``.
    """
    net.eval()
    loss_avg = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            # NOTE(review): adversarial examples are generated inside a
            # no_grad block — this only works if adversary_test re-enables
            # gradients internally; confirm in attack_methods.pgd.
            adv_data = adversary_test(net, data, target)
            # forward
            output = net(adv_data)
            loss = F.cross_entropy(output, target)
            # accuracy
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).sum().item()
            # test loss average
            loss_avg += float(loss.data)
    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)
# overall_test function
def test_in_testset():
    """Evaluate ``net`` on the test set under both clean and adversarial inputs.

    Writes clean metrics to ``state['test_loss']`` / ``state['test_accuracy']``
    and adversarial metrics to ``state['adv_test_loss']`` /
    ``state['adv_test_accuracy']``.
    """
    net.eval()
    loss_avg = 0.0
    correct = 0
    adv_loss_avg = 0.0
    adv_correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            adv_data = adversary_test(net, data, target)
            # forward on clean inputs
            output = net(data)
            loss = F.cross_entropy(output, target)
            # clean accuracy
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).sum().item()
            # clean loss accumulator
            loss_avg += float(loss.data)
            # forward on adversarial inputs
            adv_output = net(adv_data)
            adv_loss = F.cross_entropy(adv_output, target)
            # adversarial accuracy
            adv_pred = adv_output.data.max(1)[1]
            adv_correct += adv_pred.eq(target.data).sum().item()
            # adversarial loss accumulator
            adv_loss_avg += float(adv_loss.data)
    state['test_loss'] = loss_avg / len(test_loader)
    state['test_accuracy'] = correct / len(test_loader.dataset)
    state['adv_test_loss'] = adv_loss_avg / len(test_loader)
    state['adv_test_accuracy'] = adv_correct / len(test_loader.dataset)
def test_in_trainset():
    """Evaluate ``net`` on the training set, both clean and adversarial.

    Builds a local non-shuffled loader over ``train_data`` (shadowing the
    module-level ``train_loader`` inside this function only) and writes
    results to ``state['train_*']`` / ``state['adv_train_*']``.
    """
    train_loader = torch.utils.data.DataLoader(
        train_data, batch_size=opt.test_bs, shuffle=False,
        num_workers=opt.prefetch, pin_memory=torch.cuda.is_available())
    net.eval()
    loss_avg = 0.0
    correct = 0
    adv_loss_avg = 0.0
    adv_correct = 0
    with torch.no_grad():
        for data, target in train_loader:
            data, target = data.cuda(), target.cuda()
            adv_data = adversary_test(net, data, target)
            # forward on clean inputs
            output = net(data)
            loss = F.cross_entropy(output, target)
            # clean accuracy
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).sum().item()
            # clean loss accumulator
            loss_avg += float(loss.data)
            # forward on adversarial inputs
            adv_output = net(adv_data)
            adv_loss = F.cross_entropy(adv_output, target)
            # adversarial accuracy
            adv_pred = adv_output.data.max(1)[1]
            adv_correct += adv_pred.eq(target.data).sum().item()
            # adversarial loss accumulator
            adv_loss_avg += float(adv_loss.data)
    state['train_loss'] = loss_avg / len(train_loader)
    state['train_accuracy'] = correct / len(train_loader.dataset)
    state['adv_train_loss'] = adv_loss_avg / len(train_loader)
    state['adv_train_accuracy'] = adv_correct / len(train_loader.dataset)
opt = PrivateOptions().parse()
state = {k: v for k, v in opt._get_kwargs()}
torch.manual_seed(opt.random_seed)
np.random.seed(opt.random_seed)
cudnn.benchmark = True
# # mean and standard deviation of channels of CIFAR-10 images
# mean = [x / 255 for x in [125.3, 123.0, 113.9]]
# std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = trn.Compose([trn.RandomHorizontalFlip(), trn.RandomCrop(32, padding=4),
trn.ToTensor(), trn.Normalize(
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
test_transform = trn.Compose([trn.ToTensor(), trn.Normalize(
mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
if opt.dataset == 'cifar10':
train_data = dset.CIFAR10(opt.dataroot, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR10(opt.dataroot, train=False, transform=test_transform)
num_classes = 10
else:
train_data = dset.CIFAR100(opt.dataroot, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR100(opt.dataroot, train=False, transform=test_transform)
num_classes = 100
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=opt.batch_size, shuffle=True,
num_workers=opt.prefetch, pin_memory=torch.cuda.is_available())
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=opt.test_bs, shuffle=False,
num_workers=opt.prefetch, pin_memory=torch.cuda.is_available())
# Create model
if opt.model == 'wrn':
net = WideResNet(opt.layers, num_classes, opt.widen_factor, dropRate=opt.droprate)
else:
assert False, opt.model + ' is not supported.'
start_epoch = opt.start_epoch
if opt.ngpu > 0:
net = torch.nn.DataParallel(net, device_ids=list(range(opt.ngpu)))
net.cuda()
torch.cuda.manual_seed(opt.random_seed)
# Restore model if desired
if opt.load != '':
if opt.test and os.path.isfile(opt.load):
net.load_state_dict(torch.load(opt.load))
print('Appointed Model Restored!')
else:
model_name = os.path.join(opt.load, opt.dataset + opt.model +
'_epoch_' + str(start_epoch) + '.pt')
if os.path.isfile(model_name):
net.load_state_dict(torch.load(model_name))
print('Model restored! Epoch:', start_epoch)
else:
raise Exception("Could not resume")
epoch_step = json.loads(opt.epoch_step)
lr = state['learning_rate']
optimizer = torch.optim.SGD(
net.parameters(), lr, momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# def cosine_annealing(step, total_steps, lr_max, lr_min):
# return lr_min + (lr_max - lr_min) * 0.5 * (
# 1 + np.cos(step / total_steps * np.pi))
#
#
# scheduler = torch.optim.lr_scheduler.LambdaLR(
# optimizer,
# lr_lambda=lambda step: cosine_annealing(
# step,
# opt.epochs * len(train_loader),
# 1, # since lr_lambda computes multiplicative factor
# 1e-6 / opt.learning_rate)) # originally 1e-6
adversary_train = pgd.PGD(epsilon=opt.epsilon * 2, num_steps=opt.num_steps, step_size=opt.step_size * 2).cuda()
adversary_test = pgd.PGD(epsilon=opt.epsilon * 2, num_steps=opt.test_num_steps, step_size=opt.test_step_size * 2).cuda()
if opt.test:
test_in_testset()
# test_in_trainset()
print(state)
exit()
# Make save directory
if not os.path.exists(opt.save):
os.makedirs(opt.save)
if not os.path.isdir(opt.save):
raise Exception('%s is not a dir' % opt.save)
with open(os.path.join(opt.save, "log_" + opt.dataset + opt.model +
'_training_results.csv'), 'w') as f:
f.write('epoch,time(s),train_loss,test_loss,test_accuracy(%)\n')
print('Beginning Training\n')
# Main loop
best_test_accuracy = 0
for epoch in range(start_epoch, opt.epochs + 1):
state['epoch'] = epoch
begin_epoch = time.time()
train()
test()
# Save model
if epoch > 10 and epoch % 10 == 0:
torch.save(net.state_dict(),
os.path.join(opt.save, opt.dataset + opt.model +
'_epoch_' + str(epoch) + '.pt'))
if state['test_accuracy'] > best_test_accuracy:
best_test_accuracy = state['test_accuracy']
torch.save(net.state_dict(),
os.path.join(opt.save, opt.dataset + opt.model +
'_epoch_best.pt'))
# Show results
with open(os.path.join(opt.save, "log_" + opt.dataset + opt.model +
'_training_results.csv'), 'a') as f:
f.write('%03d,%0.6f,%05d,%0.3f,%0.3f,%0.2f\n' % (
(epoch),
lr,
time.time() - begin_epoch,
state['train_loss'],
state['test_loss'],
100. * state['test_accuracy'],
))
print('Epoch {0:3d} | LR {1:.6f} | Time {2:5d} | Train Loss {3:.3f} | Test Loss {4:.3f} | Test Acc {5:.2f}'.format(
(epoch),
lr,
int(time.time() - begin_epoch),
state['train_loss'],
state['test_loss'],
100. * state['test_accuracy'])
)
# Adjust learning rate
if epoch in epoch_step:
lr = optimizer.param_groups[0]['lr'] * opt.lr_decay_ratio
optimizer = torch.optim.SGD(
net.parameters(), lr, momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
print("new lr:", lr)
| arthur-qiu/adv_vis | cifar10_wrn_at.py | cifar10_wrn_at.py | py | 10,072 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "option.BaseOptions",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "option.BaseOptions.initialize",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "option.BaseOptions",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "t... |
33245759934 | import time
from collections import OrderedDict
from collections.abc import Callable, Sequence
from typing import Any, NamedTuple
DB = "timeseries"
TABLE_RAW = "paii_raw"
TIME_FIELD = "paii_time"
class Field(NamedTuple):
    """Schema entry mapping one device JSON key to a database column."""
    json_key: str     # key in the device's JSON payload
    store_flag: bool  # True if this field is persisted to the database
    db_name: str      # column name in the raw table
    data_type: str    # SQL type fragment used when creating the column
    # Optional converter (data, json_key) -> value; a converter is
    # responsible for handling a missing key itself.
    convert: Callable[[dict[str, Any], str], Any] | None = None
# conversions
def fahrenheit2celsius(data: dict[str, Any], key: str) -> float:
    """Convert the Fahrenheit reading stored under *key* to Celsius.

    Result is rounded to two decimal places. Raises ``KeyError`` if the
    key is absent (callers wire this up only for keys the device sends).
    """
    fahrenheit = data[key]
    celsius = (fahrenheit - 32) * 5 / 9
    return round(celsius, 2)
def get_response_date(data: dict[str, Any], key: str) -> int:
    """Return the device-reported response date, or the current time.

    Fix: the fallback is now ``int(time.time())`` instead of the raw
    float from ``time.time()``, so the value always matches the declared
    ``-> int`` return type and the INTEGER column it is stored in. The
    fallback is also no longer evaluated when the key is present.
    """
    if key in data:
        return data[key]
    return int(time.time())
def missing_int(data: dict[str, Any], key: str) -> int:
    """Return ``data[key]``, or the sentinel ``-1`` when the key is absent."""
    if key in data:
        return data[key]
    return -1
fields = [
# inserted by the sceduler
Field(TIME_FIELD, True, TIME_FIELD, "TIMESTAMPTZ NOT NULL"),
# from the device
Field("SensorId", False, "SensorId", "VARCHAR"), # eg "84:f3:eb:7b:c8:ee"
Field("DateTime", False, "DateTime", "VARCHAR"), # eg "2020/08/23T10:44:39z"
Field("Geo", True, "geo", "VARCHAR"), # eg "PurpleAir-c8ee"
Field("Mem", False, "Mem", "VARCHAR"), # eg 18936
Field("memfrag", False, "memfrag", "VARCHAR"), # eg 8
Field("memfb", False, "memfb", "VARCHAR"), # eg 17568
Field("memcs", False, "memcs", "VARCHAR"), # eg 896
Field("Id", False, "Id", "VARCHAR"), # eg 4177
Field("lat", True, "lat", "DOUBLE PRECISION"), # eg -37.8484
Field("lon", True, "lon", "DOUBLE PRECISION"), # eg 145.177399
Field("Adc", True, "adc", "DOUBLE PRECISION"), # eg 0.05
Field("loggingrate", False, "loggingrate", "VARCHAR"), # eg 15
Field("place", True, "place", "VARCHAR"), # eg "outside"
Field("version", False, "version", "VARCHAR"), # eg "6.01"
Field("uptime", False, "uptime", "VARCHAR"), # eg 242296
Field("rssi", False, "rssi", "VARCHAR"), # eg -59
Field("period", True, "period", "INTEGER"), # eg 120
Field("httpsuccess", False, "httpsuccess", "VARCHAR"), # eg 12169
Field("httpsends", False, "httpsends", "VARCHAR"), # eg 12182
Field("hardwareversion", True, "hardwareversion", "VARCHAR"), # eg "2.0"
Field(
"hardwarediscovered",
False,
"hardwarediscovered",
"VARCHAR",
), # eg "2.0+BME280+PMSX003-B+PMSX003-A"
Field(
"current_temp_f",
True,
"current_temp_c",
"DOUBLE PRECISION",
fahrenheit2celsius,
), # eg 52
Field("current_humidity", True, "current_humidity", "DOUBLE PRECISION"), # eg 55
Field(
"current_dewpoint_f",
True,
"current_dewpoint_c",
"DOUBLE PRECISION",
fahrenheit2celsius,
), # eg 36
Field("pressure", True, "pressure", "DOUBLE PRECISION"), # eg 1005.28
Field("p25aqic_b", False, "p25aqic_b", "VARCHAR"), # eg "rgb(0,228,0)"
Field("pm2.5_aqi_b", False, "pm2.5_aqi_b", "VARCHAR"), # eg_aqi_b": 5
Field("pm1_0_cf_1_b", True, "pm1_0_cf_1_b", "DOUBLE PRECISION"), # eg 0.39
Field("p_0_3_um_b", False, "p_0_3_um_b", "VARCHAR"), # eg 261.79
Field("pm2_5_cf_1_b", True, "pm2_5_cf_1_b", "DOUBLE PRECISION"), # eg 1.3 **** µg/m3
Field("p_0_5_um_b", False, "p_0_5_um_b", "VARCHAR"), # eg 72.35
Field("pm10_0_cf_1_b", True, "pm10_0_cf_1_b", "DOUBLE PRECISION"), # eg 1.72
Field("p_1_0_um_b", False, "p_1_0_um_b", "VARCHAR"), # eg 13.05
Field("pm1_0_atm_b", False, "pm1_0_atm_b", "VARCHAR"), # eg 0.39
Field("p_2_5_um_b", False, "p_2_5_um_b", "VARCHAR"), # eg 2.42
Field("pm2_5_atm_b", False, "pm2_5_atm_b", "VARCHAR"), # eg 1.3
Field("p_5_0_um_b", False, "p_5_0_um_b", "VARCHAR"), # eg 0.7
Field("pm10_0_atm_b", False, "pm10_0_atm_b", "VARCHAR"), # eg 1.72
Field("p_10_0_um_b", False, "p_10_0_um_b", "VARCHAR"), # eg 0.0
Field("p25aqic", False, "p25aqic", "VARCHAR"), # eg "rgb(0,228,0)"
Field("pm2.5_aqi", False, "pm2.5_aqi", "VARCHAR"), # eg_aqi": 1
Field("pm1_0_cf_1", True, "pm1_0_cf_1", "DOUBLE PRECISION"), # eg 0.14
Field("p_0_3_um", False, "p_0_3_um", "VARCHAR"), # eg 163.63
Field("pm2_5_cf_1", True, "pm2_5_cf_1", "DOUBLE PRECISION"), # eg 0.33 **** µg/m3
Field("p_0_5_um", False, "p_0_5_um", "VARCHAR"), # eg 45.77
Field("pm10_0_cf_1", True, "pm10_0_cf_1", "DOUBLE PRECISION"), # eg 0.42
Field("p_1_0_um", False, "p_1_0_um", "VARCHAR"), # eg 7.79
Field("pm1_0_atm", False, "pm1_0_atm", "VARCHAR"), # eg 0.14
Field("p_2_5_um", False, "p_2_5_um", "VARCHAR"), # eg 0.56
Field("pm2_5_atm", False, "pm2_5_atm", "VARCHAR"), # eg 0.33
Field("p_5_0_um", False, "p_5_0_um", "VARCHAR"), # eg 0.18
Field("pm10_0_atm", False, "pm10_0_atm", "VARCHAR"), # eg 0.42
Field("p_10_0_um", False, "p_10_0_um", "VARCHAR"), # eg 0.0
Field("pa_latency", False, "pa_latency", "VARCHAR"), # eg 631
Field("response", False, "response", "VARCHAR"), # eg 201
Field(
"response_date",
True,
"response_date",
"INTEGER",
get_response_date,
), # eg 1598179477
Field("latency", True, "latency", "INTEGER", missing_int), # eg 1459
Field("key1_response", False, "key1_response", "VARCHAR"), # eg 200
Field("key1_response_date", False, "key1_response_date", "VARCHAR"), # eg 1598179467
Field("key1_count", False, "key1_count", "VARCHAR"), # eg 79205
Field("ts_latency", False, "ts_latency", "VARCHAR"), # eg 1198
Field("key2_response", False, "key2_response", "VARCHAR"), # eg 200
Field("key2_response_date", False, "key2_response_date", "VARCHAR"), # eg 1598179470
Field("key2_count", False, "key2_count", "VARCHAR"), # eg 79212
Field("ts_s_latency", False, "ts_s_latency", "VARCHAR"), # eg 1141
Field("key1_response_b", False, "key1_response_b", "VARCHAR"), # eg 200
Field(
"key1_response_date_b",
False,
"key1_response_date_b",
"VARCHAR",
), # eg 1598179472
Field("key1_count_b", False, "key1_count_b", "VARCHAR"), # eg 79213
Field("ts_latency_b", False, "ts_latency_b", "VARCHAR"), # eg 1133
Field("key2_response_b", False, "key2_response_b", "VARCHAR"), # eg 200
Field(
"key2_response_date_b",
False,
"key2_response_date_b",
"VARCHAR",
), # eg 1598179474
Field("key2_count_b", False, "key2_count_b", "VARCHAR"), # eg 79217
Field("ts_s_latency_b", False, "ts_s_latency_b", "VARCHAR"), # eg 1136
Field("wlstate", False, "wlstate", "VARCHAR"), # eg "Connected"
Field("status_0", True, "status_0", "INTEGER"), # eg 2
Field("status_1", True, "status_1", "INTEGER"), # eg 2
Field("status_2", True, "status_2", "INTEGER"), # eg 2
Field("status_3", True, "status_3", "INTEGER"), # eg 2
Field("status_4", True, "status_4", "INTEGER"), # eg 2
Field("status_5", True, "status_5", "INTEGER"), # eg 2
Field("status_6", True, "status_6", "INTEGER", missing_int), # eg 2
Field("status_7", True, "status_7", "INTEGER"), # eg 0
Field("status_8", True, "status_8", "INTEGER"), # eg 2
Field("status_9", True, "status_9", "INTEGER"), # eg 2
Field("ssid", False, "ssid", "VARCHAR"), # eg "apocalypse
]
def gen_stored(fields: list[Field] = fields):
    """Yield only the fields flagged for database storage."""
    for field in fields:
        if field.store_flag:
            yield field
def compose_create(
    table_name: str,
    time_field: str,
    fields: list[Field] = fields,
) -> str:
    """Compose a CREATE TABLE statement for the stored fields.

    Args:
        table_name: database table name.
        time_field: column used as the primary key (the sample timestamp).
        fields: field specifications; only those with ``store_flag`` set
            become columns.

    Returns:
        str: CREATE TABLE IF NOT EXISTS SQL.

    Fix: ``fields`` is now forwarded to :func:`gen_stored`; previously the
    parameter was accepted but silently ignored, so a caller-supplied
    field list had no effect.
    """
    fdesc = ",\n".join(f"{f.db_name} {f.data_type}" for f in gen_stored(fields))
    sql = f"""CREATE TABLE IF NOT EXISTS {table_name} (
    {fdesc},
    PRIMARY KEY({time_field})
    );
    """
    return sql
def compose_insert(field_names: Sequence, table_name: str) -> str:
    """Compose a parameterized INSERT statement ($1, $2, ... placeholders).

    Args:
        field_names (Sequence): database table field names.
        table_name (str): database table name.

    Returns:
        str: insert SQL with one positional placeholder per field.
    """
    column_list = ", ".join(field_names)
    placeholders = ", ".join(f"${n}" for n in range(1, len(field_names) + 1))
    return f"INSERT INTO {table_name} ({column_list}) values ({placeholders})"
def convert_data(data: dict[str, Any], fields: list[Field] = fields) -> OrderedDict:
    """Return filtered and ordered device data.

    Args:
        data (Dict[str, Any]): raw dictionary directly from device.
        fields (List[Field], optional): fields specification.

    Returns:
        OrderedDict[str, Any]: {db_key: converted_value, ...}; items() will
        return in the same order as SQL commands assuming they are all based
        on the same field list.

    Fix: ``fields`` is now forwarded to :func:`gen_stored`; previously the
    parameter was accepted but ignored, so a caller-supplied field list had
    no effect. The redundant ``missing.clear()`` (the list was freshly
    created) was also removed.
    """
    missing: list[str] = []

    def convert(data: dict[str, Any], field: Field) -> Any:
        if field.convert:
            # Custom conversion function; it handles missing data itself.
            return field.convert(data, field.json_key)
        if field.json_key in data:
            return data[field.json_key]
        missing.append(field.json_key)
        return None

    result = OrderedDict((f.db_name, convert(data, f)) for f in gen_stored(fields))
    # Diagnostic hook, kept from the original:
    # if missing:
    #     print(f"fields were missing: {missing}")
    return result
| PaulSorenson/purpleair_sensor | paii/purple_data.py | purple_data.py | py | 9,065 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.NamedTuple",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "collections.abc.Callable",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Any",
... |
32941642034 | import numpy as np
import matplotlib
from matplotlib.colors import ListedColormap
# Official SLAC brand palette (hex strings).
SLACred = '#8C1515'
SLACgrey = '#53565A'
SLACblue = '#007C92'
SLACteal = '#279989'
SLACgreen = '#8BC751'
SLACyellow = '#FEDD5C'
SLACorange = '#E04F39'
SLACpurple = '#53284F'
SLAClavender = '#765E99'
SLACbrown = '#5F574F'

# Color cycle order installed as the matplotlib axes prop_cycle below.
SLACcolors = [SLACred,
              SLACblue,
              SLACteal,
              SLACgreen,
              SLACyellow,
              SLACgrey,
              SLACorange,
              SLACpurple,
              SLAClavender,
              SLACbrown,
              ]

# RGB triples in [0, 1] used to build the 'SLACverde' colormap.
# SLACsage = [199./256, 209./256, 197./256]
white = [256./256, 256./256, 256./256]
SLACpaloverde = [39./256, 153./256, 137./256]

# Register a 256-step linear white -> palo-verde colormap, built by
# interpolating each RGB channel independently.
# NOTE(review): matplotlib.cm.register_cmap is deprecated and removed in
# matplotlib >= 3.9; newer code should use matplotlib.colormaps.register.
matplotlib.cm.register_cmap('SLACverde',
                            ListedColormap(np.array([np.interp(np.linspace(0, 1, 256),
                                                               [0, 1],
                                                               [whiteV, pvV])
                                                     for whiteV, pvV in zip(white, SLACpaloverde)]).T,
                                           name = 'SLACverde'))

# LaTeX labels for neutrino flavors (rendered via usetex, enabled below).
LaTeXflavor = {"numu": r'$\nu_\mu$',
               "numubar": r'$\bar{\nu}_\mu$',
               "nue": r'$\nu_e$',
               "nuebar": r'$\bar{\nu}_e$',
               "nutau": r'$\nu_\tau$',
               "nutaubar": r'$\bar{\nu}_\tau$'}

# Install the SLAC defaults: color cycle, colormap, fonts and LaTeX text.
matplotlib.rc('axes', **{"prop_cycle": matplotlib.cycler(color = SLACcolors)})
matplotlib.rc('image', **{"cmap": 'SLACverde'})
matplotlib.rc('font', **{"family": 'sans-serif',
                         "sans-serif": 'Arial',
                         "size": 16,
                         "weight": 'bold'})
matplotlib.rc('text', **{"usetex": True})
| DanielMDouglas/SLACplots | SLACplots/colors.py | colors.py | py | 1,729 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.cm.register_cmap",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors.ListedColormap",
"line_number": 33,
"usage_type": "call"
},
{
"a... |
7438577752 | from pathlib import Path
from vesper.tests.test_case import TestCase
from vesper.util.preference_manager import PreferenceManager
import vesper.tests.test_utils as test_utils
_DATA_DIR_PATH = Path(test_utils.get_test_data_dir_path(__file__))
_PREFERENCE_FILE_PATH = _DATA_DIR_PATH / 'Preferences.yaml'
_EMPTY_PREFERENCE_FILE_PATH = _DATA_DIR_PATH / 'Empty Preferences.yaml'
_NON_MAPPING_PREFERENCE_FILE_PATH = \
_DATA_DIR_PATH / 'Non Mapping Preferences.yaml'
_MALFORMED_PREFERENCE_FILE_PATH = _DATA_DIR_PATH / 'Malformed Preferences.yaml'
class PreferenceManagerTests(TestCase):
    """Tests for PreferenceManager loading and dotted-name lookup."""

    def test_get(self):
        """Lookup via ``in``, ``[]`` and ``get`` returns expected values."""
        preferences = _get_preferences(_PREFERENCE_FILE_PATH)
        # (dotted preference name, expected value) pairs; the last case
        # shows that an intermediate category is itself a mapping.
        cases = (
            ('one', 1),
            ('category_a.two', 2),
            ('category_a.three', 'three'),
            ('category_a.category_b.forty_five', 45),
            ('category_a.category_b.fifty six', 56),
            ('category_a.category_b', {'forty_five': 45, 'fifty six': 56})
        )
        # Check both file-loaded and YAML-loaded preference objects.
        for p in preferences:
            for name, value in cases:
                self.assertTrue(name in p)
                self.assertEqual(p[name], value)
                self.assertEqual(p.get(name), value)

    def test_get_of_nonexistent_preferences(self):
        """Missing names: ``in`` is False, ``[]`` raises, ``get`` defaults."""
        preferences = _get_preferences(_PREFERENCE_FILE_PATH)
        cases = (
            'bobo',
            'category_a.bobo'
        )
        for p in preferences:
            for name in cases:
                self.assertFalse(name in p)
                self.assertRaises(KeyError, p.__getitem__, name)
                self.assertIsNone(p.get(name))
                self.assertEqual(p.get(name, 10), 10)

    def test_empty_preference_file(self):
        self._test_bad_preference_file(_EMPTY_PREFERENCE_FILE_PATH)

    def _test_bad_preference_file(self, file_path):
        # A bad file should yield empty (but still usable) preferences.
        preferences = _get_preferences(file_path)
        for p in preferences:
            self.assertEqual(len(p), 0)

    def test_malformed_preference_file(self):
        self._test_bad_preference_file(_MALFORMED_PREFERENCE_FILE_PATH)

    def test_non_mapping_preference_file(self):
        self._test_bad_preference_file(_NON_MAPPING_PREFERENCE_FILE_PATH)

    def test_nonexistent_preference_file(self):
        """Loading a missing file leaves the manager with no preferences."""
        manager = PreferenceManager()
        manager.load_preferences_from_file('nonexistent')
        self.assertEqual(len(manager.preferences), 0)
def _get_preferences(file_path):
    """Load the same preference file two ways and return both results.

    Returns a ``(from_file, from_yaml)`` pair so each test exercises both
    the file-based and the YAML-string-based construction paths.
    """
    from_file = PreferenceManager.create_for_file(file_path).preferences
    with open(file_path) as f:
        yaml_text = f.read()
    from_yaml = PreferenceManager.create_for_yaml(yaml_text).preferences
    return (from_file, from_yaml)
| HaroldMills/Vesper | vesper/util/tests/test_preference_manager.py | test_preference_manager.py | py | 2,906 | python | en | code | 47 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "vesper.tests.test_utils.get_test_data_dir_path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "vesper.tests.test_utils",
"line_number": 8,
"usage_type": "name"
},
{
"... |
35792097840 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.support.ui import WebDriverWait
import random
def get_sore_and_Price(store_id, internet_id):
    """Scrape the price of one Home Depot item at one store.

    Opens a fresh Chrome session, selects the store by *store_id*, searches
    for *internet_id*, and reads the displayed price.

    Returns:
        (store_id, price, message): price is "NA" on failure and message
        describes the failure ('' on success).

    Fixes: the two bare ``except:`` clauses are narrowed to
    ``except Exception`` (a bare except also swallows KeyboardInterrupt),
    and the WebDriver is now always quit via try/finally — previously every
    call leaked a Chrome process.
    """
    # NOTE: update this path to your local chromedriver before running.
    driver = webdriver.Chrome('C:/Users/cunzh/Desktop/chromedriver.exe')
    try:
        driver.get("https://www.homedepot.com/l/")
        driver.find_element_by_id("storeSearchBox").send_keys(store_id)
        driver.find_element_by_class_name("sfSearchbox__button").click()
        # Random pauses mimic human browsing and help avoid bot detection.
        time.sleep(random.randint(3, 5))
        Message = ''
        try:
            store = driver.find_element_by_class_name('sfstores')
            # Reading the name raises if the store block is malformed.
            store_name = store.find_element_by_class_name('sfstorename').text
        except Exception:
            price = "NA"
            Message = "store cannot be found"
        else:
            a = store.find_element_by_class_name('sfstorelinks').find_element_by_tag_name('a')
            time.sleep(random.randint(3, 5))
            a.click()
            time.sleep(random.randint(3, 5))
            driver.find_element_by_id("headerSearch").send_keys(internet_id)
            time.sleep(random.randint(3, 5))
            driver.find_element_by_id("headerSearchButton").click()
            time.sleep(random.randint(3, 5))
            try:
                content = driver.find_element_by_class_name("price-detailed__wrapper")
                spans = content.find_elements_by_tag_name('span')
                # Expect exactly [currency symbol, dollars, cents].
                if len(spans) != 3:
                    price = 'NA'
                    Message = 'price cannot be found'
                else:
                    price = spans[1].text + '.' + spans[2].text
            except Exception:
                price = 'NA'
                Message = 'price cannot be found'
        return store_id, price, Message
    finally:
        # Always release the browser session.
        driver.quit()
# In[ ]:
# We can test the code by using follwing example:
store_list = ['954', '907', '6917']
test_list = ['302895490', '302895488', '100561401', '206809290']
list1=[]
for store in store_list:
for item in test_list:
list1.append(get_sore_and_Price(store,item))
# In[ ]:
list1
| JiyuanZhanglalala/Web-Scraping- | Home Depot Web Scraping Function.py | Home Depot Web Scraping Function.py | py | 2,398 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.randin... |
41958018958 | from waterworld.waterworld import env as custom_waterworld
from potential_field.potential_field_policy import PotentialFieldPolicy
from utils import get_frames
from pettingzoo.utils import average_total_reward
from multiprocessing import Pool, cpu_count
import tqdm
import numpy as np
from matplotlib import pyplot as plt
import json
n_coop_options = [1, 2]
n_sensor_options = [1, 2, 5, 20, 30]
angle_options = [("randomize_angle",False),("randomize_angle",True),
("spin_angle",0),("spin_angle",0.1),("spin_angle",0.5),("spin_angle",1)]
obs_weighting_options=[1, 0.5]
poison_weighting_options=[1, 0.5]
barrier_weighting_options=[1, 0.5]
food_weighting_options=[1, 0.5]
def test_policy(config, rounds=100):
    """Run the potential-field policy for ``rounds`` episodes.

    Appends each episode's summed reward to ``config["rewards"]`` and
    writes the finished config to potential_field/test_main/<index>.json
    (mode "x": the open fails if the result file already exists).
    """
    env = custom_waterworld(**config["env_config"])
    policy = PotentialFieldPolicy(**config["potential_field_config"]).get_movement_vector
    for i in tqdm.tqdm(range(rounds)):
        reward_sum, frame_list = get_frames(env, policy)
        config["rewards"].append(reward_sum)
    env.close()
    with open(f"potential_field/test_main/{config['config_index']}.json", "x") as f:
        json.dump(config, f, indent=4)
def get_configs():
    """Build the full experiment sweep.

    For each cooperation count: every (sensor count x angle option)
    combination, plus every weighting combination at 30 sensors. Each
    config carries env kwargs, policy kwargs, an empty rewards list and a
    unique ``config_index`` used as the output file name.
    """
    configs = []
    i = 0
    for n_coop in n_coop_options:
        for n_sensor in n_sensor_options:
            for angle_config in angle_options:
                # angle_config is a (kwarg name, value) pair, e.g.
                # ("randomize_angle", True) or ("spin_angle", 0.5).
                configs.append({"env_config":
                                {"n_coop": n_coop, "n_sensors": n_sensor,},
                                "potential_field_config": {
                                    "n_sensors": n_sensor,
                                    angle_config[0]: angle_config[1],
                                },
                                "rewards": [],
                                "config_index": i
                                })
                i += 1
        # Weighting sweep runs at the maximum sensor count (30) only.
        for obs_weight in obs_weighting_options:
            for poison_weight in poison_weighting_options:
                for barrier_weight in barrier_weighting_options:
                    for food_weight in food_weighting_options:
                        configs.append({"env_config":
                                        {"n_coop": n_coop, "n_sensors": 30,},
                                        "potential_field_config": {
                                            "n_sensors": 30,
                                            "obs_weight": obs_weight,
                                            "poison_weight": poison_weight,
                                            "barrier_weight": barrier_weight,
                                            "food_weight": food_weight
                                        },
                                        "rewards": [],
                                        "config_index": i
                                        })
                        i += 1
    return configs
def get_main_configs():
    """Enumerate configs over n_coop x n_sensors x angle options only.

    Same shape as the first part of :func:`get_configs`, without the
    weighting sweep.
    """
    configs = []
    index = 0
    for n_coop in n_coop_options:
        for n_sensors in n_sensor_options:
            for angle_key, angle_value in angle_options:
                configs.append({
                    "env_config": {"n_coop": n_coop, "n_sensors": n_sensors},
                    "potential_field_config": {
                        "n_sensors": n_sensors,
                        angle_key: angle_value,
                    },
                    "rewards": [],
                    "config_index": index,
                })
                index += 1
    return configs
def get_env_configs():
    """Enumerate environment-only configs over n_coop x n_sensors."""
    configs = []
    index = 0
    for n_coop in n_coop_options:
        for n_sensors in n_sensor_options:
            configs.append({
                "env_config": {"n_coop": n_coop, "n_sensors": n_sensors},
                "rewards": [],
                "config_index": index,
            })
            index += 1
    return configs
def test_random_env(config, rounds=100):
    """Baseline: run a uniformly random policy for ``rounds`` episodes.

    Appends each episode's summed reward to ``config["rewards"]`` and
    writes the config (with results) to potential_field/test_random/<index>.json.
    """
    env = custom_waterworld(**config["env_config"])
    action_space = env.action_space("pursuer_0")
    def policy(obs):
        # Ignore the observation entirely; sample a random action.
        return action_space.sample()
    for i in tqdm.tqdm(range(rounds)):
        reward_sum, frame_list = get_frames(env, policy)
        config["rewards"].append(reward_sum)
    env.close()
    with open(f"potential_field/test_random/{config['config_index']}.json", "x") as f:
        json.dump(config, f, indent=4)
if __name__ == "__main__":
    configs = get_env_configs()
    # Leave two cores free; fan the random-baseline runs out in parallel.
    with Pool(processes=int(cpu_count() - 2)) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(test_random_env, configs), total=len(configs)):
            pass
| ezxzeng/syde750_waterworld | test_policy.py | test_policy.py | py | 4,604 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "waterworld.waterworld.env",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "potential_field.potential_field_policy.PotentialFieldPolicy",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 27,
"usage_type": "call"... |
32840040688 |
from scipy.sparse import csr_matrix
from .text import WordPieceParser
from collections.abc import Mapping, Iterable
class RecordVectorMap(Mapping):
    """Read-only mapping from record id to a sparse text vector.

    Records are parsed once at construction with a WordPiece model and
    stored row-wise in a single ``csr_matrix``.

    Fix: ``__iter__`` now iterates the record ids (mapping keys) as the
    ``collections.abc.Mapping`` contract requires; previously it iterated
    the ``csr_matrix`` itself, which broke the derived ``keys()``,
    ``items()`` and ``values()`` mixins.
    """

    def __init__(self, records, wp_model_path, vec_format='bag-of-words'):
        """records: iterable of (rec_id, rec_text) pairs; vec_format is
        'bag-of-words' or 'sequence'."""
        text_parser = WordPieceParser(wp_model_path)
        self.rec_seq_map, self.record_vecs = self.rec2vecs(records, text_parser, vec_format)

    def rec2vecs(self, records, text_parser, vec_format):
        """Parse all records into ({rec_id: row index}, csr row matrix)."""
        rec_seq_map = {}
        cols, rows, data = [], [], []
        # Bag-of-words width is the vocab size; sequence width grows to
        # the longest parsed sequence.
        col_dim = 0 if vec_format == 'sequence' else text_parser.vocab_size
        for rec_seq, (rec_id, rec_text) in enumerate(records):
            rec_seq_map[rec_id] = rec_seq
            parsed = text_parser.parse(rec_text, parse_format=vec_format)
            if vec_format == 'sequence':
                if len(parsed) != 0:
                    rows.extend([rec_seq] * len(parsed))
                    cols.extend(list(range(len(parsed))))
                    data.extend(parsed)
                    if len(parsed) > col_dim:
                        col_dim = len(parsed)
            else:
                # parsed maps wordpiece id -> term frequency.
                for wp_id, tf in parsed.items():
                    rows.append(rec_seq)
                    cols.append(wp_id)
                    data.append(tf)
        record_vecs = csr_matrix((data, (rows, cols)), shape=(len(records), col_dim))
        return rec_seq_map, record_vecs

    def __getitem__(self, key):
        """Vector for one record id (str), or rows for an iterable of ids."""
        # str is itself Iterable, so the string check must come first.
        if isinstance(key, str):
            return self.get_by_seqs(self.rec_seq_map[key])
        elif isinstance(key, Iterable):
            return self.get_by_seqs([self.rec_seq_map[a_key] for a_key in key])
        else:
            raise TypeError('Key must be string (key of record) or iterable (list of key of record).')

    def get_by_seqs(self, key):
        """Vector(s) by internal row index (int) or iterable of indices."""
        if isinstance(key, (int, Iterable)):
            # csr_matrix handles both a single row index and a list of them.
            return self.record_vecs[key]
        raise TypeError('Seqs must be int (seq of record) or iterable (list of seq of record).')

    def __iter__(self):
        # Iterate record ids (keys), per the Mapping contract.
        return iter(self.rec_seq_map)

    def __len__(self):
        return len(self.rec_seq_map)
| rmhsiao/CAGNIR | utils/data/record.py | record.py | py | 2,182 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.abc.Mapping",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "text.WordPieceParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "... |
25860754920 | from typing import Any
from random import randint, random
import pygame as py
from engine.game_engine import Pygame
from engine.color import Color
from engine.game_objects import IGameObject
from engine.game_objects.modules import IAnimationModule, ICollisionModule
from engine.image import SpriteSheet
from engine.ui.text import TypeWriterText
from engine.map import LoopingMap
from src.player.player import Player
class UI_NPC_Text(TypeWriterText):
    """Type-writer dialogue line shown inside the NPC text box."""

    def __init__(self, game: Pygame = None):
        # Bind the fixed dialogue parameters to names before delegating.
        message = "We need to push forward! We need to push forward!"
        colour = (200, 200, 200)
        super().__init__(message, colour, game.fonts["8bit"], (135, 590), 10, game=game)
class UI_NPC_Text_Box(IGameObject):
    """Static frame image drawn behind the NPC dialogue text."""

    def __init__(self, game: Pygame = None):
        super().__init__("ui", game)
        self.pos = (120, 577)
        frame_path = f"{self.game.game_dir}\\data\\images\\ui\\ui_npc_text_box.png"
        self.add_layer(frame_path)
        self.rect = self.get_layer_image(0).get_rect(topleft=self.pos)
class UI_NPC_Box(IGameObject):
    """NPC portrait box: three 100x100 layers with a 'noise' transition
    animation that reveals (or hides) the active NPC portrait."""

    def __init__(self, game: Pygame = None):
        super().__init__("ui", game)
        self.pos = (10, 665)
        # self.add_layer(f"{self.game.game_dir}/data/images/npc_board.png")
        # Three transparent placeholder layers: 0 = portrait, 1 = noise overlay.
        self.add_layer(py.Surface((100, 100), flags=py.SRCALPHA))
        self.add_layer(py.Surface((100, 100), flags=py.SRCALPHA))
        self.add_layer(py.Surface((100, 100), flags=py.SRCALPHA))
        # self.add_layer(f"{self.game.game_dir}/data/images/npc_box.png")
        # Currently displayed NPC key ("" = none).
        self.npc = ""
        self.npcs = {
            "red": py.image.load(f"{self.game.game_dir}/data/images/ui/npcs/red_npc.png")
        }
        # 100x100 frames of TV-static used for the reveal/hide transition.
        self.anim_atlas = SpriteSheet(f"{self.game.game_dir}/data/images/ui/noise_atlas.png", 100, 100).get_image_array()
        self.rect = self.get_layer_image(1).get_rect(bottomleft=self.pos)
        self.set_module("animation", IAnimationModule(self, False))
        # Forward noise animation: plays when a portrait appears.
        self.get_module("animation").add_animation_by_dict(
            "noise",
            {
                "layer": 1,
                "frames": [self.anim_atlas[0], self.anim_atlas[1], self.anim_atlas[2], self.anim_atlas[3],
                           self.anim_atlas[4], self.anim_atlas[5], self.anim_atlas[6], self.anim_atlas[7]],
                "frame_time": 40,
                "loop": False,
                "callback": self.noise_anim_ended
            }
        )
        # Reversed noise animation: plays when the portrait is dismissed.
        # NOTE(review): the last frame repeats anim_atlas[1] rather than
        # ending on anim_atlas[0] — confirm this asymmetry is intentional.
        self.get_module("animation").add_animation_by_dict(
            "noise_reversed",
            {
                "layer": 1,
                "frames": [self.anim_atlas[7], self.anim_atlas[6], self.anim_atlas[5], self.anim_atlas[4],
                           self.anim_atlas[3], self.anim_atlas[2], self.anim_atlas[1], self.anim_atlas[1]],
                "frame_time": 40,
                "loop": False,
                "callback": self.noise__reversed_anim_ended
            }
        )
        # Companion dialogue objects are registered with the global handler.
        self.text = UI_NPC_Text(self.game)
        self.border = UI_NPC_Text_Box(self.game)
        self.game.objects.add("ui_npc_text", self.text)
        self.game.objects.add("ui_npc_text_box", self.border)
        self.get_module("animation").should_animate = True
        self.get_module("animation").play("noise")

    def noise_anim_ended(self):
        # Reveal callback: currently hard-codes the "red" NPC as the portrait.
        self.npc = "red"
        if self.npc != "":
            self.set_layer(self.npcs[self.npc], layer_id=0)
            self.set_layer(self.anim_atlas[8], layer_id=1)

    def noise__reversed_anim_ended(self):
        # Hide callback: clear both the portrait and the overlay layers.
        self.npc = ""
        self.set_layer(py.Surface((100, 100), py.SRCALPHA), layer_id=0)
        self.set_layer(py.Surface((100, 100), py.SRCALPHA), layer_id=1)
class UI_BG(IGameObject):
    """Background strip anchored to the bottom of the screen for the UI."""

    def __init__(self, game=None):
        super().__init__("ui", game)
        self.pos = (0, 675)
        background_path = f"{self.game.game_dir}/data/images/ui_bg.png"
        self.add_layer(background_path)
        self.rect = self.get_layer_image(0).get_rect(bottomleft=self.pos)
# Rock Object #
class Rock(IGameObject):
    """Obstacle that spawns off the right edge and drifts leftwards."""

    def __init__(self, game: Pygame):
        super().__init__('env', game)
        self.set_module("collision", ICollisionModule(self, False))
        screen = self.game.screen
        # Start just past the right edge at a random height.
        self.pos = (screen.get_width(), randint(0, self.game.screen.get_height()))
        # Horizontal drift is always leftward; vertical drift is small/random.
        # (RNG calls happen in the same order as before: randint, randint, random.)
        drift_x = randint(-2, -1)
        drift_y = randint(-1, 1) * (random() * 0.5)
        self.vel = (drift_x, drift_y)
        sprite = py.image.load(f"{self.game.game_dir}/data/images/rock.png").convert_alpha()
        size = max(25, random() * 48)
        sprite = py.transform.scale(sprite, (size, size))
        sprite = py.transform.rotate(sprite, randint(-25, 25))
        self.add_layer(sprite)
        self.rect = self.get_layer_image(self.primary_layer).get_rect(center=self.pos)
        self.mask = py.mask.from_surface(self.get_layer_image(0))

    def update(self, *args, **kwargs) -> None:
        """Move by the per-frame velocity and keep the rect in sync."""
        x, y = self.pos
        vx, vy = self.vel
        self.pos = (x + vx, y + vy)
        self.rect = self.get_layer_image(0).get_rect(center=self.pos)
        super().update()
class RockSpawner(IGameObject):
    """Handler object that periodically spawns a Rock."""

    def __init__(self, group: str = "handler", game: Pygame = None):
        super().__init__(group, game)
        self.spawn_timing = 2000  # interval between spawns (game-time units)
        self.last_spawn_time = self.game.time

    def update(self, *args: Any, **kwargs: Any) -> None:
        """Spawn a new rock once the interval has elapsed, then delegate."""
        if self.game.time >= self.last_spawn_time + self.spawn_timing:
            # Random suffix keeps each handler key unique.
            self.game.objects.add(f"rock_{randint(0, 999999)}", Rock(self.game))
            self.last_spawn_time = self.game.time
        return super().update(*args, **kwargs)
# MINIMAL RUNNING EXAMPLE #
# Main Game Engine Object #
class Game(Pygame):
    """Top-level game object: window/group setup, asset loading, input
    handling and the main draw/update loop for the space-shooter example."""

    def __init__(self):
        # 1200x675 window at a 60 FPS target.
        super(Game, self).__init__(1200, 675, "Space Game", fps_target=60)
        # Register the draw/update groups in rendering order.
        self.add_group("handler")
        self.add_group("map")
        self.add_group("player")
        self.add_group("player_projectile")
        self.add_group("env")
        self.add_group("enemy")
        self.add_group('ui')
        self.fonts["8bit"] = self.get_font("8-BIT WONDER.ttf", 17)
        # SETUP GAME AXIS CONTROLS (IControlModule().get_axis("move_left"))
        # WASD keys mapped to -1/+1 axis directions.
        self.axis = {'move_left': {py.K_a: -1, py.K_d: 1}, 'move_up': {py.K_w: -1, py.K_s: 1}}
        # Capture and hide the mouse cursor inside the window.
        py.event.set_grab(True)
        py.mouse.set_pos((100, self.screen.get_height() / 2))
        py.mouse.set_visible(False)
        self.load_data()
        self.start_game_loop()  # blocks here until the game exits

    def load_data(self):
        """Create the initial scene: map, player, rock spawner and UI."""
        super(Game, self).load_data() # Required to set the base dir of the game for easy access in objects without recalculating where to split the path (self.game_dir)
        self.objects['map'] = LoopingMap(self, "data/images/background.png", [50, 0]) # Looping map uses the images size to calculate its looping. You may need to rescale your image to fit your game area (Window scaling will handle it after as long as it covers the initial screen). Supports vertical or horizontal but not both (Hopefully in future revisions)
        self.objects["player"] = Player((0, self.screen.get_height() / 2), self) # Adds a GameObject to the ObjectHandler so that update and draw calls are triggered correctly
        self.objects.add("rock_spawner", RockSpawner(game=self))
        self.objects["ui_bg"] = UI_BG(self)
        self.objects['ui_npc_box'] = UI_NPC_Box(self)
        # self.objects['ui_npc_text'] = UI_NPC_Text(self)

    def draw(self):
        """Clear the frame, draw all registered objects, flip the display."""
        self.screen.fill(Color(1, 1, 1, 1).RGB) # self.screen.fill((255, 255, 255)). Color class is used mostly for storing colors to easily recall but may get more features later
        super(Game, self).draw() # Required to call the draw function for registered objects
        # UNCOMMENT FOR RECT DEBUGGING
        if self.debug:
            for group in self.groups.keys():
                for sprite in self.groups[group]:
                    self.debug.debug_collision(sprite)
        super(Game, self).render_display() # Required to call the render update of the display (py.display.flip())

    def update(self):
        """Drain the pygame event queue: resize, quit, debug and NPC keys."""
        super(Game, self).update()
        for event in py.event.get():
            # COMMENT TO REMOVE MANUAL WINDOW RESIZING SUPPORT
            if event.type == py.VIDEORESIZE:
                self.windowSize = (event.w, event.h)
                py.display._resize_event(event)
            if event.type == py.QUIT:
                self.quit()
            if event.type == py.KEYDOWN:
                # UNCOMMENT TO TOGGLE DEBUGGING
                if event.key == py.K_c: self.debug.set_debug(not self.debug._debug)
                # 'P' toggles the NPC portrait in/out via the noise animations.
                if event.key == py.K_p:
                    npc_ui = self.objects.get("ui_npc_box")
                    if npc_ui.npc == "":
                        npc_ui.get_module("animation").play("noise")
                    else:
                        self.objects["ui_npc_box"].get_module("animation").play("noise_reversed")
                if event.key == py.K_ESCAPE:
                    self.quit()
if __name__ == '__main__':
    # Constructing Game starts the blocking main loop (see Game.__init__).
    g = Game()
| XCPika/Pygame-Extension-Framework | main.py | main.py | py | 8,838 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "engine.ui.text.TypeWriterText",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "engine.game_engine.Pygame",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "engine.game_objects.IGameObject",
"line_number": 21,
"usage_type": "name"
},
... |
225835019 | import streamlit as st
import calculator_logic
# Page layout: title, two numeric inputs and an operation selector.
st.title("Calculator App")

num1 = st.number_input("Enter the first number:")
num2 = st.number_input("Enter the second number:")

operation = st.selectbox("Select an operation", calculator_logic.OPERATIONS)

# Streamlit re-runs this script on every interaction; the computation only
# happens on the run triggered by the button press.
if st.button("Calculate"):
    result = calculator_logic.calculate(num1, num2, operation)
    st.success(f"The result is {result}")
# Define a function to display the signature
def display_signature():
    """Render the footer signature: inject its CSS, then the HTML markup."""
    # CSS: italic, centered text that transitions to blue on hover.
    st.markdown(
        """
        <style>
        .signature {
            font-size: 1rem;
            font-style: italic;
            text-align: center;
            padding: 1rem 0;
            color: #333;
            transition: color 0.5s ease-in-out;
        }
        .signature:hover {
            color: #007bff;
        }
        </style>
        """
        , unsafe_allow_html=True
    )
    # The signature markup itself (raw HTML requires unsafe_allow_html).
    st.markdown(
        """
        <div class="signature">
            Made with ❤️ by Shib Kumar Saraf
        </div>
        """
        , unsafe_allow_html=True
    )

# Add the signature to your Streamlit app
display_signature()
| shib1111111/basic_calculator | app.py | app.py | py | 1,113 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.title",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "streamlit.number_input",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.number_input",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.... |
12866597010 | import sys
from collections import defaultdict
def tpsortutil(u, visited, stack, cur):
    # Recursive DFS helper: marks u visited, recurses into unvisited
    # successors, then pushes u (post-order) onto `stack`.
    # NOTE(review): reads the module-level `graph`, not a parameter — only
    # works when called after the __main__ block defines it.
    # NOTE(review): nothing ever adds to `cur` (neither here nor in the
    # caller), so the `elif i in cur` branch — presumably intended as cycle
    # detection — is dead code; confirm and fix or remove.
    visited[u] = True
    for i in graph[u]:
        if not visited[i]:
            tpsortutil(i, visited, stack, cur)
        elif i in cur:
            return
    stack.append(u)
def topologicalsort(graph, vertices):
    """Print and return a topological ordering of ``graph``.

    Parameters
    ----------
    graph : mapping of vertex -> list of successor vertices; vertices are
        integers in ``range(vertices)``.
    vertices : total number of vertices.

    Returns
    -------
    list
        The vertices in topological order.  The order is also printed, which
        preserves the original behaviour (previously nothing was returned).

    Notes
    -----
    Uses the passed-in ``graph`` throughout instead of the module-level
    global the old helper relied on, and uses ``graph.get`` so a defaultdict
    input is no longer silently mutated.  The original's ``cur`` set was
    never populated, so no cycle detection is lost by dropping it.
    """
    visited = [False] * vertices

    def dfs(u, stack):
        # Post-order DFS: a vertex is pushed only after all its successors.
        visited[u] = True
        for v in graph.get(u, []):
            if not visited[v]:
                dfs(v, stack)
        stack.append(u)

    stack = []
    for i in range(vertices):
        # Match the original: start only from vertices with outgoing edges;
        # sinks are still reached (and ordered) through the recursion.
        if not visited[i] and graph.get(i):
            dfs(i, stack)
    stack = stack[::-1]
    print(stack)
    return stack
if __name__ == "__main__":
    # Input format: first line = vertex count, second line = edge count,
    # then one "u v" pair per line for each directed edge u -> v.
    vertices = int(input())
    graph = defaultdict(list)
    edges = int(input())
    for _ in range(edges):
        edge = [int(x) for x in input().split()]
        graph[edge[0]].append(edge[1])
    print(graph)
    topologicalsort(graph, vertices)
{
"api_name": "collections.defaultdict",
"line_number": 26,
"usage_type": "call"
}
] |
32787034238 | """
URL configuration for backend project.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
# drf-yasg schema view backing the Swagger / ReDoc documentation endpoints.
schema_view = get_schema_view(
    openapi.Info(
        title="SoC Portal API",
        default_version="v1",
        description="Test description",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="contact@wncc.local"),
        license=openapi.License(name="BSD License"),
    ),
    permission_classes=[],
    public=True,
)

urlpatterns = [
    path("admin/", admin.site.urls),
    path("api/accounts/", include("accounts.urls")),
    path("api/dashboard/", include("dashboard.urls")),
    path("api/projects/", include("projects.urls")),
]

# API documentation routes.
# NOTE(review): drf-yasg examples use re_path(r"^swagger(?P<format>\.json|\.yaml)$");
# "swagger<format>/" with the default str converter matches e.g. "swaggerjson/" —
# confirm this is the intended URL shape.
urlpatterns += [
    path(
        "swagger<format>/", schema_view.without_ui(cache_timeout=0), name="schema-json"
    ),
    path(
        "swagger/",
        schema_view.with_ui("swagger", cache_timeout=0),
        name="schema-swagger-ui",
    ),
    path("redoc/", schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc"),
]

# Serve uploaded media files (development convenience).  This was previously
# appended twice — once before and once after the docs routes — which
# registered duplicate patterns; a single append is sufficient.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| wncc/SoC-Portal | backend/backend/urls.py | urls.py | py | 2,001 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "drf_yasg.views.get_schema_view",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Info",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": ... |
33962354417 | import numpy as np
from sklearn.linear_model import LogisticRegression
from random import randrange
from math import ceil, floor
# Load the observations from the text file; skip the header row with the
# column names.
data = np.genfromtxt('данные двумерная модель.txt', skip_header=1)

# Split into predictors (all but the last column) and the target column.
X = data[:, :-1]
y = data[:, -1]

# Fit the model.
# NOTE(review): the original comments describe "multiple regression", but
# LogisticRegression is a classifier — confirm which model is intended.
model = LogisticRegression()
model.fit(X, y)

# Report the fitted coefficients and intercept.
print("Коэффициенты регрессии:")
print("a1, a2:", [round(x, 3) for x in model.coef_[0]])
print("b (пересечение):", round(model.intercept_[0], 3))

# Per-feature observed ranges, vectorised (replaces the manual
# float('inf') initialisation and element-wise comparison loops).
X_min = X.min(axis=0)
X_max = X.max(axis=0)

# Draw one random integer inside each feature's observed range.
R_X = [randrange(ceil(lo), floor(hi)) for lo, hi in zip(X_min, X_max)]
print(R_X)

# Predict the target for the randomly generated point.
x_new = np.array([R_X])
y_pred = model.predict(x_new)
print(f"Прогнозное значение y для новых данных: {y_pred[0]:.2f}")
print(f'{model.coef_[0][0]:.3f} {model.coef_[0][1]:.3f}, {model.intercept_[0]:.3f}')
{
"api_name": "numpy.genfromtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 33,
"usage_type": "call"
},
{
"api_name... |
33124682966 | import json
import os
import docx
# Generate one .docx (and a PDF) per course entry in disciplinas.json.
with open(f'disciplinas.json') as f:
    data = json.load(f)
# print(df.columns.values)
for index, discpln in data.items():
    print(f'{discpln["sigla"]} - {discpln["nome"]}')
    doc = docx.Document()
    # Title: course code + name, with the English name as a sub-heading.
    doc.add_heading(f'{discpln["sigla"]} - {discpln["nome"]}')
    doc.add_heading(f'{discpln["nome_en"]}', level=3)
    doc.add_paragraph()
    # General data (credits, workload, activation date, department)
    p = doc.add_paragraph(style = 'List Bullet')
    p.add_run(f'Créditos-aula: {discpln["CA"]}\n')
    p.add_run(f'Créditos-trabalho: {discpln["CT"]}\n')
    p.add_run(f'Carga horária: {discpln["CH"]}\n')
    p.add_run(f'Ativação: {discpln["ativacao"]}\n')
    p.add_run(f'Departamento: {discpln["departamento"]}\n')
    # Courses and their ideal semesters, joined into one comma-separated run
    # (cs[:-1] drops the trailing comma).
    cs = f'Curso (semestre ideal):'
    for curso, semestre in discpln["semestre"].items():
        cs += f' {curso} ({semestre}),'
    p.add_run(cs[:-1])
    # Objectives (Portuguese, then English in italics when available).
    # NOTE(review): the English "objectives" text is gated on the "abstract"
    # flag rather than an "objectives" flag — confirm this is intentional.
    doc.add_heading(f'Objetivos', level=2)
    doc.add_paragraph(f'{discpln["objetivos"]}')
    if discpln["abstract"]:
        p = doc.add_paragraph()
        p.add_run(f'{discpln["objectives"]}').italic = True
    # Lecturers in charge (first ndoc-1 with a newline, last one without).
    doc.add_heading(f'Docente(s) Responsável(eis) ', level=2)
    profs = discpln["docentes"]
    nprofs = discpln["ndoc"]
    if nprofs:
        p = doc.add_paragraph(style='List Bullet')
        for i in range(nprofs-1):
            p.add_run(f'{profs[i]}\n')
        p.add_run(f'{profs[-1]}')
    # Short syllabus
    doc.add_heading(f'Programa resumido', level=2)
    doc.add_paragraph(f'{discpln["resumo"]}')
    if discpln["abstract"]:
        p = doc.add_paragraph()
        p.add_run(f'{discpln["abstract"]}').italic = True
    # Full syllabus
    doc.add_heading(f'Programa', level=2)
    doc.add_paragraph(f'{discpln["programa"]}')
    if discpln["program"]:
        p = doc.add_paragraph()
        p.add_run(f'{discpln["program"]}').italic = True
    # Assessment: method, criterion and make-up exam rules (labels in bold).
    doc.add_heading('Avaliação', level=2)
    p = doc.add_paragraph( style='List Bullet')
    p.add_run('Método: ').bold = True
    p.add_run(f'{discpln["metodo"]}\n')
    p.add_run('Critério: ').bold = True
    p.add_run(f'{discpln["criterio"]}\n')
    p.add_run('Norma de recuperação: ').bold = True
    p.add_run(f'{discpln["exame"]}')
    # Bibliography
    doc.add_heading('Bibliografia', level=2)
    doc.add_paragraph(f'{discpln["bibliografia"]}')
    # Prerequisites (only when the course has any).
    nr = discpln['requisitos']
    if nr:
        doc.add_heading('Requisitos', level=2)
        p = doc.add_paragraph(style='List Bullet')
        for k, req in nr.items():
            p.add_run(f"{req['sigla']} - {req['nome']} ({req['tipo']})\n")
    # Save the .docx, creating the output directory on first use.
    try:
        os.mkdir(f'../assets/disciplinas/')
    except FileExistsError:
        pass
    docname = f'../assets/disciplinas/{discpln["sigla"]}.docx'
    doc.save(docname)
    # Export to PDF via AbiWord.
    # NOTE(review): os.system builds a shell command from the course code;
    # codes with spaces/shell metacharacters would break — consider
    # subprocess.run([...]) if codes are not guaranteed safe.
    os.system(f'abiword --to=pdf {docname}')
    # break
| luizeleno/pyjupiter | _python/gera-doc-pdf-unificado.py | gera-doc-pdf-unificado.py | py | 2,978 | python | es | code | 2 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "docx.Document",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 94,
... |
5432720139 | from sklearn.metrics import pairwise_distances
import numpy as np
import pandas as pd
from scipy.sparse import spmatrix
from anndata import AnnData
from scipy.stats import rankdata
from typing import Optional
from . import logger
from .symbols import NOVEL, REMAIN, UNASSIGN
class Distance():
"""
Class that deals with the cross-dataset cell-by-cell-type distance matrix.
Parameters
----------
dist_mat
Cell-by-cell-type distance matrix.
cell
Cell meta-information including at least `'dataset'`, `'ID'` and `'cell_type'`.
cell_type
Cell type meta-information including at least `'dataset'` and `'cell_type'`.
Attributes
----------
dist_mat
A cell-by-cell-type distance matrix.
cell
Cell meta-information including `'dataset'`, `'ID'` and `'cell_type'`.
cell_type
Cell type meta-information including `'dataset'` and `'cell_type'`.
n_cell
Number of cells involved.
n_cell_type
Number of cell types involved.
shape
Tuple of number of cells and cell types.
assignment
Assignment of each cell to the most similar cell type in each dataset (obtained through the `assign` method).
"""
def __init__(self, dist_mat: np.ndarray, cell: pd.DataFrame, cell_type: pd.DataFrame):
self.dist_mat = dist_mat
if cell.shape[0] != self.dist_mat.shape[0]:
raise ValueError(
f"🛑 Number of cells in `cell` does not match the cell number in `dist_mat`")
if cell_type.shape[0] != self.dist_mat.shape[1]:
raise ValueError(
f"🛑 Number of cell types in `cell_type` does not match the cell type number in `dist_mat`")
if not {'dataset', 'ID', 'cell_type'}.issubset(set(cell.columns)):
raise KeyError(
f"🛑 Please include `'dataset'`, `'ID'` and `'cell_type'` as the cell meta-information")
if not {'dataset', 'cell_type'}.issubset(set(cell_type.columns)):
raise KeyError(
f"🛑 Please include `'dataset'` and `'cell_type'` as the cell type meta-information")
self.cell = cell
self.cell_type = cell_type
@property
def n_cell(self) -> int:
"""Number of cells."""
return self.dist_mat.shape[0]
@property
def n_cell_type(self) -> int:
"""Number of cell types."""
return self.dist_mat.shape[1]
@property
def shape(self) -> tuple:
"""Numbers of cells and cell types."""
return self.dist_mat.shape
def __repr__(self):
lend = len(np.unique(self.cell_type.dataset))
if lend > 1:
base = f"Cross-dataset distance matrix between {self.n_cell} cells and {self.n_cell_type} cell types from {lend} datasets"
else:
base = f"Distance matrix between {self.n_cell} cells and {self.n_cell_type} cell types"
base += f"\n dist_mat: distance matrix between {self.n_cell} cells and {self.n_cell_type} cell types"
base += f"\n cell: cell meta-information ({str(list(self.cell.columns))[1:-1]})"
base += f"\n cell_type: cell type meta-information ({str(list(self.cell_type.columns))[1:-1]})"
if hasattr(self, 'assignment'):
base += f"\n assignment: data frame of cross-dataset cell type assignment"
return base
@staticmethod
def from_adata(adata: AnnData, dataset: str, cell_type: str, use_rep: Optional[str] = None, metric: Optional[str] = None, n_jobs: Optional[int] = None, check_params: bool = True, **kwargs):
    """
    Generate a :class:`~cellhint.distance.Distance` object from the :class:`~anndata.AnnData` given.

    Parameters
    ----------
    adata
        An :class:`~anndata.AnnData` object containing different datasets/batches and cell types.
        In most scenarios, the format of the expression `.X` in the AnnData is flexible (normalized, log-normalized, z-scaled, etc.).
        However, when `use_rep` is specified as `'X'` (or `X_pca` is not detected in `.obsm` and no other latent representations are provided), `.X` should be log-normalized (to a constant total count per cell).
    dataset
        Column name (key) of cell metadata specifying dataset information.
    cell_type
        Column name (key) of cell metadata specifying cell type information.
    use_rep
        Representation used to calculate distances. This can be `'X'` or any representations stored in `.obsm`.
        Default to the PCA coordinates if present (if not, use the expression matrix `X`).
    metric
        Metric to calculate the distance between each cell and each cell type. Can be `'euclidean'`, `'cosine'`, `'manhattan'` or any metrics applicable to :func:`sklearn.metrics.pairwise_distances`.
        Default to `'euclidean'` if latent representations are used for calculating distances, and to `'correlation'` if the expression matrix is used.
    n_jobs
        Number of CPUs used. Default to one CPU. `-1` means all CPUs are used.
    check_params
        Whether to check (or set the default) for `dataset`, `cell_type`, `use_rep` and `metric`.
        (Default: `True`)
    **kwargs
        Other keyword arguments passed to :func:`sklearn.metrics.pairwise_distances`.

    Returns
    ----------
    :class:`~cellhint.distance.Distance`
        A :class:`~cellhint.distance.Distance` object representing the cross-dataset cell-by-cell-type distance matrix.
    """
    #Use `check_params = False` if `dataset`, `cell_type`, `use_rep` and `metric` are already provided correctly.
    if check_params:
        if dataset not in adata.obs:
            raise KeyError(
                f"🛑 '{dataset}' is not found in the provided AnnData")
        if cell_type not in adata.obs:
            raise KeyError(
                f"🛑 '{cell_type}' is not found in the provided AnnData")
        # Pick a representation: prefer precomputed PCA, else raw expression.
        if use_rep is None:
            if 'X_pca' in adata.obsm.keys():
                logger.info(f"👀 Detected PCA coordinates in the object, will use these to calculate distances")
                use_rep = 'X_pca'
            else:
                logger.info(f"🧙 Using the expression matrix to calculate distances")
                use_rep = 'X'
        elif (use_rep not in adata.obsm.keys()) and (use_rep != 'X'):
            raise KeyError(
                f"🛑 '{use_rep}' is not found in `.obsm`")
        if use_rep == 'X' and adata.n_vars > 15000:
            logger.warn(f"⚠️ Warning: {adata.n_vars} features are used for calculating distances. Subsetting the AnnData into HVGs is recommended")
        # Correlation works better on expression; euclidean on latent spaces.
        if metric is None:
            metric = 'correlation' if use_rep == 'X' else 'euclidean'
    Cell_X = adata.X if use_rep == 'X' else adata.obsm[use_rep]
    IDs = adata.obs_names
    datasets = adata.obs[dataset].astype(str).values
    celltypes = adata.obs[cell_type].astype(str).values
    # For 'X', average in linear (expm1) space so the per-cell-type mean is a
    # mean of counts, then log1p the means back below.
    use_Cell_X = Cell_X if use_rep != 'X' else np.expm1(Cell_X)
    # Build one "eigen cell" (mean profile) per (dataset, cell type) pair.
    Celltype_X = []
    col_ds = []
    col_cs =[]
    for d in np.unique(datasets):
        for c in np.unique(celltypes[datasets == d]):
            col_cs.append(c)
            col_ds.append(d)
            m = use_Cell_X[(datasets == d) & (celltypes == c), :].mean(axis = 0)
            # Sparse .mean returns np.matrix; flatten it to a 1-D array.
            Celltype_X.append(m.A1 if isinstance(m, np.matrix) else m)
    Celltype_X = np.log1p(np.array(Celltype_X)) if use_rep == 'X' else np.array(Celltype_X)
    # Metrics outside sklearn's sparse-capable list require dense input.
    if metric not in ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']:
        if isinstance(Cell_X, spmatrix):
            Cell_X = Cell_X.toarray()
        if isinstance(Celltype_X, spmatrix):
            Celltype_X = Celltype_X.toarray()
    dist_mat = pairwise_distances(Cell_X, Celltype_X, metric = metric, n_jobs = n_jobs, **kwargs)
    cell = pd.DataFrame(dict(dataset=datasets, ID=IDs, cell_type=celltypes))
    cell_type = pd.DataFrame(dict(dataset=col_ds, cell_type=col_cs))
    return Distance(dist_mat, cell, cell_type)
def normalize(self, Gaussian_kernel: bool = False, rank: bool = True, normalize: bool = True) -> None:
    """
    Normalize the distance matrix with a Gaussian kernel.

    Parameters
    ----------
    Gaussian_kernel
        Whether to apply the Gaussian kernel to the distance matrix.
        (Default: `False`)
    rank
        Whether to turn the matrix into a rank matrx.
        (Default: `True`)
    normalize
        Whether to maximum-normalize the distance matrix.
        (Default: `True`)

    Returns
    ----------
    None
        The :class:`~cellhint.distance.Distance` object modified with a normalized distance matrix.
    """
    if Gaussian_kernel:
        # Per-cell bandwidth: RMS of that cell's distances over all cell types.
        sds = np.sqrt((self.dist_mat ** 2).sum(axis = 1) / self.n_cell_type)[:, np.newaxis]
        # NOTE(review): dividing by (2 / sds)**2 differs from the textbook
        # exp(-d**2 / (2 * sigma**2)) kernel — confirm the formula is intended.
        self.dist_mat = np.exp(- self.dist_mat / (2 / sds)**2)
        # Convert kernel similarities back to a dissimilarity by subtracting
        # row-normalized weights from 1.
        self.dist_mat = 1 - self.dist_mat / self.dist_mat.sum(axis = 1)[:, np.newaxis]
    if rank:
        # Replace values with their global (whole-matrix) ranks.
        self.dist_mat = rankdata(self.dist_mat).reshape(self.dist_mat.shape)
    if normalize:
        # Scale into (0, 1] by the maximum entry.
        self.dist_mat = self.dist_mat / self.dist_mat.max()
def concatenate(self, *distances, by: str = 'cell', check: bool = False):
    """
    Concatenate by either cells (rows) or cell types (columns).

    Parameters
    ----------
    distances
        A :class:`~cellhint.distance.Distance` object or a list of such objects.
    by
        The direction of concatenation, joining either cells (`'cell'`, rows) or cell types (`'cell_type'`, columns).
        (Default: `'cell'`)
    check
        Check whether the concatenation is feasible.
        (Default: `False`)

    Returns
    ----------
    :class:`~cellhint.distance.Distance`
        A :class:`~cellhint.distance.Distance` object concatenated along cells (`by = 'cell'`) or cell types (`by = 'cell_type'`).
    """
    # Accept either varargs of Distance objects or a single list/tuple/set.
    distances = distances[0] if isinstance(distances[0], (list, tuple, set)) else distances
    distances = tuple(distances)
    all_distances = (self,) + distances
    if by not in ['cell', 'cell_type']:
        raise ValueError(
            f"🛑 Unrecognized `by` value, should be one of `'cell'` or `'cell_type'`")
    if check:
        # Feasibility: all objects must share identical cell types (row
        # concat) or identical cells (column concat); compare the sorted
        # dataset+name keys and require a single unique combination.
        series_compare = [(x.cell_type.dataset+x.cell_type.cell_type).sort_values() for x in all_distances] if by == 'cell' else [(x.cell.dataset+x.cell.ID).sort_values() for x in all_distances]
        if pd.concat(series_compare, axis = 1).T.drop_duplicates().shape[0] > 1:
            raise Exception(
                f"🛑 Concatenation is not feasible. Please ensure the meta-information is matched")
    if by == 'cell':
        # Row-wise: stack matrices and cell tables; columns come from self.
        dist_mat = np.concatenate([x.dist_mat for x in all_distances], axis = 0)
        cell = pd.concat([x.cell for x in all_distances], axis = 0, ignore_index = True)
        return Distance(dist_mat, cell, self.cell_type)
    else:
        # Column-wise: first reorder every other object's rows to match
        # self's cell order.  match_base maps dataset+ID -> position in self;
        # argsort of those positions is the permutation that aligns x's rows
        # with self's rows.
        match_base = (self.cell.dataset+self.cell.ID).reset_index().set_index(0)
        indices = [np.argsort(match_base.loc[x.cell.dataset+x.cell.ID, 'index'].values) for x in distances]
        dist_mat = np.concatenate([self.dist_mat] + [x.dist_mat[y, :] for x,y in zip(distances, indices)], axis = 1)
        cell_type = pd.concat([x.cell_type for x in all_distances], axis = 0, ignore_index = True)
        return Distance(dist_mat, self.cell, cell_type)
def symmetric(self) -> bool:
"""
Check whether the distance matrix is symmetric in terms of datasets and cell types.
Returns
----------
bool
`True` or `False` indicating whether all datasets and cell types are included in the object (thus symmetric).
"""
return np.array_equal(np.unique(self.cell.dataset + self.cell.cell_type), np.unique(self.cell_type.dataset + self.cell_type.cell_type))
def filter_cells(self, check_symmetry: bool = True) -> None:
    """
    Filter out cells whose gene expression profiles do not correlate most with the eigen cell they belong to (i.e., correlate most with other cell types).

    Parameters
    ----------
    check_symmetry
        Whether to check the symmetry of the distance matrix in terms of datasets and cell types.
        (Default: `True`)

    Returns
    ----------
    None
        A :class:`~cellhint.distance.Distance` object with undesirable cells filtered out.
    """
    if check_symmetry and not self.symmetric():
        raise ValueError(
            f"🛑 Cell filtering is not possible. Please provide the matrix with symmetric datasets and cell types")
    # Keep-mask over cells; a cell is dropped when, within its own dataset,
    # its nearest cell type is not its annotated one.
    # NOTE(review): indexing bool_cell[i] with the iterrows() index assumes a
    # default RangeIndex on self.cell — confirm.
    bool_cell = np.ones(self.n_cell, dtype=bool)
    for i, s in self.cell.iterrows():
        flag_dataset = self.cell_type.dataset == s['dataset']
        if self.cell_type.cell_type.values[flag_dataset][self.dist_mat[i][flag_dataset].argmin()] != s['cell_type']:
            bool_cell[i] = False
    if (~bool_cell).sum() == 0:
        logger.info(f"✂️ No cells are filtered out")
    else:
        # Report how many cells are removed, broken down by dataset.
        ds_unique, ds_table = np.unique(self.cell.dataset.values[~bool_cell], return_counts = True)
        if len(ds_unique) == 1:
            logger.info(f"✂️ {(~bool_cell).sum()} cells are filtered out from {ds_unique[0]}")
        else:
            logger.info(f"✂️ {(~bool_cell).sum()} cells are filtered out, including:")
            for m, n in zip(ds_unique, ds_table):
                logger.info(f"    {n} cells from {m}")
        # Drop the flagged rows from both the matrix and the cell table.
        self.dist_mat = self.dist_mat[bool_cell]
        self.cell = self.cell[bool_cell]
        # If a whole (dataset, cell type) combination lost all its cells,
        # drop the corresponding columns as well.
        all_combine = (self.cell_type.dataset + ': ' + self.cell_type.cell_type).values
        left_combine = np.unique(self.cell.dataset + ': ' + self.cell.cell_type)
        if len(left_combine) < len(all_combine):
            column_keep = np.isin(all_combine, left_combine)
            self.dist_mat = self.dist_mat[:, column_keep]
            self.cell_type = self.cell_type[column_keep]
            logger.info(f"✂️ The following cell types are discarded due to low confidence in annotation:")
            for rec in all_combine[~column_keep]:
                logger.info(f"    {rec}")
def to_meta(self, check_symmetry: bool = True, turn_binary: bool = False, return_symmetry: bool = True) -> pd.DataFrame:
    """
    Meta-analysis of cross-dataset cell type dissimilarity or membership.

    Parameters
    ----------
    check_symmetry
        Whether to check the symmetry of the distance matrix in terms of datasets and cell types.
        (Default: `True`)
    turn_binary
        Whether to turn the distance matrix into a cell type membership matrix before meta analysis.
        (Default: `False`)
    return_symmetry
        Whether to return a symmetric dissimilarity matrix by averaging with its transposed form.
        (Default: `True`)

    Returns
    ----------
    :class:`~pandas.DataFrame`
        A :class:`~pandas.DataFrame` object representing the cell-type-level dissimilarity matrix (`turn_binary = False`) or membership matrix (`turn_binary = True`).
    """
    if check_symmetry and not self.symmetric():
        raise ValueError(
            f"🛑 Meta cell analysis is not possible. Concatenate all datasets and cell types beforehand using `concatenate`")
    # Optionally binarize first; symmetry was already verified above, so
    # to_binary can skip its own check when check_symmetry is True.
    use_mat = self.to_binary(False if check_symmetry else True).dist_mat if turn_binary else self.dist_mat
    # Average the rows of each (dataset, cell type) group -> one row per type.
    meta_cell = []
    for _, s in self.cell_type.iterrows():
        meta_cell.append(use_mat[(self.cell.dataset == s['dataset']) & (self.cell.cell_type == s['cell_type']), :].mean(axis = 0))
    meta_cell = pd.DataFrame(np.array(meta_cell))
    # Label rows and columns as "dataset: cell_type".
    meta_cell.index = (self.cell_type.dataset + ': ' + self.cell_type.cell_type).values
    meta_cell.columns = meta_cell.index
    # Symmetrize by averaging with the transpose when requested.
    return (meta_cell + meta_cell.T)/2 if return_symmetry else meta_cell
def to_binary(self, check_symmetry: bool = True):
"""
Turn the distance matrix into a binary matrix representing the estimated cell type membership across datasets.
Parameters
----------
check_symmetry
Whether to check the symmetry of the distance matrix in terms of datasets and cell types.
(Default: `True`)
Returns
----------
:class:`~cellhint.distance.Distance`
A :class:`~cellhint.distance.Distance` object representing the estimated cell type membership across datasets.
"""
if check_symmetry and not self.symmetric():
raise ValueError(
f"🛑 Cannot convert to a binary matrix. Please provide the matrix with symmetric datasets and cell types")
member_mat = np.zeros(self.shape, dtype = int)
datasets = self.cell_type.dataset.values
for dataset in np.unique(datasets):
indices = np.where(datasets == dataset)[0]
member_mat[range(member_mat.shape[0]), indices[self.dist_mat[:, indices].argmin(axis = 1)]] = 1
return Distance(member_mat, self.cell, self.cell_type)
def assign(self) -> None:
"""
Assign each cell to its most similar cell type in each dataset.
Returns
----------
None
Modified object with the result of cell assignment added as `.assignment`.
"""
assignment = {}
for dataset in np.unique(self.cell_type.dataset):
flag = self.cell_type.dataset == dataset
assignment[dataset] = self.cell_type.cell_type.values[flag][self.dist_mat[:, flag].argmin(axis = 1)]
assignment = pd.DataFrame(assignment, index = self.cell.index)
#no need to assign cells for the dataset they belong to
for dataset in assignment.columns:
flag = self.cell.dataset == dataset
assignment.loc[flag, dataset] = self.cell.cell_type.values[flag]
self.assignment = assignment
def to_confusion(self, D1: str, D2: str, check: bool = True) -> tuple:
"""
This function is deprecated. Use `to_pairwise_confusion` and `to_multi_confusion` instead.
Extract the dataset1-by-dataset2 and dataset2-by-dataset1 confusion matrices. Note this function is expected to be applied to a binary membership matrix.
Parameters
----------
D1
Name of the first dataset.
D2
Name of the second dataset.
check
Whether to check names of the two datasets are contained.
(Default: `True`)
Returns
----------
tuple
The dataset1-by-dataset2 and dataset2-by-dataset1 confusion matrices.
"""
if check and not {D1, D2}.issubset(np.unique(self.cell_type.dataset)):
raise ValueError(
f"🛑 Please provide correct dataset names")
D1_col_flag = self.cell_type.dataset == D1
D2_col_flag = self.cell_type.dataset == D2
D1_celltypes = self.cell_type.cell_type.values[D1_col_flag]
D2_celltypes = self.cell_type.cell_type.values[D2_col_flag]
D1_row_flag = self.cell.dataset == D1
D2_row_flag = self.cell.dataset == D2
D1byD2 = pd.DataFrame(np.array([self.dist_mat[D1_row_flag & (self.cell.cell_type == x)][:, D2_col_flag].sum(axis=0) for x in D1_celltypes]), columns = D2_celltypes, index = D1_celltypes)
D2byD1 = pd.DataFrame(np.array([self.dist_mat[D2_row_flag & (self.cell.cell_type == x)][:, D1_col_flag].sum(axis=0) for x in D2_celltypes]), columns = D1_celltypes, index = D2_celltypes)
return D1byD2, D2byD1
def to_pairwise_confusion(self, D1: str, D2: str, check: bool = True) -> tuple:
"""
Extract the dataset1-by-dataset2 and dataset2-by-dataset1 confusion matrices.
Parameters
----------
D1
Name of the first dataset.
D2
Name of the second dataset.
check
Whether to check names of the two datasets are contained.
(Default: `True`)
Returns
----------
tuple
The dataset1-by-dataset2 and dataset2-by-dataset1 confusion matrices.
"""
if check and not {D1, D2}.issubset(np.unique(self.cell_type.dataset)):
raise ValueError(
f"🛑 Please provide correct dataset names")
if not hasattr(self, 'assignment'):
raise AttributeError(
f"🛑 No `.assignment` attribute in the object. Use the `.assign` method first")
D1_flag = (self.cell.dataset == D1)
D2_flag = (self.cell.dataset == D2)
D1byD2 = pd.crosstab(self.cell.cell_type[D1_flag], self.assignment.loc[D1_flag, D2])
D2byD1 = pd.crosstab(self.cell.cell_type[D2_flag], self.assignment.loc[D2_flag, D1])
D1byD2_lack_columns = D2byD1.index.difference(D1byD2.columns)
if len(D1byD2_lack_columns) > 0:
D1byD2 = D1byD2.join(pd.DataFrame(np.zeros((len(D1byD2.index), len(D1byD2_lack_columns)), dtype=int), index = D1byD2.index, columns = D1byD2_lack_columns))
D2byD1_lack_columns = D1byD2.index.difference(D2byD1.columns)
if len(D2byD1_lack_columns) > 0:
D2byD1 = D2byD1.join(pd.DataFrame(np.zeros((len(D2byD1.index), len(D2byD1_lack_columns)), dtype=int), index = D2byD1.index, columns = D2byD1_lack_columns))
return D1byD2, D2byD1.loc[D1byD2.columns, D1byD2.index]
    def to_multi_confusion(self, relation: pd.DataFrame, D: str, check: bool = True) -> tuple:
        """
        Extract the confusion matrices between meta-cell-types defined prior and cell types from a new dataset.
        Parameters
        ----------
        relation
            A :class:`~pandas.DataFrame` object representing the cell type harmonization result across multiple datasets.
        D
            Name of the new dataset to be aligned.
        check
            Whether to check names of the datasets are contained.
            (Default: `True`)
        Returns
        ----------
        tuple
            The confusion matrices between meta-cell-types defined prior and cell types from a new dataset.
        """
        # `relation` interleaves dataset columns with relation-symbol columns,
        # so every second column (0, 2, 4, ...) is a dataset name.
        datasets = relation.columns[0::2]
        if check:
            if not set(datasets).issubset(np.unique(self.cell_type.dataset)):
                raise ValueError(
                        f"🛑 `relation` contains unexpected dataset names")
            if D not in np.unique(self.cell_type.dataset) or D in datasets:
                raise ValueError(
                        f"🛑 Please provide a valid dataset name `D`")
        if not hasattr(self, 'assignment'):
            raise AttributeError(
                    f"🛑 No `.assignment` attribute in the object. Use the `.assign` method first")
        # D1byD2: ground-truth meta cell types (rows) vs assignment to the new dataset D (columns).
        D1_flag = self.cell.dataset.isin(datasets)
        D1_assign = self.assignment[D1_flag]
        # UNASSIGN marks cells matching no `relation` row; NOVEL/REMAIN are
        # placeholder symbols — all three are module-level constants defined
        # elsewhere in this file.
        D1_truth = np.full(D1_assign.shape[0], UNASSIGN, dtype = object)
        for _, s in relation.iterrows():
            celltypes = s.values[0::2]
            non_blank_flag = ~np.isin(celltypes, [NOVEL, REMAIN])
            existing_datasets = datasets[non_blank_flag]
            existing_celltypes = celltypes[non_blank_flag]
            # A cell matches this meta cell type when its assignments agree
            # with every non-placeholder cell type and the cell itself comes
            # from one of those datasets.
            flag = np.all(D1_assign[existing_datasets] == existing_celltypes, axis = 1).values & self.cell[D1_flag].dataset.isin(existing_datasets).values
            D1_truth[flag] = ' '.join(s.values)
        D1_used = D1_truth != UNASSIGN
        D1byD2 = pd.crosstab(D1_truth[D1_used], D1_assign.loc[D1_used, D])
        # D2byD1: original annotations in D (rows) vs predicted meta cell types (columns).
        D2_flag = self.cell.dataset == D
        D2_assign = self.assignment[D2_flag]
        D2_predict = np.full(D2_assign.shape[0], UNASSIGN, dtype = object)
        for _, s in relation.iterrows():
            celltypes = s.values[0::2]
            # Placeholders (NOVEL/REMAIN) match any assignment.
            flags = (D2_assign[datasets] == celltypes) | np.isin(celltypes, [NOVEL, REMAIN])
            D2_predict[np.all(flags, axis = 1).values] = ' '.join(s.values)
        D2_used = D2_predict != UNASSIGN
        D2byD1 = pd.crosstab(self.cell.cell_type[D2_flag][D2_used], D2_predict[D2_used])
        # warning: some harmonized patterns may match no cells at all; pad
        # them in as all-zero rows so the matrix still covers `relation`.
        if relation.shape[0] > D1byD2.shape[0]:
            lost_celltypes = np.setdiff1d(relation.apply(lambda row: ' '.join(row.values), axis = 1).values, D1byD2.index)
            logger.warn(f"⚠️ Warning: no cells are found to match these patterns: {set(lost_celltypes)}. Double check the harmonized relationships before integrating '{D}'")
            D1byD2 = pd.concat([D1byD2, pd.DataFrame(np.zeros((len(lost_celltypes), len(D1byD2.columns)), dtype=int), index = lost_celltypes, columns = D1byD2.columns)], axis = 0)
        # a unique cell type in D2 may be annotated to nothing and filtered
        lost_celltypes = np.setdiff1d(np.unique(self.cell.cell_type[D2_flag]), D2byD1.index)
        if len(lost_celltypes) > 0:
            D2byD1 = pd.concat([D2byD1, pd.DataFrame(np.zeros((len(lost_celltypes), len(D2byD1.columns)), dtype=int), index = lost_celltypes, columns = D2byD1.columns)], axis = 0)
        # return: pad each table with the other's index so the pair aligns.
        D1byD2_lack_columns = D2byD1.index.difference(D1byD2.columns)
        if len(D1byD2_lack_columns) > 0:
            D1byD2 = D1byD2.join(pd.DataFrame(np.zeros((len(D1byD2.index), len(D1byD2_lack_columns)), dtype=int), index = D1byD2.index, columns = D1byD2_lack_columns))
        D2byD1_lack_columns = D1byD2.index.difference(D2byD1.columns)
        if len(D2byD1_lack_columns) > 0:
            D2byD1 = D2byD1.join(pd.DataFrame(np.zeros((len(D2byD1.index), len(D2byD1_lack_columns)), dtype=int), index = D2byD1.index, columns = D2byD1_lack_columns))
        return D1byD2, D2byD1.loc[D1byD2.columns, D1byD2.index]
| Teichlab/cellhint | cellhint/distance.py | distance.py | py | 26,272 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "anndata.AnnData... |
25971386553 | """
.. testsetup:: *
from zasim.cagen.utils import *
"""
# This file is part of zasim. zasim is licensed under the BSD 3-clause license.
# See LICENSE.txt for details.
from ..features import HAVE_TUPLE_ARRAY_INDEX
from itertools import product
import numpy as np
if HAVE_TUPLE_ARRAY_INDEX:
    def offset_pos(pos, offset):
        """Offset a position by an offset. Any amount of dimensions should work.

        >>> offset_pos((1, ), (5, ))
        (6,)
        >>> offset_pos((1, 2, 3), (9, 8, 7))
        (10, 10, 10)"""
        if len(pos) == 1:
            return (pos[0] + offset[0],)
        return tuple(p + o for p, o in zip(pos, offset))
else:
    def offset_pos(pos, offset):
        """Offset a position by an offset. Only works for 1d."""
        # Accept either bare numbers or 1-tuples for both arguments.
        p = pos[0] if isinstance(pos, tuple) else pos
        o = offset[0] if isinstance(offset, tuple) else offset
        return p + o
def gen_offset_pos(pos, offset):
    """Generate code to offset a position by an offset.

    >>> gen_offset_pos(["i", "j"], ["foo", "bar"])
    ['i + foo', 'j + bar']"""
    expressions = []
    for axis_name, shift in zip(pos, offset):
        expressions.append("%s + %s" % (axis_name, shift))
    return expressions
def dedent_python_code(code):
    '''
    Dedent a bit of python code, like this:
    >>> print dedent_python_code("""# update the histogram
    ... if result != center:
    ...     self.target.histogram[result] += 1""")
    # update the histogram
    if result != center:
        self.target.histogram[result] += 1
    '''
    first, *rest = code.split("\n")
    # The first line is taken as having no leading whitespace; the second
    # line's indentation is stripped from every following line.
    dedented = [first]
    if rest:
        indent = len(rest[0]) - len(rest[0].lstrip())
        if indent > 0:
            for line in rest:
                prefix, body = line[:indent], line[indent:]
                assert line == "" or prefix.isspace()
                dedented.append(body)
        else:
            dedented.extend(rest)
    return "\n".join(dedented)
def rule_nr_to_multidim_rule_arr(number, digits, base=2):
    """Given the rule `number`, the number of cells the neighbourhood has
    (as `digits`) and the `base` of the cells, this function calculates the
    multidimensional rule table for computing that rule."""
    # int8 suffices while cell values fit a signed byte.
    if base < 256: dtype = "int8"
    else: dtype = "int16" # good luck with that.
    res = np.zeros((base,) * digits, dtype=dtype)
    entries = base ** digits
    blubb = base ** entries
    # Walk positions from the highest neighbourhood configuration downwards,
    # peeling one base-`base` digit of `number` off per position.
    # NOTE: `//=` and `range` replace the Python-2-only `/=` (int division)
    # and `xrange`; `//=` keeps `blubb` an exact int on Python 3 as well.
    for position in product(*([range(base - 1, -1, -1)] * digits)):
        blubb //= base
        d = int(number // blubb)
        number -= d * blubb
        res[position] = d
    return res
def rule_nr_to_rule_arr(number, digits, base=2):
    """Given a rule `number`, the number of cells the neighbourhood has
    (as `digits`) and the `base` of the cells, this function calculates the
    lookup array for computing that rule.

    >>> rule_nr_to_rule_arr(110, 3)
    [0, 1, 1, 1, 0, 1, 1, 0]
    >>> rule_nr_to_rule_arr(26, 3, 3)
    [2, 2, 2, ...]
    """
    entries = base ** digits
    result = [0] * entries
    blubb = base ** entries
    # Extract the base-`base` digits of `number`, most significant first.
    # NOTE: `//=` replaces the Python-2-only `/=` int division; it keeps
    # `blubb` an exact int on Python 3 (no float drift for big numbers).
    for e in range(entries - 1, -1, -1):
        blubb //= base
        d = int(number // blubb)
        number -= d * blubb
        result[e] = d
    return result
def elementary_digits_and_values(neighbourhood, base=2, rule_arr=None):
    """From a neighbourhood, the base of the values used and the array that
    holds the results for each combination of neighbourhood values, create a
    list of dictionaries with the neighbourhood values paired with their
    result_value ordered by the position like in the rule array.

    If the rule_arr is None, no result_value field will be generated."""
    digits_and_values = []
    offsets = neighbourhood.offsets
    names = neighbourhood.names
    digits = len(offsets)
    # One dict per neighbourhood configuration, in rule-array order.
    for i in range(base ** digits):
        values = rule_nr_to_rule_arr(i, digits, base)
        digits_and_values.append(dict(zip(names, values)))
    if rule_arr is not None:
        # Flat arrays are indexed directly; multidimensional tables are
        # traversed in the same descending-position order used to build them.
        # NOTE: `range` replaces the Python-2-only `xrange`.
        if not isinstance(rule_arr, np.ndarray) or len(rule_arr.shape) == 1:
            indices = enumerate(range(base ** digits))
        else:
            indices = enumerate(reversed(list(product(*([range(base - 1, -1, -1)] * digits)))))
        for index, rule_idx in indices:
            digits_and_values[index].update(result_value = rule_arr[rule_idx])
    return digits_and_values
| timo/zasim | zasim/cagen/utils.py | utils.py | py | 4,490 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "features.HAVE_TUPLE_ARRAY_INDEX",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.n... |
71645960508 | from django.urls import path
from . import views
# Namespace used for reversing these routes (e.g. "blog:index").
app_name = 'blog'

urlpatterns = [
    # Blog landing page.
    path('', views.index, name="index"),
    # Post details, addressed by primary key plus slug.
    path('detalhes/<int:pk>/<slug:slug>', views.detail, name="details"),
    # Create a new post.
    path('post/novo/', views.post, name="new_post"),
    # Edit the post with the given primary key.
    path('editar/post/<int:pk>', views.edit, name="edit"),
    # Filter view parameterized by pk and username.
    path('filter/<int:pk>/<str:username>', views.filter, name="filter"),
    # Search endpoint.
    path('search/', views.search, name="search"),
    # Delete the post with the given primary key.
    path('delete/<int:pk>', views.delete, name="delete_post"),
    # Contact page.
    path('contato/', views.contato, name="contato"),
]
| eduardoferreira97/Blog | blog/urls.py | urls.py | py | 553 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
25847181178 | import tkinter as tk
import sqlite3
def guardar_palabras():
    """Read the five entry widgets, persist the words, and close the window."""
    palabras = [entrada1.get(), entrada2.get(), entrada3.get(), entrada4.get(), entrada5.get()]
    # Connect to the database.
    conexion = sqlite3.connect('basedatos.db')
    try:
        cursor = conexion.cursor()
        # Create the "palabras" table if it does not exist yet.
        cursor.execute('''CREATE TABLE IF NOT EXISTS palabras
                    (id INTEGER PRIMARY KEY AUTOINCREMENT,
                    palabra TEXT)''')
        # Drop the previously stored words.
        cursor.execute("DELETE FROM palabras")
        # Insert the latest five words in one batch.
        cursor.executemany("INSERT INTO palabras (palabra) VALUES (?)", [(p,) for p in palabras])
        conexion.commit()
    finally:
        # Always release the connection, even if an SQL statement failed
        # (the original leaked it on error).
        conexion.close()
    ventana.destroy()
# --- First window: ask the user for their five words ------------------------
ventana = tk.Tk()
ventana.title("5 Palabras")

# Prompt shown above the entry fields.
frase_inicio = "Si fueras 5 palabras, cuáles serías?:"
etiqueta_frase = tk.Label(ventana, text=frase_inicio)
etiqueta_frase.pack()

# Five single-line inputs, one word each.
entrada1 = tk.Entry(ventana)
entrada1.pack()
entrada2 = tk.Entry(ventana)
entrada2.pack()
entrada3 = tk.Entry(ventana)
entrada3.pack()
entrada4 = tk.Entry(ventana)
entrada4.pack()
entrada5 = tk.Entry(ventana)
entrada5.pack()

# Saving also destroys this window (see guardar_palabras), ending mainloop().
boton = tk.Button(ventana, text="Aceptar", command=guardar_palabras)
boton.pack()

ventana.mainloop()
def mostrar_ventana1():
    """Open a window listing the words currently stored in the database."""
    ventana1 = tk.Toplevel()
    ventana1.title("Tus palabras")

    etiqueta1 = tk.Label(ventana1, text="Estas son tus palabras:")
    etiqueta1.pack()

    # Fetch all stored words before building the labels.
    conexion = sqlite3.connect('basedatos.db')
    try:
        cursor = conexion.cursor()
        cursor.execute("SELECT palabra FROM palabras")
        palabras = cursor.fetchall()
    finally:
        # Always release the connection, even if the query failed
        # (the original leaked it on error).
        conexion.close()

    # One label per stored word.
    for palabra in palabras:
        etiqueta = tk.Label(ventana1, text=palabra[0])
        etiqueta.pack()
def mostrar_ventana2():
    """Open a window that lets the user enter and save five new words."""
    ventana2 = tk.Toplevel()
    ventana2.title("Palabras nuevas")

    frase_inicio = "Si fueras 5 palabras, cuáles serías?:"
    etiqueta_frase = tk.Label(ventana2, text=frase_inicio)
    etiqueta_frase.pack()

    entrada1 = tk.Entry(ventana2)
    entrada1.pack()
    entrada2 = tk.Entry(ventana2)
    entrada2.pack()
    entrada3 = tk.Entry(ventana2)
    entrada3.pack()
    entrada4 = tk.Entry(ventana2)
    entrada4.pack()
    entrada5 = tk.Entry(ventana2)
    entrada5.pack()

    def guardar_palabras_nuevas():
        # Collect the five entered words (closure over this window's entries).
        palabras = [entrada1.get(), entrada2.get(), entrada3.get(), entrada4.get(), entrada5.get()]
        # Connect to the database.
        conexion = sqlite3.connect('basedatos.db')
        cursor = conexion.cursor()
        # Create the "palabras" table if it does not exist.
        cursor.execute('''CREATE TABLE IF NOT EXISTS palabras
                    (id INTEGER PRIMARY KEY AUTOINCREMENT,
                    palabra TEXT)''')
        # Remove the previously stored words.
        cursor.execute("DELETE FROM palabras")
        # Insert the latest five words into "palabras".
        for palabra in palabras:
            cursor.execute("INSERT INTO palabras (palabra) VALUES (?)", (palabra,))
        # Save changes and close the connection.
        conexion.commit()
        conexion.close()

    boton = tk.Button(ventana2, text="Aceptar", command=guardar_palabras_nuevas)
    boton.pack()
# --- Main menu window: view the stored words or enter new ones --------------
ventana_principal = tk.Tk()
ventana_principal.title("Ventana Principal")

boton_ventana1 = tk.Button(ventana_principal, text="Tus palabras", command=mostrar_ventana1)
boton_ventana1.pack()

boton_ventana2 = tk.Button(ventana_principal, text="Palabras nuevas", command=mostrar_ventana2)
boton_ventana2.pack()

ventana_principal.mainloop()
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_num... |
42937022866 | import datetime
import sqlite3
import os
import sys
from PyQt6.QtWidgets import *
from PyQt6.QtCore import Qt
from docxtpl import DocxTemplate
class mailbackGenWindow(QMainWindow):
    """Main window for generating mail-back letters and envelopes.

    Pulls client data and return reasons from an SQLite database, fills
    docx templates with them, and sends the results to the printer.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Test Mailback Letter Generator")
        self.setFixedSize(722, 479)
        main_layout = QVBoxLayout()
        # Shared connection/cursor reused by all query helpers below.
        self.db = sqlite3.connect("test_mailback.db")
        self.cur = self.db.cursor()
        client_label = QLabel("Select Client: ")
        self.client_select = QComboBox()
        self.populateClientSelect()
        def setAndGet():
            # Refresh the address fields whenever the selected client changes.
            self.getDefaultAddress()
            self.setDefaultAddress()
        self.client_select.currentIndexChanged.connect(setAndGet)
        reason_label = QLabel("Select All Reasons for Return ")
        self.reason_select = QFrame()
        self.reason_layout = QGridLayout()
        # Checkboxes created by populateReasonLayout; read in generateLetter.
        self.reasonCheckBoxList = []
        # Three-line mailing address (name / street / city-state-zip).
        self.address1 = QLineEdit()
        self.address1.setFixedWidth(200)
        self.address2 = QLineEdit()
        self.address2.setFixedWidth(200)
        self.address3 = QLineEdit()
        self.address3.setFixedWidth(200)
        self.clear_address_button = QPushButton("Clear Address")
        self.clear_address_button.clicked.connect(self.clearAddress)
        self.default_address_button = QPushButton("Default")
        self.default_address_button.clicked.connect(self.setDefaultAddress)
        self.populateReasonLayout()
        self.reason_select.setLayout(self.reason_layout)
        # Shown when the user submits without selecting any reason.
        self.reason_error = QLabel("Please select at least one reason.")
        self.reason_error.setStyleSheet("color: red")
        self.reason_error.hide()
        self.envelope_button = QPushButton("Generate Envelope")
        self.envelope_button.clicked.connect(self.printEnvelope)
        self.large_envelope_button = QPushButton("Large Envelope Sheet")
        self.large_envelope_button.clicked.connect(self.printLargeEnvelope)
        self.submit_button = QPushButton("Generate Letter")
        self.submit_button.clicked.connect(self.generateLetter)
        widgets = [client_label, self.client_select, reason_label, self.reason_select, self.reason_error,
                   self.submit_button, self.envelope_button, self.large_envelope_button]
        for w in widgets:
            main_layout.addWidget(w)
        widget = QWidget()
        widget.setLayout(main_layout)
        # Set the central widget of the Window. Widget will expand
        # to take up all the space in the window by default.
        self.setCentralWidget(widget)
        # Docx templates filled via render() in the handlers below.
        self.template = DocxTemplate("test_mailback_template.docx")
        self.envelope = DocxTemplate("mailout.docx")
        self.big_envelope = DocxTemplate("large envelope template.docx")
        self.current_date = datetime.date.today().strftime('%m/%d/%Y')
        # Cached data of the currently selected client.
        self.currentClient = ""
        self.currentAddr1 = ""
        self.currentAddr2 = ""
        self.currentPhoneNumber = ""
        self.getDefaultAddress()
        self.setDefaultAddress()

    def populateClientSelect(self):
        """Fill the client dropdown with all client query names."""
        tups = self.cur.execute("""SELECT query_name FROM client
                                ORDER BY query_name ASC;""")
        clients = [name for t in tups for name in t]
        self.client_select.addItems(clients)

    def getDefaultAddress(self):
        """Load the selected client's name, address and phone into the cache."""
        client_name = self.client_select.currentText()
        client_row = self.cur.execute("""SELECT full_name, address, phone_number
                                    FROM client
                                    WHERE query_name = ?""", (client_name,))
        self.currentClient, full_addr, self.currentPhoneNumber = [c for t in client_row for c in t]
        # The address column stores street and city parts separated by '*'.
        self.currentAddr1, self.currentAddr2 = full_addr.split('*')

    def setDefaultAddress(self):
        """Copy the cached client address into the three address fields."""
        self.address1.setText(self.currentClient)
        self.address2.setText(self.currentAddr1)
        self.address3.setText(self.currentAddr2)

    def clearAddress(self):
        """Blank out all three address fields."""
        self.address1.clear()
        self.address2.clear()
        self.address3.clear()

    def populateReasonLayout(self):
        """Build one checkbox group per reason type, plus the address pane."""
        reasonTypes = self.cur.execute("""SELECT DISTINCT type FROM mailback_reason;""")
        reasonTypes = [t for rt in reasonTypes for t in rt]
        print(reasonTypes)
        # NOTE(review): `column`/`row` are passed to addWidget in that order,
        # which is Qt's (row, column) parameter order — the names appear
        # transposed relative to the actual grid axes; confirm intent.
        column = 0
        row = 0
        for t in reasonTypes:
            if column == 2:
                column = 0
                row += 1
            frame = QFrame()
            layout = QVBoxLayout()
            layout.addWidget(QLabel(t + ':'))
            reasons = self.cur.execute("""SELECT reason FROM mailback_reason
                                    WHERE type = ?;""", (t,))
            reasons = [r for rt in reasons for r in rt]
            for r in reasons:
                box = QCheckBox(r)
                self.reasonCheckBoxList.append(box)
                layout.addWidget(box)
            frame.setLayout(layout)
            self.reason_layout.addWidget(frame, column, row, Qt.AlignmentFlag.AlignTop)
            column += 1
        if column == 2:
            column = 0
            row += 1
        # Address editing pane occupies the next free grid cell.
        frame = QFrame()
        layout = QGridLayout()
        layout.addWidget(QLabel('Name:'), 0, 0, Qt.AlignmentFlag.AlignLeft)
        layout.addWidget(self.address1, 0, 1, Qt.AlignmentFlag.AlignLeft)
        layout.addWidget(QLabel('Address:'), 1, 0, Qt.AlignmentFlag.AlignLeft)
        layout.addWidget(self.address2, 1, 1, Qt.AlignmentFlag.AlignLeft)
        layout.addWidget(QLabel('City/State/Zip:'), 2, 0, Qt.AlignmentFlag.AlignLeft)
        layout.addWidget(self.address3, 2, 1, Qt.AlignmentFlag.AlignLeft)
        layout.addWidget(self.clear_address_button, 3, 0, Qt.AlignmentFlag.AlignLeft)
        layout.addWidget(self.default_address_button, 3, 1, Qt.AlignmentFlag.AlignLeft)
        frame.setLayout(layout)
        self.reason_layout.addWidget(frame, column, row, Qt.AlignmentFlag.AlignLeft)

    def generateLetter(self):
        """Render the letter template with the selected reasons and print it.

        Returns 1 (without printing) when no reason checkbox is selected.
        """
        #FOR SETTING FIXED WIDTH/HEIGHT
        #print(self.width())
        #print(self.height())
        # avoids Microsoft Word opening dialog box saying that letter.docx caused error
        if os.path.exists("letter.docx"):
            os.remove("letter.docx")
        # Gather checked reasons and reset the checkboxes for the next run.
        reasons = []
        for box in self.reasonCheckBoxList:
            if box.isChecked():
                reasons.append(box.text())
                box.setChecked(False)
        # Join the reasons into an English list ("a", "a and b", "a, b, and c").
        reason = ""
        rlength = len(reasons)
        if rlength == 1:
            reason = reasons[0]
        elif rlength == 2:
            reason = reasons[0] + ' and ' + reasons[1]
        elif rlength > 2:
            for i in range(0, rlength):
                if i != rlength - 1:
                    reason += reasons[i] + ', '
                else:
                    reason += 'and ' + reasons[i]
        else: # reasons is empty
            self.reason_error.show()
            return 1
        self.reason_error.hide()
        # Disable the button while rendering/printing to avoid double submits.
        self.submit_button.setEnabled(False)
        fill_in = {"date": self.current_date,
                   "client": self.currentClient,
                   "reason": reason,
                   "address_1": self.currentAddr1,
                   "address_2": self.currentAddr2,
                   "phone_number": self.currentPhoneNumber
                   }
        self.template.render(fill_in)
        self.template.save('letter.docx')
        os.startfile("letter.docx", "print")
        self.submit_button.setEnabled(True)

    def printEnvelope(self):
        """Render the standard envelope template from the address fields and print it."""
        self.envelope_button.setEnabled(False)
        fill_in = {"client": self.address1.text(),
                   "addr_1": self.address2.text(),
                   "addr_2": self.address3.text()}
        self.envelope.render(fill_in)
        self.envelope.save('envelope.docx')
        os.startfile("envelope.docx", "print")
        self.envelope_button.setEnabled(True)

    def printLargeEnvelope(self):
        """Render the large envelope template from the address fields and print it."""
        self.large_envelope_button.setEnabled(False)
        fill_in = {"client": self.address1.text(),
                   "addr_1": self.address2.text(),
                   "addr_2": self.address3.text()}
        self.big_envelope.render(fill_in)
        self.big_envelope.save('big_envelope.docx')
        os.startfile("big_envelope.docx", "print")
        self.large_envelope_button.setEnabled(True)
def main():
    """Create the Qt application, show the generator window, and run the event loop."""
    application = QApplication(sys.argv)
    main_window = mailbackGenWindow()
    main_window.show()
    application.exec()


if __name__ == "__main__":
    main()
| Centari2013/PublicMailbackGeneratorTest | main.py | main.py | py | 8,527 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "docxtpl.DocxTemplate",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "docxtpl.DocxTemplate",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "docxtpl.Doc... |
27385560013 | from road import Road
from copy import deepcopy
from collections import deque
from vehicleGenerator import VehicleGenerators
import numpy as np
from scipy.spatial import distance
import random
class Simulator:
    """Discrete-time traffic simulator over a graph of roads.

    Holds the simulation clock, the road network, traffic signals and
    vehicle generators, and advances everything one `dt` per `update()`.
    """

    def __init__(self, config=None) -> None:
        """Build a simulator with default settings, then apply `config` overrides.

        Parameters
        ----------
        config : dict | None
            Attribute-name -> value overrides applied after the defaults.
            (Fixed from the original mutable default `config={}`, which is
            shared between calls; behavior for all callers is unchanged.)
        """
        self.setDefaultConfig()
        # update vals
        if config is None:
            config = {}
        for attr, val in config.items():
            setattr(self, attr, val)

    def setDefaultConfig(self):
        """Reset the simulation state to its defaults."""
        # time — starts at 520; wraps at 1440 below, so the unit is
        # presumably minutes of a day (520 = 08:40). TODO confirm.
        self.t = 520.0
        # time step
        self.dt = 1 / 60
        # frames count
        self.frameCount = 0
        # roads, keyed by (startCross, endCross)
        self.roads = {}
        self.vehicleGens = deque()
        self.trafficSignals = deque()

    def createTrafficSignals(self, trafficSignal):
        """Register a traffic signal so it gets updated every tick."""
        self.trafficSignals.append(trafficSignal)

    def createRoad(self, start, end, startCross, endCross):
        """Create a Road between two crossings and index it by their ids."""
        road = Road(start, end, startCross, endCross)
        self.roads[(startCross, endCross)] = road
        # return road

    def createRoads(self, roadsList):
        """Create many roads from an iterable of createRoad argument tuples."""
        for roadCoords in roadsList:
            self.createRoad(*roadCoords)

    def createRoadsFromGraph(self, graph):
        """Create roads from an adjacency list: [((x, y), [neighbour ids]), ...]."""
        self.graph = graph
        for idx in range(len(graph)):
            start = graph[idx][0]
            if len(graph[idx][1]) > 0:
                for vertexIdx in graph[idx][1]:
                    end = (graph[vertexIdx][0][0], graph[vertexIdx][0][1])
                    length = distance.euclidean(start, end)
                    sin = (end[1] - start[1]) / length
                    cos = (end[0] - start[0]) / length
                    # Shift each direction 0.3 units sideways so the two
                    # directions of a link do not overlap.
                    self.createRoad((start[0] - 0.3 * sin, start[1] + 0.3 * cos),
                                    (end[0] - 0.3 * sin, end[1] + 0.3 * cos),
                                    idx, vertexIdx)

    def createGen(self, genConfig):
        """Attach a vehicle generator built from `genConfig`."""
        self.vehicleGens.append(VehicleGenerators(self, genConfig))

    def update(self):
        """Advance the whole simulation by one time step `dt`."""
        # Updating every road
        for roadKey in self.roads:
            road = self.roads[roadKey]
            if len(road.vehicles) > 0 and road.vehicles[0].currentRoadIndex + 1 < len(road.vehicles[0].path):
                vehicle = road.vehicles[0]
                nextRoad = self.roads[vehicle.path[vehicle.currentRoadIndex + 1]]
            else:
                # NOTE(review): this branch calls road.update twice (here and
                # right below), advancing such roads at double rate — looks
                # like a bug, but it is preserved to keep behavior identical;
                # confirm intent before removing.
                road.update(self.dt, self.t)
                nextRoad = None
            road.update(self.dt, self.t, nextRoad)
        # Checking the roads for out of bounds vehicle
        for roadKey in self.roads:
            road = self.roads[roadKey]
            # If road does not have vehicles, then continue
            if len(road.vehicles) == 0: continue
            # If not
            vehicle = road.vehicles[0]
            # If the first vehicle is out of road bounds
            if vehicle.x >= road.length:
                # if vehicle just wanders:
                if len(vehicle.path) == 1:
                    vehicle.currentRoadIndex = 1
                    newVehicle = deepcopy(vehicle)
                    newVehicle.x = 0
                    crossRoad = self.graph[road.endCross]
                    if len(crossRoad[1]) > 0:
                        if newVehicle.decideToRide():
                            # Pick the least-loaded outgoing road (ties broken
                            # at random).
                            carNums = [len(self.roads[(road.endCross, k)].vehicles) for k in crossRoad[1]]
                            minNum = np.min(carNums)
                            minIdx = [i for i, x in enumerate(carNums) if x == minNum]
                            nextCross = crossRoad[1][random.choice(minIdx)]
                            self.roads[(road.endCross, nextCross)].vehicles.append(newVehicle)
                        else:
                            pass
                # If vehicle has a next road
                if vehicle.currentRoadIndex + 1 < len(vehicle.path):
                    # Updating the current road to next road
                    vehicle.currentRoadIndex += 1
                    # Creating a copy and reseting some vehicle properties
                    newVehicle = deepcopy(vehicle)
                    newVehicle.x = 0
                    # Adding it to the next road
                    nextRoadIndex = vehicle.path[vehicle.currentRoadIndex]
                    self.roads[nextRoadIndex].vehicles.append(newVehicle)
                # In all cases, removing it from its road
                road.vehicles.popleft()
        for signal in self.trafficSignals:
            signal.update(self)
        for gen in self.vehicleGens:
            gen.update()
            # Rush-hour windows get a higher spawn rate.
            if (self.t >= 540 and self.t <= 660) or (self.t >= 1020 and self.t <= 1080):
                gen.vehicleRate = 190
            else:
                gen.vehicleRate = 40
        self.t += self.dt
        # Wrap the clock at the end of the (1440-minute) day.
        if self.t >= 1440:
            self.t = 0
| EHAT32/alg_labs_sem_7 | lab3/simulator.py | simulator.py | py | 4,713 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "road.Road",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.... |
10900131686 | from flask import Flask
from flask import Flask, request, render_template, send_file
app = Flask(__name__)

@app.route('/cookiestealer/', methods=['GET'])
def cookieStealer():
    # Security-lab demo endpoint: logs whatever cookies the browser sends
    # along with the request, then serves an innocuous image in response.
    filename = 'cookiemonster.jpg'
    print("This is the cookie: \n")
    print(request.cookies)
    print("")
    return send_file(filename, mimetype='image/jpeg')

if __name__ == '__main__':
    # debug=True is for the lab only; never expose this in production.
    app.run(port=3100, debug=True)
| FelixDryselius/SecureDataSystemsGroup17 | lab1/xss/3rd_party_cookie_stealer.py | 3rd_party_cookie_stealer.py | py | 420 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.cookies",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.send_file",... |
74479990587 | import json
from random import uniform
import matplotlib.pyplot as plt
class Scanner:
    """Finds apartments priced below the average of their nearest neighbours.

    Reads apartment records (``coordinates`` and ``price``) from a JSON file
    and compares each apartment's price against the mean price of its
    `n_neighbours` nearest records.
    """

    def __init__(self, data_filename, n_neighbours):
        """Remember the JSON data file path and the neighbour count."""
        self.data_filename = data_filename
        self.n_neighbours = n_neighbours

    def scanner(self, visualize_data=False):
        """Return under-priced apartments, most profitable first.

        Each result item is ``[apartment, neighbour_gap, area_gap]`` where
        `neighbour_gap` is the neighbours' mean price minus the apartment
        price and `area_gap` is the area-wide mean price minus the
        apartment price. Results are sorted by `neighbour_gap` descending.
        """
        # `with` guarantees the file handle is closed (the original opened
        # the file and never closed it).
        with open(self.data_filename, encoding="utf-8") as f:
            data = json.load(f)
        data = sorted(data, key=lambda x: x["coordinates"])
        avg_area_price = 0
        result = []
        differences = []
        for apartment in range(len(data)):
            avg_neigh_price = 0
            # NOTE(review): the neighbour set includes the apartment itself
            # (distance 0) — presumably intentional; confirm.
            neighbours = self.find_neighbours(data[apartment], data)
            for n in neighbours:
                avg_neigh_price += n["price"]
            avg_neigh_price /= self.n_neighbours
            if data[apartment]["price"] < avg_neigh_price:
                # append apartment data and difference between
                # average prices of neighbours and apartment price
                result.append(
                    [data[apartment], avg_neigh_price - data[apartment]["price"]]
                )
            avg_area_price += data[apartment]["price"]
            differences.append([data[apartment], neighbours])
        avg_area_price /= len(data)
        for el in result:
            # append difference between average price in search area
            # and apartment price (profitability)
            el.append(avg_area_price - el[0]["price"])
        # sort data by apartment profitability
        result = sorted(result, key=lambda x: x[1], reverse=True)
        if visualize_data:
            self.visualize_n_neighbours(differences)
        return result

    def find_neighbours(self, apartment, data):
        """Return the `n_neighbours` records closest to `apartment` (Euclidean)."""
        data = sorted(
            data,
            key=lambda x: (
                (apartment["coordinates"][0] - x["coordinates"][0]) ** 2
                + (apartment["coordinates"][1] - x["coordinates"][1]) ** 2
            )
            ** 0.5,
        )
        neighbours = data[: self.n_neighbours]
        return neighbours

    @staticmethod
    def visualize_n_neighbours(differences):
        """Plot each apartment connected to its neighbours, one color per apartment."""
        colors = ["b", "g", "r", "c", "m", "y", "k"]
        colors_i = 0
        for diff in differences:
            color = colors[colors_i % len(colors)]
            colors_i += 1
            el_coord = diff[0]["coordinates"]
            for i in range(len(diff[1])):
                diff_coord = diff[1][i]["coordinates"]
                # add small number to coordinates so that lines on plot do not overlap
                d = uniform(0.003, 0.005)
                plt.plot(
                    [el_coord[0] + d, diff_coord[0] + d],
                    [el_coord[1] + d, diff_coord[1] + d],
                    "-ro",
                    color=color,
                )
        plt.show()
| SergeyBurik/profitable_apartments_parser | profitable_apartments_parser/scanner/scanner.py | scanner.py | py | 2,832 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
43371048993 | import pygame
from pygame.locals import KEYDOWN, K_ESCAPE, QUIT
from os import path
import parameters.enums as en
from Objects.player import Player, Wall
from Objects.map import Map
from Objects.robot import Robot
import numpy as np
from Objects.machinery import Machinery, Destiny
import sys
import torch
from Objects.utils import GameBuffer
import pickle
class Game(pygame.sprite.Sprite):
    """Pygame environment pairing a human Player with a learning Robot.

    Builds the level from a text map, runs the render/update loop, and
    feeds a downscaled screen capture to the sprites each frame.
    """

    # NOTE(review): SCORE is read in run() but never changed anywhere in
    # this class — presumably mutated by game objects elsewhere; confirm.
    SCORE = 0

    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((en.WIDTH, en.HEIGHT))
        self.clock = pygame.time.Clock()
        self.load_data()
        # Resume robot training from disk when a checkpoint file exists.
        self.checkpoint = "robotCheckpoint" if path.exists("robotCheckpoint") else None

    def load_data(self):
        """Load the tile map from the Static folder next to this file."""
        game_folder = path.dirname(__file__)
        self.map = Map(game_folder + "/Static/map.txt")

    def new(self):
        """Initialize all variables."""
        self.buffer = GameBuffer()
        self.allSprites = pygame.sprite.Group()
        self.machineryParts = pygame.sprite.Group()
        self.destination = pygame.sprite.Group()
        self.robots = pygame.sprite.Group()
        self.walls = pygame.sprite.Group()
        self.part = Machinery(self)
        # Instantiate entities from the map characters:
        # '1' wall, 'D' destiny, 'P' player, 'R' robot.
        for j, row in enumerate(self.map.data):
            for i, col in enumerate(row):
                if col == "1":
                    Wall(self, i, j)
                elif col == "D":
                    self.destiny = Destiny(self, i, j)
                elif col == "P":
                    self.player = Player(self, self.buffer, i, j)
                elif col == "R":
                    self.robot = Robot(self, self.buffer, i, j, self.checkpoint)

    def quit(self):
        """Shut pygame down and exit the process."""
        pygame.quit()
        sys.exit()

    def events(self):
        """Handle window events; save the robot before quitting on ESC/close."""
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    self.robot.save()
                    self.quit()
            if event.type == QUIT:
                self.robot.save()
                self.quit()

    def get_screen(self):
        """Return the screen downscaled to 60x60 as a (3, H, W) torch tensor."""
        screen = pygame.transform.scale(self.screen, (60, 60))
        screen = np.array(pygame.surfarray.array3d(screen))
        # pygame delivers (width, height, channel); reorder to channel-first.
        screen = screen.transpose((2, 1, 0))
        return torch.from_numpy(screen)

    def run(self):
        """Main loop: tick, capture the screen, update sprites, draw, train."""
        self.playing = True
        prevScore = self.SCORE
        while self.playing:
            # Seconds elapsed this frame, capped at en.FPS.
            self.dt = self.clock.tick(en.FPS) / 1000
            screenMatrix = self.get_screen()
            self.events()
            self.updates(windowPixel=screenMatrix)
            self.draw()
            # Trigger a training step whenever the score changed.
            if prevScore != self.SCORE:
                self.robot.train()
                prevScore = self.SCORE

    def updates(self, **args):
        """Update all sprites; Robot and Player also receive the screen capture."""
        for sprite in self.allSprites:
            if isinstance(sprite, Robot) or isinstance(sprite, Player):
                sprite.update(args["windowPixel"])
            else:
                sprite.update()

    def draw_grid(self):
        """Draw the background tile grid lines."""
        for x in range(0, en.WIDTH, en.TILE_SIZE):
            pygame.draw.line(self.screen, en.LIGHTGREY, (x, 0), (x, en.HEIGHT))
        for y in range(0, en.HEIGHT, en.TILE_SIZE):
            pygame.draw.line(self.screen, en.LIGHTGREY, (0, y), (en.WIDTH, y))

    def draw(self):
        """Clear the screen, draw the grid and every sprite, then flip buffers."""
        self.screen.fill(en.BGCOLOR)
        self.draw_grid()
        self.allSprites.draw(self.screen)
        pygame.display.flip()
# Entry point: build the game once, then run endless episodes —
# each iteration resets the world (new) and plays it to completion (run).
g = Game()
while True:
    g.new()
    g.run()
| anfego22/rele | main.py | main.py | py | 3,394 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.display... |
13321588984 | from airflow.models import Variable
import datetime
from .test_utils import create_test_database, db_connect
from dags.rock.rock_content_items import ContentItem
from dags.rock.rock_content_items_connections import ContentItemConnection
import vcr
create_test_database()
def test_run_fetch_and_save_content_item_connections(monkeypatch):
    """End-to-end test of content-item connection syncing.

    Fetches content items and their connections from recorded Rock API
    responses (VCR cassettes), verifies the rows written to the test
    database, then verifies that a deletion run removes the right row.
    """

    def mock_get(config, deserialize_json=True, default_var=None):
        # Stand-in for airflow's Variable.get, keyed on the variable-name suffix.
        if default_var:
            return default_var
        if "_rock_api" in config:
            return "https://rock.apollos.app/api"
        if "_rock_token" in config:
            return "ASZjZWdf3IqrbZX9sedtB4wb"
        # BUG FIX: the original wrote `if "_rock_config":` — a non-empty string
        # literal, which is always truthy — so ANY unmatched variable name fell
        # through to the rock config dict.  Test membership like the branches above.
        if "_rock_config" in config:
            return {
                "CONTENT_MAPPINGS": {
                    "ContentSeriesContentItem": {"ContentChannelTypeId": [6]},
                    "DevotionalContentItem": {"ContentChannelId": [7]},
                    "WeekendContentItem": {"ContentChannelId": [5]},
                },
                "PERSONA_CATEGORY_ID": 186,
                "SERIES_CATEGORY_ORIGIN_IDS": [4, 33],
            }

    monkeypatch.setattr(
        Variable,
        "get",
        mock_get,
    )

    content_item = ContentItem(
        {
            "client": "test",
            "execution_date": datetime.datetime(
                2005, 7, 14, 12, 30, tzinfo=datetime.timezone.utc
            ),
            "do_backfill": True,
        }
    )
    content_item_connection = ContentItemConnection(
        {
            "client": "test",
            "execution_date": datetime.datetime(
                2005, 7, 14, 12, 30, tzinfo=datetime.timezone.utc
            ),
            "do_backfill": True,
        }
    )

    # Point both tasks' Postgres hooks at the local test database.
    monkeypatch.setattr(
        content_item.pg_hook,
        "get_conn",
        db_connect,
    )
    monkeypatch.setattr(
        content_item_connection.pg_hook,
        "get_conn",
        db_connect,
    )

    with vcr.use_cassette(
        "tests/cassettes/content_item_connections/content_items.yaml"
    ):
        content_item.run_fetch_and_save_content_items()
    with vcr.use_cassette(
        "tests/cassettes/content_item_connections/initial_content_item_connections.yaml"
    ):
        content_item_connection.run_fetch_and_save_content_items_connections()

    conn = db_connect()
    with conn:
        with conn.cursor() as curs:
            # Check for initial parent content item
            curs.execute("SELECT id FROM content_item")
            parent_item_id = curs.fetchone()[0]

            # Check that initial content item connections are correct
            curs.execute(
                """
                SELECT parent_id, origin_id FROM content_item_connection;
                """
            )
            initial_content_item_connections = curs.fetchall()
            assert len(initial_content_item_connections) == 3
            expected = [
                (parent_item_id, "20"),
                (parent_item_id, "18"),
                (parent_item_id, "19"),
            ]
            i = 0
            for connection in initial_content_item_connections:
                assert connection == expected[i]
                i += 1

            # Delete content item connection
            with vcr.use_cassette(
                "tests/cassettes/content_item_connections/delete_content_item_connection.yaml"
            ):
                content_item_connection.run_delete_content_item_connections()
            curs.execute("SELECT parent_id, origin_id FROM content_item_connection;")
            content_item_connections_with_deletion = curs.fetchall()
            assert len(content_item_connections_with_deletion) == 2
            expected = [
                (parent_item_id, "20"),
                (parent_item_id, "18"),
            ]
            i = 0
            for connection in content_item_connections_with_deletion:
                assert connection == expected[i]
                i += 1
    conn.close()
| CrossingsCommunityChurch/apollos-shovel | tests/test_rock_content_item_connections.py | test_rock_content_item_connections.py | py | 3,945 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "test_utils.create_test_database",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "airflow.models.Variable",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "dags.rock.rock_content_items.ContentItem",
"line_number": 36,
"usage_type": "c... |
35539138268 | from django.http import JsonResponse
from .models import Task
def _get_all_tasks():
    """Return up to 30 tasks as plain dicts, ready for JSON serialization."""
    return [task_obj.get_as_dict() for task_obj in Task.objects.all()[:30]]
def index(request):
    """Task collection endpoint: GET lists tasks, POST creates a new one."""
    if request.method == 'GET':
        return get_all(request)
    elif request.method == 'POST':
        code = 200
        error = None
        task_for_response = None
        description = request.POST.get('task[description]')
        category = request.POST.get('task[category]')
        date_added = request.POST.get('task[dateAdded]')
        # All three fields are required; reject the request if any is missing.
        if description is None or category is None or date_added is None:
            code = 400
            error = 'No task data submitted'
        else:
            task = Task()
            task.description = description
            task.category = category
            task.date_added = date_added
            task.save()
            task_for_response = task.get_as_dict()
        return JsonResponse({
            'task': task_for_response,
            'code': code,
            'error': error
        })
def get_all(request):
    """Return every task (capped at 30 by the helper) as a JSON payload."""
    payload = {
        "tasks": _get_all_tasks(),
        "code": 200
    }
    return JsonResponse(payload)
def update_task(request, task_id):
    """Update one task's description, category and completion flag.

    Responds with the updated task dict plus a status code: 404 when the
    task does not exist, 400 when required POST fields are missing.
    """
    code = 200
    error = None
    task_for_response = None
    # BUG FIX: Task.objects.get() raises Task.DoesNotExist instead of
    # returning None, so the original `if task is None` 404 branch was
    # unreachable and a missing id produced an unhandled exception (500).
    # filter().first() really does return None when no row matches.
    task = Task.objects.filter(id=task_id).first()
    if task is None:
        code = 404
        error = f'No task found for id {task_id}'
    else:
        task_description = request.POST.get('task[description]')
        task_category = request.POST.get('task[category]')
        task_completed = request.POST.get('task[completed]')
        if (task_description is None) or (task_category is None)\
                or (task_completed is None):
            code = 400
            error = 'No task data submitted'
        else:
            task.description = task_description
            task.category = task_category
            # The flag arrives as a string; accept the common truthy spellings.
            task.completed = task_completed in ('true', 'True', '1')
            task.save()
            task_for_response = task.get_as_dict()
    response = {
        'task': task_for_response,
        'code': code,
        'error': error
    }
    return JsonResponse(response)
def delete_completed(request):
    """Remove every completed task, then respond with the remaining list."""
    completed = Task.objects.filter(completed=True)
    completed.delete()
    return get_all(request)
{
"api_name": "models.Task.objects.all",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "models.Task",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.Task... |
74918976186 | import networkx as nx
import re
def read_file(file):
    """Parse an AoC day-7 instruction file into a dependency DiGraph.

    Returns (G, roots) where G holds an edge A -> B for every line
    "Step A must be finished before step B can begin." and roots is the
    set of steps that never appear as a successor (the starting steps).
    """
    first = set()   # steps that appear as a prerequisite
    second = set()  # steps that appear as a successor
    G = nx.DiGraph()
    prog = re.compile("Step ([A-Z]) must be finished before step ([A-Z]) can begin.")
    with open(file) as f:
        lines = f.readlines()
        for line in lines:
            r = prog.match(line.strip())
            if not r.group(1) in G:
                G.add_node(r.group(1))
            if not r.group(2) in G:
                G.add_node(r.group(2))
            if not G.has_edge(r.group(1), r.group(2)):
                G.add_edge(r.group(1), r.group(2))
            first.add(r.group(1))
            second.add(r.group(2))
    # Roots = prerequisites that are never themselves a successor.
    return (G, first - second)
def duration(step):
    """Seconds needed to finish *step*: 60 plus its 1-based alphabet position."""
    return 61 + (ord(step) - ord("A"))
def day7(file):
    """Solve AoC 2018 day 7.

    Returns (work_route, second): the alphabetical topological order of the
    steps, and the total seconds needed when 5 workers build steps in
    parallel with per-step durations from duration().

    NOTE(review): this body was re-indented from a whitespace-stripped dump;
    the nesting of the worker-assignment branch matches the apparent intent
    but should be confirmed against the original repository.
    """
    G, starter = read_file(file)
    # --- Part 1: topological order, ties broken alphabetically -------------
    path = list()
    to_visit = sorted(starter, reverse=True)  # reversed so .pop() yields the smallest
    while len(to_visit) > 0:
        node = to_visit.pop()
        path.append(node)
        neighbours = G[node]
        for n in neighbours:
            if not n in to_visit and not n in path:
                # A successor becomes available once all its prerequisites are done.
                allCompleted = True
                for u, v in G.in_edges(nbunch=n):
                    if not u in path:
                        allCompleted = False
                if allCompleted:
                    to_visit.append(n)
        to_visit = sorted(to_visit, reverse=True)
    #print("".join(path))
    work_route = "".join(path)
    end_letter = path[-1]  # the simulation ends when this final step completes
    # --- Part 2: same schedule but with 5 parallel workers ------------------
    path = list()
    to_visit = sorted(starter, reverse=True)
    second = 0
    workers = list()
    # Each worker slot holds [current job, second at which it finishes];
    # '.' marks an idle worker.
    workers.append(['.', 0])
    workers.append(['.', 0])
    workers.append(['.', 0])
    workers.append(['.', 0])
    workers.append(['.', 0])

    def full_workers(workers):
        # True when no worker slot is idle.
        full = True
        for w in workers:
            if w[0] == ".":
                full = False
        return full

    end = False
    while not end:
        # Advance the clock only when nothing can be assigned right now.
        if len(to_visit) == 0 or full_workers(workers):
            second += 1
        for i in range(0, len(workers)):
            if workers[i][1] <= second:
                if workers[i][0] != ".":
                    # Worker i just finished its step: record it, unlock successors.
                    path.append(workers[i][0])
                    neighbours = G[workers[i][0]]
                    for n in neighbours:
                        if not n in to_visit and not n in path:
                            allCompleted = True
                            for u, v in G.in_edges(nbunch=n):
                                if not u in path:
                                    allCompleted = False
                            if allCompleted:
                                to_visit.append(n)
                    to_visit = sorted(to_visit, reverse=True)
                    if workers[i][0] == end_letter:
                        #print("Finish point")
                        #print("Seconds: %d" % second)
                        end = True
                # Hand the worker the next available step, or mark it idle.
                if len(to_visit) > 0:
                    node = to_visit.pop()
                    workers[i][1] = second + duration(node)
                    workers[i][0] = node
                else:
                    workers[i][0] = "."
    return work_route, second
| aarroyoc/advent-of-code-2018 | python/day7/day7.py | day7.py | py | 3,076 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "networkx.DiGraph",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
}
] |
34276132086 | """ Initializes Pickly Files"""
import pickle
import json
import requests
import urllib.request
def guiinit(sub):
    """Fetch the newest posts of subreddit *sub* and pickle their data.

    Writes three pickle files into the working directory:
    title.obj (post titles), url.obj (link URLs) and perma.obj
    (comment-page permalinks).  At most 20 posts are recorded.
    """
    # Gets information from Reddit (blocking network call).
    r = urllib.request.urlopen(r'http://www.reddit.com/r/' + sub + '/new/.json', timeout=60).read().decode("utf-8")
    data = json.loads(r)
    # Lists to hold the extracted fields.
    titlelist = []
    urllist = []
    permalinklist = []
    # ROBUSTNESS FIX: the original indexed children[0..19] unconditionally,
    # raising IndexError whenever the listing held fewer than 20 posts.
    for child in data['data']['children'][:20]:
        post = child['data']
        titlelist.append(post['title'])                                 # post title
        urllist.append(post['url'])                                     # link URL
        permalinklist.append("http://www.reddit.com" + post['permalink'])  # comments page
    # LEAK FIX: use context managers so the files are closed even when
    # pickling fails (the original leaked all three handles on error).
    with open("title.obj", 'wb') as title:
        pickle.dump(titlelist, title)
    with open("url.obj", 'wb') as url:
        pickle.dump(urllist, url)
    with open("perma.obj", 'wb') as permalink:
        pickle.dump(permalinklist, permalink)
{
"api_name": "urllib.request.request.urlopen",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_nam... |
14391947993 | import glob
import json
def LoadTweets(directory):
    """Load, sort, filter and de-duplicate tweets from JSON dumps in *directory*.

    Only the first 100 ``*json`` files are read.  Tweets without an id, or
    with an id below the cutoff 656971539691257900, are dropped; consecutive
    duplicates (same id) are collapsed.  Every returned tweet carries both
    'id' (int) and 'id_str' (str).
    """
    MIN_ID = 656971539691257900  # oldest tweet id of interest
    files = glob.glob(directory + "/*json")[:100]
    twts = []
    for fl in files:
        # Close each file deterministically (the original leaked the handles).
        with open(fl) as fh:
            twts.extend(json.load(fh))
    # Sort numerically by id, falling back to id_str, then 0 for id-less records.
    twts.sort(key=lambda x: x['id'] if 'id' in x else int(x['id_str']) if 'id_str' in x else 0)
    # BUG FIX: the original tested 'str_id' (a typo for 'id_str'), silently
    # discarding every tweet that only carried the string form of its id.
    twts = [a for a in twts if 'id' in a or 'id_str' in a]
    twts = [a for a in twts
            if (('id' in a and a['id'] >= MIN_ID) or
                ('id_str' in a and int(a['id_str']) >= MIN_ID))]
    res = []
    prev_id = 0
    for tw in twts:
        # Normalise: make sure both the numeric and string ids are present.
        if 'id' not in tw: tw['id'] = int(tw['id_str'])
        if 'id_str' not in tw: tw['id_str'] = str(tw['id'])
        if tw['id'] != prev_id: res.append(tw)  # skip consecutive duplicates
        prev_id = tw['id']
    return res
def LoadUsers(directory):
    """Collect (username, filename) pairs from every ``*txt`` file in *directory*.

    Each line may hold several comma-separated handles; surrounding '@'
    characters are stripped.  Returns (unique name set, full pair list).
    """
    user_files = glob.glob(directory + "/*txt")
    users = []
    for file_name in user_files:
        for line in open(file_name):
            for handle in line.strip().split(","):
                users.append((handle.strip("@"), file_name))  # drop the leading @
    unique_names = {name for name, _ in users}
    return unique_names, users
| datumkg/electweet | ElecTweet/TweetLoader.py | TweetLoader.py | py | 1,092 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 25,
"usage_type": "call"
}
] |
33548088967 | import os
from django.core.management.base import BaseCommand, CommandError
from main.settings import BASE_DIR, DEBUG
from costcenter.models import Fund, Source, CostCenter, FundCenter, FinancialStructureManager
from lineitems.models import LineForecast, LineItem
class Command(BaseCommand):
    """
    A class to be used only for development purposes. It serves to fill in some funds, sources, fund centers and cost centers. Values fed in the database should match what is expected from the data to be used when running uploadtocsv which also uses test encumbrance report data.
    """

    def handle(self, *args, **options):
        # Only wipe and reseed when running a development (DEBUG) instance.
        if DEBUG:
            # Delete in child-before-parent order to satisfy FK constraints.
            LineForecast.objects.all().delete()
            LineItem.objects.all().delete()
            CostCenter.objects.all().delete()
            Source.objects.all().delete()
            Fund.objects.all().delete()
            FundCenter.objects.all().delete()
            self.set_fund()
            self.set_source()
            self.set_fund_center()
            self.set_cost_center()
        else:
            print("This capability is only available when DEBUG is True")

    def set_fund(self):
        # Seed a fixed list of funds; report-and-skip any that already exist.
        items = [
            {"fund": "C113", "name": "National Procurement", "vote": "1"},
            {"fund": "C116", "name": "Kitchen Procurement", "vote": "5"},
            {"fund": "C523", "name": "Basement Procurement", "vote": "1"},
            {"fund": "CXXX", "name": "Bedroom Procurement", "vote": "1"},
        ]
        for item in items:
            try:
                found = Fund.objects.get(fund=item["fund"])
                if found:
                    print(f"Fund {found} exists")
            except Fund.DoesNotExist:
                new_item = Fund.objects.create(**item)
                print(f"Created fund {new_item}")

    def set_source(self):
        # Seed the single test funding source.
        items = [{"source": "Kitchen"}]
        for item in items:
            try:
                found = Source.objects.get(source=item["source"])
                if found:
                    print(f"Source {found} exists")
            except Source.DoesNotExist:
                new_item = Source.objects.create(**item)
                print(f"Created Source {new_item}")

    def set_fund_center(self):
        # Build a small fund-center tree: 1111AA (root) -> 1111AB/1111AC,
        # then 1111AB -> 2222BA/2222BB.
        # Create root FC
        fc = {"fundcenter": "1111AA", "shortname": "root", "parent": None}
        new_item = FundCenter.objects.create(**fc)
        root = FundCenter.objects.filter(fundcenter="1111AA").first()
        print(f"Created Fund Center {root}, sequence {root.sequence}")

        root_children = [
            {"fundcenter": "1111AB", "shortname": "AB", "parent": root},
            {"fundcenter": "1111AC", "shortname": "AC", "parent": root},
        ]
        for item in root_children:
            try:
                found = FundCenter.objects.get(fundcenter=item["fundcenter"])
                if found:
                    print(f"Fund Center {found} exists")
            except FundCenter.DoesNotExist:
                # Derive the tree sequence number from the parent before creating.
                item["sequence"] = FinancialStructureManager().set_parent(fundcenter_parent=root)
                new_item = FundCenter.objects.create(**item)
                print(f"Created Fund Center {new_item}, sequence {new_item.sequence}")

        ab = FundCenter.objects.filter(fundcenter="1111AB").first()
        ab_children = [
            {"fundcenter": "2222BA", "shortname": "BA", "parent": ab},
            {"fundcenter": "2222BB", "shortname": "BB", "parent": ab},
        ]
        for item in ab_children:
            try:
                found = FundCenter.objects.get(fundcenter=item["fundcenter"])
                if found:
                    print(f"Fund Center {found} exists")
            except FundCenter.DoesNotExist:
                item["sequence"] = FinancialStructureManager().set_parent(fundcenter_parent=ab)
                new_item = FundCenter.objects.create(**item)
                print(f"Created Fund Center {new_item}")

    def set_cost_center(self):
        # Attach three cost centers under the fund centers created above.
        fund = Fund.objects.get(fund="C113")
        source = Source.objects.get(source="Kitchen")
        ab = FundCenter.objects.get(fundcenter="1111AB")
        ac = FundCenter.objects.get(fundcenter="1111AC")
        FSM = FinancialStructureManager()
        items = [
            {
                "costcenter": "8486B1",
                "shortname": "Utensils",
                "fund": fund,
                "source": source,
                "isforecastable": True,
                "isupdatable": True,
                "note": "",
                "parent": ac,
            },
            {
                "costcenter": "8486C1",
                "shortname": "Food and drink",
                "fund": fund,
                "source": source,
                "isforecastable": True,
                "isupdatable": True,
                "note": "A quick and short note for 1234FF",
                "parent": ab,
            },
            {
                "costcenter": "8486C2",
                "shortname": "Basement Stuff",
                "fund": fund,
                "source": source,
                "isforecastable": True,
                "isupdatable": True,
                "note": "",
                "parent": ab,
            },
        ]
        for item in items:
            try:
                found = CostCenter.objects.get(costcenter=item["costcenter"])
                if found:
                    print(f"Cost Center {found} exists")
            except CostCenter.DoesNotExist:
                item["sequence"] = FSM.set_parent(item["parent"], item)
                new_item = CostCenter.objects.create(**item)
                print(f"Created Cost Center {new_item}")
| mariostg/bft | encumbrance/management/commands/populate.py | populate.py | py | 5,663 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "main.settings.DEBUG",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "lineitems.models.LineForecast.objects.all",
"line_number": 16,
"usage_type": "... |
12477031144 | import argparse
import os
import importlib.util
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import json
from collections import defaultdict
import utils
import transformers
# Command-line interface: which task to run and against which model/dataset.
parser = argparse.ArgumentParser()
parser.add_argument('--task')
parser.add_argument('--model')
parser.add_argument('--dataset')
parser.add_argument('--k', default='0')          # comma-separated support-set sizes
parser.add_argument('--mode', default='all')
parser.add_argument('--prompt', default='qa')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--repeats', default=1, type=int)
parser.add_argument('--output', default='plot.png')
parser.add_argument('--device', default='cpu')
args = parser.parse_args()

# Downstream modules read the compute device from the environment.
os.environ["DEVICE"] = args.device

# Check if submission module is present. If it is not, then main() will not be executed.
use_submission = importlib.util.find_spec('submission') is not None
if use_submission:
    import submission
def plot():
    """Plot fine-tuning vs. in-context-learning scores for xsum (med model).

    Reads pre-computed metric JSON files from submission/results/{ft,icl}/
    and writes the figure to args.output.
    """
    dataset = 'xsum'
    data = defaultdict(lambda: defaultdict(list))  # series id -> {'x': ks, 'y': scores}
    model = 'med'
    mode = 'lora16'
    x_vals = set()
    # Fine-tuning results, one file per support-set size k.
    for k in [0, 1, 8, 128]:
        fn = '_'.join([model, dataset, str(k), mode])
        id_ = '_'.join([model, dataset, mode])
        with open(f'submission/results/ft/{fn}.json', 'r') as f:
            score = json.load(f)['metric']
        data[id_]['x'].append(k)
        x_vals.add(k)
        data[id_]['y'].append(score)
    # In-context-learning results for the 'tldr' prompt.
    prompt_mode = 'tldr'
    for k in [0, 1, 4]:
        fn = '_'.join([model, dataset, str(k), prompt_mode])
        id_ = '_'.join([model, dataset, prompt_mode])
        with open(f'submission/results/icl/{fn}.json', 'r') as f:
            score = json.load(f)['metric']
        data[id_]['x'].append(k)
        x_vals.add(k)
        data[id_]['y'].append(score)
    for k, v in data.items():
        plt.plot(v['x'], v['y'], label=k)
    if max(x_vals) > 4:
        # Use a log x-axis once k spans orders of magnitude, keeping plain labels.
        plt.xscale('symlog')
        ax = plt.gca()
        ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
        ax.xaxis.set_ticks(sorted(x_vals))
    plt.legend()
    plt.title(dataset)
    plt.ylabel(utils.metric_for_dataset(dataset))
    plt.xlabel('Number of support examples')
    plt.savefig(args.output, bbox_inches='tight')
# Download all models and datasets required by the grader
def cache():
    """Pre-download every model/tokenizer and dataset the grader needs,
    so later runs can work from the local cache."""
    models = [
        {'name': 'bert-tiny', 'type': transformers.AutoModelForSequenceClassification, 'num_labels': 5},
        {'name': 'bert-tiny', 'type': transformers.AutoModelForCausalLM},
        {'name': 'med', 'type': transformers.AutoModelForCausalLM}
    ]
    for model in models:
        # Classification heads need the label count; causal LMs do not.
        if 'num_labels' in model:
            utils.get_model_and_tokenizer(model['name'], model['type'], num_labels = model['num_labels'])
        else:
            utils.get_model_and_tokenizer(model['name'], model['type'])
    datasets = [
        {'name': 'amazon', 'n_train': 1, 'n_val': 125},
        {'name': 'xsum', 'n_train': 8, 'n_val': 125}
    ]
    for dataset in datasets:
        utils.get_dataset(dataset=dataset['name'], n_train=dataset['n_train'], n_val=dataset['n_val'])
def run():
    """Dispatch the --task CLI flag to the matching action."""
    support_sizes = [int(k) for k in args.k.split(',')]
    task = args.task
    if task == 'run_ft':
        submission.run_ft(args.model.split(','), args.dataset.split(','), support_sizes,
                          args.mode.split(','), args.debug, args.repeats)
    elif task == 'run_icl':
        submission.run_icl(args.model.split(','), args.dataset.split(','), support_sizes,
                           args.prompt.split(','), args.debug, args.repeats)
    elif task == 'plot_ft':
        submission.plot_ft(args.model.split(','), args.dataset.split(','), support_sizes,
                           args.mode.split(','), args.output)
    elif task == 'plot_icl':
        assert ',' not in args.dataset, "Only one dataset at a time for plotting"
        submission.plot_icl(args.model.split(','), args.dataset, support_sizes,
                            args.prompt.split(','), args.output)
    elif task == 'plot':
        plot()
    elif task == 'cache':
        cache()
# Script entry point.
if __name__ == '__main__':
    run()
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "importlib.util.util.find_spec",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": ... |
6323470596 | import typing
from typing import (
Union,
Optional,
List,
)
import asyncio
import logging
from datetime import datetime
from hikari import ActionRowComponent, Embed, MessageCreateEvent, embeds
from hikari import ButtonStyle
from hikari.impl.special_endpoints import MessageActionRowBuilder, LinkButtonBuilder
from hikari.events import InteractionCreateEvent
import lightbulb
import lightbulb.utils as lightbulb_utils
from lightbulb import commands, context
from lightbulb import OptionModifier as OM
from lightbulb.context import Context
import hikari
from matplotlib.style import available
from numpy import full, isin
from fuzzywuzzy import fuzz
from utils import Colors, Human, Paginator, crumble
from core import getLogger, Inu
log = getLogger(__name__)
plugin = lightbulb.Plugin("Voice commands")
@plugin.command
@lightbulb.add_checks(
    lightbulb.has_role_permissions(hikari.Permissions.MOVE_MEMBERS),
    lightbulb.bot_has_role_permissions(hikari.Permissions.MOVE_MEMBERS),
    lightbulb.guild_only,
)
# @lightbulb.option(
#     "member",
#     "a person who is in the current voice channel. normally you",
#     type=hikari.Member,
#     default=None,
# )
@lightbulb.option(
    "from-voice-channel",
    "the voice channel where move peaple of",
    type=hikari.GuildChannel,
    default=None,
)
@lightbulb.option(
    "voice-channel",
    "the voice channel where you want to move to",
    type=hikari.GuildChannel,
)
@lightbulb.command(
    "move-all",
    "moves all members from a current voice channel into another",
    aliases=["move"]
)
@lightbulb.implements(commands.SlashCommand, commands.PrefixCommand)
async def move_all(ctx: Context):
    """Move every member of one voice channel into another.

    When "from-voice-channel" is omitted, the source defaults to the channel
    the invoking member currently occupies.
    """
    target_channel: hikari.InteractionChannel = ctx.options["voice-channel"]
    if not target_channel.type == hikari.ChannelType.GUILD_VOICE:
        await ctx.respond(f"{target_channel} is not a voice channel", flags=hikari.MessageFlag.EPHEMERAL)
        return None
    if not ctx.options["from-voice-channel"]:
        # No source given: locate the invoker's current voice channel from cache.
        member = ctx.member
        states = ctx.bot.cache.get_voice_states_view_for_guild(ctx.guild_id)
        voice_state = [state for state in states.values() if state.user_id == member.id]
        if not voice_state:
            await ctx.respond(f"{member.display_name} needs to be in a voice channel")
            return None
        channel_id = voice_state[0].channel_id
        user_ids = [state.user_id for state in states.values() if state.channel_id == channel_id]
    else:
        user_ids = [
            state.user_id for state in ctx.bot.cache.get_voice_states_view_for_guild(ctx.guild_id).values()
            if state.channel_id == ctx.options["from-voice-channel"].id
        ]
    # Fire all member edits concurrently, then wait for every move to finish.
    tasks = [
        asyncio.create_task(
            ctx.bot.rest.edit_member(
                guild=ctx.guild_id,
                user=user_id,
                voice_channel=target_channel.id
            )
        )
        for user_id in user_ids
    ]
    await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
    await ctx.respond(
        f"Moved {Human.list_([f'<@{user_id}>' for user_id in user_ids], with_a_or_an=False)} to `{target_channel.name}`"
    )
@move_all.autocomplete("voice-channel")
async def tag_name_auto_complete(
    option: hikari.AutocompleteInteractionOption,
    interaction: hikari.AutocompleteInteraction
) -> List[str]:
    """Autocomplete for the target channel option.

    Offers every guild voice channel the invoking member may CONNECT to,
    formatted as "id | name" strings, capped at 24 entries.
    """
    vcs = []
    guild = interaction.get_guild()
    if not guild:
        return []
    for ch in guild.get_channels().values():
        if not isinstance(ch, hikari.GuildVoiceChannel):
            continue
        # Only offer channels the member is actually allowed to join.
        if lightbulb_utils.permissions_in(ch, interaction.member) & hikari.Permissions.CONNECT:
            vcs.append(f"{ch.id} | {ch.name}")
    return vcs[:24]
def load(bot: Inu):
    # lightbulb extension entry point: register this file's plugin on the bot.
    bot.add_plugin(plugin)
| zp33dy/inu | inu/ext/commands/voice.py | voice.py | py | 3,792 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "core.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "lightbulb.Plugin",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "lightbulb.context.Context",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "hikari.Inte... |
1518966554 | from argparse import ArgumentParser, RawTextHelpFormatter
from glob import glob
from subprocess import check_call
import os
from shutil import rmtree
def compile_clm():
    """Generate (and by default run) a build script that compiles CLM on Piz Daint.

    A case is created in a subfolder of ${SCRATCH}.  The generated script is
    written to ./build_cesm.bash; pass --no_exe to only generate it without
    executing the build.
    """
    # Define and parse command line arguments
    # ---------------------------------------
    dsc = "Compile CLM on Piz Daint. A case will be created in a subfolder of your ${SCRATCH}.\n"\
          " WARNING: tool has to be run from the default Prg-Env-cray environment"
    parser = ArgumentParser(description=dsc, formatter_class=RawTextHelpFormatter)
    parser.add_argument('cesm_trunk', help="path to the CESM directory")
    parser.add_argument('--clm_version', choices=['4.0', '4.5'], default='4.0', help="CLM version")
    parser.add_argument('-c', '--compiler', help="compiler to use (default: pgi)", default='pgi')
    parser.add_argument('-v', '--compiler_version', help="switch to this version of the compiler\n"\
                        "This is not recommended by CSCS")
    parser.add_argument('-d', '--debug', help="compile in debug mode (default: false)",
                        action='store_true')
    parser.add_argument('--src_mod', action='append',
                        help="path to additionnal/modified sources (e.g. oasis interface)\n"\
                        "has to be a folder containing src.xxx subfolders, can be specified several times")
    parser.add_argument('-o', '--output', help="output executable file path (default: ./cesm.exe)",
                        default='./cesm.exe')
    parser.add_argument('--no_exe', help="do not execute build_cesm.bash, leave it to any suited modification before actual compilation.",
                        action='store_false', dest='execute')
    opts = parser.parse_args()

    # Init some variables
    # -------------------
    CESM_TRUNK = opts.cesm_trunk
    EXP = 'clm{:s}_bld'.format(opts.clm_version)
    CASEDIR = os.path.join(os.environ['SCRATCH'], EXP)
    if os.path.exists(CASEDIR):
        rmtree(CASEDIR)  # start from a clean case directory
    RES = '1.9x2.5_gx1v6'
    COMP = 'ITEST'
    MACH = 'daint'
    if opts.clm_version == '4.5':
        COMP += 'CLM45'
    out_exe = os.path.abspath(opts.output)
    # BUG FIX: opts.src_mod is None when --src_mod is never given (argparse
    # 'append' actions have no implicit empty-list default), which made the
    # original comprehension raise TypeError.  Fall back to an empty list.
    sourcemods = [os.path.abspath(src_dir) for src_dir in (opts.src_mod or [])]
    create_case_fmt = '{:s}/scripts/create_newcase -res {:s} -compset {:s} -mach {:s} -compiler pgi_oas -case {:s}'
    create_case_cmd = create_case_fmt.format(CESM_TRUNK, RES, COMP, MACH, CASEDIR)

    # Build compiling script
    # ----------------------
    with open('build_cesm.bash', mode='w') as script:
        script.write('#!/bin/bash\n')
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Modules\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        if opts.compiler == 'pgi':
            script.write('module switch PrgEnv-cray PrgEnv-pgi\n')
            if opts.compiler_version is not None:
                script.write('module switch pgi pgi/{:s}\n'.format(opts.compiler_version))
        elif opts.compiler == 'intel':
            script.write('module switch PrgEnv-cray PrgEnv-intel\n')
            if opts.compiler_version is not None:
                script.write('module switch intel intel/{:s}\n'.format(opts.compiler_version))
        elif opts.compiler == 'cray' and opts.compiler_version is not None:
            script.write('module switch cce cce/{:s}\n'.format(opts.compiler_version))
        script.write('\n')
        script.write('module load cray-netcdf\n')
        script.write('module load daint-gpu\n')
        script.write('\n')
        script.write('module list\n')
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Create case\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        script.write('{:s}\n'.format(create_case_cmd))
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Setup case\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        script.write('cd {:s}\n'.format(CASEDIR))
        script.write('\n')
        # BUG FIX: these two annotation lines were emitted without a leading
        # '#', producing invalid shell commands in the generated script.
        script.write('# switch off river routing\n')
        script.write('./xmlchange RTM_MODE="NULL"\n')
        script.write('\n')
        script.write('# set transient CO2\n')
        script.write('./xmlchange CCSM_BGC=CO2A,CLM_CO2_TYPE=diagnostic\n')
        if opts.debug:
            script.write('# activate debug mode\n')
            script.write('./xmlchange -file env_build.xml -id DEBUG -val "TRUE"\n')
        script.write('\n')
        script.write('./cesm_setup\n')
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Add source additions/modifications\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        for src_dir in sourcemods:
            print(src_dir)
            # Sync every src.* component folder into the case's SourceMods.
            for comp in glob('{:s}/src.*'.format(src_dir)):
                print(comp)
                script.write('rsync -avrL {:s} SourceMods\n'.format(comp))
        script.write('\n')
        script.write('# ----------------------------------------------\n')
        script.write('# Build\n')
        script.write('# ----------------------------------------------\n')
        script.write('\n')
        script.write('{:s}.build\n'.format(EXP))
        script.write('rsync -avr bld/cesm.exe {:s}\n'.format(out_exe))
    os.chmod('build_cesm.bash', 0o755)

    # Execute compiling script
    # ------------------------
    if opts.execute:
        check_call(['./build_cesm.bash'])
| COSMO-RESM/COSMO_CLM2_tools | COSMO_CLM2_tools/compile_clm.py | compile_clm.py | py | 5,769 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os... |
22200579634 | #!/usr/bin/env python
from __future__ import absolute_import
import apache_beam as beam
import argparse
import json
import logging
import sys
import urllib
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from google.cloud import bigquery
from google.cloud import storage
from raxxla_transforms import data_transforms
from raxxla_transforms import table_schemas
# Global vars
logger = logging.getLogger('raxxla_loader')
formatter = '%(asctime)s %(message)s'
logging.basicConfig(level=logging.INFO, format=formatter)
table_list = ['bodies', 'systems', 'powerplay', 'population', 'stations']
arg_list = table_list + ['all']
runner_list = ['DataflowRunner', 'DirectRunner']
# Configure flags
flags = argparse.ArgumentParser(description='Initialize Raxxla BigQuery tables from EDSM data.')
flags.add_argument('--project', help='ID of the Google Cloud project to use.')
flags.add_argument('--dataset', help='Name of the BigQuery dataset to store EDSM data in.')
flags.add_argument('--bucket', help='Name of the GCS bucket to store EDSM data in.')
flags.add_argument('--runner', help='Name of the Beam runner type to use for the pipeline.', choices=runner_list)
flags.add_argument('--upload_to_gcs', help='Upload EDSM files to GCS from local download.', choices=arg_list, nargs='+')
flags.add_argument('--delete', help='Delete tables from BigQuery.', choices=arg_list, nargs='+')
flags.add_argument('--download', help='Download files from EDSM into Google Cloud Storage.', choices=arg_list, nargs='+')
flags.add_argument('--upload_to_bq', help='Write converted values to BigQuery. Requires files to be staged in GCS.', choices=arg_list, nargs='+')
args = flags.parse_args()
class remove_blank_rows(beam.DoFn):
    """Beam DoFn that drops None elements so they never reach BigQuery."""

    def process(self, element):
        if element is not None:
            yield element
        else:
            return  # emit nothing for blank rows
def delete_bq_data(tables, project_id, dataset_id):
    """Delete the given tables from the BigQuery dataset.

    Aborts the whole program (sys.exit) on the first failure.
    """
    client = bigquery.Client()
    dataset_ref = client.dataset(dataset_id)
    tables = set(tables)  # de-duplicate the requested table names
    try:
        for table in tables:
            table_ref = dataset_ref.table(table)
            delete_string = 'Deleted ' + project_id + '.' + dataset_id + '.' + table
            client.delete_table(table_ref)
            logger.info(delete_string)
    except Exception as e:
        delete_error_string = 'Unable to delete EDSM BQ tables: ' + str(e)
        logger.error(delete_error_string)
        sys.exit()
def download_edsm_files(files):
    """Download the requested EDSM JSON dumps into /tmp/<name>.

    Args:
        files: iterable of table names; each must be a key of the URL map
            below (duplicates are ignored).

    On any failure the error is logged and the process exits.
    """
    # Fix: `urllib.urlretrieve` is the Python 2 API; on Python 3 the
    # function lives in urllib.request, so the old call raised
    # AttributeError (which was then swallowed by the broad except below).
    from urllib.request import urlretrieve
    edsm_urls = {
        'bodies': 'https://www.edsm.net/dump/bodies.json',
        'systems': 'https://www.edsm.net/dump/systemsWithCoordinates.json',
        'powerplay': 'https://www.edsm.net/dump/powerPlay.json',
        'population': 'https://www.edsm.net/dump/systemsPopulated.json',
        'stations': 'https://www.edsm.net/dump/stations.json'
    }
    files = set(files)
    try:
        for file in files:
            dl_string = 'Downloading ' + file + ' file from EDSM...'
            logger.info(dl_string)
            download_url = edsm_urls[file]
            download_path = '/tmp/' + file
            urlretrieve(download_url, download_path)
    except Exception as e:
        download_error_string = 'Unable to download EDSM files: ' + str(e)
        logger.error(download_error_string)
        sys.exit()
def upload_to_bigquery(files, project_id, dataset_id, bucket_id, runner, pipeline_options):
    """Run one Beam pipeline per file that parses EDSM JSON lines and writes
    them into the matching BigQuery table.

    Reads staged files from GCS for DataflowRunner and from /tmp for
    DirectRunner. Any failure is logged and the process exits.
    """
    files = set(files)  # deduplicate the requested table names
    try:
        for file in files:
            import_string = 'Importing ' + file + ' file into BigQuery...'
            logger.info(import_string)
            # BigQuery sink spec: "<project>:<dataset>.<table>"
            table_spec = project_id + ':' + dataset_id + '.' + str(file)
            # Source path depends on the runner: Dataflow workers read from
            # GCS; the local DirectRunner reads the locally downloaded file.
            # NOTE(review): file_path stays unbound for any other runner
            # value — the --runner flag's choices prevent that via the CLI,
            # but a direct call would raise NameError (caught below).
            if runner == 'DataflowRunner':
                file_path = 'gs://' + str(bucket_id) + '/' + str(file)
            elif runner == 'DirectRunner':
                file_path = '/tmp/' + str(file)
            with beam.Pipeline(options=pipeline_options) as p:
                json_lines = p | beam.io.ReadFromText(file_path)
                # Select the table schema and per-line transform for this file.
                if file == 'bodies':
                    schema = table_schemas.bodies
                    rows = json_lines | beam.Map(data_transforms.transform_bodies)
                elif file == 'systems':
                    schema = table_schemas.systems
                    rows = json_lines | beam.Map(data_transforms.transform_systems)
                elif file == 'powerplay':
                    schema = table_schemas.powerplay
                    rows = json_lines | beam.Map(data_transforms.transform_powerplay)
                elif file == 'population':
                    schema = table_schemas.population
                    rows = json_lines | beam.Map(data_transforms.transform_population)
                elif file == 'stations':
                    schema = table_schemas.stations
                    rows = json_lines | beam.Map(data_transforms.transform_stations)
                if schema and rows:
                    # Drop None rows emitted by the transforms, then load.
                    # WRITE_EMPTY: the load fails if the table already holds data.
                    bq_loader = rows | beam.ParDo(remove_blank_rows()) | beam.io.WriteToBigQuery(
                        table_spec,
                        schema=schema,
                        write_disposition=beam.io.BigQueryDisposition.WRITE_EMPTY,
                        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
                        method='DEFAULT',
                        batch_size=500)
                else:
                    raise Exception('Unable to assemble rows for upload. Check upstream repo for schema updates.')
    except Exception as e:
        bq_upload_error_string = 'Unable to load EDSM files: ' + str(e)
        logger.error(bq_upload_error_string)
        sys.exit()
def upload_to_gcs(files, project_id, bucket):
    """Upload each locally downloaded EDSM file (/tmp/<name>) to a GCS bucket.

    On any failure the error is logged and the process exits.
    (project_id is accepted for signature compatibility but unused.)
    """
    try:
        for file in files:
            blob = bucket.blob(file)
            local_path = '/tmp/' + str(file)
            logger.info('Uploading ' + file + ' to GCS.')
            blob.upload_from_filename(local_path)
    except Exception as e:
        logger.error('Unable to upload EDSM files to GCS: ' + str(e))
        sys.exit()
def main(argv=None):
    """CLI entry point: dispatch delete / download / GCS-upload / BQ-load
    steps based on the module-level parsed flags."""
    # Split off unrecognized argv entries; they are forwarded to Beam as
    # pipeline options. NOTE(review): the tool's own flags come from the
    # module-level `args` parsed at import time, not from `known_args`.
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)
    project_id = args.project
    dataset_id = args.dataset
    bucket_id = args.bucket
    if args.delete:
        if 'all' in args.delete:
            delete_bq_data(table_list, project_id, dataset_id)
        else:
            delete_bq_data(args.delete, project_id, dataset_id)
    if args.download:
        if 'all' in args.download:
            download_edsm_files(table_list)
        else:
            download_edsm_files(args.download)
    if args.upload_to_gcs:
        storage_client = storage.Client()
        try:
            gcs_bucket = storage_client.get_bucket(bucket_id)
        except Exception as e:
            # Bucket missing (or inaccessible): create it on the fly.
            logger.warning('GCS bucket not found, creating...')
            gcs_bucket = storage_client.create_bucket(bucket_id)
        if 'all' in args.upload_to_gcs:
            upload_to_gcs(table_list, project_id, gcs_bucket)
        else:
            upload_to_gcs(args.upload_to_gcs, project_id, gcs_bucket)
    if args.upload_to_bq:
        # Beam/Dataflow staging and temp locations share the data bucket.
        staging_location = '--staging_location=gs://' + bucket_id + '/staging'
        temp_location = '--temp_location=gs://' + bucket_id + '/temp'
        pipeline_args.extend([
            '--job_name=raxxla-loader',
            '--setup_file=./setup.py',
            staging_location,
            temp_location,
        ])
        pipeline_options = PipelineOptions(pipeline_args)
        # Pickle the main session so module-level names reach Dataflow workers.
        pipeline_options.view_as(SetupOptions).save_main_session = True
        runner = args.runner
        if 'all' in args.upload_to_bq:
            upload_to_bigquery(table_list, project_id, dataset_id, bucket_id, runner, pipeline_options)
        else:
            upload_to_bigquery(args.upload_to_bq, project_id, dataset_id, bucket_id, runner, pipeline_options)
if __name__ == '__main__':
main()
| mjcastner/edsm_bq | beam_parser.py | beam_parser.py | py | 8,112 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "argparse.Argu... |
36848412423 | import hashlib
import random
import sqlite3
from typing import List, Optional
import more_itertools
import numpy as np
import pandas as pd
import scipy.spatial
import skimage.transform
from carla_real_traffic_scenarios import DT
from carla_real_traffic_scenarios.ngsim import DatasetMode
from carla_real_traffic_scenarios.opendd.dataset import OpenDDDataset, Place
from carla_real_traffic_scenarios.utils.carla import RealTrafficVehicle, find_best_matching_model
from carla_real_traffic_scenarios.utils.transforms import Transform, Vector3, Vector2
def extract_utm_trajectory_from_df(df) -> List[Transform]:
    """Convert the UTM_X/UTM_Y/UTM_ANGLE columns of *df* into Transforms.

    Orientation is the unit heading vector (cos, sin) of UTM_ANGLE; z is 0.
    """
    return [
        Transform(Vector3(x, y, 0), Vector2(np.cos(yaw), np.sin(yaw)))
        for x, y, yaw in df[['UTM_X', 'UTM_Y', 'UTM_ANGLE']].values
    ]
class Utm2CarlaMapper:
    """Maps poses between openDD image pixels, UTM metres and the CARLA
    world frame for one Place."""

    def __init__(self, place: Place):
        image_middle = np.array(place.image_size) // 2  # NOTE(review): unused
        # Affine pixel->UTM matrix built from the dataset's 6 georeference
        # (world-file style) parameters.
        pix2utm_transform = skimage.transform.AffineTransform(np.array(
            [[place.world_params[0], place.world_params[2], place.world_params[4]],
             [place.world_params[1], place.world_params[3], place.world_params[5]],
             [0, 0, 1]]))
        self.pix2utm_transformer = pix2utm_transform
        # Inverse mapping; relies on skimage's private _inv_matrix attribute.
        utm2pix_transform = skimage.transform.AffineTransform(pix2utm_transform._inv_matrix)
        self.utm2pix_transformer = utm2pix_transform
        # UTM -> CARLA: recentre on the map centre, mirror the Y axis, then
        # apply the place-specific correction transform.
        map_center_utm = np.array(place.map_center_utm.as_numpy()[:2])
        reflect_matrix = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]], dtype='float32')  # reflect over Y axis
        self.utm2carla_transformer = skimage.transform.AffineTransform(translation=-map_center_utm) + \
            skimage.transform.AffineTransform(matrix=reflect_matrix) + \
            place.correction_transform

    def utm2pix(self, transform: Transform):
        """Map a UTM-frame Transform into pixel coordinates."""
        return self._transform_with_convert(transform, self.utm2pix_transformer)

    def pix2utm(self, transform: Transform):
        """Map a pixel-space Transform into UTM coordinates."""
        return self._transform_with_convert(transform, self.pix2utm_transformer)

    def utm2carla(self, transform: Transform):
        """Map a UTM-frame Transform into the CARLA world frame."""
        return self._transform_with_convert(transform, self.utm2carla_transformer)

    def _transform_with_convert(self, transform: Transform, transformer: skimage.transform.AffineTransform):
        # Unpack Transform -> 1x2 arrays, run the affine map, repack.
        position = transform.position.as_numpy()[:2]
        position = position.reshape(-1, 2)
        orientation = transform.orientation.as_numpy()
        orientation = orientation.reshape(-1, 2)
        position, orientation = self.transform(position, orientation, transformer)
        position = Vector2.from_numpy(position.squeeze()).to_vector3(0)
        orientation = Vector2.from_numpy(orientation.squeeze())
        return Transform(position, orientation)

    def transform(self, positions: np.ndarray, orientations: np.ndarray,
                  transformer: skimage.transform.AffineTransform):
        """Apply *transformer* to point arrays.

        Orientations are direction vectors: they are mapped as offset points
        (position + orientation) and re-differenced, so the translation part
        of the affine map cancels out for them.
        """
        orientations = positions + orientations
        positions = transformer(positions)
        orientations = transformer(orientations)
        orientations = orientations - positions
        return positions, orientations
class OpenDDVehicle:
    """One tracked object of an openDD session; steps frame-by-frame along
    its recorded UTM trajectory and exposes the CARLA-frame equivalents."""

    def __init__(self, df, transformer) -> None:
        # df: per-object rows (OBJID, WIDTH, LENGTH, V, TIMESTAMP, UTM_*).
        self._df = df
        self.id = int(df.OBJID.iloc[0])
        self.width_m = float(df.WIDTH.iloc[0])
        self.length_m = float(df.LENGTH.iloc[0])
        # CARLA blueprint whose footprint best matches this object's size.
        self._model = find_best_matching_model(self.width_m, self.length_m)

        self._frame = 0  # current index into the trajectory
        self._transformer = transformer
        self._max_frame = len(df)
        self.trajectory_utm = extract_utm_trajectory_from_df(self._df)
        self.trajectory_carla = self._map_trajectory_to_carla(self.trajectory_utm)

    def set_end_of_trajectory_timestamp(self, timestamp_end_s):
        """Truncate the trajectory to samples strictly before *timestamp_end_s*."""
        df = self._df
        df = df[df.TIMESTAMP < timestamp_end_s]
        self._max_frame = len(df)
        self.trajectory_utm = extract_utm_trajectory_from_df(df)
        self.trajectory_carla = self._map_trajectory_to_carla(self.trajectory_utm)
        self._df = df

    def step(self):
        # Advance one recorded sample; callers should check has_finished.
        self._frame += 1

    @property
    def type_id(self):
        # CARLA blueprint id of the matched vehicle model.
        return self._model.type_id

    @property
    def speed_mps(self):
        # Recorded speed at the current frame (V column; presumably m/s —
        # TODO confirm against dataset docs).
        return self._df.V.iloc[self._frame]

    @property
    def velocity(self):
        # Velocity vector in the CARLA frame: heading * speed, z = 0.
        return (self.transform_carla.orientation * self.speed_mps).to_vector3(0)

    @property
    def transform_utm(self):
        return self.trajectory_utm[self._frame]

    @property
    def transform_carla(self):
        return self.trajectory_carla[self._frame]

    @property
    def has_finished(self) -> bool:
        # True once the last usable frame has been reached.
        return self._frame >= self._max_frame - 1

    def as_real_traffic_car(self):
        """Snapshot the current frame as a RealTrafficVehicle."""
        timestamp = self._df.TIMESTAMP.iloc[self._frame]
        debug_string = f'id={self.id} fm={self._frame} ts={timestamp:0.2f}'
        return RealTrafficVehicle(self.id, self.type_id, timestamp,
                                  self.width_m, self.length_m, self.transform_carla,
                                  self.speed_mps,
                                  debug_string)

    def _map_trajectory_to_carla(self, trajectory_utm) -> List[Transform]:
        # Convert each UTM pose to CARLA and pin z to the model's z offset.
        trajectory_carla = []
        for transform_utm in trajectory_utm:
            transform_carla = self._transformer.utm2carla(transform_utm)
            transform_carla = \
                Transform(transform_carla.position.with_z(self._model.z_offset), transform_carla.orientation)
            trajectory_carla.append(transform_carla)
        return trajectory_carla
MIN_EPISODE_LENGTH_STEPS = 10 / DT
def _resample_df(df, target_timedelta_s):
    """Resample the raw per-object trajectory rows to a fixed timestep.

    Returns a dataframe on a resampled TimedeltaIndex, with OBJID restored
    as a column and TIMESTAMP rewritten in (resampled) seconds.
    """
    # create timedelta index from TIMESTAMP column (pd.Grouper uses it)
    df = df.set_index(pd.TimedeltaIndex(df.TIMESTAMP, 's'))
    # group by OBJID and resample TimedeltaIndex to target fps
    freq_ms = int(target_timedelta_s * 1000)
    grouper = df.groupby([pd.Grouper(freq=f'{freq_ms}ms'), 'OBJID'])
    df = grouper.first()  # take the FIRST observation from each grouped bin
    df = df.reset_index(level=['OBJID'])  # recover OBJID column
    df['TIMESTAMP'] = df.index.to_series().dt.total_seconds()
    return df
def _find_ego_vehicle_with_time_frame(place, session_df, ego_id=None):
    """Pick an ego vehicle and the timestamps at which it enters and exits
    the roundabout.

    If *ego_id* is given it is used as-is (its start/end timestamps may be
    None when no valid entry/exit was found); otherwise vehicles are
    sampled at random until one with a valid entry-before-exit window turns
    up.

    Returns (ego_id, timestamp_start_s, timestamp_end_s).
    """
    all_objids = list(set(session_df.OBJID.to_list()))
    explicit_ego_id = ego_id is not None
    while True:
        ego_id = ego_id if explicit_ego_id else random.choice(all_objids)
        obj_df = session_df[session_df.OBJID == ego_id]
        start_idx, stop_idx = _trim_trajectory_utm_to_entry_end_exit(place, obj_df)
        # Re-sample when the random candidate never passes an entry before
        # an exit; an explicitly requested ego_id is returned regardless.
        if not explicit_ego_id and (start_idx is None or stop_idx is None or start_idx >= stop_idx):
            continue
        timestamp_start_s = obj_df.iloc[start_idx].TIMESTAMP if start_idx is not None else None
        timestamp_end_s = obj_df.iloc[stop_idx].TIMESTAMP if stop_idx is not None else None
        return ego_id, timestamp_start_s, timestamp_end_s
def _trim_trajectory_utm_to_entry_end_exit(place, obj_df):
exits_utm = np.array([exit.as_numpy() if exit else np.zeros(2) for entry, exit in place.roads_utm])
entries_utm = np.array([entry.as_numpy() if exit else np.zeros(2) for entry, exit in place.roads_utm])
trajectory_utm = obj_df[['UTM_X', 'UTM_Y']].values
dm_entries = scipy.spatial.distance_matrix(entries_utm, trajectory_utm)
entries_distances_m = np.min(dm_entries, axis=1)
nearest_entry_idx = np.argmin(entries_distances_m) # idx of nearest entry
# trajectory idx where vehicle pass nearest roundabout entry
trajectory_start_idx = np.argmin(dm_entries[nearest_entry_idx])
min_distance_from_nearest_entry = dm_entries[nearest_entry_idx][trajectory_start_idx]
MAX_DISTANCE_FROM_WP_M = 2
PRE_ENTRY_DISTANCE_M = 20
# ensure that it passes entry not more than MAX_DISTANCE_FROM_WP_M
if min_distance_from_nearest_entry > MAX_DISTANCE_FROM_WP_M:
trajectory_start_idx = None
elif trajectory_start_idx > 0:
# take 1st index from part of trajectory distanced not more than PRE_ENTRY_DISTANCE_M
trajectory_start_idx = np.where(
dm_entries[nearest_entry_idx][:trajectory_start_idx] < PRE_ENTRY_DISTANCE_M
)[0][0]
dm_exits = scipy.spatial.distance_matrix(exits_utm, trajectory_utm)
exit_distances_m = np.min(dm_exits, axis=1)
nearest_exit_idx = np.argmin(exit_distances_m)
trajectory_end_idx = np.argmin(dm_exits[nearest_exit_idx])
min_distance_from_nearest_exit = dm_exits[nearest_exit_idx][trajectory_end_idx]
# ensure that it passes exit not more than MAX_DISTANCE_FROM_WP_M
if min_distance_from_nearest_exit > MAX_DISTANCE_FROM_WP_M:
trajectory_end_idx = None
else:
trajectory_end_idx = trajectory_end_idx + np.where(
dm_exits[nearest_exit_idx][trajectory_end_idx:] < PRE_ENTRY_DISTANCE_M
)[0][-1]
return trajectory_start_idx, trajectory_end_idx
def _determine_split(session_name, ego_id, start, stop) -> DatasetMode:
    """Deterministically assign a roundabout pass to the TRAIN (80%) or
    VALIDATION (20%) split by hashing its identifying tuple."""
    TRAIN_FRACTION = 0.8
    key = f'{session_name},{ego_id},{int(round(start, 0))},{int(round(stop, 0))}'
    digest = int(hashlib.sha1(key.encode('utf-8')).hexdigest(), 16)
    bucket = (digest % 100) / 100
    return DatasetMode.TRAIN if bucket < TRAIN_FRACTION else DatasetMode.VALIDATION
class OpenDDRecording():
    """Replays one openDD session: resamples the raw table to a fixed
    timestep and steps OpenDDVehicle instances along their trajectories."""

    def __init__(self, *, dataset: OpenDDDataset, timedelta_s: float = DT,
                 dataset_mode: DatasetMode = DatasetMode.TRAIN) -> None:
        self._dataset = dataset
        self._dataset_mode = dataset_mode
        self._env_vehicles = {}  # OBJID -> OpenDDVehicle currently in scene
        self._df: Optional[pd.DataFrame] = None
        self._frame = 0  # index into self._timestamps
        self._timedelta_s = timedelta_s
        self._timestamps = []
        self._session_name: Optional[str] = None
        self._transformer: Optional[Utm2CarlaMapper] = None

    def reset(self, session_name, seed=None):
        """Load a session table and pick an ego vehicle whose roundabout
        pass hashes into this recording's train/validation split.

        Returns (ego_id, timestamp_start_s, timestamp_end_s).
        """
        if self._df is not None:
            del self._df
        self._session_name = session_name
        with sqlite3.connect(self._dataset.db_path) as conn:
            df = pd.read_sql(f'select * from {session_name}', conn)
            # for now do not extract pedestrians, bicycles and trailers
            df = df[~df.CLASS.str.contains('Pedestrian|Bicycle|Trailer')]
            df = _resample_df(df, self._timedelta_s)
            self._timestamps = np.arange(df.TIMESTAMP.min(),
                                         df.TIMESTAMP.max() + self._timedelta_s,
                                         self._timedelta_s)
            self._df = df

        # search for train/validation roundabout pass
        dataset_mode = None
        if seed is not None:
            random.seed(seed)
        # Keep sampling ego candidates until one falls into our split.
        while dataset_mode != self._dataset_mode:
            ego_id, timestamp_start_s, timestamp_end_s = _find_ego_vehicle_with_time_frame(self.place, self._df)
            dataset_mode = _determine_split(session_name, ego_id, timestamp_start_s, timestamp_end_s)

        # Start one frame after the ego's entry timestamp (0.0001 is the
        # relative tolerance of the timestamp match).
        self._frame = np.where(np.isclose(self._timestamps, timestamp_start_s, 0.0001))[0][0] + 1
        self._env_vehicles = {}
        self._transformer = Utm2CarlaMapper(self.place)
        return ego_id, timestamp_start_s, timestamp_end_s

    def step(self) -> List[RealTrafficVehicle]:
        """Return vehicles visible at the current timestamp, then advance
        the recording by one frame."""
        timestamp_s = self._timestamps[self._frame]
        # Spawn vehicles whose trajectory contains the current timestamp.
        vehicles_current_ids = self._df[
            np.isclose(self._df.TIMESTAMP, timestamp_s)
        ].OBJID.to_list()
        for vehicle_id in vehicles_current_ids:
            if vehicle_id not in self._env_vehicles:
                # TODO: check if x/y smoothing is not required (in ngsim dataset there is smoothing in 15 frames wnd)
                new_vehicle_df = self._df[
                    (self._df.OBJID == vehicle_id) &
                    ((self._df.TIMESTAMP >= timestamp_s) | np.isclose(self._df.TIMESTAMP, timestamp_s))
                ]
                self._env_vehicles[vehicle_id] = OpenDDVehicle(new_vehicle_df, self._transformer)

        # Drop vehicles that have reached the end of their trajectories.
        self._env_vehicles = {k: v for k, v in self._env_vehicles.items() if not v.has_finished}

        real_traffic_vehicles = [v.as_real_traffic_car() for v in self._env_vehicles.values()]
        if real_traffic_vehicles:
            if len(real_traffic_vehicles) > 1:
                # Sanity check: all spawned vehicles report the same timestamp.
                assert all([
                    np.isclose(v1.timestamp_s, v2.timestamp_s)
                    for v1, v2 in more_itertools.windowed(real_traffic_vehicles, 2)
                ]), (
                    self._session_name,
                    [v.debug for v in real_traffic_vehicles],
                )
            assert np.isclose(real_traffic_vehicles[0].timestamp_s, timestamp_s), \
                (real_traffic_vehicles[0].timestamp_s, timestamp_s)

        self._frame += 1
        for v in self._env_vehicles.values():
            v.step()
        return real_traffic_vehicles

    def close(self):
        # Nothing to release; kept for interface symmetry with other recordings.
        pass

    @property
    def place(self) -> Place:
        # Session names are "<place>_<...>"; resolve the Place by prefix.
        place_name = self._session_name.split('_')[0]
        return self._dataset.places[place_name]

    @property
    def session_name(self) -> str:
        return self._session_name

    @property
    def timestamp_s(self) -> float:
        # Timestamp of the frame that the NEXT step() call will serve.
        return self._timestamps[self._frame]

    @property
    def transformer(self):
        return self._transformer

    def get_df_by_objid(self, ego_id):
        """Return all resampled session rows belonging to one object id."""
        return self._df[self._df.OBJID == ego_id]

    @property
    def has_finished(self):
        return self._frame >= len(self._timestamps) - 1
| deepsense-ai/carla-real-traffic-scenarios | carla_real_traffic_scenarios/opendd/recording.py | recording.py | py | 13,416 | python | en | code | 67 | github-code | 6 | [
{
"api_name": "carla_real_traffic_scenarios.utils.transforms.Transform",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "carla_real_traffic_scenarios.utils.transforms.Vector3",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "carla_real_traffic_scenarios.utils.... |
9975379557 | from django.conf.urls import url
from .views import *
# Course-app URL routes. NOTE(review): django.conf.urls.url is deprecated in
# modern Django (use re_path/path) — left as-is to match the file's imports.
urlpatterns = [
    # Course list
    url(r'^list/$', CourseListView.as_view(), name='list'),
    # Course detail
    url(r'^detail/(?P<course_id>\d+)/$', DetailView.as_view(), name='detail'),
    # Video information
    url(r'^info/(?P<course_id>\d+)/$', InfoView.as_view(), name='info'),
    # Course comments
    url(r'^comment/(?P<course_id>\d+)/$', CommentView.as_view(), name='comment'),
    # Add a comment
    url(r'^addcomment/$', AddComment.as_view(), name='addcomment'),
    # Single video page
    url(r'^video/(?P<video_id>\d+)/$', VideoView.as_view(), name='video'),
]
| Liyb5/web | EduOnline/BlueSky/apps/courses/urls.py | urls.py | py | 635 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.c... |
72935917627 | import os
from celery import Celery
from celery.schedules import crontab
# Make Django settings importable before the Celery app is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')

app = Celery('backend')

# Pull all CELERY_* settings from the Django settings module.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Periodic schedule: run the mail-date check daily at 01:00 (server time).
app.conf.beat_schedule = {
    'check_mail_everyday': {
        'task': 'emailService.task.check_dates_task',
        'schedule': crontab(hour=1, minute=00),
    }
}

# Discover task modules in all installed Django apps.
app.autodiscover_tasks()
{
"api_name": "os.environ.setdefault",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "celery.Celery",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "celery.schedules.cro... |
31535390686 | from functools import wraps
def vowel_filter(function):
    """Decorator: call *function* (expected to return an iterable of strings)
    and return only the items that are vowels (a, e, i, o, u, y),
    case-insensitively."""
    vowels = ["a", "e", "i", "o", "u", "y"]

    @wraps(function)
    def wrapper():
        # Fix: the result list used to live in the decorator's scope and was
        # shared across calls, so every repeated invocation appended the
        # vowels again and returned duplicated entries. Build it fresh here.
        return [ch for ch in function() if ch.lower() in vowels]

    return wrapper
@vowel_filter
def get_letters():
    # Source letters; the decorator filters this down to the vowels,
    # so the first call prints ['a', 'e'].
    return ["a", "b", "c", "d", "e"]

print(get_letters())
| iliyan-pigeon/Soft-uni-Courses | pythonProjectOOP/decorators/vowels_filter.py | vowels_filter.py | py | 416 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "functools.wraps",
"line_number": 8,
"usage_type": "call"
}
] |
5549915372 | from django.shortcuts import render, redirect
from django.utils import timezone
from .forms import ActivateSertificateForm
from programs.models import Category
from .models import Sertificate
# Create your views here.
def activate_sertificate(request):
    """Activate a certificate by its number for the current user.

    POST: look up the submitted number; if found and not yet active, bind it
    to request.user, stamp the activation date and redirect to 'success/'.
    An already-active number redirects to 'outdated/'; an unknown number (or
    any lookup error) redirects to 'unsuccess/'. GET (and an invalid POST
    form) renders the activation form page.
    """
    if request.method == "POST":
        form = ActivateSertificateForm(request.POST)
        if form.is_valid():
            sert = form.save(commit=False)  # not persisted; only used to read the number
            try:
                sertificate = Sertificate.objects.get(number = sert.number)
                if not sertificate.is_active:
                    sertificate.user = request.user
                    sertificate.activation_date = timezone.now()
                    sertificate.is_active = True
                    sertificate.save()
                    return redirect('success/')
                else:
                    return redirect('outdated/')
            except Exception as e:
                # NOTE(review): broad catch — any DB error is reported to the
                # user as an invalid certificate.
                return redirect('unsuccess/')
    else:
        form = ActivateSertificateForm()
    categories = Category.objects.all();
    return render(request, 'activation/check_sertificate.html', {'form': form, 'categories': categories })
categories = Category.objects.all();
return render(request, 'activation/check_sertificate.html', {'form': form, 'categories': categories })
def return_success_message(request):
    """Render the successful-activation page."""
    context = {'categories': Category.objects.all()}
    return render(request, 'activation/success.html', context)
def return_unsuccess_message(request):
    """Render the page shown when the certificate number is unknown."""
    context = {'categories': Category.objects.all()}
    return render(request, 'activation/unsuccess.html', context)
def return_outdated_message(request):
    """Render the page shown when the certificate was already activated."""
    context = {'categories': Category.objects.all()}
    return render(request, 'activation/outdated.html', context)
| vladisgrig/babeo | activation/views.py | views.py | py | 1,655 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "forms.ActivateSertificateForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Sertificate.objects.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Sertificate.objects",
"line_number": 15,
"usage_type": "attribute"
... |
23380015513 | from gensim.models.doc2vec import Doc2Vec
import pickle
def get_most_similar_docs(test_data, model_path,
                          tagged_data_path='./homework_6/data/tagged_data.pkl'):
    """Return the tags of the 5 training documents most similar to *test_data*.

    Args:
        test_data: whitespace-separated document text.
        model_path: path of a trained gensim Doc2Vec model.
        tagged_data_path: pickle of the TaggedDocument list the model was
            trained on (used only to print the matched texts). The default
            keeps the previously hard-coded location, so existing callers
            are unaffected.

    Returns:
        List of 5 document tags, most similar first.
    """
    # Load the Doc2Vec model
    model = Doc2Vec.load(model_path)

    # Split the test_data string into a list of words
    test_data_words = test_data.split()

    # Infer the vector for the test document
    inferred_vector = model.infer_vector(test_data_words)

    # SECURITY NOTE: pickle.load executes arbitrary code from the file —
    # only load pickles you trust.
    with open(tagged_data_path, 'rb') as file:
        loaded_tagged_data = pickle.load(file)

    # Get the 5 most similar documents based on the inferred vector
    sims = model.dv.most_similar([inferred_vector], topn=5)
    idx = [sims[i][0] for i in range(5)]

    # Print the most similar documents
    print('Test Document: «{}»\n'.format(' '.join(test_data_words)))
    print(u'SIMILAR DOCS PER MODEL %s:\n' % model)
    for label, index in [('1', 0), ('2', 1), ('3', 2), ('4', 3), ('5', 4)]:
        print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(loaded_tagged_data[int(sims[index][0])].words)))

    return idx
test_data = 'exotic yellow spice note meet lean lime pith light crisp nose old vine expression historic winery . meyer lemon rind juice show brightly palate grippy chalkiness complement rich lemon curd flavor'
get_most_similar_docs(test_data, './homework_6/models/doc2vec.model')
| Tokarevmm/homework5 | homework_6/recommend.py | recommend.py | py | 1,287 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gensim.models.doc2vec.Doc2Vec.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gensim.models.doc2vec.Doc2Vec",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 15,
"usage_type": "call"
}
] |
2559703297 | import os
from flask import Flask
from flask_jwt_extended import JWTManager
from flask_login import LoginManager
from .auth import ldap_handler
from .db import database
from .db.models import *
from .messages import messages
from .mocks import fake_ldap_handler
# ENV name -> dotted path of the Flask config object to load.
configuration_switch = {
    "default": "backend.config.DevConfig",  # Development configuration (fake LDAP)
    "staging": "backend.config.StagingConfig",  # Staging configuration (should be as close as possible to prod)
    "production": "backend.config.ProductionConfig",  # Production configuration
}
ENV = os.environ.get("ENV", "default")

# SET UP =====================================
LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.login_view = "auth.login"
LOGIN_MANAGER.login_message = messages.LOGIN_MANAGER_MESSAGE
# Development uses an in-memory fake LDAP so no directory server is needed.
LDAP = fake_ldap_handler.FAKE_LDAP if ENV == "default" else ldap_handler.LDAP
DB = database.DB
JWT_MANAGER = JWTManager()
# ===================================================
# ===================================================
def create_app(test_configuration=None, test_db=None):
    """Application factory method.

    Args:
        test_configuration: optional mapping of settings; when given it
            replaces the ENV-selected config object and *test_db* is used
            instead of the real database (tests must pass both together).
        test_db: database extension used when test_configuration is given.
    """
    app = Flask(__name__, static_folder="../build/static", template_folder="../build")

    # Configure the application
    if test_configuration:
        app.config.update(test_configuration)
    else:
        app.config.from_object(configuration_switch[ENV])

    # Register extensions ################################
    #       | | | | | | | | | | | | | | | |              #
    ######################################################
    LDAP.init_app(app)
    LOGIN_MANAGER.init_app(app)
    JWT_MANAGER.init_app(app)
    if test_configuration:
        test_db.init_app(app)
    else:
        DB.init_app(app)
    ############################################################
    # Imported here (not at module level) to avoid circular imports.
    from .views import auth, index, home, configuration, plugin
    app.register_blueprint(auth.bp)
    app.register_blueprint(index.bp)
    app.register_blueprint(home.bp)
    app.register_blueprint(configuration.bp)
    app.register_blueprint(plugin.bp)

    return app
| elliecherrill/diligent | backend/__init__.py | __init__.py | py | 2,021 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask_login.LoginManager",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "messages.mess... |
22167366155 | import time
from functools import wraps
from MatrixDecomposition import MatrixDecomposition
from MatrixGeneration import MatrixGeneration
def fn_timer(function):
    """Decorator that prints the wall-clock runtime of every call and
    passes the wrapped function's return value through unchanged."""

    @wraps(function)
    def function_timer(*args, **kwargs):
        started_at = time.time()
        result = function(*args, **kwargs)
        elapsed = time.time() - started_at
        print("Total time running '%s': %s seconds" % (function.__name__, str(elapsed)))
        return result

    return function_timer
class Analyzer:
    """Benchmarks the Gauss and Seidel solvers on generated test matrices,
    printing the runtime of each run via the fn_timer decorator."""

    @staticmethod
    def analyze_tridiagonal():
        # Time both solvers on tridiagonal systems of sizes 10, 50 and 100.
        Analyzer.analyze_gauss(10, MatrixGeneration.tridiagonal)
        Analyzer.analyze_seidel(10, MatrixGeneration.tridiagonal)
        Analyzer.analyze_gauss(50, MatrixGeneration.tridiagonal)
        Analyzer.analyze_seidel(50, MatrixGeneration.tridiagonal)
        Analyzer.analyze_gauss(100, MatrixGeneration.tridiagonal)
        Analyzer.analyze_seidel(100, MatrixGeneration.tridiagonal)

    @staticmethod
    def analyze_hilbert():
        # Time both solvers on Hilbert matrices of sizes 10, 50 and 100.
        Analyzer.analyze_gauss(10, MatrixGeneration.hilbert)
        Analyzer.analyze_seidel(10, MatrixGeneration.hilbert)
        Analyzer.analyze_gauss(50, MatrixGeneration.hilbert)
        Analyzer.analyze_seidel(50, MatrixGeneration.hilbert)
        Analyzer.analyze_gauss(100, MatrixGeneration.hilbert)
        Analyzer.analyze_seidel(100, MatrixGeneration.hilbert)

    @staticmethod
    @fn_timer
    def analyze_gauss(n, method):
        """Generate an n x n matrix with *method* and time a Gauss solve."""
        print(f'\'{method.__name__}\' {n}')
        matrix = method(n)
        right = MatrixGeneration.right(matrix)
        matrix_decomposition = MatrixDecomposition(matrix)
        gauss = matrix_decomposition.solve_by_gauss(right)  # result discarded; timing only

    @staticmethod
    @fn_timer
    def analyze_seidel(n, method):
        """Generate an n x n matrix with *method* and time a Seidel solve
        (tolerance 1e-3)."""
        print(f'\'{method.__name__}\' {n}')
        matrix = method(n)
        right = MatrixGeneration.right(matrix)
        matrix_decomposition = MatrixDecomposition(matrix)
        seidel = matrix_decomposition.solve_by_seidel(right, 1e-3)  # result discarded; timing only
| g3tawayfrom/appmath_lab4 | Analyzer.py | Analyzer.py | py | 1,953 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "MatrixGeneration.MatrixGeneration.... |
44502183300 | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms.validators import DataRequired, Length, ValidationError
from urllib.parse import urlparse
from app.models import Business
def business_name_exists(form, field):
    """WTForms validator: reject names already used by a stored Business."""
    existing = Business.query.filter(Business.name == field.data).first()
    if existing:
        raise ValidationError("Business with this name already exists.")
def valid_zip_code(form, field):
    """WTForms validator: require exactly five digit characters."""
    value = field.data
    if len(value) != 5 or not value.isdigit():
        raise ValidationError("Invalid ZIP Code.")
def valid_phone_number(form, field):
    """WTForms validator: require exactly ten digit characters."""
    value = field.data
    if len(value) != 10 or not value.isdigit():
        raise ValidationError("Invalid phone number.")
# def valid_url(form, field):
# try:
# result = urlparse(field.data)
# if not all([result.scheme, result.netloc]):
# raise ValueError()
# except ValueError:
# raise ValidationError("Invalid URL.")
class BusinessForm(FlaskForm):
    """Form validating a business create/update payload.

    NOTE(review): the custom validators defined above
    (business_name_exists, valid_zip_code, valid_phone_number) are all
    commented out on their fields — only length/required checks run.
    """
    # class Meta:
    #     csrf = False
    name = StringField('Business Name', validators=[
        Length(min=1, max=50),
        DataRequired(),
        # business_name_exists
    ])
    address = StringField('Address', validators=[
        Length(min=1, max=255),
        DataRequired()
    ])
    city = StringField('City', validators=[
        Length(min=1, max=50),
        DataRequired()
    ])
    state = StringField('State', validators=[
        Length(min=1, max=25),
        DataRequired()
    ])
    zip_code = StringField('Zip Code', validators=[
        Length(min=1, max=10),
        DataRequired(),
        # valid_zip_code
    ])
    phone_number = StringField('Phone Number', validators=[
        Length(min=1, max=30),
        DataRequired(),
        # valid_phone_number
    ])
    category_id = IntegerField('Category ID', validators=[DataRequired()])
    owner_id = IntegerField('Owner ID', validators=[DataRequired()])
    website = StringField('Website', validators=[
        Length(min=1, max=255),
        DataRequired(),
        # valid_url
    ])
    about = StringField('About', validators=[
        Length(min=1, max=500),
        DataRequired()
    ])
    type = StringField('Type', validators=[
        Length(min=1, max=255),
        DataRequired()
    ])
| stroud91/ReactFlaskProject | app/forms/bussiness_form.py | bussiness_form.py | py | 2,324 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.models.Business.query.filter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.models.Business.query",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "app.models.Business",
"line_number": 10,
"usage_type": "name"
},
{
... |
12697223783 | from telegram import Update
from telegram.ext import (
Updater,
CallbackContext,
run_async,
CommandHandler,
)
from utils import Config
from pkgutil import walk_packages
from types import ModuleType
from typing import Dict
from utils import get_filter
# Import every sibling module of this package at load time; each one is a
# bot-command module expected to expose describe() and register().
submodules: Dict[str, ModuleType] = {
    module_name: loader.find_module(module_name).load_module(module_name)
    for loader, module_name, is_package in walk_packages(__path__)
}
def describe():
    # Help text (Chinese): "List all commands; note that a listed command is
    # not necessarily available in the current environment."
    return "列出所有的指令, 需注意列出的指令在当前的环境内不一定可用"
def run(update: Update, context: CallbackContext):
    """Reply to the triggering message with the full command list."""
    update.effective_message.reply_text(
        # Header (Chinese): "All commands are as follows:"
        text="所有指令如下:\n"
        + "\n".join(
            [f"/{command}: {description}"
             for command, description in commands_list]
        ),
        quote=True
    )
# (command name, description) pairs: one per submodule plus this module itself.
commands_list = tuple(
    (name, module.describe())
    for name, module in submodules.items()
) + (
    (__name__, describe()),
)
def register(updater: Updater):
    """Register every submodule's handlers plus this module's own command,
    then publish the command list to Telegram."""
    for module in submodules.values():
        module.register(updater)
    dp = updater.dispatcher
    # Restrict the command to configured watcher chats/users.
    dp.add_handler(CommandHandler(
        __name__, run, filters=get_filter(Config.watchers), run_async=True))
    # dp.add_handler(CommandHandler(__name__, run, filters=Filters.all)) # DEBUG
    # * Unavailable until all commands are implemented (or at least their describe methods return a string with len > 3)
    updater.bot.set_my_commands(commands_list)
| finall1008/telegram-pusher-bot | commands/__init__.py | __init__.py | py | 1,483 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "types.ModuleType",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pkgutil.walk_packages",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "telegram.Update",
... |
18004211915 | import os
import numpy as np
import torch
import transforms3d
def plane2pose(plane_parameters):
    """Build a 4x4 float32 pose matrix from plane parameters (a, b, c, d).

    (a, b, c) becomes the pose's z-axis row and d is stored at pose[2, 3];
    the x/y-axis rows are constructed perpendicular to the normal.
    """
    normal = plane_parameters[:3]
    # Choose a tangent direction that stays well-conditioned w.r.t. the normal.
    tangent = np.zeros_like(normal)
    if normal[2] * normal[2] <= 0.5:
        tangent[0], tangent[1], tangent[2] = -normal[1], normal[0], 0
    else:
        tangent[0], tangent[1], tangent[2] = -normal[2], 0, normal[0]
    bitangent = np.cross(tangent, normal)
    pose = np.zeros([4, 4], dtype=np.float32)
    pose[0, :3] = bitangent
    pose[1, :3] = tangent
    pose[2, :3] = normal
    pose[2, 3] = plane_parameters[3]
    pose[3, 3] = 1
    return pose
def plane2euler(plane_parameters, axes='sxyz'):
    """Convert plane parameters into (translation, euler_angles) using the
    pose built by plane2pose; *axes* selects the Euler convention."""
    pose = plane2pose(plane_parameters)
    translation, rotation, _zoom, _shear = transforms3d.affines.decompose(pose)
    euler_angles = transforms3d.euler.mat2euler(rotation, axes=axes)
    return translation, euler_angles
{
"api_name": "numpy.zeros_like",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.cross",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_numb... |
21207331986 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import *
from .filters import PostFilter
from datetime import datetime
from .forms import PostForm
class PostList(ListView):
    """Paginated list of all posts, ordered by title."""
    model = Post
    ordering = 'title'
    template_name = 'news.html'
    context_object_name = 'news'  # name of the post list in the template context
    paginate_by = 10
class PostDetail(DetailView):
    """Detail page for a single post."""
    model = Post
    template_name = 'post.html'
    context_object_name = 'post'  # name of the post object in the template context
class PostSearch(ListView):
    """Filterable, paginated list of posts (the search page)."""
    model = Post
    ordering = ['dateCreation']
    template_name = 'search.html'
    context_object_name = 'news'
    paginate_by = 10
    form_class = PostForm

    def get_queryset(self):
        # Apply the PostFilter built from the GET parameters to the base queryset.
        queryset = super().get_queryset()
        self.filterset = PostFilter(self.request.GET, queryset)
        return self.filterset.qs

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Expose the filterset so the template can render the search form.
        context['filterset'] = self.filterset
        return context
# class PostSearch(ListView):
# model = Post
# template_name = "search.html"
# context_object_name = "news"
# ordering = ["dateCreation"]
# paginate_by = 10
# form_class = PostForm
#
# def get_filter(self):
# return PostFilter(self.request.GET, queryset=super().get_queryset())
#
# def get_queryset(self):
# return self.get_filter().qs
#
# def get_context_data(self, *args, **kwargs):
# return {
# **super().get_context_data(*args, **kwargs),
# "filterset": self.get_filter(),
# }
# def create_post(request):
# if request.method == 'POST':
# form = PostForm(request.POST)
# form.save()
# return HttpResponseRedirect('/news/')
# form = PostForm()
# return render(request, 'post_edit.html', {'form':form})
class PostCreate(PermissionRequiredMixin, CreateView):
    """Create a new post; requires the 'news.add_post' permission."""
    permission_required = ('news.add_post',)
    raise_exception = True  # respond with 403 instead of redirecting to login
    form_class = PostForm
    model = Post
    template_name = 'post_edit.html'
class PostUpdate(PermissionRequiredMixin, UpdateView):
    """Edit an existing post; requires the 'news.change_post' permission."""
    permission_required = ('news.change_post',)
    form_class = PostForm
    model = Post
    template_name = 'post_edit.html'
class PostDelete(PermissionRequiredMixin, DeleteView):
    """Delete a post; requires the 'news.delete_post' permission."""
    permission_required = ('news.delete_post',)
    model = Post
    template_name = 'post_delete.html'
    success_url = reverse_lazy('post_list')  # back to the post list after deletion
{
"api_name": "django.views.generic.ListView",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 36,
"usage_type": "name"
}... |
27831010517 | import numpy as np
import netCDF4 as nc4
import net_radiation
import atmospheric_parameters
import wind_shear_velocity
import datetime
# Using TerraClimate
# 2.5 arcminute (1/24 degree) resolution: ~5 km N-S
# Import step
# ... load files here or with a CLI
years = range(1958, 2019)
months_zero_indexed = range(12)
TerraClimateDir = '/media/andy/data1/TerraClimate/'
def extract_data(varnc, varname, varmonth_zero_indexed=None):
    """Pull a variable out of a netCDF dataset as a plain array.

    Reads the whole variable, or a single month's slice when
    varmonth_zero_indexed is given, and replaces fill values with NaN.
    """
    source = varnc.variables[varname]
    if varmonth_zero_indexed is None:
        masked = source[:]
    else:
        masked = source[varmonth_zero_indexed]
    values = masked.data
    # Fill values mark missing cells; convert them to NaN for numpy math.
    values[values == masked.fill_value] = np.nan
    return values
# Get lats and lons from one file
srad_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_srad_1958.nc')
lats = extract_data(srad_nc, 'lat')
lons = extract_data(srad_nc, 'lon')
#LONS, LATS = np.meshgrid (lons, lats)
# Shear velocity of winds: tool to compute from velocity
ustar_interp = wind_shear_velocity.create_lookup_table_one_step()
# Elevation
elevation_nc = nc4.Dataset(TerraClimateDir+'Gebco_2020_2_point_5_arcminute.nc')
elevation = extract_data(elevation_nc, 'value')
elevation = elevation[::-1]
# Heat capacity of air
specific_heat_capacity_of_air = 1.005 # approx. constant at 1 atm
# Humidity minor impact below 40C or so
# But this is an approximation!
cp = specific_heat_capacity_of_air # Easier
# Water density
rho_w = 1000.
# Latent heat of vaporization for water
Lv = 2.5E6
DeltaH_vap = Lv # to make me happier
# Ratio of molecular weight of water vapor to dry air
epsilon = 0.622
# Days in month, for weighting
days_in_month = [31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# Evaporation array
E = np.zeros(elevation.shape)
years = years[:13] # 1970 and before: before so much GW
for year in years:
print(year)
# Incoming solar radiation (monthly average)
srad_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_srad_'+str(year)+'.nc')
# Maximum daily temperature (monthly average)
tmax_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_tmax_'+str(year)+'.nc')
# Minimum daily temperature (monthly average)
tmin_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_tmin_'+str(year)+'.nc')
# Wind speed (monthly average)
ws_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_ws_'+str(year)+'.nc')
# Vapor pressure (monthly average)
vap_nc = nc4.Dataset(TerraClimateDir+'TerraClimate_vap_'+str(year)+'.nc')
# Now compute for each month
for month_zi in months_zero_indexed:
print(month_zi+1)
# Data
srad = extract_data(srad_nc, 'srad', month_zi)
tmax = extract_data(tmax_nc, 'tmax', month_zi)
tmin = extract_data(tmin_nc, 'tmin', month_zi)
ws = extract_data(ws_nc, 'ws', month_zi)
vap = extract_data(vap_nc, 'vap', month_zi) * 1000. # kPa to Pa
# Average radiation on the midday of the month; could be more precise
date = datetime.date(year, month_zi+1, int(np.round(days_in_month[month_zi]/2.)))
#elevation = 2000. # placeholder
#julian_day = 205 # placeholder
#vap = .03*101325 # placeholder
albedo = 0.06
# Calculations:
# Net Radiation
Rn = net_radiation.computeNetRadiation(elevation, date, lats, len(lons),
tmax, tmin, vap, srad, albedo)
# Shear velocity of winds
ustar = ustar_interp(ws)
# Vapor-pressure deficit
# We don't have max and min humidity
VPD = atmospheric_parameters.compute_vpd( (tmax+tmin)/2., vap )
# Atmospheric pressure
P = atmospheric_parameters.compute_atmospheric_pressure(elevation)
# Atmospheric density (ignoring temperature + humidity effects)
rho_a = atmospheric_parameters.compute_atmospheric_density(elevation,
(tmax + tmin)/2.)
# Clausius-Clayperon phase-change slope
Delta = ( atmospheric_parameters.compute_Delta_e_sat( tmax )
+ atmospheric_parameters.compute_Delta_e_sat( tmin ) ) / 2.
_E = (Rn + cp*rho_a*ustar**2/(Delta*ws) * VPD) \
/ ( rho_w*Lv + P*cp*rho_w/epsilon )
_E[_E<0] = 0 # ignore condensation; I think it's spurious (Antarctica?)
E += _E*days_in_month[month_zi]
E /= (365.25*len(years))
# Export
from netCDF4 import Dataset
import numpy
import time
#path to the file you want to open or create
location_string="evaporation_002p5_arcmin.nc"
# Create nc file
rootgrp = Dataset(location_string,"w",format="NETCDF4")
# Dimensions
lat=rootgrp.createDimension("lat", len(lats))
lon=rootgrp.createDimension("lon", len(lons))
value = rootgrp.createDimension("evaporation", None)
# Values
latitudes = rootgrp.createVariable("lat", "f4", ("lat",))
longitudes = rootgrp.createVariable("lon", "f4", ("lon",))
values = rootgrp.createVariable("value", "f4" , ("lat", "lon",))
latitudes[:] = lats
longitudes[:] = lons
values[:] = E
# Units
latitudes.units = "degrees north"
longitudes.units = "degrees east"
values.units = "metres per second"
# Metadata
rootgrp.description = "Evaporation derived from TerraClimate data products (see https://github.com/umn-earth-surface/TerraClimate-potential-open-water-evaporation)."
rootgrp.history = "created" + time.ctime(time.time())
values.Long_Name = "Open-water evaporation"
# Save
rootgrp.close()
| MNiMORPH/TerraClimate-potential-open-water-evaporation | penman.py | penman.py | py | 5,465 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.nan",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "wind_shear_velocity.create_lookup_table_one_step",
"line_number": 34,
"usage_type": "call"
},
{
"a... |
33702704860 | import openpyxl as xl
import xlwings as xw
from Worksheet import Worksheet,QPreviewItem
from Workcell import *
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from copy import copy
#import time
import datetime
##################################################
# class for PS sheet handling
##################################################
UP = 0
DOWN = 1
nn = 0
class PSsheet(Worksheet):
def __init__(self,sheet = None,sheet_wr = None):
super(PSsheet,self).__init__(sheet,sheet_wr)
self._preview_model_list = []
self._extended_preview_model_list = None
self.init_ps_sheet()
self.init_ps_model()
def __del__(self):
super(PSsheet,self).__del__()
del self._preview_model
del self._extended_preview_model_list
def init_ps_sheet(self):
if self._xmlname != None:
self._status = self.search_header_by_value(u'Status(POR,INIT,PREV)')
self._subject_matter = self.search_header_by_value(u'Subject Matter/\nFunctional Area')
self._container_name = self.search_header_by_value(u'Container Name\nTechnical Specification')
def init_ps_model(self):
self._preview_model = QStandardItemModel()
self._preview_model.setColumnCount(4)
self._preview_model.setHeaderData(0,Qt.Horizontal,'status')
self._preview_model.setHeaderData(1,Qt.Horizontal,'subject matter')
self._preview_model.setHeaderData(2,Qt.Horizontal,'container name')
self._preview_model.setHeaderData(3,Qt.Horizontal,'xmlname')
self._extended_preview_model = None
def update_model(self):
super(PSsheet,self).update_model()
self._preview_model_list = []
self.init_ps_model()
self.init_ps_sheet()
if self._xmlname != None:
try:
for xml_name in self.xml_names():
item_status = QPreviewItem(self._status.get_item_by_xmlname(xml_name))
item_subject_matter = QPreviewItem(self._subject_matter.get_item_by_xmlname(xml_name))
item_container_name = QPreviewItem(self._container_name.get_item_by_xmlname(xml_name))
item_xml_name = QPreviewItem(xml_name)
self._preview_model.appendRow((item_status,item_subject_matter,item_container_name,item_xml_name))
self._preview_model_list.append((item_status.value,item_subject_matter.value,item_container_name.value,item_xml_name.value))
except:
return 'error'
def status(self):
cells = list(self._worksheet.iter_cols(min_col=self._status.col,min_row=self._status.row+1,max_col=self._status.col,max_row=self.max_row).next())
while cells[-1].value == None:
cells.pop()
return map(lambda x:Status(x,self._worksheet_wr),cells)
def cell(self,row,col):
#return self._worksheet_wr.range(row,col)
return self._worksheet.cell(row=row,column=col)
def auto_fit(self,cols):
for col in cols:
for i in range(1):
self._worksheet_wr.api.Columns(col).AutoFit()
def add_row(self,start_pos,offset,orientation):
loop = offset
while loop > 0:
self._worksheet_wr.api.Rows[start_pos].Insert(-4121)
loop -= 1
def delete_row(self,start_pos,offset):
self._worksheet_wr.range('%d:%d'%(start_pos,start_pos+offset-1)).api.Delete()
def lock_row(self,row,status):
self._worksheet_wr.api.Rows[row-1].Locked = True
def lock_sheet(self):
self._worksheet_wr.api.Protect()
def lock_sheet_status(self):
return self._worksheet_wr.api.ProtectContents
def unlock_sheet(self):
self._worksheet_wr.api.Unprotect()
def unlock_all_cells(self):
self._worksheet_wr.api.Cells.Locked = False
def extended_preview_model(self):
if self._extended_preview_model == None:
self._extended_preview_model = QStandardItemModel()
self._extended_preview_model.setColumnCount(self._worksheet.max_column)
for row in self._worksheet.rows:
item_row = []
for cell in row:
try:
if cell.value == None:
item = QStandardItem('')
else:
item = QStandardItem(cell.value)
except:
item = QStandardItem('')
item_row.append(item)
self._extended_preview_model.appendRow(item_row)
return self._extended_preview_model
@property
def extended_preview_model_list(self):
if self._extended_preview_model_list == None:
self._extended_preview_model_list = []
for row in self._worksheet.iter_rows(min_row=self.min_row,max_row=self.max_row,min_col=self.min_col,max_col=self.max_col):
item_row = []
for cell in row:
#item_row.append(cell.value if cell.value is not None else '')
try:
#item_row.append('' if cell.value is None else str(cell.value) if type(cell.value) == long else '%s-%s-%s'%((cell+datetime.delta(days=1)).timetuple().tm_year,(cell+datetime.delta(days=1)).timetuple().tm_mon,(cell+datetime.delta(days=1)).timetuple().tm_mday) if type(cell) is datetime.datetime else (cell.value))
item_row.append('' if cell.value is None else str('%s-%s-%s'%((cell.value+datetime.timedelta(days=1)).timetuple().tm_year,(cell.value+datetime.timedelta(days=1)).timetuple().tm_mon,(cell.value+datetime.timedelta(days=1)).timetuple().tm_mday)) if type(cell.value) is datetime.datetime else str(cell.value))
except:
item_row.append('')
self._extended_preview_model_list.append(item_row)
return self._extended_preview_model_list
@property
def preview_model(self):
return self._preview_model_list
| DericGitHub/excel-operator | model/PSsheet.py | PSsheet.py | py | 6,063 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Worksheet.Worksheet",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "Worksheet.QPreviewItem",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "Worksheet.QPreviewItem",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "Wor... |
35164165716 | #!/usr/bin/python3
import subprocess
import json
import requests
import time
import logging
import os
#bin Paths
ipfspath = '/usr/local/bin/ipfs'
wgetpath = '/usr/bin/wget'
wcpath = '/usr/bin/wc'
#Basic logging to ipfspodcastnode.log
logging.basicConfig(format="%(asctime)s : %(message)s", datefmt="%Y-%m-%d %H:%M:%S", filename="ipfspodcastnode.log", filemode="w", level=logging.INFO)
#Create an empty email.cfg (if it doesn't exist)
if not os.path.exists('cfg/email.cfg'):
with open('cfg/email.cfg', 'w') as ecf:
ecf.write('')
#Init IPFS (if necessary)
if not os.path.exists('ipfs/config'):
logging.info('Initializing IPFS')
ipfs_init = subprocess.run(ipfspath + ' init', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#Start WebUI
import webui
logging.info('Starting Web UI')
#Automatically discover relays and advertise relay addresses when behind NAT.
swarmnat = subprocess.run(ipfspath + ' config --json Swarm.RelayClient.Enabled true', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#Start IPFS
daemon = subprocess.run(ipfspath + ' daemon >/dev/null 2>&1 &', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logging.info('Starting IPFS Daemon')
time.sleep(10)
#Get IPFS ID
with open('ipfs/config', 'r') as ipcfg:
ipconfig = ipcfg.read()
jtxt = json.loads(ipconfig)
logging.info('IPFS ID : ' + jtxt['Identity']['PeerID'])
#Main loop
while True:
#Request payload
payload = { 'version': 0.6, 'ipfs_id': jtxt['Identity']['PeerID'] }
#Read E-mail Config
with open('cfg/email.cfg', 'r') as ecf:
email = ecf.read()
if email == '':
email = 'user@example.com'
payload['email'] = email
#Check if IPFS is running, restart if necessary.
payload['online'] = False
diag = subprocess.run(ipfspath + ' diag sys', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if diag.returncode == 0:
ipfs = json.loads(diag.stdout)
payload['ipfs_ver'] = ipfs['ipfs_version']
payload['online'] = ipfs['net']['online']
if payload['online'] == False:
#Start the IPFS daemon
daemon = subprocess.run(ipfspath + ' daemon >/dev/null 2>&1 &', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logging.info('@@@ IPFS NOT RUNNING !!! Restarting Daemon @@@')
#Get Peer Count
peercnt = 0
speers = subprocess.run(ipfspath + ' swarm peers|wc -l', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if speers.returncode == 0:
peercnt = speers.stdout.decode().strip()
payload['peers'] = peercnt
#Request work
logging.info('Requesting Work...')
try:
response = requests.post("https://IPFSPodcasting.net/Request", timeout=120, data=payload)
work = json.loads(response.text)
logging.info('Response : ' + str(work))
except requests.RequestException as e:
logging.info('Error during request : ' + str(e))
work = { 'message': 'Request Error' }
if work['message'] == 'Request Error':
logging.info('Error requesting work from IPFSPodcasting.net (check internet / firewall / router).')
elif work['message'][0:7] != 'No Work':
if work['download'] != '' and work['filename'] != '':
logging.info('Downloading ' + str(work['download']))
#Download any "downloads" and Add to IPFS (1hr48min timeout)
try:
hash = subprocess.run(wgetpath + ' -q --no-check-certificate "' + work['download'] + '" -O - | ' + ipfspath + ' add -q -w --stdin-name "' + work['filename'] + '"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=6500)
hashcode = hash.returncode
except subprocess.SubprocessError as e:
logging.info('Error downloading/pinning episode : ' + str(e))
hashcode = 99
if hashcode == 0:
#Get file size (for validation)
downhash=hash.stdout.decode().strip().split('\n')
size = subprocess.run(ipfspath + ' cat ' + downhash[0] + ' | ' + wcpath + ' -c', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
downsize=size.stdout.decode().strip()
logging.info('Added to IPFS ( hash : ' + str(downhash[0]) + ' length : ' + str(downsize) + ')')
payload['downloaded'] = downhash[0] + '/' + downhash[1]
payload['length'] = downsize
else:
payload['error'] = hashcode
if work['pin'] != '':
#Directly pin if already in IPFS
logging.info('Pinning hash (' + str(work['pin']) + ')')
try:
pin = subprocess.run(ipfspath + ' pin add ' + work['pin'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=6500)
pincode = pin.returncode
except subprocess.SubprocessError as e:
logging.info('Error direct pinning : ' + str(e))
#Clean up any other pin commands that may have spawned
cleanup = subprocess.run('kill `ps aux|grep "ipfs pin ad[d]"|awk \'{ print $2 }\'`', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pincode = 98
if pincode == 0:
#Verify Success and return full CID & Length
pinchk = subprocess.run(ipfspath + ' ls ' + work['pin'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if pinchk.returncode == 0:
hashlen=pinchk.stdout.decode().strip().split(' ')
payload['pinned'] = hashlen[0] + '/' + work['pin']
payload['length'] = hashlen[1]
else:
payload['error'] = pinchk.returncode
else:
payload['error'] = pincode
if work['delete'] != '':
#Delete/unpin any expired episodes
logging.info('Unpinned old/expired hash (' + str(work['delete']) + ')')
delete = subprocess.run(ipfspath + ' pin rm ' + work['delete'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
payload['deleted'] = work['delete']
#Report Results
logging.info('Reporting results...')
#Get Usage/Available
repostat = subprocess.run(ipfspath + ' repo stat -s|grep RepoSize', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if repostat.returncode == 0:
repolen = repostat.stdout.decode().strip().split(':')
used = int(repolen[1].strip())
else:
used = 0
payload['used'] = used
df = os.statvfs('/')
payload['avail'] = df.f_bavail * df.f_frsize
try:
response = requests.post("https://IPFSPodcasting.net/Response", timeout=120, data=payload)
except requests.RequestException as e:
logging.info('Error sending response : ' + str(e))
else:
logging.info('No work.')
#wait 10 minutes then start again
logging.info('Sleeping 10 minutes...')
time.sleep(600)
| Cameron-IPFSPodcasting/podcastnode-Umbrel | ipfspodcastnode.py | ipfspodcastnode.py | py | 6,566 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
30980411340 | from matplotlib.pyplot import draw
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()
# set screen resolution
resolution = (725,725)
# open a screen of above resolution
screen = pygame.display.set_mode(resolution)
# defining palette colours (global variables) as dictionary
gameColours={
'green': (101, 155, 94),
'dG': (73, 113, 69),
'red': (200, 70,48),
'dR' : (135, 47, 31)
}
# storing screen variable values
width = screen.get_width()
height = screen.get_height()
# text_on_screen() not affected by mouse position variables
# Game main menu, with start and exit buttons
def main_menu():
    """Render the main-menu play/quit buttons, darkened while hovered."""
    # Current mouse position drives the hover effect.
    mx, my = pygame.mouse.get_pos()

    # Button geometry: play on the left, quit to its right.
    playButton = pygame.Rect(width / 7, (height / 2), 200, 100)
    quitButton = pygame.Rect(((width / 7) + 200 + width / 7), (height / 2), 200, 100)

    # Darken each button while the pointer is over it.
    play_colour = 'dG' if playButton.collidepoint((mx, my)) else 'green'
    pygame.draw.rect(screen, gameColours[play_colour], playButton)

    quit_colour = 'dR' if quitButton.collidepoint((mx, my)) else 'red'
    pygame.draw.rect(screen, gameColours[quit_colour], quitButton)
# tableGen() not affected by mouse position variables
# getCatPath() not affected by mouse position variables
# updateScore() not affected by mouse position variables
# displayScore() not affected by mouse position variables
# Begins the "lets roll" screen of game, with button to start
def gameTime(score):
    """Render the roll button for the "lets roll" game screen, with hover effect."""
    mx, my = pygame.mouse.get_pos()

    rollButton = pygame.Rect(width / 3, (height - 100), 225, 70)
    # Darken the button while the pointer is over it.
    roll_colour = 'dR' if rollButton.collidepoint((mx, my)) else 'red'
    pygame.draw.rect(screen, gameColours[roll_colour], rollButton)
# checkWinner() not affected by mouse position variables
# getDice() not affected by mouse position variables
# showDice() not affected by mouse position variables
# gameLogic() not affected by mouse position variables
# user won
def winScreen(die1, die2, num, score):
    """Render play-again/quit buttons on the win screen, with hover effects."""
    mx, my = pygame.mouse.get_pos()

    againButton = pygame.Rect(width / 7, (height - 70), 225, 50)
    quitButton = pygame.Rect(width / 7 + 300, (height - 70), 225, 50)

    # Darken whichever button the pointer is over.
    again_colour = 'dG' if againButton.collidepoint((mx, my)) else 'green'
    pygame.draw.rect(screen, gameColours[again_colour], againButton)

    quit_colour = 'dR' if quitButton.collidepoint((mx, my)) else 'red'
    pygame.draw.rect(screen, gameColours[quit_colour], quitButton)
# screen for when user loses
def loseScreen(die1, die2, num, score):
    """Render play-again/quit buttons on the lose screen, with hover effects."""
    againButton = pygame.Rect(width / 7, (height - 70), 225, 50)
    quitButton = pygame.Rect(width / 7 + 300, (height - 70), 225, 50)
    mx, my = pygame.mouse.get_pos()

    # Draw each button, darkened while the pointer is over it.
    for rect, base, hover in ((againButton, 'green', 'dG'), (quitButton, 'red', 'dR')):
        colour = hover if rect.collidepoint((mx, my)) else base
        pygame.draw.rect(screen, gameColours[colour], rect)
# screen for when computer and user dice are equal
def drawScreen(die1, num, score):
    """Render play-again/quit buttons on the draw (tie) screen, with hover effects."""
    againButton = pygame.Rect(width / 7, (height - 70), 225, 50)
    quitButton = pygame.Rect(width / 7 + 300, (height - 70), 225, 50)
    mx, my = pygame.mouse.get_pos()

    # Draw each button, darkened while the pointer is over it.
    for rect, base, hover in ((againButton, 'green', 'dG'), (quitButton, 'red', 'dR')):
        colour = hover if rect.collidepoint((mx, my)) else base
        pygame.draw.rect(screen, gameColours[colour], rect)
| jessica-leishman/high-rollers | analysis_static/manual slices/hrStatic3.py | hrStatic3.py | py | 4,288 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode"... |
15298986512 | #!/usr/bin/python3.6
import requests, json, datetime
from time import sleep
# Poll the Mercado Bitcoin ticker every 10 seconds and print the BTC buy price.
try:
    while True:
        req = requests.get('https://www.mercadobitcoin.net/api/BTC/ticker/')
        cot = json.loads(req.text)
        d = datetime.datetime.now()
        print(d.strftime('%c'))
        # The API returns the price as a string; keep only the first 8 characters.
        print('BTC:', cot['ticker']['buy'][:8])
        sleep(10)
        print('')
except requests.RequestException:
    # Catch only network-level failures: the original bare `except:` also
    # swallowed Ctrl+C and programming errors with this misleading message.
    print("Failed to establish a connection.")
| andreMarqu3s/bit_value | cotacao.py | cotacao.py | py | 387 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"... |
def create_piechart():
    """Read sentiment data from the 'tabla_1' table and save a pie chart.

    Writes 'piechart.png' in the project root showing the relative
    frequency of each value in the 'sentiment' column (negative,
    positive, neutral). Prints an error and returns early if the
    database cannot be read.
    """
    # Imports kept local so the module can be imported without these deps.
    import pandas as pd
    import matplotlib.pyplot as plt
    from config import engine

    # Try to read the "tabla_1" table from the database.
    try:
        with engine.connect() as connection:
            df = pd.read_sql_table('tabla_1', connection)
    except Exception as e:
        print('Ocurrió un error al intentar conectar o leer la base de datos:')
        print(str(e))
        return

    # Count how often each sentiment value occurs in the 'sentiment' column.
    sentiment_counts = df['sentiment'].value_counts()

    # Configure and draw the pie chart.
    plt.figure(figsize=(10, 5))
    plt.pie(sentiment_counts, labels=sentiment_counts.index, autopct='%1.1f%%')
    plt.title('Sentiment Analysis Pie Chart')

    # Save the generated chart as a PNG in the project root.
    plt.savefig("piechart.png")
    plt.close()  # release the figure so repeated calls do not accumulate memory

    # Uncomment to also display the chart on screen:
    # plt.show()
| NebyX1/data-science-engineering-end-to-end-project-bootcamp-milei-twitter-scraping | piechart_script.py | piechart_script.py | py | 1,239 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "config.engine",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "config.engine.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "config.engine",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pandas.read_sql_tabl... |
25018507922 | import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
current_dir = os.path.dirname(os.path.abspath(__file__))
def encrypt_data(data, key_path='public.pem'):
    """RSA-OAEP encrypt *data* with the public key; return base64 text."""
    key_file = os.path.join(current_dir, key_path)

    # Load the PEM-encoded public key from disk.
    with open(key_file, 'rb') as fh:
        public_key = serialization.load_pem_public_key(
            fh.read(),
            backend=default_backend()
        )

    # OAEP padding with SHA-256 for both the mask-generation function and the hash.
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None
    )
    ciphertext = public_key.encrypt(data.encode('utf-8'), oaep)

    # Base64-encode so the ciphertext is safe to store or transmit as text.
    return base64.b64encode(ciphertext).decode('utf-8')
def decrypt_data(encoded_encrypted_data, key_path='private.pem'):
    """Base64-decode and RSA-OAEP decrypt data with the private key."""
    key_file = os.path.join(current_dir, key_path)

    # Load the PEM-encoded private key from disk (stored without a passphrase).
    with open(key_file, 'rb') as fh:
        private_key = serialization.load_pem_private_key(
            fh.read(),
            password=None,
            backend=default_backend()
        )

    ciphertext = base64.b64decode(encoded_encrypted_data)

    # OAEP parameters must mirror those used by encrypt_data.
    plaintext = private_key.decrypt(
        ciphertext,
        padding.OAEP(
            mgf=padding.MGF1(algorithm=hashes.SHA256()),
            algorithm=hashes.SHA256(),
            label=None
        )
    )
    return plaintext.decode('utf-8')
| ivana-dodik/Blockchain | EP --zadatak 02/crypto.py | crypto.py | py | 1,898 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
8267514816 | from __future__ import annotations
from kombu.pools import producers
from .queues import task_exchange
priority_to_routing_key = {
'high': 'hipri',
'mid': 'midpri',
'low': 'lopri',
}
def send_as_task(connection, fun, args=(), kwargs=None, priority='mid'):
    """Publish a task message onto the task exchange.

    Args:
        connection: kombu Connection used to acquire a pooled producer.
        fun: callable to execute on the worker side.
        args: positional arguments for ``fun``.
        kwargs: keyword arguments for ``fun`` (defaults to an empty dict).
        priority: 'high' | 'mid' | 'low'; mapped to a routing key.

    Raises:
        KeyError: if ``priority`` is not a known level.
    """
    # Use a None sentinel instead of a shared mutable default argument.
    payload = {'fun': fun, 'args': args,
               'kwargs': kwargs if kwargs is not None else {}}
    routing_key = priority_to_routing_key[priority]

    with producers[connection].acquire(block=True) as producer:
        producer.publish(payload,
                         serializer='pickle',
                         compression='bzip2',
                         exchange=task_exchange,
                         declare=[task_exchange],
                         routing_key=routing_key)
if __name__ == '__main__':
    from kombu import Connection

    from .tasks import hello_task

    # Demo: publish a single high-priority hello_task to a local broker.
    connection = Connection('amqp://guest:guest@localhost:5672//')
    send_as_task(connection, fun=hello_task, args=('Kombu',), kwargs={},
                 priority='high')
{
"api_name": "kombu.pools.producers",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "queues.task_exchange",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "queues.task_exchange",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "kombu... |
70943725628 | import json
import os
class FolderWalker:
    """
    Check folder with results. Walk through the folders and define paths to various files.
    If any values are not counted in one of the frameworks, they will be excluded in competitors.
    Thus, the class ensures consistency of results in the analysis.
    """

    def __init__(self, working_dir: str):
        self.working_dir = os.path.abspath(working_dir)
        path_to_config_json = os.path.join(self.working_dir, 'configuration.json')
        with open(path_to_config_json) as file:
            config_info = json.load(file)

        # Benchmark configuration: which datasets / libraries were run and how often.
        self.datasets = config_info['Datasets']
        self.launches = config_info['Launches']
        self.libraries = config_info['Libraries to compare']
        self.clip_border = config_info['Clip border']

        # Each dict maps a case_id ('dataset|launch|library') to a list of file paths.
        self.forecast_files = {}
        self.timeout_files = {}
        self.additional_files = {}
        for dataset in self.datasets:
            for launch in range(self.launches):
                for library in self.libraries:
                    launch_number = f'launch_{launch}'
                    case_id = f'{dataset}|{launch}|{library}'
                    # Results live under working_dir/<dataset>/launch_<n>/<library>.
                    validation_case_path = os.path.join(self.working_dir, dataset, launch_number, library)

                    all_forecasted_paths = self.find_files(validation_case_path,
                                                           search_pattern='forecast_vs_actual.csv')
                    self.forecast_files.update({case_id: all_forecasted_paths})

                    all_timeouts_paths = self.find_files(validation_case_path,
                                                         search_pattern='timeouts.json')
                    self.timeout_files.update({case_id: all_timeouts_paths})

                    all_additional_paths = self.find_additional_files(validation_case_path)
                    if all_additional_paths is not None:
                        self.additional_files.update({case_id: all_additional_paths})

        # Keep only files present in every launch/library combination.
        self.exclude_mismatched_results()

    def exclude_mismatched_results(self):
        """
        In some cases it is not possible to get results for some cases (dataset -
        launch number - library - time series - forecast horizon). So there is a
        need to exclude cases without calculations
        """
        for dataset in self.datasets:
            # First cycle - collect information
            dataset_execution_time = []
            dataset_forecast = []
            for launch in range(self.launches):
                for library in self.libraries:
                    case_id = f'{dataset}|{launch}|{library}'
                    # Compare by file name only; the folders differ per case.
                    ex_time_files = set(map(lambda x: os.path.basename(x), self.timeout_files[case_id]))
                    forecast_files = set(map(lambda x: os.path.basename(x), self.forecast_files[case_id]))

                    dataset_execution_time.append(ex_time_files)
                    dataset_forecast.append(forecast_files)

            # Find intersection for all cases
            dataset_execution_time = set.intersection(*dataset_execution_time)
            dataset_forecast = set.intersection(*dataset_forecast)

            # Second cycle - update info
            for launch in range(self.launches):
                for library in self.libraries:
                    case_id = f'{dataset}|{launch}|{library}'
                    # Recover this case's folder from one of its known files.
                    ex_time_file = self.timeout_files[case_id][0]
                    current_path = os.path.dirname(ex_time_file)

                    upd_time_paths = add_path_to_files(current_path, dataset_execution_time)
                    upd_forecasts = add_path_to_files(current_path, dataset_forecast)
                    self.timeout_files.update({case_id: upd_time_paths})
                    self.forecast_files.update({case_id: upd_forecasts})

    @staticmethod
    def find_files(folder_with_files: str, search_pattern: str):
        """ Find all files in the folder and return full paths """
        files = os.listdir(folder_with_files)
        files.sort()
        all_paths = []
        for file in files:
            if search_pattern in file:
                all_paths.append(os.path.join(folder_with_files, file))
        return all_paths

    @staticmethod
    def find_additional_files(folder_with_files: str):
        """ Search for unusual files in saved folder - additional info """
        files = os.listdir(folder_with_files)
        files.sort()
        extra_paths = []
        for file in files:
            # Anything that is not a forecast or timeout file counts as extra info.
            if 'timeouts.json' not in file and 'forecast_vs_actual.csv' not in file:
                extra_paths.append(os.path.join(folder_with_files, file))

        if len(extra_paths) == 0:
            return None
        return extra_paths
def add_path_to_files(current_path: str, files: set):
""" In set with file names for each file add folder path """
updated_data = []
for file in files:
updated_data.append(os.path.join(current_path, file))
updated_data.sort()
return updated_data
| ITMO-NSS-team/pytsbe | pytsbe/report/walk.py | walk.py | py | 5,028 | python | en | code | 30 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
23039179017 | import cv2
import numpy as np
from hand import HandRecognizer
from args import OLD_FONT_THRESHOLD
class OldRecognizer(HandRecognizer):
def __init__(self, imname, already_read=False):
super(OldRecognizer, self).__init__(imname, already_read)
self.cal_result()
def loop_process(self, func):
pad = 0.05
for x, y, w, h in self.recs:
x0, y0, x1, y1 = map(int, (x + pad * w, y + pad * h, x + w - pad * w, y + h - pad * h))
single = cv2.cvtColor(self.raw_sudoku_im[y0:y1, x0:x1], cv2.COLOR_BGR2GRAY)
self.result.append(func(single))
def single_recognize(self, im):
ret, im = cv2.threshold(im, OLD_FONT_THRESHOLD, 255, cv2.THRESH_BINARY)
# self._debug(im)
r, c = im.shape
edges = cv2.Canny(im, 20, 50)
black, mid_area = 0, 0
for i in range(r):
for j in range(c):
if edges[i, j] == 255:
black += 1
if 1 / 3 * c < j < 2 / 3 * c:
mid_area += 1
return mid_area / black # 图像中央部分黑色像素的比例
def cal_result(self):
# print(*zip(self.result, range(9)), sep='\n')
self.result = sorted(zip(self.result, range(9)), reverse=True)[0][1]
if __name__ == '__main__':
print(OldRecognizer('test_im/raw_old.jpg').result) | sjdeak/RoboMasters2017-RuneDetector | old.py | old.py | py | 1,379 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "hand.HandRecognizer",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold... |
73535922429 | import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
def output_result(path, matrix):
f = open(path, "w+")
f.write(str(len(matrix)) + '\n')
for row in matrix:
f.write(str(row)[1:-1])
f.write('\n')
f.close()
def read_buildings(path_to_buildings):
'''
:param path_to_buildings: a path to text file that consists coordinates of the buildings
:return: list of (x1,y1,x2,y2)
'''
file = open(path_to_buildings)
buildings = []
for line in file:
sp = line.split(" ")
x1, y2, x2, y1 = float(sp[0]), 1 - float(sp[1]), float(sp[2]), 1 - float(sp[3])
buildings.append((x1, y1, x2, y2))
return buildings
def isInsideBuildings(xx, yy, buildings):
'''
:param coord: (x,y) - coords to check
:param building: (x1,y1,x2,y2) - building
:return: Boolean
'''
answer = False
for building in buildings:
(x1, y1, x2, y2) = building
if x1 <= xx <= x2 and y1 <= yy <= y2:
answer = True
return not answer
def get_cond_check_func(buildings):
'''
Given building construct function that verify whether point (x,y) is outside these buildings
:param buildings:
:return: lambda (x,y) -> ...
'''
return lambda x, y: isInsideBuildings(x, y, buildings)
class ConvectionDiffusion:
def __init__(self, max_t, l1, l2, k, N, cond_func, eps):
self.max_t = max_t
self.l1 = l1
self.l2 = l2
self.k = k
self.N = N
self.h = 1.0 / N
self.eps = eps
self.cond_func = cond_func
self.tau = 1 / (4 * k * N * N)
self.U = np.zeros((N + 1, N + 1))
self.coeffs = [1 - 4 * self.tau * k / (self.h * self.h),
self.tau * (k / (self.h * self.h) - l1 / (2 * self.h)),
self.tau * (k / (self.h * self.h) + l1 / (2 * self.h)),
self.tau * (k / (self.h * self.h) - l2 / (2 * self.h)),
self.tau * (k / (self.h * self.h) + l2 / (2 * self.h))]
def check_correctness(self, x, y):
return 0 <= x < self.N and 0 < y < self.N
def iteration(self):
'''
One iteration of the simple iteration methods
:return: error
'''
dx = [0, 1, -1, 0, 0]
dy = [0, 0, 0, 1, -1]
new_U = np.zeros((N + 1, N + 1))
for i in range(self.N + 1):
for j in range(self.N + 1):
new_U[i, j] = self.U[i, j]
if not self.cond_func(i / N, j / N):
continue
else:
new_U[i, j] *= self.coeffs[0]
for f in range(1, 5):
x = i + dx[f]
y = j + dy[f]
if self.cond_func(x / N, y / N) and self.check_correctness(x, y):
new_U[i, j] += self.U[x, y] * self.coeffs[f]
else:
new_U[i, j] += self.U[i, j] * self.coeffs[f]
old_U = self.U
self.U = new_U
return np.max(np.abs((old_U / 100 - new_U / 100)))
def init_matrix(self):
self.U[:, 1] = 100
def solve(self):
'''
:return: U and image
'''
self.init_matrix()
for f in range(0, self.max_t):
error = self.iteration()
print(error)
if error < self.eps:
break
fig = plt.imshow(self.U / 100)
plt.colorbar(fig)
plt.show()
return self.U / 100
def optimized_solve(self):
vars = (self.N + 1) * (self.N + 1)
dx = [0, 1, -1, 0, 0]
dy = [0, 0, 0, 1, -1]
A = sparse.lil_matrix((vars, vars))
b = np.zeros(vars)
frames = []
for i in range(vars):
y = i // (self.N + 1)
x = i % (self.N + 1)
A[i, i] = self.coeffs[0]
if not self.cond_func(x / (self.N + 1), y / (self.N + 1)) or not self.check_correctness(x, y):
continue
if x == 0:
A[i, i] = 1
continue
for j in range(1, 5):
xx = x + dx[j]
yy = y + dy[j]
if xx == 0:
b[i] += self.coeffs[j]
continue
if not self.cond_func(xx / (self.N + 1), yy / (self.N + 1)) or not self.check_correctness(xx, yy):
A[i, i] += self.coeffs[j]
else:
A[i, yy * (self.N + 1) + xx] = self.coeffs[j]
A = sparse.csc_matrix(A)
x = np.zeros(vars)
for i in range(self.N + 1):
x[i * (self.N + 1)] = 0
for f in range(self.max_t):
x_new = A @ x + b
error = np.max(np.abs((x_new - x) / np.maximum(1, x)))
if f % 100 == 0:
frames.append(x_new.reshape((self.N + 1, self.N + 1)))
print(error)
if error < self.eps:
x = x_new
break
x = x_new
answer = x.reshape((self.N + 1, self.N + 1))
fig = plt.imshow(answer)
plt.colorbar(fig)
plt.show()
return answer, frames
def animate(frames):
fig = plt.figure()
ims = []
from matplotlib import animation
i = 0
for frame in frames:
print(frame.shape)
im = plt.imshow(frame, animated=True)
ims.append([im])
i += 1
plt.colorbar(im)
Writer = animation.writers['ffmpeg']
writer = Writer(fps=60, metadata=dict(artist='Me'), bitrate=260)
ani = animation.ArtistAnimation(fig, ims, interval=17, blit=True,
repeat_delay=1000)
# ani.save('diffusion.html')
ani.save('diffusion.mp4', writer=writer)
plt.show()
if __name__ == "__main__":
max_t = 100000
l_1 = 1
l_2 = 0.0
k = 0.5
N = 300
eps = 1e-6
buildings = read_buildings("buildings.txt")
cond_func = get_cond_check_func(buildings)
solver = ConvectionDiffusion(max_t, l_1, l_2, k, N, cond_func, eps)
u, frames = solver.optimized_solve()
animate(frames)
output_result("output.txt", u)
| arsee2/numerical_modelling_diffusion_convection_process | nm_project.py | nm_project.py | py | 6,194 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 105... |
6253113584 | import torch
import torch.nn as nn
import torch.nn.functional as F
from _polytope_ import Polytope, Face
import utilities as utils
from collections import OrderedDict
import numpy as np
import time
import copy
import convex_adversarial.convex_adversarial as ca
import full_lp as flp
class PLNN(nn.Module):
#TODO: determine if building net addition was necessary
# add some explanations for some methods
""" Simple piecewise neural net.
Fully connected layers and ReLus only
"""
def __init__(self, layer_sizes=None, bias=True, dtype=torch.FloatTensor):
super(PLNN, self).__init__()
if layer_sizes is None:
layer_sizes = [32, 64, 128, 64, 32, 10]
self.layer_sizes = layer_sizes
self.dtype = dtype
self.fcs = []
self.bias = bias
self.net = self.build_network(layer_sizes)
def build_network(self, layer_sizes):
layers = OrderedDict()
num = 1
for size_pair in zip(layer_sizes, layer_sizes[1:]):
size, next_size = size_pair
layer = nn.Linear(size, next_size, bias=self.bias).type(self.dtype)
layers[str(num)] = layer
self.fcs.append(layer)
num = num + 1
layers[str(num)] = nn.ReLU()
num = num + 1
del layers[str(num-1)] # No ReLU for the last layer
net = nn.Sequential(layers).type(self.dtype)
print(self.layer_sizes)
return net
def get_parameters(self):
params = []
for fc in self.fcs:
fc_params = [elem for elem in fc.parameters()]
for param in fc_params:
params.append(param)
return params
def config_str_to_config_list(self, config_str):
""" Given str of configs, converts to list of torch tensors of right
layer sizes
"""
assert isinstance(config_str, str)
assert len(config_str) == sum(self.layer_sizes[1:-1])
splits = []
running_idx = 0
for el in self.layer_sizes[1:-1]:
layer_config = config_str[running_idx:running_idx + el]
layer_config = torch.Tensor([float(el) for el in layer_config])
# Do some cuda nonsense here?
splits.append(layer_config)
running_idx += el
return splits
def relu_config(self, x, return_pre_relus=True):
pre_relus = self.forward_by_layer(x)
configs = [(pre_relu.squeeze() > 0).type(torch.float32)
for pre_relu in pre_relus]
if return_pre_relus:
return pre_relus, configs
else:
return configs
def make_adversarial_constraints(self, polytope, true_label,
domain):
""" Given a config computes the linear map in terms of this config
for all neurons INCLUDING the output neurons (logits) and generates
the polytope constraints for the neuron config and
constraints for each of the decision boundaries
configs - as usual
true_label -
"""
# Make all the adversarial_constraints:
#if(x) = Ax + b (in R^#logits)
# adversarial constraints are:
# f_true(x) - f_j(x) = 0 (for all j != true)
# ~ which is ~
# <a_true, x> + b_true - <a_j, x> - b_j = 0
# ~ which is ~
# <a_true - a_j, x> = b_j - b_true
total_a = polytope.linear_map['A']
total_b = polytope.linear_map['b']
num_logits = total_a.shape[0]
facets = []
true_a = total_a[true_label]
true_b = total_b[true_label]
for i in range(num_logits):
if i == true_label:
continue
dec_bound = {'A': true_a - total_a[i],
'b': total_b[i] - true_b}
new_facet = polytope.facet_constructor(None, facet_type='decision',
extra_tightness=dec_bound)
if new_facet.fast_domain_check():
facets.append(new_facet)
return facets
def compute_polytope_config(self, configs, comparison_form_flag=False,
uncertain_constraints=None, as_tensor=False):
lambdas = [torch.diag(config) for config in configs]
js = [torch.diag(-2 * config + 1) for config in configs]
# Compute Z_k = W_k * x + b_k for each layer
wks = [self.fcs[0].weight]
bks = [self.fcs[0].bias]
for (i, fc) in enumerate(self.fcs[1:]):
current_wk = wks[-1]
current_bk = bks[-1]
current_lambda = lambdas[i]
precompute = fc.weight.matmul(current_lambda)
wks.append(precompute.matmul(current_wk))
bks.append(precompute.matmul(current_bk) + fc.bias)
a_stack = []
b_stack = []
for j, wk, bk in zip(js, wks, bks):
a_stack.append(j.matmul(wk))
b_stack.append(-j.matmul(bk))
if as_tensor:
return {'a_stack': a_stack,
'b_stack': b_stack,
'total_a': wks[-1],
'total_b': bks[-1]}
polytope_A = utils.as_numpy(torch.cat(a_stack, dim=0))
polytope_b = utils.as_numpy(torch.cat(b_stack, dim=0))
if(comparison_form_flag):
polytope_A, polytope_b = utils.comparison_form(polytope_A, polytope_b)
return {'poly_a': polytope_A,
'poly_b': polytope_b,
'configs': configs,
'total_a': wks[-1],
'total_b': bks[-1]
}
def compute_polytope(self, x, comparison_form_flag=False, as_tensor=False):
pre_relus, configs = self.relu_config(x, return_pre_relus=True)
poly_out = self.compute_polytope_config(configs, comparison_form_flag,
as_tensor=as_tensor)
poly_out['pre_relus'] = pre_relus
return poly_out
def compute_matrix(self, configs):
M = torch.eye(self.layer_sizes[0])
for config, fc, layer_size in zip(configs, self.fcs, self.layer_sizes):
nullifier = torch.Tensor([config.numpy() for _ in range(0, layer_size)])
M_layer_prime = fc.weight * torch.transpose(nullifier, 0, 1)
M = torch.matmul(M_layer_prime, M)
M = torch.matmul(self.fcs[-1].weight, M)
return M
def forward_by_layer(self, x):
pre_relus = []
x = x.view(-1, self.layer_sizes[0])
for fc in self.fcs[:-1]:
x = fc(x)
pre_relus.append(x.clone())
x = F.relu(x)
return pre_relus
def forward(self, x):
x = x.view(-1, self.layer_sizes[0])
for fc in self.fcs[:-1]:
x = F.relu(fc(x))
return self.fcs[-1](x) # No ReLu on the last one
def compute_interval_bounds(self, domain_obj, compute_logit_bounds=False,
as_tensor=False):
""" For each neuron computes a bound for the range of values each
pre-ReLU can take.
ARGS:
domain_obj : Domain - object used to hold bounding boxes
on_off_format: boolean - if True, we return the more fine-grained
list which displays if neurons are on or
off, instead of stable
RETURNS:
returned_bounds : list of tensors giving pre-Relu bounds
uncertain_set: list of tensors with 1 if uncertain about this
neuron in the list
list of length (# fully connected layers - 1), where each element
is a tensor of shape (num_neurons, 2) for the bounds for the preReLU
"""
box = domain_obj.box_to_tensor()
# setup + asserts
assert all(box[:, 0] <= box[:, 1])
# Redoing this one more time
# Redo this but doing it right :
midpoint_matrix = torch.Tensor([[1.0], [1.0]]) / 2.0
ranges_matrix = torch.Tensor([[-1.0], [1.0]]) / 2.0
returned_bounds = []
dead_set = [] # list of tensors, 1 if always on or off
working_bounds = box
current_low, current_high = box[:, 0], box[:, 1]
if compute_logit_bounds:
layers_to_check = self.fcs
else:
layers_to_check = self.fcs[:-1]
for fc in layers_to_check:
weight, bias = fc.weight, fc.bias
weight_pos, weight_neg = utils.split_tensor_pos(weight)
new_high = (torch.matmul(weight_pos, current_high) +
torch.matmul(weight_neg, current_low))
new_low = (torch.matmul(weight_pos, current_low) +
torch.matmul(weight_neg, current_high))
if bias is not None:
new_high += bias
new_low += bias
returned_bounds.append(torch.stack([new_low, new_high], dim=1))
current_low = F.relu(new_low)
current_high = F.relu(new_high)
if as_tensor:
return returned_bounds
else:
return [utils.as_numpy(_) for _ in returned_bounds]
def compute_improved_ia_bounds(self, domain_obj):
""" Implements the improved interval bounds as presented here:
https://arxiv.org/pdf/1809.03008.pdf (appendix C)
[also done with gradients pushed through so we can build RS loss ]
# CODE HEAVILY BORROWED FROM https://github.com/MadryLab/relu_stable/blob/master/models/MNIST_improved_ia.py
# (but we're transposed from that code)
"""
box = domain_obj.box_to_tensor()
init_lows = box[:, 0]
init_highs = box[:, 1]
assert all(init_lows <= init_highs) # assert lows less than highs
layers_to_check = self.fcs[:-1] # set the
intermed_lows, intermed_highs = [], []
# define the recursive call
def recurs(layer_num, lows, highs, weights, biases):
assert len(lows) == len(highs) == len(weights) == len(biases) == layer_num
# current layer
low = lows[0]
high = highs[0]
weight = weights[0]
bias = biases[0]
# Base case
if layer_num == 1:
weight_pos, weight_neg = utils.split_tensor_pos(weight)
next_low = (torch.matmul(weight_pos, init_lows) +
torch.matmul(weight_neg, init_highs) + bias)
next_high = (toch.matmul(weight_pos, init_highs) +
torch.matmul(weight_neg, init_lows) + bias)
return next_low, next_high
# Recursive case
prev_weight = weights[1]
prev_bias = biases[1]
# Compute W_A, W_N (need to zero out COLUMNS here)
w_a = torch.matmul(weight, (low > 0).diag_embed())
w_n = weight - w_a
w_n_pos, w_n_neg = utils.split_tensor_pos(w_n)
w_prod = torch.matmul(w_a, prev_weight)
b_prod = torch.matmul(w_a, prev_bias)
# Compute prev layer bounds
prev_low = (torch.matmul(w_n_pos, low) +
torch.matmul(w_n_neg, high) + bias)
prev_high = (torch.matmul(w_n_pos, high) +
torch.matmul(w_n_neg, low) + bias)
# Recurse
deeper_lows, deeper_highs = recurs(layer_num - 1, lows[1:], highs[1:],
[w_prod] + weights[2:],
[b_prod] + biases[2:])
return (prev_low + deeper_lows, prev_high + deeper_highs)
# compute the lower and upper bounds for all neurons
running_lows = [init_lows]
running_highs = [init_highs]
running_weights = [self.fcs[0].weight]
running_biases = [self.fcs[0].bias]
for layer_num, layer in enumerate(self.fcs[:-1]):
new_lows, new_highs = recurs(layer_num + 1, running_lows, running_highs,
running_weights, running_biases)
running_lows = [new_lows] + running_lows
running_highs = [new_highs] + running_highs
running_weights = self.fcs[layer_num + 1].weight
running_biases = self.fcs[layer_num + 1].bias
return running_lows[::-1], running_highs[::-1]
def compute_full_lp_bounds(self, domain_obj):
""" Compute the full linear program values.
Code here is in a different file
"""
return flp.compute_full_lp_bounds(self, domain_obj)
def compute_dual_lp_bounds(self, domain_obj):
""" Use KW to actually find the bounds. Uses L_inf bounds to help
get better bounds
"""
low_bounds = torch.Tensor(domain_obj.box_low)
high_bounds = torch.Tensor(domain_obj.box_high)
midpoint = ((low_bounds + high_bounds) / 2.0).view(1, -1)
box_bounds = (low_bounds, high_bounds)
dual_net = ca.DualNetwork(self.net, midpoint, domain_obj.linf_radius,box_bounds=box_bounds).dual_net
bounds, dead_set = [], []
for el in dual_net:
if isinstance(el, ca.DualReLU):
bounds.append(torch.cat((el.zl.view(-1, 1), el.zu.view(-1, 1)),
dim=1))
dead_set.append(~el.I.squeeze())
return bounds
def compute_dual_ia_bounds(self, domain_obj):
""" Use both interval analysis and dual bounds to get best bounds """
ia = self.compute_interval_bounds(domain_obj)
dd = self.compute_dual_lp_bounds(domain_obj)
bounds = []
for i, d in zip(ia, dd):
stacked = torch.stack((i, d))
new_lows = torch.max(stacked[:, :, 0], dim=0)[0]
new_highs = torch.min(stacked[:, :, 1], dim=0)[0]
new_bounds = torch.stack((new_lows, new_highs), dim=1)
bounds.append(new_bounds)
return bounds
def fast_lip_all_vals(self, x, l_q, on_off_neurons):
""" Does the fast_value for all possible c's """
num_logits = self.fcs[-1].out_features
if not isinstance(x, torch.Tensor):
true_label = self(torch.Tensor(x)).max(1)[1].item()
else:
true_label = self(x).max(1)[1].item()
c_vecs, lip_values = [], []
for i in range(num_logits):
if true_label == i:
continue
c_vec = torch.zeros(num_logits)
c_vec[true_label] = 1.0
c_vec[i] = -1.0
lip_value = self.fast_lip(c_vec, l_q, on_off_neurons)
c_vecs.append(c_vec)
lip_values.append(lip_value)
return c_vecs, lip_values
def fast_lip(self, c_vector, l_q, on_off_neurons):
"""
Pytorch implementation of fast_lip. Might be buggy? Who knows?
see : https://arxiv.org/pdf/1804.09699.pdf for details
INPUTS:
c_vector: tensor that multiplies the output vector:
we compute gradient of c^Tf(x)
l_q : int - q_norm of lipschitzness that we compute
(is dual norm: e.g. if bounds come from an l_inf box,
this should be 1)
on_off_neurons : list of LongTensors (entries in -1, 0 or 1)
corresponding to the set of
(off, uncertain, on, respectively) neurons
inside the domain
RETURNS:
upper bound on lipschitz constant
"""
######################################################################
# First generate inputs needed by fast_lip algorithm #
######################################################################
# --- split off active and uncertain neurons
# -1 means off (don't care)
# 0 means UNCERTAIN
# 1 means ACTIVE
active_neuron_list, uncertain_neuron_list = [], []
for neuron_by_layer in on_off_neurons:
active_neuron_list.append((neuron_by_layer == 1))
uncertain_neuron_list.append((neuron_by_layer == 0))
# --- get list of weights, initialize placeholders
weights = [layer.weight for layer in self.fcs[:-1]]
weights.append(c_vector.matmul(self.fcs[-1].weight).view(1, -1))
constant_term = weights[0]
lowers = [torch.zeros_like(constant_term)]
uppers = [torch.zeros_like(constant_term)]
######################################################################
# Loop through layers using the _bound_layer_grad subroutine #
######################################################################
for i in range(len(weights) - 1):
subroutine_out = self._bound_layers_grad(constant_term, lowers[-1],
uppers[-1],
weights[i + 1],
active_neuron_list[i],
uncertain_neuron_list[i])
constant_term, upper, lower = subroutine_out
lowers.append(lower)
uppers.append(upper)
######################################################################
# Finalize and return the output #
######################################################################
low_bound = (constant_term + lowers[-1]).abs()
upp_bound = (constant_term + uppers[-1]).abs()
layerwise_max = torch.where(low_bound > upp_bound, low_bound, upp_bound)
return torch.norm(layerwise_max, p=l_q).item()
def _bound_layers_grad(self, constant_term, lower, upper, weight,
active_neurons, uncertain_neurons):
""" Subroutine for fast_lip.
Assume weight has shape [m, n]
ARGS: (let's make sure the types and shapes all mesh)
constant_term: floatTensor shape (n, n_0)
lower: floatTensor shape (n, n_0)
upper: floatTensor shape (n, n_0)
weight: floatTensor shape (m, n)
active_neurons: torch.Tensor shape (n,)
uncertain_neurons: torch.Tensor shape (n,)
RETURNS:
new constant term, lower, and upper, each with shape (m, n_0)
"""
# ASSERTS ON SHAPES FOR DEBUGGING
n_0 = self.layer_sizes[0]
n = weight.shape[1]
assert constant_term.shape == (n, n_0)
assert lower.shape == (n, n_0)
assert upper.shape == (n, n_0)
assert active_neurons.shape == (n,)
assert uncertain_neurons.shape == (n,)
# Make diagonals and split weights by +/-
active_diag = torch.diag(active_neurons).float()
uncertain_diag = torch.diag(uncertain_neurons).float()
pos_weight, neg_weight = utils.split_tensor_pos(weight)
# Compute the new constant_term
new_constant_term = weight.matmul(active_diag).matmul(constant_term)
# Make new upper bounds/lower bounds
cons_low = constant_term + lower
_, neg_cons_low = utils.split_tensor_pos(cons_low)
cons_upp = constant_term + upper
pos_cons_upp, _ = utils.split_tensor_pos(cons_upp)
new_upper = (pos_weight.matmul(active_diag).matmul(upper) +
neg_weight.matmul(active_diag).matmul(lower) +
neg_weight.matmul(uncertain_diag).matmul(neg_cons_low) +
pos_weight.matmul(uncertain_diag).matmul(pos_cons_upp))
new_lower = (pos_weight.matmul(active_diag).matmul(lower) +
neg_weight.matmul(active_diag).matmul(upper) +
pos_weight.matmul(uncertain_diag).matmul(neg_cons_low) +
neg_weight.matmul(uncertain_diag).matmul(pos_cons_upp))
return new_constant_term, new_upper, new_lower
class PLNN_seq(PLNN):
""" Simple piecewise neural net.
Fully connected layers and ReLus only
built from nn.Sequential
"""
def __init__(self, sequential, layer_sizes, dtype=torch.FloatTensor):
super(PLNN_seq, self).__init__(layer_sizes, dtype)
self.fcs = [layer for layer in sequential if type(layer) == nn.Linear]
self.net = sequential
class LinearRegionCollection(object):
""" Takes a ReturnObj and builds a lot of linear regions and stores them
"""
def __init__(self, plnn_obj, return_obj, objective_vec=None,
do_setup=False):
self.plnn_obj = plnn_obj
self.return_obj = return_obj
self.collection = {}
for config in return_obj.seen_polytopes:
self.collection[config] = LinearRegion(plnn_obj, config,
return_obj=return_obj,
objective_vec=objective_vec,
do_setup=do_setup)
def get_maximum_lipschitz_constant(self):
return max(_.get_lipschitz_constant()
for _ in self.collection.values())
def gradient_angle_list(self):
""" Gets the gradient angles between neighboring linear regions """
angle_list = {}
for (u, v) in self.return_obj.polytope_graph.keys():
u_grad = self.collection[u].get_gradient()
v_grad = self.collection[v].get_gradient()
angle_list[(u, v)] = utils.angle(u_grad, v_grad)
return angle_list
def gradient_magnitude_diff_list(self, grad_fxn=None):
""" Gets the magnitude of gradient difference
between neighboring linear regions
"""
if grad_fxn is None:
grad_fxn = lambda u, v: torch.norm(u - v).item()
output = {}
for (u, v) in self.return_obj.polytope_graph.keys():
u_grad = self.collection[u].get_gradient()
v_grad = self.collection[v].get_gradient()
output[(u, v)] = grad_fxn(u_grad, v_grad)
return output
def get_greedy_lipschitz_components(self):
""" Returns dict of str -> [str1, ..., ] mapping locally maximal
linear regions to the set of regions that will greedily
approach this local max
"""
# Let's just be really naive about this
def get_ascent_neighbor(node):
""" Gets the neighbor that has highest lipschitz constant
Returns None if nothing has higher than this one
"""
current = node.get_lipschitz_constant()
neighbors = [(_, _.get_lipschitz_constant())
for _ in node.get_neighbors()]
max_neighbor = max(neighbors, key=lambda p: p[1])
if max_neighbor[1] > current:
return max_neighbor[0]
return None
def greedy_search_single_node(start_config):
""" Start with a single sign_config and do greedy search
to find max_lipschitz constant. Return the sign_config
of the greedy search output
"""
current_node = self.collection[start_config]
while True:
next_node = get_ascent_neighbor(current_node)
if next_node is None:
break
else:
current_node = next_node
return current_node.sign_config
greedy_output = {}
for config in self.collection.keys():
greedy_parent = greedy_search_single_node(config)
if greedy_parent not in greedy_output:
greedy_output[greedy_parent] = []
greedy_output[greedy_parent].append(config)
return greedy_output
class LinearRegion(object):
""" Holds info and shortcuts to work with linear regions """
@classmethod
def process_return_obj(cls, plnn_obj, return_obj, objective_vec=None,
do_setup=False):
""" Given a GeoCertReturn object, will build a linear region for
all of the 'seen polytopes' and return the outputs in a
dict keyed on teh sign_configs
"""
output = {}
for config in return_obj.seen_polytopes:
output[config] = cls(plnn_obj, config,
return_obj=return_obj,
objective_vec=objective_vec,
do_setup=do_setup)
return output
def __init__(self, plnn_obj, sign_config, return_obj=None,
objective_vec=None, do_setup=False):
""" Initializes a Linear Region object
ARGS:
plnn_obj - the network this region is linear for
sign_config - the neuron configuration of the region
return_obj : GeoCertReturn object - if not None is an
output of GeoCert which contains info about
the linear regions.
"""
super(LinearRegion, self).__init__()
self.plnn_obj = plnn_obj
self.sign_config = sign_config
self.hex_config = hex(int(self.sign_config, 2))
self.return_obj = return_obj
self.objective_vec = objective_vec
# setting up attributes to be stored later
self._polytope_config = None
self.polytope = None
self.linear_map = None
self.jacobian = None
self.largest_sv = None
if do_setup:
self.setup()
def __repr__(self):
return "LinearRegion: %s" % self.hex_config
def get_neighbors(self):
""" If the return obj is not None, will error. Otherwise will
return a list of neighboring LinearRegion objects
"""
assert self.return_obj is not None
neigbor_list = []
for edge in self.return_obj.polytope_graph:
if self.sign_config == edge[0]:
neigbor_idx = 1
elif self.sign_config == edge[1]:
neigbor_idx = 0
else:
continue
neigbor_list.append(edge[neigbor_idx])
return [LinearRegion(self.plnn_obj, neigbor_config,
return_obj=self.return_obj,
objective_vec=self.objective_vec)
for neigbor_config in neigbor_list]
def _get_polytope_config(self):
if self._polytope_config is not None:
return self._polytope_config
plnn_obj = self.plnn_obj
config = plnn_obj.config_str_to_config_list(self.sign_config)
self._polytope_config = plnn_obj.compute_polytope_config(config)
return self._polytope_config
def setup(self):
self.get_polytope()
self.get_linear_map()
self.get_jacobian()
self.get_largest_singular_value()
def get_polytope(self):
""" For this linear region will return the polytope for which
the neural net satisfies the given neuron configuration
"""
if self.polytope is not None:
return self.polytope
_polytope_config = self._get_polytope_config()
self.polytope = {'A': _polytope_config['poly_a'],
'b': _polytope_config['poly_b']}
return self.polytope
def get_linear_map(self):
""" For this linear region will return a torch.nn.Linear
object corresponding to the linear map at this neuron
configuration
"""
if self.linear_map is not None:
return self.linear_map
_polytope_config = self._get_polytope_config()
A = nn.Parameter(_polytope_config['total_a'])
b = nn.Parameter(_polytope_config['total_b'])
linear_map = nn.Linear(*A.shape)
linear_map.weight = A
linear_map.bias = b
self.linear_map = linear_map
return self.linear_map
def get_jacobian(self):
""" For this linear region will get the jacobian at this
linear piece
"""
if self.jacobian is not None:
return self.jacobian
linear_map = self.get_linear_map()
self.jacobian = linear_map.weight
return self.jacobian
def get_largest_singular_value(self):
""" Will return the largest singular value of the jacobian
of this linear region
"""
if self.largest_sv is not None:
return self.largest_sv
jacobian = self.get_jacobian()
self.largest_sv = jacobian.svd().S[0].item()
return self.largest_sv
def get_gradient(self):
assert self.objective_vec is not None
return self.objective_vec.matmul(self.get_jacobian())
def get_lipschitz_constant(self):
if self.objective_vec is not None:
return self.objective_vec.matmul(self.get_jacobian()).norm().item()
else:
return self.get_largest_singular_value()
| revbucket/geometric-certificates | plnn.py | plnn.py | py | 29,208 | python | en | code | 40 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.FloatTensor",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "collections.Orde... |
39525459958 | import pyttsx3
from gtts import gTTS
import os
#MALE
engine = pyttsx3.init()
engine.say("Hello there")
engine.runAndWait()
#FEMALE
mytext = 'You are welcome to Roles Academy Madam.'
language = 'en'
myobj = gTTS(text=mytext, lang=language, slow=False)
myobj.save("welcome.mp3")
os.system("mpg321 welcome.mp3")
| adesolasamuel/EqualityMachine | texttospeech.py | texttospeech.py | py | 312 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pyttsx3.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gtts.gTTS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.