code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars)
|---|---|---|
import glob
import os
# os.environ["IMAGEIO_FFMPEG_EXE"] = "C:/ffmpeg-4.4.1-essentials_build/ffmpeg-4.4.1-essentials_build/bin"
import re
import sys
import urllib
from tkinter import (BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu,
Scrollbar, StringVar, Tk, Y)
from tkinter import messagebox as m_box
import validators
from moviepy.editor import AudioFileClip
from pytube import YouTube, exceptions
import subprocess
ws = Tk()
ws.title('YT Downloader - Scarica le tue canzoni')
ws.geometry('1000x600')
ws.eval('tk::PlaceWindow . center')
if getattr(sys, 'frozen', False):
dirname = os.path.dirname(sys.executable)
elif __file__:
dirname = os.path.dirname(__file__)
# ws.iconbitmap(os.path.join(dirname, "icon", "icon.ico"))
### Center the window ###
#Same size will be defined in variable for center screen in Tk_Width and Tk_height
Tk_Width = 1000
Tk_Height = 600
#calculate coordination of screen and window form
x_Left = int(ws.winfo_screenwidth()/2 - Tk_Width/2)
y_Top = int(ws.winfo_screenheight()/2 - Tk_Height/2)
# Write following format for center screen
ws.geometry("+{}+{}".format(x_Left, y_Top))
###
def make_menu(w):
global the_menu
the_menu = Menu(w, tearoff=0)
the_menu.add_command(label="Taglia")
the_menu.add_command(label="Copia")
the_menu.add_command(label="Incolla")
def show_menu(e):
w = e.widget
the_menu.entryconfigure("Taglia",
command=lambda: w.event_generate("<<Cut>>"))
the_menu.entryconfigure("Copia",
command=lambda: w.event_generate("<<Copy>>"))
the_menu.entryconfigure("Incolla",
command=lambda: w.event_generate("<<Paste>>"))
the_menu.tk.call("tk_popup", the_menu, e.x_root, e.y_root)
def delSelected():
link_selected = lb.curselection()
    if len(link_selected) == 0:
        m_box.showerror("Error", "Nessun link selezionato")
        return
    # delete from the end so earlier deletions don't shift the remaining indices
    for i in reversed(link_selected):
        lb.delete(i)
def insert_link():
inserted_link = link.get()
    inserted_link = inserted_link.replace(" ", "")  # str.replace returns a new string
# check if inserted string is a valid url
if validators.url(inserted_link):
#check if the link is a YouTube link
try:
YouTube(inserted_link).check_availability()
list_of_urls = lb.get(0, 'end')
# check if the link was already inserted
if inserted_link not in list_of_urls:
lb.insert('end',inserted_link)
yt_link.delete(0,'end')
else:
yt_link.delete(0,'end')
m_box.showerror("Error", "Link YouTube già inserito!")
except exceptions.VideoUnavailable:
yt_link.delete(0,'end')
m_box.showerror("Error", "Link video YouTube non disponibile!\nInserisci un link di un video YouTube!")
except urllib.error.URLError:
yt_link.delete(0,'end')
m_box.showerror("Error", "Internet non disponibile")
else:
yt_link.delete(0,'end')
m_box.showerror("Error", "Inserisci un link valido!")
def download():
list_of_urls = lb.get(0, 'end')
if len(list_of_urls) == 0:
m_box.showerror("Error", "Nessun link inserito")
else:
answer=m_box.askyesnocancel("Richiesta", "Vuoi davvero scaricare tutte le canzoni?")
if answer:
            if os.path.isdir(dirname+"/Canzoni_mp4"):  # if Canzoni_mp4 exists, ask whether to clear it
answer=m_box.askyesnocancel("Richiesta", "Vuoi cancellare tutte le canzoni che ci sono nella cartella 'Canzoni_mp4'?")
if answer:
files = glob.glob('./Canzoni_mp4/*')
for f in files:
os.remove(f)
try:
for i in list_of_urls:
yt = YouTube(i)
title = yt.title
title = re.sub(r'[\\/*?:"<>|]',"-",title)
default_filename = title + ".mp4"
new_filename = title+'.mp3'
parent_dir = os.path.join(dirname, "Canzoni_mp4")
                    stream = yt.streams.get_audio_only()  # avoid shadowing the built-in str
                    stream.download(output_path=parent_dir, filename=default_filename, max_retries=10)
try:
                        # Pass the argument list without shell=True (a list plus shell=True is
                        # unreliable) and use check=True so a failed conversion raises and is
                        # handled by the except-branch below.
                        subprocess.run([
                            'ffmpeg', '-y',
                            '-i', os.path.join(parent_dir, default_filename),
                            os.path.join(parent_dir, new_filename)
                        ], check=True)
# audioclip = AudioFileClip(os.path.join(parent_dir, default_filename))
# audioclip.write_audiofile(os.path.join(parent_dir, new_filename))
# audioclip.close()
files = glob.glob(parent_dir+'/*.mp4')
for f in files:
os.remove(f)
except:
files = glob.glob(parent_dir+'/*.mp4')
for f in files:
os.remove(f)
m_box.showerror("Error", "Errore di conversione da MP4 a MP3")
except:
m_box.showerror("Error", "Errore di download")
m_box.showinfo("Scaricato", "Ho scaricato tutto")
else:
pass
make_menu(ws)
show = Label(ws, anchor="w",fg ="#f5453c", text = 'Bentornato su "YT Downloader - Scarica le tue canzoni"', font = ("Serif", 14), padx = 0, pady = 10)
show.pack()
show = Label(ws, text = "Lista dei link delle canzoni che vuoi scaricare: ",
font = ("Times", 14), padx = 10, pady = 10)
show.pack()
lb = Listbox(ws, selectmode = "multiple")
scroll_one=Scrollbar(ws,command=lb.yview)
lb.configure(yscrollcommand=scroll_one.set)
lb.pack(padx = 20, pady = 0, expand = YES, fill = BOTH)
scroll_one.pack(side=RIGHT,fill=Y)
get_info = Label(ws, text="Inserisci il link della canzone che vuoi scaricare: ",
font = ("Times", 14), padx = 10, pady = 10)
get_info.pack()
link = StringVar()
yt_link = Entry(ws, width=60, textvariable=link)
yt_link.pack()
yt_link.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_menu)
yt_link.focus()
Button(ws, text="Inserisci link", command=insert_link).pack()
Button(ws, text="Cancella link", command=delSelected).pack()
Button(ws, text="Scarica le canzoni", command=download, activeforeground =
"#f5453c").pack()
ws.mainloop()
| [
"tkinter.messagebox.askyesnocancel",
"tkinter.Button",
"tkinter.Label",
"os.remove",
"tkinter.Entry",
"tkinter.StringVar",
"os.path.isdir",
"tkinter.messagebox.showinfo",
"glob.glob",
"tkinter.Menu",
"tkinter.messagebox.showerror",
"os.path.dirname",
"validators.url",
"re.sub",
"pytube.Y... | [((453, 457), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (455, 457), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((5406, 5550), 'tkinter.Label', 'Label', (['ws'], {'anchor': '"""w"""', 'fg': '"""#f5453c"""', 'text': '"""Bentornato su "YT Downloader - Scarica le tue canzoni\\""""', 'font': "('Serif', 14)", 'padx': '(0)', 'pady': '(10)'}), '(ws, anchor=\'w\', fg=\'#f5453c\', text=\n \'Bentornato su "YT Downloader - Scarica le tue canzoni"\', font=(\'Serif\',\n 14), padx=0, pady=10)\n', (5411, 5550), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((5574, 5684), 'tkinter.Label', 'Label', (['ws'], {'text': '"""Lista dei link delle canzoni che vuoi scaricare: """', 'font': "('Times', 14)", 'padx': '(10)', 'pady': '(10)'}), "(ws, text='Lista dei link delle canzoni che vuoi scaricare: ', font=(\n 'Times', 14), padx=10, pady=10)\n", (5579, 5684), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((5720, 5754), 'tkinter.Listbox', 'Listbox', (['ws'], {'selectmode': '"""multiple"""'}), "(ws, selectmode='multiple')\n", (5727, 5754), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((5768, 5799), 'tkinter.Scrollbar', 'Scrollbar', (['ws'], {'command': 'lb.yview'}), '(ws, command=lb.yview)\n', (5777, 5799), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((5952, 6065), 'tkinter.Label', 'Label', (['ws'], {'text': '"""Inserisci il link della canzone che vuoi scaricare: """', 'font': "('Times', 14)", 'padx': '(10)', 'pady': '(10)'}), "(ws, text='Inserisci il link della canzone che vuoi scaricare: ', font\n =('Times', 14), padx=10, pady=10)\n", (5957, 6065), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((6108, 6119), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (6117, 6119), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((6131, 6169), 'tkinter.Entry', 'Entry', (['ws'], {'width': '(60)', 'textvariable': 'link'}), '(ws, width=60, textvariable=link)\n', (6136, 6169), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((619, 650), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (634, 650), False, 'import os\n'), ((1215, 1233), 'tkinter.Menu', 'Menu', (['w'], {'tearoff': '(0)'}), '(w, tearoff=0)\n', (1219, 1233), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((2068, 2097), 'validators.url', 'validators.url', (['inserted_link'], {}), '(inserted_link)\n', (2082, 2097), False, 'import validators\n'), ((680, 705), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (695, 705), False, 'import os\n'), ((1819, 1870), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Nessun link selezionato"""'], {}), "('Error', 'Nessun link selezionato')\n", (1834, 1870), True, 'from tkinter import messagebox as m_box\n'), ((2961, 3014), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Inserisci un link valido!"""'], {}), "('Error', 'Inserisci un link valido!')\n", (2976, 3014), True, 'from tkinter import 
messagebox as m_box\n'), ((3117, 3165), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Nessun link inserito"""'], {}), "('Error', 'Nessun link inserito')\n", (3132, 3165), True, 'from tkinter import messagebox as m_box\n'), ((3191, 3268), 'tkinter.messagebox.askyesnocancel', 'm_box.askyesnocancel', (['"""Richiesta"""', '"""Vuoi davvero scaricare tutte le canzoni?"""'], {}), "('Richiesta', 'Vuoi davvero scaricare tutte le canzoni?')\n", (3211, 3268), True, 'from tkinter import messagebox as m_box\n'), ((6272, 6326), 'tkinter.Button', 'Button', (['ws'], {'text': '"""Inserisci link"""', 'command': 'insert_link'}), "(ws, text='Inserisci link', command=insert_link)\n", (6278, 6326), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((6335, 6388), 'tkinter.Button', 'Button', (['ws'], {'text': '"""Cancella link"""', 'command': 'delSelected'}), "(ws, text='Cancella link', command=delSelected)\n", (6341, 6388), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((6397, 6485), 'tkinter.Button', 'Button', (['ws'], {'text': '"""Scarica le canzoni"""', 'command': 'download', 'activeforeground': '"""#f5453c"""'}), "(ws, text='Scarica le canzoni', command=download, activeforeground=\n '#f5453c')\n", (6403, 6485), False, 'from tkinter import BOTH, RIGHT, YES, Button, Entry, Label, Listbox, Menu, Scrollbar, StringVar, Tk, Y\n'), ((3303, 3342), 'os.path.isdir', 'os.path.isdir', (["(dirname + '/Canzoni_mp4')"], {}), "(dirname + '/Canzoni_mp4')\n", (3316, 3342), False, 'import os\n'), ((5304, 5353), 'tkinter.messagebox.showinfo', 'm_box.showinfo', (['"""Scaricato"""', '"""Ho scaricato tutto"""'], {}), "('Scaricato', 'Ho scaricato tutto')\n", (5318, 5353), True, 'from tkinter import messagebox as m_box\n'), ((2521, 2575), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Link YouTube già inserito!"""'], {}), "('Error', 'Link YouTube già inserito!')\n", (2536, 2575), True, 'from tkinter import messagebox as m_box\n'), ((2668, 2783), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Link video YouTube non disponibile!\nInserisci un link di un video YouTube!"""'], {}), '(\'Error\',\n """Link video YouTube non disponibile!\nInserisci un link di un video YouTube!"""\n )\n', (2683, 2783), True, 'from tkinter import messagebox as m_box\n'), ((2858, 2910), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Internet non disponibile"""'], {}), "('Error', 'Internet non disponibile')\n", (2873, 2910), True, 'from tkinter import messagebox as m_box\n'), ((3422, 3542), 'tkinter.messagebox.askyesnocancel', 'm_box.askyesnocancel', (['"""Richiesta"""', '"""Vuoi cancellare tutte le canzoni che ci sono nella cartella \'Canzoni_mp4\'?"""'], {}), '(\'Richiesta\',\n "Vuoi cancellare tutte le canzoni che ci sono nella cartella \'Canzoni_mp4\'?"\n )\n', (3442, 3542), True, 'from tkinter import messagebox as m_box\n'), ((2169, 2191), 'pytube.YouTube', 'YouTube', (['inserted_link'], {}), '(inserted_link)\n', (2176, 2191), False, 'from pytube import YouTube, exceptions\n'), ((3589, 3617), 'glob.glob', 'glob.glob', (['"""./Canzoni_mp4/*"""'], {}), "('./Canzoni_mp4/*')\n", (3598, 3617), False, 'import glob\n'), ((3773, 3783), 'pytube.YouTube', 'YouTube', (['i'], {}), '(i)\n', (3780, 3783), False, 'from pytube import YouTube, exceptions\n'), ((3849, 3885), 're.sub', 're.sub', (['"""[\\\\\\\\/*?:"<>|]"""', '"""-"""', 
'title'], {}), '(\'[\\\\\\\\/*?:"<>|]\', \'-\', title)\n', (3855, 3885), False, 'import re\n'), ((4018, 4054), 'os.path.join', 'os.path.join', (['dirname', '"""Canzoni_mp4"""'], {}), "(dirname, 'Canzoni_mp4')\n", (4030, 4054), False, 'import os\n'), ((5243, 5289), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Errore di download"""'], {}), "('Error', 'Errore di download')\n", (5258, 5289), True, 'from tkinter import messagebox as m_box\n'), ((3678, 3690), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (3687, 3690), False, 'import os\n'), ((4831, 4863), 'glob.glob', 'glob.glob', (["(parent_dir + '/*.mp4')"], {}), "(parent_dir + '/*.mp4')\n", (4840, 4863), False, 'import glob\n'), ((4931, 4943), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (4940, 4943), False, 'import os\n'), ((5006, 5038), 'glob.glob', 'glob.glob', (["(parent_dir + '/*.mp4')"], {}), "(parent_dir + '/*.mp4')\n", (5015, 5038), False, 'import glob\n'), ((5143, 5205), 'tkinter.messagebox.showerror', 'm_box.showerror', (['"""Error"""', '"""Errore di conversione da MP4 a MP3"""'], {}), "('Error', 'Errore di conversione da MP4 a MP3')\n", (5158, 5205), True, 'from tkinter import messagebox as m_box\n'), ((4393, 4435), 'os.path.join', 'os.path.join', (['parent_dir', 'default_filename'], {}), '(parent_dir, default_filename)\n', (4405, 4435), False, 'import os\n'), ((4465, 4503), 'os.path.join', 'os.path.join', (['parent_dir', 'new_filename'], {}), '(parent_dir, new_filename)\n', (4477, 4503), False, 'import os\n'), ((5106, 5118), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (5115, 5118), False, 'import os\n')] |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import glob, random
import sklearn
from sklearn.decomposition import PCA
from xgboost.sklearn import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor,BaggingRegressor, RandomForestRegressor,VotingRegressor
from sklearn.linear_model import LinearRegression
from lightgbm import LGBMRegressor
import catboost
from catboost import CatBoostRegressor
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
#import warnings
#warnings.filterwarnings('ignore')
folder = os.path.dirname(os.path.abspath(__file__))
train_new = pd.read_csv(folder+'/Train.csv')
bands_of_interest = ['S2_B5', 'S2_B4', 'S2_B3', 'S2_B2', 'CLIM_pr', 'CLIM_soil']
band_names = [l.strip() for l in open(folder + '/band_names.txt', 'r').readlines()]
def process_train(fid, folder= folder+'/imtrain'):
fn = f'{folder}/{fid}.npy'
arr = np.load(fn)
values = {}
for month in range(12):
bns = [str(month) + '_' + b for b in bands_of_interest] # Bands of interest for this month
idxs = np.where(np.isin(band_names, bns)) # Index of these bands
vs = arr[idxs, 20, 20] # Sample the im at the center point
for bn, v in zip(bns, vs[0]):
values[bn] = v
return values
def process_test(fid, folder= folder+'/imtest'):
fn = f'{folder}/{fid}.npy'
arr = np.load(fn)
values = {}
for month in range(12):
bns = [str(month) + '_' + b for b in bands_of_interest] # Bands of interest for this month
idxs = np.where(np.isin(band_names, bns)) # Index of these bands
vs = arr[idxs, 20, 20] # Sample the im at the center point
for bn, v in zip(bns, vs[0]):
values[bn] = v
return values
# Make a new DF with the sampled values from each field
train_sampled = pd.DataFrame([process_train(fid) for fid in train_new['Field_ID'].values])
#MODEL
X = train_sampled.copy()
y = train_new['Yield'].values
print(X.head())
print(y)
X_train, X_test, y_train, y_test = train_test_split(X, y)
model=BaggingRegressor(CatBoostRegressor(silent=True),n_estimators=55)
model.fit(X_train, y_train)
print('Score:', mean_squared_error(y_test, model.predict(X_test), squared=False))
#SUBMITTING
ss = pd.read_csv(folder+'/SampleSubmission.csv')
test_sampled = pd.DataFrame([process_test(fid) for fid in ss['Field_ID'].values])
preds = model.predict(test_sampled)
ss['Yield'] = preds
ss.to_csv(folder+'/Sub.csv', index=False)
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.isin",
"catboost.CatBoostRegressor",
"os.path.abspath",
"numpy.load"
] | [((691, 705), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (703, 705), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((828, 862), 'pandas.read_csv', 'pd.read_csv', (["(folder + '/Train.csv')"], {}), "(folder + '/Train.csv')\n", (839, 862), True, 'import pandas as pd\n'), ((2214, 2236), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), '(X, y)\n', (2230, 2236), False, 'from sklearn.model_selection import train_test_split\n'), ((2443, 2488), 'pandas.read_csv', 'pd.read_csv', (["(folder + '/SampleSubmission.csv')"], {}), "(folder + '/SampleSubmission.csv')\n", (2454, 2488), True, 'import pandas as pd\n'), ((788, 813), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (803, 813), False, 'import os\n'), ((1123, 1134), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1130, 1134), True, 'import numpy as np\n'), ((1573, 1584), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1580, 1584), True, 'import numpy as np\n'), ((2261, 2291), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'silent': '(True)'}), '(silent=True)\n', (2278, 2291), False, 'from catboost import CatBoostRegressor\n'), ((1295, 1319), 'numpy.isin', 'np.isin', (['band_names', 'bns'], {}), '(band_names, bns)\n', (1302, 1319), True, 'import numpy as np\n'), ((1745, 1769), 'numpy.isin', 'np.isin', (['band_names', 'bns'], {}), '(band_names, bns)\n', (1752, 1769), True, 'import numpy as np\n')] |
import math
def ipno2ipadd(ipno):
if math.isnan(ipno):
ipno = 0
    w = int(ipno / 16777216) % 256
    x = int(ipno / 65536) % 256
    y = int(ipno / 256) % 256
    z = int(ipno) % 256
return "{}.{}.{}.{}".format(w,x,y,z)
def ipadd2ipno(ipadd):
w,x,y,z = ipadd.split('.')
retipno = int(w) * 16777216 + int(x) * 65536 + int(y) * 256 + int(z)
return retipno
def isip(strin):
a = strin.split('.')
if len(a) != 4:
return False
for x in a:
if not x.isdigit():
return False
i = int(x)
        if i < 0 or i > 255:
return False
return True
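# A quick round-trip sanity check of the helpers above; the sample address and
# its integer form are illustrative values chosen here, not taken from the source.
if __name__ == '__main__':
    assert ipadd2ipno('192.168.1.1') == 3232235777
    assert ipno2ipadd(3232235777) == '192.168.1.1'
    assert isip('192.168.1.1') and not isip('192.168.1.256')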
| [
"math.isnan"
] | [((41, 57), 'math.isnan', 'math.isnan', (['ipno'], {}), '(ipno)\n', (51, 57), False, 'import math\n')] |
# Copyright 2020 MongoDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""Pseudo-builders for building test lists for Resmoke"""
import SCons
from collections import defaultdict
TEST_REGISTRY = defaultdict(list)
def register_test(env, file, test):
"""Register test into the dictionary of tests for file_name"""
test_path = test
if env.get("AUTO_INSTALL_ENABLED", False) and env.GetAutoInstalledFiles(test):
test_path = env.GetAutoInstalledFiles(test)[0]
if SCons.Util.is_String(file):
file = env.File(file)
env.Depends(file, test_path)
file_name = file.path
TEST_REGISTRY[file_name].append(test_path)
env.GenerateTestExecutionAliases(test)
def test_list_builder_action(env, target, source):
"""Build a test list used by resmoke.py to execute binary tests."""
if SCons.Util.is_String(target[0]):
filename = env.subst(target[0])
else:
filename = target[0].path
source = [env.File(s).path if SCons.Util.is_String(s) else s.path for s in source]
with open(filename, "w") as ofile:
tests = TEST_REGISTRY[filename]
if source:
tests.extend(source)
for s in tests:
ofile.write("{}\n".format(str(s)))
TEST_LIST_BUILDER = SCons.Builder.Builder(
action=SCons.Action.FunctionAction(
test_list_builder_action, {"cmdstr": "Generating $TARGETS"},
)
)
def exists(env):
return True
def generate(env):
env["MONGO_TEST_REGISTRY"] = TEST_REGISTRY
env.Append(BUILDERS={"TestList": TEST_LIST_BUILDER})
env.AddMethod(register_test, "RegisterTest")
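# A minimal sketch of how an SConscript might drive these pseudo-builders once
# the tool is loaded; the target path and the program node are assumed names,
# not taken from the source.
#
#   env.RegisterTest('$BUILD_DIR/resmoke_tests.txt', unittest_program[0])
#   env.TestList('$BUILD_DIR/resmoke_tests.txt', source=[])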
| [
"collections.defaultdict",
"SCons.Action.FunctionAction",
"SCons.Util.is_String"
] | [((1217, 1234), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1228, 1234), False, 'from collections import defaultdict\n'), ((1507, 1533), 'SCons.Util.is_String', 'SCons.Util.is_String', (['file'], {}), '(file)\n', (1527, 1533), False, 'import SCons\n'), ((1847, 1878), 'SCons.Util.is_String', 'SCons.Util.is_String', (['target[0]'], {}), '(target[0])\n', (1867, 1878), False, 'import SCons\n'), ((2312, 2404), 'SCons.Action.FunctionAction', 'SCons.Action.FunctionAction', (['test_list_builder_action', "{'cmdstr': 'Generating $TARGETS'}"], {}), "(test_list_builder_action, {'cmdstr':\n 'Generating $TARGETS'})\n", (2339, 2404), False, 'import SCons\n'), ((1999, 2022), 'SCons.Util.is_String', 'SCons.Util.is_String', (['s'], {}), '(s)\n', (2019, 2022), False, 'import SCons\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.asset_v1.proto import (
asset_service_pb2 as google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class AssetServiceStub(object):
"""Asset service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ExportAssets = channel.unary_unary(
"/google.cloud.asset.v1.AssetService/ExportAssets",
request_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ExportAssetsRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.BatchGetAssetsHistory = channel.unary_unary(
"/google.cloud.asset.v1.AssetService/BatchGetAssetsHistory",
request_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryResponse.FromString,
)
self.CreateFeed = channel.unary_unary(
"/google.cloud.asset.v1.AssetService/CreateFeed",
request_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.CreateFeedRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.FromString,
)
self.GetFeed = channel.unary_unary(
"/google.cloud.asset.v1.AssetService/GetFeed",
request_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.GetFeedRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.FromString,
)
self.ListFeeds = channel.unary_unary(
"/google.cloud.asset.v1.AssetService/ListFeeds",
request_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ListFeedsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ListFeedsResponse.FromString,
)
self.UpdateFeed = channel.unary_unary(
"/google.cloud.asset.v1.AssetService/UpdateFeed",
request_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.UpdateFeedRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.FromString,
)
self.DeleteFeed = channel.unary_unary(
"/google.cloud.asset.v1.AssetService/DeleteFeed",
request_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.DeleteFeedRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class AssetServiceServicer(object):
"""Asset service definition.
"""
def ExportAssets(self, request, context):
"""Exports assets with time and resource types to a given Cloud Storage
location. The output format is newline-delimited JSON.
This API implements the [google.longrunning.Operation][google.longrunning.Operation] API allowing you
to keep track of the export.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BatchGetAssetsHistory(self, request, context):
"""Batch gets the update history of assets that overlap a time window.
For RESOURCE content, this API outputs history with asset in both
non-delete or deleted status.
For IAM_POLICY content, this API outputs history when the asset and its
attached IAM POLICY both exist. This can create gaps in the output history.
If a specified asset does not exist, this API returns an INVALID_ARGUMENT
error.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateFeed(self, request, context):
"""Creates a feed in a parent project/folder/organization to listen to its
asset updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetFeed(self, request, context):
"""Gets details about an asset feed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListFeeds(self, request, context):
"""Lists all asset feeds in a parent project/folder/organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateFeed(self, request, context):
"""Updates an asset feed configuration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteFeed(self, request, context):
"""Deletes an asset feed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_AssetServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"ExportAssets": grpc.unary_unary_rpc_method_handler(
servicer.ExportAssets,
request_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ExportAssetsRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"BatchGetAssetsHistory": grpc.unary_unary_rpc_method_handler(
servicer.BatchGetAssetsHistory,
request_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryRequest.FromString,
response_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryResponse.SerializeToString,
),
"CreateFeed": grpc.unary_unary_rpc_method_handler(
servicer.CreateFeed,
request_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.CreateFeedRequest.FromString,
response_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.SerializeToString,
),
"GetFeed": grpc.unary_unary_rpc_method_handler(
servicer.GetFeed,
request_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.GetFeedRequest.FromString,
response_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.SerializeToString,
),
"ListFeeds": grpc.unary_unary_rpc_method_handler(
servicer.ListFeeds,
request_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ListFeedsRequest.FromString,
response_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ListFeedsResponse.SerializeToString,
),
"UpdateFeed": grpc.unary_unary_rpc_method_handler(
servicer.UpdateFeed,
request_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.UpdateFeedRequest.FromString,
response_serializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.SerializeToString,
),
"DeleteFeed": grpc.unary_unary_rpc_method_handler(
servicer.DeleteFeed,
request_deserializer=google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.DeleteFeedRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.asset.v1.AssetService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| [
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler"
] | [((8440, 8539), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""google.cloud.asset.v1.AssetService"""', 'rpc_method_handlers'], {}), "('google.cloud.asset.v1.AssetService',\n rpc_method_handlers)\n", (8476, 8539), False, 'import grpc\n'), ((5953, 6240), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.ExportAssets'], {'request_deserializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ExportAssetsRequest.FromString', 'response_serializer': 'google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString'}), '(servicer.ExportAssets,\n request_deserializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n ExportAssetsRequest.FromString, response_serializer=\n google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString)\n', (5988, 6240), False, 'import grpc\n'), ((6303, 6655), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.BatchGetAssetsHistory'], {'request_deserializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryRequest.FromString', 'response_serializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryResponse.SerializeToString'}), '(servicer.BatchGetAssetsHistory,\n request_deserializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n BatchGetAssetsHistoryRequest.FromString, response_serializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n BatchGetAssetsHistoryResponse.SerializeToString)\n', (6338, 6655), False, 'import grpc\n'), ((6702, 7007), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.CreateFeed'], {'request_deserializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.CreateFeedRequest.FromString', 'response_serializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.SerializeToString'}), '(servicer.CreateFeed,\n request_deserializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n CreateFeedRequest.FromString, response_serializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.\n SerializeToString)\n', (6737, 7007), False, 'import grpc\n'), ((7051, 7346), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetFeed'], {'request_deserializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.GetFeedRequest.FromString', 'response_serializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.SerializeToString'}), '(servicer.GetFeed, request_deserializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n GetFeedRequest.FromString, response_serializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.\n SerializeToString)\n', (7086, 7346), False, 'import grpc\n'), ((7396, 7712), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.ListFeeds'], {'request_deserializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ListFeedsRequest.FromString', 'response_serializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.ListFeedsResponse.SerializeToString'}), '(servicer.ListFeeds,\n request_deserializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n ListFeedsRequest.FromString, response_serializer=\n 
google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n ListFeedsResponse.SerializeToString)\n', (7431, 7712), False, 'import grpc\n'), ((7759, 8064), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.UpdateFeed'], {'request_deserializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.UpdateFeedRequest.FromString', 'response_serializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.SerializeToString'}), '(servicer.UpdateFeed,\n request_deserializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n UpdateFeedRequest.FromString, response_serializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.Feed.\n SerializeToString)\n', (7794, 8064), False, 'import grpc\n'), ((8111, 8382), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.DeleteFeed'], {'request_deserializer': 'google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.DeleteFeedRequest.FromString', 'response_serializer': 'google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString'}), '(servicer.DeleteFeed,\n request_deserializer=\n google_dot_cloud_dot_asset__v1_dot_proto_dot_asset__service__pb2.\n DeleteFeedRequest.FromString, response_serializer=\n google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString)\n', (8146, 8382), False, 'import grpc\n')] |
from datetime import datetime, timedelta
import logging
from flask import Blueprint, abort, render_template, request, redirect
from flask_oauthlib.provider import OAuth2Provider
from oauthlib.common import generate_token as generate_random_token
from flask_login import current_user, login_required
from auth.login.base import demand_pebble
from .models import db, IssuedToken, AuthClient, User
from .redis import client as redis
import json
oauth_bp = Blueprint('oauth_bp', __name__)
oauth = OAuth2Provider()
class Grant:
def __init__(self, client_id, code, user_id, scopes, redirect_uri):
self.client_id = client_id
self.code = code
self.user_id = user_id
self.scopes = scopes
self.redirect_uri = redirect_uri
@property
def user(self):
return User.query.filter_by(id=self.user_id).one()
def delete(self):
redis.delete(self.key)
@property
def key(self):
return self.redis_key(self.client_id, self.code)
def serialise(self):
return json.dumps([self.client_id, self.code, self.user_id, self.scopes, self.redirect_uri]).encode('utf-8')
@classmethod
def deserialise(cls, serialised):
return cls(*json.loads(serialised.decode('utf-8')))
@classmethod
def redis_key(cls, client_id, code):
return f'grant-{client_id}-{code}'
@oauth.grantgetter
def load_grant(client_id, code):
return Grant.deserialise(redis.get(Grant.redis_key(client_id, code)))
@oauth.grantsetter
def set_grant(client_id, code, request, *args, **kwargs):
if not current_user.is_authenticated:
logging.error("Tried to set a grant for a user who is not logged in!?")
return None
grant = Grant(client_id, code['code'], current_user.id, request.scopes, request.redirect_uri)
redis.setex(grant.key, 100, grant.serialise())
return grant
@oauth.tokengetter
def get_token(access_token=None, refresh_token=None):
if access_token:
# There are two valid 'tokens': ones we've issued, and the Pebble token.
# Because we don't actually store the pebble token as an issued token, we have to
# check for it here and invent a token if it's the one we tried to use.
token = IssuedToken.query.filter_by(access_token=access_token).one_or_none()
if token:
return token
user = User.query.filter_by(pebble_token=access_token).one_or_none()
if user:
return IssuedToken(access_token=access_token, refresh_token=None, expires=None, client_id=None, user=user,
scopes=['pebble', 'pebble_token', 'profile'])
elif refresh_token:
return IssuedToken.query.filter_by(refresh_token=refresh_token).one_or_none()
@oauth.tokensetter
def set_token(token, request, *args, **kwargs):
expires_in = token.get('expires_in')
expires = datetime.utcnow() + timedelta(seconds=expires_in)
scopes = token['scope'].split(' ')
token = IssuedToken(access_token=token['access_token'], refresh_token=token['refresh_token'], expires=expires,
client_id=request.client.client_id, user_id=request.user.id,
scopes=scopes)
db.session.add(token)
db.session.commit()
return token
@oauth.clientgetter
def get_client(client_id):
return AuthClient.query.filter_by(client_id=client_id).one()
@oauth_bp.route('/authorise', methods=['GET', 'POST'])
@login_required
@oauth.authorize_handler
def authorise(*args, **kwargs):
return True
@oauth_bp.route('/token', methods=['GET', 'POST'])
@oauth.token_handler
def access_token():
return None
@oauth_bp.route('/error')
def oauth_error():
return render_template('oauth-error.html',
error=request.args.get('error', 'unknown'),
error_description=request.args.get('error_description', '')), 400
def generate_token(request, refresh_token=False):
return generate_random_token()
def init_app(app):
app.config['OAUTH2_PROVIDER_TOKEN_EXPIRES_IN'] = 315576000 # 10 years
app.config['OAUTH2_PROVIDER_ERROR_ENDPOINT'] = 'oauth_bp.oauth_error'
oauth.init_app(app)
app.register_blueprint(oauth_bp, url_prefix='/oauth')
app.extensions['csrf'].exempt(oauth_bp)
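# A small sketch of the Grant serialise/deserialise round trip used by the
# grant getter/setter above; the field values here are illustrative only.
#
#   grant = Grant('client-id', 'auth-code', 42, ['profile'], 'https://example.org/cb')
#   restored = Grant.deserialise(grant.serialise())
#   assert restored.code == grant.code and restored.scopes == grant.scopes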
| [
"flask.request.args.get",
"datetime.datetime.utcnow",
"json.dumps",
"flask_oauthlib.provider.OAuth2Provider",
"datetime.timedelta",
"flask.Blueprint",
"logging.error",
"oauthlib.common.generate_token"
] | [((456, 487), 'flask.Blueprint', 'Blueprint', (['"""oauth_bp"""', '__name__'], {}), "('oauth_bp', __name__)\n", (465, 487), False, 'from flask import Blueprint, abort, render_template, request, redirect\n'), ((496, 512), 'flask_oauthlib.provider.OAuth2Provider', 'OAuth2Provider', ([], {}), '()\n', (510, 512), False, 'from flask_oauthlib.provider import OAuth2Provider\n'), ((3963, 3986), 'oauthlib.common.generate_token', 'generate_random_token', ([], {}), '()\n', (3984, 3986), True, 'from oauthlib.common import generate_token as generate_random_token\n'), ((1618, 1689), 'logging.error', 'logging.error', (['"""Tried to set a grant for a user who is not logged in!?"""'], {}), "('Tried to set a grant for a user who is not logged in!?')\n", (1631, 1689), False, 'import logging\n'), ((2875, 2892), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2890, 2892), False, 'from datetime import datetime, timedelta\n'), ((2895, 2924), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'expires_in'}), '(seconds=expires_in)\n', (2904, 2924), False, 'from datetime import datetime, timedelta\n'), ((1041, 1131), 'json.dumps', 'json.dumps', (['[self.client_id, self.code, self.user_id, self.scopes, self.redirect_uri]'], {}), '([self.client_id, self.code, self.user_id, self.scopes, self.\n redirect_uri])\n', (1051, 1131), False, 'import json\n'), ((3769, 3805), 'flask.request.args.get', 'request.args.get', (['"""error"""', '"""unknown"""'], {}), "('error', 'unknown')\n", (3785, 3805), False, 'from flask import Blueprint, abort, render_template, request, redirect\n'), ((3852, 3893), 'flask.request.args.get', 'request.args.get', (['"""error_description"""', '""""""'], {}), "('error_description', '')\n", (3868, 3893), False, 'from flask import Blueprint, abort, render_template, request, redirect\n')] |
from re import compile as re_compile
from urllib.parse import parse_qs, quote_plus, urlencode, urlparse, urlunparse
from marshmallow import ValidationError
from .base_validators import BaseError, BaseValidator
class ValidError(BaseError):
@classmethod
def field_is_missing(cls, field: str, field_name: str) -> ValidationError:
return cls.required_field_is_empty(field, field_name)
@classmethod
def field_is_empty(cls, field: str, field_name: str,
is_required: bool=False) -> ValidationError:
if is_required:
return cls.required_field_is_empty(field, field_name)
else:
return ValidationError(
f'Поле "{field}" не может быть пустым.',
field_name, field, error_code=cls.get_empty()
)
@classmethod
def field_incorrect_value(cls, field: str, field_name: str,
value = None) -> ValidationError:
postfix = f': "{value}".' if value else '.'
return ValidationError(
f'Поле "{field}" содержит недопустимое значение{postfix}',
field_name, field, error_code=cls.get_incorrect()
)
@classmethod
def id_incorrect_value(cls, field: str, field_name: str,
value) -> ValidationError:
return ValidationError(
(
f'Поле "{field}" содержит недопустимое значение. '
f'Должно быть: число >= 0 в формате int или str. '
f'Имеется: {value}.'
),
field_name, field, value=value, error_code=cls.get_incorrect(True)
)
@classmethod
def obj_not_exist(cls, field: str, field_name: str,
id: int or str) -> ValidationError:
return ValidationError(
f'{field_name} с id="{id}" не существует.',
field_name, field, error_code=cls.get_missing()
)
@classmethod
def obj_data_not_exist(cls, obj_name: str, obj_id,
field_name: str, field: str, field_id) -> ValidationError:
return ValidationError(
(f'{obj_name} = "{obj_id}": '
f'{field_name} с id = "{field_id}" не существует.'),
field_name, field, error_code=cls.get_missing()
)
@classmethod
def required_field_is_empty(cls, field: str,
field_name: str) -> ValidationError:
return ValidationError(
'Обязательный параметр не может быть пустым.',
field_name, field, error_code=cls.get_empty(True)
)
@classmethod
def required_field_is_missing(cls, field: str,
field_name: str) -> ValidationError:
return ValidationError(
'Отсутствует обязательный параметр.',
field_name, field, error_code=cls.get_missing(True)
)
@classmethod
def obj_is_already_exist(cls, field: str, obj_name: str,
value) -> ValidationError:
return ValidationError(
(f'{obj_name} с {field}="{value}" уже существует. '
'Запрещено добавлять дубликаты.'),
obj_name, field
)
class SchemaValidator(BaseValidator):
@classmethod
def is_field_exist(cls, data: dict, field: str, field_name: str) -> object:
"""Проверка обязательного поля КП на существование.
Args:
data (dict): словарь полей КП.
field (str): наименование поля КП в схеме.
field_name (str): наименование поля КП для отображения.
Returns:
object: поле, прошедшее валидацию.
Raises:
ValidError: если параметр пуст.
"""
try:
field_data = data[field]
if field_data is None:
raise ValidError.required_field_is_empty(field, field_name)
if isinstance(field_data, str):
field_data = cls.parse_string(field_data)
if not field_data and not str(field_data).isdigit():
raise ValidError.required_field_is_empty(field, field_name)
except KeyError:
raise ValidError.required_field_is_missing(field, field_name)
else:
data[field] = field_data
return data.get(field)
@classmethod
def str_field(cls, data: dict, field: str, field_name: str,
is_required: bool=False) -> str:
"""Валидация строковых полей параметров.
Заменяет исходную строку на строку, прошедшую валидацию.
Args:
data (dict): словарь поля extra.
field (str): наименование поля параметра в схеме.
field_name (str): наименование поля параметра для отображения.
is_required (bool, optional): признак обязательности параметра
(по умолчанию - False).
Returns:
str: поле, прошедшее валидацию.
Raises:
ValidError: если параметр не является строкой.
"""
try:
field_data = data[field]
if field_data is None:
raise KeyError
field_data = cls.parse_string(field_data)
if not field_data:
raise KeyError
except AttributeError:
raise ValidError.field_incorrect_value(field, field_name,
data.get(field))
except KeyError:
if is_required:
raise ValidError.required_field_is_missing(field, field_name)
else:
data[field] = field_data
return data.get(field)
@classmethod
def url_field(cls, data: dict, field: str, field_name: str,
is_required: bool=False) -> str:
"""Валидация url-поля.
Заменяет исходную строку на строку, прошедшую валидацию.
Args:
data (dict): словарь поля extra.
field (str): наименование поля параметра в схеме.
field_name (str): наименование поля параметра для отображения.
is_required (bool, optional): признак обязательности параметра
(по умолчанию - False).
Returns:
str: поле, прошедшее валидацию.
Raises:
ValidError: если параметр не является валидной ссылкой.
"""
try:
url = data[field]
if url is None:
raise KeyError
url = cls.parse_string(url)
if not url:
raise KeyError
            url = url.rstrip('/')  # avoids IndexError when the value is only slashes
if url.count('/') < 2:
raise AttributeError
try:
parsed_url = urlparse(url)
except:
raise AttributeError
else:
if parsed_url.query:
new_url = list(parsed_url)
new_url[4] = urlencode(parse_qs(parsed_url.query),
quote_plus)
url = urlunparse(new_url)
pattern = re_compile((
r'(ftp|https?):\/\/(www\.)?'
r'[^\s\\\/\*\^|&\!\?()\{\}\[\]:;\'"%$\+=`]{1,256}'
r'\.[a-zA-Z0-9-а-яёА-ЯЁ()]{1,10}(:[0-9]{2,6})?(\/.*)?$'
))
if not pattern.search(url):
raise AttributeError
except AttributeError:
raise ValidError.field_incorrect_value(field, field_name,
data.get(field))
except KeyError:
if is_required:
raise ValidError.required_field_is_missing(field, field_name)
else:
data[field] = url
return data.get(field)
@classmethod
def url(cls, url: str) -> str:
return cls.url_field({'url': url}, 'url', 'Ссылка', is_required = True)
@classmethod
def id_field(cls, data: dict, field: str, field_name: str,
is_required: bool=False):
"""Валидация поля, содержащего идентифкатор/-ы.
Args:
data (dict): словарь полей КП.
field (str): наименование поля.
field_name (str): наименование поля для отображения.
is_required (bool, optional): признак обязательности параметра
(по умолчанию - False).
Returns:
any: поле, прошедшее валидацию.
Raises:
ValidError: если параметр не прошел валидацию.
"""
try:
field_data = data[field]
if field_data is None:
raise KeyError
data_type = type(field_data)
if isinstance(field_data, str):
field_data = [int(field_data), ]
elif isinstance(field_data, int):
field_data = [field_data, ]
else:
field_data = set([int(v) for v in field_data])
if not len(field_data):
raise KeyError
for v in field_data:
if v < 0:
raise ValueError
except (ValueError, TypeError):
raise ValidError.id_incorrect_value(field, field_name,
data.get(field))
except KeyError:
if is_required:
raise ValidError.required_field_is_missing(field, field_name)
else:
if data_type in (str, int):
data[field] = data_type(field_data.pop())
else:
data[field] = sorted(data_type(field_data))
return data.get(field)
@classmethod
def class_field(cls, data: dict, field: str, field_name: str,
FieldClass, is_required: bool=False, to_type: bool=False) -> dict:
"""Валидация поля, имеющего собственный класс.
Args:
data (dict): словарь, содержащий поле.
FieldClass (object): класс данных параметра.
field (str): наименование поля параметра в схеме.
field_name (str): наименование поля параметра для отображения.
is_required (bool, optional): признак обязательности параметра
(по умолчанию - False).
to_type (bool, optional): приведение данных к типу исходных данных
(по умолчанию - False).
Returns:
dict: словарь с обновленными данными.
Raises:
ValidError: если параметр содержит недопустимое значение.
"""
try:
field_data = data[field]
if field_data is None:
raise KeyError
if to_type and field_data == '':
data[field] = None
raise KeyError
data_type = type(field_data)
key_type = type(list(FieldClass.ALL.keys())[0])
if data_type in (str, int):
field_data = [key_type(field_data), ]
else:
field_data = set([key_type(v) for v in field_data])
if not len(field_data):
raise KeyError
try:
[FieldClass.ALL[v] for v in field_data]
except KeyError:
raise ValueError
except (ValueError, TypeError):
raise ValidError.field_incorrect_value(field, field_name,
data.get(field))
except KeyError:
if is_required:
raise ValidError.required_field_is_missing(field, field_name)
else:
if data_type in (str, int):
if to_type:
data[field] = field_data.pop()
else:
data[field] = data_type(field_data.pop())
else:
if to_type:
data[field] = sorted(list(field_data))
else:
data[field] = sorted(data_type(field_data))
return data.get(field)
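# A minimal sketch of calling the URL validator above; the URL and the
# try/except wrapper are illustrative, not taken from the source.
#
#   try:
#       clean_url = SchemaValidator.url('https://example.com/path/')
#   except ValidationError as err:
#       print(err.messages)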
| [
"urllib.parse.urlparse",
"marshmallow.ValidationError",
"re.compile",
"urllib.parse.urlunparse",
"urllib.parse.parse_qs"
] | [((3135, 3259), 'marshmallow.ValidationError', 'ValidationError', (['f"""{obj_name} с {field}="{value}" уже существует. Запрещено добавлять дубликаты."""', 'obj_name', 'field'], {}), '(\n f\'{obj_name} с {field}="{value}" уже существует. Запрещено добавлять дубликаты.\'\n , obj_name, field)\n', (3150, 3259), False, 'from marshmallow import ValidationError\n'), ((7216, 7384), 're.compile', 're_compile', (['"""(ftp|https?):\\\\/\\\\/(www\\\\.)?[^\\\\s\\\\\\\\\\\\/\\\\*\\\\^|&\\\\!\\\\?()\\\\{\\\\}\\\\[\\\\]:;\\\\\'"%$\\\\+=`]{1,256}\\\\.[a-zA-Z0-9-а-яёА-ЯЁ()]{1,10}(:[0-9]{2,6})?(\\\\/.*)?$"""'], {}), '(\n \'(ftp|https?):\\\\/\\\\/(www\\\\.)?[^\\\\s\\\\\\\\\\\\/\\\\*\\\\^|&\\\\!\\\\?()\\\\{\\\\}\\\\[\\\\]:;\\\\\\\'"%$\\\\+=`]{1,256}\\\\.[a-zA-Z0-9-а-яёА-ЯЁ()]{1,10}(:[0-9]{2,6})?(\\\\/.*)?$\'\n )\n', (7226, 7384), True, 'from re import compile as re_compile\n'), ((6827, 6840), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (6835, 6840), False, 'from urllib.parse import parse_qs, quote_plus, urlencode, urlparse, urlunparse\n'), ((7173, 7192), 'urllib.parse.urlunparse', 'urlunparse', (['new_url'], {}), '(new_url)\n', (7183, 7192), False, 'from urllib.parse import parse_qs, quote_plus, urlencode, urlparse, urlunparse\n'), ((7043, 7069), 'urllib.parse.parse_qs', 'parse_qs', (['parsed_url.query'], {}), '(parsed_url.query)\n', (7051, 7069), False, 'from urllib.parse import parse_qs, quote_plus, urlencode, urlparse, urlunparse\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2014 Mag. <NAME> All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# #*** <License> ************************************************************#
# This module is part of the package CNDB.OMP.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# CNDB.OMP.Net_Credentials
#
# Purpose
# Model credentials for a network interface
#
# Revision Dates
# 14-Mar-2012 (CT) Creation
# 6-Dec-2012 (RS) Add `belongs_to_node`, add `max_links`
# 15-May-2013 (CT) Replace `auto_cache` by `link_ref_attr_name`
# 20-May-2013 (CT) Set `_Net_Credentials_.left.link_ref_suffix` to `None`
# 13-Aug-2013 (CT) Add `key.typ`
# 30-Sep-2013 (CT) Mixin `Belongs_to_Node_Left`, not `Belongs_to_Node`
# 14-Apr-2014 (CT) Add mixin `Belongs_to_Net_Device_Left`
# ««revision-date»»···
#--
from __future__ import absolute_import, division, print_function, unicode_literals
from _MOM.import_MOM import *
from _MOM.import_MOM import _A_String_Ascii_
from _CNDB import CNDB
import _CNDB._OMP
import _CNDB._OMP.Net_Interface
import _CNDB._OMP.Belongs_to_Net_Device
import _CNDB._OMP.Belongs_to_Node
from _TFL.Regexp import Regexp, re
_Ancestor_Essence = CNDB.OMP.Link1
_Mixin_1 = CNDB.OMP.Belongs_to_Node_Left
_Mixin_2 = CNDB.OMP.Belongs_to_Net_Device_Left
class _Net_Credentials_ (_Mixin_1, _Mixin_2, _Ancestor_Essence) :
"""Model credentials used by a Net_Interface, e.g., `802.1x`
authentication for a wired interface, or WPA authentication for a WiFi
interface.
"""
is_partial = True
class _Attributes \
( _Mixin_1._Attributes
, _Mixin_2._Attributes
, _Ancestor_Essence._Attributes
) :
_Ancestor = _Ancestor_Essence._Attributes
### Primary attributes
class left (_Ancestor.left) :
"""The network interface using these credentials."""
role_type = CNDB.OMP.Net_Interface
role_name = "interface"
link_ref_attr_name = "credentials"
link_ref_suffix = None
max_links = 1
# end class left
### *** BEWARE ***
### To ensure that a `Net_Interface` has only one `credentials`, no
### other essential primary key attributes must be defined here or by
### derived classes
# end class _Attributes
# end class _Net_Credentials_
_Ancestor_Essence = _Net_Credentials_
class WPA_Credentials (_Ancestor_Essence) :
"""Model credentials necessary for WPA authentication."""
class _Attributes (_Ancestor_Essence._Attributes) :
_Ancestor = _Ancestor_Essence._Attributes
class key (Eval_Mixin, _A_String_Ascii_) :
"""Key used for WPA authentication."""
kind = Attr.Required
max_length = 32
typ = "Key"
### allow characters up to "\xFF"
_cooked_re = Regexp \
( "^[\x00-\xFF]*$"
, re.VERBOSE
)
# end class key
# end class _Attributes
# end class WPA2
if __name__ != "__main__" :
CNDB.OMP._Export ("*")
### __END__ CNDB.OMP.Net_Credentials
| [
"_CNDB.CNDB.OMP._Export",
"_TFL.Regexp.Regexp"
] | [((3420, 3441), '_CNDB.CNDB.OMP._Export', 'CNDB.OMP._Export', (['"""*"""'], {}), "('*')\n", (3436, 3441), False, 'from _CNDB import CNDB\n'), ((3224, 3257), '_TFL.Regexp.Regexp', 'Regexp', (["'^[\\x00-ÿ]*$'", 're.VERBOSE'], {}), "('^[\\x00-ÿ]*$', re.VERBOSE)\n", (3230, 3257), False, 'from _TFL.Regexp import Regexp, re\n')] |
#!/usr/bin/env python3
import sys
from PyQt5.QtWidgets import QApplication
from capybara_tw.app import MainWindow
__version__ = '0.1'
__application_name__ = 'Capybara Translation Workbench'
__organization_name__ = 'Capybara Translation'
def run():
app_ = QApplication(sys.argv)
app_.setApplicationName(__application_name__)
app_.setOrganizationName(__organization_name__)
app_.setApplicationVersion(__version__)
window = MainWindow()
window.show()
sys.exit(app_.exec_())
if __name__ == '__main__':
run()
| [
"capybara_tw.app.MainWindow",
"PyQt5.QtWidgets.QApplication"
] | [((262, 284), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (274, 284), False, 'from PyQt5.QtWidgets import QApplication\n'), ((444, 456), 'capybara_tw.app.MainWindow', 'MainWindow', ([], {}), '()\n', (454, 456), False, 'from capybara_tw.app import MainWindow\n')] |
import requests
from django.urls import reverse
from django.test import TestCase
from django.db.models import Count
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Car, Rate
from car.serializers import CarSerializer, PopularCarSerializer
CAR_URL = reverse('car:cars-list')
POPULAR_CAR_URL = reverse('car:popular-list')
CAR_MAKE_EXTERNAL_API = 'https://vpic.nhtsa.dot.gov/api/vehicles/getallmakes?format=json'
CAR_MODEL_EXTERNAL_API = 'https://vpic.nhtsa.dot.gov/api/vehicles/getmodelsformake/{}?format=json'
def sample_car(**params):
"""Create and return a sample car"""
defaults = {
'make_name': 'HONDA',
'model_name': 'Accord',
}
defaults.update(params)
return Car.objects.create(**defaults)
class PublicCarApiTests(TestCase):
"""Test the publicly available cars API"""
def setUp(self):
self.client = APIClient()
def test_retrieve_car_list(self):
"""Test retriving a list of cars"""
sample_car()
sample_car()
res = self.client.get(CAR_URL)
cars = Car.objects.all().order_by('-make_name')
serializer = CarSerializer(cars, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_create_car_successful(self):
"""Test a new car creation was successful by checking External API"""
payload = {
'make_name': '<NAME>',
'model_name': 'V8 Vantage',
}
self.client.post(CAR_URL, payload)
exists = Car.objects.filter(
make_name=payload['make_name'],
model_name=payload['model_name']
).exists()
self.assertTrue(exists)
def test_create_car_with_lowercase(self):
"""Test a new car creation with lowercase"""
payload = {
'make_name': '<NAME>',
'model_name': 'V8 Vantage',
}
res = self.client.post(CAR_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
def test_create_car_invalid(self):
"""Test a new car creation failed"""
payload = {'model_name': ''}
res = self.client.post(CAR_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_car_average_rate_value(self):
"""Test average rate value for particular car"""
car = sample_car()
Rate.objects.create(car=car, rate=3)
Rate.objects.create(car=car, rate=5)
res = self.client.get(CAR_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data[0]['rating'], 4)
def test_car_default_rate_value(self):
"""Test default rate value for particular car"""
sample_car()
res = self.client.get(CAR_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data[0]['rating'], 0)
def test_retrieve_popular_cars(self):
"""Test retrieve popular cars based on number of rates"""
car1 = sample_car(make_name="BMW", model_name="M4")
car2 = sample_car(make_name="Mercedes", model_name="Benz")
Rate.objects.create(car=car1, rate=4)
Rate.objects.create(car=car1, rate=2)
Rate.objects.create(car=car1, rate=2)
Rate.objects.create(car=car2, rate=3)
Rate.objects.create(car=car2, rate=5)
res = self.client.get(POPULAR_CAR_URL)
ordered_queryset = Rate.objects.filter(car_id=car1.id).values('car').annotate(total_rates=Count('car'))
popular_cars = [get_object_or_404(Car, id=item['car']) for item in ordered_queryset]
serializer = PopularCarSerializer(popular_cars, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data[0], serializer.data[0])
class ExternalCarApiTests(TestCase):
""" Test for External Vehicle API"""
def setUp(self):
self.client = APIClient()
def test_car_in_vehicles(self):
"""Test requested car exists inside the external API data"""
payload = {
'make_name': 'ASTON MARTIN',
'model_name': 'V8 Vantage',
}
car_makes_res = requests.get(CAR_MAKE_EXTERNAL_API).json()
car_make = next(item for item in car_makes_res['Results'] if item["Make_Name"] == payload['make_name'])
if car_make:
car_models_res = requests.get(CAR_MODEL_EXTERNAL_API.format(car_make['Make_Name'])).json()
car_model = next(item for item in car_models_res['Results']
if item["Model_Name"] == payload['model_name'])
self.assertIn(car_model['Make_Name'], payload['make_name'])
def test_car_not_in_vehicles(self):
"""Test requested car DO NOT exists inside the external API data"""
payload = {
'make_name': 'Test Make',
'model_name': 'Test Model',
}
car_makes_res = requests.get(CAR_MAKE_EXTERNAL_API).json()
        try:
            car_make = next(item for item in car_makes_res['Results']
                            if item["Make_Name"] == payload['make_name'])
        except StopIteration:
            car_make = None
        self.assertEqual(car_make, None)
| [
"core.models.Rate.objects.filter",
"car.serializers.CarSerializer",
"core.models.Car.objects.filter",
"django.db.models.Count",
"core.models.Rate.objects.create",
"django.shortcuts.get_object_or_404",
"core.models.Car.objects.create",
"car.serializers.PopularCarSerializer",
"requests.get",
"rest_f... | [((351, 375), 'django.urls.reverse', 'reverse', (['"""car:cars-list"""'], {}), "('car:cars-list')\n", (358, 375), False, 'from django.urls import reverse\n'), ((394, 421), 'django.urls.reverse', 'reverse', (['"""car:popular-list"""'], {}), "('car:popular-list')\n", (401, 421), False, 'from django.urls import reverse\n'), ((806, 836), 'core.models.Car.objects.create', 'Car.objects.create', ([], {}), '(**defaults)\n', (824, 836), False, 'from core.models import Car, Rate\n'), ((965, 976), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (974, 976), False, 'from rest_framework.test import APIClient\n'), ((1220, 1250), 'car.serializers.CarSerializer', 'CarSerializer', (['cars'], {'many': '(True)'}), '(cars, many=True)\n', (1233, 1250), False, 'from car.serializers import CarSerializer, PopularCarSerializer\n'), ((2516, 2552), 'core.models.Rate.objects.create', 'Rate.objects.create', ([], {'car': 'car', 'rate': '(3)'}), '(car=car, rate=3)\n', (2535, 2552), False, 'from core.models import Car, Rate\n'), ((2561, 2597), 'core.models.Rate.objects.create', 'Rate.objects.create', ([], {'car': 'car', 'rate': '(5)'}), '(car=car, rate=5)\n', (2580, 2597), False, 'from core.models import Car, Rate\n'), ((3276, 3313), 'core.models.Rate.objects.create', 'Rate.objects.create', ([], {'car': 'car1', 'rate': '(4)'}), '(car=car1, rate=4)\n', (3295, 3313), False, 'from core.models import Car, Rate\n'), ((3322, 3359), 'core.models.Rate.objects.create', 'Rate.objects.create', ([], {'car': 'car1', 'rate': '(2)'}), '(car=car1, rate=2)\n', (3341, 3359), False, 'from core.models import Car, Rate\n'), ((3368, 3405), 'core.models.Rate.objects.create', 'Rate.objects.create', ([], {'car': 'car1', 'rate': '(2)'}), '(car=car1, rate=2)\n', (3387, 3405), False, 'from core.models import Car, Rate\n'), ((3414, 3451), 'core.models.Rate.objects.create', 'Rate.objects.create', ([], {'car': 'car2', 'rate': '(3)'}), '(car=car2, rate=3)\n', (3433, 3451), False, 'from core.models import Car, Rate\n'), ((3460, 3497), 'core.models.Rate.objects.create', 'Rate.objects.create', ([], {'car': 'car2', 'rate': '(5)'}), '(car=car2, rate=5)\n', (3479, 3497), False, 'from core.models import Car, Rate\n'), ((3773, 3818), 'car.serializers.PopularCarSerializer', 'PopularCarSerializer', (['popular_cars'], {'many': '(True)'}), '(popular_cars, many=True)\n', (3793, 3818), False, 'from car.serializers import CarSerializer, PopularCarSerializer\n'), ((4064, 4075), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (4073, 4075), False, 'from rest_framework.test import APIClient\n'), ((3683, 3721), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Car'], {'id': "item['car']"}), "(Car, id=item['car'])\n", (3700, 3721), False, 'from django.shortcuts import get_object_or_404\n'), ((1158, 1175), 'core.models.Car.objects.all', 'Car.objects.all', ([], {}), '()\n', (1173, 1175), False, 'from core.models import Car, Rate\n'), ((1653, 1742), 'core.models.Car.objects.filter', 'Car.objects.filter', ([], {'make_name': "payload['make_name']", 'model_name': "payload['model_name']"}), "(make_name=payload['make_name'], model_name=payload[\n 'model_name'])\n", (1671, 1742), False, 'from core.models import Car, Rate\n'), ((3645, 3657), 'django.db.models.Count', 'Count', (['"""car"""'], {}), "('car')\n", (3650, 3657), False, 'from django.db.models import Count\n'), ((4318, 4353), 'requests.get', 'requests.get', (['CAR_MAKE_EXTERNAL_API'], {}), '(CAR_MAKE_EXTERNAL_API)\n', (4330, 4353), False, 'import requests\n'), ((5078, 
5113), 'requests.get', 'requests.get', (['CAR_MAKE_EXTERNAL_API'], {}), '(CAR_MAKE_EXTERNAL_API)\n', (5090, 5113), False, 'import requests\n'), ((3574, 3609), 'core.models.Rate.objects.filter', 'Rate.objects.filter', ([], {'car_id': 'car1.id'}), '(car_id=car1.id)\n', (3593, 3609), False, 'from core.models import Car, Rate\n')] |
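# The tests above expect each serialized car to expose an aggregate `rating` field
# (the average of its rates, defaulting to 0 when a car has none). A hedged sketch of
# one way a view's queryset could provide that value; the reverse lookup name `rate`
# assumes the Rate -> Car foreign key keeps Django's default related name, and this is
# not necessarily the project's real implementation.
from django.db.models import Avg, FloatField, Value
from django.db.models.functions import Coalesce

def cars_with_rating():
    """Queryset sketch: rating == 4 for rates (3, 5), rating == 0 with no rates."""
    return (
        Car.objects
        .annotate(rating=Coalesce(Avg('rate__rate'), Value(0.0), output_field=FloatField()))
        .order_by('-make_name')
    )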
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
from typing import Any, Dict, List, Optional, Sequence
from iopath.common.file_io import PathManager
from .implicitron_dataset import ImplicitronDataset, ImplicitronDatasetBase
from .utils import (
DATASET_TYPE_KNOWN,
DATASET_TYPE_TEST,
DATASET_TYPE_TRAIN,
DATASET_TYPE_UNKNOWN,
)
# TODO from dataset.dataset_configs import DATASET_CONFIGS
DATASET_CONFIGS: Dict[str, Dict[str, Any]] = {
"default": {
"box_crop": True,
"box_crop_context": 0.3,
"image_width": 800,
"image_height": 800,
"remove_empty_masks": True,
}
}
# fmt: off
CO3D_CATEGORIES: List[str] = list(reversed([
"baseballbat", "banana", "bicycle", "microwave", "tv",
"cellphone", "toilet", "hairdryer", "couch", "kite", "pizza",
"umbrella", "wineglass", "laptop",
"hotdog", "stopsign", "frisbee", "baseballglove",
"cup", "parkingmeter", "backpack", "toyplane", "toybus",
"handbag", "chair", "keyboard", "car", "motorcycle",
"carrot", "bottle", "sandwich", "remote", "bowl", "skateboard",
"toaster", "mouse", "toytrain", "book", "toytruck",
"orange", "broccoli", "plant", "teddybear",
"suitcase", "bench", "ball", "cake",
"vase", "hydrant", "apple", "donut",
]))
# fmt: on
_CO3D_DATASET_ROOT: str = os.getenv("CO3D_DATASET_ROOT", "")
def dataset_zoo(
dataset_name: str = "co3d_singlesequence",
dataset_root: str = _CO3D_DATASET_ROOT,
category: str = "DEFAULT",
limit_to: int = -1,
limit_sequences_to: int = -1,
n_frames_per_sequence: int = -1,
test_on_train: bool = False,
load_point_clouds: bool = False,
mask_images: bool = False,
mask_depths: bool = False,
restrict_sequence_name: Sequence[str] = (),
test_restrict_sequence_id: int = -1,
assert_single_seq: bool = False,
only_test_set: bool = False,
aux_dataset_kwargs: dict = DATASET_CONFIGS["default"],
path_manager: Optional[PathManager] = None,
) -> Dict[str, ImplicitronDatasetBase]:
"""
Generates the training / validation and testing dataset objects.
Args:
dataset_name: The name of the returned dataset.
dataset_root: The root folder of the dataset.
category: The object category of the dataset.
limit_to: Limit the dataset to the first #limit_to frames.
limit_sequences_to: Limit the dataset to the first
#limit_sequences_to sequences.
n_frames_per_sequence: Randomly sample #n_frames_per_sequence frames
in each sequence.
test_on_train: Construct validation and test datasets from
the training subset.
load_point_clouds: Enable returning scene point clouds from the dataset.
mask_images: Mask the loaded images with segmentation masks.
mask_depths: Mask the loaded depths with segmentation masks.
restrict_sequence_name: Restrict the dataset sequences to the ones
present in the given list of names.
test_restrict_sequence_id: The ID of the loaded sequence.
Active for dataset_name='co3d_singlesequence'.
assert_single_seq: Assert that only frames from a single sequence
are present in all generated datasets.
only_test_set: Load only the test set.
aux_dataset_kwargs: Specifies additional arguments to the
ImplicitronDataset constructor call.
Returns:
datasets: A dictionary containing the
`"dataset_subset_name": torch_dataset_object` key, value pairs.
"""
restrict_sequence_name = tuple(restrict_sequence_name)
aux_dataset_kwargs = dict(aux_dataset_kwargs)
datasets = {}
# TODO:
# - implement loading multiple categories
if dataset_name in ["co3d_singlesequence", "co3d_multisequence"]:
# This maps the common names of the dataset subsets ("train"/"val"/"test")
# to the names of the subsets in the CO3D dataset.
set_names_mapping = _get_co3d_set_names_mapping(
dataset_name,
test_on_train,
only_test_set,
)
# load the evaluation batches
task = dataset_name.split("_")[-1]
batch_indices_path = os.path.join(
dataset_root,
category,
f"eval_batches_{task}.json",
)
if not os.path.isfile(batch_indices_path):
# The batch indices file does not exist.
# Most probably the user has not specified the root folder.
raise ValueError("Please specify a correct dataset_root folder.")
with open(batch_indices_path, "r") as f:
eval_batch_index = json.load(f)
if task == "singlesequence":
assert (
test_restrict_sequence_id is not None and test_restrict_sequence_id >= 0
), (
"Please specify an integer id 'test_restrict_sequence_id'"
+ " of the sequence considered for 'singlesequence'"
+ " training and evaluation."
)
assert len(restrict_sequence_name) == 0, (
"For the 'singlesequence' task, the restrict_sequence_name has"
" to be unset while test_restrict_sequence_id has to be set to an"
" integer defining the order of the evaluation sequence."
)
# a sort-stable set() equivalent:
eval_batches_sequence_names = list(
{b[0][0]: None for b in eval_batch_index}.keys()
)
eval_sequence_name = eval_batches_sequence_names[test_restrict_sequence_id]
eval_batch_index = [
b for b in eval_batch_index if b[0][0] == eval_sequence_name
]
# overwrite the restrict_sequence_name
restrict_sequence_name = [eval_sequence_name]
for dataset, subsets in set_names_mapping.items():
frame_file = os.path.join(dataset_root, category, "frame_annotations.jgz")
assert os.path.isfile(frame_file)
sequence_file = os.path.join(
dataset_root, category, "sequence_annotations.jgz"
)
assert os.path.isfile(sequence_file)
subset_lists_file = os.path.join(dataset_root, category, "set_lists.json")
assert os.path.isfile(subset_lists_file)
# TODO: maybe directly in param list
params = {
**copy.deepcopy(aux_dataset_kwargs),
"frame_annotations_file": frame_file,
"sequence_annotations_file": sequence_file,
"subset_lists_file": subset_lists_file,
"dataset_root": dataset_root,
"limit_to": limit_to,
"limit_sequences_to": limit_sequences_to,
"n_frames_per_sequence": n_frames_per_sequence
if dataset == "train"
else -1,
"subsets": subsets,
"load_point_clouds": load_point_clouds,
"mask_images": mask_images,
"mask_depths": mask_depths,
"pick_sequence": restrict_sequence_name,
"path_manager": path_manager,
}
datasets[dataset] = ImplicitronDataset(**params)
if dataset == "test":
if len(restrict_sequence_name) > 0:
eval_batch_index = [
b for b in eval_batch_index if b[0][0] in restrict_sequence_name
]
datasets[dataset].eval_batches = datasets[
dataset
].seq_frame_index_to_dataset_index(eval_batch_index)
if assert_single_seq:
        # check there's only one sequence in all datasets
assert (
len(
{
e["frame_annotation"].sequence_name
for dset in datasets.values()
for e in dset.frame_annots
}
)
<= 1
), "Multiple sequences loaded but expected one"
else:
raise ValueError(f"Unsupported dataset: {dataset_name}")
if test_on_train:
datasets["val"] = datasets["train"]
datasets["test"] = datasets["train"]
return datasets
def _get_co3d_set_names_mapping(
dataset_name: str,
test_on_train: bool,
only_test: bool,
) -> Dict[str, List[str]]:
"""
Returns the mapping of the common dataset subset names ("train"/"val"/"test")
to the names of the corresponding subsets in the CO3D dataset
("test_known"/"test_unseen"/"train_known"/"train_unseen").
"""
single_seq = dataset_name == "co3d_singlesequence"
if only_test:
set_names_mapping = {}
else:
set_names_mapping = {
"train": [
(DATASET_TYPE_TEST if single_seq else DATASET_TYPE_TRAIN)
+ "_"
+ DATASET_TYPE_KNOWN
]
}
if not test_on_train:
prefixes = [DATASET_TYPE_TEST]
if not single_seq:
prefixes.append(DATASET_TYPE_TRAIN)
set_names_mapping.update(
{
dset: [
p + "_" + t
for p in prefixes
for t in [DATASET_TYPE_KNOWN, DATASET_TYPE_UNKNOWN]
]
for dset in ["val", "test"]
}
)
return set_names_mapping
| [
"os.getenv",
"os.path.join",
"os.path.isfile",
"copy.deepcopy",
"json.load"
] | [((1516, 1550), 'os.getenv', 'os.getenv', (['"""CO3D_DATASET_ROOT"""', '""""""'], {}), "('CO3D_DATASET_ROOT', '')\n", (1525, 1550), False, 'import os\n'), ((4409, 4474), 'os.path.join', 'os.path.join', (['dataset_root', 'category', 'f"""eval_batches_{task}.json"""'], {}), "(dataset_root, category, f'eval_batches_{task}.json')\n", (4421, 4474), False, 'import os\n'), ((4537, 4571), 'os.path.isfile', 'os.path.isfile', (['batch_indices_path'], {}), '(batch_indices_path)\n', (4551, 4571), False, 'import os\n'), ((4857, 4869), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4866, 4869), False, 'import json\n'), ((6124, 6185), 'os.path.join', 'os.path.join', (['dataset_root', 'category', '"""frame_annotations.jgz"""'], {}), "(dataset_root, category, 'frame_annotations.jgz')\n", (6136, 6185), False, 'import os\n'), ((6205, 6231), 'os.path.isfile', 'os.path.isfile', (['frame_file'], {}), '(frame_file)\n', (6219, 6231), False, 'import os\n'), ((6261, 6325), 'os.path.join', 'os.path.join', (['dataset_root', 'category', '"""sequence_annotations.jgz"""'], {}), "(dataset_root, category, 'sequence_annotations.jgz')\n", (6273, 6325), False, 'import os\n'), ((6375, 6404), 'os.path.isfile', 'os.path.isfile', (['sequence_file'], {}), '(sequence_file)\n', (6389, 6404), False, 'import os\n'), ((6438, 6492), 'os.path.join', 'os.path.join', (['dataset_root', 'category', '"""set_lists.json"""'], {}), "(dataset_root, category, 'set_lists.json')\n", (6450, 6492), False, 'import os\n'), ((6512, 6545), 'os.path.isfile', 'os.path.isfile', (['subset_lists_file'], {}), '(subset_lists_file)\n', (6526, 6545), False, 'import os\n'), ((6637, 6670), 'copy.deepcopy', 'copy.deepcopy', (['aux_dataset_kwargs'], {}), '(aux_dataset_kwargs)\n', (6650, 6670), False, 'import copy\n')] |
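# A hedged usage sketch for dataset_zoo() above; the category and dataset_root are
# placeholders and assume a locally downloaded copy of CO3D.
datasets = dataset_zoo(
    dataset_name="co3d_singlesequence",
    dataset_root="/data/CO3D",   # placeholder path
    category="hydrant",          # placeholder category
    test_restrict_sequence_id=0,
    assert_single_seq=True,
)
print({name: len(dataset) for name, dataset in datasets.items()})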
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class RNN(nn.Module):
def __init__(self, num_layers, hidden_size, input_size):
super(RNN, self).__init__()
self.num_layers = num_layers
self.hidden_size = hidden_size
self.encoder = nn.Embedding(input_size, self.hidden_size, padding_idx=0)
self.lstm = nn.LSTM(input_size=self.hidden_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=False)
self.decoder = nn.Sequential(
nn.Dropout(p = 0.2),
nn.Linear(self.hidden_size, input_size)
#nn.Softmax(dim=2)
)
self.cuda()
def forward(self, x, h):
x = self.encoder(x)
x, h = self.lstm(x, h)
#print(x.shape)
out = self.decoder(x)
return out, h
def init_state(self, sequence_length=32, cuda=True):
# batch_size x hidden_size
if cuda:
return (torch.zeros(self.num_layers, sequence_length, self.hidden_size).cuda(),
torch.zeros(self.num_layers, sequence_length, self.hidden_size).cuda())
return (torch.zeros(self.num_layers, 1, self.hidden_size),
torch.zeros(self.num_layers, 1, self.hidden_size))
| [
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Embedding"
] | [((314, 371), 'torch.nn.Embedding', 'nn.Embedding', (['input_size', 'self.hidden_size'], {'padding_idx': '(0)'}), '(input_size, self.hidden_size, padding_idx=0)\n', (326, 371), True, 'import torch.nn as nn\n'), ((393, 510), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.hidden_size', 'hidden_size': 'self.hidden_size', 'num_layers': 'self.num_layers', 'batch_first': '(False)'}), '(input_size=self.hidden_size, hidden_size=self.hidden_size,\n num_layers=self.num_layers, batch_first=False)\n', (400, 510), True, 'import torch.nn as nn\n'), ((651, 668), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)'}), '(p=0.2)\n', (661, 668), True, 'import torch.nn as nn\n'), ((684, 723), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', 'input_size'], {}), '(self.hidden_size, input_size)\n', (693, 723), True, 'import torch.nn as nn\n'), ((1273, 1322), 'torch.zeros', 'torch.zeros', (['self.num_layers', '(1)', 'self.hidden_size'], {}), '(self.num_layers, 1, self.hidden_size)\n', (1284, 1322), False, 'import torch\n'), ((1340, 1389), 'torch.zeros', 'torch.zeros', (['self.num_layers', '(1)', 'self.hidden_size'], {}), '(self.num_layers, 1, self.hidden_size)\n', (1351, 1389), False, 'import torch\n'), ((1092, 1155), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'sequence_length', 'self.hidden_size'], {}), '(self.num_layers, sequence_length, self.hidden_size)\n', (1103, 1155), False, 'import torch\n'), ((1184, 1247), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'sequence_length', 'self.hidden_size'], {}), '(self.num_layers, sequence_length, self.hidden_size)\n', (1195, 1247), False, 'import torch\n')] |
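# A minimal smoke test for the RNN above (sizes are arbitrary). It assumes a CUDA
# device is available, because the constructor calls self.cuda() unconditionally.
vocab_size, hidden, layers, batch, seq_len = 100, 64, 2, 4, 16
model = RNN(num_layers=layers, hidden_size=hidden, input_size=vocab_size)
tokens = torch.randint(0, vocab_size, (seq_len, batch)).cuda()  # (seq, batch) since batch_first=False
state = model.init_state(sequence_length=batch)              # second dim of the state is the batch size
out, state = model(tokens, state)
print(out.shape)  # torch.Size([16, 4, 100]) -> (seq_len, batch, vocab_size)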
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from brambling.utils.payment import dwolla_update_tokens
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
'--days',
action='store',
dest='days',
default=15,
help='Number of days ahead of time to update refresh tokens.'),
)
def handle(self, *args, **options):
try:
days = int(options['days'])
except ValueError:
raise CommandError("Days must be an integer value.")
self.stdout.write("Updating dwolla tokens...")
self.stdout.flush()
count, test_count = dwolla_update_tokens(days)
self.stdout.write("Test tokens updated: {}".format(count))
self.stdout.write("Live tokens updated: {}".format(test_count))
self.stdout.flush()
| [
"brambling.utils.payment.dwolla_update_tokens",
"optparse.make_option",
"django.core.management.base.CommandError"
] | [((737, 763), 'brambling.utils.payment.dwolla_update_tokens', 'dwolla_update_tokens', (['days'], {}), '(days)\n', (757, 763), False, 'from brambling.utils.payment import dwolla_update_tokens\n'), ((242, 372), 'optparse.make_option', 'make_option', (['"""--days"""'], {'action': '"""store"""', 'dest': '"""days"""', 'default': '(15)', 'help': '"""Number of days ahead of time to update refresh tokens."""'}), "('--days', action='store', dest='days', default=15, help=\n 'Number of days ahead of time to update refresh tokens.')\n", (253, 372), False, 'from optparse import make_option\n'), ((579, 625), 'django.core.management.base.CommandError', 'CommandError', (['"""Days must be an integer value."""'], {}), "('Days must be an integer value.')\n", (591, 625), False, 'from django.core.management.base import BaseCommand, CommandError\n')] |
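# A sketch of invoking the command programmatically. The command name is this module's
# filename, which is not shown in the snippet, so "dwolla_update_refresh_tokens" below
# is only a placeholder.
from django.core.management import call_command

call_command("dwolla_update_refresh_tokens", days=30)  # placeholder command name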
# Everyone did everything here
## JSON, Main UI, Keybinds, 'Backend' mostly by Ethan
## Refractoring, 'Backend' by Jerick
## Everything else + Starting Point by Granwyn
import tkinter as tk
from tkinter import ttk
import os
from tkinter import font
import json
from PIL import ImageTk, Image
from tkinter import Menu
import config
from toolsUI import *
from components.wrappedLabel import WrappingLabel
import os
import webbrowser
import tools.periodicTable as pt
# Paths to the different files
ROOTDIR, _ = os.path.split(os.path.abspath(os.path.realpath(__file__)))
os.chdir(ROOTDIR)
jsonData = os.path.join(ROOTDIR, '.data.json')
appIconIcon = os.path.join(ROOTDIR,'src','images','AppIcon.ico')
appIconIcns = os.path.join(ROOTDIR,'src','images','AppIcon.icns')
appIconPng = os.path.join(ROOTDIR,'src','images','AppIcon.png')
appThemePath = os.path.join(ROOTDIR,"sun-valley.tcl")
_recentlength=10
FONT = 'TkDefaultFont'
font = FONT  # font family alias; getInputs() below builds its font tuples with the lowercase name
# Checks for Font Size Change
def reload():
global fontMultiplier
file = open(jsonData)
extractedData = json.load(file)
file.close()
fontMultiplier = float(extractedData["fontMultiplier"])
reload()
# Variables
functionalities = {
"Settings" : Settings,
"Chemical Equation" : ChemicalEquation,
"Rectangle/Square" : Rectangle,
"Ionic Equation" : IonicEqn,
"Salt Solubilities" : SaltSolubility,
"Calculator" : calculate,
"Circle/Semicircle" : Circle,
"Quadratic" : SolveQuad,
"Parallelogram" : Parallelogram,
"Trapezium" : Trapezium,
"Simultaneous" : simsolver,
"Triangle" : triangle,
"Equation of Circle": SolveCircle,
"Periodic Table" : periodicTable,
"Pyramid" : Pyramid,
"Prism" : Prism,
"Sphere" : Sphere,
}
treeview_data = [
("", 1, "Chemistry"),
(1, 2, "Periodic Table"),
(1, 3, "Salt Solubilities"),
(1, 4, "Chemical Equation"),
(1, 5, "Ionic Equation"),
("", 6, "Mathematics"),
(6, 7, "Calculator"),
(6, 8, "Equations"),
(8, 9, "Simultaneous"),
(8, 10, "Quadratic"),
(8, 11, "Cubic"),
(8, 12, "Quartic"),
(6, 13, "Matrices"),
(6, 14, "Inequalities"),
(6, 15, "Mensuration"),
(15, 16, "Area"),
(16, 17, "Rectangle/Square"),
(16, 18, "Triangle"),
(16, 19, "Parallelogram"),
(16, 20, "Rhombus"),
(16, 21, "Trapezium"),
(16, 22, "Circle/Semicircle"),
(15, 23, "Volume and Surface Area"),
(23, 24, "Pyramid"),
(24, 25, "Triangle-Based"),
(24, 26, "Square-Based"),
(24, 27, "Cone"),
(23, 28, "Prism"),
(29, 30, "Triangular Prism"),
(29, 31, "Cylinder"),
(29, 32, "Cuboid/Cube"),
(23, 29, "Sphere"),
(6, 33, "Percentage"),
(6, 38, "Circles"),
(38, 39, "Circle Properties"),
(38, 40, "Equation of Circle"),
("", 41, "Settings"),
]
TOPICS=[treeview_data[i-1][2] for i in [1,6,8,16,15,23,34,24,29]] # Add Items into Treeview
topics = []
class App(ttk.Frame):
def __init__(self, parent):
self.notify("App Shortcuts for Toobox", "Use Control + H to go to the Home Screen and use Control + F to enter Full Screen, Escape Key to exit Full Screen.", "Boop")
self.screenlist = []
# aSecret :) hehehehe
if config.aSecret:
f()
self.check_recently_opened()
# Initalize the app
ttk.Frame.__init__(self)
self.setup_menu()
self.setup_widgets()
# Variable to track if fullscreen
self.fullScreen = False
# Set Bindings/Shortcuts
self.fullScreenBindings()
self.goHome()
# self.resetSettingsSC()
config.currentlySelected = "Home"
def getInputs(self, event):
try:
text.grid_forget()
text.destroy()
except: pass
self.resFrame.destroy()
self.resFrame = self.addframe(self.mainFrame)
e = self.inputField.get().replace(" ", "")
l=pt.search(e)[:6]
newf=self.addframe(self.resFrame,borderwidth=1)
if len(l) > 0:
temp=WrappingLabel(newf, text="Atomic Number", font=(font,int(fontMultiplier*10)))
temp.grid(row=0, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Mass Number", font=(font,int(fontMultiplier*10)))
temp.grid(row=1, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Period"+", "+"Group", font=(font,int(fontMultiplier*10)))
temp.grid(row=2, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Symbol", font=(font,int(fontMultiplier*15), 'bold'))
temp.grid(row=3, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Element", font=(font,int(fontMultiplier*12), 'bold'))
temp.grid(row=4, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Atomic Mass" , font=(font,int(fontMultiplier*10)))
temp.grid(row=5, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text=", ".join(["Protons","Neutrons","Electrons"]), font=(font,int(fontMultiplier*10)))
temp.grid(row=6, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Atomic Radius" , font=(font,int(fontMultiplier*10)))
temp.grid(row=7, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Electron Shells" , font=(font,int(fontMultiplier*10)))
temp.grid(row=8, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Valence Electrons" , font=(font,int(fontMultiplier*10)))
temp.grid(row=9, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Electronic Configuration" , font=(font,int(fontMultiplier*10)))
temp.grid(row=10, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Isotopes" , font=(font,int(fontMultiplier*10)))
temp.grid(row=11, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text=" ".join(["[{}]".format("Phase"), ", ".join(["Melting Point", "Boiling Point"])]), font=(font,int(fontMultiplier*10)))
temp.grid(row=12, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Type" , font=(font,int(fontMultiplier*10)))
temp.grid(row=13, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Radioactive" , font=(font,int(fontMultiplier*10)))
temp.grid(row=14, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Natural" , font=(font,int(fontMultiplier*10)))
temp.grid(row=15, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Density" , font=(font,int(fontMultiplier*10)))
temp.grid(row=16, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Electronegativity" , font=(font,int(fontMultiplier*10)))
temp.grid(row=17, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="First Ionisation Energy" , font=(font,int(fontMultiplier*10)))
temp.grid(row=18, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Specific Heat Capacity / J⋅kg⁻¹⋅K⁻¹" , font=(font,int(fontMultiplier*10)))
temp.grid(row=19, column=0, sticky = tk.N+tk.E, padx=2)
temp=WrappingLabel(newf, text="Discovered" , font=(font,int(fontMultiplier*10)))
temp.grid(row=20, column=0, sticky = tk.N+tk.E, padx=2)
newf.grid(row=0, column=0, sticky = tk.N+tk.E, padx=2)
r=1
for i in l:
newf=self.addframe(self.resFrame,borderwidth=1)
temp=WrappingLabel(newf, text=int(pt.ELEMENTDATA["AtomicNumber"][i]), font=(font,int(fontMultiplier*10)))
temp.grid(row=0, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(int(pt.ELEMENTDATA["MassNumber"][i])), font=(font,int(fontMultiplier*10)))
temp.grid(row=1, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(int(pt.ELEMENTDATA["Period"][i]))+", "+str(int(pt.ELEMENTDATA["Group"][i])), font=(font,int(fontMultiplier*10)))
temp.grid(row=2, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["Symbol"][i]), font=(font,int(fontMultiplier*15), 'bold'))
temp.grid(row=3, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["Element"][i]), font=(font,int(fontMultiplier*12), 'bold'))
temp.grid(row=4, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["AtomicMass"][i]) , font=(font,int(fontMultiplier*10)))
temp.grid(row=5, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=", ".join([str(int(pt.ELEMENTDATA[j][i])) for j in ["Protons","Neutrons","Electrons"]]), font=(font,int(fontMultiplier*10)))
temp.grid(row=6, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["AtomicRadius"][i]).title(), font=(font,int(fontMultiplier*10)))
temp.grid(row=7, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(int(pt.ELEMENTDATA["Shells"][i])).title(), font=(font,int(fontMultiplier*10)))
temp.grid(row=8, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(int(pt.ELEMENTDATA["Valence"][i])).title(), font=(font,int(fontMultiplier*10)))
temp.grid(row=9, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["Config"][i]), font=(font,int(fontMultiplier*10)))
temp.grid(row=10, column=0, sticky = tk.N+tk.W, padx=2)
iso = str(pt.ELEMENTDATA["Isotopes"][i])
temp=WrappingLabel(newf, text=str(int(float(iso))) if iso.replace('.','',1).isdigit() else "-", font=(font,int(fontMultiplier*10)))
temp.grid(row=11, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=" ".join(["[{}]".format(str(pt.ELEMENTDATA["Phase"][i]).title()), ", ".join([str(pt.ELEMENTDATA["MeltingPoint"][i]).title()+"K", str(pt.ELEMENTDATA["BoilingPoint"][i]).title()+"K"])]), font=(font,int(fontMultiplier*10)))
temp.grid(row=12, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["Type"][i]).title(), font=(font,int(fontMultiplier*10)))
temp.grid(row=13, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text="Yes" if pt.ELEMENTDATA["Radioactive"][i] else "No", font=(font,int(fontMultiplier*10)))
temp.grid(row=14, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text="Yes" if pt.ELEMENTDATA["Natural"][i] else "No", font=(font,int(fontMultiplier*10)))
temp.grid(row=15, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["Density"][i]).title(), font=(font,int(fontMultiplier*10)))
temp.grid(row=16, column=0, sticky = tk.N+tk.W, padx=2)
e = str(pt.ELEMENTDATA["Electronegativity"][i]).title()
temp=WrappingLabel(newf, text=e if e.replace('.','',1).isdigit() else "-", font=(font,int(fontMultiplier*10)))
temp.grid(row=17, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["FirstIonization"][i]).title(), font=(font,int(fontMultiplier*10)))
temp.grid(row=18, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=str(pt.ELEMENTDATA["SpecificHeat"][i]).title(), font=(font,int(fontMultiplier*10)))
temp.grid(row=19, column=0, sticky = tk.N+tk.W, padx=2)
temp=WrappingLabel(newf, text=", ".join([str(pt.ELEMENTDATA["Discoverer"][i]).title(), str(pt.ELEMENTDATA["Year"][i]).title()]), font=(font,int(fontMultiplier*10)))
temp.grid(row=20, column=0, sticky = tk.N+tk.W, padx=2)
newf.grid(row=0, column=r, sticky = tk.N, padx=2)
r+=1
else:
text = WrappingLabel(self.mainFrame, text="Invalid Input. Please enter a valid Symbol, Element Name, Symbol, Atomic Number, or Number of Electrons, Protons or Neutrons.", font=(font,int(fontMultiplier*14)))
text.grid(row=1, column=0, padx=2,pady=2, sticky = tk.W+tk.E, columnspan=5)
# self.resFrame.grid(row=1, column=len(l)+1, rowspan=10, columnspan=10, padx=2)
self.resFrame.grid(row=1, column=0, rowspan=len(l)+1, columnspan=10, padx=2)
def check_recently_opened(self):
file = open(jsonData)
try:
data = json.load(file)
file.close()
if type(data["recentlyOpened"]) == list:
return
except Exception as e:
file.close()
file = open(jsonData, 'w')
json.dump({'fontMultiplier': float(1),'recentlyOpened': [], "default-theme": "dark"}, file)
file.close()
# Theme switching
def change_theme(self):
file = open(jsonData, "r")
data = json.load(file)
file.close()
file = open(jsonData, "w+")
if root.tk.call("ttk::style", "theme", "use") == "sun-valley-dark":
# Set light theme
data['default-theme'] = "light"
else:
# Set dark theme
data['default-theme'] = "dark"
root.tk.call("set_theme", data['default-theme'])
json.dump(data, file)
file.close()
# MacOS Menu Bar Buttons
def setup_menu(self):
menubar = Menu(root)
# file
file=Menu(menubar, tearoff=0)
file.add_command(label="Settings", command=(lambda *args:self.run_func("Settings")))
menubar.add_cascade(label="File", menu=file)
# tools
tools=Menu(menubar, tearoff=0)
for i in sorted(list(functionalities.keys())):
tools.add_command(label=i, command=lambda i=i: self.run_func(i))
menubar.add_cascade(label="Tools", menu=tools)
root.config(menu=menubar)
# Recently Opened
romenu=Menu(menubar, tearoff=0)
file = open(jsonData)
data = json.load(file)
file.close()
data = list(set(data['recentlyOpened']))
for i in data:
if i != "Home":
romenu.add_command(label=i, command=lambda i=i: self.run_func(i))
else:
romenu.add_command(label=i, command=self.handleBackToHS)
menubar.add_cascade(label="Recently Opened", menu=romenu)
# Keybinds :D
## Full Screen Toggle
def fullScreenBindings(self):
root.attributes("-fullscreen", self.fullScreen)
root.bind("<Control-f>", self.toggleFullScreen)
root.bind("<F11>", self.toggleFullScreen)
root.bind("<Escape>", self.quitFullScreen)
root.bind("<Control-,>", (lambda e: self.run_func("Settings")))
## Back to Home
def goHome(self):
root.bind("<Control-h>", self.handleBackToHS)
## Reset Settings
# def resetSettingsSC(self):
# root.bind("<Control-`>", self.resetSettings)
def periodicTableScreen(self, yes):
if yes:
root.bind("<Return>", self.getInputs)
else:
root.bind("<Return>", (lambda e: self.periodicTableScreen(False)))
root.unbind_all('<Return>')
def removeSelectedTreeView(self):
config.currentlySelected = "Home"
if len(self.treeview.selection()) > 0:
self.treeview.selection_remove(self.treeview.selection()[0])
def handleBackToHS(self, event):
self.removeSelectedTreeView()
self.showHomeScreen()
def toggleFullScreen(self, event):
self.fullScreen = not self.fullScreen
root.attributes("-fullscreen", self.fullScreen)
def quitFullScreen(self, event):
self.fullScreen = False
root.attributes("-fullscreen", self.fullScreen)
# Setup Widgets
def setup_widgets(self):
# Panedwindow
self.paned = ttk.PanedWindow(self, orient="horizontal")
self.paned.pack(fill="both", expand=True, anchor="center")
# Selection Pane
self.pane_1 = ttk.Frame(self.paned, padding=5)
self.paned.add(self.pane_1, weight=1)
self.newpane = ttk.PanedWindow(self.pane_1, orient="horizontal")
## Treeview Label
self.treeViewTopLab = WrappingLabel(self.newpane, text="Tools", font=(FONT, int(fontMultiplier*23), 'bold'))
self.treeViewTopLab.pack(side="left",padx=5, anchor="w", fill="y")
# Switching of Themes
self.switch = ttk.Checkbutton(
self.newpane, text="Change Theme", style="Switch.TCheckbutton", command=self.change_theme
)
self.switch.pack(side="right", padx=5, anchor="e", fill="y")
self.newpane.pack(fill="x", anchor="n", pady=10)
# Scrollbar
self.scrollbar = ttk.Scrollbar(self.pane_1)
self.scrollbar.pack(side="right", fill="y")
# Treeview
self.treeview = ttk.Treeview(
self.pane_1,
selectmode="browse",
yscrollcommand=self.scrollbar.set,
style="MainUI.Treeview",
takefocus=False
)
self.treeview.bind("<<TreeviewSelect>>", self.on_tree_select)
self.treeview.pack(expand=True, fill="both")
self.scrollbar.config(command=self.treeview.yview)
## Treeview columns
self.treeview.column("#0", anchor="w", minwidth=100)
# Insert treeview data
for item in treeview_data:
if item[2] in functionalities or item[0] == "" or item[1] in {8, 15, 16, 23, 24, 29, 34, 38, 41}:
self.treeview.insert(
parent=item[0], index="end", iid=item[1], text=item[2]
)
topics.append(item[2])
if item[0] == "" or item[1] in {8, 15, 16, 23, 24, 29, 34, 38, 41}:
self.treeview.item(item[1], open=True) # Open parents
# Select and scroll
self.treeview.see(1)
# Home Screen UI
## Main Home Screen Frame
self.homeScreen = ttk.Frame(self.paned, padding=5)
self.paned.add(self.homeScreen, weight=10)
self.notebook = tk.Canvas(self.homeScreen, highlightthickness=0)
self.notebook.pack(fill="both", expand=True)
## Sizegrip
self.sizegrip = ttk.Sizegrip(self)
## Show Home Screen
self.showHomeScreen()
# Function to clear the screen of any Frames (Leaving root behind)
def clearScreen(self):
# Clear Right Side of the Screen
try:
for i in self.screenlist[::-1]:
try:
i.pack_forget()
i.place_forget()
except: pass
self.screenlist.pop(-1)
except:
pass
finally:
try:
self.thingFrame.pack_forget()
self.mainFrame.pack_forget()
self.scrolly.pack_forget()
self.scrollx.pack_forget()
except: pass
def run_func(self, current):
reload()
file = open(jsonData)
data = json.load(file)
file.close()
config.currentlySelected = current
self.clearScreen()
# First in First out
if (len(data['recentlyOpened']) <= _recentlength):
if config.currentlySelected not in data['recentlyOpened']:
data['recentlyOpened'].insert(0, config.currentlySelected)
else:
data['recentlyOpened'].insert(0, config.currentlySelected)
data['recentlyOpened'].pop(_recentlength-1)
with open(jsonData, 'w') as f:
json.dump(data,f)
self.holdROItemFrame.pack_forget()
        for ropenedItem in data['recentlyOpened']:
self.ropenedItemBtn = ttk.Button(self.holdROItemFrame, text=ropenedItem, width=30)
self.ropenedItemBtn.pack(side="top", pady=2)
self.notebook.update()
if config.currentlySelected in functionalities and config.currentlySelected != "Home":
functionalities[config.currentlySelected](self)
else:
if config.currentlySelected != "Home":
infoFrame(self, config.currentlySelected)
self.setup_menu()
root.update()
# Function that will run when the an item in the tree is selected
def on_tree_select(self, event):
try:
self.run_func(self.treeview.item(self.treeview.selection()[0])['text'])
self.periodicTableScreen(self.treeview.item(self.treeview.selection()[0])['text'] == "Periodic Table")
except: pass
# Function to create a screen to our default parameters
def addframe(self,frame="",**args):
if frame == "":
frame=self.notebook
self.screenlist.append(ttk.Frame(frame,**args))
return self.screenlist[-1]
def showHomeScreen(self):
# Config
config.currentlySelected = "Home"
self.clearScreen()
# Top Frame (Hello, xxx)
self.welcomeFrame = self.addframe()
self.welcomeFrame.pack(side="top", padx=25, pady=18, anchor="w")
self.helloUserLab = WrappingLabel(self.welcomeFrame,text="Hello, {}".format(config.username), font=(FONT, int(fontMultiplier*50),'bold'))
self.helloUserLab.pack(pady=2,fill="x")
self.welcomeLab = WrappingLabel(self.welcomeFrame, text="Welcome to Toobox!",font=(FONT, int(fontMultiplier*15)))
self.welcomeLab.pack(side="left", fill="x")
self.welcomeLab2 = WrappingLabel(self.welcomeFrame, text="Select a tool to get started!",font=(FONT, int(fontMultiplier*15)))
# Toobox App Logo and App Description
self.widthOfTooboxInfo = 200
self.tooboxInfoFrame = self.addframe(width=self.widthOfTooboxInfo)
self.tooboxInfoFrame.pack(side="left", padx=25, pady=18, anchor="w")
appIconImg = ImageTk.PhotoImage(Image.open(appIconPng).resize((self.widthOfTooboxInfo-20,self.widthOfTooboxInfo-20), Image.ANTIALIAS))
self.imgPanel = WrappingLabel(self.tooboxInfoFrame, image=appIconImg)
self.imgPanel.image = appIconImg
        self.appDescText = WrappingLabel(self.tooboxInfoFrame, font=(FONT, int(fontMultiplier*17)), wraplength=self.widthOfTooboxInfo, justify="left", text="Toobox is a Toolbox of different tools to help in your Academics. Toobox provides various tools for a wide range of topics and subjects that will definitely help you while revising and studying.")
self.appDescText.pack(side="bottom")
self.imgPanel.pack(side="bottom", fill="both", expand="yes", pady=32)
file = open(jsonData)
data = json.load(file)
file.close()
data = list(set(data['recentlyOpened']))
# Recently Opened
self.recentlyOpenedFrame = self.addframe(width=self.widthOfTooboxInfo)
self.recentlyOpenedFrame.pack(side="left", padx=20, pady=18, anchor="w")
self.recentlyOpenedText = WrappingLabel(self.recentlyOpenedFrame, text="Recently Opened ({})".format(str(len(data[:3]))),font=(FONT, int(fontMultiplier*20), "bold"))
self.recentlyOpenedText.pack(side="top", pady=3)
self.screenlist.append(ttk.Frame(self.recentlyOpenedFrame))
self.holdROItemFrame = self.screenlist[-1]
self.holdROItemFrame.pack(side="top")
if len(data) == 0:
            self.noROText = WrappingLabel(self.recentlyOpenedFrame, text="You have not opened anything recently.", font=(FONT, int(fontMultiplier*17)), wraplength=self.widthOfTooboxInfo)
self.noROText.pack(side="top", pady=3, anchor="w")
else:
for i in range(len(data[:3])): # Loop through all the Recently Opened Items
temp=str(data[i])
def test(x=temp):
return self.run_func(str(x))
self.ropenedItemBtn = ttk.Button(self.holdROItemFrame, text=temp, width=30, command=test)
self.ropenedItemBtn.pack(side="top", pady=2)
# Credits Section
self.creditsFrame = self.addframe(width=self.widthOfTooboxInfo)
self.creditsFrame.pack(side="left", padx=20, pady=18, anchor="w")
self.creditsTitle = WrappingLabel(self.creditsFrame, text="Credits",font=(FONT, int(fontMultiplier*20), "bold"), justify="left")
self.creditsTitle.pack(side="top", pady=3, anchor="w")
self.developersHeader = WrappingLabel(self.creditsFrame, text="Developers:",font=(FONT, int(fontMultiplier*17), "bold"), justify="left")
self.developersHeader.pack(side="top", anchor="w")
self.developersList = WrappingLabel(self.creditsFrame, text="<NAME>, <NAME>, <NAME>",font=(FONT, int(fontMultiplier*17)), justify="left")
self.developersList.pack(side="top", pady=3, anchor="w")
self.dtHeader = WrappingLabel(self.creditsFrame, text="Dependencies and Themes:",font=(FONT, int(fontMultiplier*17), "bold"), justify="left")
self.dtHeader.pack(side="top", anchor="w")
self.dtList = WrappingLabel(self.creditsFrame, text="chemlib, tkinter, numpy and the sun-valley Theme for tkinker",font=(FONT, int(fontMultiplier*17)), justify="left")
self.dtList.pack(side="top", pady=3, anchor="w")
# Destroys and 'Quits' the app
def _quit(self):
root.quit()
root.destroy()
# Function to allow for sending of notifications through AppleScript
def notify(self, title, text, sound):
os.system("""
osascript -e 'display notification "{}" with title "{}" sound name "{}"'
""".format(text, title, sound))
# Its a... Secret :)
f=(lambda:exec("\x69\x6d\x70\x6f\x72\x74\x20\x77\x65\x62\x62\x72\x6f\x77\x73\x65\x72\x0a\x77\x65\x62\x62\x72\x6f\x77\x73\x65\x72\x2e\x6f\x70\x65\x6e\x5f\x6e\x65\x77\x28\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x79\x6f\x75\x74\x75\x62\x65\x2e\x63\x6f\x6d\x2f\x77\x61\x74\x63\x68\x3f\x76\x3d\x64\x51\x77\x34\x77\x39\x57\x67\x58\x63\x51\x22\x29"))
if __name__ == "__main__":
root = tk.Tk()
root.title("Toobox")
# Simply set the theme
root.tk.call("source", appThemePath)
file = open(jsonData, "r")
data = json.load(file)
file.close()
root.tk.call("set_theme", data['default-theme'])
# Set App Icon
# root.iconbitmap(appIconIcns)
img = tk.Image("photo", file=appIconPng)
root.tk.call('wm','iconphoto', root._w, img)
app = App(root)
app.pack(fill="both", expand=True)
root.update()
root.minsize(root.winfo_width(), root.winfo_height())
    # Center the window before it is shown
    x_cordinate = int(root.winfo_screenwidth()/2 - root.winfo_width()/2)
    y_cordinate = int(root.winfo_screenheight()/2 - root.winfo_height()/2)
    root.geometry("+{}+{}".format(x_cordinate, y_cordinate))
root.state('zoomed')
root.mainloop()
| [
"tkinter.ttk.Button",
"components.wrappedLabel.WrappingLabel",
"tkinter.ttk.Scrollbar",
"tkinter.Canvas",
"tkinter.ttk.Treeview",
"tkinter.ttk.Frame",
"tkinter.Menu",
"tkinter.ttk.Checkbutton",
"tkinter.Image",
"tools.periodicTable.search",
"tkinter.ttk.PanedWindow",
"PIL.Image.open",
"os.pa... | [((567, 584), 'os.chdir', 'os.chdir', (['ROOTDIR'], {}), '(ROOTDIR)\n', (575, 584), False, 'import os\n'), ((597, 632), 'os.path.join', 'os.path.join', (['ROOTDIR', '""".data.json"""'], {}), "(ROOTDIR, '.data.json')\n", (609, 632), False, 'import os\n'), ((647, 700), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""src"""', '"""images"""', '"""AppIcon.ico"""'], {}), "(ROOTDIR, 'src', 'images', 'AppIcon.ico')\n", (659, 700), False, 'import os\n'), ((712, 766), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""src"""', '"""images"""', '"""AppIcon.icns"""'], {}), "(ROOTDIR, 'src', 'images', 'AppIcon.icns')\n", (724, 766), False, 'import os\n'), ((777, 830), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""src"""', '"""images"""', '"""AppIcon.png"""'], {}), "(ROOTDIR, 'src', 'images', 'AppIcon.png')\n", (789, 830), False, 'import os\n'), ((844, 883), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""sun-valley.tcl"""'], {}), "(ROOTDIR, 'sun-valley.tcl')\n", (856, 883), False, 'import os\n'), ((1038, 1053), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1047, 1053), False, 'import json\n'), ((27416, 27423), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (27421, 27423), True, 'import tkinter as tk\n'), ((27560, 27575), 'json.load', 'json.load', (['file'], {}), '(file)\n', (27569, 27575), False, 'import json\n'), ((27711, 27745), 'tkinter.Image', 'tk.Image', (['"""photo"""'], {'file': 'appIconPng'}), "('photo', file=appIconPng)\n", (27719, 27745), True, 'import tkinter as tk\n'), ((538, 564), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (554, 564), False, 'import os\n'), ((4114, 4138), 'tkinter.ttk.Frame.__init__', 'ttk.Frame.__init__', (['self'], {}), '(self)\n', (4132, 4138), False, 'from tkinter import ttk\n'), ((14336, 14351), 'json.load', 'json.load', (['file'], {}), '(file)\n', (14345, 14351), False, 'import json\n'), ((14710, 14731), 'json.dump', 'json.dump', (['data', 'file'], {}), '(data, file)\n', (14719, 14731), False, 'import json\n'), ((14827, 14837), 'tkinter.Menu', 'Menu', (['root'], {}), '(root)\n', (14831, 14837), False, 'from tkinter import Menu\n'), ((14866, 14890), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (14870, 14890), False, 'from tkinter import Menu\n'), ((15067, 15091), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (15071, 15091), False, 'from tkinter import Menu\n'), ((15355, 15379), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (15359, 15379), False, 'from tkinter import Menu\n'), ((15425, 15440), 'json.load', 'json.load', (['file'], {}), '(file)\n', (15434, 15440), False, 'import json\n'), ((17283, 17325), 'tkinter.ttk.PanedWindow', 'ttk.PanedWindow', (['self'], {'orient': '"""horizontal"""'}), "(self, orient='horizontal')\n", (17298, 17325), False, 'from tkinter import ttk\n'), ((17441, 17473), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.paned'], {'padding': '(5)'}), '(self.paned, padding=5)\n', (17450, 17473), False, 'from tkinter import ttk\n'), ((17544, 17593), 'tkinter.ttk.PanedWindow', 'ttk.PanedWindow', (['self.pane_1'], {'orient': '"""horizontal"""'}), "(self.pane_1, orient='horizontal')\n", (17559, 17593), False, 'from tkinter import ttk\n'), ((17866, 17977), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.newpane'], {'text': '"""Change Theme"""', 'style': '"""Switch.TCheckbutton"""', 'command': 'self.change_theme'}), "(self.newpane, text='Change Theme', style=\n 'Switch.TCheckbutton', 
command=self.change_theme)\n", (17881, 17977), False, 'from tkinter import ttk\n'), ((18168, 18194), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['self.pane_1'], {}), '(self.pane_1)\n', (18181, 18194), False, 'from tkinter import ttk\n'), ((18291, 18419), 'tkinter.ttk.Treeview', 'ttk.Treeview', (['self.pane_1'], {'selectmode': '"""browse"""', 'yscrollcommand': 'self.scrollbar.set', 'style': '"""MainUI.Treeview"""', 'takefocus': '(False)'}), "(self.pane_1, selectmode='browse', yscrollcommand=self.\n scrollbar.set, style='MainUI.Treeview', takefocus=False)\n", (18303, 18419), False, 'from tkinter import ttk\n'), ((19407, 19439), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.paned'], {'padding': '(5)'}), '(self.paned, padding=5)\n', (19416, 19439), False, 'from tkinter import ttk\n'), ((19515, 19563), 'tkinter.Canvas', 'tk.Canvas', (['self.homeScreen'], {'highlightthickness': '(0)'}), '(self.homeScreen, highlightthickness=0)\n', (19524, 19563), True, 'import tkinter as tk\n'), ((19662, 19680), 'tkinter.ttk.Sizegrip', 'ttk.Sizegrip', (['self'], {}), '(self)\n', (19674, 19680), False, 'from tkinter import ttk\n'), ((20497, 20512), 'json.load', 'json.load', (['file'], {}), '(file)\n', (20506, 20512), False, 'import json\n'), ((23412, 23465), 'components.wrappedLabel.WrappingLabel', 'WrappingLabel', (['self.tooboxInfoFrame'], {'image': 'appIconImg'}), '(self.tooboxInfoFrame, image=appIconImg)\n', (23425, 23465), False, 'from components.wrappedLabel import WrappingLabel\n'), ((23534, 23874), 'components.wrappedLabel.WrappingLabel', 'WrappingLabel', (['self.tooboxInfoFrame'], {'font': '(fontMultiplier * 17)', 'wraplength': 'self.widthOfTooboxInfo', 'justify': '"""left"""', 'text': '"""Toobox is an app is a Toolbox of different tools to help in your Academics. Toobox provides various tools for a wide range of topics and subjects that will definately help you while revising and studying."""'}), "(self.tooboxInfoFrame, font=fontMultiplier * 17, wraplength=\n self.widthOfTooboxInfo, justify='left', text=\n 'Toobox is an app is a Toolbox of different tools to help in your Academics. 
Toobox provides various tools for a wide range of topics and subjects that will definately help you while revising and studying.'\n )\n", (23547, 23874), False, 'from components.wrappedLabel import WrappingLabel\n'), ((24031, 24046), 'json.load', 'json.load', (['file'], {}), '(file)\n', (24040, 24046), False, 'import json\n'), ((4703, 4715), 'tools.periodicTable.search', 'pt.search', (['e'], {}), '(e)\n', (4712, 4715), True, 'import tools.periodicTable as pt\n'), ((13888, 13903), 'json.load', 'json.load', (['file'], {}), '(file)\n', (13897, 13903), False, 'import json\n'), ((21034, 21052), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (21043, 21052), False, 'import json\n'), ((21163, 21223), 'tkinter.ttk.Button', 'ttk.Button', (['self.holdROItemFrame'], {'text': 'ropenedItem', 'width': '(30)'}), '(self.holdROItemFrame, text=ropenedItem, width=30)\n', (21173, 21223), False, 'from tkinter import ttk\n'), ((22171, 22195), 'tkinter.ttk.Frame', 'ttk.Frame', (['frame'], {}), '(frame, **args)\n', (22180, 22195), False, 'from tkinter import ttk\n'), ((24566, 24601), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.recentlyOpenedFrame'], {}), '(self.recentlyOpenedFrame)\n', (24575, 24601), False, 'from tkinter import ttk\n'), ((25260, 25327), 'tkinter.ttk.Button', 'ttk.Button', (['self.holdROItemFrame'], {'text': 'temp', 'width': '(30)', 'command': 'test'}), '(self.holdROItemFrame, text=temp, width=30, command=test)\n', (25270, 25327), False, 'from tkinter import ttk\n'), ((23285, 23307), 'PIL.Image.open', 'Image.open', (['appIconPng'], {}), '(appIconPng)\n', (23295, 23307), False, 'from PIL import ImageTk, Image\n')] |
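# The app reads a ".data.json" settings file next to the script (see
# check_recently_opened() and change_theme() above). A minimal sketch of creating one
# by hand; the values are only example defaults.
import json

settings = {"fontMultiplier": 1.0, "recentlyOpened": [], "default-theme": "dark"}
with open(".data.json", "w") as fp:
    json.dump(settings, fp)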
import traceback
from pathlib import Path
import discord
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord.ext import commands, fancyhelp
from pytz import utc
from testbot import __version__
MAIN_GUILD_ID = 845688627265536010
STDOUT_CHANNEL_ID = 845691044527210515
class Bot(commands.Bot):
__slots__ = ("ready", "extensions", "scheduler", "error_handler")
def __init__(self) -> None:
self.ready = False
self.extensions = [p.stem for p in Path(".").glob("./testbot/bot/extensions/*.py")]
self.scheduler = AsyncIOScheduler()
self.scheduler.configure(timezone=utc)
self.error_handler = None
super().__init__(
command_prefix="-",
case_insensitive=True,
intents=discord.Intents.all(),
help_command=fancyhelp.EmbeddedHelpCommand(),
activity=discord.Activity(
name=f"-help | Version {__version__}",
type=discord.ActivityType.watching,
),
)
def setup(self) -> None:
print("Running setup...")
for ext in self.extensions:
self.load_extension(f"testbot.bot.extensions.{ext}")
print(f" `{ext}` extension loaded.")
def run(self) -> None:
self.setup()
with open("./secrets/token", mode="r", encoding="utf-8") as f:
token = f.read()
print("Running bot...")
super().run(token, reconnect=True)
async def close(self) -> None:
print("Shutting down...")
self.scheduler.shutdown()
await self.stdout.send(f"Shutting down testbot v{__version__}.")
await super().close()
async def on_connect(self) -> None:
print(f" Bot connected. DWSP latency: {self.latency * 1000:,.0f} ms")
async def on_disconnect(self) -> None:
print(f" Bot disconnected.")
async def on_error(self, err: str, *args, **kwargs):
if err == "on_command_error":
await args[0].send("Something went wrong.")
traceback.print_exc()
async def on_command_error(self, ctx: commands.Context, exc: Exception):
await self.error_handler.command_error(ctx, exc)
async def on_ready(self) -> None:
if self.ready:
return
self.guild = self.get_guild(MAIN_GUILD_ID)
self.stdout = self.guild.get_channel(STDOUT_CHANNEL_ID)
self.scheduler.start()
print(f" Scheduler started ({len(self.scheduler.get_jobs()):,} job(s) scheduled)")
await self.stdout.send(f"testbot v{__version__} is online!")
self.ready = True
print(" Bot ready!")
async def on_message(self, message: discord.Message) -> None:
if message.author.bot or isinstance(message.channel, discord.DMChannel):
return
await self.process_commands(message)
async def process_commands(self, message: discord.Message) -> None:
ctx = await self.get_context(message, cls=commands.Context)
if ctx.command is None:
return
await self.invoke(ctx)
| [
"discord.ext.fancyhelp.EmbeddedHelpCommand",
"pathlib.Path",
"discord.Intents.all",
"apscheduler.schedulers.asyncio.AsyncIOScheduler",
"discord.Activity",
"traceback.print_exc"
] | [((565, 583), 'apscheduler.schedulers.asyncio.AsyncIOScheduler', 'AsyncIOScheduler', ([], {}), '()\n', (581, 583), False, 'from apscheduler.schedulers.asyncio import AsyncIOScheduler\n'), ((2039, 2060), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2058, 2060), False, 'import traceback\n'), ((779, 800), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (798, 800), False, 'import discord\n'), ((827, 858), 'discord.ext.fancyhelp.EmbeddedHelpCommand', 'fancyhelp.EmbeddedHelpCommand', ([], {}), '()\n', (856, 858), False, 'from discord.ext import commands, fancyhelp\n'), ((881, 977), 'discord.Activity', 'discord.Activity', ([], {'name': 'f"""-help | Version {__version__}"""', 'type': 'discord.ActivityType.watching'}), "(name=f'-help | Version {__version__}', type=discord.\n ActivityType.watching)\n", (897, 977), False, 'import discord\n'), ((491, 500), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (495, 500), False, 'from pathlib import Path\n')] |
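# Bot.setup() above loads every module found in testbot/bot/extensions/ with
# load_extension(), which expects a module-level setup() function. A hypothetical
# minimal extension in the synchronous discord.py 1.x style that the Bot class uses;
# the cog and command here are placeholders.
from discord.ext import commands

class Ping(commands.Cog):
    @commands.command()
    async def ping(self, ctx: commands.Context) -> None:
        await ctx.send(f"Pong! DWSP latency: {ctx.bot.latency * 1000:,.0f} ms")

def setup(bot: commands.Bot) -> None:
    bot.add_cog(Ping())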
"""Added a flag to commands if it should be run through the banphrases
Revision ID: 8feba263d722
Revises: a6f9b5c3ba83
Create Date: 2016-05-24 22:04:49.803097
"""
# revision identifiers, used by Alembic.
revision = '8feba263d722'
down_revision = 'a6f9b5c3ba83'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('tb_command', sa.Column('run_through_banphrases', sa.Boolean(), server_default='0', nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('tb_command', 'run_through_banphrases')
### end Alembic commands ###
| [
"sqlalchemy.Boolean",
"alembic.op.drop_column"
] | [((664, 718), 'alembic.op.drop_column', 'op.drop_column', (['"""tb_command"""', '"""run_through_banphrases"""'], {}), "('tb_command', 'run_through_banphrases')\n", (678, 718), False, 'from alembic import op\n'), ((493, 505), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (503, 505), True, 'import sqlalchemy as sa\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Common functions used by the metadata encoding scripts"""
import json
def load_metadata_dicts(metadata_files):
metadata_dicts = []
for metadata_file in metadata_files:
with open(metadata_file, "r") as fp:
metadata_dicts.append(json.load(fp))
return metadata_dicts
def save_metadata_dicts(metadata_dicts, metadata_files):
assert(len(metadata_dicts) == len(metadata_files))
for i in range(len(metadata_dicts)):
with open(metadata_files[i], "w") as fp:
json.dump(metadata_dicts[i], fp, indent=4)
| [
"json.load",
"json.dump"
] | [((606, 648), 'json.dump', 'json.dump', (['metadata_dicts[i]', 'fp'], {'indent': '(4)'}), '(metadata_dicts[i], fp, indent=4)\n', (615, 648), False, 'import json\n'), ((341, 354), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (350, 354), False, 'import json\n')] |
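# A save/load round trip for the helpers above, using a temporary directory so the
# example can run anywhere.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    files = [os.path.join(tmp, f"meta_{i}.json") for i in range(2)]
    dicts = [{"id": 0, "tags": ["a"]}, {"id": 1, "tags": ["b"]}]
    save_metadata_dicts(dicts, files)
    assert load_metadata_dicts(files) == dicts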
from MELC.utils.myDatasets import generate_workingRaw_from_raw, MELCStructureDataset
import numpy as np
import tifffile as tiff
from MELC.utils.registration_daria import register
import matplotlib.pyplot as plt
import cv2
from MELC.utils.Files import create_folder
from skimage import img_as_float, img_as_uint
from MELC.utils.f_transformations import filterLowFrequencies, visualize_frequencies
import glob
from os.path import join
from config import *
import argparse
SEPARATOR = '/'
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='Run Training of Mask R-CNN')
parser.add_argument(
'--path', dest='path', required=True,
help='Config file for training (and optionally testing)')
return parser.parse_args()
class MELCImageProcessing:
def __init__(self, path: str, melc_structure_generated: bool = True):
self._path = path
self._path_registered_fluor = ''
self._path_registered_bleach = ''
self._path_registered_phase = ''
self._path_registered_vis_fluor = ''
self._path_registered_vis_bleach = ''
self._path_registered_vis_phase = ''
self._path_bg_corr = ''
self._path_bg_corr_f = ''
self._path_bg_corr_v_f = ''
self._path_normalized_f = ''
self._path_normalized_v_f = ''
'''
Extract MELC data and calibration data
'''
w_raw = self._path + SEPARATOR + 'w_raw'
if not melc_structure_generated:
generate_workingRaw_from_raw(self._path, w_raw)
melc_dataset = MELCStructureDataset(w_raw)
'''
Sort by creation date
'''
self._melc_fluor = melc_dataset.fluor_pd.sort_values('order_index', ascending=True)
self._melc_phase = melc_dataset.phase_pd.sort_values('order_index', ascending=True)
self._melc_bleach = melc_dataset.bleach_pd.sort_values('order_index', ascending=True)
self._melc_phasebleach = melc_dataset.phasebleach_pd.sort_values('order_index', ascending=True)
self.create_folders()
self._corrected_bf_im = self.generate_bg_correction_img()
self.process_images()
def create_folders(self):
'''
Create folders for registered images
'''
path_processed = join(self._path, 'processed')
path_registered = join(path_processed, 'registered')
self._path_registered_fluor = join(path_registered, 'fluor')
self._path_registered_bleach = join(path_registered, 'bleach')
self._path_registered_phase = join(path_registered, 'phase')
self._path_registered_vis_fluor = join(path_registered, 'vis_fluor')
self._path_registered_vis_bleach = join(path_registered, 'vis_bleach')
self._path_registered_vis_phase = join(path_registered, 'vis_phase')
create_folder(path_processed)
create_folder(path_registered)
create_folder(self._path_registered_fluor)
create_folder(self._path_registered_bleach)
create_folder(self._path_registered_phase)
create_folder(self._path_registered_vis_fluor)
create_folder(self._path_registered_vis_bleach)
create_folder(self._path_registered_vis_phase)
'''
Create folders for background corrected images
'''
self._path_bg_corr = self._path + SEPARATOR + 'processed' + SEPARATOR + 'background_corr' + SEPARATOR
self._path_bg_corr_f = self._path_bg_corr + 'fluor' + SEPARATOR
self._path_bg_corr_v_f = self._path_bg_corr + 'vis_fluor' + SEPARATOR
self._path_bg_corr_p = self._path_bg_corr + 'phase' + SEPARATOR
self._path_bg_corr_v_p = self._path_bg_corr + 'vis_phase' + SEPARATOR
create_folder(self._path_bg_corr)
create_folder(self._path_bg_corr_f)
create_folder(self._path_bg_corr_v_f)
create_folder(self._path_bg_corr_p)
create_folder(self._path_bg_corr_v_p)
'''
Create folders for normalized images
'''
path_normalized = self._path + SEPARATOR + 'processed' + SEPARATOR + 'normalized'
self._path_normalized_f = path_normalized + SEPARATOR + 'fluor' + SEPARATOR
self._path_normalized_v_f = path_normalized + SEPARATOR + 'vis_fluor' + SEPARATOR
self._path_normalized_p = path_normalized + SEPARATOR + 'phase' + SEPARATOR
self._path_normalized_v_p = path_normalized + SEPARATOR + 'vis_phase' + SEPARATOR
create_folder(path_normalized)
create_folder(self._path_normalized_f)
create_folder(self._path_normalized_v_f)
create_folder(self._path_normalized_p)
create_folder(self._path_normalized_v_p)
def generate_bg_correction_img(self):
'''
Create correction image for fluorescence and bleaching images
'''
brightfield_im = []
darkframe_im = []
filter_names = ['XF116-2', 'XF111-2']
calibration_path = self._path + SEPARATOR +'w_raw' + SEPARATOR + 'calibration' + SEPARATOR
brightfield_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_b001_5000_XF116-2_000.tif'))))
brightfield_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_b001_5000_XF111-2_000.tif'))))
darkframe_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_d001_5000_XF116-2_000.tif'))))
darkframe_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_d001_5000_XF111-2_000.tif'))))
corrected_brightfield_im = [(brightfield_im[i] - darkframe_im[i]) for i in range(len(filter_names))]
corrected_brightfield_im[0][corrected_brightfield_im[0] <= 0] = 0
corrected_brightfield_im[1][corrected_brightfield_im[1] <= 0] = 0
return corrected_brightfield_im
def process_images(self):
'''
Registration, background correction and normalization of images
'''
'''
Registration
'''
ref_image = tiff.imread(glob.glob(self._path + SEPARATOR + 'w_raw' + SEPARATOR + 'phase' + SEPARATOR + '*_Propidium iodide_200_XF116*.tif'))
for i in range(0, (len(self._melc_fluor)-1)):
pb_idx = np.where(self._melc_phasebleach['order_index'] == self._melc_bleach.iloc[i]['order_index'])[0][0]
phasebleach_image = tiff.imread(self._melc_phasebleach.iloc[pb_idx]['path'])
bleach_image = tiff.imread(self._melc_bleach.iloc[i]['path'])
registered_bleach_image = register(ref_image, phasebleach_image, bleach_image)
filename_bleach = SEPARATOR + str(int(self._melc_bleach.iloc[i]['order_index'])) + '_' + '_'.join(
self._melc_bleach.iloc[i]['fid'].split('_')[:-1]) + '.tif'
tiff.imsave(self._path_registered_bleach + filename_bleach, registered_bleach_image)
save_vis_img(registered_bleach_image, self._path_registered_vis_bleach, filename_bleach)
p_idx = np.where(self._melc_phase['order_index'] == self._melc_fluor.iloc[i+1]['order_index'])[0][0]
phase_image = tiff.imread(self._melc_phase.iloc[p_idx]['path'])
fluorescence_image = tiff.imread(self._melc_fluor.iloc[i+1]['path'])
registered_phase_image = register(ref_image, phase_image, phase_image)
registered_fluor_image = register(ref_image, phase_image, fluorescence_image)
filename_fluor = SEPARATOR + str(int(self._melc_fluor.iloc[i+1]['order_index'])) + '_' + '_'.join(
self._melc_fluor.iloc[i+1]['fid'].split('_')[:-1]) + '.tif'
tiff.imsave(self._path_registered_fluor + filename_fluor, registered_fluor_image)
tiff.imsave(self._path_registered_phase + filename_fluor, registered_fluor_image)
save_vis_img(registered_fluor_image, self._path_registered_vis_fluor, filename_fluor)
save_vis_img(registered_phase_image, self._path_registered_vis_phase, filename_fluor)
'''
Background Correction
'''
bleach = np.int16(registered_bleach_image)
fluor = np.int16(registered_fluor_image)
phase = np.int16(registered_phase_image)
if self._melc_fluor.iloc[i+1]['filter'] == 'XF111-2':
fluor -= self._corrected_bf_im[1]
phase -= self._corrected_bf_im[1]
else:
fluor -= self._corrected_bf_im[0]
phase -= self._corrected_bf_im[0]
if self._melc_bleach.iloc[i]['filter'] == 'XF111-2':
bleach -= self._corrected_bf_im[1]
else:
bleach -= self._corrected_bf_im[0]
phase[phase < 0] = 0
# Substraction of bleaching image
fluor_wo_bg = fluor - bleach
fluor_wo_bg[fluor_wo_bg < 0] = 0
tiff.imsave(self._path_bg_corr_f + filename_fluor, fluor_wo_bg)
save_vis_img(fluor_wo_bg, self._path_bg_corr_v_f, filename_fluor)
tiff.imsave(self._path_bg_corr_p + filename_fluor, phase)
save_vis_img(phase, self._path_bg_corr_v_p, filename_fluor)
'''
Normalization
'''
fluor_wo_bg_normalized = melc_normalization(fluor_wo_bg)
phase_bc_normalized = melc_normalization(phase)
tiff.imsave(self._path_normalized_f + filename_fluor, fluor_wo_bg_normalized)
save_vis_img(fluor_wo_bg_normalized, self._path_normalized_v_f, filename_fluor)
tiff.imsave(self._path_normalized_p + filename_fluor, phase_bc_normalized)
save_vis_img(phase_bc_normalized, self._path_normalized_v_p, filename_fluor)
def save_vis_img(img: np.ndarray, path: str, filename: str):
img_float = img_as_float(img.astype(int))
img_float = img_float - np.percentile(img_float[20:-20, 20:-20], 0.135) # subtract background
if not np.percentile(img_float[20:-20, 20:-20], 100 - 0.135) == 0.0:
img_float /= np.percentile(img_float[20:-20, 20:-20], 100 - 0.135) # normalize to 99.865% of max value
img_float[img_float < 0] = 0
img_float[img_float > 1] = 1 # cut-off high intensities
tiff.imsave(path + filename, img_as_uint(img_float))
def melc_normalization(img: np.ndarray):
sorted_img = np.sort(np.ravel(img))[::-1]
img[img > sorted_img[3]] = sorted_img[3] # cut off high intensities
return img[15:-15, 15:-15]
'''
For visualization and inspection of images
***Using normalization
registered_u8 = cv2.convertScaleAbs(registered_image, alpha=(255.0/65535.0))
kernel = np.ones((2, 2), np.float32)/4
mean_filtered_img = cv2.filter2D(registered_float, -1, kernel)
normalized_img = cv2.normalize(mean_filtered_img, None, 0, 255, cv2.NORM_MINMAX)
***Using FFT - cut 0.00001 percent of highest frequencies
images = []
images.append(registered_float)
visualize_frequencies(images)
pixels = registered_float.size
high_intensity_pixels = 3
percentage_non_artificial = 100-high_intensity_pixels/pixels
filtered_img = filterLowFrequencies(registered_float, percentage_non_artificial)
images.append(filtered_img)
visualize_frequencies(images)
***Plot histogram
hist = cv2.calcHist([registered_image], [0], None, [65535], [0, 65535])
plt.plot(hist)
plt.xticks(np.arange(0, 65535, step=2000))
plt.grid(True)
plt.yscale('log') # plt.xlim([0, 65535])
plt.show()
'''
if __name__ == '__main__':
args = parse_args()
MELCImageProcessing(args.path, melc_structure_generated=False)
# raw_1 = r'G:\FORSCHUNG\LAB4\VISIOMICS\MELC\2019\3rdFinalPanel_18-6056\201912201349_1'
# melc_processed_data = MELCImageProcessing(raw_1, melc_structure_generated=False)
x = 0
| [
"MELC.utils.registration_daria.register",
"MELC.utils.myDatasets.MELCStructureDataset",
"tifffile.imread",
"argparse.ArgumentParser",
"skimage.img_as_uint",
"numpy.where",
"numpy.int16",
"os.path.join",
"MELC.utils.Files.create_folder",
"tifffile.imsave",
"numpy.percentile",
"MELC.utils.myData... | [((550, 615), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run Training of Mask R-CNN"""'}), "(description='Run Training of Mask R-CNN')\n", (573, 615), False, 'import argparse\n'), ((1600, 1627), 'MELC.utils.myDatasets.MELCStructureDataset', 'MELCStructureDataset', (['w_raw'], {}), '(w_raw)\n', (1620, 1627), False, 'from MELC.utils.myDatasets import generate_workingRaw_from_raw, MELCStructureDataset\n'), ((2317, 2346), 'os.path.join', 'join', (['self._path', '"""processed"""'], {}), "(self._path, 'processed')\n", (2321, 2346), False, 'from os.path import join\n'), ((2373, 2407), 'os.path.join', 'join', (['path_processed', '"""registered"""'], {}), "(path_processed, 'registered')\n", (2377, 2407), False, 'from os.path import join\n'), ((2446, 2476), 'os.path.join', 'join', (['path_registered', '"""fluor"""'], {}), "(path_registered, 'fluor')\n", (2450, 2476), False, 'from os.path import join\n'), ((2516, 2547), 'os.path.join', 'join', (['path_registered', '"""bleach"""'], {}), "(path_registered, 'bleach')\n", (2520, 2547), False, 'from os.path import join\n'), ((2586, 2616), 'os.path.join', 'join', (['path_registered', '"""phase"""'], {}), "(path_registered, 'phase')\n", (2590, 2616), False, 'from os.path import join\n'), ((2659, 2693), 'os.path.join', 'join', (['path_registered', '"""vis_fluor"""'], {}), "(path_registered, 'vis_fluor')\n", (2663, 2693), False, 'from os.path import join\n'), ((2737, 2772), 'os.path.join', 'join', (['path_registered', '"""vis_bleach"""'], {}), "(path_registered, 'vis_bleach')\n", (2741, 2772), False, 'from os.path import join\n'), ((2815, 2849), 'os.path.join', 'join', (['path_registered', '"""vis_phase"""'], {}), "(path_registered, 'vis_phase')\n", (2819, 2849), False, 'from os.path import join\n'), ((2859, 2888), 'MELC.utils.Files.create_folder', 'create_folder', (['path_processed'], {}), '(path_processed)\n', (2872, 2888), False, 'from MELC.utils.Files import create_folder\n'), ((2897, 2927), 'MELC.utils.Files.create_folder', 'create_folder', (['path_registered'], {}), '(path_registered)\n', (2910, 2927), False, 'from MELC.utils.Files import create_folder\n'), ((2936, 2978), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_fluor'], {}), '(self._path_registered_fluor)\n', (2949, 2978), False, 'from MELC.utils.Files import create_folder\n'), ((2987, 3030), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_bleach'], {}), '(self._path_registered_bleach)\n', (3000, 3030), False, 'from MELC.utils.Files import create_folder\n'), ((3039, 3081), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_phase'], {}), '(self._path_registered_phase)\n', (3052, 3081), False, 'from MELC.utils.Files import create_folder\n'), ((3090, 3136), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_vis_fluor'], {}), '(self._path_registered_vis_fluor)\n', (3103, 3136), False, 'from MELC.utils.Files import create_folder\n'), ((3145, 3192), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_vis_bleach'], {}), '(self._path_registered_vis_bleach)\n', (3158, 3192), False, 'from MELC.utils.Files import create_folder\n'), ((3201, 3247), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_vis_phase'], {}), '(self._path_registered_vis_phase)\n', (3214, 3247), False, 'from MELC.utils.Files import create_folder\n'), ((3748, 3781), 'MELC.utils.Files.create_folder', 'create_folder', 
(['self._path_bg_corr'], {}), '(self._path_bg_corr)\n', (3761, 3781), False, 'from MELC.utils.Files import create_folder\n'), ((3790, 3825), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_f'], {}), '(self._path_bg_corr_f)\n', (3803, 3825), False, 'from MELC.utils.Files import create_folder\n'), ((3834, 3871), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_v_f'], {}), '(self._path_bg_corr_v_f)\n', (3847, 3871), False, 'from MELC.utils.Files import create_folder\n'), ((3880, 3915), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_p'], {}), '(self._path_bg_corr_p)\n', (3893, 3915), False, 'from MELC.utils.Files import create_folder\n'), ((3924, 3961), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_v_p'], {}), '(self._path_bg_corr_v_p)\n', (3937, 3961), False, 'from MELC.utils.Files import create_folder\n'), ((4479, 4509), 'MELC.utils.Files.create_folder', 'create_folder', (['path_normalized'], {}), '(path_normalized)\n', (4492, 4509), False, 'from MELC.utils.Files import create_folder\n'), ((4518, 4556), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_f'], {}), '(self._path_normalized_f)\n', (4531, 4556), False, 'from MELC.utils.Files import create_folder\n'), ((4565, 4605), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_v_f'], {}), '(self._path_normalized_v_f)\n', (4578, 4605), False, 'from MELC.utils.Files import create_folder\n'), ((4614, 4652), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_p'], {}), '(self._path_normalized_p)\n', (4627, 4652), False, 'from MELC.utils.Files import create_folder\n'), ((4661, 4701), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_v_p'], {}), '(self._path_normalized_v_p)\n', (4674, 4701), False, 'from MELC.utils.Files import create_folder\n'), ((9815, 9862), 'numpy.percentile', 'np.percentile', (['img_float[20:-20, 20:-20]', '(0.135)'], {}), '(img_float[20:-20, 20:-20], 0.135)\n', (9828, 9862), True, 'import numpy as np\n'), ((9987, 10040), 'numpy.percentile', 'np.percentile', (['img_float[20:-20, 20:-20]', '(100 - 0.135)'], {}), '(img_float[20:-20, 20:-20], 100 - 0.135)\n', (10000, 10040), True, 'import numpy as np\n'), ((10215, 10237), 'skimage.img_as_uint', 'img_as_uint', (['img_float'], {}), '(img_float)\n', (10226, 10237), False, 'from skimage import img_as_float, img_as_uint\n'), ((1528, 1575), 'MELC.utils.myDatasets.generate_workingRaw_from_raw', 'generate_workingRaw_from_raw', (['self._path', 'w_raw'], {}), '(self._path, w_raw)\n', (1556, 1575), False, 'from MELC.utils.myDatasets import generate_workingRaw_from_raw, MELCStructureDataset\n'), ((6013, 6132), 'glob.glob', 'glob.glob', (["(self._path + SEPARATOR + 'w_raw' + SEPARATOR + 'phase' + SEPARATOR +\n '*_Propidium iodide_200_XF116*.tif')"], {}), "(self._path + SEPARATOR + 'w_raw' + SEPARATOR + 'phase' +\n SEPARATOR + '*_Propidium iodide_200_XF116*.tif')\n", (6022, 6132), False, 'import glob\n'), ((6336, 6392), 'tifffile.imread', 'tiff.imread', (["self._melc_phasebleach.iloc[pb_idx]['path']"], {}), "(self._melc_phasebleach.iloc[pb_idx]['path'])\n", (6347, 6392), True, 'import tifffile as tiff\n'), ((6420, 6466), 'tifffile.imread', 'tiff.imread', (["self._melc_bleach.iloc[i]['path']"], {}), "(self._melc_bleach.iloc[i]['path'])\n", (6431, 6466), True, 'import tifffile as tiff\n'), ((6505, 6557), 'MELC.utils.registration_daria.register', 'register', (['ref_image', 'phasebleach_image', 
'bleach_image'], {}), '(ref_image, phasebleach_image, bleach_image)\n', (6513, 6557), False, 'from MELC.utils.registration_daria import register\n'), ((6756, 6844), 'tifffile.imsave', 'tiff.imsave', (['(self._path_registered_bleach + filename_bleach)', 'registered_bleach_image'], {}), '(self._path_registered_bleach + filename_bleach,\n registered_bleach_image)\n', (6767, 6844), True, 'import tifffile as tiff\n'), ((7083, 7132), 'tifffile.imread', 'tiff.imread', (["self._melc_phase.iloc[p_idx]['path']"], {}), "(self._melc_phase.iloc[p_idx]['path'])\n", (7094, 7132), True, 'import tifffile as tiff\n'), ((7166, 7215), 'tifffile.imread', 'tiff.imread', (["self._melc_fluor.iloc[i + 1]['path']"], {}), "(self._melc_fluor.iloc[i + 1]['path'])\n", (7177, 7215), True, 'import tifffile as tiff\n'), ((7251, 7296), 'MELC.utils.registration_daria.register', 'register', (['ref_image', 'phase_image', 'phase_image'], {}), '(ref_image, phase_image, phase_image)\n', (7259, 7296), False, 'from MELC.utils.registration_daria import register\n'), ((7334, 7386), 'MELC.utils.registration_daria.register', 'register', (['ref_image', 'phase_image', 'fluorescence_image'], {}), '(ref_image, phase_image, fluorescence_image)\n', (7342, 7386), False, 'from MELC.utils.registration_daria import register\n'), ((7586, 7671), 'tifffile.imsave', 'tiff.imsave', (['(self._path_registered_fluor + filename_fluor)', 'registered_fluor_image'], {}), '(self._path_registered_fluor + filename_fluor,\n registered_fluor_image)\n', (7597, 7671), True, 'import tifffile as tiff\n'), ((7680, 7765), 'tifffile.imsave', 'tiff.imsave', (['(self._path_registered_phase + filename_fluor)', 'registered_fluor_image'], {}), '(self._path_registered_phase + filename_fluor,\n registered_fluor_image)\n', (7691, 7765), True, 'import tifffile as tiff\n'), ((8047, 8080), 'numpy.int16', 'np.int16', (['registered_bleach_image'], {}), '(registered_bleach_image)\n', (8055, 8080), True, 'import numpy as np\n'), ((8101, 8133), 'numpy.int16', 'np.int16', (['registered_fluor_image'], {}), '(registered_fluor_image)\n', (8109, 8133), True, 'import numpy as np\n'), ((8154, 8186), 'numpy.int16', 'np.int16', (['registered_phase_image'], {}), '(registered_phase_image)\n', (8162, 8186), True, 'import numpy as np\n'), ((8838, 8901), 'tifffile.imsave', 'tiff.imsave', (['(self._path_bg_corr_f + filename_fluor)', 'fluor_wo_bg'], {}), '(self._path_bg_corr_f + filename_fluor, fluor_wo_bg)\n', (8849, 8901), True, 'import tifffile as tiff\n'), ((8993, 9050), 'tifffile.imsave', 'tiff.imsave', (['(self._path_bg_corr_p + filename_fluor)', 'phase'], {}), '(self._path_bg_corr_p + filename_fluor, phase)\n', (9004, 9050), True, 'import tifffile as tiff\n'), ((9324, 9401), 'tifffile.imsave', 'tiff.imsave', (['(self._path_normalized_f + filename_fluor)', 'fluor_wo_bg_normalized'], {}), '(self._path_normalized_f + filename_fluor, fluor_wo_bg_normalized)\n', (9335, 9401), True, 'import tifffile as tiff\n'), ((9506, 9580), 'tifffile.imsave', 'tiff.imsave', (['(self._path_normalized_p + filename_fluor)', 'phase_bc_normalized'], {}), '(self._path_normalized_p + filename_fluor, phase_bc_normalized)\n', (9517, 9580), True, 'import tifffile as tiff\n'), ((9900, 9953), 'numpy.percentile', 'np.percentile', (['img_float[20:-20, 20:-20]', '(100 - 0.135)'], {}), '(img_float[20:-20, 20:-20], 100 - 0.135)\n', (9913, 9953), True, 'import numpy as np\n'), ((10312, 10325), 'numpy.ravel', 'np.ravel', (['img'], {}), '(img)\n', (10320, 10325), True, 'import numpy as np\n'), ((5091, 5154), 'glob.glob', 
'glob.glob', (["(calibration_path + '*_cal_b001_5000_XF116-2_000.tif')"], {}), "(calibration_path + '*_cal_b001_5000_XF116-2_000.tif')\n", (5100, 5154), False, 'import glob\n'), ((5209, 5272), 'glob.glob', 'glob.glob', (["(calibration_path + '*_cal_b001_5000_XF111-2_000.tif')"], {}), "(calibration_path + '*_cal_b001_5000_XF111-2_000.tif')\n", (5218, 5272), False, 'import glob\n'), ((5325, 5388), 'glob.glob', 'glob.glob', (["(calibration_path + '*_cal_d001_5000_XF116-2_000.tif')"], {}), "(calibration_path + '*_cal_d001_5000_XF116-2_000.tif')\n", (5334, 5388), False, 'import glob\n'), ((5441, 5504), 'glob.glob', 'glob.glob', (["(calibration_path + '*_cal_d001_5000_XF111-2_000.tif')"], {}), "(calibration_path + '*_cal_d001_5000_XF111-2_000.tif')\n", (5450, 5504), False, 'import glob\n'), ((6206, 6302), 'numpy.where', 'np.where', (["(self._melc_phasebleach['order_index'] == self._melc_bleach.iloc[i][\n 'order_index'])"], {}), "(self._melc_phasebleach['order_index'] == self._melc_bleach.iloc[i]\n ['order_index'])\n", (6214, 6302), True, 'import numpy as np\n'), ((6964, 7057), 'numpy.where', 'np.where', (["(self._melc_phase['order_index'] == self._melc_fluor.iloc[i + 1]['order_index']\n )"], {}), "(self._melc_phase['order_index'] == self._melc_fluor.iloc[i + 1][\n 'order_index'])\n", (6972, 7057), True, 'import numpy as np\n')] |
from itertools import chain
from random import choice
from lib.durak.exceptions import IllegalAction
class AI:
class CannotPerform(IllegalAction):
pass
def __init__(self, *, game):
self._game = game
def perform_action(self, *, player):
"""
Have the user perform a random action.
Do nothing if yielded.
"""
_player = self._player(player)
if self._player(_player) in self._game._yielded.get():
raise self.CannotPerform("Already yielded")
action_type, selected_action = choice(self._potential_actions(player=_player))
selected_action(player=_player)
return action_type
def _potential_actions(self, *, player):
# TODO: report action on event?
defending = player == self._game.defender
not_defending = not defending
return list(
chain(
[("attacked", self._attack)] * 3 * not_defending,
# self._pass_card,
[("defended", self._defend)] * 9 * defending,
[("gave_up", self._give_up)] * 1 * defending,
[("yielded_attack", self._yield_attack)] * 7 * not_defending,
)
)
def _attack(self, *, player):
"""
Throw a random, legal attack card
"""
try:
potential_cards = list(
set(self._game._legal_attacks._cards)
& set(self._player(player).cards())
)
card = choice(potential_cards)
self._game.legally_attack(player=player, cards=[card])
except (IllegalAction, IndexError):
raise self.CannotPerform
def _yield_attack(self, *, player):
try:
self._game.yield_attack(player=player)
except IllegalAction:
raise self.CannotPerform
def _defend(self, *, player):
"""
Defend randomly
"""
try:
base_card, potential_cards = choice(
list(self._game.legal_defenses._legal_defenses.items())
)
if not potential_cards:
self._game.give_up(player=player)
return
card = choice(list(potential_cards))
self._game.legally_defend(player=player, base_card=base_card, card=card)
except (IllegalAction, IndexError):
raise self.CannotPerform
def _give_up(self, *, player):
try:
self._game.give_up(player=player)
except IllegalAction:
raise self.CannotPerform
def serialize(self):
return [player.serialize() for player in self.ordered()]
def _player(self, player_or_id):
return self._game.player(player_or_id)
| [
"itertools.chain",
"random.choice"
] | [((894, 1112), 'itertools.chain', 'chain', (["([('attacked', self._attack)] * 3 * not_defending)", "([('defended', self._defend)] * 9 * defending)", "([('gave_up', self._give_up)] * 1 * defending)", "([('yielded_attack', self._yield_attack)] * 7 * not_defending)"], {}), "([('attacked', self._attack)] * 3 * not_defending, [('defended', self.\n _defend)] * 9 * defending, [('gave_up', self._give_up)] * 1 * defending,\n [('yielded_attack', self._yield_attack)] * 7 * not_defending)\n", (899, 1112), False, 'from itertools import chain\n'), ((1517, 1540), 'random.choice', 'choice', (['potential_cards'], {}), '(potential_cards)\n', (1523, 1540), False, 'from random import choice\n')] |
import numpy as np
from utils import C_bohr
__all__ = ['Grid']
class Grid:
def __init__(self, npoints, rgrid, solver='sinc', alpha=0.0, rbar=0.0):
self.ngrid = npoints
self.rmin = rgrid[0] / C_bohr
self.rmax = rgrid[1] / C_bohr
rbar = rbar / C_bohr
self.solver = solver.lower()
self.Gy = np.ones(self.ngrid)
self.Fy = np.zeros(self.ngrid)
if self.solver == 'sinc':
self.rgrid, self.rstep = self.generate_sinc_uniform_grid()
else:
self.rgrid, self.rstep = self.generate_fourier_uniform_grid()
if alpha > 0.0:
# mapping is allowed with sinc method only
self.solver = 'sinc'
self.rmin = self.get_grid_bounding_values(self.rmin, rbar, alpha)
self.rmax = self.get_grid_bounding_values(self.rmax, rbar, alpha)
self.rgrid, ygrid = self.generate_nonuniform_grid(alpha, rbar)
gy_power1 = np.power(1.0+ygrid, (1.0/alpha)-1.0)
gy_power2 = np.power(1.0-ygrid, (1.0/alpha)+1.0)
self.Gy = (2.0*rbar/alpha) * gy_power1 / gy_power2
fy_power = (np.power((1.0 - np.power(ygrid, 2)), 2))
self.Fy = (1.0 - (1.0/(alpha**2))) / fy_power
def get_grid_points(self):
return self.rgrid * C_bohr
def get_grid_bounding_values(self, rlimit, rbar, alpha):
return ((rlimit/rbar)**alpha - 1.0) / ((rlimit/rbar)**alpha + 1.0)
def generate_fourier_uniform_grid(self):
return np.linspace(
self.rmin, self.rmax, num=self.ngrid, endpoint=False, retstep=True
)
def generate_sinc_uniform_grid(self):
return np.linspace(
self.rmin, self.rmax, num=self.ngrid, endpoint=True, retstep=True
)
def calculate_sinc_basis_functions(self, r):
# numpy sinc function is defined as sin(pi*x)/(pi*x) where pi is
# used for normalization. Thus I do not need to multiply by pi
# for j in range(0, self.nch*self.ngrid):
for j in range(0, self.ngrid):
arg = (r - self.rgrid[j]) / self.rstep
# return one column from a matrix
return np.sinc(arg)
def generate_nonuniform_grid(self, alpha, rbar):
ystep = (self.rmax - self.rmin) / (self.ngrid - 1) # / ngrid - 1 ??
# ygrid = np.ogrid[self.rmin+ystep:self.rmax+ystep:ystep]
# ygrid = np.ogrid[self.rmin:self.rmax:ystep]
# ygrid = np.linspace(self.rmin, self.rmax, num=self.ngrid)
# ygrid = np.arange(self.rmin, self.rmax, step=ystep)
# ygrid = np.linspace(
# self.rmin, self.rmax, num=self.ngrid, endpoint=True
# )
ygrid = np.empty(self.ngrid)
for j in range(1, self.ngrid+1):
ygrid[j-1] = self.rmin + ystep*(j-1.0)
Ry = rbar * np.power((1.0+ygrid) / (1.0-ygrid), 1.0/alpha)
print(ygrid)
print(len(ygrid))
return Ry, ygrid
| [
"numpy.ones",
"numpy.power",
"numpy.sinc",
"numpy.zeros",
"numpy.linspace",
"numpy.empty"
] | [((346, 365), 'numpy.ones', 'np.ones', (['self.ngrid'], {}), '(self.ngrid)\n', (353, 365), True, 'import numpy as np\n'), ((384, 404), 'numpy.zeros', 'np.zeros', (['self.ngrid'], {}), '(self.ngrid)\n', (392, 404), True, 'import numpy as np\n'), ((1523, 1602), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax'], {'num': 'self.ngrid', 'endpoint': '(False)', 'retstep': '(True)'}), '(self.rmin, self.rmax, num=self.ngrid, endpoint=False, retstep=True)\n', (1534, 1602), True, 'import numpy as np\n'), ((1684, 1762), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax'], {'num': 'self.ngrid', 'endpoint': '(True)', 'retstep': '(True)'}), '(self.rmin, self.rmax, num=self.ngrid, endpoint=True, retstep=True)\n', (1695, 1762), True, 'import numpy as np\n'), ((2181, 2193), 'numpy.sinc', 'np.sinc', (['arg'], {}), '(arg)\n', (2188, 2193), True, 'import numpy as np\n'), ((2698, 2718), 'numpy.empty', 'np.empty', (['self.ngrid'], {}), '(self.ngrid)\n', (2706, 2718), True, 'import numpy as np\n'), ((970, 1010), 'numpy.power', 'np.power', (['(1.0 + ygrid)', '(1.0 / alpha - 1.0)'], {}), '(1.0 + ygrid, 1.0 / alpha - 1.0)\n', (978, 1010), True, 'import numpy as np\n'), ((1031, 1071), 'numpy.power', 'np.power', (['(1.0 - ygrid)', '(1.0 / alpha + 1.0)'], {}), '(1.0 - ygrid, 1.0 / alpha + 1.0)\n', (1039, 1071), True, 'import numpy as np\n'), ((2833, 2885), 'numpy.power', 'np.power', (['((1.0 + ygrid) / (1.0 - ygrid))', '(1.0 / alpha)'], {}), '((1.0 + ygrid) / (1.0 - ygrid), 1.0 / alpha)\n', (2841, 2885), True, 'import numpy as np\n'), ((1172, 1190), 'numpy.power', 'np.power', (['ygrid', '(2)'], {}), '(ygrid, 2)\n', (1180, 1190), True, 'import numpy as np\n')] |
import random
class Codec:
base62 = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
baseurl = 'http://tinyurl.com/'
tinyTolong = {'1':'1'}
longTotiny = {}
def encode(self, longUrl):
"""Encodes a URL to a shortened URL.
:type longUrl: str
:rtype: str
"""
if longUrl in Codec.longTotiny:
return Codec.longTotiny[longUrl]
shortUrl = '1'
while shortUrl in Codec.tinyTolong:
shortUrl = ''.join(random.choice(Codec.base62) for i in range(6))
shortUrl = Codec.baseurl + shortUrl
Codec.tinyTolong[shortUrl] = longUrl
Codec.longTotiny[longUrl] = shortUrl
return shortUrl
def decode(self, shortUrl):
"""Decodes a shortened URL to its original URL.
:type shortUrl: str
:rtype: str
"""
longUrl = Codec.tinyTolong[shortUrl]
return longUrl | [
"random.choice"
] | [((516, 543), 'random.choice', 'random.choice', (['Codec.base62'], {}), '(Codec.base62)\n', (529, 543), False, 'import random\n')] |
import requests
from bs4 import BeautifulSoup
import os
import shutil
import threading
import uuid
import time
import sys
import helper
import threadpool
import mzitu_frame
def __task(url):
#print(url)
while not mzitu_frame.DownloadImage(url):pass
def DownloadImagePage(url):
bs = helper.GetBs(url)
#print(bs)
if not bs:return False
try:
all_li = bs.select('ul[id="pins"]>li')
pool = threadpool.ThreadPool(8)
for li in all_li:
            if li.get('class') is None: # skip ad entries
href = li.a.get('href')
title = li.a.img.get('alt')
#print(href,title)
req = threadpool.WorkRequest(__task,[href])
pool.putRequest(req)
pool.wait()
        print(url, "full page download complete")
return True
except Exception as ex:
print(ex)
return False
if __name__ == "__main__":
url = r''
if not url:
argv = sys.argv
if len(argv) == 1:
print("请输入page url")
sys.exit(-1)
url = argv[1]
#print(url)
print(DownloadImagePage(url))
print("download over") | [
"helper.GetBs",
"sys.exit",
"mzitu_frame.DownloadImage",
"threadpool.ThreadPool",
"threadpool.WorkRequest"
] | [((297, 314), 'helper.GetBs', 'helper.GetBs', (['url'], {}), '(url)\n', (309, 314), False, 'import helper\n'), ((223, 253), 'mzitu_frame.DownloadImage', 'mzitu_frame.DownloadImage', (['url'], {}), '(url)\n', (248, 253), False, 'import mzitu_frame\n'), ((428, 452), 'threadpool.ThreadPool', 'threadpool.ThreadPool', (['(8)'], {}), '(8)\n', (449, 452), False, 'import threadpool\n'), ((1040, 1052), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1048, 1052), False, 'import sys\n'), ((666, 704), 'threadpool.WorkRequest', 'threadpool.WorkRequest', (['__task', '[href]'], {}), '(__task, [href])\n', (688, 704), False, 'import threadpool\n')] |
from inspect import isawaitable
from sanic import Sanic
from sanic.response import redirect, json, text
from sanic.exceptions import SanicException
from sanic_plugin_toolkit import SanicPluginRealm
from sanic_oauthlib.client import oauthclient
def create_oauth(app):
realm = SanicPluginRealm(app)
try:
oauth = realm.register_plugin(oauthclient)
except ValueError as v:
_, oauth = v
return oauth
def create_remote(app, oauth=None):
if not oauth:
oauth = create_oauth(app)
remote = oauth.remote_app(
'dev',
consumer_key='dev',
consumer_secret='devsecret',
request_token_params={'realm': 'email'},
base_url='http://127.0.0.1:5001/api/',
request_token_url='http://127.0.0.1:5001/oauth/request_token',
access_token_method='GET',
access_token_url='http://127.0.0.1:5001/oauth/access_token',
authorize_url='http://127.0.0.1:5001/oauth/authorize'
)
return remote
def create_client(app, oauth=None, remote=None):
if not oauth:
oauth = create_oauth(app)
if not remote:
remote = create_remote(app, oauth)
session = {}
#TODO: make a better client session for test
@app.middleware
async def add_dummy_session(request):
context = oauth.context
shared_context = oauth.context.shared
shared_request_context = shared_context.request[id(request)]
shared_request_context['session'] = session
@app.route('/')
async def index(request):
if 'dev_oauth' in session:
ret = await remote.get('email')
if isinstance(ret.data, dict):
return json(ret.data)
return str(ret.data)
return redirect(app.url_for('login'))
@app.route('/login')
@remote.autoauthorize
async def login(request, context):
return {'callback': app.url_for('authorized', _external=True, _scheme='http')}
@app.route('/logout')
def logout(request):
session.pop('dev_oauth', None)
return redirect(app.url_for('index'))
@app.route('/authorized')
@remote.authorized_handler
async def authorized(request, data, context):
if data is None:
return 'Access denied: error=%s' % (
request.args['error']
)
resp = {k: v[0] for k, v in data.items()}
if 'oauth_token' in resp:
session['dev_oauth'] = resp
return json(resp)
return text(str(resp))
@app.route('/address')
async def address(request):
ret = await remote.get('address/hangzhou')
if ret.status not in (200, 201):
raise SanicException(ret.data, status_code=ret.status)
return text(ret.raw_data)
@app.route('/method/<name>')
async def method(request, name):
func = getattr(remote, name)
ret = func('method')
if isawaitable(ret):
ret = await ret
return text(ret.raw_data)
@remote.tokengetter
async def get_oauth_token():
if 'dev_oauth' in session:
resp = session['dev_oauth']
return resp['oauth_token'], resp['oauth_token_secret']
return remote
if __name__ == '__main__':
app = Sanic("test_main")
create_client(app)
app.run(host='localhost', port=8000, debug=True, auto_reload=False)
| [
"sanic.response.json",
"inspect.isawaitable",
"sanic.Sanic",
"sanic.exceptions.SanicException",
"sanic_plugin_toolkit.SanicPluginRealm",
"sanic.response.text"
] | [((281, 302), 'sanic_plugin_toolkit.SanicPluginRealm', 'SanicPluginRealm', (['app'], {}), '(app)\n', (297, 302), False, 'from sanic_plugin_toolkit import SanicPluginRealm\n'), ((3251, 3269), 'sanic.Sanic', 'Sanic', (['"""test_main"""'], {}), "('test_main')\n", (3256, 3269), False, 'from sanic import Sanic\n'), ((2746, 2764), 'sanic.response.text', 'text', (['ret.raw_data'], {}), '(ret.raw_data)\n', (2750, 2764), False, 'from sanic.response import redirect, json, text\n'), ((2913, 2929), 'inspect.isawaitable', 'isawaitable', (['ret'], {}), '(ret)\n', (2924, 2929), False, 'from inspect import isawaitable\n'), ((2974, 2992), 'sanic.response.text', 'text', (['ret.raw_data'], {}), '(ret.raw_data)\n', (2978, 2992), False, 'from sanic.response import redirect, json, text\n'), ((2470, 2480), 'sanic.response.json', 'json', (['resp'], {}), '(resp)\n', (2474, 2480), False, 'from sanic.response import redirect, json, text\n'), ((2682, 2730), 'sanic.exceptions.SanicException', 'SanicException', (['ret.data'], {'status_code': 'ret.status'}), '(ret.data, status_code=ret.status)\n', (2696, 2730), False, 'from sanic.exceptions import SanicException\n'), ((1680, 1694), 'sanic.response.json', 'json', (['ret.data'], {}), '(ret.data)\n', (1684, 1694), False, 'from sanic.response import redirect, json, text\n')] |
import os
import glob
from tqdm import tqdm
import argparse
from PIL import Image
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.utils.data as data
from torchvision import transforms, datasets
from networks.dan import DAN
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--aff_path', type=str, default='datasets/AfectNet/', help='AffectNet dataset path.')
parser.add_argument('--batch_size', type=int, default=256, help='Batch size.')
parser.add_argument('--lr', type=float, default=0.0001, help='Initial learning rate for adam.')
parser.add_argument('--workers', default=8, type=int, help='Number of data loading workers.')
parser.add_argument('--epochs', type=int, default=40, help='Total training epochs.')
parser.add_argument('--num_head', type=int, default=4, help='Number of attention head.')
parser.add_argument('--num_class', type=int, default=8, help='Number of class.')
return parser.parse_args()
class AffectNet(data.Dataset):
def __init__(self, aff_path, phase, use_cache = True, transform = None):
self.phase = phase
self.transform = transform
self.aff_path = aff_path
if use_cache:
cache_path = os.path.join(aff_path,'affectnet.csv')
if os.path.exists(cache_path):
df = pd.read_csv(cache_path)
else:
df = self.get_df()
df.to_csv(cache_path)
else:
df = self.get_df()
self.data = df[df['phase'] == phase]
self.file_paths = self.data.loc[:, 'img_path'].values
self.label = self.data.loc[:, 'label'].values
_, self.sample_counts = np.unique(self.label, return_counts=True)
# print(f' distribution of {phase} samples: {self.sample_counts}')
def get_df(self):
train_path = os.path.join(self.aff_path,'train_set/')
val_path = os.path.join(self.aff_path,'val_set/')
data = []
for anno in glob.glob(train_path + 'annotations/*_exp.npy'):
idx = os.path.basename(anno).split('_')[0]
img_path = os.path.join(train_path,f'images/{idx}.jpg')
label = int(np.load(anno))
data.append(['train',img_path,label])
for anno in glob.glob(val_path + 'annotations/*_exp.npy'):
idx = os.path.basename(anno).split('_')[0]
img_path = os.path.join(val_path,f'images/{idx}.jpg')
label = int(np.load(anno))
data.append(['val',img_path,label])
return pd.DataFrame(data = data,columns = ['phase','img_path','label'])
def __len__(self):
return len(self.file_paths)
def __getitem__(self, idx):
path = self.file_paths[idx]
image = Image.open(path).convert('RGB')
label = self.label[idx]
if self.transform is not None:
image = self.transform(image)
return image, label
class AffinityLoss(nn.Module):
def __init__(self, device, num_class=8, feat_dim=512):
super(AffinityLoss, self).__init__()
self.num_class = num_class
self.feat_dim = feat_dim
self.gap = nn.AdaptiveAvgPool2d(1)
self.device = device
self.centers = nn.Parameter(torch.randn(self.num_class, self.feat_dim).to(device))
def forward(self, x, labels):
x = self.gap(x).view(x.size(0), -1)
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_class) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_class, batch_size).t()
distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_class).long().to(self.device)
labels = labels.unsqueeze(1).expand(batch_size, self.num_class)
mask = labels.eq(classes.expand(batch_size, self.num_class))
dist = distmat * mask.float()
dist = dist / self.centers.var(dim=0).sum()
loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
return loss
class PartitionLoss(nn.Module):
def __init__(self, ):
super(PartitionLoss, self).__init__()
def forward(self, x):
num_head = x.size(1)
if num_head > 1:
var = x.var(dim=1).mean()
loss = torch.log(1+num_head/var)
else:
loss = 0
return loss
class ImbalancedDatasetSampler(data.sampler.Sampler):
def __init__(self, dataset, indices: list = None, num_samples: int = None):
self.indices = list(range(len(dataset))) if indices is None else indices
self.num_samples = len(self.indices) if num_samples is None else num_samples
df = pd.DataFrame()
df["label"] = self._get_labels(dataset)
df.index = self.indices
df = df.sort_index()
label_to_count = df["label"].value_counts()
weights = 1.0 / label_to_count[df["label"]]
self.weights = torch.DoubleTensor(weights.to_list())
# self.weights = self.weights.clamp(min=1e-5)
def _get_labels(self, dataset):
if isinstance(dataset, datasets.ImageFolder):
return [x[1] for x in dataset.imgs]
elif isinstance(dataset, torch.utils.data.Subset):
return [dataset.dataset.imgs[i][1] for i in dataset.indices]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
def run_training():
args = parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = True
model = DAN(num_class=args.num_class, num_head=args.num_head)
model.to(device)
data_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.RandomAffine(20, scale=(0.8, 1), translate=(0.2, 0.2)),
], p=0.7),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
transforms.RandomErasing(),
])
# train_dataset = AffectNet(args.aff_path, phase = 'train', transform = data_transforms) # loading dynamically
train_dataset = datasets.ImageFolder(f'{args.aff_path}/train', transform = data_transforms) # loading statically
if args.num_class == 7: # ignore the 8-th class
idx = [i for i in range(len(train_dataset)) if train_dataset.imgs[i][1] != 7]
train_dataset = data.Subset(train_dataset, idx)
print('Whole train set size:', train_dataset.__len__())
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size = args.batch_size,
num_workers = args.workers,
sampler=ImbalancedDatasetSampler(train_dataset),
shuffle = False,
pin_memory = True)
data_transforms_val = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
# val_dataset = AffectNet(args.aff_path, phase = 'val', transform = data_transforms_val) # loading dynamically
val_dataset = datasets.ImageFolder(f'{args.aff_path}/val', transform = data_transforms_val) # loading statically
if args.num_class == 7: # ignore the 8-th class
idx = [i for i in range(len(val_dataset)) if val_dataset.imgs[i][1] != 7]
val_dataset = data.Subset(val_dataset, idx)
print('Validation set size:', val_dataset.__len__())
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size = args.batch_size,
num_workers = args.workers,
shuffle = False,
pin_memory = True)
criterion_cls = torch.nn.CrossEntropyLoss().to(device)
criterion_af = AffinityLoss(device, num_class=args.num_class)
criterion_pt = PartitionLoss()
params = list(model.parameters()) + list(criterion_af.parameters())
optimizer = torch.optim.Adam(params,args.lr,weight_decay = 0)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = 0.6)
best_acc = 0
for epoch in tqdm(range(1, args.epochs + 1)):
running_loss = 0.0
correct_sum = 0
iter_cnt = 0
model.train()
for (imgs, targets) in train_loader:
iter_cnt += 1
optimizer.zero_grad()
imgs = imgs.to(device)
targets = targets.to(device)
out,feat,heads = model(imgs)
loss = criterion_cls(out,targets) + criterion_af(feat,targets) + criterion_pt(heads)
loss.backward()
optimizer.step()
running_loss += loss
_, predicts = torch.max(out, 1)
correct_num = torch.eq(predicts, targets).sum()
correct_sum += correct_num
acc = correct_sum.float() / float(train_dataset.__len__())
running_loss = running_loss/iter_cnt
tqdm.write('[Epoch %d] Training accuracy: %.4f. Loss: %.3f. LR %.6f' % (epoch, acc, running_loss,optimizer.param_groups[0]['lr']))
with torch.no_grad():
running_loss = 0.0
iter_cnt = 0
bingo_cnt = 0
sample_cnt = 0
model.eval()
for imgs, targets in val_loader:
imgs = imgs.to(device)
targets = targets.to(device)
out,feat,heads = model(imgs)
loss = criterion_cls(out,targets) + criterion_af(feat,targets) + criterion_pt(heads)
running_loss += loss
iter_cnt+=1
_, predicts = torch.max(out, 1)
correct_num = torch.eq(predicts,targets)
bingo_cnt += correct_num.sum().cpu()
sample_cnt += out.size(0)
running_loss = running_loss/iter_cnt
scheduler.step()
acc = bingo_cnt.float()/float(sample_cnt)
acc = np.around(acc.numpy(),4)
best_acc = max(acc,best_acc)
tqdm.write("[Epoch %d] Validation accuracy:%.4f. Loss:%.3f" % (epoch, acc, running_loss))
tqdm.write("best_acc:" + str(best_acc))
if args.num_class == 7 and acc > 0.65:
torch.save({'iter': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),},
os.path.join('checkpoints', "affecnet7_epoch"+str(epoch)+"_acc"+str(acc)+".pth"))
tqdm.write('Model saved.')
elif args.num_class == 8 and acc > 0.62:
torch.save({'iter': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),},
os.path.join('checkpoints', "affecnet8_epoch"+str(epoch)+"_acc"+str(acc)+".pth"))
tqdm.write('Model saved.')
if __name__ == "__main__":
run_training() | [
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.max",
"torch.pow",
"torch.eq",
"torch.cuda.is_available",
"torch.arange",
"os.path.exists",
"argparse.ArgumentParser",
"tqdm.tqdm.write",
"torchvision.datasets.ImageFolder",
"torch.nn.AdaptiveAvgPool2d",
"pandas.DataFrame",
"torchvisio... | [((298, 323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (321, 323), False, 'import argparse\n'), ((5792, 5817), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5815, 5817), False, 'import torch\n'), ((5972, 6025), 'networks.dan.DAN', 'DAN', ([], {'num_class': 'args.num_class', 'num_head': 'args.num_head'}), '(num_class=args.num_class, num_head=args.num_head)\n', (5975, 6025), False, 'from networks.dan import DAN\n'), ((6661, 6734), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['f"""{args.aff_path}/train"""'], {'transform': 'data_transforms'}), "(f'{args.aff_path}/train', transform=data_transforms)\n", (6681, 6734), False, 'from torchvision import transforms, datasets\n'), ((7907, 7982), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['f"""{args.aff_path}/val"""'], {'transform': 'data_transforms_val'}), "(f'{args.aff_path}/val', transform=data_transforms_val)\n", (7927, 7982), False, 'from torchvision import transforms, datasets\n'), ((8278, 8408), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(val_dataset, batch_size=args.batch_size,\n num_workers=args.workers, shuffle=False, pin_memory=True)\n', (8305, 8408), False, 'import torch\n'), ((8854, 8903), 'torch.optim.Adam', 'torch.optim.Adam', (['params', 'args.lr'], {'weight_decay': '(0)'}), '(params, args.lr, weight_decay=0)\n', (8870, 8903), False, 'import torch\n'), ((8920, 8980), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['optimizer'], {'gamma': '(0.6)'}), '(optimizer, gamma=0.6)\n', (8958, 8980), False, 'import torch\n'), ((1733, 1774), 'numpy.unique', 'np.unique', (['self.label'], {'return_counts': '(True)'}), '(self.label, return_counts=True)\n', (1742, 1774), True, 'import numpy as np\n'), ((1894, 1935), 'os.path.join', 'os.path.join', (['self.aff_path', '"""train_set/"""'], {}), "(self.aff_path, 'train_set/')\n", (1906, 1935), False, 'import os\n'), ((1954, 1993), 'os.path.join', 'os.path.join', (['self.aff_path', '"""val_set/"""'], {}), "(self.aff_path, 'val_set/')\n", (1966, 1993), False, 'import os\n'), ((2040, 2087), 'glob.glob', 'glob.glob', (["(train_path + 'annotations/*_exp.npy')"], {}), "(train_path + 'annotations/*_exp.npy')\n", (2049, 2087), False, 'import glob\n'), ((2330, 2375), 'glob.glob', 'glob.glob', (["(val_path + 'annotations/*_exp.npy')"], {}), "(val_path + 'annotations/*_exp.npy')\n", (2339, 2375), False, 'import glob\n'), ((2609, 2672), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['phase', 'img_path', 'label']"}), "(data=data, columns=['phase', 'img_path', 'label'])\n", (2621, 2672), True, 'import pandas as pd\n'), ((3225, 3248), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (3245, 3248), True, 'import torch.nn as nn\n'), ((4800, 4814), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4812, 4814), True, 'import pandas as pd\n'), ((6924, 6955), 'torch.utils.data.Subset', 'data.Subset', (['train_dataset', 'idx'], {}), '(train_dataset, idx)\n', (6935, 6955), True, 'import torch.utils.data as data\n'), ((8168, 8197), 'torch.utils.data.Subset', 'data.Subset', (['val_dataset', 'idx'], {}), '(val_dataset, idx)\n', (8179, 8197), True, 'import torch.utils.data as data\n'), ((9853, 9989), 'tqdm.tqdm.write', 'tqdm.write', (["('[Epoch %d] Training accuracy: %.4f. 
Loss: %.3f. LR %.6f' % (epoch, acc,\n running_loss, optimizer.param_groups[0]['lr']))"], {}), "('[Epoch %d] Training accuracy: %.4f. Loss: %.3f. LR %.6f' % (\n epoch, acc, running_loss, optimizer.param_groups[0]['lr']))\n", (9863, 9989), False, 'from tqdm import tqdm\n'), ((1274, 1313), 'os.path.join', 'os.path.join', (['aff_path', '"""affectnet.csv"""'], {}), "(aff_path, 'affectnet.csv')\n", (1286, 1313), False, 'import os\n'), ((1328, 1354), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (1342, 1354), False, 'import os\n'), ((2167, 2212), 'os.path.join', 'os.path.join', (['train_path', 'f"""images/{idx}.jpg"""'], {}), "(train_path, f'images/{idx}.jpg')\n", (2179, 2212), False, 'import os\n'), ((2263, 2302), 'torch.utils.data.append', 'data.append', (["['train', img_path, label]"], {}), "(['train', img_path, label])\n", (2274, 2302), True, 'import torch.utils.data as data\n'), ((2455, 2498), 'os.path.join', 'os.path.join', (['val_path', 'f"""images/{idx}.jpg"""'], {}), "(val_path, f'images/{idx}.jpg')\n", (2467, 2498), False, 'import os\n'), ((2549, 2586), 'torch.utils.data.append', 'data.append', (["['val', img_path, label]"], {}), "(['val', img_path, label])\n", (2560, 2586), True, 'import torch.utils.data as data\n'), ((4390, 4419), 'torch.log', 'torch.log', (['(1 + num_head / var)'], {}), '(1 + num_head / var)\n', (4399, 4419), False, 'import torch\n'), ((5746, 5771), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5769, 5771), False, 'import torch\n'), ((6108, 6137), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (6125, 6137), False, 'from torchvision import transforms, datasets\n'), ((6147, 6180), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6178, 6180), False, 'from torchvision import transforms, datasets\n'), ((6330, 6351), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6349, 6351), False, 'from torchvision import transforms, datasets\n'), ((6361, 6436), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (6381, 6436), False, 'from torchvision import transforms, datasets\n'), ((6479, 6505), 'torchvision.transforms.RandomErasing', 'transforms.RandomErasing', ([], {}), '()\n', (6503, 6505), False, 'from torchvision import transforms, datasets\n'), ((7514, 7543), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (7531, 7543), False, 'from torchvision import transforms, datasets\n'), ((7553, 7574), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7572, 7574), False, 'from torchvision import transforms, datasets\n'), ((7584, 7659), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7604, 7659), False, 'from torchvision import transforms, datasets\n'), ((8625, 8652), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (8650, 8652), False, 'import torch\n'), ((9615, 9632), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (9624, 9632), False, 'import torch\n'), ((10006, 10021), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10019, 10021), False, 'import torch\n'), ((10958, 11051), 'tqdm.tqdm.write', 
'tqdm.write', (["('[Epoch %d] Validation accuracy:%.4f. Loss:%.3f' % (epoch, acc, running_loss))"], {}), "('[Epoch %d] Validation accuracy:%.4f. Loss:%.3f' % (epoch, acc,\n running_loss))\n", (10968, 11051), False, 'from tqdm import tqdm\n'), ((1377, 1400), 'pandas.read_csv', 'pd.read_csv', (['cache_path'], {}), '(cache_path)\n', (1388, 1400), True, 'import pandas as pd\n'), ((2236, 2249), 'numpy.load', 'np.load', (['anno'], {}), '(anno)\n', (2243, 2249), True, 'import numpy as np\n'), ((2522, 2535), 'numpy.load', 'np.load', (['anno'], {}), '(anno)\n', (2529, 2535), True, 'import numpy as np\n'), ((2819, 2835), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2829, 2835), False, 'from PIL import Image\n'), ((5536, 5603), 'torch.multinomial', 'torch.multinomial', (['self.weights', 'self.num_samples'], {'replacement': '(True)'}), '(self.weights, self.num_samples, replacement=True)\n', (5553, 5603), False, 'import torch\n'), ((10538, 10555), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (10547, 10555), False, 'import torch\n'), ((10587, 10614), 'torch.eq', 'torch.eq', (['predicts', 'targets'], {}), '(predicts, targets)\n', (10595, 10614), False, 'import torch\n'), ((11469, 11495), 'tqdm.tqdm.write', 'tqdm.write', (['"""Model saved."""'], {}), "('Model saved.')\n", (11479, 11495), False, 'from tqdm import tqdm\n'), ((3315, 3357), 'torch.randn', 'torch.randn', (['self.num_class', 'self.feat_dim'], {}), '(self.num_class, self.feat_dim)\n', (3326, 3357), False, 'import torch\n'), ((6231, 6296), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', (['(20)'], {'scale': '(0.8, 1)', 'translate': '(0.2, 0.2)'}), '(20, scale=(0.8, 1), translate=(0.2, 0.2))\n', (6254, 6296), False, 'from torchvision import transforms, datasets\n'), ((9659, 9686), 'torch.eq', 'torch.eq', (['predicts', 'targets'], {}), '(predicts, targets)\n', (9667, 9686), False, 'import torch\n'), ((11867, 11893), 'tqdm.tqdm.write', 'tqdm.write', (['"""Model saved."""'], {}), "('Model saved.')\n", (11877, 11893), False, 'from tqdm import tqdm\n'), ((2107, 2129), 'os.path.basename', 'os.path.basename', (['anno'], {}), '(anno)\n', (2123, 2129), False, 'import os\n'), ((2395, 2417), 'os.path.basename', 'os.path.basename', (['anno'], {}), '(anno)\n', (2411, 2417), False, 'import os\n'), ((3769, 3797), 'torch.arange', 'torch.arange', (['self.num_class'], {}), '(self.num_class)\n', (3781, 3797), False, 'import torch\n'), ((3499, 3514), 'torch.pow', 'torch.pow', (['x', '(2)'], {}), '(x, 2)\n', (3508, 3514), False, 'import torch\n'), ((3597, 3623), 'torch.pow', 'torch.pow', (['self.centers', '(2)'], {}), '(self.centers, 2)\n', (3606, 3623), False, 'import torch\n')] |
import os
import keyboard
banner = '''\u001b[34m____ ____ _ _ ___ ____ _ _ _ ____ ___ ____ ____ _ _ ____ ____ _ _
| | | |\/| |__] | | | | | |__| | |___ |__/ |\/| |__| |__/ |_/
|___ |__| | | |__] |__| |_|_| | | | |___ | \ | | | | | \ | \_ \u001b[37m'''
class Watermark():
def All(type, author, checker):
try:
with open('Combos.txt', "r") as Combo_File:
for line in Combo_File:
line = line.replace('\n', '')
Account_Combo = line.split(':')
email = Account_Combo[0]
password = Account_Combo[1]
with open('New_Combo.txt', "a+") as f:
                        f.write(f'{email}:{password} | Type: {type} | Checked By: {author} | Checker: {checker}\n')
print('\n[\u001b[34mWATERMARK\u001b[37m] Success')
input()
os._exit(0)
except IndexError:
print('\n[\u001b[34mWATERMARK\u001b[37m] Invalid Combo Syntax, Example <EMAIL>:FuckingNoLife123')
input()
os._exit(0)
def Type(type):
try:
with open('Combos.txt', "r") as Combo_File:
for line in Combo_File:
line = line.replace('\n', '')
Account_Combo = line.split(':')
email = Account_Combo[0]
password = Account_Combo[1]
with open('New_Combo.txt', "a+") as f:
f.write(f'{email}:{password} | Type: {type}\n')
print('\n[\u001b[34mWATERMARK\u001b[37m] Success')
input()
os._exit(0)
except IndexError:
print('\n[\u001b[34mWATERMARK\u001b[37m] Invalid Combo Syntax, Example <EMAIL>:FuckingNoLife123')
input()
os._exit(0)
def Author(author):
try:
with open('Combos.txt', "r") as Combo_File:
for line in Combo_File:
line = line.replace('\n', '')
Account_Combo = line.split(':')
email = Account_Combo[0]
password = Account_Combo[1]
with open('New_Combo.txt', "a+") as f:
f.write(f'{email}:{password} | Checked By: {author}\n')
print('\n[\u001b[34mWATERMARK\u001b[37m] Success')
input()
os._exit(0)
except IndexError:
print('\n[\u001b[34mWATERMARK\u001b[37m] Invalid Combo Syntax, Example <EMAIL>:FuckingNoLife123')
input()
os._exit(0)
def Checker(checker):
try:
with open('Combos.txt', "r") as Combo_File:
for line in Combo_File:
line = line.replace('\n', '')
Account_Combo = line.split(':')
email = Account_Combo[0]
password = Account_Combo[1]
with open('New_Combo.txt', "a+") as f:
f.write(f'{email}:{password} | Checker: {checker}\n')
print('\n[\u001b[34mWATERMARK\u001b[37m] Success')
input()
os._exit(0)
except IndexError:
print('\n[\u001b[34mWATERMARK\u001b[37m] Invalid Combo Syntax, Example <EMAIL>:FuckingNoLife123')
input()
os._exit(0)
if __name__ == "__main__":
os.system('cls & title [Combo Watermarker] By Dropout')
print(
f'{banner}\n',
'\n[\u001b[34m1\u001b[37m] All',
'\n[\u001b[34m2\u001b[37m] Combo Type',
'\n[\u001b[34m3\u001b[37m] Author',
'\n[\u001b[34m4\u001b[37m] Checker'
)
while True:
try:
if keyboard.is_pressed('1'):
keyboard.write('\b')
cmd_type = input('\n\u001b[34m>\u001b[37m Combo Type: ')
author = input('\u001b[34m>\u001b[37m Author: ')
checker = input('\u001b[34m>\u001b[37m Checker: ')
Watermark.All(cmd_type, author, checker)
break
elif keyboard.is_pressed('2'):
keyboard.write('\b')
cmd_type = input('\n\u001b[34m>\u001b[37m Combo Type: ')
Watermark.Type(cmd_type)
break
elif keyboard.is_pressed('3'):
keyboard.write('\b')
author = input('\n\u001b[34m>\u001b[37m Author: ')
Watermark.Author(author)
break
elif keyboard.is_pressed('4'):
keyboard.write('\b')
checker = input('\n\u001b[34m>\u001b[37m Checker: ')
Watermark.Checker(checker)
break
except:
continue | [
"os.system",
"os._exit",
"keyboard.write",
"keyboard.is_pressed"
] | [((3586, 3641), 'os.system', 'os.system', (['"""cls & title [Combo Watermarker] By Dropout"""'], {}), "('cls & title [Combo Watermarker] By Dropout')\n", (3595, 3641), False, 'import os\n'), ((3917, 3941), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""1"""'], {}), "('1')\n", (3936, 3941), False, 'import keyboard\n'), ((950, 961), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (958, 961), False, 'import os\n'), ((1142, 1153), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (1150, 1153), False, 'import os\n'), ((1738, 1749), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (1746, 1749), False, 'import os\n'), ((1931, 1942), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (1939, 1942), False, 'import os\n'), ((2539, 2550), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (2547, 2550), False, 'import os\n'), ((2730, 2741), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (2738, 2741), False, 'import os\n'), ((3340, 3351), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (3348, 3351), False, 'import os\n'), ((3534, 3545), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (3542, 3545), False, 'import os\n'), ((3960, 3982), 'keyboard.write', 'keyboard.write', (['"""\x08"""'], {}), "('\\x08')\n", (3974, 3982), False, 'import keyboard\n'), ((4288, 4312), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""2"""'], {}), "('2')\n", (4307, 4312), False, 'import keyboard\n'), ((4331, 4353), 'keyboard.write', 'keyboard.write', (['"""\x08"""'], {}), "('\\x08')\n", (4345, 4353), False, 'import keyboard\n'), ((4509, 4533), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""3"""'], {}), "('3')\n", (4528, 4533), False, 'import keyboard\n'), ((4552, 4574), 'keyboard.write', 'keyboard.write', (['"""\x08"""'], {}), "('\\x08')\n", (4566, 4574), False, 'import keyboard\n'), ((4724, 4748), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""4"""'], {}), "('4')\n", (4743, 4748), False, 'import keyboard\n'), ((4767, 4789), 'keyboard.write', 'keyboard.write', (['"""\x08"""'], {}), "('\\x08')\n", (4781, 4789), False, 'import keyboard\n')] |
#! /bin/python
import re
from sympy import exp, ln, sin, cos
from sympy.abc import x
from chebyshev.approximation import get_best_approximation
table_of_contents = []
function_entries = []
def generate_for_readme(approximation):
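    # Build one README entry for this approximation: a table-of-contents anchor, the coefficient table, the maximum error, and the two saved plots.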
function_filename = re.sub(r'( |/)', '_', str(approximation.function))
approximation_plot_filename = f'images/{function_filename}_approximation.png'
absolute_error_plot_filename = f'images/{function_filename}_absolute_error.png'
table_of_contents.append((str(approximation.function),
re.sub(r'(\+|/|\(|\))', '', str(approximation.function))
.replace(' ', '-')))
function_entries.append(f"""## `{approximation.function}`
Coefficients for `{approximation.function}` on the `[{approximation.interval[0]}, {approximation.interval[1]}]` interval:
{approximation.get_coeffs_as_table()}
Maximum error on that interval is `{approximation.get_error()}`
<img src="{approximation_plot_filename}" alt="{approximation_plot_filename}" width="50%"><img src="{absolute_error_plot_filename}" alt="{absolute_error_plot_filename}" width="50%">
""")
plotted = approximation.plot_approximation(show=False)
backend = plotted.backend(plotted)
backend.process_series()
backend.fig.savefig(approximation_plot_filename, dpi=300)
plotted = approximation.plot_absolute_error(show=False)
backend = plotted.backend(plotted)
backend.process_series()
backend.fig.savefig(absolute_error_plot_filename, dpi=300)
generate_for_readme(get_best_approximation(exp(x), (0, 1), 6, 7, point=0.5))
generate_for_readme(get_best_approximation(ln(1 + x), (0, 1), 6, 20))
generate_for_readme(get_best_approximation(sin(x) / x, (-1, 1), 8, 18))
generate_for_readme(get_best_approximation(cos(x), (-1, 1), 8, 18))
print('## Approximated Functions\n')
for (function_name, function_link) in table_of_contents:
print(f'* [`{function_name}`](#{function_link})')
print('\n')
for function_entry in function_entries:
print(function_entry)
| [
"sympy.exp",
"sympy.sin",
"sympy.ln",
"sympy.cos"
] | [((1590, 1596), 'sympy.exp', 'exp', (['x'], {}), '(x)\n', (1593, 1596), False, 'from sympy import exp, ln, sin, cos\n'), ((1667, 1676), 'sympy.ln', 'ln', (['(1 + x)'], {}), '(1 + x)\n', (1669, 1676), False, 'from sympy import exp, ln, sin, cos\n'), ((1809, 1815), 'sympy.cos', 'cos', (['x'], {}), '(x)\n', (1812, 1815), False, 'from sympy import exp, ln, sin, cos\n'), ((1737, 1743), 'sympy.sin', 'sin', (['x'], {}), '(x)\n', (1740, 1743), False, 'from sympy import exp, ln, sin, cos\n')] |
# Licensed under an MIT open source license - see LICENSE
'''
Test functions for VCA
'''
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from ..statistics import VCA, VCA_Distance
from ._testing_data import \
dataset1, dataset2, computed_data, computed_distances
class testVCA(TestCase):
def setUp(self):
self.dataset1 = dataset1
self.dataset2 = dataset2
def test_VCA_method(self):
self.tester = VCA(dataset1["cube"][0],
dataset1["cube"][1],
slice_sizes=[1.0])
self.tester.run()
assert np.allclose(self.tester.ps1D, computed_data['vca_val'])
def test_VCA_distance(self):
self.tester_dist = \
VCA_Distance(dataset1["cube"],
dataset2["cube"]).distance_metric()
npt.assert_almost_equal(self.tester_dist.distance,
computed_distances['vca_distance'])
| [
"numpy.testing.assert_almost_equal",
"numpy.allclose"
] | [((627, 682), 'numpy.allclose', 'np.allclose', (['self.tester.ps1D', "computed_data['vca_val']"], {}), "(self.tester.ps1D, computed_data['vca_val'])\n", (638, 682), True, 'import numpy as np\n'), ((858, 949), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['self.tester_dist.distance', "computed_distances['vca_distance']"], {}), "(self.tester_dist.distance, computed_distances[\n 'vca_distance'])\n", (881, 949), True, 'import numpy.testing as npt\n')] |
import FWCore.ParameterSet.Config as cms
from ElectroWeakAnalysis.ZMuMu.ZMuMuCategoriesSequences_cff import *
import copy
zPlots = cms.PSet(
histograms = cms.VPSet(
cms.PSet(
min = cms.untracked.double(0.0),
max = cms.untracked.double(200.0),
nbins = cms.untracked.int32(200),
name = cms.untracked.string("zMass"),
description = cms.untracked.string("Z mass [GeV/c^{2}]"),
plotquantity = cms.untracked.string("mass")
),
cms.PSet(
min = cms.untracked.double(0.0),
max = cms.untracked.double(200.0),
nbins = cms.untracked.int32(200),
name = cms.untracked.string("mu1Pt"),
description = cms.untracked.string("Highest muon p_{t} [GeV/c]"),
plotquantity = cms.untracked.string("max(daughter(0).pt,daughter(1).pt)")
),
cms.PSet(
min = cms.untracked.double(0.0),
max = cms.untracked.double(200.0),
nbins = cms.untracked.int32(200),
name = cms.untracked.string("mu2Pt"),
description = cms.untracked.string("Lowest muon p_{t} [GeV/c]"),
plotquantity = cms.untracked.string("min(daughter(0).pt,daughter(1).pt)")
)
)
)
# ZMuMu at least 1 HLT + 2 track-iso (Shape)
goodZToMuMuPlotsLoose = cms.EDAnalyzer(
"CandViewHistoAnalyzer",
zPlots,
src = cms.InputTag("goodZToMuMuAtLeast1HLTLoose")
)
goodZToMuMuPlots = cms.EDAnalyzer(
"CandViewHistoAnalyzer",
zPlots,
src = cms.InputTag("goodZToMuMuAtLeast1HLT")
)
## #### plot for loose cuts
## goodZToMuMuSequence.__iadd__(goodZToMuMuPlots)
## goodZToMuMuSequence.setLabel("goodZToMuMuAtLeast1HLT")
## #ZMuMu 2 HLT + 2 track-iso
## goodZToMuMu2HLTPlots = copy.deepcopy(goodZToMuMuPlots)
## goodZToMuMu2HLTPlots.src = cms.InputTag("goodZToMuMu2HLT")
## goodZToMuMu2HLTSequence.__iadd__(goodZToMuMu2HLTPlots)
## goodZToMuMu2HLTSequence.setLabel("goodZToMuMu2HLT")
## #ZMuMu 1 HLT + 2 track-iso
## goodZToMuMu1HLTPlots = copy.deepcopy(goodZToMuMuPlots)
## goodZToMuMu1HLTPlots.src = cms.InputTag("goodZToMuMu1HLT")
## goodZToMuMu1HLTSequence.__iadd__(goodZToMuMu1HLTPlots)
## #ZMuMu at least 1 HLT + at least 1 NON track-iso
## nonIsolatedZToMuMuPlots = copy.deepcopy(goodZToMuMuPlots)
## nonIsolatedZToMuMuPlots.src = cms.InputTag("nonIsolatedZToMuMuAtLeast1HLT")
## nonIsolatedZToMuMuSequence.__iadd__(nonIsolatedZToMuMuPlots)
## #ZMuMu at least 1 HLT + 1 NON track-iso
## oneNonIsolatedZToMuMuPlots = copy.deepcopy(goodZToMuMuPlots)
## oneNonIsolatedZToMuMuPlots.src = cms.InputTag("oneNonIsolatedZToMuMuAtLeast1HLT")
## oneNonIsolatedZToMuMuSequence.__iadd__(oneNonIsolatedZToMuMuPlots)
## #ZMuMu at least 1 HLT + 2 NON track-iso
## twoNonIsolatedZToMuMuPlots = copy.deepcopy(goodZToMuMuPlots)
## twoNonIsolatedZToMuMuPlots.src = cms.InputTag("twoNonIsolatedZToMuMuAtLeast1HLT")
## twoNonIsolatedZToMuMuSequence.__iadd__(twoNonIsolatedZToMuMuPlots)
## #ZMuSta First HLT + 2 track-iso
## goodZToMuMuOneStandAloneMuonPlots = copy.deepcopy(goodZToMuMuPlots)
## goodZToMuMuOneStandAloneMuonPlots.src = cms.InputTag("goodZToMuMuOneStandAloneMuonFirstHLT")
## goodZToMuMuOneStandAloneMuonSequence.__iadd__(goodZToMuMuOneStandAloneMuonPlots)
## #ZMuTk First HLT + 2 track-iso
## goodZToMuMuOneTrackPlots = copy.deepcopy(goodZToMuMuPlots)
## goodZToMuMuOneTrackPlots.src = cms.InputTag("goodZToMuMuOneTrackFirstHLT")
## goodZToMuMuOneTrackSequence.__iadd__(goodZToMuMuOneTrackPlots)
## #ZMuMu same charge
## goodZToMuMuSameChargeAtLeast1HLTPlots = copy.deepcopy(goodZToMuMuPlots)
## goodZToMuMuSameChargeAtLeast1HLTPlots.src = cms.InputTag("goodZToMuMuSameChargeAtLeast1HLT")
## goodZToMuMuSameChargeSequence.__iadd__(goodZToMuMuSameChargeAtLeast1HLTPlots)
## goodZToMuMuSameCharge2HLTPlots = copy.deepcopy(goodZToMuMuPlots)
## goodZToMuMuSameCharge2HLTPlots.src = cms.InputTag("goodZToMuMuSameCharge2HLT")
## goodZToMuMuSameCharge2HLTSequence.__iadd__(goodZToMuMuSameCharge2HLTPlots)
## goodZToMuMuSameCharge1HLTPlots = copy.deepcopy(goodZToMuMuPlots)
## goodZToMuMuSameCharge1HLTPlots.src = cms.InputTag("goodZToMuMuSameCharge1HLT")
## goodZToMuMuSameCharge1HLTSequence.__iadd__(goodZToMuMuSameCharge1HLTPlots)
#### plot for tight cuts
goodZToMuMuPath.__iadd__(goodZToMuMuPlots)
goodZToMuMuPath.setLabel("goodZToMuMuAtLeast1HLT")
#ZMuMu 2 HLT + 2 track-iso
goodZToMuMu2HLTPlots = copy.deepcopy(goodZToMuMuPlots)
goodZToMuMu2HLTPlots.src = cms.InputTag("goodZToMuMu2HLT")
goodZToMuMu2HLTPath.__iadd__(goodZToMuMu2HLTPlots)
goodZToMuMu2HLTPath.setLabel("goodZToMuMu2HLT")
#ZMuMu 1 HLT + 2 track-iso
goodZToMuMu1HLTPlots= copy.deepcopy(goodZToMuMuPlots)
goodZToMuMu1HLTPlots.src = cms.InputTag("goodZToMuMu1HLT")
goodZToMuMu1HLTPath.__iadd__(goodZToMuMu1HLTPlots)
##### plot for AB and BB region
goodZToMuMuAB1HLTPlots= copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuAB1HLTPlots.src = cms.InputTag("goodZToMuMuAB1HLT")
goodZToMuMuAB1HLTPath.__iadd__(goodZToMuMuAB1HLTPlots)
goodZToMuMuBB2HLTPlots= copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuBB2HLTPlots.src = cms.InputTag("goodZToMuMuBB2HLT")
goodZToMuMuBB2HLTPath.__iadd__(goodZToMuMuBB2HLTPlots)
#ZMuMu at least 1 HLT + at least 1 NON track-iso
nonIsolatedZToMuMuPlots = copy.deepcopy(goodZToMuMuPlots)
nonIsolatedZToMuMuPlots.src = cms.InputTag("nonIsolatedZToMuMuAtLeast1HLT")
nonIsolatedZToMuMuPath.__iadd__(nonIsolatedZToMuMuPlots)
#ZMuMu at least 1 HLT + 1 NON track-iso
oneNonIsolatedZToMuMuPlots = copy.deepcopy(goodZToMuMuPlots)
oneNonIsolatedZToMuMuPlots.src = cms.InputTag("oneNonIsolatedZToMuMuAtLeast1HLT")
oneNonIsolatedZToMuMuPath.__iadd__(oneNonIsolatedZToMuMuPlots)
#ZMuMu at least 1 HLT + 2 NON track-iso
twoNonIsolatedZToMuMuPlots = copy.deepcopy(goodZToMuMuPlots)
twoNonIsolatedZToMuMuPlots.src = cms.InputTag("twoNonIsolatedZToMuMuAtLeast1HLT")
twoNonIsolatedZToMuMuPath.__iadd__(twoNonIsolatedZToMuMuPlots)
#ZMuSta global HLT + 2 track-iso
goodZToMuMuOneStandAloneMuonPlots = copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuOneStandAloneMuonPlots.src = cms.InputTag("goodZToMuMuOneStandAloneMuonFirstHLT")
goodZToMuMuOneStandAloneMuonPath.__iadd__(goodZToMuMuOneStandAloneMuonPlots)
#ZMuTk First HLT + 2 track-iso
goodZToMuMuOneTrackPlots = copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuOneTrackPlots.src = cms.InputTag("goodZToMuMuOneTrackFirstHLT")
goodZToMuMuOneTrackPath.__iadd__(goodZToMuMuOneTrackPlots)
#ZMuTkMu global HLT + 2 track-iso
goodZToMuMuOneTrackerMuonPlots = copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuOneTrackerMuonPlots.src = cms.InputTag("goodZToMuMuOneTrackerMuonFirstHLT")
goodZToMuMuOneTrackerMuonPath.__iadd__(goodZToMuMuOneTrackerMuonPlots)
#ZMuMu same charge
goodZToMuMuSameChargeAtLeast1HLTPlots = copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuSameChargeAtLeast1HLTPlots.src = cms.InputTag("goodZToMuMuSameChargeAtLeast1HLT")
goodZToMuMuSameChargePath.__iadd__(goodZToMuMuSameChargeAtLeast1HLTPlots)
goodZToMuMuSameCharge2HLTPlots = copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuSameCharge2HLTPlots.src = cms.InputTag("goodZToMuMuSameCharge2HLT")
goodZToMuMuSameCharge2HLTPath.__iadd__(goodZToMuMuSameCharge2HLTPlots)
goodZToMuMuSameCharge1HLTPlots = copy.deepcopy(goodZToMuMuPlots)
goodZToMuMuSameCharge1HLTPlots.src = cms.InputTag("goodZToMuMuSameCharge1HLT")
goodZToMuMuSameCharge1HLTPath.__iadd__(goodZToMuMuSameCharge1HLTPlots)
| [
"FWCore.ParameterSet.Config.untracked.double",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.InputTag",
"copy.deepcopy",
"FWCore.ParameterSet.Config.untracked.int32"
] | [((4281, 4312), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (4294, 4312), False, 'import copy\n'), ((4340, 4371), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMu2HLT"""'], {}), "('goodZToMuMu2HLT')\n", (4352, 4371), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4523, 4554), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (4536, 4554), False, 'import copy\n'), ((4582, 4613), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMu1HLT"""'], {}), "('goodZToMuMu1HLT')\n", (4594, 4613), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4724, 4755), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (4737, 4755), False, 'import copy\n'), ((4785, 4818), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuAB1HLT"""'], {}), "('goodZToMuMuAB1HLT')\n", (4797, 4818), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4899, 4930), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (4912, 4930), False, 'import copy\n'), ((4960, 4993), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuBB2HLT"""'], {}), "('goodZToMuMuBB2HLT')\n", (4972, 4993), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5127, 5158), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (5140, 5158), False, 'import copy\n'), ((5189, 5234), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""nonIsolatedZToMuMuAtLeast1HLT"""'], {}), "('nonIsolatedZToMuMuAtLeast1HLT')\n", (5201, 5234), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5363, 5394), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (5376, 5394), False, 'import copy\n'), ((5428, 5476), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""oneNonIsolatedZToMuMuAtLeast1HLT"""'], {}), "('oneNonIsolatedZToMuMuAtLeast1HLT')\n", (5440, 5476), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5613, 5644), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (5626, 5644), False, 'import copy\n'), ((5678, 5726), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""twoNonIsolatedZToMuMuAtLeast1HLT"""'], {}), "('twoNonIsolatedZToMuMuAtLeast1HLT')\n", (5690, 5726), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5863, 5894), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (5876, 5894), False, 'import copy\n'), ((5935, 5987), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuOneStandAloneMuonFirstHLT"""'], {}), "('goodZToMuMuOneStandAloneMuonFirstHLT')\n", (5947, 5987), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6127, 6158), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (6140, 6158), False, 'import copy\n'), ((6190, 6233), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuOneTrackFirstHLT"""'], {}), "('goodZToMuMuOneTrackFirstHLT')\n", (6202, 6233), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6364, 6395), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (6377, 6395), False, 'import copy\n'), ((6433, 6482), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuOneTrackerMuonFirstHLT"""'], {}), "('goodZToMuMuOneTrackerMuonFirstHLT')\n", (6445, 6482), True, 'import FWCore.ParameterSet.Config as 
cms\n'), ((6620, 6651), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (6633, 6651), False, 'import copy\n'), ((6696, 6744), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuSameChargeAtLeast1HLT"""'], {}), "('goodZToMuMuSameChargeAtLeast1HLT')\n", (6708, 6744), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6854, 6885), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (6867, 6885), False, 'import copy\n'), ((6923, 6964), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuSameCharge2HLT"""'], {}), "('goodZToMuMuSameCharge2HLT')\n", (6935, 6964), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7071, 7102), 'copy.deepcopy', 'copy.deepcopy', (['goodZToMuMuPlots'], {}), '(goodZToMuMuPlots)\n', (7084, 7102), False, 'import copy\n'), ((7140, 7181), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuSameCharge1HLT"""'], {}), "('goodZToMuMuSameCharge1HLT')\n", (7152, 7181), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1254, 1297), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuAtLeast1HLTLoose"""'], {}), "('goodZToMuMuAtLeast1HLTLoose')\n", (1266, 1297), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1387, 1425), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""goodZToMuMuAtLeast1HLT"""'], {}), "('goodZToMuMuAtLeast1HLT')\n", (1399, 1425), True, 'import FWCore.ParameterSet.Config as cms\n'), ((196, 221), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(0.0)'], {}), '(0.0)\n', (216, 221), True, 'import FWCore.ParameterSet.Config as cms\n'), ((233, 260), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(200.0)'], {}), '(200.0)\n', (253, 260), True, 'import FWCore.ParameterSet.Config as cms\n'), ((274, 298), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(200)'], {}), '(200)\n', (293, 298), True, 'import FWCore.ParameterSet.Config as cms\n'), ((311, 340), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""zMass"""'], {}), "('zMass')\n", (331, 340), True, 'import FWCore.ParameterSet.Config as cms\n'), ((360, 402), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""Z mass [GeV/c^{2}]"""'], {}), "('Z mass [GeV/c^{2}]')\n", (380, 402), True, 'import FWCore.ParameterSet.Config as cms\n'), ((423, 451), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""mass"""'], {}), "('mass')\n", (443, 451), True, 'import FWCore.ParameterSet.Config as cms\n'), ((483, 508), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(0.0)'], {}), '(0.0)\n', (503, 508), True, 'import FWCore.ParameterSet.Config as cms\n'), ((520, 547), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(200.0)'], {}), '(200.0)\n', (540, 547), True, 'import FWCore.ParameterSet.Config as cms\n'), ((561, 585), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(200)'], {}), '(200)\n', (580, 585), True, 'import FWCore.ParameterSet.Config as cms\n'), ((598, 627), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""mu1Pt"""'], {}), "('mu1Pt')\n", (618, 627), True, 'import FWCore.ParameterSet.Config as cms\n'), ((647, 697), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""Highest muon p_{t} [GeV/c]"""'], {}), "('Highest muon p_{t} [GeV/c]')\n", (667, 697), True, 'import 
FWCore.ParameterSet.Config as cms\n'), ((718, 776), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""max(daughter(0).pt,daughter(1).pt)"""'], {}), "('max(daughter(0).pt,daughter(1).pt)')\n", (738, 776), True, 'import FWCore.ParameterSet.Config as cms\n'), ((808, 833), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(0.0)'], {}), '(0.0)\n', (828, 833), True, 'import FWCore.ParameterSet.Config as cms\n'), ((845, 872), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(200.0)'], {}), '(200.0)\n', (865, 872), True, 'import FWCore.ParameterSet.Config as cms\n'), ((886, 910), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(200)'], {}), '(200)\n', (905, 910), True, 'import FWCore.ParameterSet.Config as cms\n'), ((923, 952), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""mu2Pt"""'], {}), "('mu2Pt')\n", (943, 952), True, 'import FWCore.ParameterSet.Config as cms\n'), ((972, 1021), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""Lowest muon p_{t} [GeV/c]"""'], {}), "('Lowest muon p_{t} [GeV/c]')\n", (992, 1021), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1042, 1100), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""min(daughter(0).pt,daughter(1).pt)"""'], {}), "('min(daughter(0).pt,daughter(1).pt)')\n", (1062, 1100), True, 'import FWCore.ParameterSet.Config as cms\n')] |
# Copyright 2019 Graphcore Ltd.
# coding=utf-8
import tensorflow as tf
from utils.optimisers import VcdRMSPropOptimizer
def vcd_lr_schedule(base_lr, current_step, total_steps, n_epoch, iter_timescale=15000, decay_factor=0.9):
"""
Exponential LR decay: lr <- lr * 0.9 applied every 15000 iterations
"""
n_timescales = tf.cast(current_step // iter_timescale, tf.float32)
lr = base_lr * decay_factor ** n_timescales
return lr
optimiser_configs = {
'vcd': [VcdRMSPropOptimizer,
{'decay': 0.9,
'epsilon': 1.,
'base_learning_rate': {'encoder': {'mean': 5e-4,
'std': 2.5e-4},
'decoder': 5e-4},
'learning_rate_func': vcd_lr_schedule}]
}
| [
"tensorflow.cast"
] | [((336, 387), 'tensorflow.cast', 'tf.cast', (['(current_step // iter_timescale)', 'tf.float32'], {}), '(current_step // iter_timescale, tf.float32)\n', (343, 387), True, 'import tensorflow as tf\n')] |
import sys
import os
if sys.platform == 'linux':
sys.path.append('/n/groups/patel/samuel/Aging')
elif sys.platform == 'darwin':
sys.path.append('/Users/samuel/Desktop/Aging')
from aging.model.environment_predictor import EnvironmentPredictor
name = sys.argv[1]
n_iter = int(sys.argv[2])
target_dataset = sys.argv[3]
input_dataset = sys.argv[4]
n_splits = int(sys.argv[5])
hyperparameters = dict()
hyperparameters['name'] = name
hyperparameters['n_splits'] = n_splits
hyperparameters['n_iter'] = n_iter
hyperparameters['target_dataset'] = target_dataset
hyperparameters['input_dataset'] = input_dataset
print(hyperparameters)
gp = EnvironmentPredictor(name, -1, n_splits, n_iter, target_dataset, input_dataset, -1)
print("Loading Dataset")
df = gp.load_dataset().dropna()
print("Dataset Loaded, optimizing hyper")
#df_scaled = gp.normalise_dataset(df)
feature_importance_cols = gp.feature_importance(df)
print("Feature importance over, saving file")
gp.save_features(feature_importance_cols)
print("task complete")
| [
"sys.path.append",
"aging.model.environment_predictor.EnvironmentPredictor"
] | [((648, 735), 'aging.model.environment_predictor.EnvironmentPredictor', 'EnvironmentPredictor', (['name', '(-1)', 'n_splits', 'n_iter', 'target_dataset', 'input_dataset', '(-1)'], {}), '(name, -1, n_splits, n_iter, target_dataset,\n input_dataset, -1)\n', (668, 735), False, 'from aging.model.environment_predictor import EnvironmentPredictor\n'), ((55, 102), 'sys.path.append', 'sys.path.append', (['"""/n/groups/patel/samuel/Aging"""'], {}), "('/n/groups/patel/samuel/Aging')\n", (70, 102), False, 'import sys\n'), ((138, 184), 'sys.path.append', 'sys.path.append', (['"""/Users/samuel/Desktop/Aging"""'], {}), "('/Users/samuel/Desktop/Aging')\n", (153, 184), False, 'import sys\n')] |
# coding=utf-8
from __future__ import absolute_import, division, print_function
import logging
import argparse
import os
import random
import numpy as np
from datetime import timedelta
import torch
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from models.modeling import VisionTransformer, CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.data_utils import get_loader
from utils.dist_util import get_world_size
logger = logging.getLogger(__name__)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def test(model, args):
_, test_loader = get_loader(args)
eval_losses = AverageMeter()
logger.info("***** Running Test *****")
model.eval()
all_preds, all_label = [], []
epoch_iterator = tqdm(test_loader,
desc="Testing... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
loss_fct = torch.nn.CrossEntropyLoss()
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
x, y = batch
with torch.no_grad():
logits = model(x)[0]
eval_loss = loss_fct(logits, y)
eval_losses.update(eval_loss.item())
preds = torch.argmax(logits, dim=-1)
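        # Accumulate predictions and labels across batches to compute overall accuracy afterwards.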
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0
)
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0
)
epoch_iterator.set_description("Testing... (loss=%2.5f)" % eval_losses.val)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
print("\n")
print("Testing Results")
print("Test Loss: %2.5f" % eval_losses.avg)
print("Test Accuracy: %2.5f" % accuracy)
f = open("./test_result/"+args.model+".txt", 'w')
f.write("##Testing Results##\n\n")
f.write("Model name: %s\n" % args.model)
f.write("Test Loss: %2.5f\n" % eval_losses.avg)
f.write("Test Accuracy: %2.5f\n" % accuracy)
f.close()
return
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument("--img_size", default=224, type=int,
help="Resolution size")
parser.add_argument("--dataset", choices=["cifar10", "cifar100"], default="cifar10",
help="Which downstream task.")
parser.add_argument("--train_batch_size", default=512, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int,
help="Total batch size for eval.")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--model", type=str, default="ViT_1st")
args = parser.parse_args()
device = torch.device("cuda")
args.device = device
config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 100
model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes)
MODEL_PATH = "./output/" + args.model+".bin"
model.load_state_dict(torch.load(MODEL_PATH))
model.to(args.device)
model.eval()
test(model, args)
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"torch.load",
"tqdm.tqdm",
"models.modeling.VisionTransformer",
"torch.argmax",
"torch.no_grad",
"utils.data_utils.get_loader",
"torch.device"
] | [((605, 632), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (622, 632), False, 'import logging\n'), ((1144, 1160), 'utils.data_utils.get_loader', 'get_loader', (['args'], {}), '(args)\n', (1154, 1160), False, 'from utils.data_utils import get_loader\n'), ((1312, 1452), 'tqdm.tqdm', 'tqdm', (['test_loader'], {'desc': '"""Testing... (loss=X.X)"""', 'bar_format': '"""{l_bar}{r_bar}"""', 'dynamic_ncols': '(True)', 'disable': '(args.local_rank not in [-1, 0])'}), "(test_loader, desc='Testing... (loss=X.X)', bar_format='{l_bar}{r_bar}',\n dynamic_ncols=True, disable=args.local_rank not in [-1, 0])\n", (1316, 1452), False, 'from tqdm import tqdm\n'), ((1568, 1595), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1593, 1595), False, 'import torch\n'), ((2952, 2977), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2975, 2977), False, 'import argparse\n'), ((4008, 4028), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4020, 4028), False, 'import torch\n'), ((4163, 4249), 'models.modeling.VisionTransformer', 'VisionTransformer', (['config', 'args.img_size'], {'zero_head': '(True)', 'num_classes': 'num_classes'}), '(config, args.img_size, zero_head=True, num_classes=\n num_classes)\n', (4180, 4249), False, 'from models.modeling import VisionTransformer, CONFIGS\n'), ((4320, 4342), 'torch.load', 'torch.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (4330, 4342), False, 'import torch\n'), ((1736, 1751), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1749, 1751), False, 'import torch\n'), ((1901, 1929), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (1913, 1929), False, 'import torch\n')] |
# Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
# pylint: disable=redefined-outer-name
"""
Test the hash calculation and checking functions.
"""
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
from ..core import Pooch
from ..hashes import (
make_registry,
file_hash,
hash_matches,
)
from .utils import check_tiny_data, mirror_directory
DATA_DIR = str(Path(__file__).parent / "data" / "store")
REGISTRY = (
"tiny-data.txt baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d\n"
)
REGISTRY_RECURSIVE = (
"subdir/tiny-data.txt baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d\n"
"tiny-data.txt baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d\n"
)
TINY_DATA_HASHES_HASHLIB = {
"sha1": "c03148994acd89317915ea2f2d080d6dd127aa09",
"sha256": "baee0894dba14b12085eacb204284b97e362f4f3e5a5807693cc90ef415c1b2d",
"md5": "70e2afd3fd7e336ae478b1e740a5f08e",
}
TINY_DATA_HASHES_XXH = {
"xxh128": "0267d220db258fffb0c567c0ecd1b689",
"xxh3_128": "0267d220db258fffb0c567c0ecd1b689",
"xxh64": "f843815fe57948fa",
"xxh3_64": "811e3f2a12aec53f",
"xxh32": "98d6f1a2",
}
TINY_DATA_HASHES = TINY_DATA_HASHES_HASHLIB.copy()
TINY_DATA_HASHES.update(TINY_DATA_HASHES_XXH)
@pytest.fixture
def data_dir_mirror(tmp_path):
"""
Mirror the test data folder on a temporary directory. Needed to avoid
permission errors when pooch is installed on a non-writable path.
"""
return mirror_directory(DATA_DIR, tmp_path)
def test_make_registry(data_dir_mirror):
"Check that the registry builder creates the right file names and hashes"
outfile = NamedTemporaryFile(delete=False)
# Need to close the file before writing to it.
outfile.close()
try:
make_registry(data_dir_mirror, outfile.name, recursive=False)
with open(outfile.name) as fout:
registry = fout.read()
assert registry == REGISTRY
# Check that the registry can be used.
pup = Pooch(path=data_dir_mirror, base_url="some bogus URL", registry={})
pup.load_registry(outfile.name)
true = str(data_dir_mirror / "tiny-data.txt")
fname = pup.fetch("tiny-data.txt")
assert true == fname
check_tiny_data(fname)
finally:
os.remove(outfile.name)
def test_make_registry_recursive(data_dir_mirror):
"Check that the registry builder works in recursive mode"
outfile = NamedTemporaryFile(delete=False)
# Need to close the file before writing to it.
outfile.close()
try:
make_registry(data_dir_mirror, outfile.name, recursive=True)
with open(outfile.name) as fout:
registry = fout.read()
assert registry == REGISTRY_RECURSIVE
# Check that the registry can be used.
pup = Pooch(path=data_dir_mirror, base_url="some bogus URL", registry={})
pup.load_registry(outfile.name)
assert str(data_dir_mirror / "tiny-data.txt") == pup.fetch("tiny-data.txt")
check_tiny_data(pup.fetch("tiny-data.txt"))
true = str(data_dir_mirror / "subdir" / "tiny-data.txt")
assert true == pup.fetch("subdir/tiny-data.txt")
check_tiny_data(pup.fetch("subdir/tiny-data.txt"))
finally:
os.remove(outfile.name)
def test_file_hash_invalid_algorithm():
"Test an invalid hashing algorithm"
with pytest.raises(ValueError) as exc:
file_hash(fname="something", alg="blah")
assert "'blah'" in str(exc.value)
@pytest.mark.parametrize(
"alg,expected_hash",
list(TINY_DATA_HASHES.items()),
ids=list(TINY_DATA_HASHES.keys()),
)
def test_file_hash(alg, expected_hash):
"Test the hash calculation using hashlib and xxhash"
if alg.startswith("xxh"):
pytest.importorskip("xxhash")
fname = os.path.join(DATA_DIR, "tiny-data.txt")
check_tiny_data(fname)
returned_hash = file_hash(fname, alg)
assert returned_hash == expected_hash
@pytest.mark.parametrize(
"alg,expected_hash",
list(TINY_DATA_HASHES.items()),
ids=list(TINY_DATA_HASHES.keys()),
)
def test_hash_matches(alg, expected_hash):
"Make sure the hash checking function works"
if alg.startswith("xxh"):
pytest.importorskip("xxhash")
fname = os.path.join(DATA_DIR, "tiny-data.txt")
check_tiny_data(fname)
# Check if the check passes
known_hash = f"{alg}:{expected_hash}"
assert hash_matches(fname, known_hash)
# And also if it fails
known_hash = f"{alg}:blablablabla"
assert not hash_matches(fname, known_hash)
@pytest.mark.parametrize(
"alg,expected_hash",
list(TINY_DATA_HASHES_HASHLIB.items()),
ids=list(TINY_DATA_HASHES_HASHLIB.keys()),
)
def test_hash_matches_strict(alg, expected_hash):
"Make sure the hash checking function raises an exception if strict"
fname = os.path.join(DATA_DIR, "tiny-data.txt")
check_tiny_data(fname)
# Check if the check passes
known_hash = f"{alg}:{expected_hash}"
assert hash_matches(fname, known_hash, strict=True)
# And also if it fails
bad_hash = f"{alg}:blablablabla"
with pytest.raises(ValueError) as error:
hash_matches(fname, bad_hash, strict=True, source="Neverland")
assert "Neverland" in str(error.value)
with pytest.raises(ValueError) as error:
hash_matches(fname, bad_hash, strict=True, source=None)
assert fname in str(error.value)
def test_hash_matches_none():
"The hash checking function should always returns True if known_hash=None"
fname = os.path.join(DATA_DIR, "tiny-data.txt")
assert hash_matches(fname, known_hash=None)
# Should work even if the file is invalid
assert hash_matches(fname="", known_hash=None)
# strict should cause an error if this wasn't working
assert hash_matches(fname, known_hash=None, strict=True)
@pytest.mark.parametrize(
"alg,expected_hash",
list(TINY_DATA_HASHES_HASHLIB.items()),
ids=list(TINY_DATA_HASHES_HASHLIB.keys()),
)
def test_hash_matches_uppercase(alg, expected_hash):
"Hash matching should be independent of upper or lower case"
fname = os.path.join(DATA_DIR, "tiny-data.txt")
check_tiny_data(fname)
# Check if the check passes
known_hash = f"{alg}:{expected_hash.upper()}"
assert hash_matches(fname, known_hash, strict=True)
# And also if it fails
with pytest.raises(ValueError) as error:
hash_matches(fname, known_hash[:-5], strict=True, source="Neverland")
assert "Neverland" in str(error.value)
| [
"pathlib.Path",
"os.path.join",
"pytest.importorskip",
"pytest.raises",
"tempfile.NamedTemporaryFile",
"os.remove"
] | [((1861, 1893), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (1879, 1893), False, 'from tempfile import NamedTemporaryFile\n'), ((2656, 2688), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2674, 2688), False, 'from tempfile import NamedTemporaryFile\n'), ((4010, 4049), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""tiny-data.txt"""'], {}), "(DATA_DIR, 'tiny-data.txt')\n", (4022, 4049), False, 'import os\n'), ((4463, 4502), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""tiny-data.txt"""'], {}), "(DATA_DIR, 'tiny-data.txt')\n", (4475, 4502), False, 'import os\n'), ((5041, 5080), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""tiny-data.txt"""'], {}), "(DATA_DIR, 'tiny-data.txt')\n", (5053, 5080), False, 'import os\n'), ((5730, 5769), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""tiny-data.txt"""'], {}), "(DATA_DIR, 'tiny-data.txt')\n", (5742, 5769), False, 'import os\n'), ((6310, 6349), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""tiny-data.txt"""'], {}), "(DATA_DIR, 'tiny-data.txt')\n", (6322, 6349), False, 'import os\n'), ((2503, 2526), 'os.remove', 'os.remove', (['outfile.name'], {}), '(outfile.name)\n', (2512, 2526), False, 'import os\n'), ((3467, 3490), 'os.remove', 'os.remove', (['outfile.name'], {}), '(outfile.name)\n', (3476, 3490), False, 'import os\n'), ((3582, 3607), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3595, 3607), False, 'import pytest\n'), ((3968, 3997), 'pytest.importorskip', 'pytest.importorskip', (['"""xxhash"""'], {}), "('xxhash')\n", (3987, 3997), False, 'import pytest\n'), ((4421, 4450), 'pytest.importorskip', 'pytest.importorskip', (['"""xxhash"""'], {}), "('xxhash')\n", (4440, 4450), False, 'import pytest\n'), ((5311, 5336), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5324, 5336), False, 'import pytest\n'), ((5470, 5495), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5483, 5495), False, 'import pytest\n'), ((6551, 6576), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6564, 6576), False, 'import pytest\n'), ((584, 598), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (588, 598), False, 'from pathlib import Path\n')] |
# -*- coding:utf-8 -*-
import numpy as np
def sin_sin(x,y):
return 1000*abs(np.sin(x/2000*np.pi) + np.sin(y/2000.0*np.pi))+100
| [
"numpy.sin"
] | [((80, 104), 'numpy.sin', 'np.sin', (['(x / 2000 * np.pi)'], {}), '(x / 2000 * np.pi)\n', (86, 104), True, 'import numpy as np\n'), ((103, 129), 'numpy.sin', 'np.sin', (['(y / 2000.0 * np.pi)'], {}), '(y / 2000.0 * np.pi)\n', (109, 129), True, 'import numpy as np\n')] |
import codecs
from solthiruthi.dictionary import *
from tamil import wordutils
TVU, TVU_size = DictionaryBuilder.create(TamilVU)
ag, ag2 = wordutils.anagrams_in_dictionary(TVU)
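# Each key k from ag indexes a group ag2[k] of dictionary words that are anagrams of one another.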
with codecs.open("demo.txt", "w", "utf-8") as fp:
itr = 1
for k, c in ag:
v = ag2[k]
fp.write("%03d) %s\n" % (itr, " | ".join(v)))
itr += 1
| [
"codecs.open",
"tamil.wordutils.anagrams_in_dictionary"
] | [((141, 178), 'tamil.wordutils.anagrams_in_dictionary', 'wordutils.anagrams_in_dictionary', (['TVU'], {}), '(TVU)\n', (173, 178), False, 'from tamil import wordutils\n'), ((184, 221), 'codecs.open', 'codecs.open', (['"""demo.txt"""', '"""w"""', '"""utf-8"""'], {}), "('demo.txt', 'w', 'utf-8')\n", (195, 221), False, 'import codecs\n')] |
import numpy as np
import datetime
import smtplib
import logging
import time
import csv
import cv2
print ("======================START======================")
all_count = 0 # total number of frames processed
true_count = 0 # total number of faces detected
#open result CSV file
file = open('./result/res_Insert_name.csv', 'w')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
one_m_timer_start = time.time()
while 1:
s = time.clock() #Start time
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
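    # Detect faces in the grayscale frame (scaleFactor=1.3, minNeighbors=5)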
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
all_count = all_count + 1 #Plus finding count
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
true_count = true_count + 1
e = time.clock() #Finish time
msg = str(s) + ',' + str(e) + ',' + str(e-s) + ',' + str(true_count) +'\n'
    file.write(msg) # write start time, end time, elapsed time, and face detection count
print ("Detection Face Number : ", true_count)
cv2.imshow('img',img)
    k = cv2.waitKey(30) & 0xff # press ESC to exit
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
file.close()
print ("All count :" , all_count) #show all_count
print ("Detection count :" , true_count) #show detection count
print ("======================END======================")
# :: Last Edit ::
# :: 2018-04-03 ::
# :: Poberlater ::
| [
"cv2.rectangle",
"time.clock",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"time.time"
] | [((434, 494), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (455, 494), False, 'import cv2\n'), ((501, 520), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (517, 520), False, 'import cv2\n'), ((542, 553), 'time.time', 'time.time', ([], {}), '()\n', (551, 553), False, 'import time\n'), ((1422, 1445), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1443, 1445), False, 'import cv2\n'), ((571, 583), 'time.clock', 'time.clock', ([], {}), '()\n', (581, 583), False, 'import time\n'), ((634, 671), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (646, 671), False, 'import cv2\n'), ((1260, 1282), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (1270, 1282), False, 'import cv2\n'), ((816, 874), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (829, 874), False, 'import cv2\n'), ((987, 999), 'time.clock', 'time.clock', ([], {}), '()\n', (997, 999), False, 'import time\n'), ((1290, 1305), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (1301, 1305), False, 'import cv2\n')] |
"""
Scraping Tools
Provides tools for getting data onto/off the Pi and submitting results back to Google Drive.
"""
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import requests
import time
# for twint
import twint
import nest_asyncio
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
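    # Google Drive may return a confirmation token instead of the file content; if so, repeat the request with it.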
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def download_files():
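    # Each CSV row pairs a Drive file id with the local destination path, separated by a comma.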
with open('download_ids_and_locations.csv') as file:
ids_and_locations = [line.rstrip('\n').split(',')
for line in file.readlines()]
for i in range(len(ids_and_locations)):
file_id = ids_and_locations[i][0]
destination = ids_and_locations[i][1]
download_file_from_google_drive(file_id, destination)
def upload_files():
gauth = GoogleAuth()
drive = GoogleDrive(gauth)
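    # Each CSV row: destination folder id, id of the existing Drive file to update, local path of the file to upload.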
with open('upload_ids_and_locations.csv') as file:
ids_and_locations = [line.rstrip('\n').split(',')
for line in file.readlines()]
for i in range(len(ids_and_locations)):
gfile = drive.CreateFile({'parents': [{'id': ids_and_locations[i][0]}],
'id': ids_and_locations[i][1]})
filename = ids_and_locations[i][2].split('/')
filename = filename[len(filename)-1]
gfile.SetContentFile(filename)
gfile.Upload()
time.sleep(5)
def scrape_twitter():
nest_asyncio.apply()
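    # nest_asyncio lets twint's asyncio loop run inside an already-running event loop (e.g. a notebook).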
file = open('accountList.txt')
text = file.readlines()
file.close()
userids = [userid.strip('\n') for userid in text]
broken_ids = list()
count=0
while count < len(userids) - 1:
if count % 250 == 0: print(count, 'usernames reached.')
try:
c = twint.Config()
c.Username = userids[count]
c.Limit = 100
c.Store_csv = True
c.Output = 'TweetData/' + userids[count] + ".csv"
c.Hide_output = True
twint.run.Search(c)
del c
time.sleep(15)
count+=1
except ValueError:
broken_ids.append(userids[count])
count+=1 | [
"twint.Config",
"requests.Session",
"pydrive.drive.GoogleDrive",
"time.sleep",
"pydrive.auth.GoogleAuth",
"twint.run.Search",
"nest_asyncio.apply"
] | [((388, 406), 'requests.Session', 'requests.Session', ([], {}), '()\n', (404, 406), False, 'import requests\n'), ((1590, 1602), 'pydrive.auth.GoogleAuth', 'GoogleAuth', ([], {}), '()\n', (1600, 1602), False, 'from pydrive.auth import GoogleAuth\n'), ((1625, 1643), 'pydrive.drive.GoogleDrive', 'GoogleDrive', (['gauth'], {}), '(gauth)\n', (1636, 1643), False, 'from pydrive.drive import GoogleDrive\n'), ((2262, 2282), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (2280, 2282), False, 'import nest_asyncio\n'), ((2212, 2225), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2222, 2225), False, 'import time\n'), ((2606, 2620), 'twint.Config', 'twint.Config', ([], {}), '()\n', (2618, 2620), False, 'import twint\n'), ((2838, 2857), 'twint.run.Search', 'twint.run.Search', (['c'], {}), '(c)\n', (2854, 2857), False, 'import twint\n'), ((2888, 2902), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (2898, 2902), False, 'import time\n')] |
from __future__ import division
import numpy as np
from path import Path
from imageio import imread
from skimage.transform import resize as imresize
from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans
from datetime import datetime
class KittiRawLoader(object):
def __init__(self,
dataset_dir,
static_frames_file=None,
img_height=128,
img_width=416,
min_disp=0.2,
get_depth=False,
get_pose=False,
depth_size_ratio=1):
dir_path = Path(__file__).realpath().dirname()
test_scene_file = dir_path/'test_scenes.txt'
self.from_speed = static_frames_file is None
if static_frames_file is not None:
self.collect_static_frames(static_frames_file)
with open(test_scene_file, 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.cam_ids = ['02', '03']
self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
self.min_disp = min_disp
self.get_depth = get_depth
self.get_pose = get_pose
self.depth_size_ratio = depth_size_ratio
self.collect_train_folders()
def collect_static_frames(self, static_frames_file):
with open(static_frames_file, 'r') as f:
frames = f.readlines()
self.static_frames = {}
for fr in frames:
if fr == '\n':
continue
date, drive, frame_id = fr.split(' ')
curr_fid = '%.10d' % (np.int(frame_id[:-1]))
if drive not in self.static_frames.keys():
self.static_frames[drive] = []
self.static_frames[drive].append(curr_fid)
def collect_train_folders(self):
self.scenes = []
for date in self.date_list:
drive_set = (self.dataset_dir/date).dirs()
for dr in drive_set:
if dr.name[:-5] not in self.test_scenes:
self.scenes.append(dr)
def collect_scenes(self, drive):
train_scenes = []
for c in self.cam_ids:
oxts = sorted((drive/'oxts'/'data').files('*.txt'))
with open(drive/'oxts'/'timestamps.txt', 'r') as f:
times = [datetime.strptime(time_string[:-4], "%Y-%m-%d %H:%M:%S.%f") for time_string in f.readlines()]
scene_data = {'cid': c,
'dir': drive,
'speed': [],
'time': [t.timestamp() for t in times],
'frame_id': [],
'pose': [],
'rel_path': drive.name + '_' + c}
scale = None
origin = None
imu2velo = read_calib_file(drive.parent/'calib_imu_to_velo.txt')
velo2cam = read_calib_file(drive.parent/'calib_velo_to_cam.txt')
cam2cam = read_calib_file(drive.parent/'calib_cam_to_cam.txt')
velo2cam_mat = transform_from_rot_trans(velo2cam['R'], velo2cam['T'])
imu2velo_mat = transform_from_rot_trans(imu2velo['R'], imu2velo['T'])
cam_2rect_mat = transform_from_rot_trans(cam2cam['R_rect_00'], np.zeros(3))
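            # Chain IMU -> velodyne -> camera -> rectified camera into a single transform.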
imu2cam = cam_2rect_mat @ velo2cam_mat @ imu2velo_mat
for n, f in enumerate(oxts):
metadata = np.genfromtxt(f)
speed = metadata[8:11]
scene_data['speed'].append(speed)
scene_data['frame_id'].append('{:010d}'.format(n))
lat = metadata[0]
if scale is None:
scale = np.cos(lat * np.pi / 180.)
pose_matrix = pose_from_oxts_packet(metadata[:6], scale)
if origin is None:
origin = pose_matrix
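                # Pose of this frame relative to the first frame, mapped from the IMU frame into the camera frame.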
odo_pose = imu2cam @ np.linalg.inv(origin) @ pose_matrix @ np.linalg.inv(imu2cam)
scene_data['pose'].append(odo_pose[:3])
sample = self.load_image(scene_data, 0)
if sample is None:
return []
scene_data['P_rect'] = self.get_P_rect(scene_data, sample[1], sample[2])
scene_data['intrinsics'] = scene_data['P_rect'][:, :3]
train_scenes.append(scene_data)
return train_scenes
def get_scene_imgs(self, scene_data):
def construct_sample(scene_data, i, frame_id):
sample = {"img": self.load_image(scene_data, i)[0], "id": frame_id}
if self.get_depth:
sample['depth'] = self.get_depth_map(scene_data, i)
if self.get_pose:
sample['pose'] = scene_data['pose'][i]
return sample
if self.from_speed:
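            # Emit a frame only once the accumulated motion (cum_displacement) exceeds min_disp, then reset the accumulator.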
cum_displacement = np.zeros(3)
for i, (speed1, speed2, t1, t2) in enumerate(zip(scene_data['speed'][1:],
scene_data['speed'][:-1],
scene_data['time'][1:],
scene_data['time'][:-1])):
print(speed1, speed2, t1, t2)
cum_displacement += 0.5*(speed1 + speed2) / (t2-t1)
disp_mag = np.linalg.norm(cum_displacement)
if disp_mag > self.min_disp:
frame_id = scene_data['frame_id'][i]
yield construct_sample(scene_data, i, frame_id)
cum_displacement *= 0
else: # from static frame file
drive = str(scene_data['dir'].name)
for (i, frame_id) in enumerate(scene_data['frame_id']):
if (drive not in self.static_frames.keys()) or (frame_id not in self.static_frames[drive]):
yield construct_sample(scene_data, i, frame_id)
def get_P_rect(self, scene_data, zoom_x, zoom_y):
calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
filedata = read_calib_file(calib_file)
P_rect = np.reshape(filedata['P_rect_' + scene_data['cid']], (3, 4))
P_rect[0] *= zoom_x
P_rect[1] *= zoom_y
return P_rect
def load_image(self, scene_data, tgt_idx):
img_file = scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png'
if not img_file.isfile():
return None
img = imread(img_file)
zoom_y = self.img_height/img.shape[0]
zoom_x = self.img_width/img.shape[1]
img = imresize(img, (self.img_height, self.img_width))
# workaround for skimage (float [0 .. 1]) and imageio (uint8 [0 .. 255]) interoperability
img = (img * 255).astype(np.uint8)
return img, zoom_x, zoom_y
def get_depth_map(self, scene_data, tgt_idx):
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
calib_dir = scene_data['dir'].parent
cam2cam = read_calib_file(calib_dir/'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir/'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
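        # Fold the rectifying rotation into the velodyne->camera extrinsics.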
velo2cam = np.dot(R_cam2rect, velo2cam)
velo_file_name = scene_data['dir']/'velodyne_points'/'data'/'{}.bin'.format(scene_data['frame_id'][tgt_idx])
return generate_depth_map(velo_file_name, scene_data['P_rect'], velo2cam,
self.img_width, self.img_height, self.depth_size_ratio)
| [
"numpy.eye",
"kitti_util.read_calib_file",
"numpy.reshape",
"numpy.genfromtxt",
"datetime.datetime.strptime",
"path.Path",
"numpy.array",
"numpy.dot",
"kitti_util.generate_depth_map",
"kitti_util.transform_from_rot_trans",
"numpy.zeros",
"numpy.cos",
"numpy.linalg.norm",
"imageio.imread",
... | [((6194, 6221), 'kitti_util.read_calib_file', 'read_calib_file', (['calib_file'], {}), '(calib_file)\n', (6209, 6221), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((6239, 6298), 'numpy.reshape', 'np.reshape', (["filedata['P_rect_' + scene_data['cid']]", '(3, 4)'], {}), "(filedata['P_rect_' + scene_data['cid']], (3, 4))\n", (6249, 6298), True, 'import numpy as np\n'), ((6617, 6633), 'imageio.imread', 'imread', (['img_file'], {}), '(img_file)\n', (6623, 6633), False, 'from imageio import imread\n'), ((6739, 6787), 'skimage.transform.resize', 'imresize', (['img', '(self.img_height, self.img_width)'], {}), '(img, (self.img_height, self.img_width))\n', (6747, 6787), True, 'from skimage.transform import resize as imresize\n'), ((7097, 7106), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7103, 7106), True, 'import numpy as np\n'), ((7171, 7222), 'kitti_util.read_calib_file', 'read_calib_file', (["(calib_dir / 'calib_cam_to_cam.txt')"], {}), "(calib_dir / 'calib_cam_to_cam.txt')\n", (7186, 7222), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((7240, 7292), 'kitti_util.read_calib_file', 'read_calib_file', (["(calib_dir / 'calib_velo_to_cam.txt')"], {}), "(calib_dir / 'calib_velo_to_cam.txt')\n", (7255, 7292), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((7534, 7562), 'numpy.dot', 'np.dot', (['R_cam2rect', 'velo2cam'], {}), '(R_cam2rect, velo2cam)\n', (7540, 7562), True, 'import numpy as np\n'), ((7697, 7824), 'kitti_util.generate_depth_map', 'generate_depth_map', (['velo_file_name', "scene_data['P_rect']", 'velo2cam', 'self.img_width', 'self.img_height', 'self.depth_size_ratio'], {}), "(velo_file_name, scene_data['P_rect'], velo2cam, self.\n img_width, self.img_height, self.depth_size_ratio)\n", (7715, 7824), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((2993, 3048), 'kitti_util.read_calib_file', 'read_calib_file', (["(drive.parent / 'calib_imu_to_velo.txt')"], {}), "(drive.parent / 'calib_imu_to_velo.txt')\n", (3008, 3048), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3070, 3125), 'kitti_util.read_calib_file', 'read_calib_file', (["(drive.parent / 'calib_velo_to_cam.txt')"], {}), "(drive.parent / 'calib_velo_to_cam.txt')\n", (3085, 3125), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3146, 3200), 'kitti_util.read_calib_file', 'read_calib_file', (["(drive.parent / 'calib_cam_to_cam.txt')"], {}), "(drive.parent / 'calib_cam_to_cam.txt')\n", (3161, 3200), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3227, 3281), 'kitti_util.transform_from_rot_trans', 'transform_from_rot_trans', (["velo2cam['R']", "velo2cam['T']"], {}), "(velo2cam['R'], velo2cam['T'])\n", (3251, 3281), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3309, 3363), 'kitti_util.transform_from_rot_trans', 'transform_from_rot_trans', (["imu2velo['R']", "imu2velo['T']"], {}), "(imu2velo['R'], imu2velo['T'])\n", (3333, 3363), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, 
transform_from_rot_trans\n'), ((4974, 4985), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4982, 4985), True, 'import numpy as np\n'), ((1789, 1810), 'numpy.int', 'np.int', (['frame_id[:-1]'], {}), '(frame_id[:-1])\n', (1795, 1810), True, 'import numpy as np\n'), ((3439, 3450), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3447, 3450), True, 'import numpy as np\n'), ((3588, 3604), 'numpy.genfromtxt', 'np.genfromtxt', (['f'], {}), '(f)\n', (3601, 3604), True, 'import numpy as np\n'), ((3916, 3958), 'kitti_util.pose_from_oxts_packet', 'pose_from_oxts_packet', (['metadata[:6]', 'scale'], {}), '(metadata[:6], scale)\n', (3937, 3958), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((5473, 5505), 'numpy.linalg.norm', 'np.linalg.norm', (['cum_displacement'], {}), '(cum_displacement)\n', (5487, 5505), True, 'import numpy as np\n'), ((7423, 7447), 'numpy.array', 'np.array', (['[0, 0, 0, 1.0]'], {}), '([0, 0, 0, 1.0])\n', (7431, 7447), True, 'import numpy as np\n'), ((2504, 2563), 'datetime.datetime.strptime', 'datetime.strptime', (['time_string[:-4]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(time_string[:-4], '%Y-%m-%d %H:%M:%S.%f')\n", (2521, 2563), False, 'from datetime import datetime\n'), ((3858, 3885), 'numpy.cos', 'np.cos', (['(lat * np.pi / 180.0)'], {}), '(lat * np.pi / 180.0)\n', (3864, 3885), True, 'import numpy as np\n'), ((4111, 4133), 'numpy.linalg.inv', 'np.linalg.inv', (['imu2cam'], {}), '(imu2cam)\n', (4124, 4133), True, 'import numpy as np\n'), ((634, 648), 'path.Path', 'Path', (['__file__'], {}), '(__file__)\n', (638, 648), False, 'from path import Path\n'), ((4073, 4094), 'numpy.linalg.inv', 'np.linalg.inv', (['origin'], {}), '(origin)\n', (4086, 4094), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import feature_extraction as fe
""" source : https://www.census.gov/quickfacts/fact/table/alleghenycountypennsylvania/PST045216 """
CURR_YEAR = 2015
# gender
FEMALE_PERCENT = 0.517 # 4327
# MALE = 0.483 # 3134
# age
# BELOW_18 = 0.189 # 0
OVER_65_PERCENT = 0.18 # 4353
# OTHER = 0.631 # 3108
OTHER = 0.82
# race
WHITE = 0.805 # 3184
BLACK = 0.134 # 2294
ASIAN = 0.037 # 1244
# OTHER = 0.024 # 739
def draw_general_sample(num_samples, modified_patient_data, feature='gender', percent=[FEMALE_PERCENT]):
# check if num_samples is reasonable
if num_samples > len(modified_patient_data):
print('data points collected fewer than required!')
return None
# check if the feature categories and given number of percentages is correct
if not ((feature.lower() == 'gender' and len(percent) == 1) \
or (feature.lower() == 'age' and len(percent) == 1) \
or (feature.lower() == 'race' and len(percent) == 3)):
print('unmatched percentage!')
return None
# add age
_add_age(modified_patient_data)
# draw samples
if feature.lower() == 'gender':
FEMALE_PERCENT = percent[0]
# group patient data
female_need = int(num_samples * FEMALE_PERCENT)
male_need = int(num_samples * (1 - FEMALE_PERCENT))
female_group, male_group = _split_gender(modified_patient_data)
# get id
fp_id = np.random.choice(list(female_group.keys()), female_need)
        mp_id = np.random.choice(list(male_group.keys()), male_need)
        # get sample
        sample_chosen = {k : v for k, v in modified_patient_data.items() if k in fp_id or k in mp_id}
elif feature.lower() == 'age':
OVER_65_PERCENT = percent[0]
# group patient data
elder_need = int(num_samples * OVER_65_PERCENT)
adult_need = int(num_samples * (1 - OVER_65_PERCENT))
adult, elder = _split_age(modified_patient_data)
# get id
        ap_id = np.random.choice(list(adult.keys()), adult_need)
        ep_id = np.random.choice(list(elder.keys()), elder_need)
        # get sample
        sample_chosen = {k : v for k, v in modified_patient_data.items() if k in ap_id or k in ep_id}
elif feature.lower() == 'race':
WHITE = percent[0]
BLACK = percent[1]
ASIAN = percent[2]
OTHER = 1 - WHITE - BLACK - ASIAN
# group patient data
white_need = int(num_samples * WHITE)
black_need = int(num_samples * BLACK)
asian_need = int(num_samples * ASIAN)
other_need = int(num_samples * OTHER)
white, black, asian, other = _split_race(modified_patient_data)
# get id
w_id = np.random.choice(list(white.keys()), white_need)
b_id = np.random.choice(list(black.keys()), black_need)
a_id = np.random.choice(list(asian.keys()), asian_need)
o_id = np.random.choice(list(other.keys()), other_need)
# get sample
        sample_chosen = {k : v for k, v in modified_patient_data.items() if k in w_id or k in b_id or k in a_id or k in o_id}
return sample_chosen
def _add_age(modified_patient_data):
for pid in modified_patient_data:
data = modified_patient_data[pid]
birth_year = int(data['dob'].split('-')[0])
data['age'] = int(CURR_YEAR - birth_year)
def _split_gender(modified_patient_data):
female_group = {}
male_group = {}
for pid in modified_patient_data:
data = modified_patient_data[pid]
if data['gender'].lower() == 'female':
female_group[pid] = data
elif data['gender'].lower() == 'male':
male_group[pid] = data
elif np.random.randint(2): # Unknown case
female_group[pid] = data
else:
male_group[pid] = data
return female_group, male_group
def _split_age(single_group):
adult = {}
elder = {}
for pid in single_group:
data = single_group[pid]
if data['age'] > 65:
elder[pid] = data
else:
adult[pid] = data
return adult, elder
def _split_race(single_group):
white = {}
black = {}
asian = {}
other = {}
for pid in single_group:
data = single_group[pid]
if data['race'].lower() == 'white':
white[pid] = data
elif data['race'].lower() == 'black':
black[pid] = data
elif data['race'].lower() == 'asian':
asian[pid] = data
else:
other[pid] = data
return white, black, asian, other
if __name__ == "__main__":
    # draw_general_sample also needs the patient dict itself; a hedged synthetic call follows below.
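    # Hedged usage sketch: the two patient records below are made-up placeholders that only
    # mirror the keys this script reads ('dob', 'gender', 'race'); they are not project data.
    example_patients = {
        'p1': {'dob': '1950-01-01', 'gender': 'Female', 'race': 'White'},
        'p2': {'dob': '1990-06-15', 'gender': 'Male', 'race': 'Asian'},
    }
    print(draw_general_sample(2, example_patients, feature='gender', percent=[FEMALE_PERCENT]))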
| [
"numpy.random.randint"
] | [((3692, 3712), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (3709, 3712), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author zengxiaohui
# Datetime: 8/20/2021 8:39 AM
# @File:knn
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
X, y = datasets.load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
kNN_classifier = KNeighborsClassifier(n_neighbors=6)
kNN_classifier.fit(X_train,y_train)
kNN_classifier.predict(X_test)
best_score = 0.0
best_k = -1
for k in range(1,11):
knn_clf = KNeighborsClassifier(n_neighbors=k)
knn_clf.fit(X_train,y_train)
score = knn_clf.score(X_test,y_test)
if score > best_score:
best_k = k
best_score=score
print("best_k=",best_k)
print("best_score=",best_score)
| [
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier"
] | [((248, 283), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (266, 283), False, 'from sklearn import datasets\n'), ((319, 372), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': '(0)'}), '(X, y, test_size=0.4, random_state=0)\n', (335, 372), False, 'from sklearn.model_selection import train_test_split\n'), ((391, 426), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(6)'}), '(n_neighbors=6)\n', (411, 426), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((559, 594), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (579, 594), False, 'from sklearn.neighbors import KNeighborsClassifier\n')] |
#!/usr/bin/python3
from lab01_authserver_app.oauthclient import *
import json
import requests
import time
import logging
import http.client
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
pp = PasswordPlugin('admin', 'admin')
client = OAuthClient('http://127.0.0.1:39000', pp, 'debug_client', 'mysecret', 'localhost')
print('\nVerification using password plugin\n')
print('\n\n' + repr(client.verify()) + '\n')
print('\nissuing tokens...\n')
tokens = client.issue_tokens()
print('\n\ntokens:\n')
print('\n' + repr(tokens) + '\n')
tp = TokenPlugin(atoken = tokens['access_token'], rtoken = tokens['refresh_token'])
client.auth_plugin = tp
print('\nVerification using token plugin...\n')
print('\n\n' + repr(client.verify()) + '\n')
#time.sleep(1)
#print('\nVerification using token plugin again...\n')
#print('\n\n' + repr(client.verify()) + '\n')
print('\nrefreshing tokens...\n')
tokens = client.issue_tokens()
print('\n\ntokens:\n')
print('\n' + repr(tokens) + '\n')
print('\nme information...\n')
me = client.me()
print('\n\nme:\n')
print('\n' + repr(me) + '\n')
| [
"logging.basicConfig",
"logging.getLogger"
] | [((203, 224), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (222, 224), False, 'import logging\n'), ((284, 330), 'logging.getLogger', 'logging.getLogger', (['"""requests.packages.urllib3"""'], {}), "('requests.packages.urllib3')\n", (301, 330), False, 'import logging\n'), ((225, 244), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (242, 244), False, 'import logging\n')] |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
import test_subnets as SNET
LOG = logging.getLogger(__name__)
class FlatNetworksTestJSON(SNET.SubnetTestJSON):
_interface = 'json'
_provider_network_body = {
'name': data_utils.rand_name('FLAT-network'),
'provider:network_type': 'flat'}
@classmethod
def resource_setup(cls):
super(FlatNetworksTestJSON, cls).resource_setup()
def _create_network(self, _auto_clean_up=True, network_name=None,
**kwargs):
network_name = network_name or data_utils.rand_name('flat-netwk')
# self.create_network expect network_name
# self.admin_client.create_network()
# and self.client.create_network() expect name
post_body = {'name': network_name,
'provider:network_type': 'flat'}
post_body.update(kwargs)
LOG.debug("create FLAT network: %s", str(post_body))
body = self.admin_networks_client.create_network(**post_body)
network = body['network']
if _auto_clean_up:
self.addCleanup(self._try_delete_network, network['id'])
return network
@decorators.idempotent_id('dc2f2f46-0577-4e2a-b35d-3c8c8bbce5bf')
def test_create_network(self):
# Create a network as an admin user specifying the
# flat network type attribute
network = self._create_network()
# Verifies router:network_type parameter
self.assertIsNotNone(network['id'])
self.assertEqual(network.get('provider:network_type'), 'flat')
@decorators.idempotent_id('777fc335-b26c-42ea-9759-c71dff2ce1c6')
def test_update_network(self):
# Update flat network as an admin user specifying the
# flat network attribute
network = self._create_network(shared=True, _auto_clean_up=False)
self.assertEqual(network.get('shared'), True)
new_name = network['name'] + "-updated"
update_body = {'shared': False, 'name': new_name}
body = self.update_network(network['id'], **update_body)
updated_network = body['network']
# Verify that name and shared parameters were updated
self.assertEqual(updated_network['shared'], False)
self.assertEqual(updated_network['name'], new_name)
# get flat network attributes and verify them
body = self.show_network(network['id'])
updated_network = body['network']
# Verify that name and shared parameters were updated
self.assertEqual(updated_network['shared'], False)
self.assertEqual(updated_network['name'], new_name)
self.assertEqual(updated_network['status'], network['status'])
self.assertEqual(updated_network['subnets'], network['subnets'])
self._delete_network(network['id'])
@decorators.idempotent_id('1dfc1c11-e838-464c-85b2-ed5e4c477c64')
def test_list_networks(self):
# Create flat network
network = self._create_network(shared=True)
# List networks as a normal user and confirm it is available
body = self.list_networks(client=self.networks_client)
network_list = [net['id'] for net in body['networks']]
self.assertIn(network['id'], network_list)
update_body = {'shared': False}
body = self.update_network(network['id'], **update_body)
# List networks as a normal user and confirm it is not available
body = self.list_networks(client=self.networks_client)
network_list = [net['id'] for net in body['networks']]
self.assertNotIn(network['id'], network_list)
@decorators.idempotent_id('b5649fe2-a214-4105-8053-1825a877c45b')
def test_show_network_attributes(self):
# Create flat network
network = self._create_network(shared=True)
# Show a flat network as a normal user and confirm the
# flat network attribute is returned.
body = self.show_network(network['id'], client=self.networks_client)
show_net = body['network']
self.assertEqual(network['name'], show_net['name'])
self.assertEqual(network['id'], show_net['id'])
# provider attributes are for admin only
body = self.show_network(network['id'])
show_net = body['network']
net_attr_list = show_net.keys()
for attr in ('admin_state_up', 'port_security_enabled', 'shared',
'status', 'subnets', 'tenant_id', 'router:external',
'provider:network_type', 'provider:physical_network',
'provider:segmentation_id'):
self.assertIn(attr, net_attr_list)
| [
"tempest.lib.common.utils.data_utils.rand_name",
"tempest.lib.decorators.idempotent_id",
"oslo_log.log.getLogger"
] | [((791, 818), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (808, 818), True, 'from oslo_log import log as logging\n'), ((1877, 1941), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""dc2f2f46-0577-4e2a-b35d-3c8c8bbce5bf"""'], {}), "('dc2f2f46-0577-4e2a-b35d-3c8c8bbce5bf')\n", (1901, 1941), False, 'from tempest.lib import decorators\n'), ((2285, 2349), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""777fc335-b26c-42ea-9759-c71dff2ce1c6"""'], {}), "('777fc335-b26c-42ea-9759-c71dff2ce1c6')\n", (2309, 2349), False, 'from tempest.lib import decorators\n'), ((3521, 3585), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""1dfc1c11-e838-464c-85b2-ed5e4c477c64"""'], {}), "('1dfc1c11-e838-464c-85b2-ed5e4c477c64')\n", (3545, 3585), False, 'from tempest.lib import decorators\n'), ((4312, 4376), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""b5649fe2-a214-4105-8053-1825a877c45b"""'], {}), "('b5649fe2-a214-4105-8053-1825a877c45b')\n", (4336, 4376), False, 'from tempest.lib import decorators\n'), ((941, 977), 'tempest.lib.common.utils.data_utils.rand_name', 'data_utils.rand_name', (['"""FLAT-network"""'], {}), "('FLAT-network')\n", (961, 977), False, 'from tempest.lib.common.utils import data_utils\n'), ((1270, 1304), 'tempest.lib.common.utils.data_utils.rand_name', 'data_utils.rand_name', (['"""flat-netwk"""'], {}), "('flat-netwk')\n", (1290, 1304), False, 'from tempest.lib.common.utils import data_utils\n')] |
#!/usr/bin/env python3
"""
Check that PEP8 format is followed
Author: <NAME>
Email: <EMAIL>
:copyright: 2020 by Optionset authors, see AUTHORS for more details.
:license: GPLv3, see LICENSE for more details.
"""
import subprocess
def check_format(py_file_path):
"""Check format of Python file. """
print("="*60)
run_str = f"pycodestyle -v {py_file_path}"
subproc = subprocess.run(run_str, shell=True, capture_output=True,
check=False)
print(subproc.stdout.decode('UTF-8'), end='')
print("="*60)
check_format("../engutils/engutils.py")
check_format("runtests.py")
| [
"subprocess.run"
] | [((385, 454), 'subprocess.run', 'subprocess.run', (['run_str'], {'shell': '(True)', 'capture_output': '(True)', 'check': '(False)'}), '(run_str, shell=True, capture_output=True, check=False)\n', (399, 454), False, 'import subprocess\n')] |
import sys
sys.path.append('../vmdgadgets')
import vmdutil
import functools
def alt_dot_v(v1, v2):
return functools.reduce(
lambda i, j: i + j, [x * y for x, y in zip(v1, v2)])
vmdutil.vmdutil.dot_v = alt_dot_v
vmdutil.dot_v = alt_dot_v
class strexp():
def __init__(self, val):
self.val = val
def __mul__(self, other):
if other.val == '0' or self.val == '0':
return strexp('0')
elif other.val == '1':
return self
elif self.val == '1':
return other
else:
return strexp('(' + self.val + ')*(' + other.val + ')')
def __add__(self, other):
if other.val == '0':
return self
elif self.val == '0':
return other
else:
return strexp(self.val + '+' + other.val)
def __sub__(self, other):
if other.val == '0':
return self
elif self.val == '0':
return strexp('-' + other.val)
else:
return strexp(self.val + '-' + other.val)
def __neg__(self):
return strexp('-' + self.val)
def __repr__(self):
return self.val
if __name__ == '__main__':
zero = strexp('0')
one = strexp('1')
p = [strexp('sp'), zero, zero, strexp('cp')] # q(1, 0, 0, wx)
y = [zero, strexp('sy'), zero, strexp('cy')]
r = [zero, zero, strexp('sr'), strexp('cr')]
o = vmdutil.multiply_quaternion(
vmdutil.multiply_quaternion(r, p), y)
print('euler2quaternion of z-x-y(global)')
print(o)
print()
# rotx = [[1, 0, 0], [0, cx, -sx], [0, sx, cx]]
# roty = [[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]]
# rotz = [[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]]
cx = strexp('cx')
cy = strexp('cy')
cz = strexp('cz')
sx = strexp('sx')
sy = strexp('sy')
sz = strexp('sz')
print('euler2matrix of z-x-y(global)')
o = vmdutil.dot_m(
vmdutil.dot_m(
[[cz, -sz, zero], [sz, cz, zero], [zero, zero, one]],
[[one, zero, zero], [zero, cx, -sx], [zero, sx, cx]]),
[[cy, zero, sy], [zero, one, zero], [-sy, zero, cy]])
for r in o:
print(r)
| [
"vmdutil.multiply_quaternion",
"sys.path.append",
"vmdutil.dot_m"
] | [((11, 43), 'sys.path.append', 'sys.path.append', (['"""../vmdgadgets"""'], {}), "('../vmdgadgets')\n", (26, 43), False, 'import sys\n'), ((1449, 1482), 'vmdutil.multiply_quaternion', 'vmdutil.multiply_quaternion', (['r', 'p'], {}), '(r, p)\n', (1476, 1482), False, 'import vmdutil\n'), ((1927, 2052), 'vmdutil.dot_m', 'vmdutil.dot_m', (['[[cz, -sz, zero], [sz, cz, zero], [zero, zero, one]]', '[[one, zero, zero], [zero, cx, -sx], [zero, sx, cx]]'], {}), '([[cz, -sz, zero], [sz, cz, zero], [zero, zero, one]], [[one,\n zero, zero], [zero, cx, -sx], [zero, sx, cx]])\n', (1940, 2052), False, 'import vmdutil\n')] |
from pathlib import Path
from typing import List, Optional, Tuple
from app.api.meta.service import task_type_from_id
from fedot.core.data.data import DataTypesEnum, InputData
from fedot.core.repository.tasks import Task, TaskParams, TsForecastingParams
from flask import current_app
from utils import project_root
default_datasets = {
'scoring': {
'train': 'scoring/scoring_train.csv',
'test': 'scoring/scoring_test.csv',
'data_type': DataTypesEnum.table
},
'metocean': {
'train': 'metocean/metocean_train.csv',
'test': 'metocean/metocean_test.csv',
'data_type': DataTypesEnum.ts
},
'oil': {
'train': 'oil/oil_train.csv',
'test': 'oil/oil_test.csv',
'data_type': DataTypesEnum.table
}
}
data_types = {
'ts': DataTypesEnum.ts,
'table': DataTypesEnum.table,
'image': DataTypesEnum.image,
'text': DataTypesEnum.text,
}
def get_datasets_names() -> List[str]:
return list(default_datasets)
def get_dataset_metadata(dataset_name: str, sample_type: str) -> Tuple[int, int]:
data = get_input_data(dataset_name, sample_type)
if data is None:
raise ValueError(f'Data for dataset_name={dataset_name} with sample_type={sample_type} must exists')
if len(data.features.shape) > 1:
n_features, n_rows = data.features.shape[1], data.features.shape[0]
else:
n_features, n_rows = 1, len(data.features)
return n_features, n_rows
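# Hedged usage sketch (assumes the bundled demo CSVs exist under <project_root>/data):
# n_features, n_rows = get_dataset_metadata('scoring', 'train')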
def get_input_data(dataset_name: str, sample_type: str,
task_type: Optional[str] = None,
task_params: Optional[TaskParams] = None) -> Optional[InputData]:
try:
dataset = default_datasets[dataset_name]
data_path = dataset[sample_type]
if task_params is None and task_type == 'ts_forecasting':
# forecast_length should be defined
task_params = TsForecastingParams(forecast_length=30)
task = Task(task_type_from_id(task_type), task_params) if task_type is not None else None
file_path = Path(project_root(), 'data', data_path)
if dataset['data_type'] == DataTypesEnum.ts:
data = InputData.from_csv_time_series(file_path=file_path, task=task, target_column='target')
else:
data = InputData.from_csv(file_path=file_path, task=task, data_type=dataset['data_type'])
return data
except KeyError as ex:
print(f'Dataset {dataset_name} has no data for {sample_type}: {ex}')
return None
| [
"fedot.core.data.data.InputData.from_csv",
"app.api.meta.service.task_type_from_id",
"utils.project_root",
"fedot.core.repository.tasks.TsForecastingParams",
"fedot.core.data.data.InputData.from_csv_time_series"
] | [((1912, 1951), 'fedot.core.repository.tasks.TsForecastingParams', 'TsForecastingParams', ([], {'forecast_length': '(30)'}), '(forecast_length=30)\n', (1931, 1951), False, 'from fedot.core.repository.tasks import Task, TaskParams, TsForecastingParams\n'), ((2077, 2091), 'utils.project_root', 'project_root', ([], {}), '()\n', (2089, 2091), False, 'from utils import project_root\n'), ((2185, 2275), 'fedot.core.data.data.InputData.from_csv_time_series', 'InputData.from_csv_time_series', ([], {'file_path': 'file_path', 'task': 'task', 'target_column': '"""target"""'}), "(file_path=file_path, task=task,\n target_column='target')\n", (2215, 2275), False, 'from fedot.core.data.data import DataTypesEnum, InputData\n'), ((2305, 2392), 'fedot.core.data.data.InputData.from_csv', 'InputData.from_csv', ([], {'file_path': 'file_path', 'task': 'task', 'data_type': "dataset['data_type']"}), "(file_path=file_path, task=task, data_type=dataset[\n 'data_type'])\n", (2323, 2392), False, 'from fedot.core.data.data import DataTypesEnum, InputData\n'), ((1973, 2001), 'app.api.meta.service.task_type_from_id', 'task_type_from_id', (['task_type'], {}), '(task_type)\n', (1990, 2001), False, 'from app.api.meta.service import task_type_from_id\n')] |
import re
from data.scrape.link_extractors.create_extractor import create_extractor
from data.scrape.utils import clean_url
from .constants import ID
class Strategy:
def __init__(self, url_pattern, template=None, **extractor_args):
self.url_pattern = url_pattern.format(ID=ID)
self.url_regex = re.compile(
self.url_pattern.replace(".", r"\."), flags=re.IGNORECASE
)
self.extractor_args = extractor_args
self.guideline_url_template = template
def match_url(self, url):
url = clean_url(url)
return self.url_regex.search(url)
def matches_url(self, url):
return bool(self.match_url(url))
def create_link_extractor(self, url):
return create_extractor(url, allow_domains=[], **self.extractor_args)
def generate_guideline_urls(self, url, row):
if self.guideline_url_template is None:
return []
match = self.match_url(url)
urls = [self.guideline_url_template.format(**match.groupdict(), **row)]
urls = [url for url in urls if url]
return urls
def __repr__(self):
return f"<Strategy: {self.url_pattern}>"
| [
"data.scrape.link_extractors.create_extractor.create_extractor",
"data.scrape.utils.clean_url"
] | [((546, 560), 'data.scrape.utils.clean_url', 'clean_url', (['url'], {}), '(url)\n', (555, 560), False, 'from data.scrape.utils import clean_url\n'), ((735, 797), 'data.scrape.link_extractors.create_extractor.create_extractor', 'create_extractor', (['url'], {'allow_domains': '[]'}), '(url, allow_domains=[], **self.extractor_args)\n', (751, 797), False, 'from data.scrape.link_extractors.create_extractor import create_extractor\n')] |
from django.db import models
class Publisher(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, related_name='books')
publisher = models.ForeignKey(Publisher, related_name='books')
__test__ = {'one':"""
#
# RelatedManager
#
# First create a Publisher.
>>> p = Publisher.objects.create(name='Acme Publishing')
# Create a book through the publisher.
>>> book, created = p.books.get_or_create(name='The Book of Ed & Fred')
>>> created
True
# The publisher should have one book.
>>> p.books.count()
1
# Try get_or_create again, this time nothing should be created.
>>> book, created = p.books.get_or_create(name='The Book of Ed & Fred')
>>> created
False
# And the publisher should still have one book.
>>> p.books.count()
1
#
# ManyRelatedManager
#
# Add an author to the book.
>>> ed, created = book.authors.get_or_create(name='Ed')
>>> created
True
# Book should have one author.
>>> book.authors.count()
1
# Try get_or_create again, this time nothing should be created.
>>> ed, created = book.authors.get_or_create(name='Ed')
>>> created
False
# And the book should still have one author.
>>> book.authors.count()
1
# Add a second author to the book.
>>> fred, created = book.authors.get_or_create(name='Fred')
>>> created
True
# The book should have two authors now.
>>> book.authors.count()
2
# Create an Author not tied to any books.
>>> Author.objects.create(name='Ted')
<Author: Author object>
# There should be three Authors in total. The book object should have two.
>>> Author.objects.count()
3
>>> book.authors.count()
2
# Try creating a book through an author.
>>> ed.books.get_or_create(name="<NAME>", publisher=p)
(<Book: Book object>, True)
# Now Ed has two Books, Fred just one.
>>> ed.books.count()
2
>>> fred.books.count()
1
"""}
| [
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((72, 104), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (88, 104), False, 'from django.db import models\n'), ((145, 177), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (161, 177), False, 'from django.db import models\n'), ((216, 248), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (232, 248), False, 'from django.db import models\n'), ((263, 315), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Author'], {'related_name': '"""books"""'}), "(Author, related_name='books')\n", (285, 315), False, 'from django.db import models\n'), ((332, 382), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Publisher'], {'related_name': '"""books"""'}), "(Publisher, related_name='books')\n", (349, 382), False, 'from django.db import models\n')] |
import math
# convert a non-negative base-10 integer to its base-`base` string (assumes 2 <= base <= 10)
def base10to(x: int, base: int) -> str:
    if x == 0:
        return '0'
    s = ''
while x > 0:
s = str(x % base) + s
x = x // base
return s
# least common multiple (integer floor division avoids float precision loss on large values)
def lcm(a: int, b: int) -> int:
    return a * b // math.gcd(a, b)
# list of all primes up to and including n (sieve of Eratosthenes)
def make_primes(n: int) -> list:
    is_prime = [False, False] + [True] * (n - 1)
for i in range(2, int(n**0.5) + 1):
if not is_prime[i]: continue
for j in range(i * 2, n + 1, i):
is_prime[j] = False
return [i for i in range(n + 1) if is_prime[i]]
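# Hedged usage examples (added for illustration; not part of the original snippet):
# base10to(10, 2)  -> '1010'
# lcm(4, 6)        -> 12
# make_primes(10)  -> [2, 3, 5, 7]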
| [
"math.gcd"
] | [((232, 246), 'math.gcd', 'math.gcd', (['a', 'b'], {}), '(a, b)\n', (240, 246), False, 'import math\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 20:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('entertainment_tonight', '0002_auto_20170321_1517'),
]
operations = [
migrations.AddField(
model_name='event',
name='upload_photo',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
]
| [
"django.db.models.CharField"
] | [((418, 461), 'django.db.models.CharField', 'models.CharField', ([], {'default': '(1)', 'max_length': '(200)'}), '(default=1, max_length=200)\n', (434, 461), False, 'from django.db import migrations, models\n')] |
from datetime import datetime, date, timezone, timedelta
from swpt_accounts.models import Account
D_ID = -1
C_ID = 1
def test_sibnalbus_burst_count(app):
from swpt_accounts import models as m
assert isinstance(m.RejectedTransferSignal.signalbus_burst_count, int)
assert isinstance(m.PreparedTransferSignal.signalbus_burst_count, int)
assert isinstance(m.FinalizedTransferSignal.signalbus_burst_count, int)
assert isinstance(m.AccountTransferSignal.signalbus_burst_count, int)
assert isinstance(m.AccountUpdateSignal.signalbus_burst_count, int)
assert isinstance(m.AccountPurgeSignal.signalbus_burst_count, int)
assert isinstance(m.RejectedConfigSignal.signalbus_burst_count, int)
assert isinstance(m.PendingBalanceChangeSignal.signalbus_burst_count, int)
def test_configure_account():
one_year = timedelta(days=365.25)
current_ts = datetime.now(tz=timezone.utc)
committed_at = current_ts - 2 * one_year
account = Account(
debtor_id=D_ID,
creditor_id=C_ID,
creation_date=date(1970, 1, 1),
principal=1000,
total_locked_amount=0,
pending_transfers_count=0,
last_transfer_id=0,
status_flags=0,
last_change_ts=current_ts,
previous_interest_rate=0.0,
last_interest_rate_change_ts=current_ts - one_year,
interest_rate=10.0,
)
i = account.calc_due_interest(1000, committed_at, current_ts)
assert abs(i - 100) < 1e-12
i = account.calc_due_interest(-1000, committed_at, current_ts)
assert abs(i + 100) < 1e-12
assert account.calc_due_interest(1000, committed_at, committed_at) == 0
assert account.calc_due_interest(1000, current_ts, current_ts) == 0
assert account.calc_due_interest(1000, current_ts, committed_at) == 0
i = account.calc_due_interest(1000, current_ts - timedelta(days=1), current_ts)
assert abs(i - 0.26098) < 1e-3
i = account.calc_due_interest(1000, committed_at, committed_at + timedelta(days=1))
assert abs(i) == 0
| [
"datetime.datetime.now",
"datetime.timedelta",
"datetime.date"
] | [((841, 863), 'datetime.timedelta', 'timedelta', ([], {'days': '(365.25)'}), '(days=365.25)\n', (850, 863), False, 'from datetime import datetime, date, timezone, timedelta\n'), ((881, 910), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (893, 910), False, 'from datetime import datetime, date, timezone, timedelta\n'), ((1051, 1067), 'datetime.date', 'date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (1055, 1067), False, 'from datetime import datetime, date, timezone, timedelta\n'), ((1851, 1868), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1860, 1868), False, 'from datetime import datetime, date, timezone, timedelta\n'), ((1987, 2004), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1996, 2004), False, 'from datetime import datetime, date, timezone, timedelta\n')] |
# -*- coding: UTF-8 -*-
import socket
import pyaudio
import numpy as np
import time
import logging
address = ('127.0.0.1', 8301)
RATE = 8000
RECORD_SECONDS = 10 # recording duration, in seconds
FORMAT = pyaudio.paInt16
CHANNELS = 1
CHUNK=256
DEBUG=1
def start_client ():
#socket init
tcpClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpClient.connect(address)
logging.info(" connect to %s:%s OK" % ( address[0],address[1]))
#pyaudio init
p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) # open the recording stream
logging.info("Please speak.")
    # limit the recording duration, then start sending
cnt=0
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
samples = stream.read(CHUNK)
        #buff=np.float32(np.frombuffer(samples, dtype=np.int16)) # convert 16-bit bytes to ints
tcpClient.send(samples)
msg=tcpClient.recv(1024).decode("utf-8")
if msg != " ":
logging.debug("result: %s " % msg)
cnt=cnt+1
logging.debug ("audio length: %d, recv count : %d " % (len(samples),cnt))
#end for
    # send the end-of-stream marker: a length-1 zero array (other markers are not supported yet)
eos=np.zeros(1)
tcpClient.send(bytes(eos))
msg=tcpClient.recv(1024).decode("utf-8")
logging.info("final result: %s " % msg )
#close socket and recording
stream.stop_stream()
stream.close()
p.terminate()
tcpClient.close()
if __name__ == '__main__':
logfile="log.asr_server"
if DEBUG:
logging.basicConfig( filename = "", level=logging.DEBUG)
else:
logging.basicConfig( filename = "", level=logging.INFO)
time_start = time.time()
start_client()
logging.info ( "** total time : %8.2fs" % ( time.time() - time_start )) | [
"logging.basicConfig",
"logging.debug",
"socket.socket",
"numpy.zeros",
"time.time",
"pyaudio.PyAudio",
"logging.info"
] | [((286, 335), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (299, 335), False, 'import socket\n'), ((373, 436), 'logging.info', 'logging.info', (["(' connect to %s:%s OK' % (address[0], address[1]))"], {}), "(' connect to %s:%s OK' % (address[0], address[1]))\n", (385, 436), False, 'import logging\n'), ((464, 481), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (479, 481), False, 'import pyaudio\n'), ((600, 629), 'logging.info', 'logging.info', (['"""Please speak."""'], {}), "('Please speak.')\n", (612, 629), False, 'import logging\n'), ((1148, 1159), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1156, 1159), True, 'import numpy as np\n'), ((1240, 1280), 'logging.info', 'logging.info', (["('final result: %s ' % msg)"], {}), "('final result: %s ' % msg)\n", (1252, 1280), False, 'import logging\n'), ((1629, 1640), 'time.time', 'time.time', ([], {}), '()\n', (1638, 1640), False, 'import time\n'), ((1481, 1534), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '""""""', 'level': 'logging.DEBUG'}), "(filename='', level=logging.DEBUG)\n", (1500, 1534), False, 'import logging\n'), ((1556, 1608), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '""""""', 'level': 'logging.INFO'}), "(filename='', level=logging.INFO)\n", (1575, 1608), False, 'import logging\n'), ((954, 988), 'logging.debug', 'logging.debug', (["('result: %s ' % msg)"], {}), "('result: %s ' % msg)\n", (967, 988), False, 'import logging\n'), ((1708, 1719), 'time.time', 'time.time', ([], {}), '()\n', (1717, 1719), False, 'import time\n')] |
import os
import click
from developers_chamber.scripts import cli
from developers_chamber.version_utils import bump_to_next_version as bump_to_next_version_func
from developers_chamber.version_utils import get_next_version, get_version
from developers_chamber.types import EnumType, ReleaseType
default_version_files = os.environ.get('VERSION_FILES', 'version.json').split(',')
@cli.command()
@click.option('--release_type', help='release type', type=EnumType(ReleaseType), required=True)
@click.option('--build_hash', help='hash of the build', type=str)
@click.option('--file', help='path to the version file', type=str, default=default_version_files, required=True,
multiple=True)
def version_bump_to_next(release_type, build_hash, file):
"""
Bump JSON file (or files) version number
"""
click.echo(bump_to_next_version_func(release_type, build_hash, file))
@cli.command()
@click.option('--file', help='path to the version file', type=str, default=default_version_files[0], required=True)
def version_print(file):
"""
Return current project version according to version JSON file
"""
click.echo(get_version(file))
@cli.command()
@click.option('--release_type', help='release type', type=EnumType(ReleaseType), required=True)
@click.option('--build_hash', help='hash of the build', type=str)
@click.option('--file', help='path to the version file', type=str, default=default_version_files[0], required=True)
def version_print_next(release_type, build_hash, file):
"""
Return next version according to input release type, build hash and version JSON file
"""
click.echo(get_next_version(release_type, build_hash, file))
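# Hedged usage sketch (in-process invocation via click's test runner; 'version.json' path assumed):
# from click.testing import CliRunner
# result = CliRunner().invoke(version_print, ['--file', 'version.json'])
# print(result.output)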
| [
"developers_chamber.scripts.cli.command",
"click.option",
"os.environ.get",
"developers_chamber.version_utils.get_version",
"developers_chamber.version_utils.get_next_version",
"developers_chamber.types.EnumType",
"developers_chamber.version_utils.bump_to_next_version"
] | [((385, 398), 'developers_chamber.scripts.cli.command', 'cli.command', ([], {}), '()\n', (396, 398), False, 'from developers_chamber.scripts import cli\n'), ((496, 560), 'click.option', 'click.option', (['"""--build_hash"""'], {'help': '"""hash of the build"""', 'type': 'str'}), "('--build_hash', help='hash of the build', type=str)\n", (508, 560), False, 'import click\n'), ((563, 694), 'click.option', 'click.option', (['"""--file"""'], {'help': '"""path to the version file"""', 'type': 'str', 'default': 'default_version_files', 'required': '(True)', 'multiple': '(True)'}), "('--file', help='path to the version file', type=str, default=\n default_version_files, required=True, multiple=True)\n", (575, 694), False, 'import click\n'), ((901, 914), 'developers_chamber.scripts.cli.command', 'cli.command', ([], {}), '()\n', (912, 914), False, 'from developers_chamber.scripts import cli\n'), ((916, 1035), 'click.option', 'click.option', (['"""--file"""'], {'help': '"""path to the version file"""', 'type': 'str', 'default': 'default_version_files[0]', 'required': '(True)'}), "('--file', help='path to the version file', type=str, default=\n default_version_files[0], required=True)\n", (928, 1035), False, 'import click\n'), ((1175, 1188), 'developers_chamber.scripts.cli.command', 'cli.command', ([], {}), '()\n', (1186, 1188), False, 'from developers_chamber.scripts import cli\n'), ((1286, 1350), 'click.option', 'click.option', (['"""--build_hash"""'], {'help': '"""hash of the build"""', 'type': 'str'}), "('--build_hash', help='hash of the build', type=str)\n", (1298, 1350), False, 'import click\n'), ((1353, 1472), 'click.option', 'click.option', (['"""--file"""'], {'help': '"""path to the version file"""', 'type': 'str', 'default': 'default_version_files[0]', 'required': '(True)'}), "('--file', help='path to the version file', type=str, default=\n default_version_files[0], required=True)\n", (1365, 1472), False, 'import click\n'), ((323, 370), 'os.environ.get', 'os.environ.get', (['"""VERSION_FILES"""', '"""version.json"""'], {}), "('VERSION_FILES', 'version.json')\n", (337, 370), False, 'import os\n'), ((839, 896), 'developers_chamber.version_utils.bump_to_next_version', 'bump_to_next_version_func', (['release_type', 'build_hash', 'file'], {}), '(release_type, build_hash, file)\n', (864, 896), True, 'from developers_chamber.version_utils import bump_to_next_version as bump_to_next_version_func\n'), ((457, 478), 'developers_chamber.types.EnumType', 'EnumType', (['ReleaseType'], {}), '(ReleaseType)\n', (465, 478), False, 'from developers_chamber.types import EnumType, ReleaseType\n'), ((1153, 1170), 'developers_chamber.version_utils.get_version', 'get_version', (['file'], {}), '(file)\n', (1164, 1170), False, 'from developers_chamber.version_utils import get_next_version, get_version\n'), ((1646, 1694), 'developers_chamber.version_utils.get_next_version', 'get_next_version', (['release_type', 'build_hash', 'file'], {}), '(release_type, build_hash, file)\n', (1662, 1694), False, 'from developers_chamber.version_utils import get_next_version, get_version\n'), ((1247, 1268), 'developers_chamber.types.EnumType', 'EnumType', (['ReleaseType'], {}), '(ReleaseType)\n', (1255, 1268), False, 'from developers_chamber.types import EnumType, ReleaseType\n')] |
import requests
class TMDBClient:
BASE_URL = "https://api.themoviedb.org/3/discover/movie"
API_KEY = ""
def request_best_movies_from_year(self, year):
params = {
'language': 'pt-BR',
'primary_release_year': year,
'sort_by': 'vote_average.desc',
'api_key': self.API_KEY,
'vote_count.gte': 100
}
r = requests.get(self.BASE_URL, params)
data = r.json()
movies = []
for d in data["results"]:
movie = {
'id': d["id"],
'title': d["title"],
'original_title': d["original_title"],
'release_date': d["release_date"],
'overview': d["overview"],
'vote_average': d["vote_average"],
'vote_count': d["vote_count"]
}
movies.append(movie)
return movies
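# Hedged usage sketch (requires a real TMDB API key in API_KEY above):
# client = TMDBClient()
# for movie in client.request_best_movies_from_year(2020):
#     print(movie['title'], movie['vote_average'])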
| [
"requests.get"
] | [((398, 433), 'requests.get', 'requests.get', (['self.BASE_URL', 'params'], {}), '(self.BASE_URL, params)\n', (410, 433), False, 'import requests\n')] |
import codecs
import os
import os.path
import string
import random
from random import shuffle
import csv
import time
import hashlib
import struct
import binascii
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
DefaultSize = "mode con: cols=100 lines=20"
os.system(DefaultSize)
pre = "C:\ProgramData\PassPY"
if not os.path.exists(pre):
os.makedirs(pre)
account = ""
cypher = ""
username = ""
user_name = ""
m = ""
def clrscr():
# Check if Operating System is Mac and Linux or Windows
if os.name == 'posix':
_ = os.system('clear')
else:
# Else Operating System is Windows (os.name = nt)
_ = os.system('cls')
def logo():
print("________ __________ __\n___ __ \_____ _________________ __ \ \/ /\n__ /_/ / __ `/_ ___/_ ___/_ /_/ /_ / \n_ ____// /_/ /_(__ )_(__ )_ ____/_ / \n/_/ \__,_/ /____/ /____/ /_/ /_/ \n\n\n\n\n")
def header():
clrscr()
os.system(DefaultSize)
logo()
def PassGen(user_name,acc,uN,pre):
header()
x = ''
x = input("1: Have PassPY generate a password with a length you choose for " + acc + "\n2: Type your own password for " + acc + "\n")
if x == '1':
header()
length = float(input("How many characters would you like the password to be for " + acc + "? \n"))
div = int(length/3)
r = int(length%3)
seed = string.ascii_letters # Generating letters
letters = ( ''. join(random.choice(seed) for i in range(div)) )
seed = string.digits # generating digits
numbers = ( ''.join(random.choice(seed) for i in range(div)) )
seed = string.punctuation # generating punctuation
punctuation = ( ''.join(random.choice(seed) for i in range(div + r)) )
hold = letters + numbers + punctuation
pW = ( ''.join(random.sample(hold, len(hold))))
print("here is the generated password: " + pW)
preKey = acc + uN + pW
lineHash = hashlib.sha256(preKey.encode('utf-8'))
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
lineHashHexidecimal = lineHash.hexdigest()
smosh = hashlib.sha256(bytes(half + lineHashHexidecimal, 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
acc = bytes(acc, 'utf8')
uN = bytes(uN, 'utf8')
pW = bytes(pW, 'utf8')
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
encryptor = cipher.encryptor()
uN = encryptor.update(uN) + encryptor.finalize()
uN = bytes.hex(uN)
encryptor = cipher.encryptor()
acc = encryptor.update(acc) + encryptor.finalize()
acc = bytes.hex(acc)
encryptor = cipher.encryptor()
pW = encryptor.update(pW) + encryptor.finalize()
pW = bytes.hex(pW)
lineEncrypted = bytes(acc + uN + pW, 'utf8')
lineChecksum = hashlib.sha256(lineEncrypted).hexdigest()
newline = acc + "\t" + uN + "\t" + pW + "\t" + str(lineHashHexidecimal) + "\t" + str(lineChecksum) + "\n"
post = user_name + "50" + ".passpy"
location = os.path.join(pre, post)
with open(location, "a", newline="\n") as filea:
filea.write(newline + "\n")
input("press Enter once the password is memorized (dont worry if you forget, it was saved in your password directory.)\n")
MainMenu(user_name)
elif x == '2':
header()
pW = input("Type the password for " + acc + ", then press Enter: \n")
preKey = acc + uN + pW
lineHash = hashlib.sha256(preKey.encode('utf-8'))
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
lineHashHexidecimal = lineHash.hexdigest()
smosh = hashlib.sha256(bytes(half + lineHashHexidecimal, 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
acc = bytes(acc, 'utf8')
uN = bytes(uN, 'utf8')
pW = bytes(pW, 'utf8')
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
smosh = ''
key = ''
iv = ''
encryptor = cipher.encryptor()
uN = encryptor.update(uN) + encryptor.finalize()
uN = bytes.hex(uN)
encryptor = cipher.encryptor()
acc = encryptor.update(acc) + encryptor.finalize()
acc = bytes.hex(acc)
encryptor = cipher.encryptor()
pW = encryptor.update(pW) + encryptor.finalize()
pW = bytes.hex(pW)
lineEncrypted = bytes(acc + uN + pW, 'utf8')
lineChecksum = hashlib.sha256(lineEncrypted).hexdigest()
newline = acc + "\t" + uN + "\t" + pW + "\t" + str(lineHashHexidecimal) + "\t" + str(lineChecksum) + "\n"
post = user_name + "50" + ".passpy"
location = os.path.join(pre, post)
with open(location, "a", newline="\n") as filea:
filea.write(newline)
MainMenu(user_name)
else:
PassGen(user_name,acc,uN,pre)
def Signin(pre):
header()
user_name = input("Enter Username: ").encode("utf-8").hex()
if user_name == "":
input("Press enter to returnt to the Sign In screen and enter a user name\n")
Signin(pre)
nametest2 = user_name + "4c" + ".passpy"
location = os.path.join(pre, nametest2)
try: #check to see if the account exists
usersearch = open(location,"r") #search for user's password file
lst = list(usersearch.readlines())
confirm = lst[-1]
print("Hello " + str(codecs.decode(user_name, "hex"), "utf-8"))
password = input("Enter Password: ").encode("utf-8").hex()
s(user_name,password)
compare = line
if compare == confirm:
print("Access Granted")
MainMenu(user_name)
else:
print("Access Denied")
Signin(pre)
except FileNotFoundError:
header()
print("Username not found!")
input("please press enter to continue")
Login(pre)
def AddEntry(user_name,pre):
header()
acc = input("what account is this password for? (e.g. GitHub)\n")
uN = input("What is the username for " + acc + "?\n")
PassGen(user_name,acc,uN,pre)
print("Done!")
def PasswordSearch(user_name,pre):
c = ""
header()
post = user_name + "50" + ".passpy"
location = os.path.join(pre, post)
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
SearchColumn = input("Password Search Menu:\nPress 1 to show all passwords\nPress 2 to search by account\nAll of the following options will NOT work!\nPress 3 to search by username\nPress 4 to search by password\nPress 5 to return to the Main Menu\n ")
try: #make sure there is a password file to search through
with open(location) as csv_file:
csv_reader = csv.reader(csv_file, delimiter="\t")
next(csv_reader)
if SearchColumn == '1':
header()
print("Here are all of the stored passwords: ")
for row in csv_reader: # !!!START HERE!!! Decrypt single item line by line
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
bEntry = bytes.fromhex(str(row[2]).lower())
bct = str(decryptor.update(bEntry), "utf8")
print(bct)
input("Press Enter to continue to the Main Menu")
MainMenu(user_name)
elif SearchColumn == '2':
header()
search = bytes(input("What Account are you looking for? \n"), 'utf8')
for row in csv_reader:
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
encryptor = cipher.encryptor()
sup = encryptor.update(search) + encryptor.finalize()
sable = bytes.hex(sup)
if sable == row[0]:
decryptor = cipher.decryptor()
a = bytes.fromhex(str(row[0]).lower())
a = str(decryptor.update(a), "utf8")
decryptor = cipher.decryptor()
u = bytes.fromhex(str(row[1]).lower())
u = str(decryptor.update(u), "utf8")
decryptor = cipher.decryptor()
p = bytes.fromhex(str(row[2]).lower())
p = str(decryptor.update(p), "utf8")
header()
c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you want to continue searching\n")
if c == '1':
target = p
header()
print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
Clipboard(target)
MainMenu(user_name)
elif c == '2':
print("Password NOT copied, continuing to search")
time.sleep(2)
continue
else:
print("Returning to the Main Menu")
time.sleep(1)
MainMenu(user_name)
MainMenu(user_name)
elif SearchColumn == '3':
header()
search = bytes(input("What Username are you looking for? \n"), 'utf8')
for row in csv_reader:
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
encryptor = cipher.encryptor()
sup = encryptor.update(search) + encryptor.finalize()
sable = bytes.hex(sup)
if sable == row[1]:
decryptor = cipher.decryptor()
a = bytes.fromhex(str(row[0]).lower())
a = str(decryptor.update(a), "utf8")
decryptor = cipher.decryptor()
u = bytes.fromhex(str(row[1]).lower())
u = str(decryptor.update(u), "utf8")
decryptor = cipher.decryptor()
p = bytes.fromhex(str(row[2]).lower())
p = str(decryptor.update(p), "utf8")
header()
c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you do not\n")
if c == '1':
target = p
header()
print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
Clipboard(target)
MainMenu(user_name)
elif c == '2':
input("Password NOT copied, Press enter to return to continue searching")
continue
else:
input("Password NOT copied, Press enter to return to the Main Menu")
MainMenu(user_name)
continue
MainMenu(user_name)
elif SearchColumn == '4':
header()
search = bytes(input("What password are you looking for? \n"), 'utf8')
for row in csv_reader:
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
smosh = hashlib.sha256(bytes(half + str(row[3]), 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
decryptor = cipher.decryptor()
encryptor = cipher.encryptor()
sup = encryptor.update(search) + encryptor.finalize()
sable = bytes.hex(sup)
if sable == row[2]:
decryptor = cipher.decryptor()
a = bytes.fromhex(str(row[0]).lower())
a = str(decryptor.update(a), "utf8")
decryptor = cipher.decryptor()
u = bytes.fromhex(str(row[1]).lower())
u = str(decryptor.update(u), "utf8")
decryptor = cipher.decryptor()
p = bytes.fromhex(str(row[2]).lower())
p = str(decryptor.update(p), "utf8")
header()
c = input("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n\nEnter 1 if you want to copy the password to the clipboard\nEnter 2 if you do not\n")
if c == '1':
target = p
header()
print("The Account, Username and Password information for " + a + " are:\n\nAccount------" + a + "\nUser Name----" + u + "\nPassword-----" + p + "\n")
Clipboard(target)
MainMenu(user_name)
elif c == '2':
input("Password NOT copied, Press enter to return to continue")
continue
else:
input("Password NOT copied, Press enter to return to the Main Menu")
MainMenu(user_name)
continue
MainMenu(user_name)
elif SearchColumn == '5':
MainMenu(user_name)
else:
m = input("enter 1, 2, 3 or 4:\n")
PasswordSearch(user_name,pre)
MainMenu(user_name)
except FileNotFoundError:
header()
print("Please register some passwords for me to search through.")
input("please press enter to continue")
MainMenu(user_name)
def Clipboard(target):
command = 'echo ' + target.strip() + '| clip'
os.system(command)
time.sleep(1)
print("The clipboard will be cleared in 5 seconds")
time.sleep(1)
print("The clipboard will be cleared in 4 seconds")
time.sleep(1)
print("The clipboard will be cleared in 3 seconds")
time.sleep(1)
print("The clipboard will be cleared in 2 seconds")
time.sleep(1)
print("The clipboard will be cleared in 1 seconds")
time.sleep(1)
print("The clipboard will be cleared now")
os.system("echo.| clip")
def MainMenu(user_name):
header()
print("Menu:\n 1: New password - register new password\n 2: List - show passwords\n 3: Exit")
menu = input("Enter a number:\n")
if menu == '1':
AddEntry(user_name,pre)
elif menu == '2':
PasswordSearch(user_name,pre)
elif menu == '3':
clrscr()
exit()
elif menu == '':
MainMenu(user_name)
else:
MainMenu(user_name)
def s(user_name,password):
uhold = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
phold = hashlib.sha256(password.encode('utf-8')).hexdigest()
for i in uhold:
if i.isdigit():
ucount = i
break
for i in phold:
if i.isdigit():
pcount = i
break
if int(pcount) % 2 == 0:
global line
line = uhold * int(pcount) + phold * int(ucount)
else:
line = phold * int(pcount) + uhold * int(pcount)
line = hashlib.sha256(line.encode('utf-8')).hexdigest()
def Register(pre):
header()
user_name = input("Enter Username: ").encode("utf-8").hex()
if user_name == "":
input("Press enter to return to the Sign In screen and enter a user name\n")
Register(pre)
nametest1 = user_name + "4c" + ".passpy"
location = os.path.join(pre, nametest1)
try:
usersearch = open(location) #search for user's password file
usersearch.close()
header()
print("User name not available")
input("Press Enter to try again: ")
Register(pre)
except FileNotFoundError:
header()
print("User name is available")
with open(location,"a") as create: #create user's password file
password = input("enter desired password:\n").encode("utf-8").hex()
while password == "":
header()
password = input("An empty password is not useful\nPlease enter desired password:\n")
s(user_name,password)
create.write(line)
second = user_name + "50" + ".passpy"
location = os.path.join(pre, second)
with open(location, "a", newline="\n") as create:
first = "count"
b = "0"
third = "empty"
hold = first + b + third
fourth = hashlib.sha256(bytes(hold, 'utf8')).hexdigest()
half = hashlib.sha256(user_name.encode('utf-8')).hexdigest()
smosh = hashlib.sha256(bytes(half + hold, 'utf8'))
key = smosh.digest()
iv = bytes(int(len(key)/2))
cipher = Cipher(algorithms.AES(key), modes.CTR(iv))
encryptor = cipher.encryptor()
first = bytes(first, 'utf8')
first = bytes.hex(encryptor.update(first) + encryptor.finalize())
encryptor = cipher.encryptor()
b = bytes(b, 'utf8')
b = bytes.hex(encryptor.update(b) + encryptor.finalize())
encryptor = cipher.encryptor()
third = bytes(third, 'utf8')
third = bytes.hex(encryptor.update(third) + encryptor.finalize())
hold = bytes(first + b + third, "utf8")
fifth = hashlib.sha256(hold).hexdigest()
firstLine = first + "\t" + b + "\t" + third + "\t" + fourth + "\t" + fifth + "\n"
create.write(firstLine)
header()
input("Done! \nNew account created!\nWelcome!")
MainMenu(user_name)
def Login(pre):
header()
print("Welcome!\n 1: New users - register your account\n 2: Existing users - log in\n 3: Exit - close the application.")
login = input("Enter a number:\n")
if login == '1':
Register(pre)
elif login == '2':
Signin(pre)
elif login == '3':
clrscr()
exit()
else:
Login(pre)
# Startup Phase
header()
print("Welcome to PassPY, the python based, opensource password storage\nIf you like PassPY, share it with a friend github.com/kayakers6/passpy\nIf you love PassPY, BTC: bc1qsqc3v2jt3lh0kq9addf4gu6e2uq5vxxfk35pl\n SNX: 0x05E8813B7dc3c4e039D898CB13f21A6E4d675bc1")
start = input("Press ENTER to start")
Login(pre)
| [
"os.path.exists",
"hashlib.sha256",
"random.choice",
"os.makedirs",
"os.path.join",
"time.sleep",
"cryptography.hazmat.primitives.ciphers.algorithms.AES",
"cryptography.hazmat.primitives.ciphers.modes.CTR",
"os.system",
"csv.reader",
"codecs.decode"
] | [((284, 306), 'os.system', 'os.system', (['DefaultSize'], {}), '(DefaultSize)\n', (293, 306), False, 'import os\n'), ((345, 364), 'os.path.exists', 'os.path.exists', (['pre'], {}), '(pre)\n', (359, 364), False, 'import os\n'), ((370, 386), 'os.makedirs', 'os.makedirs', (['pre'], {}), '(pre)\n', (381, 386), False, 'import os\n'), ((963, 985), 'os.system', 'os.system', (['DefaultSize'], {}), '(DefaultSize)\n', (972, 985), False, 'import os\n'), ((5304, 5332), 'os.path.join', 'os.path.join', (['pre', 'nametest2'], {}), '(pre, nametest2)\n', (5316, 5332), False, 'import os\n'), ((6380, 6403), 'os.path.join', 'os.path.join', (['pre', 'post'], {}), '(pre, post)\n', (6392, 6403), False, 'import os\n'), ((15423, 15441), 'os.system', 'os.system', (['command'], {}), '(command)\n', (15432, 15441), False, 'import os\n'), ((15446, 15459), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15456, 15459), False, 'import time\n'), ((15520, 15533), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15530, 15533), False, 'import time\n'), ((15594, 15607), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15604, 15607), False, 'import time\n'), ((15668, 15681), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15678, 15681), False, 'import time\n'), ((15742, 15755), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15752, 15755), False, 'import time\n'), ((15816, 15829), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15826, 15829), False, 'import time\n'), ((15881, 15905), 'os.system', 'os.system', (['"""echo.| clip"""'], {}), "('echo.| clip')\n", (15890, 15905), False, 'import os\n'), ((17195, 17223), 'os.path.join', 'os.path.join', (['pre', 'nametest1'], {}), '(pre, nametest1)\n', (17207, 17223), False, 'import os\n'), ((560, 578), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (569, 578), False, 'import os\n'), ((654, 670), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (663, 670), False, 'import os\n'), ((3163, 3186), 'os.path.join', 'os.path.join', (['pre', 'post'], {}), '(pre, post)\n', (3175, 3186), False, 'import os\n'), ((2454, 2473), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (2468, 2473), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((2475, 2488), 'cryptography.hazmat.primitives.ciphers.modes.CTR', 'modes.CTR', (['iv'], {}), '(iv)\n', (2484, 2488), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((4829, 4852), 'os.path.join', 'os.path.join', (['pre', 'post'], {}), '(pre, post)\n', (4841, 4852), False, 'import os\n'), ((6861, 6897), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '"""\t"""'}), "(csv_file, delimiter='\\t')\n", (6871, 6897), False, 'import csv\n'), ((17986, 18011), 'os.path.join', 'os.path.join', (['pre', 'second'], {}), '(pre, second)\n', (17998, 18011), False, 'import os\n'), ((1483, 1502), 'random.choice', 'random.choice', (['seed'], {}), '(seed)\n', (1496, 1502), False, 'import random\n'), ((1607, 1626), 'random.choice', 'random.choice', (['seed'], {}), '(seed)\n', (1620, 1626), False, 'import random\n'), ((1745, 1764), 'random.choice', 'random.choice', (['seed'], {}), '(seed)\n', (1758, 1764), False, 'import random\n'), ((2944, 2973), 'hashlib.sha256', 'hashlib.sha256', (['lineEncrypted'], {}), '(lineEncrypted)\n', (2958, 2973), False, 'import hashlib\n'), ((4051, 4070), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', 
(4065, 4070), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((4072, 4085), 'cryptography.hazmat.primitives.ciphers.modes.CTR', 'modes.CTR', (['iv'], {}), '(iv)\n', (4081, 4085), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((4610, 4639), 'hashlib.sha256', 'hashlib.sha256', (['lineEncrypted'], {}), '(lineEncrypted)\n', (4624, 4639), False, 'import hashlib\n'), ((5554, 5585), 'codecs.decode', 'codecs.decode', (['user_name', '"""hex"""'], {}), "(user_name, 'hex')\n", (5567, 5585), False, 'import codecs\n'), ((18489, 18508), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (18503, 18508), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((18510, 18523), 'cryptography.hazmat.primitives.ciphers.modes.CTR', 'modes.CTR', (['iv'], {}), '(iv)\n', (18519, 18523), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((7349, 7368), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (7363, 7368), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((7370, 7383), 'cryptography.hazmat.primitives.ciphers.modes.CTR', 'modes.CTR', (['iv'], {}), '(iv)\n', (7379, 7383), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((19067, 19087), 'hashlib.sha256', 'hashlib.sha256', (['hold'], {}), '(hold)\n', (19081, 19087), False, 'import hashlib\n'), ((8170, 8189), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (8184, 8189), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((8191, 8204), 'cryptography.hazmat.primitives.ciphers.modes.CTR', 'modes.CTR', (['iv'], {}), '(iv)\n', (8200, 8204), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((10595, 10614), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (10609, 10614), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((10616, 10629), 'cryptography.hazmat.primitives.ciphers.modes.CTR', 'modes.CTR', (['iv'], {}), '(iv)\n', (10625, 10629), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((9850, 9863), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9860, 9863), False, 'import time\n'), ((10023, 10036), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10033, 10036), False, 'import time\n'), ((13005, 13024), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (13019, 13024), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((13026, 13039), 'cryptography.hazmat.primitives.ciphers.modes.CTR', 'modes.CTR', (['iv'], {}), '(iv)\n', (13035, 13039), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n')] |
"""Asynchronous MongoDB and Redis connections."""
from functools import partial
import motor
import tornadoredis
from cloudtunes import settings
RedisClient = partial(tornadoredis.Client, **settings.REDIS)
mongo = motor.MotorClient(**settings.MONGODB).cloudtunes
redis = RedisClient()
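# Hypothetical usage sketch (not part of the original module): `mongo` is a Motor
# database handle and RedisClient() builds a tornadoredis client pre-configured
# from settings. Inside a coroutine one might write, for example:
#
#   user = await mongo.users.find_one({'_id': user_id})
#   cache = RedisClient()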
| [
"functools.partial",
"motor.MotorClient"
] | [((163, 209), 'functools.partial', 'partial', (['tornadoredis.Client'], {}), '(tornadoredis.Client, **settings.REDIS)\n', (170, 209), False, 'from functools import partial\n'), ((220, 257), 'motor.MotorClient', 'motor.MotorClient', ([], {}), '(**settings.MONGODB)\n', (237, 257), False, 'import motor\n')] |
#!/usr/bin/env python3
import argparse
import os
import torch
from generate_cnn_model import *
from generate_transformer_model import *
parser = argparse.ArgumentParser()
parser.add_argument("model_path", type=str)
parser.add_argument("--batch_size", type=int, default=128)
args = parser.parse_args()
input_channel_num = 42
board_size = 9
policy_channel_num = 27
input_tensor = torch.randn([args.batch_size, input_channel_num, board_size, board_size]).cuda()
script_model = torch.jit.load(args.model_path)
# infer block/channel counts from the model filename (expects tokens like "bl10" and "ch256")
filename = os.path.splitext(os.path.basename(args.model_path))[0]
parts = filename.split("_")
block_num = None
channel_num = None
for part in parts:
if "bl" in part:
block_num = int(part.replace("bl", ""))
if "ch" in part:
channel_num = int(part.replace("ch", ""))
print(f"block_num = {block_num}, channel_num = {channel_num}")
model = None
if "transformer" in args.model_path:
model = TransformerModel(input_channel_num, block_num=block_num, channel_num=channel_num,
policy_channel_num=policy_channel_num,
board_size=board_size)
else:
model = CategoricalNetwork(input_channel_num, block_num=block_num, channel_num=channel_num,
policy_channel_num=policy_channel_num,
board_size=board_size)
model.load_state_dict(script_model.state_dict())
model.eval()
model.cuda()
save_path = args.model_path.replace(".model", ".onnx")
torch.onnx.export(model, input_tensor, save_path)
print(f"export to {save_path}")
| [
"os.path.basename"
] | [((509, 542), 'os.path.basename', 'os.path.basename', (['args.model_path'], {}), '(args.model_path)\n', (525, 542), False, 'import os\n')] |
import os
import time
class Var(object):
# Get a bot token from botfather
BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
# Get from my.telegram.org
API_ID = int(os.environ.get("API_ID", 12345))
# Get from my.telegram.org
API_HASH = os.environ.get("API_HASH", "")
# To record start time of bot
BOT_START_TIME = time.time()
# You Can Get An API Key From https://api.imgbb.com.
API = os.environ.get("API", None)
OWNER_ID = int(os.environ.get("OWNER_ID", "1453690249"))
BOT_NAME = os.environ.get("BOT_NAME", "ImgBB")
START_PIC = "https://telegra.ph/file/e162f5f8554a9bf66e830.jpg"
HELP_PIC = "https://telegra.ph/file/e162f5f8554a9bf66e830.jpg"
class Tr(object):
START_TEXT = """
👋 Hi {},
I’m **[ImgBB](telegram.me/xImgBBbot)**. I can upload images on **ImgBB.com** & generate a shareable link for it!
BTW, do press **Help** for more information about the process.
"""
ABOUT_TEXT = """🤖 **My Name:** [ImgBB](telegram.me/xImgBBbot)
📝 **Language:** [Python 3](https://www.python.org)
📚 **Framework:** [Pyrogram](https://github.com/pyrogram/pyrogram)
📡 **Hosted On:** [Railway](https://railway.app)
👨💻 **Developer:** [𖤍 Λℓσηє 𖤍](t.me/xDune)
👥 **Support Group:** [Marine Support](https://t.me/MarineChats)
📢 **Updates Channel:** [Marine Bots](https://t.me/MarineBots)
"""
HELP_TEXT = """You may have already known my function. As you have seen in the start message, I can upload images on **ImgBB.com** & generate shareable link for it, which can be deleted after a specific time or stay there forever ~ according to your selection...🙃
Steps:
• Post/Forward an image...
• Select an option ~ whether to delete it automatically within the given period or keep it permanently...
• BOOM!💥 Your image is uploaded! You will be provided with a link to view the image, as well as, a link to delete it."""
ERR_TEXT = "⚠️ API Not Found"
    ERRTOKEN_TEXT = "😶 The Access Token Provided Has Expired, Revoked, Malformed Or Invalid For Other Reasons. Report this at @MarineBots"
WAIT = "💬 Please Wait !!"
| [
"os.environ.get",
"time.time"
] | [((97, 128), 'os.environ.get', 'os.environ.get', (['"""BOT_TOKEN"""', '""""""'], {}), "('BOT_TOKEN', '')\n", (111, 128), False, 'import os\n'), ((258, 288), 'os.environ.get', 'os.environ.get', (['"""API_HASH"""', '""""""'], {}), "('API_HASH', '')\n", (272, 288), False, 'import os\n'), ((346, 357), 'time.time', 'time.time', ([], {}), '()\n', (355, 357), False, 'import time\n'), ((426, 453), 'os.environ.get', 'os.environ.get', (['"""API"""', 'None'], {}), "('API', None)\n", (440, 453), False, 'import os\n'), ((531, 566), 'os.environ.get', 'os.environ.get', (['"""BOT_NAME"""', '"""ImgBB"""'], {}), "('BOT_NAME', 'ImgBB')\n", (545, 566), False, 'import os\n'), ((178, 209), 'os.environ.get', 'os.environ.get', (['"""API_ID"""', '(12345)'], {}), "('API_ID', 12345)\n", (192, 209), False, 'import os\n'), ((474, 514), 'os.environ.get', 'os.environ.get', (['"""OWNER_ID"""', '"""1453690249"""'], {}), "('OWNER_ID', '1453690249')\n", (488, 514), False, 'import os\n')] |
# add LDDMM shooting code into path
import sys
sys.path.append('../vectormomentum/Code/Python');
sys.path.append('../library')
from subprocess import call
import argparse
import os.path
#Add deep learning related libraries
from collections import Counter
import torch
import prediction_network
import util
import numpy as np
from skimage import exposure
#Add LDDMM registration related libraries
# pyca modules
import PyCA.Core as ca
import PyCA.Common as common
#import PyCA.Display as display
# vector momentum modules
# others
import logging
import copy
import math
import registration_methods
#parse command line input
parser = argparse.ArgumentParser(description='Deformation prediction given set of moving and target images.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--moving-image', nargs='+', required=True, metavar=('m1', 'm2, m3...'),
                           help='List of moving images, separated by spaces.')
requiredNamed.add_argument('--target-image', nargs='+', required=True, metavar=('t1', 't2, t3...'),
                           help='List of target images, separated by spaces.')
requiredNamed.add_argument('--output-prefix', nargs='+', required=True, metavar=('o1', 'o2, o3...'),
                           help='List of registration output prefixes for every moving/target image pair, separated by spaces. Preferred to be a directory (e.g. /some_path/output_dir/)')
parser.add_argument('--samples', type=int, default=50, metavar='N',
                    help='number of times to sample the network (default: 50)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for prediction network (default: 64)')
parser.add_argument('--n-GPU', type=int, default=1, metavar='N',
                    help='number of GPUs used for prediction (default: 1). For maximum efficiency please make the batch size divisible by the number of GPUs.')
parser.add_argument('--use-CPU-for-shooting', action='store_true', default=False,
help='Use CPU for geodesic shooting. Slow, but saves GPU memory.')
parser.add_argument('--shoot-steps', type=int, default=0, metavar='N',
help='time steps for geodesic shooting. Ignore this option to use the default step size used by the registration model.')
parser.add_argument('--affine-align', action='store_true', default=False,
help='Perform affine registration to align moving and target images to ICBM152 atlas space. Require niftireg.')
parser.add_argument('--histeq', action='store_true', default=False,
help='Perform histogram equalization to the moving and target images.')
parser.add_argument('--atlas', default="../data/atlas/icbm152.nii",
help="Atlas to use for (affine) pre-registration")
parser.add_argument('--prediction-parameter', default='../../network_configs/OASIS_predict_probabilistic.pth.tar',
help="network parameters for the prediction network")
args = parser.parse_args()
# check validity of input arguments from command line
def check_args(args):
# number of input images/output prefix consistency check
n_moving_images = len(args.moving_image)
n_target_images = len(args.target_image)
n_output_prefix = len(args.output_prefix)
if (n_moving_images != n_target_images):
print('The number of moving images is not consistent with the number of target images!')
sys.exit(1)
elif (n_moving_images != n_output_prefix ):
print('The number of output prefix is not consistent with the number of input images!')
sys.exit(1)
# number of GPU check (positive integers)
if (args.n_GPU <= 0):
print('Number of GPUs must be positive!')
sys.exit(1)
# geodesic shooting step check (positive integers)
if (args.shoot_steps < 0):
print('Shooting steps (--shoot-steps) is negative. Using model default step.')
    # number of samples check (positive integers)
    if (args.samples < 1):
        print('Number of samples (--samples) must be at least 1!')
        sys.exit(1)
#enddef
def create_net(args, network_config):
net_single = prediction_network.net(network_config['network_feature']).cuda();
net_single.load_state_dict(network_config['state_dict'])
if (args.n_GPU > 1) :
device_ids=range(0, args.n_GPU)
net = torch.nn.DataParallel(net_single, device_ids=device_ids).cuda()
else:
net = net_single
net.train()
return net;
#enddef
def preprocess_image(image_pyca, histeq):
image_np = common.AsNPCopy(image_pyca)
nan_mask = np.isnan(image_np)
image_np[nan_mask] = 0
image_np /= np.amax(image_np)
# perform histogram equalization if needed
if histeq:
image_np[image_np != 0] = exposure.equalize_hist(image_np[image_np != 0])
return image_np
#perform deformation prediction
def predict_image(args):
if (args.use_CPU_for_shooting):
mType = ca.MEM_HOST
else:
mType = ca.MEM_DEVICE
# load the prediction network
predict_network_config = torch.load(args.prediction_parameter)
prediction_net = create_net(args, predict_network_config);
batch_size = args.batch_size
patch_size = predict_network_config['patch_size']
input_batch = torch.zeros(batch_size, 2, patch_size, patch_size, patch_size).cuda()
# start prediction
for i in range(0, len(args.moving_image)):
common.Mkdir_p(os.path.dirname(args.output_prefix[i]))
if (args.affine_align):
# Perform affine registration to both moving and target image to the ICBM152 atlas space.
# Registration is done using Niftireg.
call(["reg_aladin",
"-noSym", "-speeeeed", "-ref", args.atlas ,
"-flo", args.moving_image[i],
"-res", args.output_prefix[i]+"moving_affine.nii",
"-aff", args.output_prefix[i]+'moving_affine_transform.txt'])
call(["reg_aladin",
"-noSym", "-speeeeed" ,"-ref", args.atlas ,
"-flo", args.target_image[i],
"-res", args.output_prefix[i]+"target_affine.nii",
"-aff", args.output_prefix[i]+'target_affine_transform.txt'])
moving_image = common.LoadITKImage(args.output_prefix[i]+"moving_affine.nii", mType)
target_image = common.LoadITKImage(args.output_prefix[i]+"target_affine.nii", mType)
else:
moving_image = common.LoadITKImage(args.moving_image[i], mType)
target_image = common.LoadITKImage(args.target_image[i], mType)
#preprocessing of the image
moving_image_np = preprocess_image(moving_image, args.histeq);
target_image_np = preprocess_image(target_image, args.histeq);
grid = moving_image.grid()
moving_image_processed = common.ImFromNPArr(moving_image_np, mType)
target_image_processed = common.ImFromNPArr(target_image_np, mType)
moving_image.setGrid(grid)
target_image.setGrid(grid)
predict_transform_space = False
if 'matlab_t7' in predict_network_config:
predict_transform_space = True
# run actual prediction
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 = prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi = common.AsNPCopy(registration_result['phiinv'])
phi_square = np.power(phi,2)
for sample_iter in range(1, args.samples):
print(sample_iter)
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 += prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi += common.AsNPCopy(registration_result['phiinv'])
phi_square += np.power(common.AsNPCopy(registration_result['phiinv']),2)
m0_mean = np.divide(m0, args.samples);
m0_reg = common.FieldFromNPArr(m0_mean, mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi_mean = registration_result['phiinv']
phi_var = np.divide(phi_square, args.samples) - np.power(np.divide(phi, args.samples), 2)
#save result
common.SaveITKImage(registration_result['I1'], args.output_prefix[i]+"I1.mhd")
common.SaveITKField(phi_mean, args.output_prefix[i]+"phiinv_mean.mhd")
common.SaveITKField(common.FieldFromNPArr(phi_var, mType), args.output_prefix[i]+"phiinv_var.mhd")
#enddef
if __name__ == '__main__':
check_args(args);
predict_image(args)
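# Example invocation (hypothetical paths; flags correspond to the argparse
# definitions above):
#
#   python predict_deformation.py \
#       --moving-image mov1.nii --target-image tgt1.nii \
#       --output-prefix /some_path/output_dir/ \
#       --samples 50 --batch-size 64 --affine-align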
| [
"PyCA.Common.ImFromNPArr",
"PyCA.Common.SaveITKField",
"sys.exit",
"PyCA.Common.FieldFromNPArr",
"sys.path.append",
"numpy.divide",
"argparse.ArgumentParser",
"PyCA.Common.LoadITKImage",
"util.predict_momentum",
"subprocess.call",
"registration_methods.geodesic_shooting",
"skimage.exposure.equ... | [((47, 95), 'sys.path.append', 'sys.path.append', (['"""../vectormomentum/Code/Python"""'], {}), "('../vectormomentum/Code/Python')\n", (62, 95), False, 'import sys\n'), ((97, 126), 'sys.path.append', 'sys.path.append', (['"""../library"""'], {}), "('../library')\n", (112, 126), False, 'import sys\n'), ((638, 743), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Deformation prediction given set of moving and target images."""'}), "(description=\n 'Deformation prediction given set of moving and target images.')\n", (661, 743), False, 'import argparse\n'), ((4658, 4685), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (['image_pyca'], {}), '(image_pyca)\n', (4673, 4685), True, 'import PyCA.Common as common\n'), ((4701, 4719), 'numpy.isnan', 'np.isnan', (['image_np'], {}), '(image_np)\n', (4709, 4719), True, 'import numpy as np\n'), ((4763, 4780), 'numpy.amax', 'np.amax', (['image_np'], {}), '(image_np)\n', (4770, 4780), True, 'import numpy as np\n'), ((5173, 5210), 'torch.load', 'torch.load', (['args.prediction_parameter'], {}), '(args.prediction_parameter)\n', (5183, 5210), False, 'import torch\n'), ((3507, 3518), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3515, 3518), False, 'import sys\n'), ((3814, 3825), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3822, 3825), False, 'import sys\n'), ((4878, 4925), 'skimage.exposure.equalize_hist', 'exposure.equalize_hist', (['image_np[image_np != 0]'], {}), '(image_np[image_np != 0])\n', (4900, 4925), False, 'from skimage import exposure\n'), ((6970, 7012), 'PyCA.Common.ImFromNPArr', 'common.ImFromNPArr', (['moving_image_np', 'mType'], {}), '(moving_image_np, mType)\n', (6988, 7012), True, 'import PyCA.Common as common\n'), ((7046, 7088), 'PyCA.Common.ImFromNPArr', 'common.ImFromNPArr', (['target_image_np', 'mType'], {}), '(target_image_np, mType)\n', (7064, 7088), True, 'import PyCA.Common as common\n'), ((7353, 7490), 'util.predict_momentum', 'util.predict_momentum', (['moving_image_np', 'target_image_np', 'input_batch', 'batch_size', 'patch_size', 'prediction_net', 'predict_transform_space'], {}), '(moving_image_np, target_image_np, input_batch,\n batch_size, patch_size, prediction_net, predict_transform_space)\n', (7374, 7490), False, 'import util\n'), ((7560, 7622), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (["prediction_result['image_space']", 'mType'], {}), "(prediction_result['image_space'], mType)\n", (7581, 7622), True, 'import PyCA.Common as common\n'), ((7654, 7805), 'registration_methods.geodesic_shooting', 'registration_methods.geodesic_shooting', (['moving_image_processed', 'target_image_processed', 'm0_reg', 'args.shoot_steps', 'mType', 'predict_network_config'], {}), '(moving_image_processed,\n target_image_processed, m0_reg, args.shoot_steps, mType,\n predict_network_config)\n', (7692, 7805), False, 'import registration_methods\n'), ((7812, 7858), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (["registration_result['phiinv']"], {}), "(registration_result['phiinv'])\n", (7827, 7858), True, 'import PyCA.Common as common\n'), ((7880, 7896), 'numpy.power', 'np.power', (['phi', '(2)'], {}), '(phi, 2)\n', (7888, 7896), True, 'import numpy as np\n'), ((8630, 8657), 'numpy.divide', 'np.divide', (['m0', 'args.samples'], {}), '(m0, args.samples)\n', (8639, 8657), True, 'import numpy as np\n'), ((8676, 8713), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (['m0_mean', 'mType'], {}), '(m0_mean, mType)\n', (8697, 8713), True, 'import PyCA.Common as common\n'), ((8745, 
8896), 'registration_methods.geodesic_shooting', 'registration_methods.geodesic_shooting', (['moving_image_processed', 'target_image_processed', 'm0_reg', 'args.shoot_steps', 'mType', 'predict_network_config'], {}), '(moving_image_processed,\n target_image_processed, m0_reg, args.shoot_steps, mType,\n predict_network_config)\n', (8783, 8896), False, 'import registration_methods\n'), ((9066, 9151), 'PyCA.Common.SaveITKImage', 'common.SaveITKImage', (["registration_result['I1']", "(args.output_prefix[i] + 'I1.mhd')"], {}), "(registration_result['I1'], args.output_prefix[i] + 'I1.mhd'\n )\n", (9085, 9151), True, 'import PyCA.Common as common\n'), ((9153, 9225), 'PyCA.Common.SaveITKField', 'common.SaveITKField', (['phi_mean', "(args.output_prefix[i] + 'phiinv_mean.mhd')"], {}), "(phi_mean, args.output_prefix[i] + 'phiinv_mean.mhd')\n", (9172, 9225), True, 'import PyCA.Common as common\n'), ((3671, 3682), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3679, 3682), False, 'import sys\n'), ((4251, 4308), 'prediction_network.net', 'prediction_network.net', (["network_config['network_feature']"], {}), "(network_config['network_feature'])\n", (4273, 4308), False, 'import prediction_network\n'), ((5380, 5442), 'torch.zeros', 'torch.zeros', (['batch_size', '(2)', 'patch_size', 'patch_size', 'patch_size'], {}), '(batch_size, 2, patch_size, patch_size, patch_size)\n', (5391, 5442), False, 'import torch\n'), ((5785, 6003), 'subprocess.call', 'call', (["['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args.\n moving_image[i], '-res', args.output_prefix[i] + 'moving_affine.nii',\n '-aff', args.output_prefix[i] + 'moving_affine_transform.txt']"], {}), "(['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args\n .moving_image[i], '-res', args.output_prefix[i] + 'moving_affine.nii',\n '-aff', args.output_prefix[i] + 'moving_affine_transform.txt'])\n", (5789, 6003), False, 'from subprocess import call\n'), ((6077, 6295), 'subprocess.call', 'call', (["['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args.\n target_image[i], '-res', args.output_prefix[i] + 'target_affine.nii',\n '-aff', args.output_prefix[i] + 'target_affine_transform.txt']"], {}), "(['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args\n .target_image[i], '-res', args.output_prefix[i] + 'target_affine.nii',\n '-aff', args.output_prefix[i] + 'target_affine_transform.txt'])\n", (6081, 6295), False, 'from subprocess import call\n'), ((6384, 6455), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (["(args.output_prefix[i] + 'moving_affine.nii')", 'mType'], {}), "(args.output_prefix[i] + 'moving_affine.nii', mType)\n", (6403, 6455), True, 'import PyCA.Common as common\n'), ((6481, 6552), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (["(args.output_prefix[i] + 'target_affine.nii')", 'mType'], {}), "(args.output_prefix[i] + 'target_affine.nii', mType)\n", (6500, 6552), True, 'import PyCA.Common as common\n'), ((6594, 6642), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (['args.moving_image[i]', 'mType'], {}), '(args.moving_image[i], mType)\n', (6613, 6642), True, 'import PyCA.Common as common\n'), ((6670, 6718), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (['args.target_image[i]', 'mType'], {}), '(args.target_image[i], mType)\n', (6689, 6718), True, 'import PyCA.Common as common\n'), ((8011, 8148), 'util.predict_momentum', 'util.predict_momentum', (['moving_image_np', 'target_image_np', 'input_batch', 'batch_size', 'patch_size', 'prediction_net', 
'predict_transform_space'], {}), '(moving_image_np, target_image_np, input_batch,\n batch_size, patch_size, prediction_net, predict_transform_space)\n', (8032, 8148), False, 'import util\n'), ((8218, 8280), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (["prediction_result['image_space']", 'mType'], {}), "(prediction_result['image_space'], mType)\n", (8239, 8280), True, 'import PyCA.Common as common\n'), ((8316, 8467), 'registration_methods.geodesic_shooting', 'registration_methods.geodesic_shooting', (['moving_image_processed', 'target_image_processed', 'm0_reg', 'args.shoot_steps', 'mType', 'predict_network_config'], {}), '(moving_image_processed,\n target_image_processed, m0_reg, args.shoot_steps, mType,\n predict_network_config)\n', (8354, 8467), False, 'import registration_methods\n'), ((8479, 8525), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (["registration_result['phiinv']"], {}), "(registration_result['phiinv'])\n", (8494, 8525), True, 'import PyCA.Common as common\n'), ((8956, 8991), 'numpy.divide', 'np.divide', (['phi_square', 'args.samples'], {}), '(phi_square, args.samples)\n', (8965, 8991), True, 'import numpy as np\n'), ((9252, 9289), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (['phi_var', 'mType'], {}), '(phi_var, mType)\n', (9273, 9289), True, 'import PyCA.Common as common\n'), ((4459, 4515), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net_single'], {'device_ids': 'device_ids'}), '(net_single, device_ids=device_ids)\n', (4480, 4515), False, 'import torch\n'), ((8561, 8607), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (["registration_result['phiinv']"], {}), "(registration_result['phiinv'])\n", (8576, 8607), True, 'import PyCA.Common as common\n'), ((9003, 9031), 'numpy.divide', 'np.divide', (['phi', 'args.samples'], {}), '(phi, args.samples)\n', (9012, 9031), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import math
from typing import Iterable, Optional, Tuple, Union
import numpy as np
from analysis import linearity
from utils import utils
from unit_test import unit_test
from processor import ProcessorBase
from generation import signal_generation
def generate_impulse(n_samp, amplitude=1.0) -> np.ndarray:
x = np.zeros(n_samp, dtype=np.float64)
x[0] = amplitude
return x
def generate_step(n_samp, amplitude=1.0) -> np.ndarray:
return np.ones(n_samp) * amplitude
def generate_ramp(n_samp, slope=1.0) -> np.ndarray:
y = (np.arange(n_samp) + 1).astype(np.float64) * slope
assert utils.approx_equal(y[0], slope)
assert utils.approx_equal(y[1], 2*slope)
return y
def get_impulse_response(system, n_samp, amplitude=1.0, reset=True, negative=False) -> np.ndarray:
# Assuming system is LTI & causal, and that system.reset() works as it should,
# we can ignore negative half of impulse/step response, as zero-input will have zero-output
x = generate_impulse(n_samp, amplitude)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
def get_step_response(system, n_samp, amplitude=1.0, reset=True, negative=False) -> np.ndarray:
x = generate_step(n_samp, amplitude)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
def get_ramp_response(system, n_samp, slope=1.0, reset=True, negative=False) -> np.ndarray:
x = generate_ramp(n_samp, slope)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
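# Minimal usage sketch (illustrative only): a hypothetical pass-through processor
# exposing the reset()/process_vector() interface the helpers above expect.
if __name__ == "__main__":
    class _Passthrough:
        def reset(self):
            pass

        def process_vector(self, x):
            return x

    h = get_impulse_response(_Passthrough(), n_samp=4)
    assert np.array_equal(h, generate_impulse(4))  # identity system returns the impulse unchanged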
| [
"utils.utils.approx_equal",
"numpy.zeros",
"numpy.ones",
"numpy.arange"
] | [((354, 388), 'numpy.zeros', 'np.zeros', (['n_samp'], {'dtype': 'np.float64'}), '(n_samp, dtype=np.float64)\n', (362, 388), True, 'import numpy as np\n'), ((640, 671), 'utils.utils.approx_equal', 'utils.approx_equal', (['y[0]', 'slope'], {}), '(y[0], slope)\n', (658, 671), False, 'from utils import utils\n'), ((681, 716), 'utils.utils.approx_equal', 'utils.approx_equal', (['y[1]', '(2 * slope)'], {}), '(y[1], 2 * slope)\n', (699, 716), False, 'from utils import utils\n'), ((489, 504), 'numpy.ones', 'np.ones', (['n_samp'], {}), '(n_samp)\n', (496, 504), True, 'import numpy as np\n'), ((581, 598), 'numpy.arange', 'np.arange', (['n_samp'], {}), '(n_samp)\n', (590, 598), True, 'import numpy as np\n')] |
"""
Copyright 2021 Objectiv B.V.
"""
import datetime
import warnings
from abc import ABC
from enum import Enum
from typing import Union, cast, List, Tuple, Optional, Any
import numpy
import pandas
from sqlalchemy.engine import Dialect
from bach import DataFrame
from bach.series import Series, SeriesString, SeriesBoolean, SeriesFloat64, SeriesInt64
from bach.expression import Expression, join_expressions
from bach.series.series import WrappedPartition, ToPandasInfo
from bach.series.utils.datetime_formats import parse_c_standard_code_to_postgres_code, \
parse_c_code_to_bigquery_code
from bach.types import DtypeOrAlias, StructuredDtype
from sql_models.constants import DBDialect
from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException
class DatePart(str, Enum):
DAY = 'days'
HOUR = 'hours'
MINUTE = 'minutes'
SECOND = 'seconds'
MILLISECOND = 'milliseconds'
MICROSECOND = 'microseconds'
# conversions for date parts to seconds
# when adjusting intervals, 30-day time periods are represented as months
# BigQuery seems to follow Postgres threshold
# https://www.postgresql.org/docs/current/functions-datetime.html#:~:text=justify_days%20(%20interval%20)%20%E2%86%92%20interval,mon%205%20days
# For example 395 days is equal to 1 year, 1 month and 5 days.
_TOTAL_SECONDS_PER_DATE_PART = {
DatePart.DAY: 24 * 60 * 60,
DatePart.HOUR: 60 * 60,
DatePart.MINUTE: 60,
DatePart.SECOND: 1,
DatePart.MILLISECOND: 1e-3,
DatePart.MICROSECOND: 1e-6,
}
class DateTimeOperation:
def __init__(self, series: 'SeriesAbstractDateTime'):
self._series = series
def sql_format(self, format_str: str) -> SeriesString:
"""
Allow formatting of this Series (to a string type).
:param format_str: The format to apply to the date/time column.
Currently, this uses Postgres' data format string syntax:
https://www.postgresql.org/docs/14/functions-formatting.html
.. warning::
This method is deprecated, we recommend using :meth:`SeriesAbstractDateTime.dt.strftime` instead.
.. code-block:: python
df['year'] = df.some_date_series.dt.sql_format('YYYY') # return year
df['date'] = df.some_date_series.dt.sql_format('YYYYMMDD') # return date
:returns: a SeriesString containing the formatted date.
"""
warnings.warn(
'Call to deprecated method, we recommend to use SeriesAbstractDateTime.dt.strftime instead',
category=DeprecationWarning,
)
expression = Expression.construct('to_char({}, {})',
self._series, Expression.string_value(format_str))
str_series = self._series.copy_override_type(SeriesString).copy_override(expression=expression)
return str_series
def strftime(self, format_str: str) -> SeriesString:
"""
Allow formatting of this Series (to a string type).
:param format_str: The format to apply to the date/time column.
This uses 1989 C standard format codes:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
.. code-block:: python
            df['year'] = df.some_date_series.dt.strftime('%Y') # return year
            df['date'] = df.some_date_series.dt.strftime('%Y%m%d') # return date
:returns: a SeriesString containing the formatted date.
"""
engine = self._series.engine
if is_postgres(engine):
parsed_format_str = parse_c_standard_code_to_postgres_code(format_str)
expression = Expression.construct(
'to_char({}, {})', self._series, Expression.string_value(parsed_format_str),
)
elif is_bigquery(engine):
# BQ uses C Standard Codes
# https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
parsed_format_str = parse_c_code_to_bigquery_code(format_str)
expression = Expression.construct(
'format_date({}, {})',
Expression.string_value(parsed_format_str),
self._series,
)
else:
raise DatabaseNotSupportedException(engine)
str_series = self._series.copy_override_type(SeriesString).copy_override(expression=expression)
return str_series
class TimedeltaOperation(DateTimeOperation):
def _get_conversion_df(self) -> 'DataFrame':
"""
        generates a DataFrame containing the number of seconds in each supported date part.
"""
from bach import DataFrame
conversion_df = pandas.DataFrame(
data=[
{
self._format_converted_series_name(dp): ts
for dp, ts in _TOTAL_SECONDS_PER_DATE_PART.items()
},
]
)
convert_df = DataFrame.from_pandas(df=conversion_df, engine=self._series.engine, convert_objects=True)
return convert_df.reset_index(drop=True)
@staticmethod
def _format_converted_series_name(date_part: DatePart) -> str:
return f'_SECONDS_IN_{date_part.name}'
@property
def components(self) -> DataFrame:
"""
:returns: a DataFrame containing all date parts from the timedelta.
"""
df = self.total_seconds.to_frame()
df = df.merge(self._get_conversion_df(), how='cross')
# justifies total seconds into the units of each date component
# after adjustment, it converts it back into seconds
for date_part in DatePart:
converted_series_name = self._format_converted_series_name(DatePart(date_part))
df[f'ts_{date_part}'] = df['total_seconds'] // df[converted_series_name]
df[f'ts_{date_part}'] *= df[converted_series_name]
# materialize to avoid complex subquery
df = df.materialize(node_name='justified_date_components')
components_series_names = []
prev_ts = ''
        # extract the actual date component from the justified seconds
        # by taking the difference between the current and previous components.
        # This normalizes negative time deltas so that only the days component
        # can be negative.
for date_part in DatePart:
converted_series_name = self._format_converted_series_name(DatePart(date_part))
component_name = f'{date_part}'
current_ts = f'ts_{date_part}'
if not prev_ts:
df[component_name] = df[current_ts] / df[converted_series_name]
else:
df[component_name] = (df[current_ts] - df[prev_ts]) / df[converted_series_name]
df[component_name] = cast(SeriesFloat64, df[component_name]).round(decimals=0)
components_series_names.append(component_name)
prev_ts = current_ts
return df[components_series_names].astype('int64')
@property
def days(self) -> SeriesInt64:
"""
converts total seconds into days and returns only the integral part of the result
"""
day_series = self.total_seconds // _TOTAL_SECONDS_PER_DATE_PART[DatePart.DAY]
day_series = day_series.astype('int64')
return (
day_series
.copy_override_type(SeriesInt64)
.copy_override(name='days')
)
@property
def seconds(self) -> SeriesInt64:
"""
removes days from total seconds (self.total_seconds % _SECONDS_IN_DAY)
and returns only the integral part of the result
"""
seconds_series = (self.total_seconds % _TOTAL_SECONDS_PER_DATE_PART[DatePart.DAY]) // 1
seconds_series = seconds_series.astype('int64')
return (
seconds_series
.copy_override_type(SeriesInt64)
.copy_override(name='seconds')
)
@property
def microseconds(self) -> SeriesInt64:
"""
considers only the fractional part of the total seconds and converts it into microseconds
"""
microseconds_series = (
(self.total_seconds % 1) / _TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]
)
microseconds_series = microseconds_series.astype('int64')
return (
microseconds_series
.copy_override_type(SeriesInt64)
.copy_override(name='microseconds')
)
@property
def total_seconds(self) -> SeriesFloat64:
"""
returns the total amount of seconds in the interval
"""
if not is_bigquery(self._series.engine):
# extract(epoch from source) returns the total number of seconds in the interval
expression = Expression.construct(f'extract(epoch from {{}})', self._series)
else:
# bq cannot extract epoch from interval
expression = Expression.construct(
(
f"UNIX_MICROS(CAST('1970-01-01' AS TIMESTAMP) + {{}}) "
f"* {_TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]}"
),
self._series,
)
return (
self._series
.copy_override_type(SeriesFloat64)
.copy_override(name='total_seconds', expression=expression)
)
class SeriesAbstractDateTime(Series, ABC):
"""
A Series that represents the generic date/time type and its specific operations. Selected arithmetic
operations are accepted using the usual operators.
**Date/Time Operations**
On any of the subtypes, you can access date operations through the `dt` accessor.
"""
@property
def dt(self) -> DateTimeOperation:
"""
Get access to date operations.
.. autoclass:: bach.series.series_datetime.DateTimeOperation
:members:
"""
return DateTimeOperation(self)
def _comparator_operation(self, other, comparator,
other_dtypes=('timestamp', 'date', 'time', 'string')) -> 'SeriesBoolean':
return super()._comparator_operation(other, comparator, other_dtypes)
@classmethod
def _cast_to_date_if_dtype_date(cls, series: 'Series') -> 'Series':
# PG returns timestamp in all cases were we expect date
# Make sure we cast properly, and round similar to python datetime: add 12 hours and cast to date
if series.dtype == 'date':
td_12_hours = datetime.timedelta(seconds=3600 * 12)
series_12_hours = SeriesTimedelta.from_value(base=series, value=td_12_hours, name='tmp')
expr_12_hours = series_12_hours.expression
return series.copy_override(
expression=Expression.construct("cast({} + {} as date)", series, expr_12_hours)
)
else:
return series
def dt_strip_timezone(value: Optional[datetime.datetime]) -> Optional[datetime.datetime]:
if value is None:
return None
return value.replace(tzinfo=None)
class SeriesTimestamp(SeriesAbstractDateTime):
"""
A Series that represents the timestamp/datetime type and its specific operations.
Timestamps are assumed to be in UTC, or without a timezone, both cases are treated the same.
These timestamps have a microsecond precision at best, in contrast to numpy's datetime64 which supports
up to attoseconds precision.
**Database support and types**
* Postgres: utilizes the 'timestamp without time zone' database type.
* BigQuery: utilizes the 'TIMESTAMP' database type.
"""
dtype = 'timestamp'
dtype_aliases = ('datetime64', 'datetime64[ns]', numpy.datetime64)
supported_db_dtype = {
DBDialect.POSTGRES: 'timestamp without time zone',
DBDialect.BIGQUERY: 'TIMESTAMP',
}
supported_value_types = (datetime.datetime, numpy.datetime64, datetime.date, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[datetime.datetime, numpy.datetime64, datetime.date, str, None],
dtype: StructuredDtype
) -> Expression:
if value is None:
return Expression.raw('NULL')
# if value is not a datetime or date, then convert it to datetime first
dt_value: Union[datetime.datetime, datetime.date, None] = None
if isinstance(value, str):
formats = ['%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d']
for format in formats:
try:
dt_value = datetime.datetime.strptime(value, format)
break
except ValueError:
continue
if dt_value is None:
raise ValueError(f'Not a valid timestamp string literal: {value}.'
f'Supported formats: {formats}')
elif isinstance(value, numpy.datetime64):
if numpy.isnat(value):
return Expression.raw('NULL')
# Weird trick: count number of microseconds in datetime, but only works on timedelta, so convert
# to a timedelta first, by subtracting 0 (epoch = 1970-01-01 00:00:00)
# Rounding can be unpredictable because of limited precision, so always truncate excess precision
microseconds = int((value - numpy.datetime64('1970', 'us')) // numpy.timedelta64(1, 'us'))
dt_value = datetime.datetime.utcfromtimestamp(microseconds / 1_000_000)
elif isinstance(value, (datetime.datetime, datetime.date)):
dt_value = value
if dt_value is None:
raise ValueError(f'Not a valid timestamp literal: {value}')
str_value = dt_value.strftime('%Y-%m-%d %H:%M:%S.%f')
return Expression.string_value(str_value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'timestamp':
return expression
else:
if source_dtype not in ['string', 'date']:
raise ValueError(f'cannot convert {source_dtype} to timestamp')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def to_pandas_info(self) -> Optional['ToPandasInfo']:
if is_postgres(self.engine):
return ToPandasInfo('datetime64[ns]', None)
if is_bigquery(self.engine):
return ToPandasInfo('datetime64[ns, UTC]', dt_strip_timezone)
return None
def __add__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'add', '({}) + ({})', other_dtypes=tuple(['timedelta']))
def __sub__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'timestamp',
'timestamp': 'timedelta'
}
return self._arithmetic_operation(other, 'sub', '({}) - ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
class SeriesDate(SeriesAbstractDateTime):
"""
A Series that represents the date type and its specific operations
**Database support and types**
* Postgres: utilizes the 'date' database type.
* BigQuery: utilizes the 'DATE' database type.
"""
dtype = 'date'
dtype_aliases: Tuple[DtypeOrAlias, ...] = tuple()
supported_db_dtype = {
DBDialect.POSTGRES: 'date',
DBDialect.BIGQUERY: 'DATE'
}
supported_value_types = (datetime.datetime, datetime.date, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as date)', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, datetime.date],
dtype: StructuredDtype
) -> Expression:
if isinstance(value, datetime.date):
value = str(value)
# TODO: check here already that the string has the correct format
return Expression.string_value(value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'date':
return expression
else:
if source_dtype not in ['string', 'timestamp']:
raise ValueError(f'cannot convert {source_dtype} to date')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def __add__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'date' # PG returns timestamp, needs explicit cast to date
}
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'add', '({}) + ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
)
def __sub__(self, other) -> 'Series':
type_mapping = {
'date': 'timedelta',
'timedelta': 'date', # PG returns timestamp, needs explicit cast to date
}
if other.dtype == 'date':
# PG does unexpected things when doing date - date. Work around that.
fmt_str = 'cast(cast({} as timestamp) - ({}) as interval)'
else:
fmt_str = '({}) - ({})'
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'sub', fmt_str,
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
)
class SeriesTime(SeriesAbstractDateTime):
"""
A Series that represents the date time and its specific operations
**Database support and types**
* Postgres: utilizes the 'time without time zone' database type.
* BigQuery: utilizes the 'TIME' database type.
"""
dtype = 'time'
dtype_aliases: Tuple[DtypeOrAlias, ...] = tuple()
supported_db_dtype = {
DBDialect.POSTGRES: 'time without time zone',
DBDialect.BIGQUERY: 'TIME',
}
supported_value_types = (datetime.time, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, datetime.time],
dtype: StructuredDtype
) -> Expression:
value = str(value)
# TODO: check here already that the string has the correct format
return Expression.string_value(value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'time':
return expression
else:
if source_dtype not in ['string', 'timestamp']:
raise ValueError(f'cannot convert {source_dtype} to time')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
# python supports no arithmetic on Time
class SeriesTimedelta(SeriesAbstractDateTime):
"""
A Series that represents the timedelta type and its specific operations
**Database support and types**
* Postgres: utilizes the 'interval' database type.
* BigQuery: support coming soon
"""
dtype = 'timedelta'
dtype_aliases = ('interval',)
supported_db_dtype = {
DBDialect.POSTGRES: 'interval',
DBDialect.BIGQUERY: 'INTERVAL',
}
supported_value_types = (datetime.timedelta, numpy.timedelta64, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, numpy.timedelta64, datetime.timedelta],
dtype: StructuredDtype
) -> Expression:
# pandas.Timedelta checks already that the string has the correct format
# round it up to microseconds precision in order to avoid problems with BigQuery
# pandas by default uses nanoseconds precision
value_td = pandas.Timedelta(value).round(freq='us')
if value_td is pandas.NaT:
return Expression.construct('NULL')
# interval values in iso format are allowed in SQL (both BQ and PG)
# https://www.postgresql.org/docs/8.4/datatype-datetime.html#:~:text=interval%20values%20can%20also%20be%20written%20as%20iso%208601%20time%20intervals%2C
return Expression.string_value(value_td.isoformat())
def to_pandas_info(self) -> Optional[ToPandasInfo]:
if is_bigquery(self.engine):
return ToPandasInfo(dtype='object', function=self._parse_interval_bigquery)
return None
def _parse_interval_bigquery(self, value: Optional[Any]) -> Optional[pandas.Timedelta]:
if value is None:
return None
# BigQuery returns a MonthDayNano object
# we need to normalize months to days (1 month == 30 day period)
return pandas.Timedelta(
days=value.days + value.months * 30,
nanoseconds=value.nanoseconds,
)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'timedelta':
return expression
else:
if not source_dtype == 'string':
raise ValueError(f'cannot convert {source_dtype} to timedelta')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def _comparator_operation(self, other, comparator,
other_dtypes=('timedelta', 'string')) -> SeriesBoolean:
return super()._comparator_operation(other, comparator, other_dtypes)
def __add__(self, other) -> 'Series':
type_mapping = {
'date': 'date', # PG makes this a timestamp
'timedelta': 'timedelta',
'timestamp': 'timestamp'
}
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'add', '({}) + ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping))
def __sub__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'timedelta',
}
return self._arithmetic_operation(other, 'sub', '({}) - ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
def __mul__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'mul', '({}) * ({})', other_dtypes=('int64', 'float64'))
def __truediv__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'div', '({}) / ({})', other_dtypes=('int64', 'float64'))
@property
def dt(self) -> TimedeltaOperation:
"""
Get access to date operations.
.. autoclass:: bach.series.series_datetime.TimedeltaOperation
:members:
"""
return TimedeltaOperation(self)
def sum(self, partition: WrappedPartition = None,
skipna: bool = True, min_count: int = None) -> 'SeriesTimedelta':
"""
:meta private:
"""
result = self._derived_agg_func(
partition=partition,
expression='sum',
skipna=skipna,
min_count=min_count
)
return result.copy_override_type(SeriesTimedelta)
def mean(self, partition: WrappedPartition = None, skipna: bool = True) -> 'SeriesTimedelta':
"""
:meta private:
"""
result = self._derived_agg_func(
partition=partition,
expression='avg',
skipna=skipna
)
result = result.copy_override_type(SeriesTimedelta)
if is_bigquery(self.engine):
result = result._remove_nano_precision_bigquery()
return result
def _remove_nano_precision_bigquery(self) -> 'SeriesTimedelta':
"""
Helper function that removes nano-precision from intervals.
"""
series = self.copy()
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#interval_type
_BQ_INTERVAL_FORMAT = '%d-%d %d %d:%d:%d.%06.0f'
_BQ_SUPPORTED_INTERVAL_PARTS = [
'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'SECOND'
]
# aggregating intervals by average might generate a result with
# nano-precision, which is not supported by BigQuery TimeStamps
# therefore we need to make sure we always generate values up to
# microseconds precision
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp_type
all_extracted_parts_expr = [
Expression.construct(f'EXTRACT({date_part} FROM {{}})', series)
for date_part in _BQ_SUPPORTED_INTERVAL_PARTS
]
# convert nanoseconds to microseconds
all_extracted_parts_expr.append(
Expression.construct(f'EXTRACT(NANOSECOND FROM {{}}) / 1000', series)
)
format_arguments_expr = join_expressions(all_extracted_parts_expr)
# All parts will create a string with following format
# '%d-%d %d %d:%d:%d.%06.0f'
# where the first 6 digits are date parts from YEAR to SECOND
# Format specifier %06.0f will format fractional part of seconds with maximum width of 6 digits
# for example:
# nanoseconds = 1142857, converting them into microseconds is 1142.857
# when applying string formatting, the value will be rounded into 1143 (.0 precision)
# and will be left padded by 2 leading zeros: 001143 (0 flag and 6 minimum width)
# for more information:
# https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#format_string
format_expr = Expression.construct(
f'format({{}}, {{}})',
Expression.string_value(_BQ_INTERVAL_FORMAT),
format_arguments_expr,
)
return series.copy_override(
expression=self.dtype_to_expression(
self.engine, source_dtype='string', expression=format_expr,
)
)
def quantile(
self, partition: WrappedPartition = None, q: Union[float, List[float]] = 0.5,
) -> 'SeriesTimedelta':
"""
        When q is a float or len(q) == 1, the resultant series index will remain unchanged.
In case multiple quantiles are calculated, the resultant series index will have all calculated
quantiles as index values.
"""
from bach.quantile import calculate_quantiles
if not is_bigquery(self.engine):
return (
calculate_quantiles(series=self.copy(), partition=partition, q=q)
.copy_override_type(SeriesTimedelta)
)
# calculate quantiles based on total microseconds
# using total seconds might lose precision,
# since TIMESTAMP_SECONDS accepts only integers, therefore
# microseconds will be lost due to rounding
total_microseconds_series = (
self.dt.total_seconds / _TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]
)
total_microseconds_series = total_microseconds_series.copy_override_type(SeriesFloat64)
result = calculate_quantiles(series=total_microseconds_series, partition=partition, q=q)
# result must be a timedelta
result = result.copy_override(
expression=Expression.construct(
f"TIMESTAMP_MICROS({{}}) - CAST('1970-01-01' AS TIMESTAMP)",
result.astype('int64'),
),
name=self.name,
)
return result.copy_override_type(SeriesTimedelta)
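# Illustrative usage sketch (assumes an existing Bach DataFrame `df` with two
# timestamp columns; the column names are hypothetical). Subtracting two
# timestamp series yields a SeriesTimedelta, whose `dt` accessor exposes the
# properties defined above:
#
#   delta = df['session_end'] - df['session_start']
#   df['duration_seconds'] = delta.dt.total_seconds
#   df['duration_days'] = delta.dt.days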
| [
"datetime.datetime.utcfromtimestamp",
"bach.series.utils.datetime_formats.parse_c_code_to_bigquery_code",
"datetime.timedelta",
"bach.series.series.ToPandasInfo",
"bach.DataFrame.from_pandas",
"bach.expression.Expression.construct",
"numpy.datetime64",
"warnings.warn",
"bach.series.utils.datetime_fo... | [((2410, 2555), 'warnings.warn', 'warnings.warn', (['"""Call to deprecated method, we recommend to use SeriesAbstractDateTime.dt.strftime instead"""'], {'category': 'DeprecationWarning'}), "(\n 'Call to deprecated method, we recommend to use SeriesAbstractDateTime.dt.strftime instead'\n , category=DeprecationWarning)\n", (2423, 2555), False, 'import warnings\n'), ((3540, 3559), 'sql_models.util.is_postgres', 'is_postgres', (['engine'], {}), '(engine)\n', (3551, 3559), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((4977, 5070), 'bach.DataFrame.from_pandas', 'DataFrame.from_pandas', ([], {'df': 'conversion_df', 'engine': 'self._series.engine', 'convert_objects': '(True)'}), '(df=conversion_df, engine=self._series.engine,\n convert_objects=True)\n', (4998, 5070), False, 'from bach import DataFrame\n'), ((14053, 14087), 'bach.expression.Expression.string_value', 'Expression.string_value', (['str_value'], {}), '(str_value)\n', (14076, 14087), False, 'from bach.expression import Expression, join_expressions\n'), ((14602, 14626), 'sql_models.util.is_postgres', 'is_postgres', (['self.engine'], {}), '(self.engine)\n', (14613, 14626), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((14695, 14719), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (14706, 14719), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((15980, 16032), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""cast({{}} as date)"""', 'literal'], {}), "(f'cast({{}} as date)', literal)\n", (16000, 16032), False, 'from bach.expression import Expression, join_expressions\n'), ((16385, 16415), 'bach.expression.Expression.string_value', 'Expression.string_value', (['value'], {}), '(value)\n', (16408, 16415), False, 'from bach.expression import Expression, join_expressions\n'), ((19024, 19054), 'bach.expression.Expression.string_value', 'Expression.string_value', (['value'], {}), '(value)\n', (19047, 19054), False, 'from bach.expression import Expression, join_expressions\n'), ((21210, 21234), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (21221, 21234), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((21625, 21714), 'pandas.Timedelta', 'pandas.Timedelta', ([], {'days': '(value.days + value.months * 30)', 'nanoseconds': 'value.nanoseconds'}), '(days=value.days + value.months * 30, nanoseconds=value.\n nanoseconds)\n', (21641, 21714), False, 'import pandas\n'), ((24521, 24545), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (24532, 24545), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((25831, 25873), 'bach.expression.join_expressions', 'join_expressions', (['all_extracted_parts_expr'], {}), '(all_extracted_parts_expr)\n', (25847, 25873), False, 'from bach.expression import Expression, join_expressions\n'), ((28058, 28137), 'bach.quantile.calculate_quantiles', 'calculate_quantiles', ([], {'series': 'total_microseconds_series', 'partition': 'partition', 'q': 'q'}), '(series=total_microseconds_series, partition=partition, q=q)\n', (28077, 28137), False, 'from bach.quantile import calculate_quantiles\n'), ((2699, 2734), 'bach.expression.Expression.string_value', 'Expression.string_value', (['format_str'], {}), 
'(format_str)\n', (2722, 2734), False, 'from bach.expression import Expression, join_expressions\n'), ((3593, 3643), 'bach.series.utils.datetime_formats.parse_c_standard_code_to_postgres_code', 'parse_c_standard_code_to_postgres_code', (['format_str'], {}), '(format_str)\n', (3631, 3643), False, 'from bach.series.utils.datetime_formats import parse_c_standard_code_to_postgres_code, parse_c_code_to_bigquery_code\n'), ((3811, 3830), 'sql_models.util.is_bigquery', 'is_bigquery', (['engine'], {}), '(engine)\n', (3822, 3830), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((8655, 8687), 'sql_models.util.is_bigquery', 'is_bigquery', (['self._series.engine'], {}), '(self._series.engine)\n', (8666, 8687), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((8807, 8870), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""extract(epoch from {{}})"""', 'self._series'], {}), "(f'extract(epoch from {{}})', self._series)\n", (8827, 8870), False, 'from bach.expression import Expression, join_expressions\n'), ((8962, 9117), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""UNIX_MICROS(CAST(\'1970-01-01\' AS TIMESTAMP) + {{}}) * {_TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]}"""', 'self._series'], {}), '(\n f"UNIX_MICROS(CAST(\'1970-01-01\' AS TIMESTAMP) + {{}}) * {_TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]}"\n , self._series)\n', (8982, 9117), False, 'from bach.expression import Expression, join_expressions\n'), ((10536, 10573), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * 12)'}), '(seconds=3600 * 12)\n', (10554, 10573), False, 'import datetime\n'), ((12448, 12470), 'bach.expression.Expression.raw', 'Expression.raw', (['"""NULL"""'], {}), "('NULL')\n", (12462, 12470), False, 'from bach.expression import Expression, join_expressions\n'), ((14647, 14683), 'bach.series.series.ToPandasInfo', 'ToPandasInfo', (['"""datetime64[ns]"""', 'None'], {}), "('datetime64[ns]', None)\n", (14659, 14683), False, 'from bach.series.series import WrappedPartition, ToPandasInfo\n'), ((14740, 14794), 'bach.series.series.ToPandasInfo', 'ToPandasInfo', (['"""datetime64[ns, UTC]"""', 'dt_strip_timezone'], {}), "('datetime64[ns, UTC]', dt_strip_timezone)\n", (14752, 14794), False, 'from bach.series.series import WrappedPartition, ToPandasInfo\n'), ((20812, 20840), 'bach.expression.Expression.construct', 'Expression.construct', (['"""NULL"""'], {}), "('NULL')\n", (20832, 20840), False, 'from bach.expression import Expression, join_expressions\n'), ((21255, 21323), 'bach.series.series.ToPandasInfo', 'ToPandasInfo', ([], {'dtype': '"""object"""', 'function': 'self._parse_interval_bigquery'}), "(dtype='object', function=self._parse_interval_bigquery)\n", (21267, 21323), False, 'from bach.series.series import WrappedPartition, ToPandasInfo\n'), ((25488, 25551), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""EXTRACT({date_part} FROM {{}})"""', 'series'], {}), "(f'EXTRACT({date_part} FROM {{}})', series)\n", (25508, 25551), False, 'from bach.expression import Expression, join_expressions\n'), ((25719, 25788), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""EXTRACT(NANOSECOND FROM {{}}) / 1000"""', 'series'], {}), "(f'EXTRACT(NANOSECOND FROM {{}}) / 1000', series)\n", (25739, 25788), False, 'from bach.expression import Expression, join_expressions\n'), ((26661, 26705), 'bach.expression.Expression.string_value', 
'Expression.string_value', (['_BQ_INTERVAL_FORMAT'], {}), '(_BQ_INTERVAL_FORMAT)\n', (26684, 26705), False, 'from bach.expression import Expression, join_expressions\n'), ((27384, 27408), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (27395, 27408), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((3740, 3782), 'bach.expression.Expression.string_value', 'Expression.string_value', (['parsed_format_str'], {}), '(parsed_format_str)\n', (3763, 3782), False, 'from bach.expression import Expression, join_expressions\n'), ((4021, 4062), 'bach.series.utils.datetime_formats.parse_c_code_to_bigquery_code', 'parse_c_code_to_bigquery_code', (['format_str'], {}), '(format_str)\n', (4050, 4062), False, 'from bach.series.utils.datetime_formats import parse_c_standard_code_to_postgres_code, parse_c_code_to_bigquery_code\n'), ((4285, 4322), 'sql_models.util.DatabaseNotSupportedException', 'DatabaseNotSupportedException', (['engine'], {}), '(engine)\n', (4314, 4322), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((13221, 13239), 'numpy.isnat', 'numpy.isnat', (['value'], {}), '(value)\n', (13232, 13239), False, 'import numpy\n'), ((13715, 13773), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(microseconds / 1000000)'], {}), '(microseconds / 1000000)\n', (13749, 13773), False, 'import datetime\n'), ((20716, 20739), 'pandas.Timedelta', 'pandas.Timedelta', (['value'], {}), '(value)\n', (20732, 20739), False, 'import pandas\n'), ((4165, 4207), 'bach.expression.Expression.string_value', 'Expression.string_value', (['parsed_format_str'], {}), '(parsed_format_str)\n', (4188, 4207), False, 'from bach.expression import Expression, join_expressions\n'), ((6810, 6849), 'typing.cast', 'cast', (['SeriesFloat64', 'df[component_name]'], {}), '(SeriesFloat64, df[component_name])\n', (6814, 6849), False, 'from typing import Union, cast, List, Tuple, Optional, Any\n'), ((10799, 10867), 'bach.expression.Expression.construct', 'Expression.construct', (['"""cast({} + {} as date)"""', 'series', 'expr_12_hours'], {}), "('cast({} + {} as date)', series, expr_12_hours)\n", (10819, 10867), False, 'from bach.expression import Expression, join_expressions\n'), ((12842, 12883), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['value', 'format'], {}), '(value, format)\n', (12868, 12883), False, 'import datetime\n'), ((13264, 13286), 'bach.expression.Expression.raw', 'Expression.raw', (['"""NULL"""'], {}), "('NULL')\n", (13278, 13286), False, 'from bach.expression import Expression, join_expressions\n'), ((13664, 13690), 'numpy.timedelta64', 'numpy.timedelta64', (['(1)', '"""us"""'], {}), "(1, 'us')\n", (13681, 13690), False, 'import numpy\n'), ((13629, 13659), 'numpy.datetime64', 'numpy.datetime64', (['"""1970"""', '"""us"""'], {}), "('1970', 'us')\n", (13645, 13659), False, 'import numpy\n')] |
# Hash Time Lock Contract Example in pyTeal
# Add parent directory to path so that algobpy can be imported
import sys
sys.path.insert(0,'..')
from algobpy.parse import parse_params
from pyteal import *
def htlc(tmpl_bob, tmpl_alice, tmpl_secret, tmpl_timeout):
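    # Two spend paths: Alice claims the funds by revealing the secret preimage (recv_cond),
    # while Bob reclaims them once the timeout round has passed (esc_cond).
    # Either path must also satisfy the common safety checks below.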
common_fields = And(
Txn.type_enum() == TxnType.Payment,
Txn.rekey_to() == Global.zero_address(),
Txn.close_remainder_to() == Global.zero_address(),
Txn.fee() <= Int(10000)
)
recv_cond = And(
Txn.receiver() == tmpl_alice,
Sha256(Arg(0)) == Bytes("base64", tmpl_secret)
)
esc_cond = And(
Txn.receiver() == tmpl_bob,
Txn.first_valid() > Int(tmpl_timeout)
)
return And(
common_fields,
Or(recv_cond, esc_cond)
)
if __name__ == "__main__":
params = {
"bob": "<KEY>",
"alice": "<KEY>",
"hash_image": "QzYhq9JlYbn2QdOMrhyxVlNtNjeyvyJc/I8d8VAGfGc=",
"timeout": 3001
}
# Overwrite params if sys.argv[1] is passed
if(len(sys.argv) > 1):
params = parse_params(sys.argv[1], params)
print(compileTeal(htlc(
Addr(params["bob"]),
Addr(params["alice"]),
params["hash_image"],
params["timeout"]), Mode.Signature))
| [
"algobpy.parse.parse_params",
"sys.path.insert"
] | [((119, 143), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (134, 143), False, 'import sys\n'), ((1076, 1109), 'algobpy.parse.parse_params', 'parse_params', (['sys.argv[1]', 'params'], {}), '(sys.argv[1], params)\n', (1088, 1109), False, 'from algobpy.parse import parse_params\n')] |
import sys
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel
class MainWindow(QDialog):
def __init__(self):
super().__init__()
self.title = "PyQt5 Radio Button"
self.top = 400
self.left = 200
self.width = 400
self.height = 150
self.icon_name = "images/home.png"
self._init_window()
def _init_window(self):
self.setWindowIcon(QtGui.QIcon(self.icon_name))
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self._create_ui_components()
vbox = QVBoxLayout()
vbox.addWidget(self.group_box)
self.lbl_Info = QLabel(self)
self.lbl_Info.setFont(QtGui.QFont("Sanserif", 15))
vbox.addWidget(self.lbl_Info)
self.setLayout(vbox)
self.show()
def _create_ui_components(self):
self.group_box = QGroupBox("What is your favorite sport?")
self.group_box.setFont(QtGui.QFont("Sanserif", 13))
hbox_layout = QHBoxLayout()
self.rdbtn_soccer = QRadioButton("Soccer")
self.rdbtn_soccer.setIcon(QtGui.QIcon("images/soccer.png"))
self.rdbtn_soccer.setIconSize(QtCore.QSize(30, 30))
self.rdbtn_soccer.setFont(QtGui.QFont("Sanserif", 13))
self.rdbtn_soccer.toggled.connect(self._on_radiobutton_checked)
hbox_layout.addWidget(self.rdbtn_soccer)
self.rdbtn_basketball = QRadioButton("Basketball")
self.rdbtn_basketball.setIcon(QtGui.QIcon("images/basketball.png"))
self.rdbtn_basketball.setIconSize(QtCore.QSize(30, 30))
self.rdbtn_basketball.setFont(QtGui.QFont("Sanserif", 13))
self.rdbtn_basketball.toggled.connect(self._on_radiobutton_checked)
hbox_layout.addWidget(self.rdbtn_basketball)
self.rdbtn_tennis = QRadioButton("Tennis")
self.rdbtn_tennis.setIcon(QtGui.QIcon("images/tennis.png"))
self.rdbtn_tennis.setIconSize(QtCore.QSize(30, 30))
self.rdbtn_tennis.setFont(QtGui.QFont("Sanserif", 13))
self.rdbtn_tennis.toggled.connect(self._on_radiobutton_checked)
hbox_layout.addWidget(self.rdbtn_tennis)
self.group_box.setLayout(hbox_layout)
def _on_radiobutton_checked(self):
rdbtn = self.sender()
if rdbtn.isChecked():
self.lbl_Info.setText(f"Sport selected: {rdbtn.text()}")
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec()) | [
"PyQt5.QtGui.QIcon",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QRadioButton",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QSize"
] | [((2555, 2577), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2567, 2577), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((704, 717), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (715, 717), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((782, 794), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (788, 794), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((1022, 1063), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['"""What is your favorite sport?"""'], {}), "('What is your favorite sport?')\n", (1031, 1063), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((1155, 1168), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1166, 1168), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((1198, 1220), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['"""Soccer"""'], {}), "('Soccer')\n", (1210, 1220), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((1566, 1592), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['"""Basketball"""'], {}), "('Basketball')\n", (1578, 1592), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((1958, 1980), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['"""Tennis"""'], {}), "('Tennis')\n", (1970, 1980), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QGroupBox, QVBoxLayout, QHBoxLayout, QRadioButton, QLabel\n'), ((502, 529), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['self.icon_name'], {}), '(self.icon_name)\n', (513, 529), False, 'from PyQt5 import QtGui, QtCore\n'), ((825, 852), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Sanserif"""', '(15)'], {}), "('Sanserif', 15)\n", (836, 852), False, 'from PyQt5 import QtGui, QtCore\n'), ((1095, 1122), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Sanserif"""', '(13)'], {}), "('Sanserif', 13)\n", (1106, 1122), False, 'from PyQt5 import QtGui, QtCore\n'), ((1255, 1287), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""images/soccer.png"""'], {}), "('images/soccer.png')\n", (1266, 1287), False, 'from PyQt5 import QtGui, QtCore\n'), ((1327, 1347), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (1339, 1347), False, 'from PyQt5 import QtGui, QtCore\n'), ((1383, 1410), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Sanserif"""', '(13)'], {}), "('Sanserif', 13)\n", (1394, 1410), False, 'from PyQt5 import QtGui, QtCore\n'), ((1631, 1667), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""images/basketball.png"""'], {}), "('images/basketball.png')\n", (1642, 1667), False, 'from PyQt5 import QtGui, QtCore\n'), ((1711, 1731), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (1723, 1731), False, 'from PyQt5 import QtGui, QtCore\n'), ((1771, 1798), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Sanserif"""', '(13)'], {}), "('Sanserif', 13)\n", (1782, 1798), False, 'from PyQt5 import QtGui, QtCore\n'), ((2015, 2047), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""images/tennis.png"""'], {}), "('images/tennis.png')\n", (2026, 2047), False, 'from PyQt5 import QtGui, QtCore\n'), ((2087, 2107), 'PyQt5.QtCore.QSize', 
'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (2099, 2107), False, 'from PyQt5 import QtGui, QtCore\n'), ((2143, 2170), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Sanserif"""', '(13)'], {}), "('Sanserif', 13)\n", (2154, 2170), False, 'from PyQt5 import QtGui, QtCore\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-01-23 18:09
from __future__ import unicode_literals
from django.db import migrations, models
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('guides', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='guide',
name='author_bio',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='guide',
name='author_name',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='guide',
name='author_photo',
field=sorl.thumbnail.fields.ImageField(blank=True, null=True, upload_to='img/uploads/guide_author_images'),
),
migrations.AddField(
model_name='guidearticle',
name='external_author_name',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='guidearticle',
name='external_organization_name',
field=models.CharField(blank=True, max_length=128),
),
]
| [
"django.db.models.TextField",
"django.db.models.CharField"
] | [((419, 447), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (435, 447), False, 'from django.db import migrations, models\n'), ((571, 615), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(128)'}), '(blank=True, max_length=128)\n', (587, 615), False, 'from django.db import migrations, models\n'), ((980, 1024), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(128)'}), '(blank=True, max_length=128)\n', (996, 1024), False, 'from django.db import migrations, models\n'), ((1170, 1214), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(128)'}), '(blank=True, max_length=128)\n', (1186, 1214), False, 'from django.db import migrations, models\n')] |
# Generated by Django 3.2 on 2021-04-25 02:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hipyelp', '0013_alter_drinktag_tagname'),
]
operations = [
migrations.RenameField(
model_name='drinktag',
old_name='tagName',
new_name='drinktagName',
),
]
| [
"django.db.migrations.RenameField"
] | [((229, 324), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""drinktag"""', 'old_name': '"""tagName"""', 'new_name': '"""drinktagName"""'}), "(model_name='drinktag', old_name='tagName', new_name=\n 'drinktagName')\n", (251, 324), False, 'from django.db import migrations\n')] |
from recip.util import DataType
from recip.util import Validator
def toAddressBytes(address):
if address.startswith('0x'):
address = address[2:]
return DataType.fromHex(address)
def toAddressStr(addressBytes):
return DataType.toHex(addressBytes)
def to0xAddress(addressBytes):
address = toAddressStr(addressBytes)
return "0x{0}".format(address) | [
"recip.util.DataType.fromHex",
"recip.util.DataType.toHex"
] | [((169, 194), 'recip.util.DataType.fromHex', 'DataType.fromHex', (['address'], {}), '(address)\n', (185, 194), False, 'from recip.util import DataType\n'), ((239, 267), 'recip.util.DataType.toHex', 'DataType.toHex', (['addressBytes'], {}), '(addressBytes)\n', (253, 267), False, 'from recip.util import DataType\n')] |
from datetime import datetime
from webapp.services import db
class ReqErrorLog(db.Model):
__tablename__ = 'req_error_log'
id = db.Column(db.Integer, primary_key=True)
action = db.Column(db.String(50))
key = db.Column(db.String(255))
msg = db.Column(db.String(2000))
created_time = db.Column(db.DateTime, default=datetime.now)
def __init__(self, action, key, msg):
self.action = action
self.key = key
self.msg = msg
def __repr__(self):
return '<ReqErrorLog %r>' % self.id
| [
"webapp.services.db.String",
"webapp.services.db.Column"
] | [((139, 178), 'webapp.services.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (148, 178), False, 'from webapp.services import db\n'), ((309, 353), 'webapp.services.db.Column', 'db.Column', (['db.DateTime'], {'default': 'datetime.now'}), '(db.DateTime, default=datetime.now)\n', (318, 353), False, 'from webapp.services import db\n'), ((202, 215), 'webapp.services.db.String', 'db.String', (['(50)'], {}), '(50)\n', (211, 215), False, 'from webapp.services import db\n'), ((237, 251), 'webapp.services.db.String', 'db.String', (['(255)'], {}), '(255)\n', (246, 251), False, 'from webapp.services import db\n'), ((273, 288), 'webapp.services.db.String', 'db.String', (['(2000)'], {}), '(2000)\n', (282, 288), False, 'from webapp.services import db\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SupplierScorecardStanding(Document):
pass
@frappe.whitelist()
def get_scoring_standing(standing_name):
standing = frappe.get_doc("Supplier Scorecard Standing", standing_name)
return standing
@frappe.whitelist()
def get_standings_list():
standings = frappe.db.sql("""
SELECT
scs.name
FROM
`tabSupplier Scorecard Standing` scs""",
{}, as_dict=1)
return standings | [
"frappe.whitelist",
"frappe.get_doc",
"frappe.db.sql"
] | [((294, 312), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (310, 312), False, 'import frappe\n'), ((448, 466), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (464, 466), False, 'import frappe\n'), ((366, 426), 'frappe.get_doc', 'frappe.get_doc', (['"""Supplier Scorecard Standing"""', 'standing_name'], {}), "('Supplier Scorecard Standing', standing_name)\n", (380, 426), False, 'import frappe\n'), ((506, 619), 'frappe.db.sql', 'frappe.db.sql', (['"""\n\t\tSELECT\n\t\t\tscs.name\n\t\tFROM\n\t\t\t`tabSupplier Scorecard Standing` scs"""', '{}'], {'as_dict': '(1)'}), '(\n """\n\t\tSELECT\n\t\t\tscs.name\n\t\tFROM\n\t\t\t`tabSupplier Scorecard Standing` scs""",\n {}, as_dict=1)\n', (519, 619), False, 'import frappe\n')] |
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
import re
import ftplib
import calendar
import time
import sys
import os
import json
from pprint import pprint
from datetime import datetime
from datetime import date, timedelta
from bson import ObjectId
from helper.ftp import Ftp
from helper.mongod import Mongodb
from helper.excel import Excel
from helper.jaccs import Config
from helper.common import Common
from helper.mongodbaggregate import Mongodbaggregate
mongodb = Mongodb("worldfone4xs")
_mongodb = Mongodb("_worldfone4xs")
excel = Excel()
config = Config()
ftp = Ftp()
common = Common()
mongodbaggregate = Mongodbaggregate("worldfone4xs")
base_url = common.base_url()
wff_env = common.wff_env(base_url)
mongodb = Mongodb(MONGODB="worldfone4xs", WFF_ENV=wff_env)
_mongodb = Mongodb(MONGODB="_worldfone4xs", WFF_ENV=wff_env)
log = open(base_url + "cronjob/python/Loan/log/saveDailyProdProdEachUserGroup.txt","a")
now = datetime.now()
subUserType = 'LO'
collection = common.getSubUser(subUserType, 'Daily_prod_working_day')
try:
insertData = []
updateData = []
listDebtGroup = []
dpWorkingdaysdaycol = {'1': 'No. of Overdue accounts', '2': 'No. of Paid accounts end of day', '3': 'No. of Paid accounts Accumulated', '4': 'Collected ratio (account)', '5': 'Overdue outstanding balance', '6': 'Collected amount (end of day)', '7': 'Collected amount Accumulated', '8': 'Collected ratio (amount)'}
due = {
'01' : '12th',
'02' : '22nd',
'03' : '31st'
}
today = date.today()
# today = datetime.strptime('20/11/2019', "%d/%m/%Y").date()
day = today.day
month = today.month
year = today.year
weekday = today.weekday()
lastDayOfMonth = calendar.monthrange(year, month)[1]
todayString = today.strftime("%d/%m/%Y")
todayTimeStamp = int(time.mktime(time.strptime(str(todayString + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
startMonth = int(time.mktime(time.strptime(str('01/' + str(month) + '/' + str(year) + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
endMonth = int(time.mktime(time.strptime(str(str(lastDayOfMonth) + '/' + str(month) + '/' + str(year) + " 23:59:59"), "%d/%m/%Y %H:%M:%S")))
holidayOfMonth = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_off_sys'))
    listHoliday = [offDateRow['off_date'] for offDateRow in holidayOfMonth]
dueDateThisMonth = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_due_date'))
if todayTimeStamp in listHoliday or (weekday == 5) or weekday == 6:
sys.exit()
todayString = today.strftime("%d/%m/%Y")
starttime = int(time.mktime(time.strptime(str(todayString + " 00:00:00"), "%d/%m/%Y %H:%M:%S")))
endtime = int(time.mktime(time.strptime(str(todayString + " 23:59:59"), "%d/%m/%Y %H:%M:%S")))
yesterday_starttime = starttime - 86400
yesterday_endtime = endtime - 86400
mainProduct = {}
mainProductRaw = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Product'))
for prod in mainProductRaw:
mainProduct[prod['code']] = prod['name']
debtGroup = _mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Jsondata'), WHERE={'tags': ['debt', 'group']})
dueDate = _mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Jsondata'), WHERE={'tags': ['debt', 'duedate']})
for group in debtGroup['data']:
for duedate in dueDate['data']:
listDebtGroup.append(group['text'] + duedate['text'])
listDebtGroup = sorted(listDebtGroup)
listGroupProductRaw = _mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Jsondata'), WHERE={'tags': ['group', 'debt', 'product']})
listGroupProduct = listGroupProductRaw['data']
lnjc05 = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'LNJC05'))
total_lnjc05 = 0
total_cur_bal_lnjc05 = 0
for lnjc05_row in lnjc05:
total_lnjc05 += 1
total_cur_bal_lnjc05 += lnjc05_row['current_balance']
list_acc = mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'List_of_account_in_collection'))
total_list_acc = 0
total_cur_bal_list_acc = 0
for list_acc_row in list_acc:
total_list_acc += 1
total_cur_bal_list_acc += list_acc_row['cur_bal']
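    # For each debt group / due-date bucket, product and report metric, compute today's figures
    # per collection team and write them to Daily_prod_working_day: a new record is inserted on
    # the first working day after the due date, otherwise the existing record is updated with
    # today's working-day index.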
for debtGroupCell in list(listDebtGroup):
        if debtGroupCell[0:1] != 'F':
dueDayOfMonth = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_due_date'), WHERE={'for_month': str(month), 'debt_group': debtGroupCell[1:3]})
dueDayLastMonth = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Report_due_date'), WHERE={'for_month': str(month - 1), 'debt_group': debtGroupCell[1:3]})
if todayTimeStamp > dueDayOfMonth['due_date_add_1']:
todayIndex = str(common.countWorkingDaysBetweendate(starttime = dueDayOfMonth['due_date_add_1'], endtime = todayTimeStamp, mongodb=mongodb))
else:
todayIndex = str(common.countWorkingDaysBetweendate(starttime = dueDayLastMonth['due_date_add_1'], endtime = todayTimeStamp, mongodb=mongodb))
for groupProductCell in listGroupProduct:
for key in dpWorkingdaysdaycol:
groupInfoByDueDate = list(mongodb.get(MONGO_COLLECTION=common.getSubUser(subUserType, 'Group'), WHERE={'debt_groups': debtGroupCell, 'name': {"$regex": groupProductCell['text'] + '.*'}}))
groupInfoByDueDate.extend([{'name': 'Total'}])
for groupCell in groupInfoByDueDate:
debtList = []
cur_bal = 0
if groupProductCell['value'] == 'SIBS':
count_acc = total_lnjc05
cur_bal = total_cur_bal_lnjc05
if groupProductCell['value'] == 'Card':
count_acc = total_list_acc
cur_bal = total_cur_bal_list_acc
no_overdue = count_acc
no_paid_acc_accumulayed = 0
no_overdue_amt = cur_bal
no_paid_acc_accumulayed_amt = 0
if(groupCell['name'] != 'Total'):
temp = {
'group' : debtGroupCell[0:1] + ' GROUP',
'month' : today.strftime("%b-%y"),
'due' : due[debtGroupCell[1:3]],
'product' : groupProductCell['value'],
'day' : dpWorkingdaysdaycol[key],
'day_code' : key,
'team_name' : groupCell['name'],
'team_id' : str(groupCell['_id']),
}
if todayTimeStamp < dueDayOfMonth['due_date_add_1']:
temp['due_date'] = dueDayLastMonth['due_date'] if dueDayLastMonth is not None else ''
                                # # Get the outstanding value as of due date + 1 #
# incidenceInfo = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Due_date_next_date'), WHERE={'for_month': str(month - 1), 'team_id': str(groupCell['_id'])})
                                # # Get the outstanding value as of due date + 1 #
else:
# incidenceInfo = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Due_date_next_date'), WHERE={'for_month': str(month), 'team_id': str(groupCell['_id'])})
temp['due_date'] = dueDayOfMonth['due_date']
if key == '2':
temp['index_' + todayIndex] = 0
if key == '6':
temp['index_' + todayIndex] = 0
if key == '1':
temp['index_' + todayIndex] = no_overdue
if key == '3':
temp['index_' + todayIndex] = no_paid_acc_accumulayed
if key == '5':
temp['index_' + todayIndex] = no_overdue_amt
if key == '7':
temp['index_' + todayIndex] = no_paid_acc_accumulayed_amt
                            # Do not compute the target here; it must be taken from the beginning-of-month table
# if todayTimeStamp == dueDayOfMonth['due_date_add_1']:
# temp['target'] = target['target'],
# temp['target_acc'] = (no_overdue * int(temp['target'])) / 100
# temp['target_amt'] = (no_overdue_amt * int(temp['target'])) / 100
temp['start_acc'] = 0
temp['start_amt'] = 0
if key == '4':
# temp['col_ratio_acc'] = no_acc_end_date / no_overdue if no_overdue not in [None, 0] else 0
temp['index_' + todayIndex] = 0
if key == '8':
temp['index_' + todayIndex] = 0
if todayTimeStamp != dueDayOfMonth['due_date_add_1']:
yesterdayData = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Daily_prod_working_day'), WHERE={'team_id': str(groupCell['_id']), 'day_code': key, 'updated_at': {'$gte': yesterday_starttime, '$lte': yesterday_endtime}})
no_acc_end_date = 0
no_acc_end_date_amt = 0
if yesterdayData is not None:
# pprint(temp)
no_acc_end_date = yesterdayData['no_overdue'] - no_overdue
no_acc_end_date_amt = yesterdayData['no_overdue_amt'] - no_overdue_amt
updateDataYesterday = {}
if key == '2':
                                    updateDataYesterday['index_' + str(int(todayIndex) - 1)] = no_acc_end_date
if key == '6':
                                    updateDataYesterday['index_' + str(int(todayIndex) - 1)] = no_acc_end_date_amt
updateDataYesterday['index_' + todayIndex] = temp['index_' + todayIndex]
updateDataYesterday['updated_at'] = time.time()
# pprint(temp)
mongodb.update(MONGO_COLLECTION=common.getSubUser(subUserType, 'Daily_prod_working_day'), WHERE={'team_id': str(groupCell['_id']), 'day_code': key, 'updated_at': {'$gte': yesterday_starttime, '$lte': yesterday_endtime}}, VALUE=updateDataYesterday)
else:
pprint(temp)
mongodb.insert(MONGO_COLLECTION=common.getSubUser(subUserType, 'Daily_prod_working_day'), insert_data=temp)
# checkYesterdayExist =
print('DONE')
except Exception as e:
# log.write(now.strftime("%d/%m/%Y, %H:%M:%S") + ': ' + str(e) + '\n')
pprint(str(e))
| [
"helper.ftp.Ftp",
"helper.mongod.Mongodb",
"helper.common.Common",
"helper.excel.Excel",
"helper.jaccs.Config",
"datetime.datetime.now",
"calendar.monthrange",
"helper.mongodbaggregate.Mongodbaggregate",
"sys.exit",
"datetime.date.today",
"time.time",
"pprint.pprint"
] | [((492, 515), 'helper.mongod.Mongodb', 'Mongodb', (['"""worldfone4xs"""'], {}), "('worldfone4xs')\n", (499, 515), False, 'from helper.mongod import Mongodb\n'), ((528, 552), 'helper.mongod.Mongodb', 'Mongodb', (['"""_worldfone4xs"""'], {}), "('_worldfone4xs')\n", (535, 552), False, 'from helper.mongod import Mongodb\n'), ((562, 569), 'helper.excel.Excel', 'Excel', ([], {}), '()\n', (567, 569), False, 'from helper.excel import Excel\n'), ((580, 588), 'helper.jaccs.Config', 'Config', ([], {}), '()\n', (586, 588), False, 'from helper.jaccs import Config\n'), ((596, 601), 'helper.ftp.Ftp', 'Ftp', ([], {}), '()\n', (599, 601), False, 'from helper.ftp import Ftp\n'), ((612, 620), 'helper.common.Common', 'Common', ([], {}), '()\n', (618, 620), False, 'from helper.common import Common\n'), ((641, 673), 'helper.mongodbaggregate.Mongodbaggregate', 'Mongodbaggregate', (['"""worldfone4xs"""'], {}), "('worldfone4xs')\n", (657, 673), False, 'from helper.mongodbaggregate import Mongodbaggregate\n'), ((751, 799), 'helper.mongod.Mongodb', 'Mongodb', ([], {'MONGODB': '"""worldfone4xs"""', 'WFF_ENV': 'wff_env'}), "(MONGODB='worldfone4xs', WFF_ENV=wff_env)\n", (758, 799), False, 'from helper.mongod import Mongodb\n'), ((812, 861), 'helper.mongod.Mongodb', 'Mongodb', ([], {'MONGODB': '"""_worldfone4xs"""', 'WFF_ENV': 'wff_env'}), "(MONGODB='_worldfone4xs', WFF_ENV=wff_env)\n", (819, 861), False, 'from helper.mongod import Mongodb\n'), ((958, 972), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (970, 972), False, 'from datetime import datetime\n'), ((1570, 1582), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1580, 1582), False, 'from datetime import date, timedelta\n'), ((1773, 1805), 'calendar.monthrange', 'calendar.monthrange', (['year', 'month'], {}), '(year, month)\n', (1792, 1805), False, 'import calendar\n'), ((2621, 2631), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2629, 2631), False, 'import sys\n'), ((10996, 11007), 'time.time', 'time.time', ([], {}), '()\n', (11005, 11007), False, 'import time\n'), ((11421, 11433), 'pprint.pprint', 'pprint', (['temp'], {}), '(temp)\n', (11427, 11433), False, 'from pprint import pprint\n')] |
import re
from pathlib import Path
from typing import Union
from kgextractiontoolbox.document.document import TaggedDocument
class Classifier:
def __init__(self, classification, rule_path: Union[str, Path]):
self.rules = []
self.explanations = []
self.classification = classification
self.rules, self.rules_org_str = Classifier.read_ruleset(rule_path)
def classify_document(self, doc: TaggedDocument, consider_sections=False):
"""
Classify whether a document text content matches on of the classifier rules
:param doc: the document to classify
:param consider_sections: should sections be considered?
:return:
"""
matches = []
for content, offset in doc.iterate_over_text_elements(sections=consider_sections):
for idx, rule in enumerate(self.rules):
rule_match = []
for idx2, term in enumerate(rule):
# the rules are split by a ' '
rule_org_str = self.rules_org_str[idx][idx2]
term_match = term.search(content)
if not term_match:
break
else:
pos = term_match.regs[0]
pos = str((pos[0] + offset, pos[1] + offset))
rule_match.append(f"{rule_org_str}:{term_match.group(0)}{pos}")
# else will be executed if loop does not encounter a break
else:
matches.append(' AND '.join([rm for rm in rule_match]))
# Execute all rules - if a rule matches then add classification
if matches:
doc.classification[self.classification] = ';'.join([m for m in matches])
return doc
@staticmethod
def compile_entry_to_regex(term):
term = term.strip()
# replace the * operator
term = term.replace("*", "\\w*")
# add that the word must start with the term
term = term + "\\b"
# check if there is the w/1 operator for one arbitrary word
if 'w/' in term:
term_rule = term
for subterm in term.split(' '):
# replace w/1 by only one word
if subterm[0] == 'w' and subterm[1] == '/':
word_count = int(subterm.split('/')[1])
word_sequence = []
for i in range(0, word_count):
word_sequence.append(r'\w*')
word_sequence = ' '.join([w for w in word_sequence])
term_rule = term_rule.replace(subterm, word_sequence)
# set term now to the new rule
term = term_rule
return re.compile(term, re.IGNORECASE)
@staticmethod
def compile_line_to_regex(line: str):
return list([Classifier.compile_entry_to_regex(term) for term in line.split("AND")])
@staticmethod
def read_ruleset(filepath: Union[str, Path]):
ruleset = []
rule_org_str = []
with open(filepath, "r") as f:
for line in f:
rule_string = line.strip()
rule_org_str.append(rule_string.replace('AND ', '').split(' '))
terms = Classifier.compile_line_to_regex(rule_string)
ruleset.append(terms)
return ruleset, rule_org_str
| [
"re.compile"
] | [((2744, 2775), 're.compile', 're.compile', (['term', 're.IGNORECASE'], {}), '(term, re.IGNORECASE)\n', (2754, 2775), False, 'import re\n')] |
#!/usr/bin/env python
from ism_pkg.kernels.gaussian import gaussian
from ism_pkg.optimizer.full_ism import full_ism
from ism_pkg.optimizer.stochastic_ism import stochastic_ism
from ism_pkg.tools.HSIC_IDS_optimizer import HSIC_IDS_optimizer
from ism_pkg.tools.terminal_print import *
from ism_pkg.tools.rff_layer import *
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.kernel_approximation import RBFSampler
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
class ism():
def __init__(self, stochastic=False, var_percentage=0.9, debug_mode=True, max_repeat=200, batch_size_per_class=10):
self.db = {}
self.db['stochastic'] = stochastic
self.db['batch_size_per_class'] = batch_size_per_class
self.db['max_repeat'] = max_repeat
self.db['var_percentage'] = var_percentage
self.db['debug_mode'] = debug_mode
self.db['convergence_method'] = 'use_eigen_values' # use_eigen_values is faster but gradient might not = 0 and use_W is slower but more accurate with gradient = 0
self.db['kernel'] = gaussian(self.db) # try : gaussian, polynomial, squared, linear
self.rff = None
if stochastic: self.db['optimizer'] = stochastic_ism(self.db)
else: self.db['optimizer'] = full_ism(self.db)
print('stochastic:%s, class batch size:%d'%(self.db['stochastic'], self.db['batch_size_per_class']))
def __del__(self):
pass
#del self.db['kernel']
#del self.db['optimizer']
#self.db.clear()
def fit(self, X, Y, use_RFF=False):
db = self.db
Optimizer = db['optimizer']
Optimizer.initialize(X,Y)
Optimizer.update_f(X, Y)
self.W = self.db['W']
self.σ = self.db['kernel'].σ
if use_RFF: self.rff = rff_layer(X@self.W, self.σ)
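    # Return the RFF embedding of X when RFF_out is True; otherwise the projection X@W plus the kernel width σ.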
def predict(self, X, RFF_out=False):
if RFF_out:
return self.rff.apply_layer(X)
else:
return [X@self.W, self.σ]
def classify_fit(self, X, Y, σ, W=None, classifier_type='HSIC_optimizer', class_batch_size=200, n_components=600): #classifier_type='HSIC_optimizer', or 'svm'
print('\nClassifier: %s'%classifier_type)
if W is not None: X = X@W
self.classifier_type = classifier_type
        γ = 1/(2*σ**2)  # RBF gamma derived from the kernel width σ
if classifier_type == 'svm':
svm = SVC(gamma=γ)
svm.fit(X,Y)
self.classifier = svm
if classifier_type == 'svm_stochastic':
self.ℱₓ = RBFSampler(gamma=γ, random_state=1, n_components=n_components)
Φx = self.ℱₓ.fit_transform(X)
clf = SGDClassifier(max_iter=5000, tol=1e-3, verbose=False)
clf.fit(Φx, Y)
self.classifier = clf
if classifier_type == 'HSIC_optimizer':
Ƕ = HSIC_IDS_optimizer(σ, self.db['stochastic'], class_batch_size)
Ƕ.fit(X, Y)
self.classifier = Ƕ
return self.classifier
def classify_predict(self, X, Y, W=None):
if W is not None: X = X@W
#if self.rff is not None: X = self.predict(X, RFF_out=True)
if self.classifier_type == 'svm_stochastic':
X = self.ℱₓ.fit_transform(X)
Cf = self.classifier
Ŷ = Cf.predict(X)
Ŷ = LabelEncoder().fit_transform(Ŷ)
Y = LabelEncoder().fit_transform(Y)
ᘔ = accuracy_score(Y, Ŷ)
return [Ŷ, ᘔ]
| [
"sklearn.linear_model.SGDClassifier",
"sklearn.preprocessing.LabelEncoder",
"sklearn.kernel_approximation.RBFSampler",
"ism_pkg.optimizer.full_ism.full_ism",
"ism_pkg.optimizer.stochastic_ism.stochastic_ism",
"ism_pkg.kernels.gaussian.gaussian",
"ism_pkg.tools.HSIC_IDS_optimizer.HSIC_IDS_optimizer",
"... | [((1180, 1197), 'ism_pkg.kernels.gaussian.gaussian', 'gaussian', (['self.db'], {}), '(self.db)\n', (1188, 1197), False, 'from ism_pkg.kernels.gaussian import gaussian\n'), ((3130, 3150), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y', 'Ŷ'], {}), '(Y, Ŷ)\n', (3144, 3150), False, 'from sklearn.metrics import accuracy_score\n'), ((1306, 1329), 'ism_pkg.optimizer.stochastic_ism.stochastic_ism', 'stochastic_ism', (['self.db'], {}), '(self.db)\n', (1320, 1329), False, 'from ism_pkg.optimizer.stochastic_ism import stochastic_ism\n'), ((1361, 1378), 'ism_pkg.optimizer.full_ism.full_ism', 'full_ism', (['self.db'], {}), '(self.db)\n', (1369, 1378), False, 'from ism_pkg.optimizer.full_ism import full_ism\n'), ((2290, 2302), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': 'γ'}), '(gamma=γ)\n', (2293, 2302), False, 'from sklearn.svm import SVC\n'), ((2405, 2467), 'sklearn.kernel_approximation.RBFSampler', 'RBFSampler', ([], {'gamma': 'γ', 'random_state': '(1)', 'n_components': 'n_components'}), '(gamma=γ, random_state=1, n_components=n_components)\n', (2415, 2467), False, 'from sklearn.kernel_approximation import RBFSampler\n'), ((2507, 2561), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'max_iter': '(5000)', 'tol': '(0.001)', 'verbose': '(False)'}), '(max_iter=5000, tol=0.001, verbose=False)\n', (2520, 2561), False, 'from sklearn.linear_model import SGDClassifier\n'), ((2655, 2717), 'ism_pkg.tools.HSIC_IDS_optimizer.HSIC_IDS_optimizer', 'HSIC_IDS_optimizer', (['σ', "self.db['stochastic']", 'class_batch_size'], {}), "(σ, self.db['stochastic'], class_batch_size)\n", (2673, 2717), False, 'from ism_pkg.tools.HSIC_IDS_optimizer import HSIC_IDS_optimizer\n'), ((3053, 3067), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3065, 3067), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3090, 3104), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3102, 3104), False, 'from sklearn.preprocessing import LabelEncoder\n')] |
import jinja2
class TimeDelay(object):
def __init__(self, name, dw, aw):
self.aw = aw
self.dw = dw
context = {'name': name, 'dw': dw, 'aw': aw}
loader = jinja2.PackageLoader('controlinverilog', 'templates')
env = jinja2.Environment(loader=loader)
template = env.get_template('delay.v')
self.verilog = template.render(context)
def print_summary(self):
print('Delay formula (taps): <delay> - 1')
print('Delay formula (s): (<delay> - 1)/<fexe>')
print('Max delay (s): %d/<fexe>' % (2 ** self.aw))
print('Data word length: %d' % self.dw)
def print_verilog(self, filename=None):
if filename is None:
print(self.verilog)
else:
with open(filename, 'w') as text_file:
text_file.write(self.verilog)
| [
"jinja2.Environment",
"jinja2.PackageLoader"
] | [((195, 248), 'jinja2.PackageLoader', 'jinja2.PackageLoader', (['"""controlinverilog"""', '"""templates"""'], {}), "('controlinverilog', 'templates')\n", (215, 248), False, 'import jinja2\n'), ((263, 296), 'jinja2.Environment', 'jinja2.Environment', ([], {'loader': 'loader'}), '(loader=loader)\n', (281, 296), False, 'import jinja2\n')] |
# -*- coding: utf-8 -*-
## Design notes & thoughts ##############################
##
## Goals:
## * Support nested dict and array objects.
## * Access elements & sub-trees via "obj['key.key.index']"
## * Access via JSON-Path
## - https://jsonpath-rw.readthedocs.io/en/latest/
## * Serializable to JSON
## * De-serializable from YAML & JSON
## * Support serialization for types:
## - ISO8601 timestamps -> Py datetime
## - UUID -> Py uuid.UUID
##
## Inspiration:
## * https://configtree.readthedocs.io/en/latest/
## * http://www.kr41.net/2015/06-15-about_configtree.html
##
##
from collections.abc import Mapping, MutableMapping
from collections.abc import Sequence, MutableSequence
## Note - there should be an immutable base class, but the
## code using class 'Config' does not treat the Config as
## an immutable object -- fix and replace class Config with
## ConfigDO, and then make it a 'Mapping' and derive a
## 'State' class from MutableMapping.
class PropPath:
def __init__(self, path):
#
# Validate path string, split and put in []
#
        self.path = [path]
    def next(self):
        return self.path[0]
    def next_is_digit(self):
        return isinstance(self.path[0], int)
    def remainder(self):
if len(self.path) > 1:
return self.path[1:]
else:
return None
import re
key_regex = r'([\w\-]+)'
idx_regex = r'((\[\d+\])|(\d+))'
sfx_regex = r'(\.'+key_regex+r')|(\.?'+idx_regex+r')'
k_regex = r'^(?P<key>'+key_regex+r')(?P<sk>('+sfx_regex+r')*)$'
kre = re.compile(k_regex)
i_regex = r'^(?P<key>'+idx_regex+r')(?P<sk>('+sfx_regex+r')*)$'
ire = re.compile(i_regex)
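# Split a key such as 'a.b[0]' or 'a.b.0' into its first component (a str key or an int index)
# and the remaining sub-key string (empty when nothing is left).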
def splitkey(key):
if isinstance(key, int):
return (key, None)
# else
m = ire.fullmatch(key)
if m is not None:
k = m.group('key')
sk = m.group('sk').lstrip('.')
if k is None:
raise KeyError(key)
return (int(k.strip('[]')), sk)
# else
m = kre.fullmatch(key)
if m is not None:
k = m.group('key')
sk = m.group('sk').lstrip('.')
if k is None:
raise KeyError(key)
return (k, sk)
# else
raise KeyError("Invalid key, '{}'".format(key))
def mksubkeytype(subkey):
(k, sk) = splitkey(subkey)
if isinstance(k, int):
return PropList()
else:
return PropMap()
class PropMap(MutableMapping):
def __init__(self):
# print("PropMap.__init__")
self._data = dict()
def load(self, *args, **kwargs):
# print("PropMap.load()")
self._data = dict()
for a in args:
if isinstance(a, Mapping):
for (k,v) in a.items():
self.__setitem__(k,v)
elif not isinstance(a, str) and isinstance(a, Sequence):
for (k,v) in a:
self.__setitem__(k,v)
for (k,v) in kwargs:
self.__setitem__(k,v)
return self
def __getitem__(self, key):
# print("getting: {}".format(key))
(k, sk) = splitkey(key)
if sk:
#print("get recursing: k='{}', sk='{}'".format(k, sk))
return self._data[k][sk]
return self._data[k]
def __setitem__(self, key, value):
# print("PropMap._setitem: {} = {}".format(key, value))
(k, sk) = splitkey(key)
v = value
if isinstance(value, Mapping):
v = PropMap().load(value)
if not isinstance(value, str) and isinstance(value, Sequence):
v = PropList().load(value)
if sk:
if k not in self._data:
self._data[k] = mksubkeytype(sk)
#print("created sub-element")
self._data[k].__setitem__(sk,v)
else:
self._data[k] = v
def __delitem__(self, key):
(k, sk) = splitkey(key)
if sk:
del self._data[k][sk]
else:
del self._data[k]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __repr__(self):
return dict.__repr__(dict(self))
def _flatten(self, prefix=''):
flat = dict()
for (k, v) in self._data.items():
key = k
if len(prefix) > 0:
key = ".".join((prefix, k))
if isinstance(v, PropMap):
flat.update( v._flatten(key) )
elif isinstance(v, PropList):
flat.update( v._flatten(key) )
else:
flat[key] = v
return flat
def as_properties(self):
return self._flatten()
def as_yaml(self):
#
# TODO
#
return ""
def as_json(self):
#
# TODO
#
return ""
def dump(self, prefix=''):
for (k, v) in self._data.items():
key = k
if len(prefix) > 0:
key = ".".join((prefix, k))
if isinstance(v, PropMap):
v.dump(key)
else:
print("{}: {}".format(key, v))
class PropList(MutableSequence):
def __init__(self):
# print("PropList.__init__")
self._data = list()
def load(self, *args):
# print("PropList.load()")
self._data = list()
for a in args:
v = a
if isinstance(a, Mapping):
v = PropMap().load(a)
self._data.append(v)
elif not isinstance(a, str) and isinstance(a, Sequence):
for ii in a:
self._append(ii)
return self
def __getitem__(self, key):
# print("getting: {}".format(key))
(k, sk) = splitkey(key)
if sk:
#print("getting recursing: k='{}', sk='{}'".format(k, sk))
return self._data[k][sk]
return self._data[k]
def __setitem__(self, key, value):
# print("setting: {}".format(key))
(k, sk) = splitkey(key)
if not isinstance(k, int):
raise KeyError("Key is not an int")
while len(self._data) < k+1:
self._data.append(None)
v = value
if isinstance(value, Mapping):
v = PropMap().load(value)
elif not isinstance(value, str) and isinstance(value, Sequence):
v = PropList().load(value)
if sk:
            if self._data[k] is None:
                self._data[k] = mksubkeytype(sk)
#print("created sub-element")
self._data[k].__setitem__(sk,v)
else:
self._data[k] = v
def insert(self, key, value):
(k, sk) = splitkey(key)
if not isinstance(k, int):
raise KeyError("Key is not an int")
if sk:
if k >= len(self._data):
k = len(self._data)
self._data.insert(k, None)
            self.__setitem__("{}.{}".format(k,sk), value)
else:
self._data.insert(k, value)
def __delitem__(self, key):
(k, sk) = splitkey(key)
if sk:
del self._data[k][sk]
else:
del self._data[k]
def _append(self, value):
idx = len(self._data)
self._data.append(None)
self.__setitem__(idx, value)
def __len__(self):
return len(self._data)
def __repr__(self):
return list.__repr__(list(self))
def _flatten(self, prefix=''):
flat = dict()
ii = 0
for v in self._data:
key = "{}[{}]".format(prefix, ii)
ii += 1
if isinstance(v, PropMap):
flat.update( v._flatten(key) )
elif isinstance(v, PropList):
flat.update( v._flatten(key) )
else:
flat[key] = v
return flat
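# A minimal usage sketch (added for illustration; the data below is hypothetical):
# nested mappings and sequences are wrapped on load and addressed with dotted/indexed keys.
if __name__ == '__main__':
    pm = PropMap().load({'server': {'hosts': ['alpha', 'beta'], 'port': 8080}})
    print(pm['server.hosts[1]'])   # -> 'beta'
    print(pm['server.port'])       # -> 8080
    print(pm.as_properties())      # flattened {'server.hosts[0]': 'alpha', ...} view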
| [
"re.compile"
] | [((1617, 1636), 're.compile', 're.compile', (['k_regex'], {}), '(k_regex)\n', (1627, 1636), False, 'import re\n'), ((1707, 1726), 're.compile', 're.compile', (['i_regex'], {}), '(i_regex)\n', (1717, 1726), False, 'import re\n')] |
import os
import logging
from timeit import default_timer as timer
import numpy as np
from automon import AutomonNode
from automon.zmq_socket_utils import init_client_socket
from function_def import func_inner_product
logging.getLogger('automon').setLevel(logging.INFO)
def time_to_wait_for_next_sample_milliseconds(start_time, num_received_samples):
return (num_received_samples - (timer() - start_time)) * 1000
NODE_IDX = int(os.getenv('NODE_IDX', '0')) # Change the node index for different nodes
node = AutomonNode(idx=NODE_IDX, func_to_monitor=func_inner_product, d=40)
# Open a client socket and connect to the server socket. Wait for 'start' message from the server.
client_socket = init_client_socket(NODE_IDX, host=os.getenv('HOST', '127.0.0.1'), port=6400)
# Wait for a message from the coordinator (local data requests or local constraint updates) and send the reply to the coordinator.
# Read new data samples every 1 second and update the node local vector. Report violations to the coordinator.
start = timer()
num_data_samples = 0
while True:
if time_to_wait_for_next_sample_milliseconds(start, num_data_samples) <= 0:
# Time to read the next data sample
data = np.random.normal(loc=1, scale=0.1, size=(40,))
message_violation = node.update_data(data)
if message_violation:
client_socket.send(message_violation)
num_data_samples += 1
event = client_socket.poll(timeout=time_to_wait_for_next_sample_milliseconds(start, num_data_samples))
if event != 0:
# Received a message from the coordinator before the timeout has reached
message = client_socket.recv()
reply = node.parse_message(message)
if reply:
client_socket.send(reply)
| [
"logging.getLogger",
"numpy.random.normal",
"os.getenv",
"timeit.default_timer",
"automon.AutomonNode"
] | [((516, 583), 'automon.AutomonNode', 'AutomonNode', ([], {'idx': 'NODE_IDX', 'func_to_monitor': 'func_inner_product', 'd': '(40)'}), '(idx=NODE_IDX, func_to_monitor=func_inner_product, d=40)\n', (527, 583), False, 'from automon import AutomonNode\n'), ((1027, 1034), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1032, 1034), True, 'from timeit import default_timer as timer\n'), ((436, 462), 'os.getenv', 'os.getenv', (['"""NODE_IDX"""', '"""0"""'], {}), "('NODE_IDX', '0')\n", (445, 462), False, 'import os\n'), ((218, 246), 'logging.getLogger', 'logging.getLogger', (['"""automon"""'], {}), "('automon')\n", (235, 246), False, 'import logging\n'), ((733, 763), 'os.getenv', 'os.getenv', (['"""HOST"""', '"""127.0.0.1"""'], {}), "('HOST', '127.0.0.1')\n", (742, 763), False, 'import os\n'), ((1207, 1253), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(1)', 'scale': '(0.1)', 'size': '(40,)'}), '(loc=1, scale=0.1, size=(40,))\n', (1223, 1253), True, 'import numpy as np\n'), ((389, 396), 'timeit.default_timer', 'timer', ([], {}), '()\n', (394, 396), True, 'from timeit import default_timer as timer\n')] |
import os
import re
import shutil
from datetime import date
customHeader = """
---
layout: post
title: {}
categories: {}
excerpt: {}
---
"""
def ModifiedMarkDownFile():
#Loop each file
blog = [filename for filename in os.listdir('notion-backup') if filename.startswith("Blog") and filename.endswith(".md")][0]
os.chdir('notion-backup/{}'.format(blog.replace('.md','')))
for file in os.listdir():
if file.endswith('.md'):
notionMarkDownFile = file
#Read Front
lines = []
with open(notionMarkDownFile, 'r') as f:
lines = f.readlines()
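    # Line 5 of the exported file is expected to be a table row: | title | categories | excerpt | date |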
data = lines[4].split('|')
data = data[1:-1]
title = data[0]
title = title[1:-1]
categories = data[1]
categories = categories[1:-1]
excerpt = data[2]
excerpt = excerpt[1:-1]
date = data[3]
date = date[1:-1]
#New File Name
fileName = title.replace(' ', '_').lower()
newMarkdownFileName="{}-{}.md".format(date, fileName)
#Clean Header
imagesOrigen = notionMarkDownFile.replace('.md','')
notionMarkDownFolder = imagesOrigen.replace(' ', '%20')
newHeader = customHeader.format(title, categories, excerpt)
with open(notionMarkDownFile, 'w') as f:
f.write(newHeader)
for number, line in enumerate(lines[5:]):
if line.startswith('!['):
line = line.replace(notionMarkDownFolder, 'images')
f.write(line)
#Rename file
os.rename(notionMarkDownFile, newMarkdownFileName)
    #Move Resources
shutil.move(newMarkdownFileName, '../../_posts/{}'.format(newMarkdownFileName))
if os.path.isdir(imagesOrigen):
allImages = os.listdir(imagesOrigen)
for image in allImages:
shutil.move(imagesOrigen + '/' + image, '../../images/' + image)
#Remove md file
shutil.rmtree('../../notion-backup')
if __name__ == '__main__':
ModifiedMarkDownFile()
| [
"os.listdir",
"shutil.move",
"os.rename",
"os.path.isdir",
"shutil.rmtree"
] | [((401, 413), 'os.listdir', 'os.listdir', ([], {}), '()\n', (411, 413), False, 'import os\n'), ((2095, 2131), 'shutil.rmtree', 'shutil.rmtree', (['"""../../notion-backup"""'], {}), "('../../notion-backup')\n", (2108, 2131), False, 'import shutil\n'), ((1677, 1727), 'os.rename', 'os.rename', (['notionMarkDownFile', 'newMarkdownFileName'], {}), '(notionMarkDownFile, newMarkdownFileName)\n', (1686, 1727), False, 'import os\n'), ((1863, 1890), 'os.path.isdir', 'os.path.isdir', (['imagesOrigen'], {}), '(imagesOrigen)\n', (1876, 1890), False, 'import os\n'), ((229, 256), 'os.listdir', 'os.listdir', (['"""notion-backup"""'], {}), "('notion-backup')\n", (239, 256), False, 'import os\n'), ((1920, 1944), 'os.listdir', 'os.listdir', (['imagesOrigen'], {}), '(imagesOrigen)\n', (1930, 1944), False, 'import os\n'), ((2005, 2069), 'shutil.move', 'shutil.move', (["(imagesOrigen + '/' + image)", "('../../images/' + image)"], {}), "(imagesOrigen + '/' + image, '../../images/' + image)\n", (2016, 2069), False, 'import shutil\n')] |
from dataclasses import dataclass
from raytracer.tuple import tuple, point, vector, magnitude, normalize, dot, cross
from raytracer.util import equal
@dataclass
class Projectile:
position: tuple # point
velocity: tuple # vector
@dataclass
class Environment:
gravity: tuple # vector
wind: tuple # vector
def tick(env, proj):
position = proj.position + proj.velocity
velocity = proj.velocity + env.gravity + env.wind
return Projectile(position, velocity)
# projectile starts one unit above the origin
p = Projectile(point(0, 1, 0), normalize(vector(1, 1, 0)))
# gravity -0.1 unit/tick, and wind is -0.01 unit/tick
e = Environment(vector(0, -0.1, 0), vector(-0.01, 0, 0))
i = 0
while p.position.y > 0:
p = tick(e, p)
print(f"iteration {i} {p}")
i += 1 | [
"raytracer.tuple.vector",
"raytracer.tuple.point"
] | [((554, 568), 'raytracer.tuple.point', 'point', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (559, 568), False, 'from raytracer.tuple import tuple, point, vector, magnitude, normalize, dot, cross\n'), ((669, 687), 'raytracer.tuple.vector', 'vector', (['(0)', '(-0.1)', '(0)'], {}), '(0, -0.1, 0)\n', (675, 687), False, 'from raytracer.tuple import tuple, point, vector, magnitude, normalize, dot, cross\n'), ((689, 708), 'raytracer.tuple.vector', 'vector', (['(-0.01)', '(0)', '(0)'], {}), '(-0.01, 0, 0)\n', (695, 708), False, 'from raytracer.tuple import tuple, point, vector, magnitude, normalize, dot, cross\n'), ((580, 595), 'raytracer.tuple.vector', 'vector', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (586, 595), False, 'from raytracer.tuple import tuple, point, vector, magnitude, normalize, dot, cross\n')] |
# Generated by Django 3.1.7 on 2021-04-29 15:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rbac', '0002_auto_20210426_2345'),
('patient', '0006_auto_20210429_1431'),
]
operations = [
migrations.RenameModel(
old_name='PatientURLPermissions',
new_name='PatientURLPermission',
),
]
| [
"django.db.migrations.RenameModel"
] | [((285, 379), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""PatientURLPermissions"""', 'new_name': '"""PatientURLPermission"""'}), "(old_name='PatientURLPermissions', new_name=\n 'PatientURLPermission')\n", (307, 379), False, 'from django.db import migrations\n')] |
import os
from celery import Celery
from celery.schedules import crontab
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'flatgov.dev')
app = Celery('flatgov')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
app.conf.redbeat_redis_url = os.getenv('REDIS_URL', 'redis://localhost:6379/0')
app.conf.broker_pool_limit = 1
app.conf.broker_heartbeat = None
app.conf.broker_connection_timeout = 30
app.conf.worker_prefetch_multiplier = 1
app.conf.beat_schedule = {
'download_sources': {
'task': 'events.tasks.download_sources',
'schedule': crontab(minute=0, hour=19)
},
'process_sources': {
'task': 'events.tasks.process_sources',
'schedule': crontab(minute=5, hour=19)
},
'update_bills_daily': {
# Triggers bill download
# When this completes and SUCCESS= True,
# The rest of the bill similarity tasks are triggered in uscongress/models.py
'task': 'uscongress.tasks.update_bill_task',
'schedule': crontab(minute=1, hour=1),
# 'options': {'queue': 'bill'}
},
'sap_biden_scraper_daily': {
# this task is independent of other tasks
# It takes less than 1 minute
'task': 'bills.tasks.sap_biden_task',
'schedule': crontab(minute=0, hour=3),
# 'options': {'queue': 'bill'}
},
'committee_report_scraper_daily': {
# this task depends on updates from the update_bills task
# It takes less than 5 minutes
'task': 'bills.tasks.committee_report_scrapy_task',
'schedule': crontab(minute=10, hour=3),
# 'options': {'queue': 'bill'}
},
'update_cbo_scores_daily': {
# this task depends on updates from the update_bills task
# it runs on only the directory of the current congress
# and should take less than 20 minutes
'task': 'bills.tasks.cbo_task',
'schedule': crontab(minute=30, hour=3),
# 'options': {'queue': 'bill'}
},
'update_cosponsor_daily': {
# the update_cosponsor task deletes the cosponsor table and recreates it
# it takes about 1 hour to run
# this is independent of other tasks, since it gets data directly
# from the YAML file in the unitedstates Github repo
'task': 'bills.tasks.update_cosponsor_comm_task',
'schedule': crontab(minute=20, hour=4),
# 'options': {'queue': 'bill'}
},
'crs_scraper_daily': {
# this task depends on updates from the update_bills task
# to link reports to bills
'task': 'bills.tasks.crs_task',
        'schedule': crontab(minute=0, hour=5),
# 'options': {'queue': 'bill'}
},
}
app.conf.timezone = 'UTC' | [
"os.environ.setdefault",
"celery.Celery",
"celery.schedules.crontab",
"os.getenv"
] | [((74, 136), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""flatgov.dev"""'], {}), "('DJANGO_SETTINGS_MODULE', 'flatgov.dev')\n", (95, 136), False, 'import os\n'), ((144, 161), 'celery.Celery', 'Celery', (['"""flatgov"""'], {}), "('flatgov')\n", (150, 161), False, 'from celery import Celery\n'), ((283, 333), 'os.getenv', 'os.getenv', (['"""REDIS_URL"""', '"""redis://localhost:6379/0"""'], {}), "('REDIS_URL', 'redis://localhost:6379/0')\n", (292, 333), False, 'import os\n'), ((601, 627), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(0)', 'hour': '(19)'}), '(minute=0, hour=19)\n', (608, 627), False, 'from celery.schedules import crontab\n'), ((728, 754), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(5)', 'hour': '(19)'}), '(minute=5, hour=19)\n', (735, 754), False, 'from celery.schedules import crontab\n'), ((1031, 1056), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(1)', 'hour': '(1)'}), '(minute=1, hour=1)\n', (1038, 1056), False, 'from celery.schedules import crontab\n'), ((1290, 1315), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(0)', 'hour': '(3)'}), '(minute=0, hour=3)\n', (1297, 1315), False, 'from celery.schedules import crontab\n'), ((1587, 1613), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(10)', 'hour': '(3)'}), '(minute=10, hour=3)\n', (1594, 1613), False, 'from celery.schedules import crontab\n'), ((1931, 1957), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(30)', 'hour': '(3)'}), '(minute=30, hour=3)\n', (1938, 1957), False, 'from celery.schedules import crontab\n'), ((2371, 2397), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(20)', 'hour': '(4)'}), '(minute=20, hour=4)\n', (2378, 2397), False, 'from celery.schedules import crontab\n'), ((2633, 2658), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(0)', 'hour': '(5)'}), '(minute=0, hour=5)\n', (2640, 2658), False, 'from celery.schedules import crontab\n'), ((2680, 2705), 'celery.schedules.crontab', 'crontab', ([], {'minute': '(0)', 'hour': '(5)'}), '(minute=0, hour=5)\n', (2687, 2705), False, 'from celery.schedules import crontab\n')] |
from sqlalchemy import Column, Integer, Numeric
from sqlalchemy.ext.declarative import declared_attr
from .pengaturan_base import MxPengaturanBase
class MxPengaturan(MxPengaturanBase):
# TODO: Set precision & scale for Numerics
@declared_attr
def bpjs_ketenagakerjaan_perusahaan(cls):
return Column(Numeric, nullable=False)
@declared_attr
def bpjs_ketenagakerjaan_karyawan(cls):
return Column(Numeric, nullable=False)
@declared_attr
def bpjs_kesehatan_perusahaan(cls):
return Column(Numeric, nullable=False)
@declared_attr
def bpjs_kesehatan_karyawan(cls):
return Column(Numeric, nullable=False)
@declared_attr
def upah_minimum(cls):
return Column(Integer, nullable=False)
@declared_attr
def iuran_rumah(cls):
return Column(Integer, nullable=False)
@declared_attr
def iuran_koperasi(cls):
return Column(Integer, nullable=False)
@declared_attr
def pendaftaran_koperasi(cls):
return Column(Integer, nullable=False)
@declared_attr
def uang_makan(cls):
return Column(Integer, nullable=False)
@declared_attr
def uang_transport(cls):
return Column(Integer, nullable=False)
@declared_attr
def koef_absen(cls):
return Column(Numeric, nullable=False)
'''
def mx_init(
self,
*args,
bpjs_ketenagakerjaan_perusahaan,
bpjs_ketenagakerjaan_karyawan,
bpjs_kesehatan_perusahaan,
bpjs_kesehatan_karyawan,
upah_minimum,
iuran_rumah,
iuran_koperasi,
pendaftaran_koperasi,
uang_makan,
uang_transport,
koef_absen,
**kwargs
):
MxPengaturanBase.mx_init(*args, **kwargs)
self.bpjs_ketenagakerjaan_perusahaan = bpjs_ketenagakerjaan_perusahaan
self.bpjs_ketenagakerjaan_karyawan = bpjs_ketenagakerjaan_karyawan
self.bpjs_kesehatan_perusahaan = bpjs_kesehatan_perusahaan
        self.bpjs_kesehatan_karyawan = bpjs_kesehatan_karyawan
self.upah_minimum = upah_minimum
self.iuran_rumah = iuran_rumah
self.iuran_koperasi = iuran_koperasi
self.pendaftaran_koperasi = pendaftaran_koperasi
self.uang_makan = uang_makan
self.uang_transport = uang_transport
self.koef_absen = koef_absen
'''
def mx_reconstruct(self):
MxPengaturanBase.mx_reconstruct(self)
def mx_repr(self):
return '%s' % (MxPengaturanBase.mx_repr(self),)
'''
def mx_repr(self):
return "TODO" % (
self.id, self.nama,
)
'''
def mx_init_repr(self):
ret = MxPengaturanBase.mx_init_repr(self)
ret.update({
'bpjs_ketenagakerjaan_perusahaan': self.bpjs_ketenagakerjaan_perusahaan,
'bpjs_ketenagakerjaan_karyawan': self.bpjs_ketenagakerjaan_karyawan,
'bpjs_kesehatan_perusahaan': self.bpjs_kesehatan_perusahaan,
'bpjs_kesehatan_karyawan': self.bpjs_kesehatan_karyawan,
'upah_minimum': self.upah_minimum,
'iuran_rumah': self.iuran_rumah,
'iuran_koperasi': self.iuran_koperasi,
'pendaftaran_koperasi': self.pendaftaran_koperasi,
'uang_makan': self.uang_makan,
'uang_transport': self.uang_transport,
'koef_absen': self.koef_absen
})
return ret
| [
"sqlalchemy.Column"
] | [((314, 345), 'sqlalchemy.Column', 'Column', (['Numeric'], {'nullable': '(False)'}), '(Numeric, nullable=False)\n', (320, 345), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((425, 456), 'sqlalchemy.Column', 'Column', (['Numeric'], {'nullable': '(False)'}), '(Numeric, nullable=False)\n', (431, 456), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((532, 563), 'sqlalchemy.Column', 'Column', (['Numeric'], {'nullable': '(False)'}), '(Numeric, nullable=False)\n', (538, 563), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((637, 668), 'sqlalchemy.Column', 'Column', (['Numeric'], {'nullable': '(False)'}), '(Numeric, nullable=False)\n', (643, 668), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((731, 762), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (737, 762), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((824, 855), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (830, 855), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((920, 951), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (926, 951), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((1022, 1053), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (1028, 1053), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((1114, 1145), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (1120, 1145), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((1210, 1241), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (1216, 1241), False, 'from sqlalchemy import Column, Integer, Numeric\n'), ((1302, 1333), 'sqlalchemy.Column', 'Column', (['Numeric'], {'nullable': '(False)'}), '(Numeric, nullable=False)\n', (1308, 1333), False, 'from sqlalchemy import Column, Integer, Numeric\n')] |
"""
socat - UNIX-CONNECT:repl.sock
import sys, threading, pdb, functools
def _attach(repl):
frame = sys._current_frames()[threading.enumerate()[0].ident]
debugger = pdb.Pdb(
stdin=repl.conn.makefile('r'),
stdout=repl.conn.makefile('w'),
)
debugger.reset()
while frame:
frame.f_trace = debugger.trace_dispatch
debugger.botframe = frame
frame = frame.f_back
debugger.set_step()
frame.f_trace = debugger.trace_dispatch
"""
import ast
import codeop
import contextlib
import errno
import functools
import logging
import os
import socket as socket_
import sys
import threading
import traceback
import types
import typing as ta
import weakref
from . import check
log = logging.getLogger(__name__)
class DisconnectException(Exception):
pass
class InteractiveSocketConsole:
"""code.InteractiveConsole but just different enough to not be worth subclassing."""
ENCODING = 'utf-8'
def __init__(
self,
conn: socket_.socket,
locals: ta.MutableMapping = None,
filename: str = '<console>'
) -> None:
super().__init__()
if locals is None:
locals = {
'__name__': '__console__',
'__doc__': None,
'__console__': self,
}
self._conn = conn
self._locals = locals
self._filename = filename
self._compiler = codeop.CommandCompiler()
self._buffer: ta.List[str] = []
self._count = 0
self._write_count = -1
def reset_buffer(self) -> None:
self._buffer = []
@property
def conn(self) -> socket_.socket:
return self._conn
CPRT = 'Type "help", "copyright", "credits" or "license" for more information.'
def interact(self, banner: str = None, exitmsg: str = None) -> None:
log.info(f'Console {id(self)} on thread {threading.current_thread().ident} interacting')
try:
ps1 = getattr(sys, 'ps1', '>>> ')
ps2 = getattr(sys, 'ps2', '... ')
if banner is None:
self.write(
'Python %s on %s\n%s\n(%s)\n' %
(sys.version, sys.platform, self.CPRT, self.__class__.__name__))
elif banner:
self.write('%s\n' % (str(banner),))
more = False
while True:
try:
try:
line = self.raw_input(ps2 if more else ps1)
except EOFError:
self.write('\n')
break
else:
more = self.push_line(line)
except KeyboardInterrupt:
self.write('\nKeyboardInterrupt\n')
self.reset_buffer()
more = False
if exitmsg is None:
self.write('now exiting %s...\n' % self.__class__.__name__)
elif exitmsg != '':
self.write('%s\n' % exitmsg)
except DisconnectException:
pass
except OSError as oe:
if oe.errno == errno.EBADF:
pass
finally:
log.info(f'Console {id(self)} on thread {threading.current_thread().ident} finished')
def push_line(self, line: str) -> bool:
self._buffer.append(line)
source = '\n'.join(self._buffer)
more = self.run_source(source, self._filename)
if not more:
self.reset_buffer()
return more
def raw_input(self, prompt: str = '') -> str:
self.write(prompt)
buf = b''
while True:
b = self._conn.recv(1)
if not b:
raise DisconnectException
if b == b'\n':
break
buf += b
return buf.decode(self.ENCODING)
def write(self, data: str) -> None:
self._conn.send(data.encode(self.ENCODING))
def compile(
self,
source: ta.Union[str, ast.AST],
filename: str = '<input>',
symbol: str = 'single'
) -> ta.Optional[types.CodeType]:
if isinstance(source, ast.AST):
return self._compiler.compiler(source, filename, symbol)
else:
return self._compiler(source, filename, symbol)
def run_source(
self,
source: ta.Union[str, ast.AST],
filename: str = '<input>',
symbol: str = 'single',
) -> bool:
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1 (incorrect)
self.show_syntax_error(filename)
return False
if code is None:
# Case 2 (incomplete)
return True
# Case 3 (complete)
try:
node = ast.parse(source)
except (OverflowError, SyntaxError, ValueError):
return True
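        # If the source ends with a bare expression, rewrite it into an assignment to _<count> so run_code can echo its value (REPL-style behaviour).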
if isinstance(node, ast.Module) and node.body and isinstance(node.body[-1], ast.Expr):
expr = node.body[-1]
source = ast.Interactive(
[
*node.body[:-1],
ast.Assign(
[ast.Name(
f'_{self._count}',
ast.Store(),
lineno=expr.lineno,
col_offset=expr.col_offset,
)],
expr.value,
lineno=expr.lineno,
col_offset=expr.col_offset,
)
],
)
ast.fix_missing_locations(source)
self._write_count = self._count
code = self.compile(source, filename, symbol)
self.run_code(code)
return False
def run_code(self, code: types.CodeType) -> None:
try:
exec(code, self._locals)
except SystemExit:
raise
except Exception:
self.show_traceback()
else:
if self._count == self._write_count:
self.write(repr(self._locals[f'_{self._count}']))
self.write('\n')
self._count += 1
def show_traceback(self) -> None:
sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
sys.last_traceback = last_tb
try:
lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
self.write(''.join(lines))
finally:
last_tb = ei = None
def show_syntax_error(self, filename: str = None) -> None:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
lines = traceback.format_exception_only(type, value)
self.write(''.join(lines))
class ReplServer:
CONNECTION_THREAD_NAME = 'ReplServerConnection'
def __init__(
self,
path: str,
*,
file_mode: int = None,
poll_interval: float = 0.5,
exit_timeout: float = 10.0,
) -> None:
super().__init__()
self._path = path
self._file_mode = file_mode
self._poll_interval = poll_interval
self._exit_timeout = exit_timeout
self._socket: socket_.socket = None
self._is_running = False
self._consoles_by_threads: ta.MutableMapping[threading.Thread, InteractiveSocketConsole] = weakref.WeakKeyDictionary() # noqa
self._is_shut_down = threading.Event()
self._should_shutdown = False
def __enter__(self):
check.state(not self._is_running)
check.state(not self._is_shut_down.is_set())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._is_shut_down.is_set():
self.shutdown(True, self._exit_timeout)
def run(self) -> None:
check.state(not self._is_running)
check.state(not self._is_shut_down.is_set())
if os.path.exists(self._path):
os.unlink(self._path)
self._socket = socket_.socket(socket_.AF_UNIX, socket_.SOCK_STREAM)
self._socket.settimeout(self._poll_interval)
self._socket.bind(self._path)
with contextlib.closing(self._socket):
self._socket.listen(1)
log.info(f'Repl server listening on file {self._path}')
self._is_running = True
try:
while not self._should_shutdown:
try:
conn, _ = self._socket.accept()
except socket_.timeout:
continue
log.info(f'Got repl server connection on file {self._path}')
def run(conn):
with contextlib.closing(conn):
variables = globals().copy()
console = InteractiveSocketConsole(conn, variables)
variables['__console__'] = console
log.info(
f'Starting console {id(console)} repl server connection '
f'on file {self._path} '
f'on thread {threading.current_thread().ident}'
)
self._consoles_by_threads[threading.current_thread()] = console
console.interact()
thread = threading.Thread(
target=functools.partial(run, conn),
daemon=True,
name=self.CONNECTION_THREAD_NAME)
thread.start()
for thread, console in self._consoles_by_threads.items():
try:
console.conn.close()
except Exception:
log.exception('Error shutting down')
for thread in self._consoles_by_threads.keys():
try:
thread.join(self._exit_timeout)
except Exception:
log.exception('Error shutting down')
os.unlink(self._path)
finally:
self._is_shut_down.set()
self._is_running = False
def shutdown(self, block: bool = False, timeout: float = None) -> None:
self._should_shutdown = True
if block:
self._is_shut_down.wait(timeout=timeout)
def _main():
with ReplServer('repl.sock') as repl_server:
repl_server.run()
if __name__ == '__main__':
_main()
| [
"logging.getLogger",
"weakref.WeakKeyDictionary",
"traceback.format_exception_only",
"os.path.exists",
"threading.current_thread",
"socket.socket",
"traceback.format_exception",
"threading.Event",
"sys.exc_info",
"functools.partial",
"codeop.CommandCompiler",
"os.unlink",
"contextlib.closing... | [((735, 762), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (752, 762), False, 'import logging\n'), ((1453, 1477), 'codeop.CommandCompiler', 'codeop.CommandCompiler', ([], {}), '()\n', (1475, 1477), False, 'import codeop\n'), ((6400, 6414), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6412, 6414), False, 'import sys\n'), ((6721, 6735), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6733, 6735), False, 'import sys\n'), ((7339, 7383), 'traceback.format_exception_only', 'traceback.format_exception_only', (['type', 'value'], {}), '(type, value)\n', (7370, 7383), False, 'import traceback\n'), ((8050, 8077), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (8075, 8077), False, 'import weakref\n'), ((8115, 8132), 'threading.Event', 'threading.Event', ([], {}), '()\n', (8130, 8132), False, 'import threading\n'), ((8596, 8622), 'os.path.exists', 'os.path.exists', (['self._path'], {}), '(self._path)\n', (8610, 8622), False, 'import os\n'), ((8682, 8734), 'socket.socket', 'socket_.socket', (['socket_.AF_UNIX', 'socket_.SOCK_STREAM'], {}), '(socket_.AF_UNIX, socket_.SOCK_STREAM)\n', (8696, 8734), True, 'import socket as socket_\n'), ((4913, 4930), 'ast.parse', 'ast.parse', (['source'], {}), '(source)\n', (4922, 4930), False, 'import ast\n'), ((5720, 5753), 'ast.fix_missing_locations', 'ast.fix_missing_locations', (['source'], {}), '(source)\n', (5745, 5753), False, 'import ast\n'), ((6485, 6542), 'traceback.format_exception', 'traceback.format_exception', (['ei[0]', 'ei[1]', 'last_tb.tb_next'], {}), '(ei[0], ei[1], last_tb.tb_next)\n', (6511, 6542), False, 'import traceback\n'), ((8636, 8657), 'os.unlink', 'os.unlink', (['self._path'], {}), '(self._path)\n', (8645, 8657), False, 'import os\n'), ((8839, 8871), 'contextlib.closing', 'contextlib.closing', (['self._socket'], {}), '(self._socket)\n', (8857, 8871), False, 'import contextlib\n'), ((10792, 10813), 'os.unlink', 'os.unlink', (['self._path'], {}), '(self._path)\n', (10801, 10813), False, 'import os\n'), ((1923, 1949), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (1947, 1949), False, 'import threading\n'), ((3277, 3303), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (3301, 3303), False, 'import threading\n'), ((9385, 9409), 'contextlib.closing', 'contextlib.closing', (['conn'], {}), '(conn)\n', (9403, 9409), False, 'import contextlib\n'), ((10126, 10154), 'functools.partial', 'functools.partial', (['run', 'conn'], {}), '(run, conn)\n', (10143, 10154), False, 'import functools\n'), ((5376, 5387), 'ast.Store', 'ast.Store', ([], {}), '()\n', (5385, 5387), False, 'import ast\n'), ((9962, 9988), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (9986, 9988), False, 'import threading\n'), ((9843, 9869), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (9867, 9869), False, 'import threading\n')] |
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: shapes3d.py
# --- Creation Date: 16-01-2021
# --- Last Modified: Tue 13 Apr 2021 16:55:42 AEST
# --- Author: <NAME>
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Dataset for 3D Shapes
"""
import numpy as np
from torch.utils.data import Dataset
import os
import shutil
import h5py
import zipfile
from PIL import Image
import torch
import random
from datasets.transforms import PairTransform
class shapes3d(Dataset):
"""
Args:
root (str): Root directory of dataset containing 3dshapes.h5
transform (``Transform``, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
"""
def __init__(self, root, transform=None, fixed_shape=None):
super(shapes3d, self).__init__()
self.file = root
self.transform = transform
self.fixed_shape = fixed_shape
self.dataset_zip = self.load_data()
self.data = self.dataset_zip['images'][:] # array shape [480000,64,64,3], uint8 in range(256)
# self.latents_sizes = np.array([3, 6, 40, 32, 32])
self.latents_sizes = np.array([10, 10, 10, 8, 4, 15])
self.latents_bases = np.concatenate((self.latents_sizes[::-1].cumprod()[::-1][1:], np.array([1, ])))
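        # mixed-radix bases: latent_to_index maps a factor tuple to a flat row index via np.dot(latents, latents_bases)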
# self.latents_classes = np.load(os.path.join(self.file, "latents_classes.npy"))
self.latents_classes = self.dataset_zip['labels'][:] # array shape [480000,6], float64
# if fixed_shape is not None:
# self._reduce_data(fixed_shape)
def generative_factors(self, index):
return self.latents_classes[index]
def latent_to_index(self, latents):
return np.dot(latents, self.latents_bases).astype(int)
def index_to_latent(self, index):
return self.latents_classes[index]
def get_img_by_latent(self, latent_code):
"""
Returns the image defined by the latent code
Args:
latent_code (:obj:`list` of :obj:`int`): Latent code of length 6 defining each generative factor
Returns:
Image defined by given code
"""
idx = self.latent_to_index(latent_code)
return self.__getitem__(idx)
def sample_latent(self):
f = []
for factor in self.latents_sizes:
f.append(np.random.randint(0, factor))
return np.array(f)
def load_data(self):
root = os.path.join(self.file, "3dshapes.h5")
dataset_zip = h5py.File(root, 'r')
# data = np.load(root)
return dataset_zip
def __getitem__(self, index):
data = self.data[index]
data = Image.fromarray(data)
labels = self.latents_classes[index]
if self.transform is not None:
data = self.transform(data)
return data, labels[1:]
def __len__(self):
return self.data.shape[0]
class PairShapes3D(shapes3d):
def __init__(self, root, download=False, transform=None, offset=2, max_varied=1, wrapping=False, noise_name=None, output_targets=True, fixed_shape=None):
""" dSprites dataset with symmetry sampling included if output_targets is True.
Args:
root (str): Root directory of dataset containing '3dshapes.h5' or to download it to
transform (``Transform``, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
offset (int, list[int]): Offset of generative factor indices when sampling symmetries
max_varied (int): Max number of symmetries acting per observation
wrapping (bool): Wrap at boundaries or invert action
noise_name (str): Name of noise to add, default None
            output_targets (bool): If True output image pair corresponding to symmetry action. If False, standard 3D Shapes.
"""
super().__init__(root, transform)
self.factor = [0, 1, 2, 3, 5]
self.offset = offset
self.max_varied = max_varied
self.wrapping = wrapping
self.noise_transform = PairTransform(noise_name) if noise_name is not None else None
self.output_targets = output_targets
def get_next_img_by_offset(self, label1, img1, factor):
max_offsets = [10, 10, 10, 8, 1, 15]
new_latents = np.array(list(label1))
offset = torch.zeros(label1.shape).to(img1.device)
for f in factor:
cur_offset = self.offset if self.offset < max_offsets[f] else max_offsets[f]
if torch.rand(1) < 0.5:
cur_offset = cur_offset * -1
if self.wrapping:
new_latents[f] = (label1[f] + cur_offset) % (self.latents_sizes[f])
else:
new_latents[f] = (label1[f] + cur_offset).clip(min=0, max=self.latents_sizes[f]-1)
offset[f] = cur_offset
idx = self.latent_to_index(new_latents)
return idx, offset
def get_next_img_by_rand(self, latent1):
idx = torch.randint(len(self), (1,)).int()
offset = self.index_to_latent(idx)[1:] - latent1
return idx, offset
def __getitem__(self, index):
factor = self.factor
img1, label1 = super().__getitem__(index)
if not self.output_targets:
return img1, label1
if not isinstance(factor, list):
factor = [factor]
else:
factor = random.choices(factor, k=self.max_varied)
# TODO: Always set offset to 1 for val set? So we can eval metrics. Images wouldn't show multi steps though...
if self.offset != -1:
idx, offset = self.get_next_img_by_offset(label1, img1, factor)
else:
idx, offset = self.get_next_img_by_rand(label1)
img2, label2 = super().__getitem__(idx)
if self.noise_transform is not None:
img1, img2 = self.noise_transform(img1, img2)
return (img1, offset), img2
| [
"PIL.Image.fromarray",
"os.path.join",
"h5py.File",
"numpy.array",
"random.choices",
"numpy.dot",
"numpy.random.randint",
"datasets.transforms.PairTransform",
"torch.zeros",
"torch.rand"
] | [((1353, 1385), 'numpy.array', 'np.array', (['[10, 10, 10, 8, 4, 15]'], {}), '([10, 10, 10, 8, 4, 15])\n', (1361, 1385), True, 'import numpy as np\n'), ((2578, 2589), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (2586, 2589), True, 'import numpy as np\n'), ((2631, 2669), 'os.path.join', 'os.path.join', (['self.file', '"""3dshapes.h5"""'], {}), "(self.file, '3dshapes.h5')\n", (2643, 2669), False, 'import os\n'), ((2692, 2712), 'h5py.File', 'h5py.File', (['root', '"""r"""'], {}), "(root, 'r')\n", (2701, 2712), False, 'import h5py\n'), ((2853, 2874), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (2868, 2874), False, 'from PIL import Image\n'), ((4295, 4320), 'datasets.transforms.PairTransform', 'PairTransform', (['noise_name'], {}), '(noise_name)\n', (4308, 4320), False, 'from datasets.transforms import PairTransform\n'), ((5623, 5664), 'random.choices', 'random.choices', (['factor'], {'k': 'self.max_varied'}), '(factor, k=self.max_varied)\n', (5637, 5664), False, 'import random\n'), ((1477, 1490), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1485, 1490), True, 'import numpy as np\n'), ((1905, 1940), 'numpy.dot', 'np.dot', (['latents', 'self.latents_bases'], {}), '(latents, self.latents_bases)\n', (1911, 1940), True, 'import numpy as np\n'), ((2533, 2561), 'numpy.random.randint', 'np.random.randint', (['(0)', 'factor'], {}), '(0, factor)\n', (2550, 2561), True, 'import numpy as np\n'), ((4571, 4596), 'torch.zeros', 'torch.zeros', (['label1.shape'], {}), '(label1.shape)\n', (4582, 4596), False, 'import torch\n'), ((4743, 4756), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4753, 4756), False, 'import torch\n')] |
from collections import OrderedDict
from rest_framework import serializers
from data_import.models import DataFile
from open_humans.models import User
from private_sharing.models import project_membership_visible
class PublicDataFileSerializer(serializers.ModelSerializer):
"""
Serialize a public data file.
"""
metadata = serializers.JSONField()
def to_representation(self, data):
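        # build the response field by field, masking user identity (and skipping username-filtered files) unless project membership is publicly visible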
ret = OrderedDict()
fields = self.get_fields()
query_params = dict(self.context.get("request").query_params)
source = getattr(data, "source")
user_t = getattr(data, "user")
usernames = []
if "username" in query_params:
usernames = query_params["username"]
visible = project_membership_visible(user_t.member, source)
if (user_t.username in usernames) and not visible:
return ret
request = self.context.get("request", None)
for field in fields:
item = getattr(data, str(field))
if isinstance(item, User):
if visible:
member = getattr(user_t, "member")
user = {
"id": getattr(member, "member_id"),
"name": getattr(member, "name"),
"username": getattr(item, "username"),
}
else:
user = {"id": None, "name": None, "username": None}
ret["user"] = user
elif field == "download_url":
ret["download_url"] = item(request)
else:
ret[str(field)] = getattr(data, field)
return ret
class Meta: # noqa: D101
model = DataFile
fields = (
"id",
"basename",
"created",
"download_url",
"metadata",
"source",
"user",
)
| [
"private_sharing.models.project_membership_visible",
"collections.OrderedDict",
"rest_framework.serializers.JSONField"
] | [((344, 367), 'rest_framework.serializers.JSONField', 'serializers.JSONField', ([], {}), '()\n', (365, 367), False, 'from rest_framework import serializers\n'), ((422, 435), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (433, 435), False, 'from collections import OrderedDict\n'), ((750, 799), 'private_sharing.models.project_membership_visible', 'project_membership_visible', (['user_t.member', 'source'], {}), '(user_t.member, source)\n', (776, 799), False, 'from private_sharing.models import project_membership_visible\n')] |
import json
import os
import time
from django.template import Context
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from monocle.settings import settings
class Resource(object):
"""
A JSON compatible response from an OEmbed provider
"""
def __init__(self, url, data=None):
self.url = url
self.created = time.time()
self._data = data or {}
def __getitem__(self, key):
if key == 'cache_age':
return self.ttl
return self._data.get(key, '')
def __setitem__(self, key, value):
if key == 'cache_age':
self.ttl = value
else:
self._data[key] = value
def __contains__(self, key):
return key in self._data
def render(self):
"""
Renders this resource to the template corresponding to this resource type.
The template is rendered with variables ``url`` and ``resource`` that represent
the original requested URL and this resource respectively.
If the resource is considered invalid from :func:`is_valid`, the original
requested URL is returned unless ``RESOURCE_URLIZE_INVALID`` is configured
in :mod:`monocle.settings`. If so, then the original URL is returned hyperlinked
:returns: Rendered oembed content
"""
if not self.is_valid:
if settings.RESOURCE_URLIZE_INVALID:
template_name = 'monocle/link.html'
else:
return self.url
else:
template_name = os.path.join('monocle', '%s.html' % self._data['type'])
template = get_template(template_name)
return mark_safe(template.render(Context({'url': self.url, 'resource': self})))
@property
def is_valid(self):
"""
Perform validation against this resource object. The resource is considered
valid if it meets the following criteria:
* It has oembed response data
* It is a valid oembed resource type
* It has the required attributes based on its type
"""
# We can create resources without valid data
if not self._data:
return False
# Must be a valid type
if self._data.get('type') not in settings.RESOURCE_TYPES:
return False
# Must have required fields
has_required = True
for field in settings.RESOURCE_REQUIRED_ATTRS[self._data['type']]:
has_required = has_required and (field in self._data)
if not has_required:
return False
return True
@property
def is_stale(self):
"""
True of the current timestamp is greater than the sum of the resource's
creation timestamp plus its TTL, False otherwise.
"""
return (time.time() - self.created) > self.ttl
def refresh(self):
"""
Returns a version of this resource that is considered fresh by updating
its internal timestamp to now
"""
self.created = time.time()
return self
@property
def json(self):
"""
A JSON string without any empty or null keys
"""
return json.dumps(dict([(k, v) for k, v in self._data.items() if v]))
def get_ttl(self):
"""
Returns the TTL of this resource ensuring that it at minimum the value
of ``RESOURCE_MIN_TTL`` from :mod:`monocle.settings`.
This value could be specified by the provider via the property ``cache_age``.
If it is not, the value ``RESOURCE_DEFAULT_TTL`` from :mod:`monocle.settings`
is used.
:returns: TTL in seconds
"""
try:
return max(settings.RESOURCE_MIN_TTL,
int(self._data.get('cache_age', settings.RESOURCE_DEFAULT_TTL)))
except (ValueError, TypeError):
return settings.RESOURCE_DEFAULT_TTL
def set_ttl(self, value):
"""
Sets the TTL value of this resource ensuring that it is at minimum the value
of ``RESOURCE_MIN_TTL`` from :mod:`monocle.settings`. If it is not, the value
of ``RESOURCE_DEFAULT_TTL`` from :mod:`monocle.settings` is used.
"""
try:
value = max(settings.RESOURCE_MIN_TTL, int(value))
except (ValueError, TypeError):
value = settings.RESOURCE_DEFAULT_TTL
self._data['cache_age'] = value
ttl = property(get_ttl, set_ttl)
| [
"os.path.join",
"time.time",
"django.template.loader.get_template",
"django.template.Context"
] | [((388, 399), 'time.time', 'time.time', ([], {}), '()\n', (397, 399), False, 'import time\n'), ((1662, 1689), 'django.template.loader.get_template', 'get_template', (['template_name'], {}), '(template_name)\n', (1674, 1689), False, 'from django.template.loader import get_template\n'), ((3073, 3084), 'time.time', 'time.time', ([], {}), '()\n', (3082, 3084), False, 'import time\n'), ((1586, 1641), 'os.path.join', 'os.path.join', (['"""monocle"""', "('%s.html' % self._data['type'])"], {}), "('monocle', '%s.html' % self._data['type'])\n", (1598, 1641), False, 'import os\n'), ((1731, 1775), 'django.template.Context', 'Context', (["{'url': self.url, 'resource': self}"], {}), "({'url': self.url, 'resource': self})\n", (1738, 1775), False, 'from django.template import Context\n'), ((2845, 2856), 'time.time', 'time.time', ([], {}), '()\n', (2854, 2856), False, 'import time\n')] |
from warnings import warn
from charms.layer.nginx import * # noqa
warn('nginxlib is being deprecated, use charms.layer.nginx instead')
| [
"warnings.warn"
] | [((68, 136), 'warnings.warn', 'warn', (['"""nginxlib is being deprecated, use charms.layer.nginx instead"""'], {}), "('nginxlib is being deprecated, use charms.layer.nginx instead')\n", (72, 136), False, 'from warnings import warn\n')] |
# -*- coding: utf-8 -*-
# Name: <NAME>
# NUSP: 9778985
# Course Code: SCC0251
# Semester: 2019/1
# Assignment: 2 - Image enhancement and filtering
# -
import numpy as np
import imageio
# ## Defining functions
# +
# method 1 - limiarization
def limiarization(img, t0):
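    # iterative mean-based thresholding: repeatedly set t to the average of the means of the two pixel groups split at t, until |t - t0| <= 0.5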
t = 0.5 * (np.nanmean(np.where(img > t0, img, np.NaN)) + np.nanmean(np.where(img <= t0, img, np.NaN))) # calculating threshold
while(abs(t-t0) > 0.5):
t0 = t
m1 = np.nanmean(np.where(img > t, img, np.NaN)) # mean of group1
m2 = np.nanmean(np.where(img <= t, img, np.NaN)) # mean of group2
t = 0.5 * (m1 + m2)
return np.where(img > t, 1, 0)
# method 2 - 1d filtering
def filter1d(img, w):
imgFlat = img.flatten() # flattening img
imgFinal = np.zeros(imgFlat.shape, dtype=np.double) # creating new array and applying filter
for i in range(imgFlat.shape[0]):
imgFinal[i] = np.sum([imgFlat[(i+j) % imgFlat.shape[0]] * w[j] for j in range(len(w))])
return imgFinal.reshape(img.shape)
# method 3 - 2d filtering
def filter2d(img, w, t0):
imgPad = np.pad(img, w.shape[0]//2, 'symmetric') # padding input image to apply filter
imgFinal = np.zeros(img.shape, dtype=np.double) # creating new array and applying filter
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
imgFinal[i][j] = np.sum([[imgPad[i+x][j+y] * w[x][y] for x in range(w.shape[0])] for y in range(w.shape[1])])
return limiarization(imgFinal, t0) # return limiarization of filtered image
# method 4 - 2d median filter
def medianFilter2d(img, n):
imgPad = np.pad(img, n//2, 'constant', constant_values = 0) # padding input image to apply filter
imgFinal = np.zeros(img.shape, dtype=np.double) # creating new array and applying filter
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
imgFinal[i][j] = np.median(imgPad[i:i+n, j:j+n])
return imgFinal
# Normalize value of an numpy array between 0 and a given max value
def normalize (arr, maxvalue):
return (arr-arr.min()) * (maxvalue / (arr.max()-arr.min()))
# root mean squared error (RMSE) function
def rmse (img_g, img_r):
return np.sqrt((1/(img_g.shape[0]*img_g.shape[1])) * np.sum(np.power(img_g.astype(np.double) - img_r.astype(np.double), 2)))
# -
# ## Main function
if __name__ == '__main__':
# get user input
filename = str(input()).strip()
sourceImg = imageio.imread(filename)
method = int(input())
# executing processing based on value of "method" variable
if method == 1:
t0 = np.double(input())
outputImg = normalize(limiarization(sourceImg, t0), 255).astype(np.uint8)
elif method == 2:
n = int(input())
w = np.array(input().split(), dtype=np.double)
if w.shape[0] != n:
raise ValueError("unexpected number of values for filter.")
outputImg = normalize(filter1d(sourceImg, w), 255).astype(np.uint8)
elif method == 3:
n = int(input())
w = np.array([input().split() for i in range(n)], dtype=np.double)
if w.shape != (n, n):
raise ValueError("unexpected number of values for filter.")
t0 = np.double(input())
outputImg = normalize(filter2d(sourceImg, w, t0), 255).astype(np.uint8)
elif method == 4:
n = int(input())
outputImg = normalize(medianFilter2d(sourceImg, n), 255).astype(np.uint8)
else:
raise ValueError("method value not in supported range (minimum = 1, maximum = 4).")
# printing output
print('%.4f' % rmse(sourceImg, outputImg))
| [
"numpy.median",
"numpy.where",
"numpy.zeros",
"imageio.imread",
"numpy.pad"
] | [((632, 655), 'numpy.where', 'np.where', (['(img > t)', '(1)', '(0)'], {}), '(img > t, 1, 0)\n', (640, 655), True, 'import numpy as np\n'), ((765, 805), 'numpy.zeros', 'np.zeros', (['imgFlat.shape'], {'dtype': 'np.double'}), '(imgFlat.shape, dtype=np.double)\n', (773, 805), True, 'import numpy as np\n'), ((1087, 1128), 'numpy.pad', 'np.pad', (['img', '(w.shape[0] // 2)', '"""symmetric"""'], {}), "(img, w.shape[0] // 2, 'symmetric')\n", (1093, 1128), True, 'import numpy as np\n'), ((1180, 1216), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.double'}), '(img.shape, dtype=np.double)\n', (1188, 1216), True, 'import numpy as np\n'), ((1610, 1660), 'numpy.pad', 'np.pad', (['img', '(n // 2)', '"""constant"""'], {'constant_values': '(0)'}), "(img, n // 2, 'constant', constant_values=0)\n", (1616, 1660), True, 'import numpy as np\n'), ((1714, 1750), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.double'}), '(img.shape, dtype=np.double)\n', (1722, 1750), True, 'import numpy as np\n'), ((2439, 2463), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (2453, 2463), False, 'import imageio\n'), ((470, 500), 'numpy.where', 'np.where', (['(img > t)', 'img', 'np.NaN'], {}), '(img > t, img, np.NaN)\n', (478, 500), True, 'import numpy as np\n'), ((543, 574), 'numpy.where', 'np.where', (['(img <= t)', 'img', 'np.NaN'], {}), '(img <= t, img, np.NaN)\n', (551, 574), True, 'import numpy as np\n'), ((1899, 1934), 'numpy.median', 'np.median', (['imgPad[i:i + n, j:j + n]'], {}), '(imgPad[i:i + n, j:j + n])\n', (1908, 1934), True, 'import numpy as np\n'), ((298, 329), 'numpy.where', 'np.where', (['(img > t0)', 'img', 'np.NaN'], {}), '(img > t0, img, np.NaN)\n', (306, 329), True, 'import numpy as np\n'), ((344, 376), 'numpy.where', 'np.where', (['(img <= t0)', 'img', 'np.NaN'], {}), '(img <= t0, img, np.NaN)\n', (352, 376), True, 'import numpy as np\n')] |
# Generated by Django 2.0.3 on 2018-03-25 15:29
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organization', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='team',
managers=[
('objects', django.contrib.auth.models.GroupManager()),
],
),
migrations.AlterField(
model_name='team',
name='ciso',
field=models.ForeignKey(help_text='chief information security officer', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ciso', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='team',
name='manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='manager', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='team',
name='technical_contact',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='technical_contact', to=settings.AUTH_USER_MODEL),
),
]
| [
"django.db.models.ForeignKey"
] | [((611, 791), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""chief information security officer"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""ciso"""', 'to': 'settings.AUTH_USER_MODEL'}), "(help_text='chief information security officer', null=True,\n on_delete=django.db.models.deletion.PROTECT, related_name='ciso', to=\n settings.AUTH_USER_MODEL)\n", (628, 791), False, 'from django.db import migrations, models\n'), ((903, 1033), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""manager"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.PROTECT,\n related_name='manager', to=settings.AUTH_USER_MODEL)\n", (920, 1033), False, 'from django.db import migrations, models\n'), ((1160, 1300), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""technical_contact"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.PROTECT,\n related_name='technical_contact', to=settings.AUTH_USER_MODEL)\n", (1177, 1300), False, 'from django.db import migrations, models\n')] |
import torch
import pickle
import argparse
import os
from tqdm import trange, tqdm
import torchtext
from torchtext import data
from torchtext import datasets
from torch import nn
import torch.nn.functional as F
import math
from models import SimpleLSTMModel, AttentionRNN
from train_args import get_arg_parser
import constants
from vocab import Vocabulary, load_vocab
import dataset as d
def build_model(
parser: argparse.ArgumentParser,
en_vocab: Vocabulary,
fr_vocab: Vocabulary,
) -> nn.Module:
# TODO make switch case
args = parser.parse_args()
if args.model_type == 'SimpleLSTM':
SimpleLSTMModel.add_args(parser)
args = parser.parse_args()
return SimpleLSTMModel.build_model(
src_vocab=en_vocab,
trg_vocab=fr_vocab,
encoder_embed_dim=args.encoder_embed_dim,
encoder_hidden_dim=args.encoder_hidden_dim,
encoder_dropout=args.encoder_dropout,
encoder_num_layers=args.encoder_layers,
decoder_embed_dim=args.decoder_embed_dim,
decoder_hidden_dim=args.decoder_hidden_dim,
decoder_dropout=args.decoder_dropout,
decoder_num_layers=args.decoder_layers,
)
elif args.model_type == 'AttentionRNN':
AttentionRNN.add_args(parser)
args = parser.parse_args()
return AttentionRNN.build_model(
src_vocab=en_vocab,
trg_vocab=fr_vocab,
encoder_embed_dim=args.encoder_embed_dim,
encoder_hidden_dim=args.encoder_hidden_dim,
encoder_dropout=args.encoder_dropout,
encoder_num_layers=args.encoder_layers,
decoder_embed_dim=args.decoder_embed_dim,
decoder_hidden_dim=args.decoder_hidden_dim,
decoder_dropout=args.decoder_dropout,
decoder_num_layers=args.decoder_layers,
teacher_student_ratio=args.teacher_student_ratio,
)
else:
raise Exception(
"Unknown Model Type: {}".format(args.model_type)
)
def train(
train_loader: d.BatchedIterator,
valid_loader: d.BatchedIterator,
model: nn.Module,
epochs: int,
learning_rate: float,
weight_decay: float,
log_dir: str,
save_dir: str,
en_vocab: Vocabulary,
fr_vocab: Vocabulary,
device: str,
multi_gpu: bool,
save_step: int,
model_name: str,
optimizer: str,
) -> None:
model = model.to(device)
if multi_gpu and device == 'cuda':
print('Using multi gpu training')
model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
if optimizer == "sgd":
print("using stochastic gradient descent optimizer")
optim = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
elif optimizer == "adam":
print("using adam optimizer")
optim = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
else:
raise Exception("Illegal Optimizer {}".format(optimizer))
# [DEBUG]: count number of nans
nan_count = 0
for e in range(epochs):
total_loss = 0.0
count = 0
with tqdm(train_loader, total=len(train_loader)) as pbar:
for i, data in enumerate(pbar):
src, trg, src_lengths, trg_lengths, prev_tokens, prev_lengths = data
src = src.to(device)
trg = trg.to(device)
src_lengths = src_lengths.to(device)
trg_lengths = trg_lengths.to(device)
prev_tokens = prev_tokens.to(device)
prev_lengths = prev_lengths.to(device)
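                # prev_tokens/prev_lengths appear to be the target sequence shifted right, used as decoder input (teacher forcing)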
# feed everything into model
# compute loss
# call backwards
# trg_tensor = torch.cat([trg, eos_tensor], dim=1).to(device)
# prev_tokens = torch.cat([eos_tensor, trg], dim=1).to(device)
optim.zero_grad()
predicted, _ = model.forward(src, src_lengths, prev_tokens)
if not multi_gpu:
loss = model.loss(predicted.view(-1, predicted.size(-1)), trg.view(-1))
else:
# if using data parallel, loss has to be computed here
# there is no longer a model loss function that we have
# access to.
# TODO: data parallel kills the computer, why?
loss = F.cross_entropy(
predicted.view(-1, predicted.size(-1)),
                        trg.view(-1),  # trg_tensor is never defined (its construction is commented out above); use trg as in the single-GPU branch
ignore_index=fr_vocab.word2idx(constants.PAD_TOKEN),
)
if math.isnan(loss.item()):
'''
Ignore nan loss for backward, and continue forward
'''
nan_count += 1
print('found nan at {}'.format(i))
torch.save(
model.state_dict(),
os.path.join(save_dir, model_name, 'unk_problem.pt')
)
return
loss.backward()
optim.step()
total_loss += loss.item()
count += 1
pbar.set_postfix(
loss_avg=total_loss/(count),
epoch="{}/{}".format(e + 1, epochs),
curr_loss=loss.item(),
nan_count=nan_count,
)
pbar.refresh()
if (i + 1) % save_step == 0:
print('Saving model at iteration {} for epoch {}'.format(i, e))
model_file_name = "model_epoch_{}_itr_{}".format(e, i)
torch.save(
model.state_dict(),
os.path.join(save_dir, model_name, model_file_name)
)
print("Summary: Total Loss {} | Count {} | Average {}".format(total_loss, count, total_loss / count))
model_file_name = "model_epoch_{}_final".format(e)
print('saving to {}'.format(os.path.join(save_dir, model_name, model_file_name)))
torch.save(
model.state_dict(),
os.path.join(save_dir, model_name, model_file_name)
)
train_loader.reset()
# valid_loader.reset()
def main() -> None:
parser = get_arg_parser()
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() and args.cuda else "cpu"
print('using device {}'.format(device))
print('loading vocabulary...')
if args.small:
print('using small training set')
en_vocab = load_vocab(constants.SMALL_TRAIN_EN_VOCAB_FILE)
fr_vocab = load_vocab(constants.SMALL_TRAIN_FR_VOCAB_FILE)
else:
en_vocab = load_vocab(constants.TRAIN_EN_VOCAB_FILE)
fr_vocab = load_vocab(constants.TRAIN_FR_VOCAB_FILE)
print('loaded vocabulary')
print('loading datasets...')
if args.small:
train_dataset = d.ShardedCSVDataset(constants.WMT14_EN_FR_SMALL_TRAIN_SHARD)
else:
train_dataset = d.ShardedCSVDataset(constants.WMT14_EN_FR_TRAIN_SHARD)
# valid_dataset = d.DualFileDataset(
# constants.WMT14_EN_FR_VALID + ".en",
# constants.WMT14_EN_FR_VALID + ".fr",
# )
train_loader = d.BatchedIterator(
args.batch_size,
train_dataset,
en_vocab,
fr_vocab,
args.max_sequence_length,
)
# valid_loader = d.BatchedIterator(
# 1,
# valid_dataset,
# en_vocab,
# fr_vocab,
# args.max_sequence_length,
# )
model = build_model(parser, en_vocab, fr_vocab)
print('using model...')
print(model)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
if not os.path.exists(os.path.join(args.save_dir, args.model_name)):
os.makedirs(os.path.join(args.save_dir, args.model_name))
# model.load_state_dict(torch.load('delete/model_1543183590.2138884/unk_problem.pt'))
train(
train_loader=train_loader,
valid_loader=None, # valid_loader,
model=model,
epochs=args.num_epochs,
learning_rate=args.learning_rate,
weight_decay=args.weight_decay,
log_dir=args.log_dir,
save_dir=args.save_dir,
en_vocab=en_vocab,
fr_vocab=fr_vocab,
device=device,
multi_gpu=args.multi_gpu,
save_step=args.save_step,
model_name=args.model_name,
optimizer=args.optimizer,
)
if __name__ == "__main__":
main() | [
"models.SimpleLSTMModel.add_args",
"os.path.exists",
"os.makedirs",
"vocab.load_vocab",
"os.path.join",
"torch.nn.DataParallel",
"dataset.ShardedCSVDataset",
"models.SimpleLSTMModel.build_model",
"torch.cuda.is_available",
"dataset.BatchedIterator",
"models.AttentionRNN.add_args",
"models.Atte... | [((6460, 6476), 'train_args.get_arg_parser', 'get_arg_parser', ([], {}), '()\n', (6474, 6476), False, 'from train_args import get_arg_parser\n'), ((7411, 7511), 'dataset.BatchedIterator', 'd.BatchedIterator', (['args.batch_size', 'train_dataset', 'en_vocab', 'fr_vocab', 'args.max_sequence_length'], {}), '(args.batch_size, train_dataset, en_vocab, fr_vocab, args.\n max_sequence_length)\n', (7428, 7511), True, 'import dataset as d\n'), ((632, 664), 'models.SimpleLSTMModel.add_args', 'SimpleLSTMModel.add_args', (['parser'], {}), '(parser)\n', (656, 664), False, 'from models import SimpleLSTMModel, AttentionRNN\n'), ((715, 1137), 'models.SimpleLSTMModel.build_model', 'SimpleLSTMModel.build_model', ([], {'src_vocab': 'en_vocab', 'trg_vocab': 'fr_vocab', 'encoder_embed_dim': 'args.encoder_embed_dim', 'encoder_hidden_dim': 'args.encoder_hidden_dim', 'encoder_dropout': 'args.encoder_dropout', 'encoder_num_layers': 'args.encoder_layers', 'decoder_embed_dim': 'args.decoder_embed_dim', 'decoder_hidden_dim': 'args.decoder_hidden_dim', 'decoder_dropout': 'args.decoder_dropout', 'decoder_num_layers': 'args.decoder_layers'}), '(src_vocab=en_vocab, trg_vocab=fr_vocab,\n encoder_embed_dim=args.encoder_embed_dim, encoder_hidden_dim=args.\n encoder_hidden_dim, encoder_dropout=args.encoder_dropout,\n encoder_num_layers=args.encoder_layers, decoder_embed_dim=args.\n decoder_embed_dim, decoder_hidden_dim=args.decoder_hidden_dim,\n decoder_dropout=args.decoder_dropout, decoder_num_layers=args.\n decoder_layers)\n', (742, 1137), False, 'from models import SimpleLSTMModel, AttentionRNN\n'), ((6742, 6789), 'vocab.load_vocab', 'load_vocab', (['constants.SMALL_TRAIN_EN_VOCAB_FILE'], {}), '(constants.SMALL_TRAIN_EN_VOCAB_FILE)\n', (6752, 6789), False, 'from vocab import Vocabulary, load_vocab\n'), ((6809, 6856), 'vocab.load_vocab', 'load_vocab', (['constants.SMALL_TRAIN_FR_VOCAB_FILE'], {}), '(constants.SMALL_TRAIN_FR_VOCAB_FILE)\n', (6819, 6856), False, 'from vocab import Vocabulary, load_vocab\n'), ((6886, 6927), 'vocab.load_vocab', 'load_vocab', (['constants.TRAIN_EN_VOCAB_FILE'], {}), '(constants.TRAIN_EN_VOCAB_FILE)\n', (6896, 6927), False, 'from vocab import Vocabulary, load_vocab\n'), ((6947, 6988), 'vocab.load_vocab', 'load_vocab', (['constants.TRAIN_FR_VOCAB_FILE'], {}), '(constants.TRAIN_FR_VOCAB_FILE)\n', (6957, 6988), False, 'from vocab import Vocabulary, load_vocab\n'), ((7097, 7157), 'dataset.ShardedCSVDataset', 'd.ShardedCSVDataset', (['constants.WMT14_EN_FR_SMALL_TRAIN_SHARD'], {}), '(constants.WMT14_EN_FR_SMALL_TRAIN_SHARD)\n', (7116, 7157), True, 'import dataset as d\n'), ((7192, 7246), 'dataset.ShardedCSVDataset', 'd.ShardedCSVDataset', (['constants.WMT14_EN_FR_TRAIN_SHARD'], {}), '(constants.WMT14_EN_FR_TRAIN_SHARD)\n', (7211, 7246), True, 'import dataset as d\n'), ((7828, 7856), 'os.path.exists', 'os.path.exists', (['args.log_dir'], {}), '(args.log_dir)\n', (7842, 7856), False, 'import os\n'), ((7866, 7891), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (7877, 7891), False, 'import os\n'), ((1294, 1323), 'models.AttentionRNN.add_args', 'AttentionRNN.add_args', (['parser'], {}), '(parser)\n', (1315, 1323), False, 'from models import SimpleLSTMModel, AttentionRNN\n'), ((1374, 1843), 'models.AttentionRNN.build_model', 'AttentionRNN.build_model', ([], {'src_vocab': 'en_vocab', 'trg_vocab': 'fr_vocab', 'encoder_embed_dim': 'args.encoder_embed_dim', 'encoder_hidden_dim': 'args.encoder_hidden_dim', 'encoder_dropout': 'args.encoder_dropout', 
'encoder_num_layers': 'args.encoder_layers', 'decoder_embed_dim': 'args.decoder_embed_dim', 'decoder_hidden_dim': 'args.decoder_hidden_dim', 'decoder_dropout': 'args.decoder_dropout', 'decoder_num_layers': 'args.decoder_layers', 'teacher_student_ratio': 'args.teacher_student_ratio'}), '(src_vocab=en_vocab, trg_vocab=fr_vocab,\n encoder_embed_dim=args.encoder_embed_dim, encoder_hidden_dim=args.\n encoder_hidden_dim, encoder_dropout=args.encoder_dropout,\n encoder_num_layers=args.encoder_layers, decoder_embed_dim=args.\n decoder_embed_dim, decoder_hidden_dim=args.decoder_hidden_dim,\n decoder_dropout=args.decoder_dropout, decoder_num_layers=args.\n decoder_layers, teacher_student_ratio=args.teacher_student_ratio)\n', (1398, 1843), False, 'from models import SimpleLSTMModel, AttentionRNN\n'), ((6531, 6556), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6554, 6556), False, 'import torch\n'), ((7923, 7967), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.model_name'], {}), '(args.save_dir, args.model_name)\n', (7935, 7967), False, 'import os\n'), ((7990, 8034), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.model_name'], {}), '(args.save_dir, args.model_name)\n', (8002, 8034), False, 'import os\n'), ((2565, 2612), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': '[0, 1]'}), '(model, device_ids=[0, 1])\n', (2586, 2612), False, 'import torch\n'), ((6298, 6349), 'os.path.join', 'os.path.join', (['save_dir', 'model_name', 'model_file_name'], {}), '(save_dir, model_name, model_file_name)\n', (6310, 6349), False, 'import os\n'), ((6167, 6218), 'os.path.join', 'os.path.join', (['save_dir', 'model_name', 'model_file_name'], {}), '(save_dir, model_name, model_file_name)\n', (6179, 6218), False, 'import os\n'), ((5065, 5117), 'os.path.join', 'os.path.join', (['save_dir', 'model_name', '"""unk_problem.pt"""'], {}), "(save_dir, model_name, 'unk_problem.pt')\n", (5077, 5117), False, 'import os\n'), ((5876, 5927), 'os.path.join', 'os.path.join', (['save_dir', 'model_name', 'model_file_name'], {}), '(save_dir, model_name, model_file_name)\n', (5888, 5927), False, 'import os\n')] |
from django.db import connection
class RawQuery:
# return a list of dicts
# e.g. SELECT * FROM my_table
# [
# {'a': 1, 'b': 2, 'c': 3},
# {'a': 1, 'b': 2, 'c': 3},
# ]
def multiple_rows(self, sql, params=[]):
cursor = self._do_query(sql, params)
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
# return a single dict
# e.g. SELECT COUNT(*) AS count, AVG(price) AS avg_price FROM my_table
# { 'count': 12, 'avg_price': 95.2 }
def single_row(self, sql, params=[]):
return self.multiple_rows(sql, params)[0]
# return a single value
# e.g. SELECT COUNT(*) FROM my_table
# 134
def single_value(self, sql, params=[]):
cursor = self._do_query(sql, params)
return cursor.fetchone()[0]
# return a list of single values
# e.g. SELECT id FROM my_table
# [1, 2, 3, 4, 5]
def multiple_values(self, sql, params=[]):
cursor = self._do_query(sql, params)
return [row[0] for row in cursor.fetchall()]
# UPDATE, INSERT, etc.
def run(self, sql, params=[]):
cursor = self._do_query(sql, params)
return cursor.rowcount
def _do_query(self, sql, params):
cursor = connection.cursor()
cursor.execute(sql, params)
return cursor
| [
"django.db.connection.cursor"
] | [((1326, 1345), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (1343, 1345), False, 'from django.db import connection\n')] |
from functools import reduce
import numpy as np
import json
import tensorflow as tf
from scipy.optimize import linear_sum_assignment
import os
import time
def deleteDuplicate_v1(input_dict_lst):
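    # order-preserving de-duplication via reduce; the elements are dicts, so a set cannot be used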
f = lambda x,y:x if y in x else x + [y]
return reduce(f, [[], ] + input_dict_lst)
def get_context_pair(resp, l):
label_weights = l['label_weights']
valid_resp = {}
for key in resp:
valid_resp[key] = []
for index, value in enumerate(resp[key]):
if label_weights[index] == 1:
valid_resp[key].append(value)
answer = l['answer_tokens']
position_tokens = l['tokens']
label_position = [lpos-1 for index, lpos in enumerate(l['label_positions']) if label_weights[index]==1]
score_label = []
for index in range(len(valid_resp['pred_label'])):
label = valid_resp['pred_label'][index]
score = valid_resp['max_prob'][index]
position = label_position[index]
position_token = position_tokens[str(position)][1]
if label == 1:
score = 1 - score
score_label.append({"score":score, "label":label,
"position_token":position_token,
"answer":answer})
return score_label
def format_socre_matrix(result_lst, score_merge='mean'):
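    # build an (answer x candidate-position) score matrix, merging scores for repeated pairs by mean or max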
answer_dict = {}
candidate_dict = {}
answer_index = 0
pos_index = 0
for item in result_lst:
if item['answer'] not in answer_dict:
answer_dict[item['answer']] = answer_index
answer_index += 1
if item['position_token'] not in candidate_dict:
candidate_dict[item['position_token']] = pos_index
pos_index += 1
score_matrix = -np.ones((len(answer_dict), len(candidate_dict)))
for item in result_lst:
answer_pos = answer_dict[item['answer']]
candidate_pos = candidate_dict[item['position_token']]
score_matrix_score = score_matrix[answer_pos, candidate_pos]
if score_matrix_score == -1:
score_matrix[answer_pos, candidate_pos] = item['score']
else:
if score_merge == 'mean':
score_matrix[answer_pos, candidate_pos] += item['score']
score_matrix[answer_pos, candidate_pos] /= 2
elif score_merge == 'max':
if item['score'] > score_matrix[answer_pos, candidate_pos]:
score_matrix[answer_pos, candidate_pos] = item['score']
return score_matrix, answer_dict, candidate_dict
flags = tf.flags
FLAGS = flags.FLAGS
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.INFO)
flags.DEFINE_string("buckets", "", "oss buckets")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
    "output_file", None,
    "Output file for the predicted candidate-to-answer assignments (one JSON object per input line).")
flags.DEFINE_string(
    "model_file", None,
    "Path (relative to buckets) of the exported SavedModel directory.")
flags.DEFINE_string(
    "score_merge", "max",
    "How to merge duplicate scores for the same answer/position pair: 'mean' or 'max'.")
input_file = os.path.join(FLAGS.buckets, FLAGS.input_file)
output_file = os.path.join(FLAGS.buckets, FLAGS.output_file)
model_file = os.path.join(FLAGS.buckets, FLAGS.model_file)
from tensorflow.contrib import predictor
# model_dict = {
# "model":'/data/xuht/albert.xht/nlpcc2019/open_data/model/1566283032'
# }
model_dict = {
"model":model_file
}
chid_model = predictor.from_saved_model(model_dict['model'])
fwobj = tf.gfile.Open(output_file, "w")
cnt = 0
valid_keys = ['input_ids', 'label_weights',
'label_positions', 'label_ids',
'segment_ids']
with tf.gfile.Open(input_file, "r") as f:
for index, line in enumerate(f):
content = json.loads(line.strip())
total_resp = []
start = time.time()
for t in content:
tmp = {}
for l in t:
for key in valid_keys:
if key in tmp:
tmp[key].append(l[key])
else:
tmp[key] = [l[key]]
# tmp = {
# "input_ids":np.array([l['input_ids']]),
# 'label_weights':np.array([l['label_weights']]),
# 'label_positions':np.array([l['label_positions']]),
# 'label_ids':np.array([l['label_ids']]),
# 'segment_ids':np.array([l['segment_ids']]),
# }
resp = chid_model(tmp)
resp_lst = []
batch_size = int(resp['pred_label'].shape[0]/5)
for key in resp:
resp[key] = np.reshape(resp[key], [-1, 5]).tolist()
for i_index in range(batch_size):
tmp = {
"pred_label":resp['pred_label'][i_index],
"max_prob":resp['max_prob'][i_index],
}
resp_lst.append(tmp)
for i_index in range(len(t)):
resp_ = resp_lst[i_index]
l_ = t[i_index]
result = get_context_pair(resp_, l_)
total_resp.extend(result)
total_resp = deleteDuplicate_v1(total_resp)
resp = format_socre_matrix(total_resp, score_merge=FLAGS.score_merge)
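        # solve the assignment problem (Hungarian algorithm): minimum-cost one-to-one matching of answers to candidate blank positions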
row_ind, col_ind = linear_sum_assignment(resp[0])
mapping_dict = dict(zip(col_ind, row_ind))
dura = time.time()-start
candidte_dict = resp[-1]
candidate_inverse_dict = {}
for key in candidte_dict:
candidate_inverse_dict[candidte_dict[key]] = key
candidate_name_dict = {}
for col in mapping_dict:
col_name = candidate_inverse_dict[col]
candidate_name_dict[col_name] = int(mapping_dict[col])
cnt += len(candidate_name_dict)
if np.mod(index, 100) == 0:
print(candidate_name_dict, index, dura)
fwobj.write(json.dumps(candidate_name_dict, ensure_ascii=False)+"\n")
fwobj.close()
print('==total cnt==', cnt)
| [
"tensorflow.gfile.Open",
"numpy.reshape",
"scipy.optimize.linear_sum_assignment",
"tensorflow.contrib.predictor.from_saved_model",
"functools.reduce",
"json.dumps",
"os.path.join",
"tensorflow.logging.set_verbosity",
"numpy.mod",
"time.time"
] | [((2296, 2337), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (2320, 2337), True, 'import tensorflow as tf\n'), ((2830, 2875), 'os.path.join', 'os.path.join', (['FLAGS.buckets', 'FLAGS.input_file'], {}), '(FLAGS.buckets, FLAGS.input_file)\n', (2842, 2875), False, 'import os\n'), ((2890, 2936), 'os.path.join', 'os.path.join', (['FLAGS.buckets', 'FLAGS.output_file'], {}), '(FLAGS.buckets, FLAGS.output_file)\n', (2902, 2936), False, 'import os\n'), ((2950, 2995), 'os.path.join', 'os.path.join', (['FLAGS.buckets', 'FLAGS.model_file'], {}), '(FLAGS.buckets, FLAGS.model_file)\n', (2962, 2995), False, 'import os\n'), ((3186, 3233), 'tensorflow.contrib.predictor.from_saved_model', 'predictor.from_saved_model', (["model_dict['model']"], {}), "(model_dict['model'])\n", (3212, 3233), False, 'from tensorflow.contrib import predictor\n'), ((3243, 3274), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['output_file', '"""w"""'], {}), "(output_file, 'w')\n", (3256, 3274), True, 'import tensorflow as tf\n'), ((245, 277), 'functools.reduce', 'reduce', (['f', '([[]] + input_dict_lst)'], {}), '(f, [[]] + input_dict_lst)\n', (251, 277), False, 'from functools import reduce\n'), ((3387, 3417), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), "(input_file, 'r')\n", (3400, 3417), True, 'import tensorflow as tf\n'), ((3523, 3534), 'time.time', 'time.time', ([], {}), '()\n', (3532, 3534), False, 'import time\n'), ((4611, 4641), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['resp[0]'], {}), '(resp[0])\n', (4632, 4641), False, 'from scipy.optimize import linear_sum_assignment\n'), ((4696, 4707), 'time.time', 'time.time', ([], {}), '()\n', (4705, 4707), False, 'import time\n'), ((5047, 5065), 'numpy.mod', 'np.mod', (['index', '(100)'], {}), '(index, 100)\n', (5053, 5065), True, 'import numpy as np\n'), ((5129, 5180), 'json.dumps', 'json.dumps', (['candidate_name_dict'], {'ensure_ascii': '(False)'}), '(candidate_name_dict, ensure_ascii=False)\n', (5139, 5180), False, 'import json\n'), ((4107, 4137), 'numpy.reshape', 'np.reshape', (['resp[key]', '[-1, 5]'], {}), '(resp[key], [-1, 5])\n', (4117, 4137), True, 'import numpy as np\n')] |
from typing import List
from .exceptions import InvalidConfigError
from .utils import check_schema
try:
import ujson as json
except Exception:
import json
class _config:
def __init__(self, c: dict) -> None:
        # Settings that correspond to iotbot; if not present they can only be None
# ip
host = c.get('host')
if host:
self.host = check_schema(str(host))
else:
self.host = None
# port
try:
self.port = int(c.get('port'))
except Exception:
self.port = None
        # Group blacklist
self.group_blacklist: List[int] = c.get('group_blacklist')
        # Friend blacklist
self.friend_blacklist: List[int] = c.get('friend_blacklist')
        # webhook-related settings
        # on/off switch
self.webhook = bool(c.get('webhook'))
        # post URL
webhook_post_url = c.get('webhook_post_url')
if webhook_post_url:
self.webhook_post_url = check_schema(str(webhook_post_url))
else:
self.webhook_post_url = None
        # post wait timeout
try:
self.webhook_timeout = int(c.get('webhook_timeout'))
except Exception:
self.webhook_timeout = 10
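# Example of the expected ./.iotbot.json (illustrative values only; every key is
# optional and falls back to the defaults handled in _config above):
# {
#     "host": "http://127.0.0.1",
#     "port": 8888,
#     "group_blacklist": [123456789],
#     "friend_blacklist": [987654321],
#     "webhook": false,
#     "webhook_post_url": "http://127.0.0.1:5000",
#     "webhook_timeout": 20
# }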
_config_dict = {}
try:
with open('./.iotbot.json', encoding='utf-8') as f:
_config_dict = json.load(f)
except FileNotFoundError:
pass
except json.JSONDecodeError as e:
    raise InvalidConfigError('Malformed configuration file') from e
config = _config(_config_dict)
# print('=====config=====')
# print('port: ', config.port)
# print('host: ', config.host)
# print('webhook: ', config.webhook)
# print('webhook_post_url: ', config.webhook_post_url)
# print('webhook_timeout: ', config.webhook_timeout)
# print('================')
| [
"json.load"
] | [((1272, 1284), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1281, 1284), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
remove wrong sentence breaking marks after period error eojeol
__author__ = 'Jamie (<EMAIL>)'
__copyright__ = 'Copyright (C) 2017-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
from argparse import ArgumentParser
import logging
import os
import re
import sys
from typing import TextIO, Tuple
from khaiii.munjong.sejong_corpus import Morph, WORD_ID_PTN
#############
# functions #
#############
def _get_three_lines(fin: TextIO) -> Tuple[str, str, str]:
"""
get three lines tuple from file (generator)
Args:
fin: input file
Yields:
prev. prev. line
prev. line
curr. line
"""
prev_prev_line = fin.readline().rstrip('\r\n')
prev_line = fin.readline().rstrip('\r\n')
# print first two lines
print(prev_prev_line)
print(prev_line)
for curr_line in fin:
curr_line = curr_line.rstrip('\r\n')
yield prev_prev_line, prev_line, curr_line
prev_prev_line = prev_line
prev_line = curr_line
def _is_known_period_error_eojeol(line: str) -> bool:
"""
    whether this eojeol contains a known, specific sentence-breaking error
Args:
line: line (eojeol)
Returns:
whether has error or not
"""
cols = line.split('\t')
if len(cols) != 3 or not WORD_ID_PTN.match(cols[0]):
return False
if '/SF + ' not in cols[2] or re.match(r'.+/EF \+ ./SF$', cols[2]):
return False
if re.match(r'.+/SF \+ [\'"’”]/SS$', cols[2]):
return False
morphs = [Morph.parse(_) for _ in cols[2].split(' + ')]
tags_str = '+'.join([_.tag for _ in morphs])
if 'SN+SF+SN' in tags_str and not tags_str.endswith('+SF'):
# 4.6판: 4/SN + ./SF + 6/SN + 판/NNB
if 'XSN+SF+SN' not in tags_str:
return True
elif 'SL+SF+SL' in tags_str and not tags_str.endswith('+SF'):
# S.M.오너: S/SL + ./SF + M/SL + ./SF + 오너/NNG
return True
return False
def run():
"""
run function which is the start point of program
"""
file_name = os.path.basename(sys.stdin.name)
for line_num, (prev_prev_line, prev_line, curr_line) in enumerate(_get_three_lines(sys.stdin),
start=1):
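        # Drop the spurious paragraph break inserted after a known period-error
        # eojeol: skip the '</p>' line itself and the '<p>' that follows it.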
if curr_line == '</p>' and _is_known_period_error_eojeol(prev_line):
continue
elif prev_line == '</p>' and curr_line == '<p>' and \
_is_known_period_error_eojeol(prev_prev_line):
logging.info('%s:%d\t%s', file_name, line_num, prev_prev_line)
continue
print(curr_line)
########
# main #
########
def main():
"""
main function processes only argument parsing
"""
parser = ArgumentParser(description='remove wrong sentence breaking marks after'
' period error eojeol')
parser.add_argument('--input', help='input file <default: stdin>', metavar='FILE')
parser.add_argument('--output', help='output file <default: stdout>', metavar='FILE')
parser.add_argument('--debug', help='enable debug', action='store_true')
args = parser.parse_args()
if args.input:
sys.stdin = open(args.input, 'rt')
if args.output:
sys.stdout = open(args.output, 'wt')
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
run()
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"argparse.ArgumentParser",
"khaiii.munjong.sejong_corpus.WORD_ID_PTN.match",
"re.match",
"os.path.basename",
"khaiii.munjong.sejong_corpus.Morph.parse",
"logging.info"
] | [((1470, 1514), 're.match', 're.match', (['""".+/SF \\\\+ [\\\\\'"’”]/SS$"""', 'cols[2]'], {}), '(\'.+/SF \\\\+ [\\\\\\\'"’”]/SS$\', cols[2])\n', (1478, 1514), False, 'import re\n'), ((2069, 2101), 'os.path.basename', 'os.path.basename', (['sys.stdin.name'], {}), '(sys.stdin.name)\n', (2085, 2101), False, 'import os\n'), ((2745, 2842), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""remove wrong sentence breaking marks after period error eojeol"""'}), "(description=\n 'remove wrong sentence breaking marks after period error eojeol')\n", (2759, 2842), False, 'from argparse import ArgumentParser\n'), ((1404, 1440), 're.match', 're.match', (['""".+/EF \\\\+ ./SF$"""', 'cols[2]'], {}), "('.+/EF \\\\+ ./SF$', cols[2])\n", (1412, 1440), False, 'import re\n'), ((1549, 1563), 'khaiii.munjong.sejong_corpus.Morph.parse', 'Morph.parse', (['_'], {}), '(_)\n', (1560, 1563), False, 'from khaiii.munjong.sejong_corpus import Morph, WORD_ID_PTN\n'), ((3321, 3361), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (3340, 3361), False, 'import logging\n'), ((3380, 3419), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3399, 3419), False, 'import logging\n'), ((1321, 1347), 'khaiii.munjong.sejong_corpus.WORD_ID_PTN.match', 'WORD_ID_PTN.match', (['cols[0]'], {}), '(cols[0])\n', (1338, 1347), False, 'from khaiii.munjong.sejong_corpus import Morph, WORD_ID_PTN\n'), ((2516, 2578), 'logging.info', 'logging.info', (['"""%s:%d\t%s"""', 'file_name', 'line_num', 'prev_prev_line'], {}), "('%s:%d\\t%s', file_name, line_num, prev_prev_line)\n", (2528, 2578), False, 'import logging\n')] |
from google.cloud import storage
import json
client = storage.Client()
bucket = client.get_bucket('ibvdata')
blob = bucket.get_blob("experiments/2018-04-14-03-06-01/outputs/json/a0.05_r3.00_p0.05_t1.00")
check = json.loads(blob.download_as_string())
print(check)
| [
"google.cloud.storage.Client"
] | [((56, 72), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (70, 72), False, 'from google.cloud import storage\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def residuals(fit, obs):
"""Calculate residuals for fit compared to observed data
:fit: list of discrete fit data points
:obs: list of observed data points
:returns: fit minus observed data points
"""
return fit-obs
def fit_stats(obs, fit):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
resid = fit - obs
ss_res = np.sum(resid**2)
ss_tot = np.sum((obs - np.mean(obs))**2)
r_squared = 1 - (ss_res / ss_tot)
return r_squared, ss_tot, ss_res, resid
def sum_squares_total(calc, obs):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
return np.sum((obs - np.mean(obs))**2)
def sum_squares_residuals(calc, obs):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
resids = residuals(calc, obs)
return np.sum(resids**2)
def rms_error(calc, obs):
"""Calculate root mean squared deviation
:calc: calculated data from fit
:obs: experimentally observed data
:returns: rmsd
"""
resids = residuals(calc, obs)
mean_sqrd = np.mean(resids**2)
return np.sqrt(mean_sqrd)
def r_squared(calc, obs):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
ss_res = sum_squares_residuals(calc, obs)
ss_tot = sum_squares_total(calc, obs)
return 1 - (ss_res / ss_tot)
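# Minimal usage sketch (illustrative values only, not part of the original module):
if __name__ == '__main__':
    obs = np.array([1.0, 2.1, 2.9, 4.2])
    fit = np.array([1.0, 2.0, 3.0, 4.0])
    print('RMS error:', rms_error(fit, obs))
    print('R^2:', r_squared(fit, obs))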
| [
"numpy.sum",
"numpy.mean",
"numpy.sqrt"
] | [((486, 504), 'numpy.sum', 'np.sum', (['(resid ** 2)'], {}), '(resid ** 2)\n', (492, 504), True, 'import numpy as np\n'), ((1022, 1041), 'numpy.sum', 'np.sum', (['(resids ** 2)'], {}), '(resids ** 2)\n', (1028, 1041), True, 'import numpy as np\n'), ((1266, 1286), 'numpy.mean', 'np.mean', (['(resids ** 2)'], {}), '(resids ** 2)\n', (1273, 1286), True, 'import numpy as np\n'), ((1296, 1314), 'numpy.sqrt', 'np.sqrt', (['mean_sqrd'], {}), '(mean_sqrd)\n', (1303, 1314), True, 'import numpy as np\n'), ((530, 542), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (537, 542), True, 'import numpy as np\n'), ((805, 817), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (812, 817), True, 'import numpy as np\n')] |
"""
Test Production model
"""
import pytest
from mixer.backend.flask import mixer
from cubbie.model import Production, Capability
from cubbie.fixture import create_production_fixtures
def test_fixtures_created(productions):
"""The production fixture should have > 3 productions."""
assert Production.query.count() > 3
def test_delete_production_cascades_capabilities(session, productions, users):
cap = mixer.blend(Capability, user=mixer.SELECT, production=mixer.SELECT)
cap_prod = cap.production
session.add(cap)
session.commit()
cap_id = cap.id
assert Capability.query.get(cap_id) is not None
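    # Deleting the production should cascade and remove its dependent capability rows.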
session.delete(cap_prod)
session.commit()
assert Capability.query.get(cap_id) is None
| [
"cubbie.model.Production.query.count",
"cubbie.model.Capability.query.get",
"mixer.backend.flask.mixer.blend"
] | [((419, 486), 'mixer.backend.flask.mixer.blend', 'mixer.blend', (['Capability'], {'user': 'mixer.SELECT', 'production': 'mixer.SELECT'}), '(Capability, user=mixer.SELECT, production=mixer.SELECT)\n', (430, 486), False, 'from mixer.backend.flask import mixer\n'), ((300, 324), 'cubbie.model.Production.query.count', 'Production.query.count', ([], {}), '()\n', (322, 324), False, 'from cubbie.model import Production, Capability\n'), ((591, 619), 'cubbie.model.Capability.query.get', 'Capability.query.get', (['cap_id'], {}), '(cap_id)\n', (611, 619), False, 'from cubbie.model import Production, Capability\n'), ((694, 722), 'cubbie.model.Capability.query.get', 'Capability.query.get', (['cap_id'], {}), '(cap_id)\n', (714, 722), False, 'from cubbie.model import Production, Capability\n')] |
#!/usr/bin/env python3
import os
import subprocess
import pytest
@pytest.mark.parametrize(
'testfile',
[
('foo.txt', 'ZnjDGxQAAAAAAAAABAAAAAAAAAA=\n'),
('loremipsum.txt', 'ZNqmJyqS9l79QjW7eNx0qjaDpMY=\n'),
('binary.bin', 'GgQWCSPUD9bQ/3xxO0VcOoxc4ZM=\n'),
]
)
def test_qxh(testfile):
basepath = os.path.dirname(os.path.realpath(__file__))
qxh_bin = os.path.realpath(os.path.join(basepath, '..', 'quickxorhash'))
res = subprocess.run(
[qxh_bin, os.path.join(basepath, testfile[0])],
capture_output=True,
text=True,
)
assert res.stdout == testfile[1]
| [
"os.path.realpath",
"pytest.mark.parametrize",
"os.path.join"
] | [((70, 273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""testfile"""', '[(\'foo.txt\', \'ZnjDGxQAAAAAAAAABAAAAAAAAAA=\\n\'), (\'loremipsum.txt\',\n \'ZNqmJyqS9l79QjW7eNx0qjaDpMY=\\n\'), (\'binary.bin\',\n """GgQWCSPUD9bQ/3xxO0VcOoxc4ZM=\n""")]'], {}), '(\'testfile\', [(\'foo.txt\',\n \'ZnjDGxQAAAAAAAAABAAAAAAAAAA=\\n\'), (\'loremipsum.txt\',\n """ZNqmJyqS9l79QjW7eNx0qjaDpMY=\n"""), (\'binary.bin\',\n \'GgQWCSPUD9bQ/3xxO0VcOoxc4ZM=\\n\')])\n', (93, 273), False, 'import pytest\n'), ((355, 381), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'import os\n'), ((414, 458), 'os.path.join', 'os.path.join', (['basepath', '""".."""', '"""quickxorhash"""'], {}), "(basepath, '..', 'quickxorhash')\n", (426, 458), False, 'import os\n'), ((505, 540), 'os.path.join', 'os.path.join', (['basepath', 'testfile[0]'], {}), '(basepath, testfile[0])\n', (517, 540), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : load_cub_hypernym.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 07.08.2019
# Last Modified Date: 20.11.2019
# Last Modified By : Chi Han
#
# This file is part of the VCML codebase
# Distributed under MIT license
from ..utils import load_questions
from utility.common import contains
def hypernym_questions(dataset, args, logger):
hyp_questions = load_questions.load_question_file(
dataset, 'hypernym_cub', args, logger
)
return hyp_questions
def raw_hypernym(dataset, args, logger):
logger('Identical hypernym suite')
with logger.levelup():
hyp_questions = hypernym_questions(dataset, args, logger)
hyp_full = load_questions.identical_suite(hyp_questions, logger)
return hyp_full
def hypernym_balanced_full(dataset, args, logger):
logger('Loading a full hypernym suite')
with logger.levelup():
hyp_suite = raw_hypernym(dataset, args, logger)
balanced = load_questions.balance_KwAns_suite(hyp_suite, logger)
return balanced
def hypernym_balanced_split(test_concepts, dataset, args, logger):
logger('Loading balaced hypernym suite splitted by test_concepts')
with logger.levelup():
hyp_suite = raw_hypernym(dataset, args, logger)
hyp_suite = load_questions.split_testConcepts(
hyp_suite, test_concepts, logger)
hyp_suite = load_questions.balance_KwAns_suite(hyp_suite, logger)
return hyp_suite
# Visual part
def raw_classify_hypernym(dataset, args, logger):
logger('Loading classify-hypernym suite')
with logger.levelup():
raw_suite = {
'train': load_questions.load_question_file(
dataset, 'train_cub_classification_hypernym', args, logger
),
'val': load_questions.load_question_file(
dataset, 'val_cub_classification_hypernym', args, logger
),
'test': load_questions.load_question_file(
dataset, 'test_cub_classification_hypernym', args, logger
),
}
return raw_suite
def raw_exist_hypernym(dataset, args, logger):
logger('Loading exist-hypernym suite')
with logger.levelup():
raw_suite = {
'train': load_questions.load_question_file(
dataset, 'train_cub_exist_hypernym', args, logger
),
'val': load_questions.load_question_file(
dataset, 'val_cub_exist_hypernym', args, logger
),
'test': load_questions.load_question_file(
dataset, 'test_cub_exist_hypernym', args, logger
),
}
return raw_suite
def balanced_exist(dataset, args, logger):
logger('Loading a balanced exist suite')
raw_suite = raw_exist_hypernym(dataset, args, logger)
balanced_suite = load_questions.balance_KwAns_suite(raw_suite, logger)
return balanced_suite
def biased_exist(dataset, test_concepts, args, logger):
logger('Biasing dataset ratio according to test concepts')
with logger.levelup():
raw_suite = raw_exist_hypernym(dataset, args, logger)
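        # Few-shot bias: keep only ``fewshot_ratio`` of the test-concept questions for
        # training, exclude them from val entirely, and keep only them in test.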
biased = {
'train': load_questions.fewer_bias(
raw_suite['train'], test_concepts, args.fewshot_ratio, logger
),
'val': raw_suite['val'].filter(
lambda q: not contains(q['keywords'], test_concepts)
),
'test': raw_suite['test'].filter(
lambda q: contains(q['keywords'], test_concepts)
)
}
balanced = load_questions.balance_KwAns_suite(biased, logger)
return balanced
def biased_classify(dataset, test_concepts, args, logger):
logger('Biasing classify dataset according to test concepts')
with logger.levelup():
raw_suite = raw_classify_hypernym(dataset, args, logger)
biased = {
'train': load_questions.fewer_bias_clsf(
raw_suite['train'], test_concepts, args.fewshot_ratio, logger
),
'val': load_questions.fewer_bias_clsf(
raw_suite['val'], test_concepts, 0, logger
),
'test': load_questions.fewer_bias_clsf(
raw_suite['test'], test_concepts, 0, logger,
reverse=True
)
}
return biased
| [
"utility.common.contains"
] | [((3573, 3611), 'utility.common.contains', 'contains', (["q['keywords']", 'test_concepts'], {}), "(q['keywords'], test_concepts)\n", (3581, 3611), False, 'from utility.common import contains\n'), ((3447, 3485), 'utility.common.contains', 'contains', (["q['keywords']", 'test_concepts'], {}), "(q['keywords'], test_concepts)\n", (3455, 3485), False, 'from utility.common import contains\n')] |
"""Helper functions and classes for users.
They should not be used in skorch directly.
"""
from collections import Sequence
from collections import namedtuple
from functools import partial
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
import torch
from skorch.cli import parse_args
from skorch.utils import _make_split
from skorch.utils import is_torch_data_type
from skorch.utils import to_tensor
class SliceDict(dict):
"""Wrapper for Python dict that makes it sliceable across values.
Use this if your input data is a dictionary and you have problems
with sklearn not being able to slice it. Wrap your dict with
SliceDict and it should usually work.
Note:
* SliceDict cannot be indexed by integers, if you want one row,
say row 3, use `[3:4]`.
* SliceDict accepts numpy arrays and torch tensors as values.
Examples
--------
>>> X = {'key0': val0, 'key1': val1}
>>> search = GridSearchCV(net, params, ...)
>>> search.fit(X, y) # raises error
>>> Xs = SliceDict(key0=val0, key1=val1) # or Xs = SliceDict(**X)
>>> search.fit(Xs, y) # works
"""
def __init__(self, **kwargs):
lengths = [value.shape[0] for value in kwargs.values()]
lengths_set = set(lengths)
if lengths_set and (len(lengths_set) != 1):
raise ValueError(
"Initialized with items of different lengths: {}"
"".format(', '.join(map(str, sorted(lengths_set)))))
if not lengths:
self._len = 0
else:
self._len = lengths[0]
super(SliceDict, self).__init__(**kwargs)
def __len__(self):
return self._len
def __getitem__(self, sl):
if isinstance(sl, int):
# Indexing with integers is not well-defined because that
            # reduces the dimension of arrays by one, messing up
# lengths and shapes.
raise ValueError("SliceDict cannot be indexed by integers.")
if isinstance(sl, str):
return super(SliceDict, self).__getitem__(sl)
return SliceDict(**{k: v[sl] for k, v in self.items()})
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("Key must be str, not {}.".format(type(key)))
length = value.shape[0]
if not self.keys():
self._len = length
if self._len != length:
raise ValueError(
"Cannot set array with shape[0] != {}"
"".format(self._len))
super(SliceDict, self).__setitem__(key, value)
def update(self, kwargs):
for key, value in kwargs.items():
self.__setitem__(key, value)
def __repr__(self):
out = super(SliceDict, self).__repr__()
return "SliceDict(**{})".format(out)
@property
def shape(self):
return (self._len,)
def copy(self):
return type(self)(**self)
def fromkeys(self, *args, **kwargs):
"""fromkeys method makes no sense with SliceDict and is thus not
supported."""
raise TypeError("SliceDict does not support fromkeys.")
def __eq__(self, other):
if self.keys() != other.keys():
return False
for key, val in self.items():
val_other = other[key]
# torch tensors
if is_torch_data_type(val):
if not is_torch_data_type(val_other):
return False
if not (val == val_other).all():
return False
continue
# numpy arrays
if isinstance(val, np.ndarray):
if not isinstance(val_other, np.ndarray):
return False
if not (val == val_other).all():
return False
continue
# rest
if val != val_other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
# This class must be an instance of Sequence and have an ndim
# attribute because sklearn will test this.
class SliceDataset(Sequence):
# pylint: disable=anomalous-backslash-in-string
"""Helper class that wraps a torch dataset to make it work with
sklearn.
Sometimes, sklearn will touch the input data, e.g. when splitting
the data for a grid search. This will fail when the input data is
a torch dataset. To prevent this, use this wrapper class for your
dataset.
Note: This class will only return the X value by default (i.e. the
first value returned by indexing the original dataset). Sklearn,
and hence skorch, always require 2 values, X and y. Therefore, you
still need to provide the y data separately.
Note: This class behaves similarly to a PyTorch
:class:`~torch.utils.data.Subset` when it is indexed by a slice or
numpy array: It will return another ``SliceDataset`` that
references the subset instead of the actual values. Only when it
is indexed by an int does it return the actual values. The reason
for this is to avoid loading all data into memory when sklearn,
for instance, creates a train/validation split on the
dataset. Data will only be loaded in batches during the fit loop.
Examples
--------
>>> X = MyCustomDataset()
>>> search = GridSearchCV(net, params, ...)
>>> search.fit(X, y) # raises error
>>> ds = SliceDataset(X)
>>> search.fit(ds, y) # works
Parameters
----------
dataset : torch.utils.data.Dataset
A valid torch dataset.
idx : int (default=0)
Indicates which element of the dataset should be
returned. Typically, the dataset returns both X and y
values. SliceDataset can only return 1 value. If you want to
get X, choose idx=0 (default), if you want y, choose idx=1.
indices : list, np.ndarray, or None (default=None)
If you only want to return a subset of the dataset, indicate
which subset that is by passing this argument. Typically, this
can be left to be None, which returns all the data. See also
:class:`~torch.utils.data.Subset`.
"""
def __init__(self, dataset, idx=0, indices=None):
self.dataset = dataset
self.idx = idx
self.indices = indices
self.indices_ = (self.indices if self.indices is not None
else np.arange(len(self.dataset)))
self.ndim = 1
def __len__(self):
return len(self.indices_)
@property
def shape(self):
return (len(self),)
def transform(self, data):
"""Additional transformations on ``data``.
        Note: If you use this in conjunction with PyTorch
:class:`~torch.utils.data.DataLoader`, the latter will call
the dataset for each row separately, which means that the
        incoming ``data`` is a single row.
"""
return data
def _select_item(self, Xn):
# Raise a custom error message when accessing out of
# bounds. However, this will only trigger as soon as this is
# indexed by an integer.
try:
return Xn[self.idx]
except IndexError:
name = self.__class__.__name__
msg = ("{} is trying to access element {} but there are only "
"{} elements.".format(name, self.idx, len(Xn)))
raise IndexError(msg)
def __getitem__(self, i):
if isinstance(i, (int, np.integer)):
Xn = self.dataset[self.indices_[i]]
Xi = self._select_item(Xn)
return self.transform(Xi)
if isinstance(i, slice):
return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])
if isinstance(i, np.ndarray):
if i.ndim != 1:
raise IndexError("SliceDataset only supports slicing with 1 "
"dimensional arrays, got {} dimensions instead."
"".format(i.ndim))
if i.dtype == np.bool:
i = np.flatnonzero(i)
return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])
def predefined_split(dataset):
    """Uses ``dataset`` for validation in :class:`.NeuralNet`.
Examples
--------
>>> valid_ds = skorch.Dataset(X, y)
>>> net = NeuralNet(..., train_split=predefined_split(valid_ds))
Parameters
----------
dataset: torch Dataset
        Validation dataset
"""
return partial(_make_split, valid_ds=dataset)
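# Note on predefined_split: ``partial`` pre-binds ``valid_ds``, so the returned
# callable can be passed as ``NeuralNet(train_split=...)`` and will always hand
# back the dataset given here as the validation split.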
class DataFrameTransformer(BaseEstimator, TransformerMixin):
"""Transform a DataFrame into a dict useful for working with skorch.
Transforms cardinal data to floats and categorical data to vectors
of ints so that they can be embedded.
Although skorch can deal with pandas DataFrames, the default
behavior is often not very useful. Use this transformer to
transform the DataFrame into a dict with all float columns
concatenated using the key "X" and all categorical values encoded
as integers, using their respective column names as keys.
Your module must have a matching signature for this to work. It
must accept an argument ``X`` for all cardinal
values. Additionally, for all categorical values, it must accept
an argument with the same name as the corresponding column (see
example below). If you need help with the required signature, use
the ``describe_signature`` method of this class and pass it your
data.
You can choose whether you want to treat int columns the same as
float columns (default) or as categorical values.
To one-hot encode categorical features, initialize their
corresponding embedding layers using the identity matrix.
Examples
--------
>>> df = pd.DataFrame({
... 'col_floats': np.linspace(0, 1, 12),
... 'col_ints': [11, 11, 10] * 4,
... 'col_cats': ['a', 'b', 'a'] * 4,
... })
>>> # cast to category dtype to later learn embeddings
>>> df['col_cats'] = df['col_cats'].astype('category')
>>> y = np.asarray([0, 1, 0] * 4)
>>> class MyModule(nn.Module):
... def __init__(self):
... super().__init__()
... self.reset_params()
>>> def reset_params(self):
... self.embedding = nn.Embedding(2, 10)
... self.linear = nn.Linear(2, 10)
... self.out = nn.Linear(20, 2)
... self.nonlin = nn.Softmax(dim=-1)
>>> def forward(self, X, col_cats):
... # "X" contains the values from col_floats and col_ints
... # "col_cats" contains the values from "col_cats"
... X_lin = self.linear(X)
... X_cat = self.embedding(col_cats)
... X_concat = torch.cat((X_lin, X_cat), dim=1)
... return self.nonlin(self.out(X_concat))
>>> net = NeuralNetClassifier(MyModule)
>>> pipe = Pipeline([
... ('transform', DataFrameTransformer()),
... ('net', net),
... ])
>>> pipe.fit(df, y)
Parameters
----------
treat_int_as_categorical : bool (default=False)
Whether to treat integers as categorical values or as cardinal
values, i.e. the same as floats.
float_dtype : numpy dtype or None (default=np.float32)
The dtype to cast the cardinal values to. If None, don't change
them.
int_dtype : numpy dtype or None (default=np.int64)
The dtype to cast the categorical values to. If None, don't
change them. If you do this, it can happen that the categorical
values will have different dtypes, reflecting the number of
unique categories.
Notes
-----
The value of X will always be 2-dimensional, even if it only
contains 1 column.
"""
import pandas as pd
def __init__(
self,
treat_int_as_categorical=False,
float_dtype=np.float32,
int_dtype=np.int64,
):
self.treat_int_as_categorical = treat_int_as_categorical
self.float_dtype = float_dtype
self.int_dtype = int_dtype
def _check_dtypes(self, df):
"""Perform a check on the DataFrame to detect wrong dtypes or keys.
Makes sure that there are no conflicts in key names.
If dtypes are found that cannot be dealt with, raises a
TypeError with a message indicating which ones caused trouble.
Raises
------
ValueError
If there already is a column named 'X'.
TypeError
If a wrong dtype is found.
"""
if 'X' in df:
raise ValueError(
"DataFrame contains a column named 'X', which clashes "
"with the name chosen for cardinal features; consider "
"renaming that column.")
wrong_dtypes = []
for col, dtype in zip(df, df.dtypes):
if isinstance(dtype, self.pd.api.types.CategoricalDtype):
continue
if np.issubdtype(dtype, np.integer):
continue
if np.issubdtype(dtype, np.floating):
continue
wrong_dtypes.append((col, dtype))
if not wrong_dtypes:
return
wrong_dtypes = sorted(wrong_dtypes, key=lambda tup: tup[0])
msg_dtypes = ", ".join(
"{} ({})".format(col, dtype) for col, dtype in wrong_dtypes)
msg = ("The following columns have dtypes that cannot be "
"interpreted as numerical dtypes: {}".format(msg_dtypes))
raise TypeError(msg)
# pylint: disable=unused-argument
def fit(self, df, y=None, **fit_params):
self._check_dtypes(df)
return self
def transform(self, df):
"""Transform DataFrame to become a dict that works well with skorch.
Parameters
----------
df : pd.DataFrame
Incoming DataFrame.
Returns
-------
X_dict: dict
Dictionary with all floats concatenated using the key "X"
and all categorical values encoded as integers, using their
respective column names as keys.
"""
self._check_dtypes(df)
X_dict = {}
Xf = [] # floats
for col, dtype in zip(df, df.dtypes):
X_col = df[col]
if isinstance(dtype, self.pd.api.types.CategoricalDtype):
x = X_col.cat.codes.values
if self.int_dtype is not None:
x = x.astype(self.int_dtype)
X_dict[col] = x
continue
if (
np.issubdtype(dtype, np.integer)
and self.treat_int_as_categorical
):
x = X_col.astype('category').cat.codes.values
if self.int_dtype is not None:
x = x.astype(self.int_dtype)
X_dict[col] = x
continue
Xf.append(X_col.values)
if not Xf:
return X_dict
X = np.stack(Xf, axis=1)
if self.float_dtype is not None:
X = X.astype(self.float_dtype)
X_dict['X'] = X
return X_dict
def describe_signature(self, df):
"""Describe the signature required for the given data.
Pass the DataFrame to receive a description of the signature
required for the module's forward method. The description
consists of three parts:
1. The names of the arguments that the forward method
needs.
2. The dtypes of the torch tensors passed to forward.
3. The number of input units that are required for the
corresponding argument. For the float parameter, this is just
the number of dimensions of the tensor. For categorical
parameters, it is the number of unique elements.
Returns
-------
signature : dict
Returns a dict with each key corresponding to one key
required for the forward method. The values are dictionaries
of two elements. The key "dtype" describes the torch dtype
of the resulting tensor, the key "input_units" describes the
required number of input units.
"""
X_dict = self.fit_transform(df)
signature = {}
X = X_dict.get('X')
if X is not None:
signature['X'] = dict(
dtype=to_tensor(X, device='cpu').dtype,
input_units=X.shape[1],
)
for key, val in X_dict.items():
if key == 'X':
continue
tensor = to_tensor(val, device='cpu')
nunique = len(torch.unique(tensor))
signature[key] = dict(
dtype=tensor.dtype,
input_units=nunique,
)
return signature
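# Illustrative output of ``describe_signature`` for the DataFrame shown in the class
# docstring above (with the default dtypes; exact values depend on the data):
#     {'X': {'dtype': torch.float32, 'input_units': 2},
#      'col_cats': {'dtype': torch.int64, 'input_units': 2}}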
| [
"torch.unique",
"numpy.flatnonzero",
"numpy.stack",
"numpy.issubdtype",
"skorch.utils.to_tensor",
"functools.partial",
"skorch.utils.is_torch_data_type"
] | [((8580, 8618), 'functools.partial', 'partial', (['_make_split'], {'valid_ds': 'dataset'}), '(_make_split, valid_ds=dataset)\n', (8587, 8618), False, 'from functools import partial\n'), ((15120, 15140), 'numpy.stack', 'np.stack', (['Xf'], {'axis': '(1)'}), '(Xf, axis=1)\n', (15128, 15140), True, 'import numpy as np\n'), ((3407, 3430), 'skorch.utils.is_torch_data_type', 'is_torch_data_type', (['val'], {}), '(val)\n', (3425, 3430), False, 'from skorch.utils import is_torch_data_type\n'), ((13092, 13124), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (13105, 13124), True, 'import numpy as np\n'), ((13166, 13199), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.floating'], {}), '(dtype, np.floating)\n', (13179, 13199), True, 'import numpy as np\n'), ((16702, 16730), 'skorch.utils.to_tensor', 'to_tensor', (['val'], {'device': '"""cpu"""'}), "(val, device='cpu')\n", (16711, 16730), False, 'from skorch.utils import to_tensor\n'), ((8141, 8158), 'numpy.flatnonzero', 'np.flatnonzero', (['i'], {}), '(i)\n', (8155, 8158), True, 'import numpy as np\n'), ((14707, 14739), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (14720, 14739), True, 'import numpy as np\n'), ((16757, 16777), 'torch.unique', 'torch.unique', (['tensor'], {}), '(tensor)\n', (16769, 16777), False, 'import torch\n'), ((3455, 3484), 'skorch.utils.is_torch_data_type', 'is_torch_data_type', (['val_other'], {}), '(val_other)\n', (3473, 3484), False, 'from skorch.utils import is_torch_data_type\n'), ((16499, 16525), 'skorch.utils.to_tensor', 'to_tensor', (['X'], {'device': '"""cpu"""'}), "(X, device='cpu')\n", (16508, 16525), False, 'from skorch.utils import to_tensor\n')] |
from collections import OrderedDict
import pandas as pd
from tests import project_test, assert_output
@project_test
def test_csv_to_close(fn):
tickers = ['A', 'B', 'C']
dates = ['2017-09-22', '2017-09-25', '2017-09-26', '2017-09-27', '2017-09-28']
fn_inputs = {
'csv_filepath': 'prices_2017_09_22_2017-09-28.csv',
'field_names': ['ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'adj_close', 'adj_volume']}
fn_correct_outputs = OrderedDict([
(
'close',
pd.DataFrame(
[
[152.48000000, 149.19000000, 59.35000000],
[151.11000000, 145.06000000, 60.29000000],
[152.42000000, 145.21000000, 57.74000000],
[154.34000000, 147.02000000, 58.41000000],
[153.68000000, 147.19000000, 56.76000000]],
dates, tickers))])
assert_output(fn, fn_inputs, fn_correct_outputs)
| [
"pandas.DataFrame",
"tests.assert_output"
] | [((919, 967), 'tests.assert_output', 'assert_output', (['fn', 'fn_inputs', 'fn_correct_outputs'], {}), '(fn, fn_inputs, fn_correct_outputs)\n', (932, 967), False, 'from tests import project_test, assert_output\n'), ((531, 695), 'pandas.DataFrame', 'pd.DataFrame', (['[[152.48, 149.19, 59.35], [151.11, 145.06, 60.29], [152.42, 145.21, 57.74],\n [154.34, 147.02, 58.41], [153.68, 147.19, 56.76]]', 'dates', 'tickers'], {}), '([[152.48, 149.19, 59.35], [151.11, 145.06, 60.29], [152.42, \n 145.21, 57.74], [154.34, 147.02, 58.41], [153.68, 147.19, 56.76]],\n dates, tickers)\n', (543, 695), True, 'import pandas as pd\n')] |
"""
Starting with 1 and spiralling anticlockwise in the following way,
a square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along
the bottom right diagonal,
but what is more interesting is that 8 out of the 13 numbers
lying along both diagonals are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above,
a square spiral with side length 9 will be formed.
If this process is continued,
what is the side length of the square spiral
for which the ratio of primes along both diagonals first falls below 10%?
"""
import time
def isprime(n):
if n < 2:
return False
if n % 2 == 0 or n % 3 == 0 or n % 5 == 0:
return n == 2 or n == 3 or n == 5
    return all(n % k != 0 for k in range(7, int(n ** 0.5) + 1, 2))
if __name__ == '__main__':
start = time.process_time()
count = 3
i = 3
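    # A spiral of side length i has 2*i - 1 numbers on its two diagonals; the
    # bottom-right diagonal holds the odd squares i**2 (never prime), so only the
    # other three corners i**2 - i + 1, i**2 - 2*i + 2 and i**2 - 3*i + 3 are tested.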
while count / (2*i - 1) >= 0.1:
i += 2
count += [isprime(x) for x in [i**2 - i + 1,
i**2 - 2*i + 2, i**2 - 3 * i + 3]].count(True)
print(i)
print('Runtime is', time.process_time() - start) | [
"time.process_time"
] | [((1030, 1049), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1047, 1049), False, 'import time\n'), ((1278, 1297), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1295, 1297), False, 'import time\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains base save widget for data items
"""
from __future__ import print_function, division, absolute_import
import os
import logging
import traceback
from Qt.QtCore import Signal, QSize
from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog
from tpDcc import dcc
from tpDcc.managers import resources
from tpDcc.libs.resources.core import theme
from tpDcc.libs.python import decorators
from tpDcc.libs.qt.core import base, qtutils
from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot
from tpDcc.tools.datalibrary.core import utils
from tpDcc.tools.datalibrary.widgets import sequence
LOGGER = logging.getLogger('tpDcc-libs-datalibrary')
class _MetaSaveWidget(type):
def __call__(self, *args, **kwargs):
as_class = kwargs.get('as_class', False)
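        # Dispatch on the current DCC: return (or instantiate) the Maya-specific
        # save widget when running inside Maya, otherwise fall back to the base Qt one.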
if dcc.client().is_maya():
from tpDcc.tools.datalibrary.dccs.maya.widgets import save
if as_class:
return save.MayaSaveWidget
else:
return type.__call__(save.MayaSaveWidget, *args, **kwargs)
else:
if as_class:
return BaseSaveWidget
else:
return type.__call__(BaseSaveWidget, *args, **kwargs)
@theme.mixin
class BaseSaveWidget(base.BaseWidget, object):
cancelled = Signal()
saved = Signal()
ENABLE_THUMBNAIL_CAPTURE = True
def __init__(self, item_view, client=None, *args, **kwargs):
self._item_view = item_view
self._client = client
self._form_widget = None
self._sequence_widget = None
super(BaseSaveWidget, self).__init__(*args, **kwargs)
self.setObjectName('LibrarySaveWidget')
self._create_sequence_widget()
self.update_thumbnail_size()
self.set_item_view(item_view)
# ============================================================================================================
# OVERRIDES
# ============================================================================================================
def get_main_layout(self):
return layouts.VerticalLayout(spacing=4, margins=(0, 0, 0, 0))
def ui(self):
super(BaseSaveWidget, self).ui()
self.setWindowTitle('Save Item')
title_frame = QFrame(self)
title_frame_layout = layouts.VerticalLayout(spacing=0, margins=(0, 0, 0, 0))
title_frame.setLayout(title_frame_layout)
title_widget = QFrame(self)
title_layout = layouts.VerticalLayout(spacing=0, margins=(0, 0, 0, 0))
title_widget.setLayout(title_layout)
title_buttons_layout = layouts.HorizontalLayout(spacing=0, margins=(0, 0, 0, 0))
title_layout.addLayout(title_buttons_layout)
title_icon = label.BaseLabel(parent=self)
title_button = label.BaseLabel(self.item().menu_name(), parent=self)
title_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self._menu_button = buttons.BaseButton(parent=self)
self._menu_button.setIcon(resources.icon('menu_dots'))
self._menu_button.setVisible(False) # Hide by default
title_buttons_layout.addWidget(title_icon)
title_buttons_layout.addSpacing(5)
title_buttons_layout.addWidget(title_button)
title_buttons_layout.addWidget(self._menu_button)
title_frame_layout.addWidget(title_widget)
item_icon_name = self.item().icon() or 'tpDcc'
item_icon = resources.icon(item_icon_name)
if not item_icon:
item_icon = resources.icon('tpDcc')
title_icon.setPixmap(item_icon.pixmap(QSize(20, 20)))
thumbnail_layout = layouts.HorizontalLayout(spacing=0, margins=(0, 0, 0, 0))
self._thumbnail_frame = QFrame(self)
thumbnail_frame_layout = layouts.VerticalLayout(spacing=0, margins=(0, 2, 0, 2))
self._thumbnail_frame.setLayout(thumbnail_frame_layout)
thumbnail_layout.addWidget(self._thumbnail_frame)
self._options_frame = QFrame(self)
options_frame_layout = layouts.VerticalLayout(spacing=0, margins=(4, 2, 4, 2))
self._options_frame.setLayout(options_frame_layout)
preview_buttons_frame = QFrame(self)
self._preview_buttons_layout = layouts.HorizontalLayout(spacing=0, margins=(4, 2, 4, 2))
preview_buttons_frame.setLayout(self._preview_buttons_layout)
self._save_button = buttons.BaseButton('Save', parent=self)
self._save_button.setIcon(resources.icon('save'))
self._cancel_button = buttons.BaseButton('Cancel', parent=self)
self._cancel_button.setIcon(resources.icon('cancel'))
self._preview_buttons_layout.addStretch()
self._preview_buttons_layout.addWidget(self._save_button)
self._preview_buttons_layout.addStretch()
self._preview_buttons_layout.addWidget(self._cancel_button)
self._preview_buttons_layout.addStretch()
self.main_layout.addWidget(title_frame)
self.main_layout.addLayout(thumbnail_layout)
self.main_layout.addWidget(self._options_frame)
self.main_layout.addWidget(preview_buttons_frame)
def setup_signals(self):
self._menu_button.clicked.connect(self._on_show_menu)
self._save_button.clicked.connect(self._on_save)
self._cancel_button.clicked.connect(self._on_cancel)
def resizeEvent(self, event):
"""
Overrides base QWidget resizeEvent function
:param event: QResizeEvent
"""
self.update_thumbnail_size()
def close(self):
"""
Overrides base QWidget close function to disable script job when its is done
"""
if self._form_widget:
self._form_widget.save_persistent_values()
super(BaseSaveWidget, self).close()
# ============================================================================================================
# BASE
# ============================================================================================================
def folder_path(self):
"""
Returns the folder path
:return: str
"""
return self.form_widget().value('folder')
def set_folder_path(self, path):
"""
Sets the destination folder path
:param path: str
"""
self.form_widget().set_value('folder', path)
def set_thumbnail_path(self, path):
"""
Sets the path to the thumbnail image or the image sequence directory
:param path: str
"""
file_name, extension = os.path.splitext(path)
target = utils.temp_path('thumbnail{}'.format(extension))
utils.copy_path(path, target, force=True)
self._sequence_widget.set_path(target)
def library_window(self):
"""
Returns library widget window for the item
:return: LibraryWindow
"""
return self.item_view().library_window()
def set_library_window(self, library_window):
"""
Sets the library widget for the item
:param library_window: LibraryWindow
"""
self.item_view().set_library_window(library_window)
def form_widget(self):
"""
Returns the form widget instance
:return: FormWidget
"""
return self._form_widget
def item(self):
"""
Returns current item
:return:
"""
return self.item_view().item
def item_view(self):
"""
Returns the current item view
:return: LibraryItem
"""
return self._item_view
def set_item_view(self, item_view):
"""
Sets the base item to be created
:param item_view: LibraryItem
"""
self._item_view = item_view
if os.path.exists(item_view.image_sequence_path()):
self.set_thumbnail_path(item_view.image_sequence_path())
elif not item_view.is_default_thumbnail_path():
self.set_thumbnail_path(item_view.thumbnail_path())
schema = self.item().save_schema()
if schema:
form_widget = formwidget.FormWidget(self)
form_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
form_widget.set_schema(schema)
form_widget.set_validator(self.item().save_validator)
# item_name = os.path.basename(item.path())
# form_widget.set_values({'name': item_name})
self._options_frame.layout().addWidget(form_widget)
form_widget.validate()
self._form_widget = form_widget
else:
self._options_frame.setVisible(False)
def update_thumbnail_size(self):
"""
Updates the thumbnail button to teh size of the widget
"""
width = self.width() - 10
if width > 250:
width = 250
size = QSize(width, width)
if self._sequence_widget:
self._sequence_widget.setIconSize(size)
self._sequence_widget.setMaximumSize(size)
self._thumbnail_frame.setMaximumSize(size)
def show_thumbnail_capture_dialog(self):
"""
Asks the user if they would like to capture a thumbnail
:return: int
"""
buttons = QDialogButtonBox.Yes | QDialogButtonBox.Ignore | QDialogButtonBox.Cancel
parent = self.item_view().library_window()
btn = messagebox.MessageBox.question(
parent, 'Create a thumbnail', 'Would you like to capture a thumbnail?', buttons=buttons)
if btn == QDialogButtonBox.Yes:
self.thumbnail_capture()
return btn
def show_by_frame_dialog(self):
"""
Show the by frame dialog
"""
help_text = """
        To help speed up the playblast you can set the "by frame" to a number greater than 1.
For example if the "by frame" is set to 2 it will playblast every second frame
"""
result = None
options = self.form_widget().values()
by_frame = options.get('byFrame', 1)
start_frame, end_frame = options.get('frameRange', [None, None])
duration = end_frame - start_frame if start_frame is not None and end_frame is not None else 1
if duration > 100 and by_frame == 1:
buttons = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
result = messagebox.MessageBox.question(
self.library_window(), title='Tip', text=help_text, buttons=buttons, enable_dont_show_checkbox=True
)
return result
def thumbnail_capture(self, show=False):
"""
Captures a playblast and saves it to the temporal thumbnail path
:param show: bool
"""
options = self.form_widget().values()
start_frame, end_frame = options.get('frameRange', [None, None])
step = options.get('byFrame', 1)
if not qtutils.is_control_modifier():
result = self.show_by_frame_dialog()
if result == QDialogButtonBox.Cancel:
return
path = utils.temp_path('sequence', 'thumbnail.jpg')
try:
snapshot.SnapshotWindow(path=path, on_save=self._on_thumbnail_captured)
# thumbnail.ThumbnailCaptureDialog.thumbnail_capture(
# path=self._temp_path,
# show=show,
# start_frame=start_frame,
# end_frame=end_frame,
# step=step,
# clear_cache=False,
# captured=self._on_thumbnail_captured
# )
except Exception as e:
messagebox.MessageBox.critical(self.library_window(), 'Error while capturing thumbnail', str(e))
LOGGER.error(traceback.format_exc())
def save(self, path, thumbnail):
"""
Saves the item with the given objects to the given disk location path
:param path: str
:param thumbnail: str
"""
kwargs = self.form_widget().values()
sequence_path = self._sequence_widget.dirname()
item_view = self.item_view()
item_view.item_view.path = path
library_window = self.library_window()
valid_save = item_view.safe_save(thumbnail=thumbnail, sequence_path=sequence_path, **kwargs)
if valid_save:
if library_window:
library_window.refresh()
library_window.select_folder_path(path)
self.saved.emit()
self.close()
# ============================================================================================================
# INTERNAL
# ============================================================================================================
def _create_sequence_widget(self):
"""
Internal function that creates a sequence widget to replace the static thumbnail widget
"""
self._sequence_widget = sequence.ImageSequenceWidget(self)
self._sequence_widget.setObjectName('thumbnailButton')
self._thumbnail_frame.layout().insertWidget(0, self._sequence_widget)
self._sequence_widget.clicked.connect(self._on_thumbnail_capture)
self._sequence_widget.setToolTip(
'Click to capture a thumbnail from the current model panel.\n'
'CTRL + Click to show the capture window for better framing.')
camera_icon = resources.get('icons', self.theme().style(), 'camera.png')
expand_icon = resources.get('icons', self.theme().style(), 'full_screen.png')
folder_icon = resources.get('icons', self.theme().style(), 'folder.png')
self._sequence_widget.addAction(
camera_icon, 'Capture new image', 'Capture new image', self._on_thumbnail_capture)
self._sequence_widget.addAction(
expand_icon, 'Show Capture window', 'Show Capture window', self._on_show_capture_window)
self._sequence_widget.addAction(
folder_icon, 'Load image from disk', 'Load image from disk', self._on_show_browse_image_dialog)
self._sequence_widget.setIcon(resources.icon('tpdcc'))
# ============================================================================================================
# CALLBACKS
# ============================================================================================================
def _on_show_menu(self):
"""
Internal callback function that is called when menu button is clicked byu the user
:return: QAction
"""
pass
def _on_save(self):
if not self.library_window():
return False
library = self.library_window().library()
if not library:
return False
try:
self.form_widget().validate()
if self.form_widget().has_errors():
raise Exception('\n'.join(self.form_widget().errors()))
has_frames = self._sequence_widget.has_frames()
if not has_frames and self.ENABLE_THUMBNAIL_CAPTURE:
button = self.show_thumbnail_capture_dialog()
if button == QDialogButtonBox.Cancel:
return False
name = self.form_widget().value('name')
folder = self.form_widget().value('folder')
comment = self.form_widget().value('comment') or ''
extension = self.item().extension()
if extension and not name.endswith(extension):
name = '{}{}'.format(name, extension)
path = folder + '/' + name
thumbnail = self._sequence_widget.first_frame()
save_item = library.get(path, only_extension=True)
save_function = save_item.functionality().get('save')
if not save_function:
LOGGER.warning('Item "{}" does not supports save operation'.format(save_item))
return False
library_path = self.item().library.identifier
if not library_path or not os.path.isfile(library_path):
LOGGER.warning('Impossible to save data "{}" because its library does not exists: "{}"'.format(
self.item(), library_path))
return
values = self.form_widget().values()
try:
if self._client:
success, message, dependencies = self._client().save_data(
library_path=library_path, data_path=path, values=values)
if not success:
messagebox.MessageBox.critical(self.library_window(), 'Error while saving', str(message))
LOGGER.error(str(message))
return False
else:
dependencies = save_function(**values)
except Exception as exc:
messagebox.MessageBox.critical(self.library_window(), 'Error while saving', str(exc))
LOGGER.error(traceback.format_exc())
return False
except Exception as exc:
messagebox.MessageBox.critical(self.library_window(), 'Error while saving', str(exc))
LOGGER.error(traceback.format_exc())
raise
new_item_path = save_item.format_identifier()
if not new_item_path or not os.path.isfile(new_item_path):
LOGGER.warning('Although saving process for item "{}" was completed, '
'it seems no new data has been generated!'.format(save_item))
self.saved.emit()
return False
save_item.library.add(new_item_path)
# # TODO: Instead of creating a local version, we will use a git system to upload our data to our project repo
# # TODO: Should we save new versions of dependencies too?
# valid = save_item.create_version(comment=comment)
# if not valid:
# LOGGER.warning('Impossible to store new version for data "{}"'.format(save_item))
if thumbnail and os.path.isfile(thumbnail):
save_item.store_thumbnail(thumbnail)
self.library_window().sync()
save_item.update_dependencies(dependencies=dependencies)
self.saved.emit()
return True
def _on_cancel(self):
self.cancelled.emit()
self.close()
def _on_thumbnail_capture(self):
"""
Internal callback function that is called when a thumbnail capture must be done
"""
self.thumbnail_capture(show=False)
def _on_thumbnail_captured(self, captured_path):
"""
Internal callback function that is called when thumbnail is captured
:param captured_path: str
"""
thumb_path = os.path.dirname(captured_path)
self.set_thumbnail_path(thumb_path)
def _on_show_capture_window(self):
"""
Internal callback function that shows the capture window for framing
"""
self.thumbnail_capture(show=True)
def _on_show_browse_image_dialog(self):
"""
Internal callback function that shows a file dialog for choosing an image from disk
"""
file_dialog = QFileDialog(self, caption='Open Image', filter='Image Files (*.png *.jpg)')
file_dialog.fileSelected.connect(self.set_thumbnail_path)
file_dialog.exec_()
@decorators.add_metaclass(_MetaSaveWidget)
class SaveWidget(object):
pass
| [
"logging.getLogger",
"tpDcc.libs.qt.widgets.snapshot.SnapshotWindow",
"tpDcc.dcc.client",
"tpDcc.libs.qt.widgets.layouts.HorizontalLayout",
"tpDcc.libs.qt.widgets.label.BaseLabel",
"tpDcc.libs.qt.widgets.formwidget.FormWidget",
"tpDcc.tools.datalibrary.core.utils.copy_path",
"Qt.QtWidgets.QFileDialog"... | [((724, 767), 'logging.getLogger', 'logging.getLogger', (['"""tpDcc-libs-datalibrary"""'], {}), "('tpDcc-libs-datalibrary')\n", (741, 767), False, 'import logging\n'), ((19417, 19458), 'tpDcc.libs.python.decorators.add_metaclass', 'decorators.add_metaclass', (['_MetaSaveWidget'], {}), '(_MetaSaveWidget)\n', (19441, 19458), False, 'from tpDcc.libs.python import decorators\n'), ((1401, 1409), 'Qt.QtCore.Signal', 'Signal', ([], {}), '()\n', (1407, 1409), False, 'from Qt.QtCore import Signal, QSize\n'), ((1422, 1430), 'Qt.QtCore.Signal', 'Signal', ([], {}), '()\n', (1428, 1430), False, 'from Qt.QtCore import Signal, QSize\n'), ((2192, 2247), 'tpDcc.libs.qt.widgets.layouts.VerticalLayout', 'layouts.VerticalLayout', ([], {'spacing': '(4)', 'margins': '(0, 0, 0, 0)'}), '(spacing=4, margins=(0, 0, 0, 0))\n', (2214, 2247), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((2373, 2385), 'Qt.QtWidgets.QFrame', 'QFrame', (['self'], {}), '(self)\n', (2379, 2385), False, 'from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog\n'), ((2415, 2470), 'tpDcc.libs.qt.widgets.layouts.VerticalLayout', 'layouts.VerticalLayout', ([], {'spacing': '(0)', 'margins': '(0, 0, 0, 0)'}), '(spacing=0, margins=(0, 0, 0, 0))\n', (2437, 2470), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((2544, 2556), 'Qt.QtWidgets.QFrame', 'QFrame', (['self'], {}), '(self)\n', (2550, 2556), False, 'from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog\n'), ((2580, 2635), 'tpDcc.libs.qt.widgets.layouts.VerticalLayout', 'layouts.VerticalLayout', ([], {'spacing': '(0)', 'margins': '(0, 0, 0, 0)'}), '(spacing=0, margins=(0, 0, 0, 0))\n', (2602, 2635), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((2712, 2769), 'tpDcc.libs.qt.widgets.layouts.HorizontalLayout', 'layouts.HorizontalLayout', ([], {'spacing': '(0)', 'margins': '(0, 0, 0, 0)'}), '(spacing=0, margins=(0, 0, 0, 0))\n', (2736, 2769), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((2844, 2872), 'tpDcc.libs.qt.widgets.label.BaseLabel', 'label.BaseLabel', ([], {'parent': 'self'}), '(parent=self)\n', (2859, 2872), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((3059, 3090), 'tpDcc.libs.qt.widgets.buttons.BaseButton', 'buttons.BaseButton', ([], {'parent': 'self'}), '(parent=self)\n', (3077, 3090), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((3552, 3582), 'tpDcc.managers.resources.icon', 'resources.icon', (['item_icon_name'], {}), '(item_icon_name)\n', (3566, 3582), False, 'from tpDcc.managers import resources\n'), ((3747, 3804), 'tpDcc.libs.qt.widgets.layouts.HorizontalLayout', 'layouts.HorizontalLayout', ([], {'spacing': '(0)', 'margins': '(0, 0, 0, 0)'}), '(spacing=0, margins=(0, 0, 0, 0))\n', (3771, 3804), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((3837, 3849), 'Qt.QtWidgets.QFrame', 'QFrame', (['self'], {}), '(self)\n', (3843, 3849), False, 'from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog\n'), ((3883, 3938), 'tpDcc.libs.qt.widgets.layouts.VerticalLayout', 'layouts.VerticalLayout', ([], {'spacing': '(0)', 'margins': '(0, 2, 0, 2)'}), '(spacing=0, margins=(0, 2, 0, 2))\n', 
(3905, 3938), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((4092, 4104), 'Qt.QtWidgets.QFrame', 'QFrame', (['self'], {}), '(self)\n', (4098, 4104), False, 'from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog\n'), ((4136, 4191), 'tpDcc.libs.qt.widgets.layouts.VerticalLayout', 'layouts.VerticalLayout', ([], {'spacing': '(0)', 'margins': '(4, 2, 4, 2)'}), '(spacing=0, margins=(4, 2, 4, 2))\n', (4158, 4191), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((4285, 4297), 'Qt.QtWidgets.QFrame', 'QFrame', (['self'], {}), '(self)\n', (4291, 4297), False, 'from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog\n'), ((4337, 4394), 'tpDcc.libs.qt.widgets.layouts.HorizontalLayout', 'layouts.HorizontalLayout', ([], {'spacing': '(0)', 'margins': '(4, 2, 4, 2)'}), '(spacing=0, margins=(4, 2, 4, 2))\n', (4361, 4394), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((4493, 4532), 'tpDcc.libs.qt.widgets.buttons.BaseButton', 'buttons.BaseButton', (['"""Save"""'], {'parent': 'self'}), "('Save', parent=self)\n", (4511, 4532), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((4621, 4662), 'tpDcc.libs.qt.widgets.buttons.BaseButton', 'buttons.BaseButton', (['"""Cancel"""'], {'parent': 'self'}), "('Cancel', parent=self)\n", (4639, 4662), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((6660, 6682), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (6676, 6682), False, 'import os\n'), ((6757, 6798), 'tpDcc.tools.datalibrary.core.utils.copy_path', 'utils.copy_path', (['path', 'target'], {'force': '(True)'}), '(path, target, force=True)\n', (6772, 6798), False, 'from tpDcc.tools.datalibrary.core import utils\n'), ((8978, 8997), 'Qt.QtCore.QSize', 'QSize', (['width', 'width'], {}), '(width, width)\n', (8983, 8997), False, 'from Qt.QtCore import Signal, QSize\n'), ((9502, 9625), 'tpDcc.libs.qt.widgets.messagebox.MessageBox.question', 'messagebox.MessageBox.question', (['parent', '"""Create a thumbnail"""', '"""Would you like to capture a thumbnail?"""'], {'buttons': 'buttons'}), "(parent, 'Create a thumbnail',\n 'Would you like to capture a thumbnail?', buttons=buttons)\n", (9532, 9625), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((11168, 11212), 'tpDcc.tools.datalibrary.core.utils.temp_path', 'utils.temp_path', (['"""sequence"""', '"""thumbnail.jpg"""'], {}), "('sequence', 'thumbnail.jpg')\n", (11183, 11212), False, 'from tpDcc.tools.datalibrary.core import utils\n'), ((13017, 13051), 'tpDcc.tools.datalibrary.widgets.sequence.ImageSequenceWidget', 'sequence.ImageSequenceWidget', (['self'], {}), '(self)\n', (13045, 13051), False, 'from tpDcc.tools.datalibrary.widgets import sequence\n'), ((18801, 18831), 'os.path.dirname', 'os.path.dirname', (['captured_path'], {}), '(captured_path)\n', (18816, 18831), False, 'import os\n'), ((19244, 19319), 'Qt.QtWidgets.QFileDialog', 'QFileDialog', (['self'], {'caption': '"""Open Image"""', 'filter': '"""Image Files (*.png *.jpg)"""'}), "(self, caption='Open Image', filter='Image Files (*.png *.jpg)')\n", (19255, 19319), False, 'from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog\n'), ((3125, 3152), 'tpDcc.managers.resources.icon', 'resources.icon', 
(['"""menu_dots"""'], {}), "('menu_dots')\n", (3139, 3152), False, 'from tpDcc.managers import resources\n'), ((3633, 3656), 'tpDcc.managers.resources.icon', 'resources.icon', (['"""tpDcc"""'], {}), "('tpDcc')\n", (3647, 3656), False, 'from tpDcc.managers import resources\n'), ((4567, 4589), 'tpDcc.managers.resources.icon', 'resources.icon', (['"""save"""'], {}), "('save')\n", (4581, 4589), False, 'from tpDcc.managers import resources\n'), ((4699, 4723), 'tpDcc.managers.resources.icon', 'resources.icon', (['"""cancel"""'], {}), "('cancel')\n", (4713, 4723), False, 'from tpDcc.managers import resources\n'), ((8213, 8240), 'tpDcc.libs.qt.widgets.formwidget.FormWidget', 'formwidget.FormWidget', (['self'], {}), '(self)\n', (8234, 8240), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((10999, 11028), 'tpDcc.libs.qt.core.qtutils.is_control_modifier', 'qtutils.is_control_modifier', ([], {}), '()\n', (11026, 11028), False, 'from tpDcc.libs.qt.core import base, qtutils\n'), ((11239, 11310), 'tpDcc.libs.qt.widgets.snapshot.SnapshotWindow', 'snapshot.SnapshotWindow', ([], {'path': 'path', 'on_save': 'self._on_thumbnail_captured'}), '(path=path, on_save=self._on_thumbnail_captured)\n', (11262, 11310), False, 'from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot\n'), ((14175, 14198), 'tpDcc.managers.resources.icon', 'resources.icon', (['"""tpdcc"""'], {}), "('tpdcc')\n", (14189, 14198), False, 'from tpDcc.managers import resources\n'), ((18090, 18115), 'os.path.isfile', 'os.path.isfile', (['thumbnail'], {}), '(thumbnail)\n', (18104, 18115), False, 'import os\n'), ((901, 913), 'tpDcc.dcc.client', 'dcc.client', ([], {}), '()\n', (911, 913), False, 'from tpDcc import dcc\n'), ((3703, 3716), 'Qt.QtCore.QSize', 'QSize', (['(20)', '(20)'], {}), '(20, 20)\n', (3708, 3716), False, 'from Qt.QtCore import Signal, QSize\n'), ((17393, 17422), 'os.path.isfile', 'os.path.isfile', (['new_item_path'], {}), '(new_item_path)\n', (17407, 17422), False, 'import os\n'), ((11830, 11852), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11850, 11852), False, 'import traceback\n'), ((16089, 16117), 'os.path.isfile', 'os.path.isfile', (['library_path'], {}), '(library_path)\n', (16103, 16117), False, 'import os\n'), ((17260, 17282), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17280, 17282), False, 'import traceback\n'), ((17050, 17072), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17070, 17072), False, 'import traceback\n')] |
from flask import Flask
app = Flask(__name__)
if __name__ == "__main__":
    # import the local api module (typically where the route handlers live) before serving
    from api import *
    app.run(host='0.0.0.0', port=5000, debug=False) | [
"flask.Flask"
] | [((30, 45), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (35, 45), False, 'from flask import Flask\n')] |
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2019 <NAME> and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.settings import app_cfg
ext_choices = ['jpg', 'png']
@click.command()
@click.option('-i', '--input', 'opt_input', required=True,
help='Input file CSV')
@click.option('-o', '--output', 'opt_output',
  help='Output file CSV (defaults to the input file)')
@click.option('--label', 'opt_labels_from_to', required=True, type=(str, str),
  multiple=True, help='Label from, to')
@click.pass_context
def cli(ctx, opt_input, opt_output, opt_labels_from_to):
"""Relabel label enum in annotation CSV"""
import pandas as pd
log = app_cfg.LOG
opt_output = opt_output if opt_output else opt_input
df_meta = pd.read_csv(opt_input)
for label_from, label_to in opt_labels_from_to:
df_meta.loc[(df_meta.label_enum == label_from), 'label_enum'] = label_to
# write csv
df_meta.to_csv(opt_output, index=False)
| [
"click.option",
"click.command",
"pandas.read_csv"
] | [((325, 340), 'click.command', 'click.command', ([], {}), '()\n', (338, 340), False, 'import click\n'), ((342, 427), 'click.option', 'click.option', (['"""-i"""', '"""--input"""', '"""opt_input"""'], {'required': '(True)', 'help': '"""Input file CSV"""'}), "('-i', '--input', 'opt_input', required=True, help='Input file CSV'\n )\n", (354, 427), False, 'import click\n'), ((426, 493), 'click.option', 'click.option', (['"""-o"""', '"""--output"""', '"""opt_output"""'], {'help': '"""Input file CSV"""'}), "('-o', '--output', 'opt_output', help='Input file CSV')\n", (438, 493), False, 'import click\n'), ((497, 617), 'click.option', 'click.option', (['"""--label"""', '"""opt_labels_from_to"""'], {'required': '(True)', 'type': '(str, str)', 'multiple': '(True)', 'help': '"""Label from, to"""'}), "('--label', 'opt_labels_from_to', required=True, type=(str, str\n ), multiple=True, help='Label from, to')\n", (509, 617), False, 'import click\n'), ((848, 870), 'pandas.read_csv', 'pd.read_csv', (['opt_input'], {}), '(opt_input)\n', (859, 870), True, 'import pandas as pd\n')] |
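
The `extract_api` column in each row above stores one 8-field tuple per detected call. The field names in the sketch below are assumptions inferred from the rows in this dump, not a documented schema (in particular, the meaning of the boolean flag is assumed); it only shows how such an entry could be unpacked for inspection. The example tuple is copied verbatim from the `pandas.read_csv` entry in the last row.

```python
from dataclasses import dataclass
from typing import Tuple

@dataclass
class ApiCall:
    # Field names are inferred from the rows above, not an official schema.
    call_span: Tuple[int, int]   # (start, end) offsets of the full call in the code string
    api_name: str                # fully qualified API, e.g. 'pandas.read_csv'
    alias: str                   # the callable as written in the code, e.g. 'pd.read_csv'
    args: Tuple[list, dict]      # reprs of positional args and the keyword-arg dict
    arg_text: str                # literal argument text as it appears in the source
    arg_span: Tuple[int, int]    # (start, end) offsets of the argument list
    flag: bool                   # boolean emitted by the extractor (meaning assumed)
    import_stmt: str             # import statement that brings the API into scope

def parse_entry(raw: tuple) -> ApiCall:
    """Unpack one raw extract_api tuple into a typed record."""
    return ApiCall(*raw)

# Entry copied from the last row above
raw = ((848, 870), 'pandas.read_csv', 'pd.read_csv', (['opt_input'], {}),
       '(opt_input)\n', (859, 870), True, 'import pandas as pd\n')
record = parse_entry(raw)
print(record.api_name, record.call_span)   # -> pandas.read_csv (848, 870)
```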