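"""Tkinter front end for a Nagios REST API client.

Three views share the same four frames: applied (read-only) status, object
configuration, and system operations. URL building and the HTTP calls live
in the companion `functions` module; the menu option tables live in
`objects`.
"""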
import requests
from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE
from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar
from tkinter import messagebox
import json
import re
#from tkscrolledframe import ScrolledFrame
# requests' ConnectionError is not the builtin exception of the same name;
# import it explicitly so the except clauses below catch network failures.
from requests.exceptions import ConnectionError, MissingSchema
import functions
import objects
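# Module-level state: the URL most recently assembled by a "Build API"
# handler, shared with the matching "Get JSON" handler of each view.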
api_url = ""
# Creates the button widgets for the read-only view
def draw_interface_applied(root, frame0, frame1, frame2, frame3, usr_tokens, usr_token_key, api_base, api_base_key):
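    """Draw the read-only (applied) view: object menu, filter entry, JSON
    text area, and the Build/Get/Clear/Save/Load/Convert/Quit buttons."""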
for widget in frame1.winfo_children():
widget.destroy()
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
    # Each dictionary works both ways: the keys are what gets picked in the
    # menu and the values are what comes back in the JSON (a mapping built
    # from the data in the Nagios help)
available_objects = objects.available_objects
api_object = StringVar()
    # Set the default api_object by copying a key from available_objects
api_object.set("objects/hoststatus")
tit_api_object_menu = Message(frame1, text="API Objects", aspect=400)
tit_api_object_menu.place(x=1, y=10)
api_object_menu = OptionMenu(frame1, api_object, *available_objects)
api_object_menu.place(x=130, y=5)
tit_api_filter = Message(frame2, text="Search Parameters", aspect=600)
tit_api_filter.place(x=1, y=0)
api_filter = Entry(frame2)
api_filter.place(x=130, y=0, width=400, height=20)
#api_filter.insert(END, "name=lk:apple")
titulo_show_api = Message(frame2, text="Get API", aspect=400)
titulo_show_api.place(x=1, y=40)
show_api = Message(frame2, aspect=2050, anchor=W, relief=SUNKEN)
show_api.place(x=130, y=45, width=820, height=40)
text_area_json = scrolledtext.ScrolledText(frame2, width=111, height=12)
text_area_json.place(x=50, y=100)
text_area_json.insert(END, "JSON Contents")
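    # The handlers below are closures over the widgets created above;
    # api_url is module-global so "Get JSON" can reuse the last built URL.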
def button_build_API():
global api_url
api_url = functions.build_API(api_object, api_filter, usr_token_key, usr_tokens, api_base_key, api_base)
functions.update_api(api_url, frame2)
button_build_API = Button(frame3, text="Build API", command=button_build_API)
button_build_API.place(x=50, y=10, width=150, height=30)
def button_get_json():
api_selected_object = str(api_object.get())
type_oper = "applied"
api_method = "get"
try:
list_json = functions.get_json(type_oper, api_method, api_url, available_objects, api_selected_object)
except ConnectionError:
messagebox.showerror("Conexão", "API Inválida!")
except MissingSchema:
messagebox.showerror("Conexão", "Monte a API primeiro!")
except Exception as e:
print(e)
messagebox.showerror("Conexão", "Erro desconhecido!")
else:
            # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if int(list_json[0]) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(list_json[1], "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, list_json[1])
    button_get_json = Button(frame3, text="Get JSON", command=button_get_json)
    button_get_json.place(x=210, y=10, width=150, height=30)
def button_clear_text():
global api_url
api_url = ""
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_save_json():
functions.save_file(text_area_json.get(1.0, END), "json")
button_save = Button(frame3, text="Save JSON", command=button_save_json)
button_save.place(x=50, y=60, width=150, height=30)
def button_load_json():
text_area_json.delete(1.0, END)
text_area_json.insert(END, json.loads(functions.load_file()))
button_load = Button(frame3, text="Load JSON", command=button_load_json)
button_load.place(x=210, y=60, width=150, height=30)
def button_convert_json():
functions.convert_json()
button_convert = Button(frame3, text="JSON -> CSV", command=button_convert_json)
button_convert.place(x=370, y=60, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
# Draws the button widgets for the config view
def draw_interface_config(root, frame0, frame1, frame2, frame3, usr_tokens, usr_token_key, api_base, api_base_key):
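    """Draw the config view: object menu plus Get/Post/Put/Delete radio
    buttons; each method redraws frame2 and frame3 with its own widgets."""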
for widget in frame1.winfo_children():
widget.destroy()
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
    # Lists with the configuration options
available_objects_config = objects.available_objects_config
options_available_config_host = objects.options_available_config_host
options_available_config_service = objects.options_available_config_service
options_available_config_hostgroup = objects.options_available_config_hostgroup
options_available_config_servicegroup = objects.options_available_config_servicegroup
options_available_config_command = objects.options_available_config_command
options_available_config_contact = objects.options_available_config_contact
options_available_config_contactgroup = objects.options_available_config_contactgroup
options_available_config_timeperiod = objects.options_available_config_timeperiod
api_object = StringVar()
    # Set the default api_object by copying a key from available_objects
api_object.set("config/host")
tit_api_object_menu_config = Message(frame1, text="API Objects", aspect=400)
tit_api_object_menu_config.place(x=1, y=36)
api_object_menu_config = OptionMenu(frame1, api_object, *available_objects_config)
api_object_menu_config.place(x=128, y=36)
def draw_buttons_config_get():
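        """GET view: filter entry, API preview, JSON area and button row."""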
#global api_filter
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
tit_api_filter = Message(frame2, text="Search Parameters", aspect=600)
tit_api_filter.place(x=1, y=0)
api_filter = Entry(frame2)
api_filter.place(x=130, y=0, width=400, height=20)
#api_filter.insert(END, "name=lk:apple")
titulo_show_api = Message(frame2, text="Get API", aspect=400)
titulo_show_api.place(x=1, y=40)
show_api = Message(frame2, aspect=2050, anchor=W, relief=SUNKEN)
show_api.place(x=130, y=45, width=820, height=40)
text_area_json = scrolledtext.ScrolledText(frame2, width=111, height=12)
text_area_json.place(x=50, y=100)
text_area_json.insert(END, "JSON Contents")
def button_build_API():
global api_url
api_url = functions.build_API(api_object, api_filter, usr_token_key, usr_tokens, api_base_key, api_base)
functions.update_api(api_url, frame2)
button_build_API = Button(frame3, text="Build API", command=button_build_API)
button_build_API.place(x=50, y=10, width=150, height=30)
def button_get_json_config():
api_selected_object = str(api_object.get())
type_oper="config"
try:
list_json = functions.get_json(type_oper, api_methods[int(api_method_radiobutton.get())], api_url, available_objects_config, api_selected_object)
except ConnectionError:
messagebox.showerror("Conexão", "API Inválida!")
except MissingSchema:
messagebox.showerror("Conexão", "Monte a API primeiro!")
except Exception as e:
print(e)
messagebox.showerror("Conexão", "Erro desconhecido!")
else:
                # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if int(list_json[0]) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(list_json[1], "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, list_json[1])
        button_get_json_config = Button(frame3, text="Get JSON Config", command=button_get_json_config)
        button_get_json_config.place(x=210, y=10, width=150, height=30)
def button_clear_text():
global api_url
api_url = ""
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_save_json():
functions.save_file(text_area_json.get(1.0, END), "json")
button_save = Button(frame3, text="Save JSON", command=button_save_json)
button_save.place(x=50, y=60, width=150, height=30)
def button_load_json():
text_area_json.delete(1.0, END)
text_area_json.insert(END, json.loads(functions.load_file()))
button_load = Button(frame3, text="Load JSON", command=button_load_json)
button_load.place(x=210, y=60, width=150, height=30)
def button_convert_json():
functions.convert_json()
button_convert = Button(frame3, text="JSON -> CSV", command=button_convert_json)
button_convert.place(x=370, y=60, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
def draw_buttons_config_post():
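        """POST view: one Entry per POST field of the selected object, an
        Apply? checkbox, and the Build/Post/Clear/Quit buttons."""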
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
titulo_show_api = Message(frame2, text="Post API", aspect=400)
titulo_show_api.place(x=0, y=0)
show_api = Message(frame2, text="", aspect=2050, relief=SUNKEN)
show_api.place(x=130, y=0, width=820, height=60)
        # Look up the option set for the selected object (POST fields)
        config_options = {
            "config/host": options_available_config_host,
            "config/hostgroup": options_available_config_hostgroup,
            "config/service": options_available_config_service,
            "config/servicegroup": options_available_config_servicegroup,
            "config/command": options_available_config_command,
            "config/contact": options_available_config_contact,
            "config/contactgroup": options_available_config_contactgroup,
            "config/timeperiod": options_available_config_timeperiod,
        }
        selected_options = config_options.get(api_object.get())
        option_available = selected_options["post"] if selected_options else None
y_axis = 80
given_values = list()
for i in option_available:
tit_option = Message(frame2, text="{}".format(i), aspect=600)
tit_option.place(x=1, y=y_axis)
option_value = Entry(frame2)
option_value.place(x=170, y=y_axis, width=360, height=20)
given_values.append(option_value)
y_axis += 23
apply_value = IntVar()
apply_value_check = Checkbutton(frame2, text="Apply?", variable=apply_value)
apply_value_check.place(x=165, y=y_axis)
text_area_json = scrolledtext.ScrolledText(frame2, width=50, height=12)
text_area_json.place(x=550, y=80)
text_area_json.insert(END, "JSON Contents")
def button_build_API_config():
functions.update_api_config(functions.build_API_config(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values), frame2)
button_build_API_config = Button(frame3, text="Build API", command=button_build_API_config)
button_build_API_config.place(x=50, y=10, width=150, height=30)
def button_post_json_config():
api_selected_object = str(api_object.get())
type_oper="config"
api_url_list = functions.build_API_config(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values)
response_json = functions.post_json(type_oper, api_methods[int(api_method_radiobutton.get())], api_url_list, available_objects_config, api_selected_object)
            # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if len(response_json) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(response_json, "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, response_json)
        button_post_json_config = Button(frame3, text="Post JSON Config", command=button_post_json_config)
        button_post_json_config.place(x=210, y=10, width=150, height=30)
def button_clear_text():
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear Response", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
def draw_buttons_config_put():
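        """PUT view: same layout as the POST view, over the PUT fields."""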
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
titulo_show_api = Message(frame2, text="Put API", aspect=400)
titulo_show_api.place(x=0, y=0)
show_api = Message(frame2, text="", aspect=2050, anchor=W, relief=SUNKEN)
show_api.place(x=130, y=0, width=820, height=60)
        # Look up the option set for the selected object (PUT fields)
        config_options = {
            "config/host": options_available_config_host,
            "config/hostgroup": options_available_config_hostgroup,
            "config/service": options_available_config_service,
            "config/servicegroup": options_available_config_servicegroup,
            "config/command": options_available_config_command,
            "config/contact": options_available_config_contact,
            "config/contactgroup": options_available_config_contactgroup,
            "config/timeperiod": options_available_config_timeperiod,
        }
        selected_options = config_options.get(api_object.get())
        option_available = selected_options["put"] if selected_options else None
y_axis = 80
given_values = list()
for i in option_available:
tit_option = Message(frame2, text="{}".format(i), aspect=600)
tit_option.place(x=1, y=y_axis)
option_value = Entry(frame2)
option_value.place(x=170, y=y_axis, width=360, height=20)
given_values.append(option_value)
y_axis += 23
apply_value = IntVar()
apply_value_check = Checkbutton(frame2, text="Apply?", variable=apply_value)
apply_value_check.place(x=165, y=y_axis)
text_area_json = scrolledtext.ScrolledText(frame2, width=50, height=12)
text_area_json.place(x=550, y=80)
text_area_json.insert(END, "JSON Contents")
def button_build_API_config():
functions.update_api_config(functions.build_API_config(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values), frame2)
button_build_API_config = Button(frame3, text="Build API", command=button_build_API_config)
button_build_API_config.place(x=50, y=10, width=150, height=30)
def button_put_json_config():
api_selected_object = str(api_object.get())
type_oper="config"
#print("API METHOD: {}".format(int(api_method_radiobutton.get())))api_object, api_config_values, usr_token_key, usr_tokens, api_base_key, api_base, apply
api_url_list = functions.build_API_config(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values)
response_json = functions.put_json(type_oper, api_methods[int(api_method_radiobutton.get())], api_url_list, available_objects_config, api_selected_object)
            # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if len(response_json) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(response_json, "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, response_json)
        button_put_json_config = Button(frame3, text="Put JSON Config", command=button_put_json_config)
        button_put_json_config.place(x=210, y=10, width=150, height=30)
def button_clear_text():
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear Response", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
def draw_buttons_config_delete():
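        """DELETE view: same layout as the POST view, over the DELETE fields."""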
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
titulo_show_api = Message(frame2, text="Delete API", aspect=400)
titulo_show_api.place(x=0, y=0)
show_api = Message(frame2, text="", aspect=2050, anchor=W, relief=SUNKEN)
show_api.place(x=130, y=0, width=820, height=60)
        # Look up the option set for the selected object (DELETE fields)
        config_options = {
            "config/host": options_available_config_host,
            "config/hostgroup": options_available_config_hostgroup,
            "config/service": options_available_config_service,
            "config/servicegroup": options_available_config_servicegroup,
            "config/command": options_available_config_command,
            "config/contact": options_available_config_contact,
            "config/contactgroup": options_available_config_contactgroup,
            "config/timeperiod": options_available_config_timeperiod,
        }
        selected_options = config_options.get(api_object.get())
        option_available = selected_options["delete"] if selected_options else None
y_axis = 80
given_values = list()
for i in option_available:
tit_option = Message(frame2, text="{}".format(i), aspect=600)
tit_option.place(x=1, y=y_axis)
option_value = Entry(frame2)
option_value.place(x=170, y=y_axis, width=360, height=20)
given_values.append(option_value)
y_axis += 23
apply_value = IntVar()
apply_value_check = Checkbutton(frame2, text="Apply?", variable=apply_value)
apply_value_check.place(x=165, y=y_axis)
text_area_json = scrolledtext.ScrolledText(frame2, width=50, height=12)
text_area_json.place(x=550, y=80)
text_area_json.insert(END, "JSON Contents")
def button_build_API_config():
functions.update_api_config(functions.build_API_config(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values), frame2)
button_build_API_config = Button(frame3, text="Build API", command=button_build_API_config)
button_build_API_config.place(x=50, y=10, width=150, height=30)
def button_delete_json_config():
api_selected_object = str(api_object.get())
type_oper="config"
#print("API METHOD: {}".format(int(api_method_radiobutton.get())))api_object, api_config_values, usr_token_key, usr_tokens, api_base_key, api_base, apply
api_url_list = functions.build_API_config(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values)
response_json = functions.delete_json(type_oper, api_methods[int(api_method_radiobutton.get())], api_url_list, available_objects_config, api_selected_object)
            # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if len(response_json) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(response_json, "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, response_json)
        button_delete_json_config = Button(frame3, text="Delete JSON Config", command=button_delete_json_config)
        button_delete_json_config.place(x=210, y=10, width=150, height=30)
def button_clear_text():
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear Response", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
    # Main view
api_method_radiobutton = IntVar()
api_method_radiobutton.set(0)
tit_api_method = Message(frame1, text="API Method", aspect=400)
tit_api_method.place(x=1, y=10)
Radiobutton(frame1, text="Get", variable = api_method_radiobutton, command=draw_buttons_config_get, value = 0).place(x=130, y=10)
Radiobutton(frame1, text="Post", variable = api_method_radiobutton, command=draw_buttons_config_post, value = 1).place(x=230, y=10)
Radiobutton(frame1, text="Put", variable = api_method_radiobutton, command=draw_buttons_config_put, value = 2).place(x=330, y=10)
Radiobutton(frame1, text="Delete", variable = api_method_radiobutton, command=draw_buttons_config_delete, value = 3).place(x=430, y=10)
api_methods = ["get", "post", "put", "delete"]
    # Drawing the GET buttons as the default
draw_buttons_config_get()
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
# Draws the button widgets for the system view
def draw_interface_system(root, frame0, frame1, frame2, frame3, usr_tokens, usr_token_key, api_base, api_base_key):
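    """Draw the system view; mirrors the config view, but over the
    system/* API objects (status, info, commands, users, ...)."""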
for widget in frame1.winfo_children():
widget.destroy()
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
    # Lists with the system options
available_objects_system = objects.available_objects_system
options_available_system_status = objects.options_available_system_status
options_available_system_statusdetail = objects.options_available_system_statusdetail
options_available_system_info = objects.options_available_system_info
options_available_system_command = objects.options_available_system_command
options_available_system_applyconfig = objects.options_available_system_applyconfig
options_available_system_importconfig = objects.options_available_system_importconfig
options_available_system_corecommand = objects.options_available_system_corecommand
options_available_system_scheduleddowntime = objects.options_available_system_scheduleddowntime
options_available_system_user = objects.options_available_system_user
options_available_system_authserver = objects.options_available_system_authserver
api_object = StringVar()
    # Set the default api_object by copying a key from available_objects
api_object.set("system/status")
tit_api_object_menu_system = Message(frame1, text="API Objects", aspect=400)
tit_api_object_menu_system.place(x=1, y=36)
api_object_menu_system = OptionMenu(frame1, api_object, *available_objects_system)
api_object_menu_system.place(x=128, y=36)
def draw_buttons_system_get():
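        """GET view for system/* objects; mirrors the config GET view."""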
#global api_filter
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
tit_api_filter = Message(frame2, text="Search Parameters", aspect=600)
tit_api_filter.place(x=1, y=0)
api_filter = Entry(frame2)
api_filter.place(x=130, y=0, width=400, height=20)
#api_filter.insert(END, "name=lk:apple")
titulo_show_api = Message(frame2, text="Get API", aspect=400)
titulo_show_api.place(x=1, y=40)
show_api = Message(frame2, aspect=2050, anchor=W, relief=SUNKEN)
show_api.place(x=130, y=45, width=820, height=40)
text_area_json = scrolledtext.ScrolledText(frame2, width=111, height=12)
text_area_json.place(x=50, y=100)
text_area_json.insert(END, "JSON Contents")
def button_build_API():
global api_url
api_url = functions.build_API(api_object, api_filter, usr_token_key, usr_tokens, api_base_key, api_base)
functions.update_api(api_url, frame2)
button_build_API = Button(frame3, text="Build API", command=button_build_API)
button_build_API.place(x=50, y=10, width=150, height=30)
def button_get_json_system():
api_selected_object = str(api_object.get())
type_oper="system"
try:
list_json = functions.get_json_system(type_oper, api_methods[int(api_method_radiobutton.get())], api_url, available_objects_system, api_selected_object)
except ConnectionError:
messagebox.showerror("Conexão", "API Inválida!")
except MissingSchema:
messagebox.showerror("Conexão", "Monte a API primeiro!")
except Exception as e:
print(e)
messagebox.showerror("Conexão", "Erro desconhecido!")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, list_json)
        button_get_json_system = Button(frame3, text="Get JSON System", command=button_get_json_system)
        button_get_json_system.place(x=210, y=10, width=150, height=30)
def button_clear_text():
global api_url
api_url = ""
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_save_json():
functions.save_file(text_area_json.get(1.0, END), "json")
button_save = Button(frame3, text="Save JSON", command=button_save_json)
button_save.place(x=50, y=60, width=150, height=30)
def button_load_json():
text_area_json.delete(1.0, END)
text_area_json.insert(END, json.loads(functions.load_file()))
button_load = Button(frame3, text="Load JSON", command=button_load_json)
button_load.place(x=210, y=60, width=150, height=30)
def button_convert_json():
functions.convert_json()
button_convert = Button(frame3, text="JSON -> CSV", command=button_convert_json)
button_convert.place(x=370, y=60, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
def draw_buttons_system_post():
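        """POST view for system/* objects; mirrors the config POST view."""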
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
titulo_show_api = Message(frame2, text="Post API", aspect=400)
titulo_show_api.place(x=0, y=0)
show_api = Message(frame2, text="", aspect=2050, relief=SUNKEN)
show_api.place(x=130, y=0, width=820, height=60)
        # Look up the option set for the selected object (POST fields)
        system_options = {
            "system/status": options_available_system_status,
            "system/statusdetails": options_available_system_statusdetail,
            "system/info": options_available_system_info,
            "system/command": options_available_system_command,
            "system/applyconfig": options_available_system_applyconfig,
            "system/importconfig": options_available_system_importconfig,
            "system/corecommand": options_available_system_corecommand,
            "system/scheduleddowntime": options_available_system_scheduleddowntime,
            "system/user": options_available_system_user,
            "system/authserver": options_available_system_authserver,
        }
        selected_options = system_options.get(api_object.get())
        option_available = selected_options["post"] if selected_options else None
y_axis = 80
given_values = list()
for i in option_available:
tit_option = Message(frame2, text="{}".format(i), aspect=600)
tit_option.place(x=1, y=y_axis)
option_value = Entry(frame2)
option_value.place(x=170, y=y_axis, width=360, height=20)
given_values.append(option_value)
y_axis += 23
apply_value = IntVar()
apply_value_check = Checkbutton(frame2, text="Apply?", variable=apply_value)
apply_value_check.place(x=165, y=y_axis)
text_area_json = scrolledtext.ScrolledText(frame2, width=50, height=12)
text_area_json.place(x=550, y=80)
text_area_json.insert(END, "JSON Contents")
def button_build_API_system():
functions.update_api_system(functions.build_API_system(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values), frame2)
button_build_API_system = Button(frame3, text="Build API", command=button_build_API_system)
button_build_API_system.place(x=50, y=10, width=150, height=30)
def button_post_json_system():
api_selected_object = str(api_object.get())
type_oper="system"
api_url_list = functions.build_API_system(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values)
response_json = functions.post_json(type_oper, api_methods[int(api_method_radiobutton.get())], api_url_list, available_objects_system, api_selected_object)
            # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if len(response_json) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(response_json, "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, response_json)
        button_post_json_system = Button(frame3, text="Post JSON System", command=button_post_json_system)
        button_post_json_system.place(x=210, y=10, width=150, height=30)
def button_clear_text():
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear Response", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
def draw_buttons_system_put():
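        """PUT view for system/* objects; mirrors the config PUT view."""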
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
titulo_show_api = Message(frame2, text="Put API", aspect=400)
titulo_show_api.place(x=0, y=0)
show_api = Message(frame2, text="", aspect=2050, anchor=W, relief=SUNKEN)
show_api.place(x=130, y=0, width=820, height=60)
        # Look up the option set for the selected object (PUT fields)
        system_options = {
            "system/status": options_available_system_status,
            "system/statusdetails": options_available_system_statusdetail,
            "system/info": options_available_system_info,
            "system/command": options_available_system_command,
            "system/applyconfig": options_available_system_applyconfig,
            "system/importconfig": options_available_system_importconfig,
            "system/corecommand": options_available_system_corecommand,
            "system/scheduleddowntime": options_available_system_scheduleddowntime,
            "system/user": options_available_system_user,
            "system/authserver": options_available_system_authserver,
        }
        selected_options = system_options.get(api_object.get())
        option_available = selected_options["put"] if selected_options else None
y_axis = 80
given_values = list()
for i in option_available:
tit_option = Message(frame2, text="{}".format(i), aspect=600)
tit_option.place(x=1, y=y_axis)
option_value = Entry(frame2)
option_value.place(x=170, y=y_axis, width=360, height=20)
given_values.append(option_value)
y_axis += 23
apply_value = IntVar()
apply_value_check = Checkbutton(frame2, text="Apply?", variable=apply_value)
apply_value_check.place(x=165, y=y_axis)
text_area_json = scrolledtext.ScrolledText(frame2, width=50, height=12)
text_area_json.place(x=550, y=80)
text_area_json.insert(END, "JSON Contents")
def button_build_API_system():
functions.update_api_system(functions.build_API_system(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values), frame2)
button_build_API_system = Button(frame3, text="Build API", command=button_build_API_system)
button_build_API_system.place(x=50, y=10, width=150, height=30)
def button_put_json_system():
api_selected_object = str(api_object.get())
type_oper="system"
#print("API METHOD: {}".format(int(api_method_radiobutton.get())))api_object, api_system_values, usr_token_key, usr_tokens, api_base_key, api_base, apply
api_url_list = functions.build_API_system(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values)
response_json = functions.put_json(type_oper, api_methods[int(api_method_radiobutton.get())], api_url_list, available_objects_system, api_selected_object)
            # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if len(response_json) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(response_json, "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, response_json)
        button_put_json_system = Button(frame3, text="Put JSON System", command=button_put_json_system)
        button_put_json_system.place(x=210, y=10, width=150, height=30)
def button_clear_text():
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear Response", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
def draw_buttons_system_delete():
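        """DELETE view for system/* objects; mirrors the config DELETE view."""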
for widget in frame2.winfo_children():
widget.destroy()
for widget in frame3.winfo_children():
widget.destroy()
titulo_show_api = Message(frame2, text="Delete API", aspect=400)
titulo_show_api.place(x=0, y=0)
show_api = Message(frame2, text="", aspect=2050, anchor=W, relief=SUNKEN)
show_api.place(x=130, y=0, width=820, height=60)
        # Look up the option set for the selected object (DELETE fields)
        system_options = {
            "system/status": options_available_system_status,
            "system/statusdetails": options_available_system_statusdetail,
            "system/info": options_available_system_info,
            "system/command": options_available_system_command,
            "system/applyconfig": options_available_system_applyconfig,
            "system/importconfig": options_available_system_importconfig,
            "system/corecommand": options_available_system_corecommand,
            "system/scheduleddowntime": options_available_system_scheduleddowntime,
            "system/user": options_available_system_user,
            "system/authserver": options_available_system_authserver,
        }
        selected_options = system_options.get(api_object.get())
        option_available = selected_options["delete"] if selected_options else None
y_axis = 80
given_values = list()
for i in option_available:
tit_option = Message(frame2, text="{}".format(i), aspect=600)
tit_option.place(x=1, y=y_axis)
option_value = Entry(frame2)
option_value.place(x=170, y=y_axis, width=360, height=20)
given_values.append(option_value)
y_axis += 23
apply_value = IntVar()
apply_value_check = Checkbutton(frame2, text="Apply?", variable=apply_value)
apply_value_check.place(x=165, y=y_axis)
text_area_json = scrolledtext.ScrolledText(frame2, width=50, height=12)
text_area_json.place(x=550, y=80)
text_area_json.insert(END, "JSON Contents")
def button_build_API_system():
functions.update_api_system(functions.build_API_system(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values), frame2)
button_build_API_system = Button(frame3, text="Build API", command=button_build_API_system)
button_build_API_system.place(x=50, y=10, width=150, height=30)
def button_delete_json_system():
api_selected_object = str(api_object.get())
type_oper="system"
#print("API METHOD: {}".format(int(api_method_radiobutton.get())))api_object, api_system_values, usr_token_key, usr_tokens, api_base_key, api_base, apply
api_url_list = functions.build_API_system(api_object, usr_token_key, usr_tokens, api_base_key, api_base, apply_value, option_available, given_values)
response_json = functions.delete_json(type_oper, api_methods[int(api_method_radiobutton.get())], api_url_list, available_objects_system, api_selected_object)
            # Before showing the JSON, check its size; if it is too large, offer to save it straight to a file
if len(response_json) > 50:
if messagebox.askyesno("Resposta muito grande", "Gostaria de salvar em arquivo?"):
functions.save_file(response_json, "json")
else:
text_area_json.delete(1.0, END)
text_area_json.insert(END, response_json)
        button_delete_json_system = Button(frame3, text="Delete JSON System", command=button_delete_json_system)
        button_delete_json_system.place(x=210, y=10, width=150, height=30)
def button_clear_text():
text_area_json.delete(1.0, END)
# text_area_json.insert(END, resposta)
button_clear = Button(frame3, text="Clear Response", command=button_clear_text)
button_clear.place(x=370, y=10, width=150, height=30)
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
    # Main view
api_method_radiobutton = IntVar()
api_method_radiobutton.set(0)
tit_api_method = Message(frame1, text="API Method", aspect=400)
tit_api_method.place(x=1, y=10)
Radiobutton(frame1, text="Get", variable = api_method_radiobutton, command=draw_buttons_system_get, value = 0).place(x=130, y=10)
Radiobutton(frame1, text="Post", variable = api_method_radiobutton, command=draw_buttons_system_post, value = 1).place(x=230, y=10)
Radiobutton(frame1, text="Put", variable = api_method_radiobutton, command=draw_buttons_system_put, value = 2).place(x=330, y=10)
Radiobutton(frame1, text="Delete", variable = api_method_radiobutton, command=draw_buttons_system_delete, value = 3).place(x=430, y=10)
api_methods = ["get", "post", "put", "delete"]
    # Drawing the GET buttons as the default
draw_buttons_system_get()
def button_quit_program():
functions.quit_program(root)
button_quit = Button(frame3, text="Quit", command=button_quit_program)
button_quit.place(x=800, y=10, width=150, height=30)
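
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes the companion `functions`/`objects` modules are importable and
# guesses at the frame layout and at the shapes of `usr_tokens`/`api_base`
# (plain dicts keyed by a profile name) -- the real caller may differ.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    root = Tk()
    root.geometry("1000x620")
    frames = []
    for height in (60, 80, 380, 100):        # frame0..frame3 (assumed sizes)
        frame = Frame(root, width=1000, height=height)
        frame.pack_propagate(False)          # keep fixed size; children use place()
        frame.pack(fill="x")
        frames.append(frame)
    usr_tokens = {"default": "<api-token>"}                       # assumed shape
    api_base = {"default": "http://localhost/nagiosxi/api/v1/"}   # assumed shape
    draw_interface_applied(root, *frames, usr_tokens, "default",
                           api_base, "default")
    root.mainloop()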
'(frame2, width=50, height=12)\n', (37272, 37301), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((37684, 37749), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Build API"""', 'command': 'button_build_API_system'}), "(frame3, text='Build API', command=button_build_API_system)\n", (37690, 37749), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((38948, 39018), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Put JSON system"""', 'command': 'button_put_json_system'}), "(frame3, text='Put JSON system', command=button_put_json_system)\n", (38954, 39018), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((39252, 39316), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Clear Response"""', 'command': 'button_clear_text'}), "(frame3, text='Clear Response', command=button_clear_text)\n", (39258, 39316), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((39485, 39541), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Quit"""', 'command': 'button_quit_program'}), "(frame3, text='Quit', command=button_quit_program)\n", (39491, 39541), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((39832, 39878), 'tkinter.Message', 'Message', (['frame2'], {'text': '"""Delete API"""', 'aspect': '(400)'}), "(frame2, text='Delete API', aspect=400)\n", (39839, 39878), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((39940, 40002), 'tkinter.Message', 'Message', (['frame2'], {'text': '""""""', 'aspect': '(2050)', 'anchor': 'W', 'relief': 'SUNKEN'}), "(frame2, text='', aspect=2050, anchor=W, relief=SUNKEN)\n", (39947, 40002), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((41828, 41836), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (41834, 41836), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((41866, 41922), 'tkinter.Checkbutton', 'Checkbutton', (['frame2'], {'text': '"""Apply?"""', 'variable': 'apply_value'}), "(frame2, text='Apply?', variable=apply_value)\n", (41877, 41922), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((42001, 42055), 'tkinter.scrolledtext.ScrolledText', 'scrolledtext.ScrolledText', (['frame2'], {'width': '(50)', 'height': '(12)'}), '(frame2, width=50, height=12)\n', (42026, 42055), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((42430, 42495), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Build API"""', 'command': 'button_build_API_system'}), "(frame3, text='Build API', command=button_build_API_system)\n", (42436, 42495), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((43700, 43776), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Delete JSON system"""', 'command': 'button_delete_json_system'}), "(frame3, text='Delete JSON system', command=button_delete_json_system)\n", (43706, 43776), False, 'from 
tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((44010, 44074), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Clear Response"""', 'command': 'button_clear_text'}), "(frame3, text='Clear Response', command=button_clear_text)\n", (44016, 44074), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((44243, 44299), 'tkinter.Button', 'Button', (['frame3'], {'text': '"""Quit"""', 'command': 'button_quit_program'}), "(frame3, text='Quit', command=button_quit_program)\n", (44249, 44299), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((45325, 45353), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (45347, 45353), False, 'import functions\n'), ((2764, 2858), 'functions.get_json', 'functions.get_json', (['type_oper', 'api_method', 'api_url', 'available_objects', 'api_selected_object'], {}), '(type_oper, api_method, api_url, available_objects,\n api_selected_object)\n', (2782, 2858), False, 'import functions\n'), ((7620, 7718), 'functions.build_API', 'functions.build_API', (['api_object', 'api_filter', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base'], {}), '(api_object, api_filter, usr_token_key, usr_tokens,\n api_base_key, api_base)\n', (7639, 7718), False, 'import functions\n'), ((7728, 7765), 'functions.update_api', 'functions.update_api', (['api_url', 'frame2'], {}), '(api_url, frame2)\n', (7748, 7765), False, 'import functions\n'), ((10198, 10222), 'functions.convert_json', 'functions.convert_json', ([], {}), '()\n', (10220, 10222), False, 'import functions\n'), ((10431, 10459), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (10453, 10459), False, 'import functions\n'), ((12346, 12359), 'tkinter.Entry', 'Entry', (['frame2'], {}), '(frame2)\n', (12351, 12359), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((13429, 13567), 'functions.build_API_config', 'functions.build_API_config', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (13455, 13567), False, 'import functions\n'), ((14717, 14745), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (14739, 14745), False, 'import functions\n'), ((16624, 16637), 'tkinter.Entry', 'Entry', (['frame2'], {}), '(frame2)\n', (16629, 16637), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((17881, 18019), 'functions.build_API_config', 'functions.build_API_config', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (17907, 18019), False, 'import functions\n'), ((19164, 19192), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (19186, 19192), False, 'import functions\n'), ((21101, 21114), 'tkinter.Entry', 'Entry', (['frame2'], {}), '(frame2)\n', (21106, 21114), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, 
RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((22353, 22491), 'functions.build_API_config', 'functions.build_API_config', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (22379, 22491), False, 'import functions\n'), ((23645, 23673), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (23667, 23673), False, 'import functions\n'), ((24049, 24160), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Get"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_config_get', 'value': '(0)'}), "(frame1, text='Get', variable=api_method_radiobutton, command=\n draw_buttons_config_get, value=0)\n", (24060, 24160), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((24184, 24297), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Post"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_config_post', 'value': '(1)'}), "(frame1, text='Post', variable=api_method_radiobutton, command=\n draw_buttons_config_post, value=1)\n", (24195, 24297), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((24329, 24440), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Put"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_config_put', 'value': '(2)'}), "(frame1, text='Put', variable=api_method_radiobutton, command=\n draw_buttons_config_put, value=2)\n", (24340, 24440), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((24464, 24581), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Delete"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_config_delete', 'value': '(3)'}), "(frame1, text='Delete', variable=api_method_radiobutton, command\n =draw_buttons_config_delete, value=3)\n", (24475, 24581), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((27715, 27813), 'functions.build_API', 'functions.build_API', (['api_object', 'api_filter', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base'], {}), '(api_object, api_filter, usr_token_key, usr_tokens,\n api_base_key, api_base)\n', (27734, 27813), False, 'import functions\n'), ((27823, 27860), 'functions.update_api', 'functions.update_api', (['api_url', 'frame2'], {}), '(api_url, frame2)\n', (27843, 27860), False, 'import functions\n'), ((29919, 29943), 'functions.convert_json', 'functions.convert_json', ([], {}), '()\n', (29941, 29943), False, 'import functions\n'), ((30152, 30180), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (30174, 30180), False, 'import functions\n'), ((32342, 32355), 'tkinter.Entry', 'Entry', (['frame2'], {}), '(frame2)\n', (32347, 32355), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((33425, 33563), 'functions.build_API_system', 'functions.build_API_system', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (33451, 33563), False, 'import functions\n'), ((34713, 34741), 
'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (34735, 34741), False, 'import functions\n'), ((36891, 36904), 'tkinter.Entry', 'Entry', (['frame2'], {}), '(frame2)\n', (36896, 36904), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((38148, 38286), 'functions.build_API_system', 'functions.build_API_system', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (38174, 38286), False, 'import functions\n'), ((39431, 39459), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (39453, 39459), False, 'import functions\n'), ((41645, 41658), 'tkinter.Entry', 'Entry', (['frame2'], {}), '(frame2)\n', (41650, 41658), False, 'from tkinter import Checkbutton, Tk, filedialog, Message, Entry, StringVar, Text, Scrollbar, scrolledtext, LEFT, RIGHT, Y, END, W, SUNKEN, OUTSIDE\n'), ((42897, 43035), 'functions.build_API_system', 'functions.build_API_system', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (42923, 43035), False, 'import functions\n'), ((44189, 44217), 'functions.quit_program', 'functions.quit_program', (['root'], {}), '(root)\n', (44211, 44217), False, 'import functions\n'), ((44593, 44704), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Get"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_system_get', 'value': '(0)'}), "(frame1, text='Get', variable=api_method_radiobutton, command=\n draw_buttons_system_get, value=0)\n", (44604, 44704), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((44728, 44841), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Post"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_system_post', 'value': '(1)'}), "(frame1, text='Post', variable=api_method_radiobutton, command=\n draw_buttons_system_post, value=1)\n", (44739, 44841), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((44873, 44984), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Put"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_system_put', 'value': '(2)'}), "(frame1, text='Put', variable=api_method_radiobutton, command=\n draw_buttons_system_put, value=2)\n", (44884, 44984), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((45008, 45125), 'tkinter.Radiobutton', 'Radiobutton', (['frame1'], {'text': '"""Delete"""', 'variable': 'api_method_radiobutton', 'command': 'draw_buttons_system_delete', 'value': '(3)'}), "(frame1, text='Delete', variable=api_method_radiobutton, command\n =draw_buttons_system_delete, value=3)\n", (45019, 45125), False, 'from tkinter import Button, Frame, IntVar, Radiobutton, Widget, OptionMenu, Scrollbar\n'), ((2901, 2949), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""API Inválida!"""'], {}), "('Conexão', 'API Inválida!')\n", (2921, 2949), False, 'from tkinter import messagebox\n'), ((2994, 3050), 'tkinter.messagebox.showerror', 
'messagebox.showerror', (['"""Conexão"""', '"""Monte a API primeiro!"""'], {}), "('Conexão', 'Monte a API primeiro!')\n", (3014, 3050), False, 'from tkinter import messagebox\n'), ((3118, 3171), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""Erro desconhecido!"""'], {}), "('Conexão', 'Erro desconhecido!')\n", (3138, 3171), False, 'from tkinter import messagebox\n'), ((3375, 3453), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (3394, 3453), False, 'from tkinter import messagebox\n'), ((4448, 4469), 'functions.load_file', 'functions.load_file', ([], {}), '()\n', (4467, 4469), False, 'import functions\n'), ((12950, 13088), 'functions.build_API_config', 'functions.build_API_config', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (12976, 13088), False, 'import functions\n'), ((13922, 14000), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (13941, 14000), False, 'from tkinter import messagebox\n'), ((17236, 17374), 'functions.build_API_config', 'functions.build_API_config', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (17262, 17374), False, 'import functions\n'), ((18373, 18451), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (18392, 18451), False, 'from tkinter import messagebox\n'), ((21705, 21843), 'functions.build_API_config', 'functions.build_API_config', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (21731, 21843), False, 'import functions\n'), ((22848, 22926), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (22867, 22926), False, 'from tkinter import messagebox\n'), ((32946, 33084), 'functions.build_API_system', 'functions.build_API_system', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (32972, 33084), False, 'import functions\n'), ((33918, 33996), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (33937, 33996), False, 'from tkinter import messagebox\n'), ((37503, 37641), 'functions.build_API_system', 'functions.build_API_system', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 
'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (37529, 37641), False, 'import functions\n'), ((38640, 38718), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (38659, 38718), False, 'from tkinter import messagebox\n'), ((42249, 42387), 'functions.build_API_system', 'functions.build_API_system', (['api_object', 'usr_token_key', 'usr_tokens', 'api_base_key', 'api_base', 'apply_value', 'option_available', 'given_values'], {}), '(api_object, usr_token_key, usr_tokens,\n api_base_key, api_base, apply_value, option_available, given_values)\n', (42275, 42387), False, 'import functions\n'), ((43392, 43470), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (43411, 43470), False, 'from tkinter import messagebox\n'), ((3476, 3517), 'functions.save_file', 'functions.save_file', (['list_json[1]', '"""json"""'], {}), "(list_json[1], 'json')\n", (3495, 3517), False, 'import functions\n'), ((8286, 8334), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""API Inválida!"""'], {}), "('Conexão', 'API Inválida!')\n", (8306, 8334), False, 'from tkinter import messagebox\n'), ((8387, 8443), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""Monte a API primeiro!"""'], {}), "('Conexão', 'Monte a API primeiro!')\n", (8407, 8443), False, 'from tkinter import messagebox\n'), ((8523, 8576), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""Erro desconhecido!"""'], {}), "('Conexão', 'Erro desconhecido!')\n", (8543, 8576), False, 'from tkinter import messagebox\n'), ((8796, 8874), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Resposta muito grande"""', '"""Gostaria de salvar em arquivo?"""'], {}), "('Resposta muito grande', 'Gostaria de salvar em arquivo?')\n", (8815, 8874), False, 'from tkinter import messagebox\n'), ((9977, 9998), 'functions.load_file', 'functions.load_file', ([], {}), '()\n', (9996, 9998), False, 'import functions\n'), ((14023, 14065), 'functions.save_file', 'functions.save_file', (['response_json', '"""json"""'], {}), "(response_json, 'json')\n", (14042, 14065), False, 'import functions\n'), ((18474, 18516), 'functions.save_file', 'functions.save_file', (['response_json', '"""json"""'], {}), "(response_json, 'json')\n", (18493, 18516), False, 'import functions\n'), ((22949, 22991), 'functions.save_file', 'functions.save_file', (['response_json', '"""json"""'], {}), "(response_json, 'json')\n", (22968, 22991), False, 'import functions\n'), ((28388, 28436), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""API Inválida!"""'], {}), "('Conexão', 'API Inválida!')\n", (28408, 28436), False, 'from tkinter import messagebox\n'), ((28489, 28545), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""Monte a API primeiro!"""'], {}), "('Conexão', 'Monte a API primeiro!')\n", (28509, 28545), False, 'from tkinter import messagebox\n'), ((28625, 28678), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Conexão"""', '"""Erro desconhecido!"""'], {}), "('Conexão', 'Erro desconhecido!')\n", (28645, 28678), False, 'from tkinter import messagebox\n'), 
((29698, 29719), 'functions.load_file', 'functions.load_file', ([], {}), '()\n', (29717, 29719), False, 'import functions\n'), ((34019, 34061), 'functions.save_file', 'functions.save_file', (['response_json', '"""json"""'], {}), "(response_json, 'json')\n", (34038, 34061), False, 'import functions\n'), ((38741, 38783), 'functions.save_file', 'functions.save_file', (['response_json', '"""json"""'], {}), "(response_json, 'json')\n", (38760, 38783), False, 'import functions\n'), ((43493, 43535), 'functions.save_file', 'functions.save_file', (['response_json', '"""json"""'], {}), "(response_json, 'json')\n", (43512, 43535), False, 'import functions\n'), ((8901, 8942), 'functions.save_file', 'functions.save_file', (['list_json[1]', '"""json"""'], {}), "(list_json[1], 'json')\n", (8920, 8942), False, 'import functions\n')]
|
from scipy.integrate import quad,dblquad
import numpy as np
from scipy.special import gamma
from scipy.special import gammainc
def poisson_integrand(tau, rho, beta, fm=1, K=1, alpha=2):
#lambda = beta*f(m)*rho*tau
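    # the sum below is the Poisson CDF up to K-1, so the bracketed factor in
    # the return value is the survival probability P(N >= K)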
L = np.array([(tau*beta*rho*fm)**k/gamma(k+1) for k in range(K)])
return (1-np.exp(-tau*beta*rho*fm)*np.sum(L))*tau**(-alpha-1)
def exponential_integrand(tau, rho, beta, fm=1, K=1, alpha=2):
scale = tau*rho*beta*fm
return np.exp(-K/scale)*tau**(-alpha-1)
def exponential_pdf(kappa,scale=1):
return np.exp(-kappa/scale)/scale
def weibull_integrand(tau, rho, beta, fm=1, K=1, alpha=2, shape=2):
    scale = tau*rho*beta*fm/gamma(1+1/shape) #mean = tau*rho*beta*fm
return np.exp(-(K/scale)**shape)*tau**(-alpha-1)
def weibull_pdf(kappa,scale=1,shape=2):
return (shape/scale)*(kappa/scale)**(shape-1)*np.exp(-(kappa/scale)**shape)
def frechet_integrand(tau, rho, beta, fm=1, K=1, alpha=2, shape=2):
scale = tau*rho*beta*fm/gamma(1-1/shape)
return (1-np.exp(-(K/scale)**(-shape)))*tau**(-alpha-1)
def frechet_pdf(kappa,scale=1,shape=2):
return (shape/scale)*(kappa/scale)**(-shape-1)*np.exp(-(kappa/scale)**(-shape))
def gamma_special_integrand(tau, rho, beta, fm=1, K=1, alpha=2 ,z = 0.):
#play with the scale/shape instead of just the scale, such that variance != mean^2
#z = 0 is equivalent to the exponential
param = tau*rho*beta*fm
return (1 - gammainc(param**z,K/param**(1-z)))*tau**(-alpha-1)
def kernel(rho, beta, fm=1, K=1, alpha=2., tmin=1, T=np.inf,
integrand=exponential_integrand,
args=tuple()):
Z = (tmin**(-alpha)-T**(-alpha))/alpha
_args = (rho,beta,fm,K,alpha,*args)
return quad(integrand,tmin,T,args=_args)[0]/Z
#same as kernel, but put beta first for integration and multiply by Q(beta)
def kernel2(beta, Q, rho, fm=1, K=1, alpha=2.,tmin=1, T=np.inf,
integrand=exponential_integrand,
args=tuple()):
_args = (rho,beta,fm,K,alpha, *args)
return Q(beta)*quad(integrand,tmin,T,args=_args)[0]
def kernel_het_beta(rho, fm=1, K=1, alpha=2., tmin=1, T=np.inf,
integrand=exponential_integrand,args=tuple(),
Q=lambda b: np.exp(-b),betalim=(0,np.inf)):
Z = (tmin**(-alpha)-T**(-alpha))/alpha
_args=(Q,rho,fm,K,alpha,tmin,T,integrand,args)
return quad(kernel2,betalim[0],betalim[1],args=_args)[0]/Z
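# A minimal usage sketch (editorial addition; the values are illustrative,
# not taken from the original): average the kernel over an exponentially
# distributed coupling beta using the default Q(beta) = exp(-beta).
#   theta = kernel_het_beta(rho=0.1, K=1, alpha=1.5,
#                           integrand=exponential_integrand)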
if __name__ == '__main__':
import matplotlib.pyplot as plt
alpha_list = [0.5,1.,1.5,2.]
rho_list = np.logspace(-3,0,100)
beta = 0.1
for alpha in alpha_list:
label=fr"$\alpha = {alpha}$"
        kernel_list = [kernel(rho, beta, K=0.1, alpha=alpha, tmin=1,
                              integrand=gamma_special_integrand,
                              args=tuple())
for rho in rho_list]
plt.loglog(rho_list,kernel_list, '-',label=label)
plt.loglog(rho_list,rho_list**alpha, '--',label=label)
plt.legend()
plt.xlabel(r"$\rho$")
plt.ylabel(r"$\theta_m(\rho)$")
plt.show()
|
[
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.show",
"numpy.sum",
"scipy.integrate.quad",
"numpy.logspace",
"matplotlib.pyplot.legend",
"scipy.special.gammainc",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.special.gamma"
] |
[((2512, 2535), 'numpy.logspace', 'np.logspace', (['(-3)', '(0)', '(100)'], {}), '(-3, 0, 100)\n', (2523, 2535), True, 'import numpy as np\n'), ((2945, 2957), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2955, 2957), True, 'import matplotlib.pyplot as plt\n'), ((2962, 2983), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\rho$"""'], {}), "('$\\\\rho$')\n", (2972, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\theta_m(\\\\rho)$"""'], {}), "('$\\\\theta_m(\\\\rho)$')\n", (2998, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3032, 3034), True, 'import matplotlib.pyplot as plt\n'), ((458, 476), 'numpy.exp', 'np.exp', (['(-K / scale)'], {}), '(-K / scale)\n', (464, 476), True, 'import numpy as np\n'), ((539, 561), 'numpy.exp', 'np.exp', (['(-kappa / scale)'], {}), '(-kappa / scale)\n', (545, 561), True, 'import numpy as np\n'), ((663, 683), 'scipy.special.gamma', 'gamma', (['(1 + 1 / shape)'], {}), '(1 + 1 / shape)\n', (668, 683), False, 'from scipy.special import gamma\n'), ((712, 741), 'numpy.exp', 'np.exp', (['(-(K / scale) ** shape)'], {}), '(-(K / scale) ** shape)\n', (718, 741), True, 'import numpy as np\n'), ((845, 878), 'numpy.exp', 'np.exp', (['(-(kappa / scale) ** shape)'], {}), '(-(kappa / scale) ** shape)\n', (851, 878), True, 'import numpy as np\n'), ((972, 992), 'scipy.special.gamma', 'gamma', (['(1 - 1 / shape)'], {}), '(1 - 1 / shape)\n', (977, 992), False, 'from scipy.special import gamma\n'), ((1141, 1175), 'numpy.exp', 'np.exp', (['(-(kappa / scale) ** -shape)'], {}), '(-(kappa / scale) ** -shape)\n', (1147, 1175), True, 'import numpy as np\n'), ((2211, 2221), 'numpy.exp', 'np.exp', (['(-b)'], {}), '(-b)\n', (2217, 2221), True, 'import numpy as np\n'), ((2827, 2878), 'matplotlib.pyplot.loglog', 'plt.loglog', (['rho_list', 'kernel_list', '"""-"""'], {'label': 'label'}), "(rho_list, kernel_list, '-', label=label)\n", (2837, 2878), True, 'import matplotlib.pyplot as plt\n'), ((2885, 2943), 'matplotlib.pyplot.loglog', 'plt.loglog', (['rho_list', '(rho_list ** alpha)', '"""--"""'], {'label': 'label'}), "(rho_list, rho_list ** alpha, '--', label=label)\n", (2895, 2943), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1033), 'numpy.exp', 'np.exp', (['(-(K / scale) ** -shape)'], {}), '(-(K / scale) ** -shape)\n', (1009, 1033), True, 'import numpy as np\n'), ((1423, 1465), 'scipy.special.gammainc', 'gammainc', (['(param ** z)', '(K / param ** (1 - z))'], {}), '(param ** z, K / param ** (1 - z))\n', (1431, 1465), False, 'from scipy.special import gammainc\n'), ((1700, 1736), 'scipy.integrate.quad', 'quad', (['integrand', 'tmin', 'T'], {'args': '_args'}), '(integrand, tmin, T, args=_args)\n', (1704, 1736), False, 'from scipy.integrate import quad, dblquad\n'), ((2010, 2046), 'scipy.integrate.quad', 'quad', (['integrand', 'tmin', 'T'], {'args': '_args'}), '(integrand, tmin, T, args=_args)\n', (2014, 2046), False, 'from scipy.integrate import quad, dblquad\n'), ((2348, 2397), 'scipy.integrate.quad', 'quad', (['kernel2', 'betalim[0]', 'betalim[1]'], {'args': '_args'}), '(kernel2, betalim[0], betalim[1], args=_args)\n', (2352, 2397), False, 'from scipy.integrate import quad, dblquad\n'), ((258, 270), 'scipy.special.gamma', 'gamma', (['(k + 1)'], {}), '(k + 1)\n', (263, 270), False, 'from scipy.special import gamma\n'), ((303, 333), 'numpy.exp', 'np.exp', (['(-tau * beta * rho * fm)'], {}), '(-tau * beta * rho * fm)\n', 
(309, 333), True, 'import numpy as np\n'), ((328, 337), 'numpy.sum', 'np.sum', (['L'], {}), '(L)\n', (334, 337), True, 'import numpy as np\n')]
|
import cherrypy
import GameZero
import os
import resources.infoprovider as infopro
from GameZero import db
from GameZero import search
from GameZero import tasks
from GameZero import update
from GameZero import functions
class site(object):
@cherrypy.expose
def index(self):
return self.LoadTheme("Index")
@cherrypy.expose
def Games(self, Wanted=""):
return self.LoadTheme("Games<br>" + db.wantedLIST(GameZero.DATABASE_PATH, Wanted))
@cherrypy.expose
def Platforms(self):
content = "<br /><table border='1'><thead><tr><th style='text-align:center; font-size: 10px'>Console</th><th style='text-align:center; font-size: 10px'>System</th><th style='text-align:center; font-size: 10px'>Year</th><th style='text-align:center; font-size: 10px'>ROM Extension(s)</th><th style='text-align:center; font-size: 10px'>BIOS</th></tr></thead>"
table_content = db.getinfo(GameZero.DATABASE_PATH, "Systems")
table_data = "<tbody>"
for row in table_content:
table_data = table_data + "<tr>"
table_data = table_data + "<td style='text-align:center;'>" + str(row[1]) + "</td>"
table_data = table_data + "<td style='text-align:center; font-size: 11px'>" + str(row[2]) + "</td>"
table_data = table_data + "<td style='text-align:center; font-size: 10px'>" + str(row[3]) + "</td>"
table_data = table_data + "<td style='text-align:center; font-size: 10px'>" + str(row[4].replace(" .","<br/>.")) + "</td>"
table_data = table_data + "<td style='text-align:center; font-size: 10px'>" + str(row[5]) + "</td>"
table_data = table_data + "</tr>"
table_data = table_data + "</tbody></table>"
content = content + table_data
return self.LoadTheme(content)
@cherrypy.expose
def Stats(self):
return self.LoadTheme("STATS")
@cherrypy.expose
def search(self, keyword):
data = ""
gamesdata = infopro.thegamesdb.getgamelist(keyword)
for dat in gamesdata:
#data = data + str(dat[0]) + "<br>"
db.insertSearchHistory(GameZero.DATABASE_PATH, keyword, dat[0])
gamedata = infopro.thegamesdb.getgame(dat[0])
for gid, gtitle, PlatformId, Platform, ReleaseDate, Overview, Coop, boxart, YouTube, Publisher, Developer, Rating in gamedata:
button = functions.createbutton([gid,gtitle,PlatformId,Platform])
data = data + """<table style="border: 1; width:100%; border-spacing: 2; table-layout: fixed">"""
data = data + """<tr>"""
data = data + """<td rowspan="3" valign="top" style="text-align: center; padding: 5px">"""
data = data + """<a href=http://legacy.thegamesdb.net/game/""" + gid + """/>
<img width='100' height='158' src='http://legacy.thegamesdb.net/banners/""" + boxart + """' alt='""" + gtitle + """' style='border: 1px solid #666;'></a><br/><br/>""" +button+""" </td>"""
data = data + """<td style="padding: 5px; font-size: 10px"><a href=http://legacy.thegamesdb.net/game/""" + gid + """/>""" + gtitle + """</a></td>"""
data = data + """<td style="padding: 5px; text-align: right; font-size: 10px">""" + ReleaseDate + """</td>"""
data = data + """</tr><tr>"""
data = data + """<td colspan="2" style="padding: 5px;font-size: 10px">Rating: """ + Rating + """ <br/> <br/>""" + Overview + """<br/><br/>Co-op: """ + Coop + """<br/>Publisher: """ + Publisher + """<br/>Developer: """ + Developer + """</td>"""
data = data + """</tr><tr>"""
data = data + """<td width="70%" style="padding: 5px; font-size: 10px"><a href=http://legacy.thegamesdb.net/platform/""" + Platform.replace(" ", "-") + """ />""" + Platform + """</a></td>"""
data = data + """<td style="padding: 5px; font-size: 10px; text-align: right">"""
if (YouTube=="N/A"):
data = data + """ Youtube: """ + YouTube
else:
data = data + """ <a href=""" + YouTube + """> Youtube </a>"""
data = data + """</td></tr></table><br/><br/>"""
data = data + """ <br/><br/> """
return self.LoadTheme(data)
@cherrypy.expose
def Settings(self):
if(GameZero.BROWSER == "1"):
launchBrowser = "checked"
else:
launchBrowser = ""
themebox = "<select>"
for item in os.listdir(os.path.join(GameZero.PROG_DIR, GameZero.MY_NAME,"resources","interface")):
if (GameZero.THEME == item):
themebox = themebox + "<option selected value=""" + item + """>""" + item + "</option>"
else:
themebox = themebox + "<option value=""" + item + """>""" + item + "</option>"
themebox = themebox + "</select>"
content = ""
content = content + """
<br />
<div align="right">
<button id="saveButton">Save</button>
</div>
<ul class="idTabs">
<li><a href="#GamezServer">Gamez Server</a></li>
<li><a href="#Downloaders">Downloaders</a></li>
<li><a href="#Searchers">Searchers</a></li>
<li><a href="#PostProcess">Post Process</a></li>
</ul>
<div class="tab-container">
<div id="GamezServer">
<fieldset align="left">
<legend>General</legend>
<div>
Current Version: """ + str(GameZero.VERSION) + """
</div>
<br />
<div>
Host / Port<br />
<input type="input" size="45" id="host" value='""" + str(GameZero.HOST) + """' />
<input type="input" size="5" id="port" value='""" + str(GameZero.SERVERPORT) + """' />
</div>
<br />
<div>
<input type="checkbox" name="launchBrowser" id="launchBrowser" """ + launchBrowser + """ />
Launch browser on startup
</div>
</fieldset>
<br />
<fieldset align="left">
<legend>Theme</legend>
<div>
Default Theme """
content = content + themebox
content = content + """
</div>
</fieldset>
<br />
<fieldset align="left">
<legend>Login</legend>
<div>
<label for="host">Username</label><br />
<input type="input" size="50" id="username" value='""" + str(GameZero.USERNAME) + """' />
</div>
<div>
<label for="host">Password</label><br />
<input type="input" size="50" id="password" value='""" + str(GameZero.PASSWORD) + """' />
</div>
</fieldset>
<br />
<fieldset align="left">
<legend>Recommendations</legend> <div>"""
content = content + """\n <table style="width:100%;" border=1>\n"""
sys = db.getinfo(GameZero.DATABASE_PATH, "Systems")
c = str(GameZero.RECOMENDATIONS).split(";")
count = 0
rcnt = 0
for row in sys:
if count == 0:
content = content + """<tr>\n"""
rcnt = rcnt + 1
chkid = str(count) + str(rcnt)
chk = ""
for tmp in c:
if tmp == chkid:
chk = " checked=checked "
            content = content + """<td style='font-size: 10px'><input id=" """ + str(chkid) + """ " type="checkbox" """ + chk + """ /><label for="host">""" + str(row[2]).strip() + """</td>\n"""
count = count + 1
if count == 3:
content = content + """</tr>\n"""
count = 0
content = content + """</table>\n</div>
</fieldset>
<br />
<fieldset align="left">
<legend>Updates</legend>
<div>
<label for="host">Update URL</label><br />
<input type="input" size="50" id="updateurl" value='""" + str(GameZero.UPDATEURL) + """' />
</div>
<br />
<div align="right">
<a href="Update">Run update NOW!</a>
</div>
</fieldset>
</div>
<div id="Downloaders">Downloaders</div>
<div id="Searchers">
<fieldset align="left">
<legend>General</legend>
<br />
<div>
Retropie System URL <br />
<input type="input" size="50" id="host" value='""" + str(GameZero.RPSYSURL) + """' />
</div>
<br />
<div>
THEGAMEDB api [GetGamesList] URL<br />
<input type="input" size="50" id="host" value='""" + str(GameZero.APIGDBGGL) + """' />
</div>
<br />
<div>
THEGAMEDB api [GetGame] URL<br />
<input type="input" size="50" id="host" value='""" + str(GameZero.APIGDBGG) + """' />
</div>
<br />
<div>
THEGAMEDB api [GetPlatformsList] URL<br />
<input type="input" size="50" id="host" value='""" + str(GameZero.APIGDBGPL) + """' />
</div>
</fieldset>
</div>
<div id="PostProcess">PostProcess</div>
</div>
<br />
<div align="right">
<button id="saveButton">Save</button>
</div>
"""
return self.LoadTheme(content)
@cherrypy.expose
def LoadTheme(self, content):
with open(os.path.join(GameZero.THEMEPATH, "tmpl","header.tpl"), 'r') as thefile:
header = thefile.read()
with open(os.path.join(GameZero.THEMEPATH, "tmpl","footer.tpl"), 'r') as thefile:
footer = thefile.read()
with open(os.path.join(GameZero.THEMEPATH, "tmpl","nav.tpl"), 'r') as thefile:
nav = thefile.read().replace("_WANTEDNAV", db.wantedLIST(GameZero.DATABASE_PATH))
with open(os.path.join(GameZero.THEMEPATH, "tmpl","searchbox.tpl"), 'r') as thefile:
searchbox = thefile.read()
try:
stuff = header + nav + searchbox + content + footer
except:
stuff = header + nav + searchbox + content.encode('ascii', 'ignore') + footer
return stuff
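# Hypothetical entry point (editorial addition, not in the original source):
# serve the site with CherryPy. GameZero.HOST and GameZero.SERVERPORT are
# assumed to hold the bind address and port, as the Settings page suggests.
if __name__ == '__main__':
    cherrypy.config.update({'server.socket_host': str(GameZero.HOST),
                            'server.socket_port': int(GameZero.SERVERPORT)})
    cherrypy.quickstart(site())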
|
[
"GameZero.functions.createbutton",
"GameZero.db.insertSearchHistory",
"GameZero.db.getinfo",
"GameZero.db.wantedLIST",
"resources.infoprovider.thegamesdb.getgamelist",
"resources.infoprovider.thegamesdb.getgame",
"os.path.join"
] |
[((954, 999), 'GameZero.db.getinfo', 'db.getinfo', (['GameZero.DATABASE_PATH', '"""Systems"""'], {}), "(GameZero.DATABASE_PATH, 'Systems')\n", (964, 999), False, 'from GameZero import db\n'), ((2054, 2093), 'resources.infoprovider.thegamesdb.getgamelist', 'infopro.thegamesdb.getgamelist', (['keyword'], {}), '(keyword)\n', (2084, 2093), True, 'import resources.infoprovider as infopro\n'), ((7905, 7950), 'GameZero.db.getinfo', 'db.getinfo', (['GameZero.DATABASE_PATH', '"""Systems"""'], {}), "(GameZero.DATABASE_PATH, 'Systems')\n", (7915, 7950), False, 'from GameZero import db\n'), ((2197, 2260), 'GameZero.db.insertSearchHistory', 'db.insertSearchHistory', (['GameZero.DATABASE_PATH', 'keyword', 'dat[0]'], {}), '(GameZero.DATABASE_PATH, keyword, dat[0])\n', (2219, 2260), False, 'from GameZero import db\n'), ((2285, 2319), 'resources.infoprovider.thegamesdb.getgame', 'infopro.thegamesdb.getgame', (['dat[0]'], {}), '(dat[0])\n', (2311, 2319), True, 'import resources.infoprovider as infopro\n'), ((4734, 4809), 'os.path.join', 'os.path.join', (['GameZero.PROG_DIR', 'GameZero.MY_NAME', '"""resources"""', '"""interface"""'], {}), "(GameZero.PROG_DIR, GameZero.MY_NAME, 'resources', 'interface')\n", (4746, 4809), False, 'import os\n'), ((463, 508), 'GameZero.db.wantedLIST', 'db.wantedLIST', (['GameZero.DATABASE_PATH', 'Wanted'], {}), '(GameZero.DATABASE_PATH, Wanted)\n', (476, 508), False, 'from GameZero import db\n'), ((2504, 2563), 'GameZero.functions.createbutton', 'functions.createbutton', (['[gid, gtitle, PlatformId, Platform]'], {}), '([gid, gtitle, PlatformId, Platform])\n', (2526, 2563), False, 'from GameZero import functions\n'), ((11415, 11469), 'os.path.join', 'os.path.join', (['GameZero.THEMEPATH', '"""tmpl"""', '"""header.tpl"""'], {}), "(GameZero.THEMEPATH, 'tmpl', 'header.tpl')\n", (11427, 11469), False, 'import os\n'), ((11545, 11599), 'os.path.join', 'os.path.join', (['GameZero.THEMEPATH', '"""tmpl"""', '"""footer.tpl"""'], {}), "(GameZero.THEMEPATH, 'tmpl', 'footer.tpl')\n", (11557, 11599), False, 'import os\n'), ((11675, 11726), 'os.path.join', 'os.path.join', (['GameZero.THEMEPATH', '"""tmpl"""', '"""nav.tpl"""'], {}), "(GameZero.THEMEPATH, 'tmpl', 'nav.tpl')\n", (11687, 11726), False, 'import os\n'), ((11800, 11837), 'GameZero.db.wantedLIST', 'db.wantedLIST', (['GameZero.DATABASE_PATH'], {}), '(GameZero.DATABASE_PATH)\n', (11813, 11837), False, 'from GameZero import db\n'), ((11860, 11917), 'os.path.join', 'os.path.join', (['GameZero.THEMEPATH', '"""tmpl"""', '"""searchbox.tpl"""'], {}), "(GameZero.THEMEPATH, 'tmpl', 'searchbox.tpl')\n", (11872, 11917), False, 'import os\n')]
|
"""initial migration
Revision ID: d5f5ec8414a8
Revises:
Create Date: 2021-10-25 12:35:00.804329
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd5f5ec8414a8'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('appointments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('first', sa.String(), nullable=False),
sa.Column('last', sa.String(), nullable=False),
sa.Column('mobile', sa.String(length=50), nullable=False),
sa.Column('dr_first', sa.String(length=50), nullable=False),
sa.Column('dr_last', sa.String(length=50), nullable=False),
sa.Column('location', sa.String(length=255), nullable=False),
sa.Column('interval', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=False),
sa.Column('timezone', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('appointments')
# ### end Alembic commands ###
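# Typical invocation (standard Alembic CLI usage, not part of this file):
#   alembic upgrade head      # apply this migration (creates 'appointments')
#   alembic downgrade base    # revert it (drops the table)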
|
[
"alembic.op.drop_table",
"sqlalchemy.DateTime",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((1152, 1181), 'alembic.op.drop_table', 'op.drop_table', (['"""appointments"""'], {}), "('appointments')\n", (1165, 1181), False, 'from alembic import op\n'), ((992, 1021), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1015, 1021), True, 'import sqlalchemy as sa\n'), ((418, 430), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (428, 430), True, 'import sqlalchemy as sa\n'), ((472, 483), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (481, 483), True, 'import sqlalchemy as sa\n'), ((524, 535), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (533, 535), True, 'import sqlalchemy as sa\n'), ((578, 598), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (587, 598), True, 'import sqlalchemy as sa\n'), ((643, 663), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (652, 663), True, 'import sqlalchemy as sa\n'), ((707, 727), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (716, 727), True, 'import sqlalchemy as sa\n'), ((772, 793), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (781, 793), True, 'import sqlalchemy as sa\n'), ((838, 850), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (848, 850), True, 'import sqlalchemy as sa\n'), ((891, 904), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (902, 904), True, 'import sqlalchemy as sa\n'), ((949, 969), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (958, 969), True, 'import sqlalchemy as sa\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = [
"click>=7.0.0",
"fsspec>=0.7.0",
"xarray>=0.15.0",
"zarr>=2.3.0",
]
test_requirements = ["pytest"]
setup(
author="<NAME>",
author_email="<EMAIL>",
python_requires=">=3.6",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
description="Describe zarr stores from the command line.",
long_description=readme,
url="https://github.com/oliverwm1/zarrdump",
entry_points={
"console_scripts": [
"zarrdump=zarrdump.core:dump",
]
},
install_requires=requirements,
license="BSD 3-Clause license",
include_package_data=True,
keywords="zarr",
name="zarrdump",
packages=find_packages(),
test_suite="tests",
tests_require=test_requirements,
version="0.2.2",
)
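# Typical workflow (standard setuptools/pip usage; the console script's
# argument shape is an assumption, not taken from this file):
#   pip install .        # installs zarrdump and its requirements
#   zarrdump <url>       # console script wired to zarrdump.core:dump above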
|
[
"setuptools.find_packages"
] |
[((1241, 1256), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1254, 1256), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python3
import subprocess
import json
import sys
import os
script_dir = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
is_cxx = "++" in sys.argv[0]
def cc_exec(args):
if os.getenv("GCLANG_PATH"):
cc_name = os.environ["GCLANG_PATH"]
else:
cc_name = "gclang"
if is_cxx:
if os.getenv("GCLANGXX_PATH"):
cc_name = os.environ["GCLANGXX_PATH"]
else:
cc_name = "gclang++"
argv = [cc_name] + args
#print(" ".join(argv))
return subprocess.run(argv)
def get_bc(filename):
if os.getenv("GETBC_PATH"):
cc_name = os.environ["GETBC_PATH"]
else:
cc_name = "get-bc"
    argv = [cc_name, '-b', '-o', filename + '.bc', filename]
#print(" ".join(argv))
return subprocess.run(argv)
def common_opts():
return [
"-g",
#"-fno-inline",
#"-fno-unroll-loops",
#"-O0",
#"-fno-discard-value-names",
]
def cc_mode():
args = common_opts()
args += sys.argv[1:]
return cc_exec(args)
def ld_mode():
args = common_opts()
outname = 'a.out'
old_args = sys.argv[1:]
i = 0
while i < len(old_args):
        if old_args[i] == '-o':
            outname = old_args[i + 1]
            args += [outname + '.bc', '-o', outname]
            i += 2  # skip both '-o' and its argument
        elif not old_args[i].endswith(('.c', '.cc', '.cpp', '.h', '.hpp', '.o', '.obj', '.a', '.la')):
            args.append(old_args[i])
            i += 1
        else:
            i += 1  # drop source/object inputs; the linked .bc replaces them
with open(outname + '.link_bc.json', 'w') as j:
json.dump({'original': old_args, 'stripped': args, 'name': outname}, j)
return cc_exec(old_args)
def is_ld_mode():
return not ("--version" in sys.argv or "--target-help" in sys.argv or
"-c" in sys.argv or "-E" in sys.argv or "-S" in sys.argv or
"-shared" in sys.argv)
if len(sys.argv) <= 1:
cc_exec([])
elif is_ld_mode():
ld_mode()
else:
cc_mode()
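# Usage sketch (an assumption based on the is_cxx check above, not from the
# source): install this script under both a C and a C++ compiler name, e.g.
#   ln -s wrapper.py cc
#   ln -s wrapper.py c++
# so that "++" in argv[0] selects gclang++ instead of gclang; compile steps
# fall through to cc_mode() while link-like invocations hit ld_mode().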
|
[
"json.dump",
"subprocess.run",
"os.path.abspath",
"os.getenv"
] |
[((207, 231), 'os.getenv', 'os.getenv', (['"""GCLANG_PATH"""'], {}), "('GCLANG_PATH')\n", (216, 231), False, 'import os\n'), ((531, 551), 'subprocess.run', 'subprocess.run', (['argv'], {}), '(argv)\n', (545, 551), False, 'import subprocess\n'), ((582, 605), 'os.getenv', 'os.getenv', (['"""GETBC_PATH"""'], {}), "('GETBC_PATH')\n", (591, 605), False, 'import os\n'), ((787, 807), 'subprocess.run', 'subprocess.run', (['argv'], {}), '(argv)\n', (801, 807), False, 'import subprocess\n'), ((122, 147), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (137, 147), False, 'import os\n'), ((340, 366), 'os.getenv', 'os.getenv', (['"""GCLANGXX_PATH"""'], {}), "('GCLANGXX_PATH')\n", (349, 366), False, 'import os\n'), ((1551, 1622), 'json.dump', 'json.dump', (["{'original': old_args, 'stripped': args, 'name': outname}", 'j'], {}), "({'original': old_args, 'stripped': args, 'name': outname}, j)\n", (1560, 1622), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
# Python 3
# Copies the file if its modification date is newer or if no file exists at the destination.
import os
import sys
import shutil
class File(object):
def __init__(self, path):
self.path = os.path.join(*os.path.splitdrive(path))
self.mtime = os.stat(path).st_mtime
try:
fileNew = File('//SERVIDOR/ftp/Leonardo/Arquivo.xml')
dest = File('C:/Users/leonardo/Desktop')
except OSError:  # no new file to copy
sys.exit(0)
try:  # compare the files' modification dates
fileOld = File('C:/Users/leonardo/Desktop/Arquivo.xml')
if fileNew.mtime > fileOld.mtime:
shutil.copy2(fileNew.path, dest.path)
except OSError:  # no old file to compare against, copy directly
shutil.copy2(fileNew.path, dest.path)
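# Note (editorial addition): shutil.copy2 preserves file metadata, including
# the modification time, so the mtime comparison above stays meaningful on
# subsequent runs of this script.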
|
[
"os.stat",
"shutil.copy2",
"os.path.splitdrive",
"sys.exit"
] |
[((478, 489), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (486, 489), False, 'import sys\n'), ((631, 668), 'shutil.copy2', 'shutil.copy2', (['fileNew.path', 'dest.path'], {}), '(fileNew.path, dest.path)\n', (643, 668), False, 'import shutil\n'), ((740, 777), 'shutil.copy2', 'shutil.copy2', (['fileNew.path', 'dest.path'], {}), '(fileNew.path, dest.path)\n', (752, 777), False, 'import shutil\n'), ((298, 311), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (305, 311), False, 'import os\n'), ((251, 275), 'os.path.splitdrive', 'os.path.splitdrive', (['path'], {}), '(path)\n', (269, 275), False, 'import os\n')]
|
from os.path import join
from mayavi import mlab
input_file = join('/home/phil/Data', 'pcd_examples.pcd')
with open(input_file, 'r') as f:
data = f.read()
data1 = data.split('\n')
data = data1[11:]  # skip the 11-line ASCII PCD header (comment, VERSION, FIELDS, SIZE, TYPE, COUNT, WIDTH, HEIGHT, VIEWPOINT, POINTS, DATA)
x = []
y = []
z = []
for i in data[:-1]:
temp = i.split(' ')
x.append(float(temp[0]))
y.append(float(temp[1]))
z.append(float(temp[2]))
mlab.points3d(x, y, z, mode='point') # 'points' render mode significantly faster than 'spheres'
mlab.show()
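# A faster variant (editorial sketch; same result for this ASCII PCD file):
# parse the numeric columns with numpy instead of a Python loop.
#   import numpy as np
#   pts = np.loadtxt(input_file, skiprows=11)
#   mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point')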
|
[
"mayavi.mlab.show",
"os.path.join",
"mayavi.mlab.points3d"
] |
[((65, 108), 'os.path.join', 'join', (['"""/home/phil/Data"""', '"""pcd_examples.pcd"""'], {}), "('/home/phil/Data', 'pcd_examples.pcd')\n", (69, 108), False, 'from os.path import join\n'), ((382, 418), 'mayavi.mlab.points3d', 'mlab.points3d', (['x', 'y', 'z'], {'mode': '"""point"""'}), "(x, y, z, mode='point')\n", (395, 418), False, 'from mayavi import mlab\n'), ((479, 490), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (488, 490), False, 'from mayavi import mlab\n')]
|
import base64
import telnetlib
import math
from enum import Enum
import re
import time
class RadioMicrohardpDDL1800:
#---------------------------------------------------------------------------
# Public types
#---------------------------------------------------------------------------
class ChannelBandwidth(Enum):
CHANNEL_BANDWIDTH_8_MHZ = 0
CHANNEL_BANDWIDTH_4_MHZ = 1
CHANNEL_BANDWIDTH_2_MHZ = 2
CHANNEL_BANDWIDTH_1_MHZ = 3
#---------------------------------------------------------------------------
# Public constructors
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
def __init__(self, ipAddress, username, password):
self.ipAddress = ipAddress
self.port = 23
self.username = username
self.password = password
self.telnetClient = telnetlib.Telnet()
#---------------------------------------------------------------------------
# Public methods
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
def openTelnet(self):
self.telnetClient.open(self.ipAddress, self.port)
self.telnetClient.read_until("login: ")
self.telnetClient.write(self.username + "\r")
self.telnetClient.read_until("Password: ")
self.telnetClient.write(self.password + "\r")
self.telnetClient.read_until(">")
#---------------------------------------------------------------------------
def closeTelnet(self):
self.telnetClient.write("ATO\r")
self.telnetClient.close()
#---------------------------------------------------------------------------
def rebootModem(self):
self.telnetClient.write("AT+MSREB\r")
response = self.telnetClient.read_until("OK\r")
time.sleep(60)
self.telnetClient.close()
self.openTelnet()
#---------------------------------------------------------------------------
def enableConfigurationChanges(self):
self.telnetClient.write("AT&W\r")
response = self.telnetClient.read_until(">")
#---------------------------------------------------------------------------
def getRadioTxPowerDbm(self):
self.telnetClient.write("AT+MWTXPOWER\r")
response = self.telnetClient.read_until(">")
txPowerDbm = int(re.findall(".* (\d+) dbm.*", response)[0])
return txPowerDbm
#---------------------------------------------------------------------------
def setRadioTxPowerDbm(self, txPowerDbm, enableChange=True):
if txPowerDbm < 7:
txPowerDbm = 7
elif txPowerDbm > 30:
txPowerDbm = 30
self.telnetClient.write("AT+MWTXPOWER={}\r".format(txPowerDbm))
response = self.telnetClient.read_until(">")
if enableChange:
self.enableConfigurationChanges()
#---------------------------------------------------------------------------
def getRadioTxPowerW(self):
txPowerDbm = self.getRadioTxPowerDbm()
txPowerW = pow(10, (txPowerDbm - 30) / 10)
return txPowerW
#---------------------------------------------------------------------------
def setRadioTxPowerW(self, txPowerW, enableChange=True):
        txPowerDbm = 10 * math.log10(txPowerW) + 30  # dBm = 10 * log10(P in W) + 30
        self.setRadioTxPowerDbm(txPowerDbm, enableChange=False)  # committed once below
if enableChange:
self.enableConfigurationChanges()
#---------------------------------------------------------------------------
def getRadioChannelFrequencyMhz(self):
self.telnetClient.write("AT+MWFREQ\r")
response = self.telnetClient.read_until(">")
channelFrequencyMhz = int(re.findall(".* (\d+) MHz.*", response)[0])
return channelFrequencyMhz
#---------------------------------------------------------------------------
def setRadioChannelFrequencyMhz(self, channelFrequencyMhz, enableChange=True):
if channelFrequencyMhz < 1814:
channelFrequencyMhz = 1814
elif channelFrequencyMhz > 1866:
channelFrequencyMhz = 1866
channelFrequencyMhz = channelFrequencyMhz - 1810
self.telnetClient.write("AT+MWFREQ1800={}\r".format(channelFrequencyMhz))
response = self.telnetClient.read_until(">")
if enableChange:
self.enableConfigurationChanges()
#---------------------------------------------------------------------------
def getRadioChannelBandwidth(self):
self.telnetClient.write("AT+MWFBAND\r")
response = self.telnetClient.read_until(">")
        channelBandwidth = self.ChannelBandwidth(int(re.findall(".* (\d+) - .*MHz.*", response)[0]))
return channelBandwidth
#---------------------------------------------------------------------------
def setRadioChannelBandwidth(self, channelBandwidth, enableChange=True):
self.telnetClient.write("AT+MWFBAND={}\r".format(channelBandwidth))
response = self.telnetClient.read_until(">")
if enableChange:
self.enableConfigurationChanges()
#---------------------------------------------------------------------------
def getRadioChannelInterferenceTable(self, subBands=2):
interferenceTable = []
for i in range(subBands):
self.telnetClient.write("AT+MWINTFSCAN={}\r".format(i))
response = self.telnetClient.read_until(">", 20)
channels = re.findall("[\n|\r](\d+)", response)
for channel in channels:
interferenceTable.append(int(channel))
interferenceTable.sort()
return interferenceTable
#---------------------------------------------------------------------------
def getRadioNetworkId(self):
self.telnetClient.write("AT+MWNETWORKID\r")
response = self.telnetClient.read_until(">")
networkId = int(re.findall(".* ID: (\w+)", response)[0])
return networkId
#---------------------------------------------------------------------------
def setRadioNetworkId(self, networkId, enableChange=True):
self.telnetClient.write("AT+MWFNETWORKID={}\r".format(networkId))
response = self.telnetClient.read_until(">")
if enableChange:
self.enableConfigurationChanges()
#---------------------------------------------------------------------------
def getRadioEncryptionKey(self):
self.telnetClient.write("AT+MWVENCRYPT\r")
response = self.telnetClient.read_until(">")
encryptionKey = int(re.findall(".* Password: (\w+)", response)[0])
return encryptionKey
#---------------------------------------------------------------------------
def setRadioEncryptionKey(self, encryptionKey, enableChange=True):
self.telnetClient.write("AT+MWVENCRYPT={}\r".format(encryptionKey))
response = self.telnetClient.read_until(">")
if enableChange:
self.enableConfigurationChanges()
#---------------------------------------------------------------------------
def isRadioConnectedToNetwork(self):
self.telnetClient.write("AT+MWSTATUS\r")
response = self.telnetClient.read_until(">")
isConnected = "Connection Info" in response
return isConnected
|
[
"re.findall",
"telnetlib.Telnet",
"time.sleep"
] |
[((969, 987), 'telnetlib.Telnet', 'telnetlib.Telnet', ([], {}), '()\n', (985, 987), False, 'import telnetlib\n'), ((2004, 2018), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2014, 2018), False, 'import time\n'), ((5637, 5674), 're.findall', 're.findall', (["'[\\n|\\r](\\\\d+)'", 'response'], {}), "('[\\n|\\r](\\\\d+)', response)\n", (5647, 5674), False, 'import re\n'), ((2546, 2585), 're.findall', 're.findall', (['""".* (\\\\d+) dbm.*"""', 'response'], {}), "('.* (\\\\d+) dbm.*', response)\n", (2556, 2585), False, 'import re\n'), ((3890, 3929), 're.findall', 're.findall', (['""".* (\\\\d+) MHz.*"""', 'response'], {}), "('.* (\\\\d+) MHz.*', response)\n", (3900, 3929), False, 'import re\n'), ((4832, 4875), 're.findall', 're.findall', (['""".* (\\\\d+) - .*MHz.*"""', 'response'], {}), "('.* (\\\\d+) - .*MHz.*', response)\n", (4842, 4875), False, 'import re\n'), ((6082, 6119), 're.findall', 're.findall', (['""".* ID: (\\\\w+)"""', 'response'], {}), "('.* ID: (\\\\w+)', response)\n", (6092, 6119), False, 'import re\n'), ((6748, 6791), 're.findall', 're.findall', (['""".* Password: (\\\\w+)"""', 'response'], {}), "('.* Password: (\\\\w+)', response)\n", (6758, 6791), False, 'import re\n')]
|
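getRadioTxPowerW and setRadioTxPowerW in the class above convert between watts and dBm; the round trip is just the pair of formulas below (plain Python, independent of the radio):

import math

def dbm_to_watts(dbm):
    return 10 ** ((dbm - 30) / 10.0)

def watts_to_dbm(watts):
    return 10 * math.log10(watts) + 30

assert dbm_to_watts(30) == 1.0    # 30 dBm is exactly 1 W
assert watts_to_dbm(1.0) == 30.0  # and back again
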
print(' \033[36;40mExercício Python #056 - Analisador completo\033[m')
print('')
# import
from time import sleep
# Accumulators
maior = 0
nameup = ''
m = 0
f = 0
n = 0
# Input loop
for c in range(1, 5):
print('-' * 5, end='{}ª Pessoa'.format(c))
print('-' * 5)
nome = str(input('Nome: '))
idade = int(input('Idade: ').strip())
sexo = str(input('Sexo[M/F]: ').strip().upper())
    # Running total of ages
    n += idade
    # Track the oldest person
if idade > maior:
maior = idade
nameup = nome
    # Under-20 counters per sex
if sexo == 'M':
if idade < 20:
m += 1
if sexo == 'F':
if idade < 20:
f += 1
print('')
print('---' * 20)
sleep(1)
print('Processando...')
sleep(1)
# Average calculation
media = n / 4
# Group average
print(' A média de idade do grupo é de {} anos'.format(media))
print('_+_' * 20)
sleep(1)
# Oldest person
print(' A pessoa mais velha possui {} anos e se chama {}'.format(maior, nameup))
print('_+_' * 20)
sleep(1)
# Total: males under 20
if m == 1:
print(' Ao todo tem {} pessoa do sexo masculino com menos de 20 anos! '.format(m))
#
print('_+_' * 20)
#
sleep(1)
elif m > 1:
print(' Ao todo são {} pessoas do sexo masculino com menos de 20 anos!'.format(m))
#
print('_+_' * 20)
sleep(1)
#
# Total: females under 20
if f == 1:
print(' Temos {} pessoa do sexo feminino com menos de 20 anos'.format(f))
#
print('_+_' * 20)
sleep(1)
#
elif f > 1:
print(' São {} pessoas do sexo feminino com menos de 20 anos!'.format(f))
#
print('_+_' * 20)
sleep(1)
#
# No one under 20 at all
if m == 0 and f == 0:
print(' Não temos pessoas com menos de 20 anos!')
#
print('_+_' * 20)
sleep(4)
#
print('')
print('-_- The End -_-')
|
[
"time.sleep"
] |
[((670, 678), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (675, 678), False, 'from time import sleep\n'), ((703, 711), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (708, 711), False, 'from time import sleep\n'), ((843, 851), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (848, 851), False, 'from time import sleep\n'), ((964, 972), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (969, 972), False, 'from time import sleep\n'), ((1146, 1154), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1151, 1154), False, 'from time import sleep\n'), ((1462, 1470), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1467, 1470), False, 'from time import sleep\n'), ((1744, 1752), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (1749, 1752), False, 'from time import sleep\n'), ((1286, 1294), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1291, 1294), False, 'from time import sleep\n'), ((1599, 1607), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1604, 1607), False, 'from time import sleep\n')]
|
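The analyzer above tracks its statistics in loose variables; the same aggregation can be written compactly over a list of records, which makes the logic easier to test (the sample data below is made up):

people = [('Ana', 19, 'F'), ('Bia', 33, 'F'), ('Caio', 17, 'M'), ('Davi', 45, 'M')]
media = sum(idade for _, idade, _ in people) / len(people)
mais_velho = max(people, key=lambda p: p[1])  # ('Davi', 45, 'M')
homens_sub20 = sum(1 for _, i, s in people if s == 'M' and i < 20)
mulheres_sub20 = sum(1 for _, i, s in people if s == 'F' and i < 20)
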
# Generated by Django 3.2.9 on 2021-12-03 11:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('project_core', '0174_spiuser'),
]
operations = [
migrations.AlterField(
model_name='historicalproject',
name='closed_on',
field=models.DateTimeField(blank=True, help_text='When the project was closed', null=True),
),
migrations.AlterField(
model_name='project',
name='closed_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='project',
name='closed_on',
field=models.DateTimeField(blank=True, help_text='When the project was closed', null=True),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((477, 565), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'help_text': '"""When the project was closed"""', 'null': '(True)'}), "(blank=True, help_text='When the project was closed',\n null=True)\n", (497, 565), False, 'from django.db import migrations, models\n'), ((687, 806), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': 'settings.AUTH_USER_MODEL'}), '(blank=True, null=True, on_delete=django.db.models.\n deletion.PROTECT, to=settings.AUTH_USER_MODEL)\n', (704, 806), False, 'from django.db import migrations, models\n'), ((927, 1015), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'help_text': '"""When the project was closed"""', 'null': '(True)'}), "(blank=True, help_text='When the project was closed',\n null=True)\n", (947, 1015), False, 'from django.db import migrations, models\n')]
|
from django.views import generic
from django.urls import reverse_lazy
from .models import Textbook, Uhmarketplace, Courses
from django.utils import timezone
from .filters import TextbookFilter, CoursesFilter
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.urls.base import set_urlconf
#####################################
# Home Page #
#####################################
class IndexView(generic.ListView):
template_name = 'uhmarketplace/index.html'
model = Uhmarketplace
#####################################
# Underdeveloped pages #
#####################################
class DormView(generic.ListView):
template_name = 'uhmarketplace/dorm.html'
model = Uhmarketplace
class SuppliesView(generic.ListView):
template_name = 'uhmarketplace/supplies.html'
model = Uhmarketplace
#####################################
# Textbooks tab views #
#####################################
class CreateView(generic.edit.CreateView):
template_name = 'uhmarketplace/createtextbook.html'
model = Textbook
fields = ['book_title','book_author','course','content', 'created_by']
success_url = reverse_lazy('uhmarketplace:textbook') # more robust than hardcoding to /uhmarketplace/; directs user to index view after creating a Uhmarketplace
class UpdateView(generic.edit.UpdateView):
template_name = 'uhmarketplace/updatetextbook.html'
model = Textbook
fields = ['book_title','book_author','course','content']
success_url = reverse_lazy('uhmarketplace:textbook')
class DeleteView(generic.edit.DeleteView):
template_name = 'uhmarketplace/deletetextbook.html' # override default of uhmarketplace/uhmarketplace_confirm_delete.html
model = Textbook
success_url = reverse_lazy('uhmarketplace:textbook')
class TextbookView(generic.ListView):
template_name = 'uhmarketplace/textbook.html'
context_object_name = 'textbook_list'
def get_queryset(self):
"""Return the all uhmarketplace."""
return Textbook.objects.all()
class DecOrderDateView(generic.ListView):  # descending order (newest to oldest)
template_name = 'uhmarketplace/decorderdate.html'
context_object_name = 'textbook_list'
def get_queryset(self):
"""Return all the textbooks."""
return Textbook.objects.order_by('-published_date')
class AscOrderDateView(generic.ListView): #ascending order (oldest to newest)
template_name = 'uhmarketplace/ascorderdate.html'
context_object_name = 'textbook_list'
def get_queryset(self):
"""Return all the textbooks."""
        return Textbook.objects.order_by('published_date')
class SearchTextbookView(generic.ListView):
template_name = 'uhmarketplace/searchtextbook.html'
context_object_name = 'textbook_list'
def get_queryset(self):
"""Return all the textbooks."""
return Textbook.objects.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['filter'] = TextbookFilter(self.request.GET, queryset=self.get_queryset())
return context
class FilterCreatedByView(generic.ListView):
template_name = 'uhmarketplace/createdBy.html'
context_object_name = 'textbook_list'
def get_queryset(self):
"""Return all the textbooks."""
me = User.objects.get(username=self.request.user)
return Textbook.objects.filter(created_by=me)
#####################################
# Classes tab views #
#####################################
class CreateCourseView(generic.edit.CreateView):
template_name = 'uhmarketplace/createclasses.html'
model = Courses
fields = ['course_title', 'content', 'created_by']
success_url = reverse_lazy('uhmarketplace:classes') # more robust than hardcoding to /uhmarketplace/; directs user to index view after creating a Uhmarketplace
class UpdateCourseView(generic.edit.UpdateView):
template_name = 'uhmarketplace/updateclasses.html'
model = Courses
fields = ['course_title','content']
success_url = reverse_lazy('uhmarketplace:classes')
class DeleteCourseView(generic.edit.DeleteView):
template_name = 'uhmarketplace/deleteclasses.html' # override default of uhmarketplace/uhmarketplace_confirm_delete.html
model = Courses
success_url = reverse_lazy('uhmarketplace:classes')
class CoursesView(generic.ListView):
template_name = 'uhmarketplace/classes.html'
context_object_name = 'courses_list'
def get_queryset(self):
"""Return the all courses."""
return Courses.objects.all()
class DecOrderCourseDateView(generic.ListView):  # descending order (newest to oldest)
template_name = 'uhmarketplace/decorderclassesdate.html'
context_object_name = 'courses_list'
def get_queryset(self):
"""Return all the Courses."""
return Courses.objects.order_by('-published_date')
class AscOrderCourseDateView(generic.ListView): #ascending order (oldest to newest)
template_name = 'uhmarketplace/ascorderclassesdate.html'
context_object_name = 'courses_list'
def get_queryset(self):
"""Return all the Courses."""
        return Courses.objects.order_by('published_date')
class SearchCourseView(generic.ListView):
template_name = 'uhmarketplace/searchclasses.html'
context_object_name = 'courses_list'
def get_queryset(self):
"""Return all the Courses."""
return Courses.objects.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['filter'] = CoursesFilter(self.request.GET, queryset=self.get_queryset())
return context
class FilterCreatedByCoursesView(generic.ListView):
template_name = 'uhmarketplace/createdByClasses.html'
context_object_name = 'courses_list'
def get_queryset(self):
"""Return all the courses."""
me = User.objects.get(username=self.request.user)
return Courses.objects.filter(created_by=me)
|
[
"django.urls.reverse_lazy",
"django.contrib.auth.models.User.objects.get"
] |
[((1229, 1267), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""uhmarketplace:textbook"""'], {}), "('uhmarketplace:textbook')\n", (1241, 1267), False, 'from django.urls import reverse_lazy\n'), ((1576, 1614), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""uhmarketplace:textbook"""'], {}), "('uhmarketplace:textbook')\n", (1588, 1614), False, 'from django.urls import reverse_lazy\n'), ((1824, 1862), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""uhmarketplace:textbook"""'], {}), "('uhmarketplace:textbook')\n", (1836, 1862), False, 'from django.urls import reverse_lazy\n'), ((3812, 3849), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""uhmarketplace:classes"""'], {}), "('uhmarketplace:classes')\n", (3824, 3849), False, 'from django.urls import reverse_lazy\n'), ((4141, 4178), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""uhmarketplace:classes"""'], {}), "('uhmarketplace:classes')\n", (4153, 4178), False, 'from django.urls import reverse_lazy\n'), ((4392, 4429), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""uhmarketplace:classes"""'], {}), "('uhmarketplace:classes')\n", (4404, 4429), False, 'from django.urls import reverse_lazy\n'), ((3399, 3443), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'self.request.user'}), '(username=self.request.user)\n', (3415, 3443), False, 'from django.contrib.auth.models import User\n'), ((5977, 6021), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'self.request.user'}), '(username=self.request.user)\n', (5993, 6021), False, 'from django.contrib.auth.models import User\n')]
|
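SearchTextbookView above hands request.GET to TextbookFilter, but the filter class itself is defined elsewhere; a minimal django-filter FilterSet compatible with that usage might look like this (the field choices are assumptions, not the project's actual code):

import django_filters
from .models import Textbook

class TextbookFilter(django_filters.FilterSet):
    # icontains gives case-insensitive substring search on the title
    book_title = django_filters.CharFilter(lookup_expr='icontains')

    class Meta:
        model = Textbook
        fields = ['book_title', 'book_author', 'course']
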
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import ModelViewSet
from .filters import ContactsFilter
from .models import Client, Contact, Employee
from .renderers import ContactRenderer
from .serializers import (ClientSerializer, ContactCSVSerializer,
ContactSerializer, EmployeeSerializer)
class EmployeeViewSet(ModelViewSet):
"""CRUD для сотрудников."""
queryset = Employee.objects.all()
serializer_class = EmployeeSerializer
@action(detail=True, methods=['post'])
def add_contact(self, request, pk):
"""Добавление связи между сотрудником и клиентом."""
employee = self.get_object()
serializer = ContactSerializer(
data=request.data, context={'employee': employee})
if serializer.is_valid():
serializer.save(employee=employee)
return Response(serializer.data)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ClientViewSet(ModelViewSet):
"""CRUD для клиентов."""
queryset = Client.objects.all()
serializer_class = ClientSerializer
class ContactListCSV(ListAPIView):
"""Выгрузка CSV со списком контактов, с фильтрацией по дате."""
queryset = Contact.objects.all()
serializer_class = ContactCSVSerializer
filter_backends = [DjangoFilterBackend]
filterset_fields = ('date', )
filterset_class = ContactsFilter
renderer_classes = (ContactRenderer, ) + tuple(
api_settings.DEFAULT_RENDERER_CLASSES)
|
[
"rest_framework.response.Response",
"rest_framework.decorators.action"
] |
[((742, 779), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['post']"}), "(detail=True, methods=['post'])\n", (748, 779), False, 'from rest_framework.decorators import action\n'), ((1122, 1147), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1130, 1147), False, 'from rest_framework.response import Response\n'), ((1181, 1244), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (1189, 1244), False, 'from rest_framework.response import Response\n')]
|
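The @action(detail=True, methods=['post']) decorator above exposes add_contact as a POST sub-route under the employee detail URL. Assuming the viewset is registered on a default router as router.register('employees', EmployeeViewSet), a call could look like this (URL and payload fields are illustrative):

from rest_framework.test import APIClient

client = APIClient()
response = client.post('/employees/1/add_contact/', {'date': '2023-01-01'}, format='json')
print(response.status_code, response.data)
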
import sys
from datetime import datetime
def init_logging(path="process.log"):
logger = Logger(path=path)
sys.stdout = logger
return logger
class Logger(object):
"""Logger that writes all of stdout and stderr to the file passed in as `path`.
Known limitations:
- Doesn't log stderr in Jupyter Notebooks
- Writes stderr to stdout
"""
def __init__(self, path="process.log"):
self.stdout = sys.stdout
self.log = open(path, "w")
def write(self, message):
self.stdout.write(message)
if message.isspace():
self.log.write(message)
else:
self.log.write(f"[{str(datetime.now())}]: " + message)
self.log.flush()
def __getattr__(self, attr):
return getattr(self.stdout, attr)
|
[
"datetime.datetime.now"
] |
[((664, 678), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (676, 678), False, 'from datetime import datetime\n')]
|
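Using the Logger is two lines: after init_logging, every print reaches both the console and the log file, with non-blank lines timestamped by write():

logger = init_logging("process.log")
print("starting run")  # shown on stdout and written, timestamped, to process.log
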
import cv2
import numpy as np
import random
import argparse
import logging
import glog as log
import os
import sys
from stcgan.shadow6.nets import *
import stcgan.shadow6.module as module
import glob
import mxnet as mx
# import pydevd
# pydevd.settrace('172.17.122.65', port=10203, stdoutToServer=True, stderrToServer=True)
def get_args(arglist=None):
parser = argparse.ArgumentParser(
        description='Shadow Removal Params')
parser.add_argument('-dbprefix', type=str, default='./ISTD_Dataset/train',
help='path of generated dataset prefix')
parser.add_argument('-valprefix', type=str, default='./',
help='path of generated dataset prefix')
parser.add_argument('-logfn', type=str,
default='deshadow_train', help='path to save log file')
parser.add_argument('-gpuid', type=int, default=0,
help='gpu id, -1 for cpu')
parser.add_argument('-lr', type=float, default=2e-3, help="learning rate")
return parser.parse_args() if arglist is None else parser.parse_args(arglist)
def ferr(label, pred):
pred = pred.ravel()
label = label.ravel()
return np.abs(label - (pred > 0.5)).sum() / label.shape[0]
if __name__ == '__main__':
args = get_args()
# environment setting
log_file_name = args.logfn + '.log'
log_file = open(log_file_name, 'w')
log_file.close()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(log_file_name)
logger.addHandler(fh)
if args.gpuid >= 0:
context = mx.gpu(args.gpuid)
else:
context = mx.cpu()
if not os.path.exists(args.dbprefix):
logging.info(
"training data not exist, pls check if the file path is correct.")
sys.exit(0)
if not os.path.exists("./result"):
os.mkdir("./result")
if not os.path.exists("./val_result"):
os.mkdir("./val_result")
if not os.path.exists("./trained_params"):
os.mkdir("./trained_params")
mstr = 'train'
train_s_dir = os.path.join(args.dbprefix, '%s_A' % mstr) # with shadow
train_m_dir = os.path.join(args.dbprefix, '%s_B' % mstr) # shadow mask
train_g_dir = os.path.join(args.dbprefix, '%s_C' % mstr) # gt
val_s_dir = os.path.join(args.valprefix, 'test')
# val_m_dir = os.path.join(args.valprefix, 'test_B')
# val_g_dir = os.path.join(args.valprefix, 'test_C')
assert os.path.exists(train_s_dir), '%s_A not exist!' % mstr
assert os.path.exists(train_m_dir), '%s_B not exist!' % mstr
assert os.path.exists(train_g_dir), '%s_C not exist!' % mstr
filenms = os.listdir(train_s_dir)
filenms_test = os.listdir(val_s_dir)
# use rec file to load image.
index = list(range(len(filenms)))
index2 = list(range(len(filenms_test)))
lr = args.lr
beta1 = 0.5
batch_size = 16
# rand_shape = (batch_size, 100)
num_epoch = 1000
width = 256
height = 256
data_g1_shape = (batch_size, 3, width, height)
data_g2_shape = (batch_size, 4, width, height)
data_d1_shape = (batch_size, 4, width, height)
data_d2_shape = (batch_size, 7, width, height)
# initialize net
gmod = module.GANModule(
shadow_det_net_G1_v2(),
shadow_removal_net_G2_v2(),
shadow_det_net_D_v2(),
bce_loss_v2(),
l1_loss_v2(),
context=context,
data_g1_shape=data_g1_shape,
data_g2_shape=data_g2_shape,
data_d1_shape=data_d1_shape,
data_d2_shape=data_d2_shape,
hw=int(width / 32)
)
gmod.init_params(mx.init.Uniform(0.2))
gmod.init_optimizer(lr)
metric_acc1 = mx.metric.CustomMetric(ferr)
metric_acc2 = mx.metric.CustomMetric(ferr)
# load data
for epoch in range(num_epoch):
metric_acc1.reset()
metric_acc2.reset()
random.shuffle(index)
random.shuffle(index2)
data_s = np.zeros((batch_size, 3, width, height))
data_m = np.zeros((batch_size, 1, width, height))
data_g = np.zeros((batch_size, 3, width, height))
for i in range(len(index) // batch_size):
for j in range(batch_size):
data_s_tmp = cv2.resize(cv2.imread(os.path.join(
train_s_dir, filenms[index[i * batch_size + j]])) / 255.0, (width, height))
data_m_tmp = cv2.resize(cv2.imread(os.path.join(
train_m_dir, filenms[index[i * batch_size + j]]), cv2.IMREAD_GRAYSCALE) / 255.0, (width, height))
data_m_tmp[data_m_tmp > 0.5] = 1.0
data_m_tmp[data_m_tmp <= 0.5] = 0.0
data_g_tmp = cv2.resize(cv2.imread(os.path.join(
train_g_dir, filenms[index[i * batch_size + j]])) / 255, (width, height))
# random crop
random_x = random.randint(0, data_s_tmp.shape[1] - height)
random_y = random.randint(0, data_s_tmp.shape[0] - width)
data_s[j, :, :, :] = np.transpose(
data_s_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
data_m[j, 0, :, :] = data_m_tmp[random_y: random_y +
width, random_x: random_x + height]
data_g[j, :, :, :] = np.transpose(
data_g_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
gmod.update(mx.nd.array(data_s, ctx=context), mx.nd.array(
data_m, ctx=context), mx.nd.array(data_g, ctx=context))
gmod.temp_label[:] = 0.0
metric_acc1.update([gmod.temp_label], gmod.outputs_fake1)
metric_acc2.update([gmod.temp_label], gmod.outputs_fake2)
gmod.temp_label[:] = 1.0
metric_acc1.update([gmod.temp_label], gmod.outputs_real1)
metric_acc2.update([gmod.temp_label], gmod.outputs_real2)
# training results
log.info('epoch: %d, bce_loss is %.5f, adver_d1_loss is %.5f, l1_loss is %.5f, adver_d2_loss is %.5f'%(
epoch,gmod.loss[0, 0], gmod.loss[0, 1], gmod.loss[0, 2], gmod.loss[0, 3]))
if epoch % 500 == 0 or epoch == num_epoch - 1:
gmod.modG1.save_params('G1_epoch_{}.params'.format(epoch))
gmod.modG2.save_params('G2_epoch_{}.params'.format(epoch))
gmod.modD1.save_params('D1_epoch_{}.params'.format(epoch))
gmod.modD2.save_params('D2_epoch_{}.params'.format(epoch))
img_dir = glob.glob("test/*")
img_name = []
for i in img_dir:
value = i[i.find("test/") + 5:]
# print(value)
# img_name.append(value)
# dir_length = len(img_dir)
# for i in range(dir_length):
# img=cv2.imread(os.path.join(val_s_dir, i[i.find("test/") + 5:]))
img_gt = cv2.imread(os.path.join(
val_s_dir, i[i.find("test/") + 5:]))
# w = cv2.imread(os.path.join(val_s_dir, i[i.find("test/") + 5:]))[1]
# h = cv2.imread(os.path.join(val_s_dir, i[i.find("test/") + 5:]))[0]
data_s_tmp = cv2.resize(cv2.imread(os.path.join(
val_s_dir, i[i.find("test/") + 5:])) / 255.0, (width, height))
# data_m_tmp = cv2.resize(cv2.imread(os.path.join(val_m_dir, filenms_test[index2[i]]),
# cv2.IMREAD_GRAYSCALE), (width, height))
# data_g_tmp = cv2.resize(cv2.imread(os.path.join(
# val_g_dir, filenms_test[index2[i]])), (width, height))
# random crop
random_x = random.randint(0, data_s_tmp.shape[1] - height)
random_y = random.randint(0, data_s_tmp.shape[0] - width)
data_s[0, :, :, :] = np.transpose(
data_s_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
# data_m[0, 0, :, :] = data_m_tmp[random_y: random_y +
# width, random_x: random_x + height]
# data_g[0, :, :, :] = np.transpose(
# data_g_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
gmod.forward(mx.nd.array(data_s, ctx=context))
# cv2.imwrite('./val_result/sin_{}_{}.jpg'.format(epoch, i),
# np.round((np.transpose(data_s[0, :, :, :], (1, 2, 0))) * 255))
# cv2.imwrite('./val_result/min_{}_{}.jpg'.format(epoch, i),
# data_m_tmp)
# cv2.imwrite('./val_result/gin_{}_{}.jpg'.format(epoch, i),
# data_g_tmp)
# cv2.imwrite('./SBU/shadow_free/'+img_name[i],#shadow free
# np.clip(np.round((np.transpose(gmod.temp_outG2.asnumpy()[0, :, :, :], (1, 2, 0)) + 1) / 2 * 255), 0, 255).astype(np.uint8))
# cv2.imwrite('./SBU/shadow_mask/'+img_name[i],
# np.round((np.transpose(gmod.temp_outG1.asnumpy()[0, :, :, :], (1, 2, 0)) + 1) / 2 * 255))
# shadow_remove
# shadow_mask
img = np.clip(np.round(np.transpose(gmod.temp_outG2.asnumpy()[
0, :, :, :], (1, 2, 0)) * 255), 0, 255).astype(np.uint8)
img = cv2.resize(img, (img_gt.shape[1], img_gt.shape[0]))
img2 = np.round((np.transpose(gmod.temp_outG1.asnumpy()[0, :, :, :], (1, 2, 0)) * 255).astype(np.uint8))
img2 = cv2.resize(img2, (img_gt.shape[1], img_gt.shape[0]))
cv2.imwrite('result/shadow_remove/' + value,
img)
cv2.imwrite('result/shadow_mask/' + value,
img2)
|
[
"os.mkdir",
"numpy.abs",
"argparse.ArgumentParser",
"random.shuffle",
"glob.glob",
"mxnet.metric.CustomMetric",
"os.path.join",
"logging.FileHandler",
"random.randint",
"glog.info",
"cv2.imwrite",
"os.path.exists",
"numpy.transpose",
"mxnet.gpu",
"cv2.resize",
"mxnet.cpu",
"mxnet.nd.array",
"os.listdir",
"sys.exit",
"mxnet.init.Uniform",
"numpy.zeros",
"logging.info",
"logging.getLogger"
] |
[((369, 429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Shadow Removel Params"""'}), "(description='Shadow Removel Params')\n", (392, 429), False, 'import argparse\n'), ((1430, 1449), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1447, 1449), False, 'import logging\n'), ((1493, 1527), 'logging.FileHandler', 'logging.FileHandler', (['log_file_name'], {}), '(log_file_name)\n', (1512, 1527), False, 'import logging\n'), ((2082, 2124), 'os.path.join', 'os.path.join', (['args.dbprefix', "('%s_A' % mstr)"], {}), "(args.dbprefix, '%s_A' % mstr)\n", (2094, 2124), False, 'import os\n'), ((2158, 2200), 'os.path.join', 'os.path.join', (['args.dbprefix', "('%s_B' % mstr)"], {}), "(args.dbprefix, '%s_B' % mstr)\n", (2170, 2200), False, 'import os\n'), ((2234, 2276), 'os.path.join', 'os.path.join', (['args.dbprefix', "('%s_C' % mstr)"], {}), "(args.dbprefix, '%s_C' % mstr)\n", (2246, 2276), False, 'import os\n'), ((2299, 2335), 'os.path.join', 'os.path.join', (['args.valprefix', '"""test"""'], {}), "(args.valprefix, 'test')\n", (2311, 2335), False, 'import os\n'), ((2461, 2488), 'os.path.exists', 'os.path.exists', (['train_s_dir'], {}), '(train_s_dir)\n', (2475, 2488), False, 'import os\n'), ((2526, 2553), 'os.path.exists', 'os.path.exists', (['train_m_dir'], {}), '(train_m_dir)\n', (2540, 2553), False, 'import os\n'), ((2591, 2618), 'os.path.exists', 'os.path.exists', (['train_g_dir'], {}), '(train_g_dir)\n', (2605, 2618), False, 'import os\n'), ((2659, 2682), 'os.listdir', 'os.listdir', (['train_s_dir'], {}), '(train_s_dir)\n', (2669, 2682), False, 'import os\n'), ((2702, 2723), 'os.listdir', 'os.listdir', (['val_s_dir'], {}), '(val_s_dir)\n', (2712, 2723), False, 'import os\n'), ((3682, 3710), 'mxnet.metric.CustomMetric', 'mx.metric.CustomMetric', (['ferr'], {}), '(ferr)\n', (3704, 3710), True, 'import mxnet as mx\n'), ((3729, 3757), 'mxnet.metric.CustomMetric', 'mx.metric.CustomMetric', (['ferr'], {}), '(ferr)\n', (3751, 3757), True, 'import mxnet as mx\n'), ((1597, 1615), 'mxnet.gpu', 'mx.gpu', (['args.gpuid'], {}), '(args.gpuid)\n', (1603, 1615), True, 'import mxnet as mx\n'), ((1644, 1652), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (1650, 1652), True, 'import mxnet as mx\n'), ((1665, 1694), 'os.path.exists', 'os.path.exists', (['args.dbprefix'], {}), '(args.dbprefix)\n', (1679, 1694), False, 'import os\n'), ((1704, 1783), 'logging.info', 'logging.info', (['"""training data not exist, pls check if the file path is correct."""'], {}), "('training data not exist, pls check if the file path is correct.')\n", (1716, 1783), False, 'import logging\n'), ((1805, 1816), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1813, 1816), False, 'import sys\n'), ((1828, 1854), 'os.path.exists', 'os.path.exists', (['"""./result"""'], {}), "('./result')\n", (1842, 1854), False, 'import os\n'), ((1864, 1884), 'os.mkdir', 'os.mkdir', (['"""./result"""'], {}), "('./result')\n", (1872, 1884), False, 'import os\n'), ((1896, 1926), 'os.path.exists', 'os.path.exists', (['"""./val_result"""'], {}), "('./val_result')\n", (1910, 1926), False, 'import os\n'), ((1936, 1960), 'os.mkdir', 'os.mkdir', (['"""./val_result"""'], {}), "('./val_result')\n", (1944, 1960), False, 'import os\n'), ((1972, 2006), 'os.path.exists', 'os.path.exists', (['"""./trained_params"""'], {}), "('./trained_params')\n", (1986, 2006), False, 'import os\n'), ((2016, 2044), 'os.mkdir', 'os.mkdir', (['"""./trained_params"""'], {}), "('./trained_params')\n", (2024, 2044), False, 'import os\n'), ((3612, 3632), 
'mxnet.init.Uniform', 'mx.init.Uniform', (['(0.2)'], {}), '(0.2)\n', (3627, 3632), True, 'import mxnet as mx\n'), ((3873, 3894), 'random.shuffle', 'random.shuffle', (['index'], {}), '(index)\n', (3887, 3894), False, 'import random\n'), ((3903, 3925), 'random.shuffle', 'random.shuffle', (['index2'], {}), '(index2)\n', (3917, 3925), False, 'import random\n'), ((3943, 3983), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, width, height)'], {}), '((batch_size, 3, width, height))\n', (3951, 3983), True, 'import numpy as np\n'), ((4001, 4041), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, width, height)'], {}), '((batch_size, 1, width, height))\n', (4009, 4041), True, 'import numpy as np\n'), ((4059, 4099), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, width, height)'], {}), '((batch_size, 3, width, height))\n', (4067, 4099), True, 'import numpy as np\n'), ((6537, 6556), 'glob.glob', 'glob.glob', (['"""test/*"""'], {}), "('test/*')\n", (6546, 6556), False, 'import glob\n'), ((5978, 6173), 'glog.info', 'log.info', (["('epoch: %d, bce_loss is %.5f, adver_d1_loss is %.5f, l1_loss is %.5f, adver_d2_loss is %.5f'\n % (epoch, gmod.loss[0, 0], gmod.loss[0, 1], gmod.loss[0, 2], gmod.loss\n [0, 3]))"], {}), "(\n 'epoch: %d, bce_loss is %.5f, adver_d1_loss is %.5f, l1_loss is %.5f, adver_d2_loss is %.5f'\n % (epoch, gmod.loss[0, 0], gmod.loss[0, 1], gmod.loss[0, 2], gmod.loss\n [0, 3]))\n", (5986, 6173), True, 'import glog as log\n'), ((7683, 7730), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[1] - height)'], {}), '(0, data_s_tmp.shape[1] - height)\n', (7697, 7730), False, 'import random\n'), ((7754, 7800), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[0] - width)'], {}), '(0, data_s_tmp.shape[0] - width)\n', (7768, 7800), False, 'import random\n'), ((7834, 7931), 'numpy.transpose', 'np.transpose', (['data_s_tmp[random_y:random_y + width, random_x:random_x + height, :]', '(2, 0, 1)'], {}), '(data_s_tmp[random_y:random_y + width, random_x:random_x +\n height, :], (2, 0, 1))\n', (7846, 7931), True, 'import numpy as np\n'), ((9382, 9433), 'cv2.resize', 'cv2.resize', (['img', '(img_gt.shape[1], img_gt.shape[0])'], {}), '(img, (img_gt.shape[1], img_gt.shape[0]))\n', (9392, 9433), False, 'import cv2\n'), ((9572, 9624), 'cv2.resize', 'cv2.resize', (['img2', '(img_gt.shape[1], img_gt.shape[0])'], {}), '(img2, (img_gt.shape[1], img_gt.shape[0]))\n', (9582, 9624), False, 'import cv2\n'), ((9638, 9687), 'cv2.imwrite', 'cv2.imwrite', (["('result/shadow_remove/' + value)", 'img'], {}), "('result/shadow_remove/' + value, img)\n", (9649, 9687), False, 'import cv2\n'), ((9725, 9773), 'cv2.imwrite', 'cv2.imwrite', (["('result/shadow_mask/' + value)", 'img2'], {}), "('result/shadow_mask/' + value, img2)\n", (9736, 9773), False, 'import cv2\n'), ((1187, 1215), 'numpy.abs', 'np.abs', (['(label - (pred > 0.5))'], {}), '(label - (pred > 0.5))\n', (1193, 1215), True, 'import numpy as np\n'), ((4854, 4901), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[1] - height)'], {}), '(0, data_s_tmp.shape[1] - height)\n', (4868, 4901), False, 'import random\n'), ((4929, 4975), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[0] - width)'], {}), '(0, data_s_tmp.shape[0] - width)\n', (4943, 4975), False, 'import random\n'), ((5013, 5110), 'numpy.transpose', 'np.transpose', (['data_s_tmp[random_y:random_y + width, random_x:random_x + height, :]', '(2, 0, 1)'], {}), '(data_s_tmp[random_y:random_y + width, random_x:random_x +\n height, :], (2, 0, 1))\n', (5025, 5110), True, 
'import numpy as np\n'), ((5320, 5417), 'numpy.transpose', 'np.transpose', (['data_g_tmp[random_y:random_y + width, random_x:random_x + height, :]', '(2, 0, 1)'], {}), '(data_g_tmp[random_y:random_y + width, random_x:random_x +\n height, :], (2, 0, 1))\n', (5332, 5417), True, 'import numpy as np\n'), ((5461, 5493), 'mxnet.nd.array', 'mx.nd.array', (['data_s'], {'ctx': 'context'}), '(data_s, ctx=context)\n', (5472, 5493), True, 'import mxnet as mx\n'), ((5495, 5527), 'mxnet.nd.array', 'mx.nd.array', (['data_m'], {'ctx': 'context'}), '(data_m, ctx=context)\n', (5506, 5527), True, 'import mxnet as mx\n'), ((5546, 5578), 'mxnet.nd.array', 'mx.nd.array', (['data_g'], {'ctx': 'context'}), '(data_g, ctx=context)\n', (5557, 5578), True, 'import mxnet as mx\n'), ((8287, 8319), 'mxnet.nd.array', 'mx.nd.array', (['data_s'], {'ctx': 'context'}), '(data_s, ctx=context)\n', (8298, 8319), True, 'import mxnet as mx\n'), ((4242, 4303), 'os.path.join', 'os.path.join', (['train_s_dir', 'filenms[index[i * batch_size + j]]'], {}), '(train_s_dir, filenms[index[i * batch_size + j]])\n', (4254, 4303), False, 'import os\n'), ((4403, 4464), 'os.path.join', 'os.path.join', (['train_m_dir', 'filenms[index[i * batch_size + j]]'], {}), '(train_m_dir, filenms[index[i * batch_size + j]])\n', (4415, 4464), False, 'import os\n'), ((4689, 4750), 'os.path.join', 'os.path.join', (['train_g_dir', 'filenms[index[i * batch_size + j]]'], {}), '(train_g_dir, filenms[index[i * batch_size + j]])\n', (4701, 4750), False, 'import os\n')]
|
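A detail worth noting in the training script above: every image is first resized to exactly (width, height), so data_s_tmp.shape[1] - height is 0 and random.randint(0, 0) always returns 0; the "random crop" is effectively a no-op. The pattern only augments when the resize target is larger than the crop, e.g. (toy sizes, numpy only):

import random
import numpy as np

img = np.zeros((288, 288, 3))                 # resized larger than the crop...
crop = 256
x = random.randint(0, img.shape[1] - crop)  # ...so the offsets can actually vary
y = random.randint(0, img.shape[0] - crop)
patch = img[y:y + crop, x:x + crop, :]
chw = np.transpose(patch, (2, 0, 1))          # HWC -> CHW, the layout fed to the nets
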
from django.contrib import admin
from .models import Setting
class SettingAdmin(admin.ModelAdmin):
list_display = ('site_title', 'last_update')
def has_add_permission(self, request):
        # Singleton pattern: allow adding only while no Setting row exists
        return Setting.objects.count() == 0
admin.site.register(Setting, SettingAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((306, 348), 'django.contrib.admin.site.register', 'admin.site.register', (['Setting', 'SettingAdmin'], {}), '(Setting, SettingAdmin)\n', (325, 348), False, 'from django.contrib import admin\n')]
|
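has_add_permission above turns Setting into a singleton; the pattern is often paired with a delete guard so the lone row cannot be removed either (a sketch, not part of the original admin):

from django.contrib import admin
from .models import Setting

class SingletonSettingAdmin(admin.ModelAdmin):
    list_display = ('site_title', 'last_update')

    def has_add_permission(self, request):
        return not Setting.objects.exists()   # only the first row may be added

    def has_delete_permission(self, request, obj=None):
        return False                            # the settings row is permanent
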
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import unittest
from databp.core.dmo import BluepagesResultMapper
class CreateLanguageDictTest(unittest.TestCase):
tests = [
{
"original": {"OFFICE": "altERNatE"},
"expected": {'office': 'NA'}
},
{
"original": {"OFFICE": "mobile"},
"expected": {'office': 'MOBILE'}
},
{
"original": {"OFFICE": "home"},
"expected": {'office': 'MOBILE'}
}
]
def test_1(self):
for test in self.tests:
actual_result = BluepagesResultMapper(test["original"]).process()
            self.assertEqual(actual_result, test["expected"])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"databp.core.dmo.BluepagesResultMapper"
] |
[((837, 852), 'unittest.main', 'unittest.main', ([], {}), '()\n', (850, 852), False, 'import unittest\n'), ((687, 726), 'databp.core.dmo.BluepagesResultMapper', 'BluepagesResultMapper', (["test['original']"], {}), "(test['original'])\n", (708, 726), False, 'from databp.core.dmo import BluepagesResultMapper\n')]
|
import tensorflow as tf
import tensorflow.keras.backend as K
import unittest
import pytest
from aleatoric_log_loss import AleatoricLogLoss
from aleatoric_reg_loss import AleatoricRegLoss
class LogLossTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def init_params(self):
self.params = {
"batch_size" : 16,
"width" : 32,
"height" : 32,
"n_classes" : 2,
"n_samples" : 20,
}
self.loss_fun = AleatoricLogLoss(self.params["n_samples"])
def create_rand_norm(self):
return K.random_normal((self.params["batch_size"],
self.params["width"],
self.params["height"],
self.params["n_classes"]))
def test1_loss_shape(self):
y_true = self.create_rand_norm()
y_pred = (y_true, y_true)
loss = self.loss_fun(y_true, y_pred)
assert loss.shape == self.params["batch_size"]
def test2_loss_comparison(self):
y_true = self.create_rand_norm()
y_pred = (y_true, y_true)
loss1 = K.sum(self.loss_fun(y_true, y_pred))
y_pred = (1-y_true, y_true)
loss2 = K.sum(self.loss_fun(y_true, y_pred))
assert loss1 < loss2
class RegLossTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def init_params(self):
self.params = {
"batch_size" : 16,
"dims" : [10,2,1]
}
self.loss_fun = AleatoricRegLoss()
def create_rand_norm(self):
return K.random_normal((self.params["batch_size"],
*self.params["dims"]))
def test1_loss_shape(self):
y_true = self.create_rand_norm()
y_pred = (y_true, y_true)
loss = self.loss_fun(y_true, y_pred)
assert loss.shape == self.params["batch_size"]
def test2_loss_comparison(self):
y_true = self.create_rand_norm()
y_pred = (y_true, y_true)
loss1 = K.sum(self.loss_fun(y_true, y_pred))
y_pred = (1-y_true, y_true)
loss2 = K.sum(self.loss_fun(y_true, y_pred))
assert loss1 < loss2
|
[
"aleatoric_log_loss.AleatoricLogLoss",
"tensorflow.keras.backend.random_normal",
"pytest.fixture",
"aleatoric_reg_loss.AleatoricRegLoss"
] |
[((233, 261), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (247, 261), False, 'import pytest\n'), ((1331, 1359), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1345, 1359), False, 'import pytest\n'), ((490, 532), 'aleatoric_log_loss.AleatoricLogLoss', 'AleatoricLogLoss', (["self.params['n_samples']"], {}), "(self.params['n_samples'])\n", (506, 532), False, 'from aleatoric_log_loss import AleatoricLogLoss\n'), ((581, 701), 'tensorflow.keras.backend.random_normal', 'K.random_normal', (["(self.params['batch_size'], self.params['width'], self.params['height'],\n self.params['n_classes'])"], {}), "((self.params['batch_size'], self.params['width'], self.\n params['height'], self.params['n_classes']))\n", (596, 701), True, 'import tensorflow.keras.backend as K\n'), ((1506, 1524), 'aleatoric_reg_loss.AleatoricRegLoss', 'AleatoricRegLoss', ([], {}), '()\n', (1522, 1524), False, 'from aleatoric_reg_loss import AleatoricRegLoss\n'), ((1573, 1639), 'tensorflow.keras.backend.random_normal', 'K.random_normal', (["(self.params['batch_size'], *self.params['dims'])"], {}), "((self.params['batch_size'], *self.params['dims']))\n", (1588, 1639), True, 'import tensorflow.keras.backend as K\n')]
|
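The AleatoricLogLoss and AleatoricRegLoss implementations under test are not shown here. Losses of this kind usually follow the heteroscedastic objective of Kendall & Gal (2017), where the network predicts a mean mu and a log-variance s and the loss is 0.5 * exp(-s) * (y - mu)^2 + 0.5 * s; a sketch in the same Keras-backend style (not necessarily this repo's exact code):

import tensorflow.keras.backend as K

def aleatoric_reg_loss(y_true, mu, log_var):
    # attenuated squared error plus a penalty that stops log_var growing unboundedly
    per_elem = 0.5 * K.exp(-log_var) * K.square(y_true - mu) + 0.5 * log_var
    return K.mean(per_elem, axis=list(range(1, K.ndim(per_elem))))
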
#!/usr/bin/env python3
import vapoursynth as vs
import audiocutter
from subprocess import call
import shutil
import os
core = vs.core
ts_in = r"F:\Convert\[BDMV][180926][Gundam Build Divers][BD-BOX1]\GUNDAM_BUILD_DIVERS_BDBOX1_D3\BDMV\STREAM\00011.m2ts"
src = core.lsmas.LWLibavSource(ts_in)
ac = audiocutter.AudioCutter()
vid = ac.split(src, [(24,2183)])
ac.ready_qp_and_chapters(vid)
vid.set_output(0)
if __name__ == "__main__":
ac.cut_audio('track1_jpn.aac', audio_source=r'F:\Encoding\Audio\qaac_2.64\track1_jpn.aac')
|
[
"audiocutter.AudioCutter"
] |
[((300, 325), 'audiocutter.AudioCutter', 'audiocutter.AudioCutter', ([], {}), '()\n', (323, 325), False, 'import audiocutter\n')]
|
from django.core.handlers.wsgi import WSGIRequest
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views.generic import DetailView, ListView
from academic_helper.models import Course, floatformat
from academic_helper.utils.logger import log
from academic_helper.views.basic import ExtendedViewMixin
class CourseDetailsView(DetailView, ExtendedViewMixin):
model = Course
template_name = "courses/course-details.html"
@property
def title(self) -> str:
return f"Course {self.object.course_number}"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["semester_rating_description"] = "כמה עמוס הקורס במהלך הסמסטר? כמה קשים שיעורי הבית? (1-קשה, 5-קל)"
context["semester_rating_title"] = "סמסטר"
context["exams_rating_description"] = "כמה קשה הבחינה/פרוייקט גמר? (1-קשה, 5-קל)"
context["exams_rating_title"] = "בחינה"
context["interest_rating_description"] = "כמה מעניין הקורס? כמה כיף? (1-לא מעניין, 5-מעניין)"
context["interest_rating_title"] = "עניין"
return context
@property
def object(self):
query = Course.objects.filter(course_number=self.kwargs["course_number"])
return get_object_or_404(query)
class CoursesView(ExtendedViewMixin, ListView):
model = Course
template_name = "courses/courses.html"
@property
def title(self) -> str:
return "All Courses"
@property
def object_list(self):
return Course.objects.all()[:20]
def post(self, request: WSGIRequest, *args, **kwargs):
if not request.is_ajax():
raise NotImplementedError()
text = request.POST["free_text"]
school = request.POST["school"]
faculty = request.POST["faculty"]
log.info(f"Searching for {text}, school {school}, faculty {faculty}...")
queryset = Course.find_by(text, school, faculty)[:35]
result = [c.as_dict for c in queryset]
result.sort(key=lambda c: c["score"], reverse=True)
for course in result:
course["url"] = reverse("course-details", args=[course["course_number"]])
course["score"] = floatformat(course["score"])
return JsonResponse({"courses": result})
|
[
"academic_helper.models.Course.objects.filter",
"academic_helper.models.Course.objects.all",
"academic_helper.models.floatformat",
"django.http.JsonResponse",
"academic_helper.utils.logger.log.info",
"django.shortcuts.get_object_or_404",
"django.urls.reverse",
"academic_helper.models.Course.find_by"
] |
[((1232, 1297), 'academic_helper.models.Course.objects.filter', 'Course.objects.filter', ([], {'course_number': "self.kwargs['course_number']"}), "(course_number=self.kwargs['course_number'])\n", (1253, 1297), False, 'from academic_helper.models import Course, floatformat\n'), ((1313, 1337), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['query'], {}), '(query)\n', (1330, 1337), False, 'from django.shortcuts import get_object_or_404\n'), ((1870, 1942), 'academic_helper.utils.logger.log.info', 'log.info', (['f"""Searching for {text}, school {school}, faculty {faculty}..."""'], {}), "(f'Searching for {text}, school {school}, faculty {faculty}...')\n", (1878, 1942), False, 'from academic_helper.utils.logger import log\n'), ((2302, 2335), 'django.http.JsonResponse', 'JsonResponse', (["{'courses': result}"], {}), "({'courses': result})\n", (2314, 2335), False, 'from django.http import JsonResponse\n'), ((1579, 1599), 'academic_helper.models.Course.objects.all', 'Course.objects.all', ([], {}), '()\n', (1597, 1599), False, 'from academic_helper.models import Course, floatformat\n'), ((1962, 1999), 'academic_helper.models.Course.find_by', 'Course.find_by', (['text', 'school', 'faculty'], {}), '(text, school, faculty)\n', (1976, 1999), False, 'from academic_helper.models import Course, floatformat\n'), ((2170, 2227), 'django.urls.reverse', 'reverse', (['"""course-details"""'], {'args': "[course['course_number']]"}), "('course-details', args=[course['course_number']])\n", (2177, 2227), False, 'from django.urls import reverse\n'), ((2258, 2286), 'academic_helper.models.floatformat', 'floatformat', (["course['score']"], {}), "(course['score'])\n", (2269, 2286), False, 'from academic_helper.models import Course, floatformat\n')]
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.controller import Controller
from netforce.model import get_model,clear_cache,fields
from netforce import database
from netforce import template
from netforce.action import get_action
from netforce.utils import get_data_path,set_data_path
from netforce.database import get_connection,get_active_db
from netforce import config
from netforce import static
from netforce import access
import json
from pprint import pprint
import os
import base64
import urllib
import netforce
import sys
import tempfile
from lxml import etree
import time
def parse_args(handler):
res={}
for path in handler.request.arguments:
for v in handler.get_arguments(path):
if v=="":
continue
set_data_path(res,path,v)
return res
class Export(Controller): # TODO: cleanup
_path="/export"
def get(self):
db=get_connection()
if db:
db.begin()
try:
clear_cache()
ctx={
"request": self.request,
"request_handler": self,
"dbname": get_active_db(),
}
data=self.get_cookies()
if data:
ctx.update(data)
action_vals=parse_args(self)
ctx.update(action_vals)
name=action_vals.get("name")
if name:
action_ctx=action_vals
action=get_action(name,action_ctx)
for k,v in action.items():
if k not in action_vals:
action_vals[k]=v
if "context" in action_vals:
ctx.update(action_vals["context"])
action_vals["context"]=ctx
self.clear_flash()
type=action_vals.get("type","view")
if type=="export":
print("XXX export")
model=action_vals["model"]
m=get_model(model)
ids=action_vals.get("ids")
if ids:
if ids[0]=="[": # XXX
ids=ids[1:-1]
ids=[int(x) for x in ids.split(",")]
else:
condition=action_vals.get("condition")
if condition:
print("condition",condition)
condition=json.loads(condition)
ids=m.search(condition)
else:
ids=m.search([]) # XXX
ctx=action_vals.copy()
if ctx.get("export_fields"):
if isinstance(ctx["export_fields"],str):
ctx["export_fields"]=json.loads(ctx["export_fields"])
else:
try:
view=get_xml_view(model=model,type="export")
doc=etree.fromstring(view["layout"])
field_names=[]
for el in doc.iterfind(".//field"):
name=el.attrib["name"]
field_names.append(name)
ctx["export_fields"]=field_names
except: # default export fields
req_field_names=[]
other_field_names=[]
for n,f in m._fields.items():
if isinstance(f,(fields.One2Many,fields.Many2Many)):
continue
if isinstance(f,fields.Json):
continue
if not f.store and not f.function:
continue
if f.required:
req_field_names.append(n)
else:
other_field_names.append(n)
ctx["export_fields"]=sorted(req_field_names)+sorted(other_field_names)
data=m.export_data(ids,context=ctx)
db=get_connection()
if db:
db.commit()
filename=action_vals.get("filename","export.csv")
self.set_header("Content-Disposition","attachment; filename=%s"%filename)
self.set_header("Content-Type","text/csv")
self.write(data)
else:
raise Exception("Invalid action type: %s"%type)
except Exception as e:
import traceback
traceback.print_exc(file=sys.stdout)
db=get_connection()
if db:
db.rollback()
raise e
Export.register()
|
[
"netforce.database.get_connection",
"traceback.print_exc",
"json.loads",
"netforce.database.get_active_db",
"lxml.etree.fromstring",
"netforce.model.get_model",
"netforce.action.get_action",
"netforce.model.clear_cache",
"netforce.utils.set_data_path"
] |
[((1975, 1991), 'netforce.database.get_connection', 'get_connection', ([], {}), '()\n', (1989, 1991), False, 'from netforce.database import get_connection, get_active_db\n'), ((1840, 1867), 'netforce.utils.set_data_path', 'set_data_path', (['res', 'path', 'v'], {}), '(res, path, v)\n', (1853, 1867), False, 'from netforce.utils import get_data_path, set_data_path\n'), ((2055, 2068), 'netforce.model.clear_cache', 'clear_cache', ([], {}), '()\n', (2066, 2068), False, 'from netforce.model import get_model, clear_cache, fields\n'), ((2195, 2210), 'netforce.database.get_active_db', 'get_active_db', ([], {}), '()\n', (2208, 2210), False, 'from netforce.database import get_connection, get_active_db\n'), ((2517, 2545), 'netforce.action.get_action', 'get_action', (['name', 'action_ctx'], {}), '(name, action_ctx)\n', (2527, 2545), False, 'from netforce.action import get_action\n'), ((3012, 3028), 'netforce.model.get_model', 'get_model', (['model'], {}), '(model)\n', (3021, 3028), False, 'from netforce.model import get_model, clear_cache, fields\n'), ((5118, 5134), 'netforce.database.get_connection', 'get_connection', ([], {}), '()\n', (5132, 5134), False, 'from netforce.database import get_connection, get_active_db\n'), ((5592, 5628), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (5611, 5628), False, 'import traceback\n'), ((5644, 5660), 'netforce.database.get_connection', 'get_connection', ([], {}), '()\n', (5658, 5660), False, 'from netforce.database import get_connection, get_active_db\n'), ((3435, 3456), 'json.loads', 'json.loads', (['condition'], {}), '(condition)\n', (3445, 3456), False, 'import json\n'), ((3768, 3800), 'json.loads', 'json.loads', (["ctx['export_fields']"], {}), "(ctx['export_fields'])\n", (3778, 3800), False, 'import json\n'), ((3945, 3977), 'lxml.etree.fromstring', 'etree.fromstring', (["view['layout']"], {}), "(view['layout'])\n", (3961, 3977), False, 'from lxml import etree\n')]
|
"""Module with various custom loss ops."""
import tensorflow as tf
def smooth_l1_loss(predicted: tf.Tensor, expected: tf.Tensor) -> tf.Tensor:
"""
Calculate piece-wise smooth L1 loss on the given tensors.
Reference: `Fast R-CNN <https://arxiv.org/pdf/1504.08083.pdf>`_
:param predicted: predicted values tensor
:param expected: expected values tensor with the same shape as the ``predicted`` tensor
:return: piece-wise smooth L1 loss
"""
abs_diff = tf.abs(predicted - expected)
return tf.where(tf.less(abs_diff, 1), 0.5 * tf.square(abs_diff), abs_diff - 0.5)
|
[
"tensorflow.less",
"tensorflow.abs",
"tensorflow.square"
] |
[((485, 513), 'tensorflow.abs', 'tf.abs', (['(predicted - expected)'], {}), '(predicted - expected)\n', (491, 513), True, 'import tensorflow as tf\n'), ((534, 554), 'tensorflow.less', 'tf.less', (['abs_diff', '(1)'], {}), '(abs_diff, 1)\n', (541, 554), True, 'import tensorflow as tf\n'), ((562, 581), 'tensorflow.square', 'tf.square', (['abs_diff'], {}), '(abs_diff)\n', (571, 581), True, 'import tensorflow as tf\n')]
|
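A quick numeric check of the piece-wise behaviour: inside |x| < 1 the loss is quadratic (0.5 * x**2), outside it is linear (|x| - 0.5):

import tensorflow as tf

pred = tf.constant([0.5, 3.0])
true = tf.constant([0.0, 0.0])
print(smooth_l1_loss(pred, true).numpy())  # -> [0.125 2.5]
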
from setuptools import setup, find_packages
setup(name='segmentation-mibi',
version='0.2.3',
packages=find_packages(),
)
|
[
"setuptools.find_packages"
] |
[((114, 129), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (127, 129), False, 'from setuptools import setup, find_packages\n')]
|
import os
import zlib
import math
import struct
import copy
import chromosome.gene as gene
import chromosome.serializer as serializer
import chromosome.deserializer as deserializer
PNG_SIGNATURE = '\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
class PNGGene(gene.AbstractGene):
'''
    The PNGGene represents a PNG chunk.
    Using the PNGDeserializer, we read the contents of a PNG file
    and hold them in memory. Each PNG chunk corresponds to a PNGGene
object. The contents of the PNG chunk are fuzzed in memory. We have
the capability to fuzz specific parts of the chunk's contents. For
example, it is useless to fuzz the CRC field of a PNG chunk.
'''
def __init__(self, chunk):
super(PNGGene, self).__init__()
self.length = chunk['length']
self.name = chunk['name']
self.data = chunk['data']
self.crc = chunk['crc']
def anomaly(self):
'''
If anomaly returns True, then the current
gene should not be fuzzed.
'''
if self.length == 0:
return True
else:
return False
def is_equal(self, other):
'''
To identify PNG chunks of same type.
'''
if not isinstance(other, self.__class__):
return False
if self.name == other.name and PNGGene.asciiname(self.name) != 'IEND':
return True
else:
return False
    # This function must be implemented in order for the gene to be serialized
def serialize(self):
'''
This function is called to serialize in-memory data of a PNG chunk.
'''
self.fix_crc()
bytestring = ''
chunk_data = super(PNGGene, self).serialize()
bytestring += struct.pack('>I', len(chunk_data))
bytestring += struct.pack('>I', self.name)
bytestring += chunk_data
bytestring += struct.pack('>I', self.crc)
return bytestring
def fix_crc(self):
'''
re-calculates the Gene's CRC checksum.
'''
checksum = zlib.crc32(
struct.pack('>I', self.name)
)
self.crc = zlib.crc32(
self.data, checksum
) & 0xffffffff
@staticmethod
def asciiname(chunkname):
'''
Converts a chunk name to ascii and returns it.
'''
return '%c%c%c%c' % (
(chunkname >> 24) & 0xFF,
(chunkname >> 16) & 0xFF,
(chunkname >> 8) & 0xFF,
(chunkname & 0xFF)
)
class PNGSerializer(serializer.BaseSerializer):
'''
The PNG Serializer.
This class is used to serialize a tree of PNGGenes into a file. Since
PNG is just a chunk-based format, there is no a tree of genes, but
a list of genes. During the serialization, the CRC of each chunk is
fixed and some chunks, which are required to be compressed, are
deflated using the zlib.
'''
def __init__(self):
super(PNGSerializer, self).__init__()
@staticmethod
def deflate_idat_chunks(genes):
'''
deflate_idat_chunks takes as input a number of genes. Data stored
only in IDAT genes is collected in a bytestring and it is compressed
using the zlib module. Then the compressed bytestring is divided
again and copied in genes. This functions returns a list with the
deflated genes. Keep in mind that this function is working with a
deep copy of the genes given as input. Hence, do not worry for your
data in the genes passed as argument.
'''
indices = list()
deflated_genes = copy.deepcopy(genes)
datastream = str()
for idx, curr_gene in enumerate(genes):
if PNGGene.asciiname(curr_gene.name) == 'IDAT':
indices.append(idx)
datastream += curr_gene.get_data()
comp = zlib.compress(datastream)
idatno = len(indices)
if idatno > 0:
chunk_len = int(math.ceil(float(len(comp)) / float(idatno)))
for cnt, index in enumerate(indices):
start = cnt * chunk_len
if index != indices[-1]:
deflated_genes[index].set_data(
comp[start : start+chunk_len])
else:
deflated_genes[index].set_data(
comp[start : ]
)
deflated_genes[index].length = len(
deflated_genes[index].get_data()
)
return deflated_genes
def serialize(self, genes):
'''
This method serializes each one of the genes given as argument. The
serialized bytestring of each of the genes is appended in a buffer
that contains the PNG header. The bytestring of the whole PNG
is returned.
'''
bytestring = PNG_SIGNATURE
deflated_genes = PNGSerializer.deflate_idat_chunks(genes)
bytestring += super(PNGSerializer, self).serialize(deflated_genes)
return bytestring
class PNGDeserializer(deserializer.BaseDeserializer):
'''
A parser for PNG files.
This class is used to parse the chunks of a PNG file and construct
PNGGene objects with the contents of the chunks. Moreover, the
deserializer will perform decompression to the zipped data in order to
fuzz them directly in memory.
'''
fsize = None
fstream = None
chunks = None
def __init__(self):
super(PNGDeserializer, self).__init__()
self.fsize = 0
self.fstream = None
self.chunks = list()
def deserialize(self, filename):
'''
Parses the chosen PNG file.
'''
# initialize input file
genes = list()
# open and read PNG header
self._prepare(filename)
self._parse_signature()
# parse data chunks
for chunk in self._parse_chunks():
self.chunks.append(chunk)
# decompress IDAT chunks (zlib streams)
self._inflate_idat_chunks()
# initialize gene list with deflated chunks
for chunk in self.chunks:
genes.append(PNGGene(chunk))
self.fstream.close()
self.fsize = 0
self.chunks = list()
return genes
def _inflate_idat_chunks(self):
'''
        This method takes all IDAT PNG chunks that were read and decompresses
        their data using the zlib module.
'''
datastream = str()
indices = list()
for idx, chunk in enumerate(self.chunks):
if PNGGene.asciiname(chunk['name']) == 'IDAT':
datastream += chunk['data']
indices.append(idx)
        if not indices:
            # no IDAT chunks found; nothing to inflate
            return
        decomp = zlib.decompress(datastream)
        idatno = len(indices)
        chunk_len = int(math.ceil(float(len(decomp)) / float(idatno)))
for cnt, index in enumerate(indices):
start = cnt * chunk_len
if index != indices[-1]:
self.chunks[index]['data'] = decomp[start : start + chunk_len]
else:
self.chunks[index]['data'] = decomp[start:]
self.chunks[index]['length'] = len(self.chunks[index]['data'])
def _parse_signature(self):
'''
The first 8 bytes of every PNG image must be the signature.
'''
signature = self.fstream.read(8)
assert len(signature) == 8
def _parse_chunks(self):
'''
A generator that parses all chunks of the chosen PNG image.
'''
index = 0
while self.fsize > self.fstream.tell():
index += 1
chunk = dict()
chunk['index'] = index
chunk['length'], = struct.unpack('>I', self.fstream.read(4))
chunk['name'], = struct.unpack('>I', self.fstream.read(4))
chunk['data'] = self.fstream.read(chunk['length'])
chunk['crc'], = struct.unpack('>I', self.fstream.read(4))
yield chunk
def _get_filesize(self):
'''
Returns the file size.
'''
where = self.fstream.tell()
self.fstream.seek(0, 2)
size = self.fstream.tell()
self.fstream.seek(where, 0)
return size
def _prepare(self, filename):
'''
Preparation before parsing.
'''
if not os.path.isfile(filename):
            raise IOError('%s is not a regular file.' % filename)
self.chunks = list()
self.fstream = open(filename, 'rb')
self.fsize = self._get_filesize()
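# Hedged usage sketch (added; 'input.png' and 'fuzzed.png' are assumed file
# names, and the code targets the same Python 2 string semantics as the module):
if __name__ == '__main__':
    genes = PNGDeserializer().deserialize('input.png')
    output = PNGSerializer().serialize(genes)
    with open('fuzzed.png', 'wb') as handle:
        handle.write(output)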
|
[
"copy.deepcopy",
"struct.pack",
"zlib.compress",
"os.path.isfile",
"zlib.decompress",
"zlib.crc32"
] |
[((1823, 1851), 'struct.pack', 'struct.pack', (['""">I"""', 'self.name'], {}), "('>I', self.name)\n", (1834, 1851), False, 'import struct\n'), ((1907, 1934), 'struct.pack', 'struct.pack', (['""">I"""', 'self.crc'], {}), "('>I', self.crc)\n", (1918, 1934), False, 'import struct\n'), ((3727, 3747), 'copy.deepcopy', 'copy.deepcopy', (['genes'], {}), '(genes)\n', (3740, 3747), False, 'import copy\n'), ((3987, 4012), 'zlib.compress', 'zlib.compress', (['datastream'], {}), '(datastream)\n', (4000, 4012), False, 'import zlib\n'), ((6914, 6941), 'zlib.decompress', 'zlib.decompress', (['datastream'], {}), '(datastream)\n', (6929, 6941), False, 'import zlib\n'), ((2104, 2132), 'struct.pack', 'struct.pack', (['""">I"""', 'self.name'], {}), "('>I', self.name)\n", (2115, 2132), False, 'import struct\n'), ((2166, 2197), 'zlib.crc32', 'zlib.crc32', (['self.data', 'checksum'], {}), '(self.data, checksum)\n', (2176, 2197), False, 'import zlib\n'), ((8545, 8569), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (8559, 8569), False, 'import os\n')]
|
"""
recreating the 'cat' command line
$ cat file.txt ---read the file
$ cat file.txt sometext.txt othertext.txt ---read from all the textfile
$ cat file.txt sometext.txt othertext.txt > newtext.txt ---reads all file and copy to newtext.txt
same as mine:
$ python cat.py file.txt ---read the file
$ python cat.py sometext.txt othertext.txt ---read from all the textfile
$ python cat.py sometext.txt othertext.txt > newtext.txt ---reads all file and copy to newtext.txt
and it has an optional argument:
-n --number
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filename", metavar='F', type=str, nargs='+', help="get the filename")
parser.add_argument("-n", "--number", action="store_true", help="indicate it's a number")
args = parser.parse_args()
print(">>>Parser argument: ", args)
line_number = 1
for file in args.filename:  # loop through all the files in the list
    with open(file) as text:  # ensure the file handle is closed
        if args.number:
            for line in text.readlines():
                # 'line' keeps its trailing newline, so suppress print's own
                print(f'\t{line_number}\t{line}', end='')
                line_number += 1
        else:
            print(text.read())
|
[
"argparse.ArgumentParser"
] |
[((587, 612), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (610, 612), False, 'import argparse\n')]
|
from app import app
import os
if __name__ == "__main__":
app.jinja_env.auto_reload = True
app.config["TEMPLATES_AUTO_RELOAD"] = True
app.run(
debug=True,
port=int(os.environ.get("PORT", "3000")),
host="0.0.0.0"
)
|
[
"os.environ.get"
] |
[((192, 222), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '"""3000"""'], {}), "('PORT', '3000')\n", (206, 222), False, 'import os\n')]
|
import pyzbar.pyzbar as pyzbar
import cv2
import math
global image
class VideoCamera(object):
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
self.video = cv2.VideoCapture(2)
# If you decide to use video.mp4, you must have this file in the folder
# as the main.py.
# self.video = cv2.VideoCapture('video.mp4')
def __del__(self):
self.video.release()
def get_frame(self):
success, imag = self.video.read()
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
ret, jpeg = cv2.imencode('.jpg', imag)
return jpeg.tobytes()
def get_im(self):
_,im = self.video.read()
a = []
targetx = 0
targety = 0
decodedObjects = pyzbar.decode(im)
if len(decodedObjects) != 2:
a.append('0')
a.append('0')
else:
if len(decodedObjects) == 2:
for obj in decodedObjects:
data = str(obj.data)
if data == 'Robot':
points = obj.polygon
x1 = points[0][0]
y1 = points[0][1]
x2 = points[2][0]
y2 = points[2][1]
x = (x1+x2)/2
y = (y1+y2)/2
x3 = points[1][0]
y3 = points[1][1]
if data == 'robot2':
points = obj.polygon
x1 = points[0][0]
y1 = points[0][1]
x2 = points[2][0]
y2 = points[2][1]
xx = (x1+x2)/2
yy = (y1+y2)/2
robotx = xx-x
roboty = yy-y
userx = targetx - x
usery = targety - y
                magrobot = math.sqrt(robotx*robotx+roboty*roboty)
                maguser = math.sqrt(userx*userx+usery*usery)
                # dot product over magnitudes gives the cosine of the angle
                # between the robot heading and the robot-to-target direction
                angle = (userx*robotx+usery*roboty)/(maguser*magrobot)
                # cv2.line expects integer pixel coordinates
                cv2.line(im, (int(x), int(y)), (int(xx), int(yy)), (255, 0, 0), 5)
                cv2.line(im, (int(x), int(y)), (targetx, targety), (255, 0, 0), 5)
print(robotx)
print(roboty)
print(userx)
print(usery)
print(angle)
a.append(angle)
return a
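# Hedged usage sketch (added; camera device 2 must exist, and QR payloads
# 'Robot' and 'robot2' must both be visible to get a meaningful value):
#   cam = VideoCamera()
#   jpeg_bytes = cam.get_frame()              # MJPEG frame for streaming
#   cosine = cam.get_im()[-1]                 # cosine of heading/target angle
#   angle_deg = math.degrees(math.acos(float(cosine)))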
|
[
"cv2.line",
"math.sqrt",
"pyzbar.pyzbar.decode",
"cv2.VideoCapture",
"cv2.imencode"
] |
[((322, 341), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(2)'], {}), '(2)\n', (338, 341), False, 'import cv2\n'), ((835, 861), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'imag'], {}), "('.jpg', imag)\n", (847, 861), False, 'import cv2\n'), ((1054, 1071), 'pyzbar.pyzbar.decode', 'pyzbar.decode', (['im'], {}), '(im)\n', (1067, 1071), True, 'import pyzbar.pyzbar as pyzbar\n'), ((2534, 2578), 'math.sqrt', 'math.sqrt', (['(robotx * robotx + roboty * roboty)'], {}), '(robotx * robotx + roboty * roboty)\n', (2543, 2578), False, 'import math\n'), ((2604, 2644), 'math.sqrt', 'math.sqrt', (['(userx * userx + usery * usery)'], {}), '(userx * userx + usery * usery)\n', (2613, 2644), False, 'import math\n'), ((2738, 2784), 'cv2.line', 'cv2.line', (['im', '(x, y)', '(xx, yy)', '(255, 0, 0)', '(5)'], {}), '(im, (x, y), (xx, yy), (255, 0, 0), 5)\n', (2746, 2784), False, 'import cv2\n'), ((2798, 2854), 'cv2.line', 'cv2.line', (['im', '(x, y)', '(targetx, targety)', '(255, 0, 0)', '(5)'], {}), '(im, (x, y), (targetx, targety), (255, 0, 0), 5)\n', (2806, 2854), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
from py4web.core import Fixture, HTTP
from py4web import request, response
from inspect import signature, _empty
import json
import pandas as pd
from io import BytesIO
def unjson(value):
try:
return json.loads(value)
except (json.decoder.JSONDecodeError, TypeError,):
return value
def check_key_in_params(key):
try:
return (key in request.params)
except KeyError:
return False
def webio(func, **defaults):
kwargs = {}
sign = signature(func).parameters
for key,parameter in sign.items():
if parameter.default==_empty:
if key in request.query:
kwargs[key] = unjson(request.query[key])
elif request.json and (key in request.json):
kwargs[key] = request.json[key]
elif key in defaults:
kwargs[key] = defaults[key]
elif key in request.query:
kwargs[key] = unjson(request.query[key])
elif request.json and (key in request.json):
kwargs[key] = request.json[key]
elif check_key_in_params(key):
kwargs[key] = unjson(request.params[key])
elif key in defaults:
kwargs[key] = defaults[key]
else:
kwargs[key] = parameter.default
    if request.query is not None:
kwargs.update({k: unjson(v) for k,v in request.query.items() if not k in sign})
    elif request.json is not None:
kwargs.update({k: v for k,v in request.json.items() if not k in sign})
kwargs.update({k: v for k,v in defaults.items() if not k in sign})
return kwargs
class WebWrapper(Fixture):
"""docstring for WebWrapper."""
def __init__(self, **defaults):
super(WebWrapper, self).__init__()
self.defaults = defaults
self.update = self.defaults.update
self.__setitem__ = self.defaults.__setitem__
def parse_request(self, func, **defaults):
self.update(defaults)
return webio(func, **self.defaults)
def __call__(self, func, **defaults):
self.update(defaults)
def wrapper():
return func(**webio(func, **self.defaults))
return wrapper
def brap(**defaults):
""" web wrapper
Variables declared in function signature will be taken from request and
decoded as they were json string before being passed to the function.
defaults : Default values that will overwrite the ones defined in signature.
"""
def decorator(func):
def wrapper():
kwargs = {}
sign = signature(func).parameters
for key,parameter in sign.items():
if parameter.default==_empty:
if key in request.query:
kwargs[key] = unjson(request.query[key])
elif request.json and (key in request.json):
kwargs[key] = request.json[key]
elif key in defaults:
kwargs[key] = defaults[key]
elif key in request.query:
kwargs[key] = unjson(request.query[key])
elif request.json and (key in request.json):
kwargs[key] = request.json[key]
elif key in defaults:
kwargs[key] = defaults[key]
else:
kwargs[key] = parameter.default
            if request.query is not None:
kwargs.update({k: unjson(v) for k,v in request.query.items() if not k in sign})
            elif request.json is not None:
kwargs.update({k: v for k,v in request.json.items() if not k in sign})
kwargs.update({k: v for k,v in defaults.items() if not k in sign})
return func(**kwargs)
return wrapper
return decorator
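# Hedged usage sketch (added; the controller below is hypothetical and would
# normally be registered with a py4web @action route):
@brap(greeting='hello')
def echo(name='world', greeting=None):
    # 'name' and 'greeting' are filled from the request query/JSON body,
    # with the decorator defaults overriding signature defaults.
    return {'greeting': greeting, 'name': name}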
class LocalsOnly(Fixture):
"""docstring for LocalsOnly."""
def __init__(self):
super(LocalsOnly, self).__init__()
# self.request = request
def on_request(self):
if not request.urlparts.netloc.startswith('localhost'):
raise HTTP(403)
class CORS(Fixture):
""" Fixture helper for sharing web service avoiding cross origin resource sharing problems """
def __init__(self, age=86400, origin="*", headers="*", methods="*"):
super(CORS, self).__init__()
self.age = age
self.origin = origin
self.headers = headers
self.methods = methods
def on_request(self):
response.headers["Access-Control-Allow-Origin"] = self.origin
response.headers["Access-Control-Max-Age"] = self.age
response.headers["Access-Control-Allow-Headers"] = self.headers
response.headers["Access-Control-Allow-Methods"] = self.methods
response.headers["Access-Control-Allow-Credentials"] = "true"
class AsXlsx(Fixture):
""" Export the output to excel format """
def __init__(self, filename='export', columns=None, index=False):
"""
filename @string : Name of the downloading file
columns @list : Sorted list of the column names to export
"""
self.filename = filename
self.columns = columns
self.index = index
def on_success(self, status):
# called when a request is successful
if status==200:
response.headers["Content-Type"] = "application/vnd.ms-excel"
response.headers["Content-Disposition"] = f'inline; filename="{self.filename}.xlsx"'
def transform(self, output, shared_data=None):
"""
output @dict : The decorated controller must returns a dictionary with
the data to export divided by worksheet.
Doc:
Courtesy of:
* https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.ExcelWriter.html
* https://xlsxwriter.readthedocs.io/example_pandas_multiple.html
"""
stream = BytesIO()
with pd.ExcelWriter(stream, engine='xlsxwriter') as writer:
for sensor, data in output.items():
df = pd.DataFrame(data)
df.to_excel(writer, sheet_name=sensor, columns=self.columns, index=self.index)
stream.seek(0)
return stream.read()
|
[
"pandas.DataFrame",
"py4web.request.query.items",
"io.BytesIO",
"json.loads",
"py4web.request.json.items",
"py4web.request.urlparts.netloc.startswith",
"inspect.signature",
"py4web.core.HTTP",
"pandas.ExcelWriter"
] |
[((236, 253), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (246, 253), False, 'import json\n'), ((508, 523), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (517, 523), False, 'from inspect import signature, _empty\n'), ((5923, 5932), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (5930, 5932), False, 'from io import BytesIO\n'), ((4027, 4074), 'py4web.request.urlparts.netloc.startswith', 'request.urlparts.netloc.startswith', (['"""localhost"""'], {}), "('localhost')\n", (4061, 4074), False, 'from py4web import request, response\n'), ((4094, 4103), 'py4web.core.HTTP', 'HTTP', (['(403)'], {}), '(403)\n', (4098, 4103), False, 'from py4web.core import Fixture, HTTP\n'), ((5946, 5989), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['stream'], {'engine': '"""xlsxwriter"""'}), "(stream, engine='xlsxwriter')\n", (5960, 5989), True, 'import pandas as pd\n'), ((2569, 2584), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (2578, 2584), False, 'from inspect import signature, _empty\n'), ((6070, 6088), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (6082, 6088), True, 'import pandas as pd\n'), ((1379, 1400), 'py4web.request.query.items', 'request.query.items', ([], {}), '()\n', (1398, 1400), False, 'from py4web import request, response\n'), ((1494, 1514), 'py4web.request.json.items', 'request.json.items', ([], {}), '()\n', (1512, 1514), False, 'from py4web import request, response\n'), ((3491, 3512), 'py4web.request.query.items', 'request.query.items', ([], {}), '()\n', (3510, 3512), False, 'from py4web import request, response\n'), ((3622, 3642), 'py4web.request.json.items', 'request.json.items', ([], {}), '()\n', (3640, 3642), False, 'from py4web import request, response\n')]
|
'''
SSD1619A EPD IC Framebuf-derived Display Driver for MicroPython
Written by <NAME> - github.com/T-Wilko
'''
from micropython import const
from machine import SPI, Pin
from time import sleep_ms
import ustruct, framebuf
DRIVER_CTRL = const(0x01)
GATE_SCAN_START = const(0x0F)
DATA_ENTRY_MODE = const(0x11)
SET_DUMMY_PERIOD = const(0x3A)
SET_GATE_WIDTH = const(0x3B)
SET_WAVE_CTRL = const(0x3C)
RAM_X_ADDRESS = const(0x44)
RAM_Y_ADDRESS = const(0x45)
SET_RAM_COUNTER_X = const(0x4E)
SET_RAM_COUNTER_Y = const(0x4F)
SOFT_RESET = const(0x12)
MASTER_ACTIVATION = const(0x20)
DISP_UPDATE_1 = const(0x21)
DISP_UPDATE_2 = const(0x22)
WRITE_RAM_BW = const(0x24)
WRITE_RAM_RED = const(0x26)
SET_ANALOGUE_CTRL = const(0x74)
SET_DIGITAL_CTRL = const(0x7E)
class EPD(framebuf.FrameBuffer):
def __init__(self, spi, dc, cs, rst, busy, width, height):
self.spi = spi
self.spi.init()
self.dc = Pin(dc)
self.cs = Pin(cs)
self.rst = Pin(rst)
self.busy = Pin(busy)
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
self.rst.init(self.rst.OUT, value=0)
self.busy.init(self.busy.IN)
self.width = width
self.height = height
self.pages = self.height // 8
self.buffer = bytearray(self.pages * self.width)
super().__init__(self.buffer, self.width, self.height, framebuf.MONO_VLSB)
self.init_display()
def init_display(self):
# SW reset
self._command(SOFT_RESET)
self.wait_until_idle()
# Set analogue then digital block control
        self._command(SET_ANALOGUE_CTRL, b'\x54')
        self._command(SET_DIGITAL_CTRL, b'\x3B')
# Set driver output control
self._command(DRIVER_CTRL)
# Set dummy line period, gate line width, waveform control
self._command(SET_DUMMY_PERIOD)
self._command(SET_GATE_WIDTH)
self._command(SET_WAVE_CTRL)
# Set RAM start/end positions
self._command(RAM_X_ADDRESS)
self._command(RAM_Y_ADDRESS)
self._command(SET_RAM_COUNTER_X)
self._command(SET_RAM_COUNTER_Y)
def _command(self, command, data=None):
self.cs(0)
self.dc(0)
self.spi.write(bytearray([command]))
self.cs(1)
if data is not None:
self._data(data)
def _data(self, data):
self.cs(0)
self.dc(1)
self.spi.write(data)
self.cs(1)
self.dc(0)
def wait_until_idle(self):
        while self.busy.value() == 1:
pass
return
def reset(self):
self.rst(1)
sleep_ms(1)
self.rst(0)
sleep_ms(10)
self.rst(1)
class EPD_RED(EPD):
def write(self):
self._command(WRITE_RAM_RED)
self._data(self.buffer)
def show(self):
self._command(WRITE_RAM_RED)
for i in range(0, len(self.buffer)):
self._data(bytearray([self.buffer[i]]))
self._command(DISP_UPDATE_2)
self._command(MASTER_ACTIVATION)
self.wait_until_idle()
class EPD_BW(EPD):
def write(self):
self._command(WRITE_RAM_BW)
self._data(self.buffer)
def show(self):
self._command(DISP_UPDATE_2)
self._command(MASTER_ACTIVATION)
self.wait_until_idle()
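# Hedged usage sketch (added; SPI bus and pin numbers are assumptions for a
# generic ESP32 wiring, not taken from the original driver):
if __name__ == '__main__':
    spi = SPI(1, baudrate=2000000)
    epd = EPD_BW(spi, dc=4, cs=5, rst=16, busy=17, width=128, height=296)
    epd.fill(0)                 # clear the framebuffer
    epd.text('hello', 0, 0, 1)  # framebuf drawing methods are inherited
    epd.write()                 # push the buffer into controller RAM
    epd.show()                  # trigger the display refresh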
|
[
"time.sleep_ms",
"micropython.const",
"machine.Pin"
] |
[((251, 259), 'micropython.const', 'const', (['(1)'], {}), '(1)\n', (256, 259), False, 'from micropython import const\n'), ((288, 297), 'micropython.const', 'const', (['(15)'], {}), '(15)\n', (293, 297), False, 'from micropython import const\n'), ((325, 334), 'micropython.const', 'const', (['(17)'], {}), '(17)\n', (330, 334), False, 'from micropython import const\n'), ((362, 371), 'micropython.const', 'const', (['(58)'], {}), '(58)\n', (367, 371), False, 'from micropython import const\n'), ((399, 408), 'micropython.const', 'const', (['(59)'], {}), '(59)\n', (404, 408), False, 'from micropython import const\n'), ((436, 445), 'micropython.const', 'const', (['(60)'], {}), '(60)\n', (441, 445), False, 'from micropython import const\n'), ((473, 482), 'micropython.const', 'const', (['(68)'], {}), '(68)\n', (478, 482), False, 'from micropython import const\n'), ((510, 519), 'micropython.const', 'const', (['(69)'], {}), '(69)\n', (515, 519), False, 'from micropython import const\n'), ((547, 556), 'micropython.const', 'const', (['(78)'], {}), '(78)\n', (552, 556), False, 'from micropython import const\n'), ((584, 593), 'micropython.const', 'const', (['(79)'], {}), '(79)\n', (589, 593), False, 'from micropython import const\n'), ((621, 630), 'micropython.const', 'const', (['(18)'], {}), '(18)\n', (626, 630), False, 'from micropython import const\n'), ((658, 667), 'micropython.const', 'const', (['(32)'], {}), '(32)\n', (663, 667), False, 'from micropython import const\n'), ((695, 704), 'micropython.const', 'const', (['(33)'], {}), '(33)\n', (700, 704), False, 'from micropython import const\n'), ((732, 741), 'micropython.const', 'const', (['(34)'], {}), '(34)\n', (737, 741), False, 'from micropython import const\n'), ((769, 778), 'micropython.const', 'const', (['(36)'], {}), '(36)\n', (774, 778), False, 'from micropython import const\n'), ((806, 815), 'micropython.const', 'const', (['(38)'], {}), '(38)\n', (811, 815), False, 'from micropython import const\n'), ((843, 853), 'micropython.const', 'const', (['(116)'], {}), '(116)\n', (848, 853), False, 'from micropython import const\n'), ((880, 890), 'micropython.const', 'const', (['(126)'], {}), '(126)\n', (885, 890), False, 'from micropython import const\n'), ((1064, 1071), 'machine.Pin', 'Pin', (['dc'], {}), '(dc)\n', (1067, 1071), False, 'from machine import SPI, Pin\n'), ((1090, 1097), 'machine.Pin', 'Pin', (['cs'], {}), '(cs)\n', (1093, 1097), False, 'from machine import SPI, Pin\n'), ((1117, 1125), 'machine.Pin', 'Pin', (['rst'], {}), '(rst)\n', (1120, 1125), False, 'from machine import SPI, Pin\n'), ((1146, 1155), 'machine.Pin', 'Pin', (['busy'], {}), '(busy)\n', (1149, 1155), False, 'from machine import SPI, Pin\n'), ((2850, 2861), 'time.sleep_ms', 'sleep_ms', (['(1)'], {}), '(1)\n', (2858, 2861), False, 'from time import sleep_ms\n'), ((2891, 2903), 'time.sleep_ms', 'sleep_ms', (['(10)'], {}), '(10)\n', (2899, 2903), False, 'from time import sleep_ms\n')]
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Position(models.Model):
"""
    Model representing the position of a player (e.g. Central Midfielder, Central Defender).
"""
name = models.CharField(max_length=200, help_text="Enter the position of player (e.g. Central Midfielder, Centarl Defender etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
class Player(models.Model):
"""
    Model representing the Player.
"""
Full_name = models.CharField(max_length=200)
current_club = models.ForeignKey('Club', on_delete=models.SET_NULL, null=True)
nationality = models.CharField(max_length=100, null = True)
# Foreign Key used
# Player
information = models.CharField(max_length=200, help_text="Enter description of the player")
    age = models.CharField('Age', max_length=13, help_text='Enter the age of the player (max 13 characters)')
position = models.ForeignKey(Position, on_delete=models.SET_NULL, null=True, help_text="Select a position of player")
#enter
def __str__(self):
"""
String for representing the Model object.
"""
return self.Full_name
def get_absolute_url(self):
"""
        Returns the url to access a particular player instance.
"""
return reverse('player_detail', args=[str(self.id)])
class Club(models.Model):
"""
Model representing the club
"""
name = models.CharField(max_length=100)
country = models.CharField(max_length=100)
trophies = models.CharField(null=True, blank=True, max_length=100)
information = models.CharField(max_length=1000, null=True, help_text="Enter description of the player")
my_teams = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
@property
def is_overdue(self):
return True
def get_absolute_url(self):
"""
        Returns the url to access a particular club instance.
"""
return reverse('club-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return (self.name)
class Meta:
ordering = ['name']
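# Hedged usage sketch (added; all field values are illustrative):
#   club = Club.objects.create(name='FC Example', country='Spain')
#   midfielder = Position.objects.create(name='Central Midfielder')
#   player = Player.objects.create(Full_name='Jane Doe', current_club=club,
#                                  nationality='Spain', information='...',
#                                  age='25', position=midfielder)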
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey"
] |
[((242, 375), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'help_text': '"""Enter the position of player (e.g. Central Midfielder, Centarl Defender etc.)"""'}), "(max_length=200, help_text=\n 'Enter the position of player (e.g. Central Midfielder, Centarl Defender etc.)'\n )\n", (258, 375), False, 'from django.db import models\n'), ((695, 727), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (711, 727), False, 'from django.db import models\n'), ((747, 810), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Club"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), "('Club', on_delete=models.SET_NULL, null=True)\n", (764, 810), False, 'from django.db import models\n'), ((830, 873), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (846, 873), False, 'from django.db import models\n'), ((932, 1009), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'help_text': '"""Enter description of the player"""'}), "(max_length=200, help_text='Enter description of the player')\n", (948, 1009), False, 'from django.db import models\n'), ((1020, 1126), 'django.db.models.CharField', 'models.CharField', (['"""Age"""'], {'max_length': '(13)', 'help_text': '"""13 Character <a href="enter age of the player</a>"""'}), '(\'Age\', max_length=13, help_text=\n \'13 Character <a href="enter age of the player</a>\')\n', (1036, 1126), False, 'from django.db import models\n'), ((1136, 1247), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Position'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'help_text': '"""Select a position of player"""'}), "(Position, on_delete=models.SET_NULL, null=True, help_text\n ='Select a position of player')\n", (1153, 1247), False, 'from django.db import models\n'), ((1661, 1693), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1677, 1693), False, 'from django.db import models\n'), ((1708, 1740), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1724, 1740), False, 'from django.db import models\n'), ((1756, 1811), 'django.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(100)'}), '(null=True, blank=True, max_length=100)\n', (1772, 1811), False, 'from django.db import models\n'), ((1830, 1924), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(True)', 'help_text': '"""Enter description of the player"""'}), "(max_length=1000, null=True, help_text=\n 'Enter description of the player')\n", (1846, 1924), False, 'from django.db import models\n'), ((1937, 2010), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)'}), '(User, on_delete=models.SET_NULL, null=True, blank=True)\n', (1954, 2010), False, 'from django.db import models\n')]
|
from random import randint
from shutil import rmtree
from django.core.files.storage import default_storage
from .reference import ReferenceModel as rm
from .factories import SuperUserFactory, ProfileFactory, PostFactory, PostCommentsFactory
def make_objects(factor=1):
rm.COMMENT.objects.all().delete()
print_cleared_model(rm.COMMENT)
rm.POST.objects.all().delete()
print_cleared_model(rm.POST)
rm.USER.objects.all().delete()
print_cleared_model(rm.USER)
clear_media_files() # delete all media files
# user_count = randint(1, factor)
# post_count = randint(factor, user_count * factor)
#
# post_comments_factor = randint(post_count, post_count * factor)
user_count = 1
post_count = 100
post_comments_factor = 10
SuperUserFactory.create()
print('Superuser was created.')
total_count = user_count * factor
print_start_create_info(ProfileFactory, total_count)
ProfileFactory.create_batch(size=total_count)
print_create_batch_info(ProfileFactory, total_count)
total_count = post_count * factor
print_start_create_info(PostFactory, total_count)
PostFactory.create_batch(size=total_count)
print_create_batch_info(PostFactory, total_count)
total_count = post_count * post_comments_factor * factor
print_start_create_info(PostCommentsFactory, total_count)
PostCommentsFactory.create_batch(size=total_count)
print_create_batch_info(PostCommentsFactory, total_count)
def clear_media_files():
"""
    Delete the MEDIA_ROOT directory and all media files inside it
:return:
"""
location = default_storage.base_location
try:
listdir = default_storage.listdir(location)[0]
        for directory in listdir:  # avoid shadowing the builtin 'dir'
            rmtree(default_storage.path(directory))
except OSError as e:
print("Error: %s" % e.strerror)
def print_cleared_model(model, extra_msg=None):
msg = model.__name__ + ' model was cleared.'
if extra_msg:
msg += ' ' + extra_msg
print(msg)
def print_start_create_info(factory, count):
print('Start creating ' + str(count) + ' records of the ' + factory.__name__ + ' factory.')
def print_create_batch_info(factory, count):
print('-- Factory ' + factory.__name__ + ' created batch ' + str(count) + ' count.')
|
[
"django.core.files.storage.default_storage.path",
"django.core.files.storage.default_storage.listdir"
] |
[((1666, 1699), 'django.core.files.storage.default_storage.listdir', 'default_storage.listdir', (['location'], {}), '(location)\n', (1689, 1699), False, 'from django.core.files.storage import default_storage\n'), ((1750, 1775), 'django.core.files.storage.default_storage.path', 'default_storage.path', (['dir'], {}), '(dir)\n', (1770, 1775), False, 'from django.core.files.storage import default_storage\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-04-12 14:03
from __future__ import unicode_literals
from builtins import next
from builtins import str
import json
import logging
import os
import shutil
import tempfile
import zipfile
import fiona
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon
from django.db import migrations
logger = logging.getLogger(__name__)
def geom_from_boundary_file(boundary_file):
""" Opens a local copy of the boundary file and sets geom field
Mostly copied from Neighborhood._set_geom_from_boundary_file because we don't have
access to model methods here
Does not save model
Copies the geom of the first feature found in the shapefile into geom, to be consistent
with the rest of the app
    On failure, logs the exception and returns None instead of raising
"""
    geom = None
    tmpdir = None
    try:
        tmpdir = tempfile.mkdtemp()
local_zipfile = os.path.join(tmpdir, 'neighborhood.zip')
with open(local_zipfile, 'wb') as zip_handle:
zip_handle.write(boundary_file.read())
with zipfile.ZipFile(local_zipfile, 'r') as zip_handle:
zip_handle.extractall(tmpdir)
shpfiles = [filename for filename in os.listdir(tmpdir) if filename.endswith('shp')]
shp_filename = os.path.join(tmpdir, shpfiles[0])
with fiona.open(shp_filename, 'r') as shp_handle:
feature = next(shp_handle)
geom = GEOSGeometry(json.dumps(feature['geometry']))
if geom.geom_type == 'Polygon':
geom = MultiPolygon([geom])
    except Exception:
geom = None
logger.exception('ERROR: {}'.format(str(boundary_file)))
    finally:
        if tmpdir:
            shutil.rmtree(tmpdir, ignore_errors=True)
return geom
def add_neighborhood_geoms(apps, schema_editor):
Neighborhood = apps.get_model("pfb_analysis", "Neighborhood")
for n in Neighborhood.objects.all():
n.geom = geom_from_boundary_file(n.boundary_file)
n.save()
class Migration(migrations.Migration):
dependencies = [
('pfb_analysis', '0014_neighborhood_geom'),
]
operations = [
migrations.RunPython(add_neighborhood_geoms)
]
|
[
"django.db.migrations.RunPython",
"zipfile.ZipFile",
"fiona.open",
"json.dumps",
"builtins.next",
"tempfile.mkdtemp",
"django.contrib.gis.geos.MultiPolygon",
"shutil.rmtree",
"builtins.str",
"os.path.join",
"os.listdir",
"logging.getLogger"
] |
[((369, 396), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (386, 396), False, 'import logging\n'), ((910, 928), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (926, 928), False, 'import tempfile\n'), ((953, 993), 'os.path.join', 'os.path.join', (['tmpdir', '"""neighborhood.zip"""'], {}), "(tmpdir, 'neighborhood.zip')\n", (965, 993), False, 'import os\n'), ((1321, 1354), 'os.path.join', 'os.path.join', (['tmpdir', 'shpfiles[0]'], {}), '(tmpdir, shpfiles[0])\n', (1333, 1354), False, 'import os\n'), ((1723, 1764), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {'ignore_errors': '(True)'}), '(tmpdir, ignore_errors=True)\n', (1736, 1764), False, 'import shutil\n'), ((2163, 2207), 'django.db.migrations.RunPython', 'migrations.RunPython', (['add_neighborhood_geoms'], {}), '(add_neighborhood_geoms)\n', (2183, 2207), False, 'from django.db import migrations\n'), ((1112, 1147), 'zipfile.ZipFile', 'zipfile.ZipFile', (['local_zipfile', '"""r"""'], {}), "(local_zipfile, 'r')\n", (1127, 1147), False, 'import zipfile\n'), ((1368, 1397), 'fiona.open', 'fiona.open', (['shp_filename', '"""r"""'], {}), "(shp_filename, 'r')\n", (1378, 1397), False, 'import fiona\n'), ((1435, 1451), 'builtins.next', 'next', (['shp_handle'], {}), '(shp_handle)\n', (1439, 1451), False, 'from builtins import next\n'), ((1250, 1268), 'os.listdir', 'os.listdir', (['tmpdir'], {}), '(tmpdir)\n', (1260, 1268), False, 'import os\n'), ((1484, 1515), 'json.dumps', 'json.dumps', (["feature['geometry']"], {}), "(feature['geometry'])\n", (1494, 1515), False, 'import json\n'), ((1584, 1604), 'django.contrib.gis.geos.MultiPolygon', 'MultiPolygon', (['[geom]'], {}), '([geom])\n', (1596, 1604), False, 'from django.contrib.gis.geos import GEOSGeometry, MultiPolygon\n'), ((1681, 1699), 'builtins.str', 'str', (['boundary_file'], {}), '(boundary_file)\n', (1684, 1699), False, 'from builtins import str\n')]
|
import sys,os,time,csv,getopt,cv2,argparse
import numpy, ctypes, array
import numpy as np
#import matplotlib as plt
from datetime import datetime
from ctypes import cdll, c_char_p
from skimage.transform import resize
from numpy.ctypeslib import ndpointer
from lime import lime_image
from skimage.segmentation import mark_boundaries
import ntpath
import scipy.misc
from PIL import Image
AnnInferenceLib = ctypes.cdll.LoadLibrary('/home/rajy/work/inceptionv4/build/libannmodule.so')
inf_fun = AnnInferenceLib.annRunInference
inf_fun.restype = ctypes.c_int
inf_fun.argtypes = [ctypes.c_void_p,
ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),
ctypes.c_size_t,
ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),
ctypes.c_size_t]
hdl = 0
def PreprocessImage(img, dim):
imgw = img.shape[1]
imgh = img.shape[0]
imgb = np.empty((dim[0], dim[1], 3)) #for inception v4
imgb.fill(1.0)
if imgh/imgw > dim[1]/dim[0]:
neww = int(imgw * dim[1] / imgh)
newh = dim[1]
else:
newh = int(imgh * dim[0] / imgw)
neww = dim[0]
offx = int((dim[0] - neww)/2)
offy = int((dim[1] - newh)/2)
imgc = img.copy()*(2.0/255.0) - 1.0
#print('INFO:: newW:%d newH:%d offx:%d offy: %d' % (neww, newh, offx, offy))
imgb[offy:offy+newh,offx:offx+neww,:] = resize(imgc,(newh,neww),1.0)
#im = imgb[:,:,(2,1,0)]
return imgb
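# Worked example (added; assumes Python 3 true division): a 600x400 input with
# dim=(299, 299) resizes to 299x199 (aspect preserved) and is padded with
# offx=0, offy=50, letterboxing the [-1, 1]-scaled content in a 299x299 canvas.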
def runInference(img):
global hdl
imgw = img.shape[1]
imgh = img.shape[0]
#proc_images.append(im)
out_buf = bytearray(1000*4)
#out_buf = memoryview(out_buf)
out = np.frombuffer(out_buf, dtype=numpy.float32)
#im = im.astype(np.float32)
inf_fun(hdl, np.ascontiguousarray(img, dtype=np.float32), (img.shape[0]*img.shape[1]*3*4), np.ascontiguousarray(out, dtype=np.float32), len(out_buf))
return out
def predict_fn(images):
results = np.zeros(shape=(len(images), 1000))
for i in range(len(images)):
results[i] = runInference(images[i])
return results
def lime_explainer(image, preds):
for x in preds.argsort()[0][-5:]:
print (x, names[x], preds[0,x])
top_indeces.append(x)
tmp = datetime.now()
explainer = lime_image.LimeImageExplainer()
# Hide color is the color for a superpixel turned OFF. Alternatively, if it is NONE, the superpixel will be replaced by the average of its pixels
explanation = explainer.explain_instance(image, predict_fn, top_labels=5, hide_color=0, num_samples=1000)
#to see the explanation for the top class
temp, mask = explanation.get_image_and_mask(top_indeces[4], positive_only=True, num_features=5, hide_rest=True)
im_top1 = mark_boundaries(temp / 2 + 0.5, mask)
#print "iminfo",im_top1.shape, im_top1.dtype
im_top1 = im_top1[:,:,(2,1,0)] #BGR to RGB
temp1, mask1 = explanation.get_image_and_mask(top_indeces[3], positive_only=True, num_features=100, hide_rest=True)
im_top2 = mark_boundaries(temp1 / 2 + 0.5, mask1)
im_top2 = im_top2[:,:,(2,1,0)] #BGR to RGB
del top_indeces[:]
return im_top1, im_top2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image', dest='image', type=str,
default='./images/dog.jpg', help='An image path.')
parser.add_argument('--video', dest='video', type=str,
default='./videos/car.avi', help='A video path.')
parser.add_argument('--imagefolder', dest='imagefolder', type=str,
default='./', help='A directory with images.')
parser.add_argument('--resultsfolder', dest='resultfolder', type=str,
default='./', help='A directory with images.')
parser.add_argument('--labels', dest='labelfile', type=str,
default='./labels.txt', help='file with labels')
args = parser.parse_args()
imagefile = args.image
videofile = args.video
imagedir = args.imagefolder
outputdir = args.resultfolder
synsetfile = args.labelfile
images = []
proc_images = []
AnnInferenceLib.annCreateContext.argtype = [ctypes.c_char_p]
data_folder = "/home/rajy/work/inceptionv4"
b_data_folder = data_folder.encode('utf-8')
global hdl
hdl = AnnInferenceLib.annCreateContext(b_data_folder)
top_indeces = []
#read synset names
if synsetfile:
fp = open(synsetfile, 'r')
names = fp.readlines()
names = [x.strip() for x in names]
fp.close()
if sys.argv[1] == '--image':
# image preprocess
img = cv2.imread(imagefile)
dim = (299,299)
imgb = PreprocessImage(img, dim)
images.append(imgb)
#proc_images.append(imgb)
start = datetime.now()
preds = predict_fn(images)
end = datetime.now()
elapsedTime = end-start
print ('total time for inference in milliseconds', elapsedTime.total_seconds()*1000)
if False:
for x in preds.argsort()[0][-5:]:
print (x, names[x], preds[0,x])
top_indeces.append(x)
image0 = images[0]
tmp = datetime.now()
explainer = lime_image.LimeImageExplainer()
# Hide color is the color for a superpixel turned OFF. Alternatively, if it is NONE, the superpixel will be replaced by the average of its pixels
explanation = explainer.explain_instance(image0, predict_fn, top_labels=5, hide_color=0, num_samples=1000)
elapsedTime = datetime.now()-tmp
print ('total time for lime is " milliseconds', elapsedTime.total_seconds()*1000)
#to see the explanation for the top class
temp, mask = explanation.get_image_and_mask(top_indeces[4], positive_only=True, num_features=5, hide_rest=True)
im_top1 = mark_boundaries(temp / 2 + 0.5, mask)
#print "iminfo",im_top1.shape, im_top1.dtype
im_top1_save = im_top1[:,:,(2,1,0)] #BGR to RGB
infile = ntpath.basename(imagefile)
inname,ext = infile.split('.')
cv2.imshow('top1', im_top1)
scipy.misc.imsave(outputdir + inname + '_top1.jpg', im_top1_save)
#scipy.imsave(outputdir + inname + '_1.jpg', im_top1)
#im_top1_norm.save(outputdir + inname + '_1.jpg')
temp1, mask1 = explanation.get_image_and_mask(top_indeces[3], positive_only=True, num_features=100, hide_rest=True)
#temp, mask = explanation.get_image_and_mask(top_indeces[3], positive_only=True, num_features=1000, hide_rest=False, min_weight=0.05)
#cv2.imshow('top2', mark_boundaries(temp1 / 2 + 0.5, mask1))
im_top2 = mark_boundaries(temp1 / 2 + 0.5, mask1)
im_top2 = im_top2[:,:,(2,1,0)] #BGR to RGB
scipy.misc.imsave(outputdir + inname + '_top2.jpg', im_top2)
else:
im_top1, im_top2 = lime_explainer(images[0], preds)
infile = ntpath.basename(imagefile)
inname,ext = infile.split('.')
#cv2.imshow('top1', im_top1)
scipy.misc.imsave(outputdir + inname + '_top1.jpg', im_top1)
scipy.misc.imsave(outputdir + inname + '_top2.jpg', im_top2)
#cv2.destroyAllWindows()
AnnInferenceLib.annReleaseContext(ctypes.c_void_p(hdl))
exit()
elif sys.argv[1] == '--imagefolder':
count = 0
start = datetime.now()
for image in sorted(os.listdir(imagedir)):
print('Processing Image ' + image)
img = cv2.imread(imagedir + image)
dim = (299,299)
imgb = PreprocessImage(img, dim)
images.append(imgb)
#proc_images.append(imgb)
preds = predict_fn(images)
im_top1, im_top2 = lime_explainer(images[0], preds)
inname,ext = image.split('.')
#cv2.imshow('top1', im_top1)
scipy.misc.imsave(outputdir + inname + '_top1.jpg', im_top1)
scipy.misc.imsave(outputdir + inname + '_top2.jpg', im_top2)
images.remove(imgb)
count += 1
end = datetime.now()
elapsedTime = end-start
print ('total time is " milliseconds', elapsedTime.total_seconds()*1000)
AnnInferenceLib.annReleaseContext(ctypes.c_void_p(hdl))
exit()
|
[
"skimage.segmentation.mark_boundaries",
"numpy.ctypeslib.ndpointer",
"argparse.ArgumentParser",
"ntpath.basename",
"numpy.frombuffer",
"numpy.empty",
"numpy.ascontiguousarray",
"ctypes.cdll.LoadLibrary",
"lime.lime_image.LimeImageExplainer",
"cv2.imread",
"skimage.transform.resize",
"ctypes.c_void_p",
"cv2.imshow",
"datetime.datetime.now",
"os.listdir"
] |
[((406, 482), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['"""/home/rajy/work/inceptionv4/build/libannmodule.so"""'], {}), "('/home/rajy/work/inceptionv4/build/libannmodule.so')\n", (429, 482), False, 'import numpy, ctypes, array\n'), ((609, 656), 'numpy.ctypeslib.ndpointer', 'ndpointer', (['ctypes.c_float'], {'flags': '"""C_CONTIGUOUS"""'}), "(ctypes.c_float, flags='C_CONTIGUOUS')\n", (618, 656), False, 'from numpy.ctypeslib import ndpointer\n'), ((707, 754), 'numpy.ctypeslib.ndpointer', 'ndpointer', (['ctypes.c_float'], {'flags': '"""C_CONTIGUOUS"""'}), "(ctypes.c_float, flags='C_CONTIGUOUS')\n", (716, 754), False, 'from numpy.ctypeslib import ndpointer\n'), ((888, 917), 'numpy.empty', 'np.empty', (['(dim[0], dim[1], 3)'], {}), '((dim[0], dim[1], 3))\n', (896, 917), True, 'import numpy as np\n'), ((1362, 1393), 'skimage.transform.resize', 'resize', (['imgc', '(newh, neww)', '(1.0)'], {}), '(imgc, (newh, neww), 1.0)\n', (1368, 1393), False, 'from skimage.transform import resize\n'), ((1627, 1670), 'numpy.frombuffer', 'np.frombuffer', (['out_buf'], {'dtype': 'numpy.float32'}), '(out_buf, dtype=numpy.float32)\n', (1640, 1670), True, 'import numpy as np\n'), ((2201, 2215), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2213, 2215), False, 'from datetime import datetime\n'), ((2232, 2263), 'lime.lime_image.LimeImageExplainer', 'lime_image.LimeImageExplainer', ([], {}), '()\n', (2261, 2263), False, 'from lime import lime_image\n'), ((2700, 2737), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp / 2 + 0.5)', 'mask'], {}), '(temp / 2 + 0.5, mask)\n', (2715, 2737), False, 'from skimage.segmentation import mark_boundaries\n'), ((2968, 3007), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp1 / 2 + 0.5)', 'mask1'], {}), '(temp1 / 2 + 0.5, mask1)\n', (2983, 3007), False, 'from skimage.segmentation import mark_boundaries\n'), ((3147, 3172), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3170, 3172), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((1720, 1763), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (1740, 1763), True, 'import numpy as np\n'), ((1798, 1841), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['out'], {'dtype': 'np.float32'}), '(out, dtype=np.float32)\n', (1818, 1841), True, 'import numpy as np\n'), ((4586, 4607), 'cv2.imread', 'cv2.imread', (['imagefile'], {}), '(imagefile)\n', (4596, 4607), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((4751, 4765), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4763, 4765), False, 'from datetime import datetime\n'), ((4815, 4829), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4827, 4829), False, 'from datetime import datetime\n'), ((5154, 5168), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5166, 5168), False, 'from datetime import datetime\n'), ((5193, 5224), 'lime.lime_image.LimeImageExplainer', 'lime_image.LimeImageExplainer', ([], {}), '()\n', (5222, 5224), False, 'from lime import lime_image\n'), ((5842, 5879), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp / 2 + 0.5)', 'mask'], {}), '(temp / 2 + 0.5, mask)\n', (5857, 5879), False, 'from skimage.segmentation import mark_boundaries\n'), ((6019, 6045), 'ntpath.basename', 'ntpath.basename', (['imagefile'], {}), '(imagefile)\n', (6034, 6045), False, 'import ntpath\n'), ((6102, 6129), 'cv2.imshow', 'cv2.imshow', (['"""top1"""', 'im_top1'], {}), "('top1', im_top1)\n", (6112, 6129), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((6713, 6752), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp1 / 2 + 0.5)', 'mask1'], {}), '(temp1 / 2 + 0.5, mask1)\n', (6728, 6752), False, 'from skimage.segmentation import mark_boundaries\n'), ((6984, 7010), 'ntpath.basename', 'ntpath.basename', (['imagefile'], {}), '(imagefile)\n', (6999, 7010), False, 'import ntpath\n'), ((7324, 7344), 'ctypes.c_void_p', 'ctypes.c_void_p', (['hdl'], {}), '(hdl)\n', (7339, 7344), False, 'import numpy, ctypes, array\n'), ((7436, 7450), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7448, 7450), False, 'from datetime import datetime\n'), ((8147, 8161), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8159, 8161), False, 'from datetime import datetime\n'), ((5528, 5542), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5540, 5542), False, 'from datetime import datetime\n'), ((7479, 7499), 'os.listdir', 'os.listdir', (['imagedir'], {}), '(imagedir)\n', (7489, 7499), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((7567, 7595), 'cv2.imread', 'cv2.imread', (['(imagedir + image)'], {}), '(imagedir + image)\n', (7577, 7595), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((8317, 8337), 'ctypes.c_void_p', 'ctypes.c_void_p', (['hdl'], {}), '(hdl)\n', (8332, 8337), False, 'import numpy, ctypes, array\n')]
|
from core.models import User
from django.db.models.functions import TruncDate
from django.db.models import Count
from reports.queries.registry import register_report
from security.constants import SECURITY_GROUPS_PUBLIC
def users_by_date():
"""
    User registration count broken down by date
"""
users = (
User.objects.annotate(date=TruncDate("created_at"))
.values("date")
.annotate(total=Count("id"))
.values("date", "total")
.order_by("date")
)
data = [{"date": user["date"].strftime("%Y-%m-%d"), "total": user["total"]} for user in users]
return data
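# Example of the returned shape (illustrative values, not real data):
#   [{"date": "2024-01-01", "total": 3}, {"date": "2024-01-02", "total": 5}]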
def total_public_users():
"""
Total public users
"""
total_users = User.objects.filter(
groups__name__in=SECURITY_GROUPS_PUBLIC,
deleted_at__isnull=True,
).count()
total_verified = User.objects.filter(
groups__name__in=SECURITY_GROUPS_PUBLIC,
userprofile__email_verified_at__isnull=False,
deleted_at__isnull=True,
).count()
data = [
{"total": total_users, "type": "all"},
{
"total": total_verified,
"type": "verified",
},
]
return data
register_report(users_by_date)
register_report(total_public_users)
|
[
"django.db.models.Count",
"core.models.User.objects.filter",
"django.db.models.functions.TruncDate",
"reports.queries.registry.register_report"
] |
[((1185, 1215), 'reports.queries.registry.register_report', 'register_report', (['users_by_date'], {}), '(users_by_date)\n', (1200, 1215), False, 'from reports.queries.registry import register_report\n'), ((1216, 1251), 'reports.queries.registry.register_report', 'register_report', (['total_public_users'], {}), '(total_public_users)\n', (1231, 1251), False, 'from reports.queries.registry import register_report\n'), ((702, 791), 'core.models.User.objects.filter', 'User.objects.filter', ([], {'groups__name__in': 'SECURITY_GROUPS_PUBLIC', 'deleted_at__isnull': '(True)'}), '(groups__name__in=SECURITY_GROUPS_PUBLIC,\n deleted_at__isnull=True)\n', (721, 791), False, 'from core.models import User\n'), ((840, 975), 'core.models.User.objects.filter', 'User.objects.filter', ([], {'groups__name__in': 'SECURITY_GROUPS_PUBLIC', 'userprofile__email_verified_at__isnull': '(False)', 'deleted_at__isnull': '(True)'}), '(groups__name__in=SECURITY_GROUPS_PUBLIC,\n userprofile__email_verified_at__isnull=False, deleted_at__isnull=True)\n', (859, 975), False, 'from core.models import User\n'), ((424, 435), 'django.db.models.Count', 'Count', (['"""id"""'], {}), "('id')\n", (429, 435), False, 'from django.db.models import Count\n'), ((351, 374), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""created_at"""'], {}), "('created_at')\n", (360, 374), False, 'from django.db.models.functions import TruncDate\n')]
|
from source.file import File
from source.udp import Udp
def open(url):
url = url.split('://', 1)
if len(url) == 1:
return File(url[0])
proto, src = url
if proto == 'file':
return File(src)
if proto == 'udp':
return Udp(src)
raise ValueError("Unsupported protocol or URL format")
|
[
"source.file.File",
"source.udp.Udp"
] |
[((141, 153), 'source.file.File', 'File', (['url[0]'], {}), '(url[0])\n', (145, 153), False, 'from source.file import File\n'), ((214, 223), 'source.file.File', 'File', (['src'], {}), '(src)\n', (218, 223), False, 'from source.file import File\n'), ((262, 270), 'source.udp.Udp', 'Udp', (['src'], {}), '(src)\n', (265, 270), False, 'from source.udp import Udp\n')]
|
# Lint as: python3
"""Request handler classes for the extensions."""
import base64
import json
import re
import tornado.gen as gen
import os
from collections import namedtuple
from notebook.base.handlers import APIHandler, app_log
from google.cloud import storage # used for connecting to GCS
from io import BytesIO, StringIO # used for sending GCS blobs in JSON objects
def list_dir(bucket_name, path, blobs_dir_list):
items = []
directories = set()
path = '%s%s' % (path, '' if re.match(".*/$", path) else '/')
# print('list_dir', (bucket_name, path, blobs_dir_list))
for blob in blobs_dir_list:
relative_blob_name = re.sub(r'^' + path, '', blob.name)
relative_path_parts = [
dir
for dir in relative_blob_name.split('/')
if dir
]
if re.match(".*/$", blob.name):
# Add the top directory to the set of directories if one exist
if relative_path_parts:
directories.add(relative_path_parts[0])
else:
if relative_path_parts:
dir_name = relative_path_parts[0]
def blobInDir(parts):
return len(parts) > 1
if blobInDir(relative_path_parts):
directories.add(relative_path_parts[0])
else:
items.append({
'type': 'file',
'path': ('%s/%s' % (bucket_name, blob.name)),
'name': dir_name
})
# print('list_dir', (bucket_name, path))
if path != '/':
path = '/' + path
items = items + [{
'type': 'directory',
'path': ('%s%s%s/' % (bucket_name, path, d)),
'name': d + '/'
} for d in directories]
return items
def getPathContents(path, storage_client):
path = path or '/'
addDir = '/' if re.match(".+/$", path) else ''
path = os.path.normpath(path) + addDir
if path == '/':
buckets = storage_client.list_buckets()
return {
'type':'directory',
'content': [{
'type': 'directory',
'path': b.name + '/',
'name': b.name + '/'
} for b in buckets]
}
else:
        # Remove any preceding '/', and split off the bucket name
bucket_paths = re.sub(r'^/', '', path).split('/', 1)
# The first token should represent the bucket name
bucket_name = bucket_paths[0]
# The rest of the string should represent the blob path, if requested
blob_path = bucket_paths[1] if len(bucket_paths) > 1 else ''
# List blobs in the bucket with the blob_path prefix
blobs = list(storage_client.list_blobs(
bucket_name, prefix=blob_path))
# Find a blob that is not a directory name and fully matches the blob_path
        # If there are any matches, we are retrieving a single blob
matching_blobs = [b
for b in blobs
# TODO(cbwilkes): protect against empty names
if not re.match(".*/$", b.name) and b.name == blob_path]
if len(matching_blobs) == 1: # Single blob
blob = matching_blobs[0]
file_bytes = BytesIO()
blob.download_to_file(file_bytes)
return {
'type': 'file',
'content': {
'path': ('%s/%s' % (bucket_name, blob.name)),
'type': 'file',
'mimetype': blob.content_type,
'content': base64.encodebytes(
file_bytes.getvalue()).decode('ascii')
}
}
else: # Directory
return {
'type': 'directory',
'content': list_dir(bucket_name, blob_path, blobs)
}
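# Worked example (added, illustrative): path '/my-bucket/data/file.txt' yields
# bucket_name='my-bucket' and blob_path='data/file.txt'; if exactly one blob
# fully matches blob_path it is returned as a file, otherwise the prefix is
# listed as a directory.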
def delete(path, storage_client):
path = path or '/'
addDir = '/' if re.match(".+/$", path) else ''
path = os.path.normpath(path) + addDir
if path == '/':
return {}
else:
        # Remove any preceding '/', and split off the bucket name
bucket_paths = re.sub(r'^/', '', path).split('/', 1)
# The first token should represent the bucket name
bucket_name = bucket_paths[0]
# The rest of the string should represent the blob path, if requested
blob_path = bucket_paths[1] if len(bucket_paths) > 1 else ''
# List blobs in the bucket with the blob_path prefix
blobs = list(storage_client.list_blobs(
bucket_name, prefix=blob_path))
# Find a blob that is not a directory name and fully matches the blob_path
        # If there are any matches, we are retrieving a single blob
matching_blobs = [b
for b in blobs
# TODO(cbwilkes): protect against empty names
if not re.match(".*/$", b.name) and b.name == blob_path]
if len(matching_blobs) == 1: # Single blob
blob = matching_blobs[0]
blob.delete()
return {}
else: # Directory
return {}
class GCSHandler(APIHandler):
"""Handles requests for GCS operations."""
storage_client = None
@gen.coroutine
def get(self, path=''):
try:
if not self.storage_client:
self.storage_client = storage.Client()
self.finish(json.dumps(
getPathContents(path, self.storage_client)))
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
class UploadHandler(APIHandler):
@gen.coroutine
def post(self, *args, **kwargs):
model = self.get_json_body()
        # Remove any preceding '/', and split off the bucket name
bucket_paths = re.sub(r'^/', '', model['path']).split('/', 1)
# The first token should represent the bucket name
bucket_name = bucket_paths[0]
# The rest of the string should represent the blob path, if requested
blob_path = bucket_paths[1] if len(bucket_paths) > 1 else ''
if 'chunk' not in model:
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_path)
if model['format'] == 'base64':
bytes_file = BytesIO(base64.b64decode(model['content']))
blob.upload_from_file(bytes_file)
elif model['format'] == 'json':
blob.upload_from_string(json.dumps(model['content']))
else:
blob.upload_from_string(model['content'])
else:
tmp_dir = '/tmp/gcsfilebrowser/'
tmp_blob_path = tmp_dir + model['path']
# Create parent directory if doesn't exist
directory = os.path.dirname(tmp_blob_path)
if not os.path.exists(directory):
os.makedirs(directory)
# Append chunk to the temp file
with open(tmp_blob_path, "a+b") as tmp_file:
print("Saving chunk number %s to %s" % (model['chunk'], tmp_blob_path))
tmp_file.write(base64.b64decode(model['content']))
# Upload the file to GCS after the last chunk
if model['chunk'] == -1:
tmp_file.close()
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_path)
blob.upload_from_filename(tmp_blob_path)
os.remove(tmp_blob_path)
print("File %s uploaded and removed!" % tmp_blob_path)
self.finish({})
class DeleteHandler(APIHandler):
storage_client = None
@gen.coroutine
def delete(self, path=''):
try:
if not self.storage_client:
self.storage_client = storage.Client()
self.finish(json.dumps(delete(path, self.storage_client)))
except Exception as e:
app_log.exception(str(e))
self.set_status(500, str(e))
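# A minimal sketch (an assumption, not part of this module) of how these
# handlers might be wired into the Jupyter notebook server as a server
# extension; the '/gcs/...' route patterns below are hypothetical.
#
# from notebook.utils import url_path_join
#
# def load_jupyter_server_extension(nb_server_app):
#     web_app = nb_server_app.web_app
#     base_url = web_app.settings['base_url']
#     web_app.add_handlers('.*$', [
#         (url_path_join(base_url, '/gcs/files(.*)'), GCSHandler),
#         (url_path_join(base_url, '/gcs/upload'), UploadHandler),
#         (url_path_join(base_url, '/gcs/delete(.*)'), DeleteHandler),
#     ])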
|
[
"io.BytesIO",
"os.remove",
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"re.match",
"base64.b64decode",
"json.dumps",
"google.cloud.storage.Client",
"os.path.normpath",
"re.sub"
] |
[((640, 673), 're.sub', 're.sub', (["('^' + path)", '""""""', 'blob.name'], {}), "('^' + path, '', blob.name)\n", (646, 673), False, 'import re\n'), ((790, 817), 're.match', 're.match', (['""".*/$"""', 'blob.name'], {}), "('.*/$', blob.name)\n", (798, 817), False, 'import re\n'), ((1782, 1804), 're.match', 're.match', (['""".+/$"""', 'path'], {}), "('.+/$', path)\n", (1790, 1804), False, 'import re\n'), ((1822, 1844), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (1838, 1844), False, 'import os\n'), ((3619, 3641), 're.match', 're.match', (['""".+/$"""', 'path'], {}), "('.+/$', path)\n", (3627, 3641), False, 'import re\n'), ((3659, 3681), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (3675, 3681), False, 'import os\n'), ((3061, 3070), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3068, 3070), False, 'from io import BytesIO, StringIO\n'), ((5639, 5655), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (5653, 5655), False, 'from google.cloud import storage\n'), ((6217, 6247), 'os.path.dirname', 'os.path.dirname', (['tmp_blob_path'], {}), '(tmp_blob_path)\n', (6232, 6247), False, 'import os\n'), ((491, 513), 're.match', 're.match', (['""".*/$"""', 'path'], {}), "('.*/$', path)\n", (499, 513), False, 'import re\n'), ((2240, 2262), 're.sub', 're.sub', (['"""^/"""', '""""""', 'path'], {}), "('^/', '', path)\n", (2246, 2262), False, 'import re\n'), ((3814, 3836), 're.sub', 're.sub', (['"""^/"""', '""""""', 'path'], {}), "('^/', '', path)\n", (3820, 3836), False, 'import re\n'), ((4909, 4925), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (4923, 4925), False, 'from google.cloud import storage\n'), ((5309, 5340), 're.sub', 're.sub', (['"""^/"""', '""""""', "model['path']"], {}), "('^/', '', model['path'])\n", (5315, 5340), False, 'import re\n'), ((6261, 6286), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (6275, 6286), False, 'import os\n'), ((6296, 6318), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (6307, 6318), False, 'import os\n'), ((6682, 6698), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (6696, 6698), False, 'from google.cloud import storage\n'), ((6851, 6875), 'os.remove', 'os.remove', (['tmp_blob_path'], {}), '(tmp_blob_path)\n', (6860, 6875), False, 'import os\n'), ((7142, 7158), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (7156, 7158), False, 'from google.cloud import storage\n'), ((5813, 5847), 'base64.b64decode', 'base64.b64decode', (["model['content']"], {}), "(model['content'])\n", (5829, 5847), False, 'import base64\n'), ((6512, 6546), 'base64.b64decode', 'base64.b64decode', (["model['content']"], {}), "(model['content'])\n", (6528, 6546), False, 'import base64\n'), ((2913, 2937), 're.match', 're.match', (['""".*/$"""', 'b.name'], {}), "('.*/$', b.name)\n", (2921, 2937), False, 'import re\n'), ((4487, 4511), 're.match', 're.match', (['""".*/$"""', 'b.name'], {}), "('.*/$', b.name)\n", (4495, 4511), False, 'import re\n'), ((5961, 5989), 'json.dumps', 'json.dumps', (["model['content']"], {}), "(model['content'])\n", (5971, 5989), False, 'import json\n')]
|
import logging
from os.path import dirname, abspath
from os import chdir
moduleDir = dirname(abspath(__file__)) + '/'
chdir(moduleDir)
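# Note: the relative imports below only resolve when this file runs as part of
# its package (e.g. `python -m <package>.<module>`); executing it directly as a
# script would raise ImportError.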
if __name__ == '__main__':
from .config import logging_format
from .hp_server import serve_forever
logging.basicConfig(format=logging_format, level=logging.DEBUG)
logging.debug('Working dir set to %s', moduleDir)
serve_forever()
|
[
"os.path.abspath",
"logging.debug",
"os.chdir",
"logging.basicConfig"
] |
[((119, 135), 'os.chdir', 'chdir', (['moduleDir'], {}), '(moduleDir)\n', (124, 135), False, 'from os import chdir\n'), ((253, 316), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'logging_format', 'level': 'logging.DEBUG'}), '(format=logging_format, level=logging.DEBUG)\n', (272, 316), False, 'import logging\n'), ((321, 370), 'logging.debug', 'logging.debug', (['"""Working dir set to %s"""', 'moduleDir'], {}), "('Working dir set to %s', moduleDir)\n", (334, 370), False, 'import logging\n'), ((94, 111), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'from os.path import dirname, abspath\n')]
|
# Generated by Django 3.1.7 on 2021-04-01 06:22
from django.conf import settings
from django.db import migrations, models
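# This auto-generated migration alters Leaderboard.participants: blank=True
# makes the field optional, and related_name exposes the reverse accessor as
# 'leaderboards_participated'.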
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0005_auto_20210401_0121'),
]
operations = [
migrations.AlterField(
model_name='leaderboard',
name='participants',
field=models.ManyToManyField(blank=True, related_name='leaderboards_participated', to=settings.AUTH_USER_MODEL),
),
]
|
[
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField"
] |
[((194, 251), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (225, 251), False, 'from django.db import migrations, models\n'), ((443, 552), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""leaderboards_participated"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='leaderboards_participated',\n to=settings.AUTH_USER_MODEL)\n", (465, 552), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
### ATTENTION ###
# Before running, install pyfirmata:
# pip install pyfirmata --user
# And flash the Arduino with the ArduinoIDE sketch found under:
# File -> Examples -> Firmata -> StandardFirmata
### IMPORTANT ###
# The achieved sampling frequency is only approximate
# imports
import pyfirmata
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from loguru import logger
import pandas as pd
#-------------------------------#-------------------------------#-------------------------------#-------------------------------
### START OF ALLOWED CHANGES ###
#-------------------------------
# Desired controller
#controlUse = "sc" # No controller
controlUse = "cavlr1" #Cavlr 1st ord ********** Lead controller via root locus for the first-order model
#controlUse = "catlr1" #Catlr 1st ord ********** Lag controller via root locus for the first-order model
#controlUse = "cavatlr1" #Cavatlr 1st ord ********** Lead-lag controller via root locus for the first-order model
#controlUse = "cavrf1" #Cavrf 1st ord ********** Lead controller via frequency response for the first-order model
#controlUse = "catrf1" #Catrf 1st ord ********** Lag controller via frequency response for the first-order model
#controlUse = "cavlr2" #Cavlr 2nd ord ********** Lead controller via root locus for the second-order model
#controlUse = "catlr2" #Catlr 2nd ord ********** Lag controller via root locus for the second-order model
#controlUse = "cavatlr2" #Cavatlr 2nd ord ********** Lead-lag controller via root locus for the second-order model
#controlUse = "cavrf2" #Cavrf 2nd ord ********** Lead controller via frequency response for the second-order model
#controlUse = "catrf2" #Catrf 2nd ord ********** Lag controller via frequency response for the second-order model
#-------------------------------
# Arduino configuration
"""
x:n:t -> pin configuration format, where:
x - the letter identifying the pin
n - the pin number
t - the mode the pin will be used in
p - PWM
i - input
o - output
"""
serialPort = '/dev/ttyACM0' # Port the Arduino is connected to
outPin = 'd:9:p' # PWM output pin
inPin = 'a:0:i' # Pin used for reading
#-------------------------------
# data for saving the image
dpiImage = 100 # Dpi of the image
srcImage = './../../Controles/PRBS-FS10/ord1/real/graph-'+controlUse+'-5Xkc-zero 2Xsigma-esp 0.1.svg' # Path and name of the image to save; set to None to skip saving
#srcImage = None
formatImage = "svg" # Format of the image to save
width = 1920 # Width in px (pixels) of the saved image
height = 1080 # Height in px (pixels) of the saved image
#-------------------------------
# data for saving the csv of the measurements
srcFile = './../../Controles/PRBS-FS10/ord1/real/data-'+controlUse+'-5Xkc-zero 2Xsigma-esp 0.1.csv' # Path and name of the csv to save; set to None to skip saving
#srcFile = None
#-------------------------------
# sampling frequency
freq = 10 # In samples per second (Hz)
#-------------------------------
# Total number of samples
N = 400 # Total samples
#-------------------------------
# input vector (yr)
qtdTrocas = 8 # How many times the signal changes level
sizeStep = int(N/qtdTrocas) # Compute the window size
# Build the input vector yr as a sequence of steps
yr = np.zeros(sizeStep)
yr = np.append(yr,4*np.ones(sizeStep))
yr = np.append(yr, np.zeros(sizeStep))
yr = np.append(yr,5*np.ones(sizeStep))
yr = np.append(yr,1*np.ones(sizeStep))
yr = np.append(yr,2*np.ones(sizeStep))
yr = np.append(yr,0*np.ones(sizeStep))
yr = np.append(yr,3*np.ones(sizeStep))
#-------------------------------
# Arduino limits
maxValue = 5 # The Arduino can only read/write up to 5V
minValue = 0 # The Arduino can only read/write down to 0V
#-------------------------------
# Sampling tolerance
erroAcc = 1.15 # how many times above the desired frequency the real one may be
#-------------------------------
# controller coefficients
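# All controllers below share the difference-equation form applied in the
# control loop further down:
#   u[k] = b0*e[k] + b1*e[k-1] + b2*e[k-2] - a1*u[k-1] - a2*u[k-2]
# (a discretized lead/lag compensator; b2 = a2 = 0 reduces it to first order)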
if controlUse == "sc":
controlName = "Sem controlador"
elif controlUse == "cavlr1":
    #******* Cavlr 1st ord ********** Lead controller via root locus for the first-order model
controlName = "Controlador avanço - LR"
# Kc= Kc
# b0 = 2.244
# b1 = -1.964
# b2 = 0
# a1 = -0.4845
# a2 = 0
# Kc= Kc # fs = 100
# b0 = 2.758
# b1 = -2.722
# b2 = 0
# a1 = -0.9329
# a2 = 0
    # Kc= Kc and zero at 3/4*sigma
# b0 = 1.13
# b1 = -1.022
# b2 = 0
# a1 = -0.6931
# a2 = 0
# Kc= 5*Kc
#b0 = 11.23
#b1 = -9.823
#b2 = 0
#a1 = -0.4845
#a2 = 0
# Kc= 10*Kc
# b0 = 22.44
# b1 = -19.64
# b2 = 0
# a1 = -0.4845
# a2 = 0
# Kc= 10*Kc # fs = 100
# b0 = 27.58
# b1 = -27.22
# b2 = 0
# a1 = -0.9329
# a2 = 0
# Kc= 10*Kc # zero = 3/4*sigma
# b0 = 11.3
# b1 = -10.22
# b2 = 0
# a1 = -0.6931
# a2 = 0
# Kc= 10*Kc # zero = *sigma/3
# b0 = 4.89
# b1 = -4.652
# b2 = 0
# a1 = -0.8415
# a2 = 0
# Kc= 5*Kc # zero = 2*sigma # e_esp = 0.1
b0 = 6.151
b1 = -4.704
b2 = 0
a1 = -0.6033
a2 = 0
elif controlUse == "cavlr2":
    #******* Cavlr 2nd ord ********** Lead controller via root locus for the second-order model
controlName = "Controlador avanço - LR"
    # # Placing the zero at sigma*2
# b0 = 3.882
# b1 = -1.664
# b2 = 0
# a1 = -0.0007006
# a2 = 0
    # Placing the zero at sigma*3
# b0 = 4.05
# b1 = -1.012
# b2 = 0
# a1 = -0.02119
# a2 = 0
    # Placing the zero at sigma*4.5
b0 = 4.061
b1 = -0.214
b2 = 0
a1 = 0.0184
a2 = 0
elif controlUse == "cavrf1":
    #******* Cavrf 1st ord ********** Lead controller via frequency response for the first-order model
controlName = "Controlador avanço - RF"
# b0 = 31.73
# b1 = 20.49
# b2 = 0
# a1 = 0.09445
# a2 = 0
    # Kc = Kc/2 -> became more unstable
# b0 = 12.56
# b1 = 5.048
# b2 = 0
# a1 = -0.2618
# a2 = 0
    # changing the expected error to 0.1
# b0 = 1.118
# b1 = -0.4326
# b2 = 0
# a1 = -0.8546
# a2 = 0
    # changing the expected error to 0.03
b0 = 10.7
b1 = -5.587
b2 = 0
a1 = -0.6781
a2 = 0
elif controlUse == "cavrf2":
    #******* Cavrf 2nd ord ********** Lead controller via frequency response for the second-order model
controlName = "Controlador avanço - RF"
b0 = 0.4338
b1 = -0.1238
b2 = 0
a1 = -0.9367
a2 = 0
elif controlUse == "catlr1":
    #******* Catlr 1st ord ********** Lag controller via root locus for the first-order model
controlName = "Controlador atraso - LR"
b0 = 0.825
b1 = -0.651
b2 = 0
a1 = -0.997
a2 = 0
elif controlUse == "catlr2":
    #******* Catlr 2nd ord ********** Lag controller via root locus for the second-order model
controlName = "Controlador atraso - LR"
b0 = 4.752
b1 = -3.447
b2 = 0
a1 = -0.996
a2 = 0
elif controlUse == "catrf1":
    #******* Catrf 1st ord ********** Lag controller via frequency response for the first-order model
# b0 = 29.22
# b1 = -15.25
# b2 = 0
# a1 = -0.7072
# a2 = 0
    # changing the expected error to 0.1
b0 = 1.086
b1 = -0.5667
b2 = 0
a1 = -0.8912
a2 = 0
controlName = "Controlador atraso - RF"
elif controlUse == "catrf2":
    #******* Catrf 2nd ord ********** Lag controller via frequency response for the second-order model
controlName = "Controlador atraso - RF"
b0 = 13.91
b1 = 7.194
b2 = 0
a1 = -0.3594
a2 = 0
elif controlUse == "cavatlr1":
    #******* Cavatlr 1st ord ********** Lead-lag controller via root locus for the first-order model
controlName = "Controlador avanço-atraso - LR"
# b0 = 2.823
# b1 = -4.129
# b2 = 1.452
# a1 = -1.481
# a2 = 0.483
    # Placing the lead controller zero at sigma/2
# b0 = 1.133
# b1 = -1.29
# b2 = 0.2146
# a1 = -1.79
# a2 = 0.7911
    # Placing the lead controller zero at sigma*3/4
b0 = 1.583
b1 = -2.105
b2 = 0.6091
a1 = -1.69
a2 = 0.691
elif controlUse == "cavatlr2":
    #******* Cavatlr 2nd ord ********** Lead-lag controller via root locus for the second-order model
controlName = "Controlador avanço-atraso - LR"
    # placing the zero at sigma * 4.5
b0 = 4.355
b1 = -4.072
b2 = 0.2026
a1 = -0.9776
a2 = -0.01833
elif controlUse == "cavatrf1":
#****************
    #******* Cavatrf 1st ord ********** Lead-lag controller via frequency response for the first-order model
controlName = "Controlador avanço-atraso - RF"
elif controlUse == "cavatrf2":
#****************
    #******* Cavatrf 2nd ord ********** Lead-lag controller via frequency response for the second-order model
controlName = "Controlador avanço-atraso - RF"
else:
controlName = "Sem controlador"
### END OF ALLOWED CHANGES ###
#-------------------------------#-------------------------------#-------------------------------#-------------------------------
# DEBUG configuration
debugOn = False
# Arduino configuration
logger.info(f"Configurando conexão com o arduino...")
board = pyfirmata.Arduino(serialPort)
pwmPin = board.get_pin(outPin)
readPin = board.get_pin(inPin)
it = pyfirmata.util.Iterator(board)
it.start()
readPin.enable_reporting()
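# pyfirmata needs a running Iterator thread to keep draining the serial input;
# without it analog reads return stale values and the board's buffer can
# overflow. enable_reporting() makes the board stream the analog pin's values.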
time.sleep(0.5) # wait for the settings to take effect
# Build the zeroed output vector (y), plus the error and control vectors
logger.info(f"Inicializando vetoros utilizados...")
y = np.zeros(len(yr)) # output vector
e = np.zeros(len(yr)) # error vector
u = np.zeros(len(yr)) # control vector
#--**----**----**----**----**----**----**----**----**----**--
# Normalize the input data
yr = yr/maxValue
# Arduino operation loop
logger.info(f"Tempo total estimado para executar as medições: {len(yr)/freq}")
t_ini = time.time() # record the start time
contLevel = 0 # Initialize the counter of yr levels reached
for i in range(2,len(yr)):
    t_ini_loop = time.time() # record the iteration start time
#------------------------------
    aux = readPin.read() # read from the analog port
if(aux != None):
        y[i] = float(aux) # store it in the result vector
#------------------------------
    e[i] = yr[i] - y[i] # compute the error
#------------------------------
    # control loop
if controlName != "Sem controlador":
u[i] = b0* e[i] + b1*e[i-1] + b2*e[i-2] - a1*u[i-1] - a2*u[i-2]
else:
u[i] = yr[i]
    # clamp the signal to the range accepted by the Arduino
if(u[i] > 1):
u[i] = 1
elif(u[i] < minValue):
u[i] = minValue
#------------------------------
    pwmPin.write(u[i]) # write to the PWM
#------------------------------
if debugOn:
logger.debug(f"{i}:In: {y[i]*maxValue}")
logger.debug(f"{i}:PWM: {u[i]*maxValue}")
logger.debug(f"{i}:yr: {yr[i]*maxValue}")
else:
if(i > contLevel):
contLevel += sizeStep
logger.info(f"Já foram realizados {contLevel/sizeStep}/{qtdTrocas} trocas de niveis!")
#------------------------------
try:
        time.sleep((1/freq)-(time.time() - t_ini_loop)) # delay to wait out the sampling period
except:
pass
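    # (If the loop overran the sampling period, the sleep argument above is
    # negative and time.sleep raises ValueError, which is deliberately ignored.)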
pwmPin.write(0) # Turn off the motor
t_end = time.time() # record the end time
#--**----**----**----**----**----**----**----**----**----**--
board.exit() # Close the connection to the Arduino
# Display information
logger.info(f"Tempo total gasto para executar as medições: {t_end-t_ini}")
logger.info(f"frequencia real: {len(yr)/(t_end-t_ini)}")
if len(yr)/(t_end-t_ini) > erroAcc * freq:
logger.warning(f"frequencia real {len(yr)/(t_end-t_ini)} está superioa a {erroAcc} vezes acima da desejada {freq}")
logger.warning(f"Encerrando execução")
exit()
# Assemble the output data
yr = yr.astype(np.float64) * maxValue
u = u.astype(np.float64) * maxValue
y = y.astype(np.float64) * maxValue
e = e.astype(np.float64) * maxValue
logger.info(f"Montando data frame")
data = pd.DataFrame()
data.loc[:, 'yr'] = yr
data.loc[:, 'u'] = u
data.loc[:, 'y'] = y
data.loc[0, 'fs'] = freq
if srcFile != None:
logger.info(f"Salvando csv de dados...")
data.to_csv(srcFile, index=False)
# Build the result plot
x = [i for i,a in enumerate(yr)] # Build the x axis of the plots
sizeImage = (width/dpiImage,height/dpiImage)
fig, axs = plt.subplots(3, sharex=True, figsize=sizeImage, dpi=dpiImage)
axs[0].plot(x,y , color='red', linewidth=4,label='y')
axs[0].plot(x,yr,'--', color='blue', linewidth=2, label='yr')
axs[0].set_ylim(-0.5,5.5)
axs[0].set_title('Dados Lidos - y(k)', fontsize=21)
axs[0].legend(loc="upper right")
axs[0].grid(color='gray')
axs[1].plot(x,u,'--', color='green', linewidth=4)
axs[1].set_ylim(-0.5,5.5)
axs[1].set_title('Saída controlador - u(k)', fontsize=21)
axs[1].grid(color='gray')
axs[2].plot(x,e, color='black', linewidth=4)
axs[2].set_ylim(-5.5,5.5)
axs[2].set_title('Erro - e(k)', fontsize=21)
axs[2].grid(color='gray')
plt.suptitle(controlName, fontsize=26)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
for ax in axs.flat:
ax.set_ylabel('Voltagem (V)', fontsize=16)
ax.set_xlabel('Amostras (k)', fontsize=18)
for ax in axs.flat:
ax.label_outer()
if srcImage != None:
logger.info(f"Salvando grafico...")
plt.savefig(srcImage, format=formatImage)
plt.show()
logger.info(f"Encerrando execução!")
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"pyfirmata.util.Iterator",
"loguru.logger.warning",
"numpy.zeros",
"numpy.ones",
"time.sleep",
"time.time",
"loguru.logger.info",
"pyfirmata.Arduino",
"matplotlib.pyplot.subplots_adjust",
"loguru.logger.debug",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((4782, 4800), 'numpy.zeros', 'np.zeros', (['sizeStep'], {}), '(sizeStep)\n', (4790, 4800), True, 'import numpy as np\n'), ((11161, 11214), 'loguru.logger.info', 'logger.info', (['f"""Configurando conexão com o arduino..."""'], {}), "(f'Configurando conexão com o arduino...')\n", (11172, 11214), False, 'from loguru import logger\n'), ((11237, 11266), 'pyfirmata.Arduino', 'pyfirmata.Arduino', (['serialPort'], {}), '(serialPort)\n', (11254, 11266), False, 'import pyfirmata\n'), ((11376, 11406), 'pyfirmata.util.Iterator', 'pyfirmata.util.Iterator', (['board'], {}), '(board)\n', (11399, 11406), False, 'import pyfirmata\n'), ((11445, 11460), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (11455, 11460), False, 'import time\n'), ((11572, 11623), 'loguru.logger.info', 'logger.info', (['f"""Inicializando vetoros utilizados..."""'], {}), "(f'Inicializando vetoros utilizados...')\n", (11583, 11623), False, 'from loguru import logger\n'), ((12160, 12171), 'time.time', 'time.time', ([], {}), '()\n', (12169, 12171), False, 'import time\n'), ((14045, 14056), 'time.time', 'time.time', ([], {}), '()\n', (14054, 14056), False, 'import time\n'), ((14313, 14389), 'loguru.logger.info', 'logger.info', (['f"""Tempo total gasto para executar as medições: {t_end - t_ini}"""'], {}), "(f'Tempo total gasto para executar as medições: {t_end - t_ini}')\n", (14324, 14389), False, 'from loguru import logger\n'), ((14903, 14938), 'loguru.logger.info', 'logger.info', (['f"""Montando data frame"""'], {}), "(f'Montando data frame')\n", (14914, 14938), False, 'from loguru import logger\n'), ((14961, 14975), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14973, 14975), True, 'import pandas as pd\n'), ((15387, 15448), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)', 'figsize': 'sizeImage', 'dpi': 'dpiImage'}), '(3, sharex=True, figsize=sizeImage, dpi=dpiImage)\n', (15399, 15448), True, 'import matplotlib.pyplot as plt\n'), ((16009, 16047), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['controlName'], {'fontsize': '(26)'}), '(controlName, fontsize=26)\n', (16021, 16047), True, 'import matplotlib.pyplot as plt\n'), ((16048, 16143), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'None', 'bottom': 'None', 'right': 'None', 'top': 'None', 'wspace': 'None', 'hspace': '(0.3)'}), '(left=None, bottom=None, right=None, top=None, wspace=\n None, hspace=0.3)\n', (16067, 16143), True, 'import matplotlib.pyplot as plt\n'), ((16403, 16413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16411, 16413), True, 'import matplotlib.pyplot as plt\n'), ((16415, 16451), 'loguru.logger.info', 'logger.info', (['f"""Encerrando execução!"""'], {}), "(f'Encerrando execução!')\n", (16426, 16451), False, 'from loguru import logger\n'), ((4885, 4903), 'numpy.zeros', 'np.zeros', (['sizeStep'], {}), '(sizeStep)\n', (4893, 4903), True, 'import numpy as np\n'), ((12406, 12417), 'time.time', 'time.time', ([], {}), '()\n', (12415, 12417), False, 'import time\n'), ((14612, 14650), 'loguru.logger.warning', 'logger.warning', (['f"""Encerrando execução"""'], {}), "(f'Encerrando execução')\n", (14626, 14650), False, 'from loguru import logger\n'), ((15101, 15141), 'loguru.logger.info', 'logger.info', (['f"""Salvando csv de dados..."""'], {}), "(f'Salvando csv de dados...')\n", (15112, 15141), False, 'from loguru import logger\n'), ((16320, 16355), 'loguru.logger.info', 'logger.info', (['f"""Salvando grafico..."""'], {}), "(f'Salvando grafico...')\n", (16331, 16355), False, 'from loguru import logger\n'), ((16360, 16401), 'matplotlib.pyplot.savefig', 'plt.savefig', (['srcImage'], {'format': 'formatImage'}), '(srcImage, format=formatImage)\n', (16371, 16401), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4851), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (4841, 4851), True, 'import numpy as np\n'), ((4938, 4955), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (4945, 4955), True, 'import numpy as np\n'), ((4990, 5007), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (4997, 5007), True, 'import numpy as np\n'), ((5042, 5059), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (5049, 5059), True, 'import numpy as np\n'), ((5094, 5111), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (5101, 5111), True, 'import numpy as np\n'), ((5146, 5163), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (5153, 5163), True, 'import numpy as np\n'), ((13423, 13465), 'loguru.logger.debug', 'logger.debug', (['f"""{i}:In: {y[i] * maxValue}"""'], {}), "(f'{i}:In: {y[i] * maxValue}')\n", (13435, 13465), False, 'from loguru import logger\n'), ((13472, 13515), 'loguru.logger.debug', 'logger.debug', (['f"""{i}:PWM: {u[i] * maxValue}"""'], {}), "(f'{i}:PWM: {u[i] * maxValue}')\n", (13484, 13515), False, 'from loguru import logger\n'), ((13522, 13565), 'loguru.logger.debug', 'logger.debug', (['f"""{i}:yr: {yr[i] * maxValue}"""'], {}), "(f'{i}:yr: {yr[i] * maxValue}')\n", (13534, 13565), False, 'from loguru import logger\n'), ((13647, 13745), 'loguru.logger.info', 'logger.info', (['f"""Já foram realizados {contLevel / sizeStep}/{qtdTrocas} trocas de niveis!"""'], {}), "(\n f'Já foram realizados {contLevel / sizeStep}/{qtdTrocas} trocas de niveis!'\n )\n", (13658, 13745), False, 'from loguru import logger\n'), ((13808, 13819), 'time.time', 'time.time', ([], {}), '()\n', (13817, 13819), False, 'import time\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Download and install Go on Linux, list all available versions on the
Go website, select version to install and pass it as an argument.
- Go is an open source programming language:
https://golang.org/doc/copyright.html
- Linux is a family of open-source Unix-like operating systems based on
the Linux kernel:
https://www.kernel.org/category/about.html
- Python is an interpreted, high-level, dynamically typed,
garbage-collected and general-purpose programming language:
https://en.wikipedia.org/wiki/Python_Software_Foundation_License
https://docs.python.org/3/license.html
Attributes:
chunk_size (int): Chunks size of the package, required for tqdm
go_dl_base_url (str): Base Go download URL
go_local (str): Local download folder on the filesystem
go_home (str): /home/user/go (home go folder for projects)
go_folders (tuple): /home/user/go/('src', 'pkg', 'bin')
go_install_home (str): '/usr/local' (go installation folder)
"""
# TODO: Implement a separate function for the argparse with return
# TODO: Implement color print based on message type - green for ok,
# red for error messages and blue for informational messages
# TODO: Validate format of the input parameter for the Go version - must
# follow x.y, x.yy, x.y.z or x.yy.z pattern, where x y and z are digits
# 0 to 9
# TODO: Add argparse argument '--action checkgo' to check whether go is
# already installed and if so - print the currently installed version
__author__ = '<NAME>'
__version__ = '1.0.11'
__maintainer__ = '<NAME>'
__status__ = 'Development'
__license__ = 'MIT'
import os
import time
import subprocess
from os import environ
from pathlib import Path
from typing import List, Any
from functools import partial
try:
import argparse
import requests
import httplib2
from tqdm import tqdm
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
except ModuleNotFoundError as err:
exit(f'Error: {err}, run \'pip3 install -r requirements.txt\'')
go_dl_base_url: str = 'https://golang.org/dl/'
go_local: str = '/tmp/'
chunk_size: int = 1024
go_home: str = str(Path.home()) + '/go/'
go_folders: tuple = ('src', 'pkg', 'bin')
go_install_home: str = '/usr/local'
current_shell: str = environ['SHELL']
def check_exists_dl_folder(folderpath):
"""
Check if the local download folder exists.
Args:
folderpath (string): Path to the download folder
"""
if not os.path.exists(folderpath):
print(f'The desired download folder {folderpath} does not exist')
exit(1)
def get_go_versions(url):
"""
Display all available Go packages for Linux.
Args:
url (string): Base Go download URL
Returns:
go_linux_amd64_versions: All Go versions available on the site
"""
go_linux_amd64_versions = []
http = httplib2.Http()
status, response = http.request(url)
assert isinstance(response, object)
for link in BeautifulSoup(response, parse_only=SoupStrainer('a'),
features="html.parser"):
if link.has_attr('href'):
if 'linux-amd64' in link['href']:
                # str.lstrip/str.rstrip strip character sets, not substrings,
                # which mangles versions ending in e.g. '4' or '6'; remove the
                # exact prefix/suffix instead
                version = link['href'].replace('/dl/go', '', 1).replace(
                    '.linux-amd64.tar.gz', '')
                go_linux_amd64_versions.append(version)
return go_linux_amd64_versions
def get_go_links(url):
"""
Display all available Go download links with packages for Linux
on the Go website.
Args:
url (string): Base Go download URL
Returns:
go_linux_amd64_links: All Go links available to download
"""
go_linux_amd64_links = []
http = httplib2.Http()
status, response = http.request(url)
for link in BeautifulSoup(response, parse_only=SoupStrainer('a'),
features="html.parser"):
if link.has_attr('href'):
if 'linux-amd64' in link['href']:
go_linux_amd64_links.append(url + link['href'].lstrip('/dl/'))
return go_linux_amd64_links
def get_go_link(url, version):
"""
Call this function only when specific version is required.
Args:
url (string): Base Go download URL
version (int): Desired Go version in formats x.y, x.y.z, x.yy.z
Returns:
go_linux_amd64_dl_link: Go link with desired version selected
"""
go_linux_amd64_dl_link: List[Any] = []
http = httplib2.Http()
status, response = http.request(url)
for link in BeautifulSoup(response, parse_only=SoupStrainer('a'),
features="html.parser"):
if link.has_attr('href'):
if 'linux-amd64' in link['href'] and version in link['href']:
go_linux_amd64_dl_link = url + link['href'].lstrip('/dl/')
return go_linux_amd64_dl_link
def get_go(url, location):
"""
Download and install desired Go package version for Linux, untar
the downloaded package and place the contents in /usr/local/go.
Args:
url (string): URL with desired go package
location (string): Local download folder on the filesystem
"""
r = requests.get(url, stream=True)
total_size = int(r.headers['content-length'])
filename = url.split('/')[-1]
tar_path = location + filename
# 1. Download the desired Go archive
with open(location + filename, 'wb') as f:
for data in tqdm(iterable=r.iter_content(chunk_size=chunk_size),
total=total_size / chunk_size, unit='KB'):
f.write(data)
print(f'Download complete, archive saved to {tar_path}')
# 2. Extract the downloaded archive,
# check if Go is installed - exit if /usr/local/go is present
if os.path.exists('/usr/local/go'):
exit('go is installed')
print(f'Extracting the archive contents from {tar_path} and '
f'installing Go in /usr/local/go/, make sure that your user is in '
f'the sudoers list')
try:
os.system(f'sudo tar -C {go_install_home} -xzf {tar_path}')
except IOError as e:
print(f'Error {e}, could not open {tar_path}')
exit(1)
def ensure_go_home(root_dir, subfolders):
"""
Create go folders /home/<user>/go/{src,pkg,bin}.
Args:
root_dir: /home/<user>/go/
subfolders: src, pkg, bin (provided in set)
"""
concat_path = partial(os.path.join, root_dir)
mkdirs = partial(os.makedirs, exist_ok=True)
for path in map(concat_path, subfolders):
mkdirs(path)
def append_gopath_to_env(envfile: str):
"""
Append the go path to the user's shell profile.
Args:
envfile (str): path to the env file, auto generated
"""
# open the current active shell source file and append the go path
print('Appending go path to $PATH')
with open(envfile, 'a') as f:
f.write('\n' + 'export PATH=$PATH:/usr/local/go/bin' + '\n')
f.close()
    # source the updated envfile; note this runs in a child shell, so the new
    # PATH only applies to shells that re-read the profile file afterwards
    subprocess.call(f'. {envfile}', shell=True)
def handle_os_environment():
"""
Update ENV .bashrc or .zshrc, '/etc/profile'.
"""
glob_profile_config: str = '/etc/profile'
user_home: str = str(Path.home()) + '/'
if 'zsh' in current_shell:
shell_rc: str = user_home + '.zshrc'
print(f'Current shell config: {shell_rc}')
append_gopath_to_env(shell_rc)
elif 'bash' in current_shell:
shell_rc: str = user_home + '.bashrc'
print(f'Current shell config: {shell_rc}')
append_gopath_to_env(shell_rc)
else:
print('Shell config file is unknown')
print(f'Global shell config: {glob_profile_config}')
print('Verify installation by running: \'go version\' from your terminal')
def main():
"""
Main function, entry point of program, argparser is used here in
combination with the functions defined in this module.
"""
download_url = None
desired_version = None
parser = argparse.ArgumentParser(description='List available Go packages '
'for Linux on the official '
'Go website. Install the '
'selected package version '
'from the list.')
parser.add_argument('--action', '-a', metavar='<action>',
choices=['listgoversions', 'listgolinks', 'installgo'],
action='store', dest="action",
default="listgoversions",
help='[listgoversions, listgolinks, installgo] - the '
'action that will be taken. "listgoversions" '
'will list all available Go versions for Linux '
'on the Go website. "listgolinks" will list all '
'available Go download links on the Go website. '
'"installgo" will install the selected Go '
'version passed as a parameter value. Default: '
'listgoversions')
parser.add_argument('--version', '-v', metavar='<version>', action='store',
dest="version",
help='Specifies the version of Go to be installed, '
'for example: 1.15.2')
args = parser.parse_args()
# List all available Go versions on the Go website
if args.action == 'listgoversions':
go_versions: list = get_go_versions(go_dl_base_url)
print('Available Go versions for Linux:')
# start from the second (1), not first (0) element, because the
# value of the first (0) can have duplicates - 1.15 1.15 gets
# parsed twice
for version in range(1, len(go_versions)):
print('Go ver:', go_versions[version])
exit(0)
# List all available Go download links on the Go website
if args.action == 'listgolinks':
go_links: list = get_go_links(go_dl_base_url)
print('Available Go download links for Linux:')
# start from the second (1), not first (0) element, because the
        # value of the first (0) can have duplicates with the
# second (1) element
for link in range(1, len(go_links)):
print('Download link for Go ver:', go_links[link])
exit(0)
# Download and install the desired Go version from the Go website
if args.action == 'installgo':
# First check if the download folder is present - 'go_local'
check_exists_dl_folder(go_local)
if args.version is not None:
desired_version = args.version
download_url = get_go_link(go_dl_base_url, desired_version)
else:
print('Please provide Go version as a value: 1.15.2')
exit(1)
print(
f'Selected Go version: {desired_version}, downloading Go '
f'package from: {download_url}')
setup_start = time.perf_counter()
get_go(download_url, go_local)
ensure_go_home(go_home, go_folders)
handle_os_environment()
setup_end = time.perf_counter()
print(f'Completed in {round(setup_end - setup_start, 2)} second(s)')
if __name__ == '__main__':
main()
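# Illustrative invocations (the script filename is hypothetical):
#   python3 install_go.py --action listgoversions
#   python3 install_go.py --action listgolinks
#   python3 install_go.py --action installgo --version 1.15.2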
|
[
"httplib2.Http",
"functools.partial",
"argparse.ArgumentParser",
"pathlib.Path.home",
"os.path.exists",
"os.system",
"time.perf_counter",
"subprocess.call",
"bs4.SoupStrainer",
"requests.get"
] |
[((2869, 2884), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (2882, 2884), False, 'import httplib2\n'), ((3643, 3658), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (3656, 3658), False, 'import httplib2\n'), ((4394, 4409), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (4407, 4409), False, 'import httplib2\n'), ((5113, 5143), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (5125, 5143), False, 'import requests\n'), ((5695, 5726), 'os.path.exists', 'os.path.exists', (['"""/usr/local/go"""'], {}), "('/usr/local/go')\n", (5709, 5726), False, 'import os\n'), ((6337, 6368), 'functools.partial', 'partial', (['os.path.join', 'root_dir'], {}), '(os.path.join, root_dir)\n', (6344, 6368), False, 'from functools import partial\n'), ((6382, 6417), 'functools.partial', 'partial', (['os.makedirs'], {'exist_ok': '(True)'}), '(os.makedirs, exist_ok=True)\n', (6389, 6417), False, 'from functools import partial\n'), ((6936, 6979), 'subprocess.call', 'subprocess.call', (["['.', envfile]"], {'shell': '(True)'}), "(['.', envfile], shell=True)\n", (6951, 6979), False, 'import subprocess\n'), ((7919, 8084), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""List available Go packages for Linux on the official Go website. Install the selected package version from the list."""'}), "(description=\n 'List available Go packages for Linux on the official Go website. Install the selected package version from the list.'\n )\n", (7942, 8084), False, 'import argparse\n'), ((2153, 2164), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (2162, 2164), False, 'from pathlib import Path\n'), ((2475, 2501), 'os.path.exists', 'os.path.exists', (['folderpath'], {}), '(folderpath)\n', (2489, 2501), False, 'import os\n'), ((5952, 6011), 'os.system', 'os.system', (['f"""sudo tar -C {go_install_home} -xzf {tar_path}"""'], {}), "(f'sudo tar -C {go_install_home} -xzf {tar_path}')\n", (5961, 6011), False, 'import os\n'), ((11011, 11030), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11028, 11030), False, 'import time\n'), ((11166, 11185), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11183, 11185), False, 'import time\n'), ((3018, 3035), 'bs4.SoupStrainer', 'SoupStrainer', (['"""a"""'], {}), "('a')\n", (3030, 3035), False, 'from bs4 import SoupStrainer\n'), ((3752, 3769), 'bs4.SoupStrainer', 'SoupStrainer', (['"""a"""'], {}), "('a')\n", (3764, 3769), False, 'from bs4 import SoupStrainer\n'), ((4503, 4520), 'bs4.SoupStrainer', 'SoupStrainer', (['"""a"""'], {}), "('a')\n", (4515, 4520), False, 'from bs4 import SoupStrainer\n'), ((7148, 7159), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (7157, 7159), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .views import FriendViewSet, FriendshipRequestViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'friends', FriendViewSet, base_name='friends')
router.register(r'friendrequests', FriendshipRequestViewSet, base_name='friendrequests')
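# Note: Django REST framework 3.9 deprecated the 'base_name' argument in favor
# of 'basename', and DRF 3.11 removed it; keep 'base_name' only on older DRF
# releases.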
urlpatterns = router.urls
|
[
"rest_framework.routers.DefaultRouter"
] |
[((186, 201), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (199, 201), False, 'from rest_framework.routers import DefaultRouter\n')]
|
from collections import defaultdict
from typing import Any
from django.db.models import Model
from rest_framework.exceptions import APIException, ValidationError
from rest_framework.fields import DateTimeField, IntegerField
from rest_framework.serializers import ModelSerializer, ListSerializer
class NestedModelSerializer(ModelSerializer):
class Meta:
model: Model
def _prepare_relational_fields(self) -> None:
        # We only need to worry about many-to-many fields, as Django handles one-to-many fields by itself
many_to_many = defaultdict(list)
for field_name, field_value in self.initial_data.items():
if field_name not in self.fields.fields:
raise ValidationError(f'Field not found ({field_name})')
field = self.fields.fields[field_name]
# Insertion on read only field will cause security issues
if field.read_only:
raise ValidationError(f'Read only field ({field_name})')
# Detect relation fields and append them to the list
if isinstance(field, ListSerializer): # It's a many to many field with new records
many_to_many[field_name] = []
for record in field_value:
obj = field.child.Meta.model.objects.create(**record)
many_to_many[field_name].append(obj.pk)
elif field_name.endswith('_ids'): # It's a many to many field with preexisted records
_field_name = field_name[:field_name.rfind('_ids')]
many_to_many[_field_name] += field_value
elif isinstance(field, ModelSerializer):
if hasattr(field, 'Meta'): # It's a one to many record with new record
obj = field.Meta.model.objects.create(**field_value)
self.validated_data[field_name + '_id'] = obj.pk
self.validated_data.pop(field_name)
else:
raise APIException('Meta not found')
# Assign our list to an attribute for future reference
self.many_to_many_data = many_to_many
# Remove all the relation fields from "validated_data" and make it safe to use
for field_name in many_to_many:
self.validated_data.pop(field_name)
def _save_none_relational_fields(self) -> None:
# Simply call Django "create" function with "validated_data" as previously we extracted all relations fields
instance = self.Meta.model.objects.create(**self.validated_data)
# It's really important to fill "self.instance" with new value, hence future calls refer to the right values
self.instance = instance
def _update_none_relational_fields(self) -> None:
# If we set new values to our attributes and then call "save" method, it will save changes into database
for field_name, field_value in self.validated_data.items():
setattr(self.instance, field_name, field_value)
self.instance.save()
    def _save_or_update_many_to_many_fields(self, update: bool = False) -> None:
        # "update" defaults to False so plain saves skip the clear() below;
        # "_prepare_relational_fields" has to be called before this method, otherwise an "AttributeError" will be
        # raised because "many_to_many_data" is defined by "_prepare_relational_fields"
for field_name, field_value in self.many_to_many_data.items():
attr = getattr(self.instance, field_name)
# For update requests, we are going to remove all previous relations and replace them with the new ones
if update:
attr.clear()
for pk in field_value:
obj = attr.model.objects.get(id=pk)
attr.add(obj)
def _save_many_to_many_fields(self) -> None:
# A proxy method
self._save_or_update_many_to_many_fields()
def _update_many_to_many_fields(self) -> None:
# A proxy method
self._save_or_update_many_to_many_fields(update=True)
def create(self, validated_data: dict) -> Any:
self._prepare_relational_fields()
self._save_none_relational_fields()
self._save_many_to_many_fields()
return self.instance
def update(self, instance: Model, validated_data: dict) -> Any:
self.instance = instance
self.validated_data.update(validated_data)
self._prepare_relational_fields()
self._update_many_to_many_fields()
self._update_none_relational_fields()
return self.instance
class SafeDeleteSerializer(ModelSerializer):
deleted_at = DateTimeField(required=False, allow_null=True)
class Meta:
fields = ['deleted_at']
class LogFieldsSerializer(ModelSerializer):
inserted_at = DateTimeField(read_only=True)
updated_at = DateTimeField(read_only=True)
class Meta:
fields = ['inserted_at', 'updated_at']
class CommonFieldsSerializer(SafeDeleteSerializer, LogFieldsSerializer):
id = IntegerField(read_only=True)
class Meta:
fields = [
'id',
*SafeDeleteSerializer.Meta.fields,
*LogFieldsSerializer.Meta.fields,
]
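# A minimal usage sketch (illustrative; the Book/Author models and fields are
# assumptions, not part of this module):
#
# class BookSerializer(NestedModelSerializer):
#     class Meta:
#         model = Book
#         fields = ['id', 'title', 'authors']
#
# A POST body like {"title": "...", "authors": [{"name": "..."}]} then creates
# the nested Author rows and links them through the many-to-many relation.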
|
[
"rest_framework.fields.IntegerField",
"collections.defaultdict",
"rest_framework.fields.DateTimeField",
"rest_framework.exceptions.ValidationError",
"rest_framework.exceptions.APIException"
] |
[((4586, 4632), 'rest_framework.fields.DateTimeField', 'DateTimeField', ([], {'required': '(False)', 'allow_null': '(True)'}), '(required=False, allow_null=True)\n', (4599, 4632), False, 'from rest_framework.fields import DateTimeField, IntegerField\n'), ((4746, 4775), 'rest_framework.fields.DateTimeField', 'DateTimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (4759, 4775), False, 'from rest_framework.fields import DateTimeField, IntegerField\n'), ((4793, 4822), 'rest_framework.fields.DateTimeField', 'DateTimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (4806, 4822), False, 'from rest_framework.fields import DateTimeField, IntegerField\n'), ((4971, 4999), 'rest_framework.fields.IntegerField', 'IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (4983, 4999), False, 'from rest_framework.fields import DateTimeField, IntegerField\n'), ((562, 579), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (573, 579), False, 'from collections import defaultdict\n'), ((722, 772), 'rest_framework.exceptions.ValidationError', 'ValidationError', (['f"""Field not found ({field_name})"""'], {}), "(f'Field not found ({field_name})')\n", (737, 772), False, 'from rest_framework.exceptions import APIException, ValidationError\n'), ((950, 1000), 'rest_framework.exceptions.ValidationError', 'ValidationError', (['f"""Read only field ({field_name})"""'], {}), "(f'Read only field ({field_name})')\n", (965, 1000), False, 'from rest_framework.exceptions import APIException, ValidationError\n'), ((1997, 2027), 'rest_framework.exceptions.APIException', 'APIException', (['"""Meta not found"""'], {}), "('Meta not found')\n", (2009, 2027), False, 'from rest_framework.exceptions import APIException, ValidationError\n')]
|
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import time
class Worker(QRunnable):
'''
Worker thread
'''
@pyqtSlot()
def run(self):
'''
Your code goes in this function
'''
print("Thread start")
time.sleep(5)
print("Thread complete")
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.counter = 0
layout = QVBoxLayout()
self.l = QLabel("Start")
b = QPushButton("DANGER!")
b.pressed.connect(self.oh_no)
layout.addWidget(self.l)
layout.addWidget(b)
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
self.show()
self.timer = QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.recurring_timer)
self.timer.start()
self.threadpool = QThreadPool()
print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
def oh_no(self):
worker = Worker()
self.threadpool.start(worker)
def recurring_timer(self):
self.counter += 1
self.l.setText("Counter: %d" % self.counter)
app = QApplication([])
window = MainWindow()
app.exec_()
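# Pressing "DANGER!" hands Worker.run to the QThreadPool, so the 5 s sleep runs
# off the GUI thread and the timer-driven counter keeps updating.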
|
[
"time.sleep"
] |
[((294, 307), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (304, 307), False, 'import time\n')]
|
import os, sys
import time
import torch
import numpy as np
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal
import copy
import cv2
import matplotlib as mpl
mpl.use('Agg')
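# 'Agg' is a non-interactive backend, so figures can be rendered and saved
# without a display (e.g. on a headless server).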
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image as pil
import pickle
def print_loss_pack(loss_pack, name):
loss_depth, loss_mask_gt, loss_mask_out, loss_normal, loss_l2reg = loss_pack['depth'], loss_pack['mask_gt'], loss_pack['mask_out'], loss_pack['normal'], loss_pack['l2reg']
if len(loss_depth.shape) == 1:
loss_mask_gt, loss_mask_out, loss_depth, loss_normal, loss_l2reg = loss_mask_gt.mean(), loss_mask_out.mean(), loss_depth.mean(), loss_normal.mean(), loss_l2reg.mean()
print('NAME = [{0}] -- loss_depth: {1:.4f}, loss_mask_gt: {2:.4f}, loss_mask_out: {3:.4f}, loss_normal: {4:.4f}, loss_l2reg: {5:.4f}'.format(name, loss_depth.detach().cpu().numpy(), loss_mask_gt.detach().cpu().numpy(), loss_mask_out.detach().cpu().numpy(), loss_normal.detach().cpu().numpy(), loss_l2reg.detach().cpu().numpy()))
def print_loss_pack_color(loss_pack, name):
loss_color, loss_depth, loss_mask_gt, loss_mask_out, loss_normal, loss_l2reg, loss_l2reg_c = loss_pack['color'], loss_pack['depth'], loss_pack['mask_gt'], loss_pack['mask_out'], loss_pack['normal'], loss_pack['l2reg'], loss_pack['l2reg_c']
print('NAME = [{0}] -- loss_color: {1:.4f}, loss_depth: {2:.4f}, loss_mask_gt: {3:.4f}, loss_mask_out: {4:.4f}, loss_normal: {5:.4f}, loss_l2reg: {6:.4f}, loss_l2re_cg: {7:.4f}'.format(name, loss_color.detach().cpu().numpy(), loss_depth.detach().cpu().numpy(), loss_mask_gt.detach().cpu().numpy(), loss_mask_out.detach().cpu().numpy(), loss_normal.detach().cpu().numpy(), loss_l2reg.detach().cpu().numpy(), loss_l2reg_c.detach().cpu().numpy()))
def demo_color_save_render_output(prefix, sdf_renderer, shape_code, color_code, camera, lighting_loc=None, profile=False):
R, T = camera.extrinsic[:,:3], camera.extrinsic[:,3]
R, T = torch.from_numpy(R).float().cuda(), torch.from_numpy(T).float().cuda()
R.requires_grad, T.requires_grad = False, False
if lighting_loc is not None:
lighting_locations = torch.from_numpy(lighting_loc).float().unsqueeze(0).cuda()
else:
lighting_locations = None
render_output = sdf_renderer.render(color_code, shape_code, R, T, profile=profile, no_grad=True, lighting_locations=lighting_locations)
depth_rendered, normal_rendered, color_rgb, valid_mask_rendered, min_sdf_sample = render_output
data = {}
data['depth'] = depth_rendered.detach().cpu().numpy()
data['normal'] = normal_rendered.detach().cpu().numpy()
data['mask'] = valid_mask_rendered.detach().cpu().numpy()
data['color'] = color_rgb.detach().cpu().numpy()
data['min_sdf_sample'] = min_sdf_sample.detach().cpu().numpy()
data['latent_tensor'] = shape_code.detach().cpu().numpy()
data['K'] = sdf_renderer.get_intrinsic()
data['RT'] = torch.cat([R, T[:,None]], 1).detach().cpu().numpy()
fname = prefix + '_info.pkl'
with open(fname, 'wb') as f:
pickle.dump(data, f)
img_hw = sdf_renderer.get_img_hw()
visualizer = Visualizer(img_hw)
print('Writing to prefix: {}'.format(prefix))
visualizer.visualize_depth(prefix + '_depth.png', depth_rendered.detach().cpu().numpy(), valid_mask_rendered.detach().cpu().numpy())
visualizer.visualize_normal(prefix + '_normal.png', normal_rendered.detach().cpu().numpy(), valid_mask_rendered.detach().cpu().numpy(), bgr2rgb=True)
visualizer.visualize_mask(prefix + '_silhouette.png', valid_mask_rendered.detach().cpu().numpy())
cv2.imwrite(prefix + '_rendered_rgb.png', color_rgb.detach().cpu().numpy() * 255)
class Visualizer(object):
def __init__(self, img_hw, dmin=0.0, dmax=10.0):
self.img_h, self.img_w = img_hw[0], img_hw[1]
self.data = {}
self.dmin, self.dmax = dmin, dmax
self.loss_counter = 0
self.loss_curve = {}
self.loss_list = []
self.chamfer_list = []
def get_data(self, data_name):
if data_name in self.data.keys():
return self.data[data_name]
else:
raise ValueError('Key {0} does not exist.'.format(data_name))
def set_data(self, data):
self.data = data
def reset_data(self):
self.data = {}
keys = ['mask_gt', 'mask_output', 'loss_mask_gt', 'loss_mask_out',
'depth_gt', 'depth_output', 'loss_depth',
'normal_gt', 'normal_output', 'loss_normal']
for key in keys:
self.data[key] = np.zeros((64, 64))
def reset_loss_curve(self):
self.loss_counter = 0
self.loss_curve = {}
def reset_all(self):
self.reset_data()
self.reset_loss_curve()
def add_loss_from_pack(self, loss_pack):
'''
potential properties:
['mask_gt', 'mask_out', 'depth' 'normal', 'l2reg']
'''
loss_name_list = list(loss_pack.keys())
if self.loss_curve == {}:
for loss_name in loss_name_list:
self.loss_curve[loss_name] = []
for loss_name in loss_name_list:
loss_value = loss_pack[loss_name].detach().cpu().numpy()
self.loss_curve[loss_name].append(loss_value)
self.loss_counter = self.loss_counter + 1
def add_loss(self, loss):
self.loss_list.append(loss.detach().cpu().numpy())
def add_chamfer(self, chamfer):
self.chamfer_list.append(chamfer)
def add_data(self, data_name, data_src, data_mask=None):
'''
potential properties:
mask: ['mask_gt', 'mask_output', 'loss_mask_gt', 'loss_mask_out']
depth: ['depth_gt', 'depth_output', 'loss_depth']
normal: ['normal_gt', 'normal_output', 'loss_normal']
'''
if data_mask is None:
self.data[data_name] = data_src
else:
data_map = np.zeros(data_mask.shape)
data_map[data_mask != 0] = data_src
self.data[data_name] = data_map
def save_depth(self, fname, depth_vis, cmap='magma', direct=False):
if direct:
cv2.imwrite(fname, depth_vis)
return 0
vmin, vmax = 0, 255
normalizer = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap=cmap)
colormapped_im = (mapper.to_rgba(depth_vis)[:,:,:3] * 255).astype(np.uint8)
im = pil.fromarray(colormapped_im)
im.save(fname)
def save_mask(self, fname, mask_vis, bgr2rgb=False):
if bgr2rgb:
mask_vis = cv2.cvtColor(mask_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, mask_vis)
def save_normal(self, fname, normal_vis, bgr2rgb=False):
if bgr2rgb:
normal_vis = cv2.cvtColor(normal_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, normal_vis)
def save_error(self, fname, error_vis, bgr2rgb=False):
self.save_depth(fname, error_vis, cmap='jet')
def visualize_depth(self, fname, depth, mask=None):
# depth_vis = get_vis_depth(depth, mask=mask, dmin=self.dmin, dmax=self.dmax)
depth_vis = get_vis_depth(depth, mask=mask)
# self.save_depth(fname, depth_vis)
cv2.imwrite(fname, depth_vis)
def visualize_normal(self, fname, normal, mask=None, bgr2rgb=False):
normal_vis = get_vis_normal(normal, mask=mask)
if bgr2rgb:
normal_vis = cv2.cvtColor(normal_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, normal_vis)
def visualize_mask(self, fname, mask, bgr2rgb=False):
mask_vis = get_vis_mask(mask)
if bgr2rgb:
mask_vis = cv2.cvtColor(mask_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, mask_vis)
def imshow(self, ax, img, title=None):
ax.imshow(img)
ax.axis('off')
if title is not None:
ax.set_title(title)
def imshow_bgr2rgb(self, ax, img, title=None):
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
ax.imshow(img)
ax.axis('off')
if title is not None:
ax.set_title(title)
def show_loss_curve(self, fname):
pass
def show_all_data_3x4(self, fname):
fig, axs = plt.subplots(3, 4, figsize=(30,30))
# first row, groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[0, 0], 255 - depth_gt_vis, title='depth gt')
normal_gt_vis = get_vis_normal(self.data['normal_gt'], mask=self.data['mask_gt'])
self.imshow(axs[0, 1], normal_gt_vis, title='normal gt')
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.imshow_bgr2rgb(axs[0, 2], 255 - mask_gt_vis, title='mask gt')
axs[0, 3].axis('off')
# second row, output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[1, 0], 255 - depth_output_vis, title='depth output')
normal_output_vis = get_vis_normal(self.data['normal_output'], mask=self.data['mask_output'])
self.imshow(axs[1, 1], normal_output_vis, title='normal output')
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.imshow_bgr2rgb(axs[1, 2], 255 - mask_output_vis, title='mask output')
axs[1, 3].axis('off')
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5)
self.imshow_bgr2rgb(axs[2, 0], 255 - loss_depth_vis, title='depth loss')
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)
self.imshow_bgr2rgb(axs[2, 1], 255 - loss_normal_vis, title='normal loss')
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt']) > 0)
self.imshow_bgr2rgb(axs[2, 2], 255 - loss_mask_gt_vis, title='gt \ output')
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out']) > 0)
self.imshow_bgr2rgb(axs[2, 3], 255 - loss_mask_out_vis, title='output \ gt')
# savefig
fig.savefig(fname)
plt.close('all')
def save_all_data(self, prefix):
# groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.save_depth(prefix + '_depth_gt.png', depth_gt_vis, cmap='magma', direct=True)
normal_gt_vis = get_vis_normal(self.data['normal_gt'], mask=self.data['mask_gt'])
self.save_normal(prefix + '_normal_gt.png', normal_gt_vis, bgr2rgb=True)
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.save_mask(prefix + '_mask_gt.png', mask_gt_vis)
# output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.save_depth(prefix + '_depth_output.png', depth_output_vis, cmap='magma', direct=True)
normal_output_vis = get_vis_normal(self.data['normal_output'], mask=self.data['mask_output'])
self.save_normal(prefix + '_normal_output.png', normal_output_vis, bgr2rgb=True)
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.save_mask(prefix + '_mask_output.png', mask_output_vis)
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5, bg_color=0)
self.save_error(prefix + '_depth_loss.png', loss_depth_vis, bgr2rgb=True)
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0, bg_color=0)
self.save_error(prefix + '_normal_loss.png', loss_normal_vis, bgr2rgb=True)
loss_mask_gt_vis = get_vis_depth(np.abs(self.data['loss_mask_gt']), bg_color=0)
self.save_error(prefix + '_mask_gt_loss.png', loss_mask_gt_vis, bgr2rgb=True)
loss_mask_out_vis = get_vis_depth(np.abs(self.data['loss_mask_out']), bg_color=0)
self.save_error(prefix + '_mask_out_loss.png', loss_mask_out_vis, bgr2rgb=True)
self.save_error(prefix + '_mask_loss.png', loss_mask_gt_vis + loss_mask_out_vis, bgr2rgb=True)
def dump_all_data(self, fname):
with open(fname, 'wb') as f:
pickle.dump({'data': self.data, 'loss_curve': self.loss_curve, 'loss_list': self.loss_list, 'chamfer_list': self.chamfer_list}, f)
def show_all_data(self, fname):
self.show_all_data_3x4(fname)
# self.save_all_data(fname[:-4])
def show_all_data_color(self, fname):
fig, axs = plt.subplots(3, 4, figsize=(30,30))
# first row, groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[0, 0], depth_gt_vis, title='depth gt')
normal_gt_vis = get_vis_normal(self.data['normal_gt'])
self.imshow_bgr2rgb(axs[0, 1], normal_gt_vis, title='normal gt')
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.imshow_bgr2rgb(axs[0, 2], mask_gt_vis, title='mask gt')
self.imshow_bgr2rgb(axs[0, 3], self.data['color_gt'], title='rgb gt')
# second row, output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[1, 0], depth_output_vis, title='depth output')
normal_output_vis = get_vis_normal(self.data['normal_output'])
self.imshow_bgr2rgb(axs[1, 1], normal_output_vis, title='normal output')
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.imshow_bgr2rgb(axs[1, 2], mask_output_vis, title='mask output')
self.imshow_bgr2rgb(axs[1, 3], self.data['color_output'], title='rgb output')
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5)
self.imshow_bgr2rgb(axs[2, 0], loss_depth_vis, title='depth loss')
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)
self.imshow_bgr2rgb(axs[2, 1], loss_normal_vis, title='normal loss')
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt']) > 0)
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out']) > 0)
loss_mask_gt_vis += loss_mask_out_vis
self.imshow_bgr2rgb(axs[2, 2], loss_mask_gt_vis, title='mask loss')
self.imshow_bgr2rgb(axs[2, 3], self.data['loss_color'], title='rgb loss')
# savefig
fig.savefig(fname)
plt.close('all')
def return_output_data_color(self):
return self.data['color_output'], self.data['depth_output'], self.data['normal_output'], self.data['mask_output']
def show_all_data_color_multi(self, fname, num_img=4):
fig, axs = plt.subplots(3, 2*num_img, figsize=(8*2*num_img,25))
for i in range(num_img):
# first row, ground truth
self.imshow_bgr2rgb(axs[0, 2*i], self.data['color_gt-{}'.format(i)], title='rgb gt {}'.format(i))
mask_gt_vis = get_vis_mask(self.data['mask_gt-{}'.format(i)])
self.imshow_bgr2rgb(axs[0, 2*i+1], mask_gt_vis, title='mask gt {}'.format(i))
# second row, output
self.imshow_bgr2rgb(axs[1, 2*i], self.data['color_output-{}'.format(i)], title='rgb output {}'.format(i))
mask_output_vis = get_vis_mask(self.data['mask_output-{}'.format(i)])
self.imshow_bgr2rgb(axs[1, 2*i+1], mask_output_vis, title='mask output {}'.format(i))
# third row, loss
self.imshow_bgr2rgb(axs[2, 2*i], self.data['loss_color-{}'.format(i)], title='rgb loss {}'.format(i))
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt-{}'.format(i)]) > 0)
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out-{}'.format(i)]) > 0)
loss_mask_gt_vis += loss_mask_out_vis
self.imshow_bgr2rgb(axs[2, 2*i+1], loss_mask_gt_vis, title='mask loss {}'.format(i))
# savefig
plt.subplots_adjust(top=0.95, right=0.99, left=0.01, bottom=0.01, wspace=0.05, hspace=0.1)
fig.savefig(fname)
plt.close('all')
def show_all_data_color_warp(self, fname):
fig, axs = plt.subplots(1, 5, figsize=(15, 3.4))
self.imshow_bgr2rgb(axs[0], self.data['color_gt-1'], title='view 1')
self.imshow_bgr2rgb(axs[1], self.data['color_gt-2'], title='view 2')
self.imshow_bgr2rgb(axs[2], self.data['color_valid-1'], title='valid region in view 1')
self.imshow_bgr2rgb(axs[3], self.data['color_valid-2'], title='warped color from view 2')
self.imshow_bgr2rgb(axs[4], self.data['color_valid_loss'], title='color loss')
# savefig
plt.subplots_adjust(top=0.99, right=0.99, left=0.01, bottom=0.00, wspace=0.05, hspace=0)
fig.savefig(fname)
plt.close('all')
|
[
"vis_utils.get_vis_depth",
"pickle.dump",
"numpy.abs",
"torch.cat",
"os.path.abspath",
"matplotlib.colors.Normalize",
"cv2.cvtColor",
"matplotlib.cm.ScalarMappable",
"cv2.imwrite",
"matplotlib.pyplot.close",
"vis_utils.get_vis_mask",
"matplotlib.pyplot.subplots",
"matplotlib.use",
"matplotlib.pyplot.subplots_adjust",
"vis_utils.get_vis_normal",
"torch.from_numpy",
"numpy.logical_and",
"numpy.zeros",
"PIL.Image.fromarray"
] |
[((233, 247), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (240, 247), True, 'import matplotlib as mpl\n'), ((91, 116), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (106, 116), False, 'import os, sys\n'), ((3133, 3153), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (3144, 3153), False, 'import pickle\n'), ((6299, 6341), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (6319, 6341), True, 'import matplotlib as mpl\n'), ((6359, 6404), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'normalizer', 'cmap': 'cmap'}), '(norm=normalizer, cmap=cmap)\n', (6376, 6404), True, 'import matplotlib.cm as cm\n'), ((6502, 6531), 'PIL.Image.fromarray', 'pil.fromarray', (['colormapped_im'], {}), '(colormapped_im)\n', (6515, 6531), True, 'from PIL import Image as pil\n'), ((6706, 6734), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'mask_vis'], {}), '(fname, mask_vis)\n', (6717, 6734), False, 'import cv2\n'), ((6894, 6924), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'normal_vis'], {}), '(fname, normal_vis)\n', (6905, 6924), False, 'import cv2\n'), ((7202, 7233), 'vis_utils.get_vis_depth', 'get_vis_depth', (['depth'], {'mask': 'mask'}), '(depth, mask=mask)\n', (7215, 7233), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((7286, 7315), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'depth_vis'], {}), '(fname, depth_vis)\n', (7297, 7315), False, 'import cv2\n'), ((7411, 7444), 'vis_utils.get_vis_normal', 'get_vis_normal', (['normal'], {'mask': 'mask'}), '(normal, mask=mask)\n', (7425, 7444), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((7542, 7572), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'normal_vis'], {}), '(fname, normal_vis)\n', (7553, 7572), False, 'import cv2\n'), ((7651, 7669), 'vis_utils.get_vis_mask', 'get_vis_mask', (['mask'], {}), '(mask)\n', (7663, 7669), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((7763, 7791), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'mask_vis'], {}), '(fname, mask_vis)\n', (7774, 7791), False, 'import cv2\n'), ((8303, 8339), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(4)'], {'figsize': '(30, 30)'}), '(3, 4, figsize=(30, 30))\n', (8315, 8339), True, 'import matplotlib.pyplot as plt\n'), ((8396, 8496), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_gt']"], {'mask': "self.data['mask_gt']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.\n dmin, dmax=self.dmax)\n", (8409, 8496), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((8593, 8658), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_gt']"], {'mask': "self.data['mask_gt']"}), "(self.data['normal_gt'], mask=self.data['mask_gt'])\n", (8607, 8658), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((8746, 8780), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_gt']"], {}), "(self.data['mask_gt'])\n", (8758, 8780), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((8943, 9050), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_output']"], {'mask': "self.data['mask_output']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_output'], mask=self.data['mask_output'],\n dmin=self.dmin, dmax=self.dmax)\n", (8956, 9050), False, 'from vis_utils import 
get_vis_depth, get_vis_mask, get_vis_normal\n'), ((9160, 9233), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_output']"], {'mask': "self.data['mask_output']"}), "(self.data['normal_output'], mask=self.data['mask_output'])\n", (9174, 9233), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((9333, 9371), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_output']"], {}), "(self.data['mask_output'])\n", (9345, 9371), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((9533, 9595), 'numpy.logical_and', 'np.logical_and', (["self.data['mask_gt']", "self.data['mask_output']"], {}), "(self.data['mask_gt'], self.data['mask_output'])\n", (9547, 9595), True, 'import numpy as np\n'), ((9807, 9879), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['loss_normal']", 'valid_mask'], {'dmin': '(-1.0)', 'dmax': '(0.0)'}), "(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)\n", (9820, 9879), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10346, 10362), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10355, 10362), True, 'import matplotlib.pyplot as plt\n'), ((10446, 10546), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_gt']"], {'mask': "self.data['mask_gt']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.\n dmin, dmax=self.dmax)\n", (10459, 10546), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10657, 10722), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_gt']"], {'mask': "self.data['mask_gt']"}), "(self.data['normal_gt'], mask=self.data['mask_gt'])\n", (10671, 10722), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10826, 10860), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_gt']"], {}), "(self.data['mask_gt'])\n", (10838, 10860), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10967, 11074), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_output']"], {'mask': "self.data['mask_output']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_output'], mask=self.data['mask_output'],\n dmin=self.dmin, dmax=self.dmax)\n", (10980, 11074), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((11198, 11271), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_output']"], {'mask': "self.data['mask_output']"}), "(self.data['normal_output'], mask=self.data['mask_output'])\n", (11212, 11271), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((11387, 11425), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_output']"], {}), "(self.data['mask_output'])\n", (11399, 11425), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((11543, 11605), 'numpy.logical_and', 'np.logical_and', (["self.data['mask_gt']", "self.data['mask_output']"], {}), "(self.data['mask_gt'], self.data['mask_output'])\n", (11557, 11605), True, 'import numpy as np\n'), ((11830, 11918), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['loss_normal']", 'valid_mask'], {'dmin': '(-1.0)', 'dmax': '(0.0)', 'bg_color': '(0)'}), "(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0,\n bg_color=0)\n", (11843, 11918), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((12850, 12886), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(3)', '(4)'], {'figsize': '(30, 30)'}), '(3, 4, figsize=(30, 30))\n', (12862, 12886), True, 'import matplotlib.pyplot as plt\n'), ((12943, 13043), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_gt']"], {'mask': "self.data['mask_gt']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.\n dmin, dmax=self.dmax)\n", (12956, 13043), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13134, 13172), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_gt']"], {}), "(self.data['normal_gt'])\n", (13148, 13172), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13268, 13302), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_gt']"], {}), "(self.data['mask_gt'])\n", (13280, 13302), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13507, 13614), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_output']"], {'mask': "self.data['mask_output']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_output'], mask=self.data['mask_output'],\n dmin=self.dmin, dmax=self.dmax)\n", (13520, 13614), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13718, 13760), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_output']"], {}), "(self.data['normal_output'])\n", (13732, 13760), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13868, 13906), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_output']"], {}), "(self.data['mask_output'])\n", (13880, 13906), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((14118, 14180), 'numpy.logical_and', 'np.logical_and', (["self.data['mask_gt']", "self.data['mask_output']"], {}), "(self.data['mask_gt'], self.data['mask_output'])\n", (14132, 14180), True, 'import numpy as np\n'), ((14386, 14458), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['loss_normal']", 'valid_mask'], {'dmin': '(-1.0)', 'dmax': '(0.0)'}), "(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)\n", (14399, 14458), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((14954, 14970), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (14963, 14970), True, 'import matplotlib.pyplot as plt\n'), ((15213, 15272), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2 * num_img)'], {'figsize': '(8 * 2 * num_img, 25)'}), '(3, 2 * num_img, figsize=(8 * 2 * num_img, 25))\n', (15225, 15272), True, 'import matplotlib.pyplot as plt\n'), ((16457, 16552), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'right': '(0.99)', 'left': '(0.01)', 'bottom': '(0.01)', 'wspace': '(0.05)', 'hspace': '(0.1)'}), '(top=0.95, right=0.99, left=0.01, bottom=0.01, wspace=\n 0.05, hspace=0.1)\n', (16476, 16552), True, 'import matplotlib.pyplot as plt\n'), ((16583, 16599), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16592, 16599), True, 'import matplotlib.pyplot as plt\n'), ((16667, 16704), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {'figsize': '(15, 3.4)'}), '(1, 5, figsize=(15, 3.4))\n', (16679, 16704), True, 'import matplotlib.pyplot as plt\n'), ((17167, 17259), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.99)', 'right': '(0.99)', 'left': '(0.01)', 'bottom': '(0.0)', 'wspace': '(0.05)', 'hspace': '(0)'}), 
'(top=0.99, right=0.99, left=0.01, bottom=0.0, wspace=\n 0.05, hspace=0)\n', (17186, 17259), True, 'import matplotlib.pyplot as plt\n'), ((17291, 17307), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17300, 17307), True, 'import matplotlib.pyplot as plt\n'), ((4638, 4656), 'numpy.zeros', 'np.zeros', (['(64, 64)'], {}), '((64, 64))\n', (4646, 4656), True, 'import numpy as np\n'), ((5977, 6002), 'numpy.zeros', 'np.zeros', (['data_mask.shape'], {}), '(data_mask.shape)\n', (5985, 6002), True, 'import numpy as np\n'), ((6199, 6228), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'depth_vis'], {}), '(fname, depth_vis)\n', (6210, 6228), False, 'import cv2\n'), ((6656, 6697), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_vis', 'cv2.COLOR_BGR2RGB'], {}), '(mask_vis, cv2.COLOR_BGR2RGB)\n', (6668, 6697), False, 'import cv2\n'), ((6842, 6885), 'cv2.cvtColor', 'cv2.cvtColor', (['normal_vis', 'cv2.COLOR_BGR2RGB'], {}), '(normal_vis, cv2.COLOR_BGR2RGB)\n', (6854, 6885), False, 'import cv2\n'), ((7490, 7533), 'cv2.cvtColor', 'cv2.cvtColor', (['normal_vis', 'cv2.COLOR_BGR2RGB'], {}), '(normal_vis, cv2.COLOR_BGR2RGB)\n', (7502, 7533), False, 'import cv2\n'), ((7713, 7754), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_vis', 'cv2.COLOR_BGR2RGB'], {}), '(mask_vis, cv2.COLOR_BGR2RGB)\n', (7725, 7754), False, 'import cv2\n'), ((8046, 8082), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8058, 8082), False, 'import cv2\n'), ((9635, 9666), 'numpy.abs', 'np.abs', (["self.data['loss_depth']"], {}), "(self.data['loss_depth'])\n", (9641, 9666), True, 'import numpy as np\n'), ((11645, 11676), 'numpy.abs', 'np.abs', (["self.data['loss_depth']"], {}), "(self.data['loss_depth'])\n", (11651, 11676), True, 'import numpy as np\n'), ((12041, 12074), 'numpy.abs', 'np.abs', (["self.data['loss_mask_gt']"], {}), "(self.data['loss_mask_gt'])\n", (12047, 12074), True, 'import numpy as np\n'), ((12216, 12250), 'numpy.abs', 'np.abs', (["self.data['loss_mask_out']"], {}), "(self.data['loss_mask_out'])\n", (12222, 12250), True, 'import numpy as np\n'), ((12541, 12675), 'pickle.dump', 'pickle.dump', (["{'data': self.data, 'loss_curve': self.loss_curve, 'loss_list': self.\n loss_list, 'chamfer_list': self.chamfer_list}", 'f'], {}), "({'data': self.data, 'loss_curve': self.loss_curve, 'loss_list':\n self.loss_list, 'chamfer_list': self.chamfer_list}, f)\n", (12552, 12675), False, 'import pickle\n'), ((14220, 14251), 'numpy.abs', 'np.abs', (["self.data['loss_depth']"], {}), "(self.data['loss_depth'])\n", (14226, 14251), True, 'import numpy as np\n'), ((10003, 10036), 'numpy.abs', 'np.abs', (["self.data['loss_mask_gt']"], {}), "(self.data['loss_mask_gt'])\n", (10009, 10036), True, 'import numpy as np\n'), ((10167, 10201), 'numpy.abs', 'np.abs', (["self.data['loss_mask_out']"], {}), "(self.data['loss_mask_out'])\n", (10173, 10201), True, 'import numpy as np\n'), ((14576, 14609), 'numpy.abs', 'np.abs', (["self.data['loss_mask_gt']"], {}), "(self.data['loss_mask_gt'])\n", (14582, 14609), True, 'import numpy as np\n'), ((14656, 14690), 'numpy.abs', 'np.abs', (["self.data['loss_mask_out']"], {}), "(self.data['loss_mask_out'])\n", (14662, 14690), True, 'import numpy as np\n'), ((2039, 2058), 'torch.from_numpy', 'torch.from_numpy', (['R'], {}), '(R)\n', (2055, 2058), False, 'import torch\n'), ((2075, 2094), 'torch.from_numpy', 'torch.from_numpy', (['T'], {}), '(T)\n', (2091, 2094), False, 'import torch\n'), ((3007, 3036), 'torch.cat', 'torch.cat', (['[R, T[:, None]]', '(1)'], {}), 
'([R, T[:, None]], 1)\n', (3016, 3036), False, 'import torch\n'), ((2225, 2255), 'torch.from_numpy', 'torch.from_numpy', (['lighting_loc'], {}), '(lighting_loc)\n', (2241, 2255), False, 'import torch\n')]
|
import importlib
# Task Factories know about all types of tasks that can be created and creates the appropriate instance when called
# Abstracts the creation logic, and all of the library importing away from the user
###################################################################################################
# NodeFactory: Abstracts the creation logic for new nodes
# Singleton Factory pattern, so no instantiation needed
###################################################################################################
class NodeFactory:
"""Create new nodes based on a type id"""
registered_nodes = {}
###############################################################################################
# Register Node (Class Method) : Registers a new node under a specified name/id
###############################################################################################
@classmethod
def register_node(cls, type_id, type_class):
"""register a new node into the factory
Args:
type_id:
type_class:
"""
cls.registered_nodes[type_id] = type_class
###############################################################################################
# Create Node (Class Method) : Returns a new instance of the specifed node
###############################################################################################
@classmethod
def create_node(cls, node_id, type_id):
"""create a new node based on type id
Args:
node_id:
type_id:
"""
node = cls.registered_nodes[type_id](node_id)
return node
###############################################################################################
# Import Node (Class Method) : Imports the python class associated with a node and register it
###############################################################################################
@classmethod
def import_node(cls, type_id, toolkit_id, class_name):
"""import the class for a new node and register it
Args:
type_id:
toolkit_id:
class_name:
"""
if type_id not in cls.registered_nodes:
module_name = "toolkits." + toolkit_id + "." + class_name
module = importlib.import_module(module_name)
cls.register_node(type_id, getattr(module, class_name))
return cls.registered_nodes[type_id]
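if __name__ == "__main__":
    # Minimal usage sketch; AddNode is an illustrative stand-in for a real
    # toolkit node class (import_node would normally load one dynamically
    # from toolkits.<toolkit_id>.<class_name>).
    class AddNode:
        def __init__(self, node_id):
            self.node_id = node_id
    NodeFactory.register_node("math.add", AddNode)
    node = NodeFactory.create_node("n1", "math.add")
    print(type(node).__name__, node.node_id)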
|
[
"importlib.import_module"
] |
[((2347, 2383), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (2370, 2383), False, 'import importlib\n')]
|
import logging
from confluent_kafka import Producer
log = logging.getLogger(__name__)
def _log_delivery_report(error, message):
if error is None:
log.info(f"Delivered to topic: {message.topic()} ({message.partition()})")
else:
log.error(f"Failed to deliver message: {error}")
class KafkaEventsProducer:
def __init__(self, topic, config):
self._topic = topic
self._producer = Producer(config)
def send(self, event):
self._producer.produce(
self._topic,
event.as_json().encode("utf-8"),
callback=_log_delivery_report,
)
def flush(self):
self._producer.flush()
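if __name__ == "__main__":
    # Minimal usage sketch. The broker address and PingEvent are illustrative
    # assumptions; any object exposing an as_json() method works here.
    import json
    class PingEvent:
        def as_json(self):
            return json.dumps({"type": "ping"})
    producer = KafkaEventsProducer(
        topic="events", config={"bootstrap.servers": "localhost:9092"}
    )
    producer.send(PingEvent())
    producer.flush()  # block until queued messages are delivered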
|
[
"confluent_kafka.Producer",
"logging.getLogger"
] |
[((60, 87), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (77, 87), False, 'import logging\n'), ((425, 441), 'confluent_kafka.Producer', 'Producer', (['config'], {}), '(config)\n', (433, 441), False, 'from confluent_kafka import Producer\n')]
|
from setuptools import setup
from setuptools import find_packages
setup(name = 'elink',
version = '0.1pre',
description = 'parallella elink',
license = 'TBD',
packages = find_packages(),
)
|
[
"setuptools.find_packages"
] |
[((197, 212), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (210, 212), False, 'from setuptools import find_packages\n')]
|
from pymtl import *
from lizard.util.rtl.interface import Interface, UseInterface
from lizard.util.rtl.method import MethodSpec
from lizard.util.rtl.register import Register, RegisterInterface
from lizard.util.rtl.thermometer_mask import ThermometerMask, ThermometerMaskInterface
from lizard.bitutil import clog2
class ArbiterInterface(Interface):
def __init__(s, nreqs):
s.nreqs = nreqs
super(ArbiterInterface, s).__init__([
MethodSpec(
'grant',
args={
'reqs': Bits(nreqs),
},
rets={
'grant': Bits(nreqs),
},
call=False,
rdy=False,
),
])
# Based on design from: http://fpgacpu.ca/fpga/priority.html
class PriorityArbiter(Model):
def __init__(s, interface):
UseInterface(s, interface)
@s.combinational
def compute():
# PYMTL_BROKEN unary - translates but does not simulate
s.grant_grant.v = s.grant_reqs & (0 - s.grant_reqs)
def line_trace(s):
return "{} -> {}".format(s.grant_reqs, s.grant_grant)
# Based on design from: http://fpgacpu.ca/fpga/roundrobin.html
# and "Arbiters: Design Ideas and Coding Styles" by <NAME>,
# Silicon Logic Engineering, Inc.
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.86.550&rep=rep1&type=pdf
class RoundRobinArbiter(Model):
def __init__(s, interface):
UseInterface(s, interface)
nreqs = s.interface.nreqs
s.mask = Register(RegisterInterface(Bits(nreqs)), reset_value=0)
s.masker = ThermometerMask(ThermometerMaskInterface(nreqs))
s.raw_arb = PriorityArbiter(ArbiterInterface(nreqs))
s.masked_arb = PriorityArbiter(ArbiterInterface(nreqs))
s.final_grant = Wire(nreqs)
s.connect(s.raw_arb.grant_reqs, s.grant_reqs)
s.connect(s.masker.mask_in_, s.mask.read_data)
@s.combinational
def compute():
s.masked_arb.grant_reqs.v = s.grant_reqs & s.masker.mask_out
if s.masked_arb.grant_grant == 0:
s.final_grant.v = s.raw_arb.grant_grant
else:
s.final_grant.v = s.masked_arb.grant_grant
@s.combinational
def shift_write():
s.mask.write_data.v = s.final_grant << 1
s.connect(s.grant_grant, s.final_grant)
def line_trace(s):
return "{} -> {}".format(s.grant_reqs, s.grant_grant)
|
[
"lizard.util.rtl.thermometer_mask.ThermometerMaskInterface",
"lizard.util.rtl.interface.UseInterface"
] |
[((816, 842), 'lizard.util.rtl.interface.UseInterface', 'UseInterface', (['s', 'interface'], {}), '(s, interface)\n', (828, 842), False, 'from lizard.util.rtl.interface import Interface, UseInterface\n'), ((1394, 1420), 'lizard.util.rtl.interface.UseInterface', 'UseInterface', (['s', 'interface'], {}), '(s, interface)\n', (1406, 1420), False, 'from lizard.util.rtl.interface import Interface, UseInterface\n'), ((1552, 1583), 'lizard.util.rtl.thermometer_mask.ThermometerMaskInterface', 'ThermometerMaskInterface', (['nreqs'], {}), '(nreqs)\n', (1576, 1583), False, 'from lizard.util.rtl.thermometer_mask import ThermometerMask, ThermometerMaskInterface\n')]
|
import unittest
from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair
class TestFactoidKeys(unittest.TestCase):
def test_generate_key_pair(self):
private_key, public_key = generate_key_pair()
assert isinstance(private_key, FactoidPrivateKey)
assert isinstance(public_key, FactoidAddress)
def test_key_string_validity_checkers(self):
# Valid pair. All zeros private key
private = '<KEY>'
public = '<KEY>'
assert FactoidPrivateKey.is_valid(private)
assert FactoidAddress.is_valid(public)
# Bad prefix
private = '<KEY>'
public = '<KEY>'
assert not FactoidAddress.is_valid(private)
assert not FactoidAddress.is_valid(public)
# Bad body
private = '<KEY>'
public = '<KEY>'
assert not FactoidPrivateKey.is_valid(private)
assert not FactoidAddress.is_valid(public)
# Bad checksums
private = '<KEY>'
public = '<KEY>'
assert not FactoidPrivateKey.is_valid(private)
assert not FactoidAddress.is_valid(public)
def test_key_imports_and_exports(self):
private_bytes = b'\0' * 32
private_string = '<KEY>'
public_string = '<KEY>'
private_from_bytes = FactoidPrivateKey(seed_bytes=private_bytes)
private_from_string = FactoidPrivateKey(key_string=private_string)
assert private_from_bytes.key_bytes == private_bytes
assert private_from_string.key_bytes == private_bytes
assert private_from_bytes.to_string() == private_string
assert private_from_string.to_string() == private_string
public_from_private = private_from_string.get_factoid_address()
public_from_string = FactoidAddress(address_string=public_string)
assert public_from_private.key_bytes is not None
assert public_from_string.key_bytes is None
assert public_from_private.rcd_hash == public_from_string.rcd_hash
assert public_from_private.to_string() == public_string
assert public_from_string.to_string() == public_string
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"factom_keys.fct.FactoidPrivateKey",
"factom_keys.fct.FactoidAddress.is_valid",
"factom_keys.fct.generate_key_pair",
"factom_keys.fct.FactoidPrivateKey.is_valid",
"factom_keys.fct.FactoidAddress"
] |
[((2162, 2177), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2175, 2177), False, 'import unittest\n'), ((215, 234), 'factom_keys.fct.generate_key_pair', 'generate_key_pair', ([], {}), '()\n', (232, 234), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((507, 542), 'factom_keys.fct.FactoidPrivateKey.is_valid', 'FactoidPrivateKey.is_valid', (['private'], {}), '(private)\n', (533, 542), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((558, 589), 'factom_keys.fct.FactoidAddress.is_valid', 'FactoidAddress.is_valid', (['public'], {}), '(public)\n', (581, 589), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((1300, 1343), 'factom_keys.fct.FactoidPrivateKey', 'FactoidPrivateKey', ([], {'seed_bytes': 'private_bytes'}), '(seed_bytes=private_bytes)\n', (1317, 1343), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((1374, 1418), 'factom_keys.fct.FactoidPrivateKey', 'FactoidPrivateKey', ([], {'key_string': 'private_string'}), '(key_string=private_string)\n', (1391, 1418), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((1773, 1817), 'factom_keys.fct.FactoidAddress', 'FactoidAddress', ([], {'address_string': 'public_string'}), '(address_string=public_string)\n', (1787, 1817), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((682, 714), 'factom_keys.fct.FactoidAddress.is_valid', 'FactoidAddress.is_valid', (['private'], {}), '(private)\n', (705, 714), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((734, 765), 'factom_keys.fct.FactoidAddress.is_valid', 'FactoidAddress.is_valid', (['public'], {}), '(public)\n', (757, 765), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((856, 891), 'factom_keys.fct.FactoidPrivateKey.is_valid', 'FactoidPrivateKey.is_valid', (['private'], {}), '(private)\n', (882, 891), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((911, 942), 'factom_keys.fct.FactoidAddress.is_valid', 'FactoidAddress.is_valid', (['public'], {}), '(public)\n', (934, 942), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((1038, 1073), 'factom_keys.fct.FactoidPrivateKey.is_valid', 'FactoidPrivateKey.is_valid', (['private'], {}), '(private)\n', (1064, 1073), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n'), ((1093, 1124), 'factom_keys.fct.FactoidAddress.is_valid', 'FactoidAddress.is_valid', (['public'], {}), '(public)\n', (1116, 1124), False, 'from factom_keys.fct import FactoidPrivateKey, FactoidAddress, generate_key_pair\n')]
|
import os
import time
import numpy as np
import torch
from torch import nn
from butterfly_factor import butterfly_factor_mult_intermediate
from butterfly import Block2x2DiagProduct
from test_factor_multiply import twiddle_list_concat
exps = np.arange(6, 14)
sizes = 1 << exps
batch_size = 256
ntrials = [100000, 100000, 10000, 10000, 10000, 10000, 10000, 10000]
dense_times = np.zeros(exps.size)
fft_times = np.zeros(exps.size)
butterfly_times = np.zeros(exps.size)
for idx_n, (n, ntrial) in enumerate(zip(sizes, ntrials)):
print(n)
    B = Block2x2DiagProduct(n).to('cuda')
L = torch.nn.Linear(n, n, bias=False).to('cuda')
x = torch.randn(batch_size, n, requires_grad=True).to('cuda')
grad = torch.randn_like(x)
    twiddle = twiddle_list_concat(B)
# Dense multiply
output = L(x) # Do it once to initialize cuBlas handle and such
torch.autograd.grad(output, (L.weight, x), grad)
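    # CUDA kernels launch asynchronously, so synchronize before starting the
    # timer and again after the loop for meaningful wall-clock measurements.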
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = L(x)
torch.autograd.grad(output, (L.weight, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
dense_times[idx_n] = (end - start) / ntrial
# FFT
output = torch.rfft(x, 1) # Do it once to initialize cuBlas handle and such
grad_fft = torch.randn_like(output)
torch.autograd.grad(output, x, grad_fft)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = torch.rfft(x, 1)
torch.autograd.grad(output, x, grad_fft)
torch.cuda.synchronize()
end = time.perf_counter()
fft_times[idx_n] = (end - start) / ntrial
# Butterfly
output = butterfly_factor_mult_intermediate(twiddle, x)
torch.autograd.grad(output, (twiddle, x), grad)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = butterfly_factor_mult_intermediate(twiddle, x)
torch.autograd.grad(output, (twiddle, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
butterfly_times[idx_n] = (end-start) / ntrial
print(dense_times)
print(fft_times)
print(butterfly_times)
print(dense_times / butterfly_times)
print(dense_times / fft_times)
data = {
'sizes': sizes,
'speedup_fft': dense_times / fft_times,
'speedup_butterfly': dense_times / butterfly_times,
}
import pickle
with open('speed_training_data.pkl', 'wb') as f:
pickle.dump(data, f)
|
[
"torch.cuda.synchronize",
"pickle.dump",
"torch.randn_like",
"butterfly_factor.butterfly_factor_mult_intermediate",
"torch.autograd.grad",
"numpy.zeros",
"time.perf_counter",
"torch.randn",
"numpy.arange",
"torch.rfft",
"torch.nn.Linear"
] |
[((249, 265), 'numpy.arange', 'np.arange', (['(6)', '(14)'], {}), '(6, 14)\n', (258, 265), True, 'import numpy as np\n'), ((387, 406), 'numpy.zeros', 'np.zeros', (['exps.size'], {}), '(exps.size)\n', (395, 406), True, 'import numpy as np\n'), ((419, 438), 'numpy.zeros', 'np.zeros', (['exps.size'], {}), '(exps.size)\n', (427, 438), True, 'import numpy as np\n'), ((457, 476), 'numpy.zeros', 'np.zeros', (['exps.size'], {}), '(exps.size)\n', (465, 476), True, 'import numpy as np\n'), ((722, 741), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (738, 741), False, 'import torch\n'), ((876, 924), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(L.weight, x)', 'grad'], {}), '(output, (L.weight, x), grad)\n', (895, 924), False, 'import torch\n'), ((929, 953), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (951, 953), False, 'import torch\n'), ((966, 985), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (983, 985), False, 'import time\n'), ((1097, 1121), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1119, 1121), False, 'import torch\n'), ((1132, 1151), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1149, 1151), False, 'import time\n'), ((1224, 1240), 'torch.rfft', 'torch.rfft', (['x', '(1)'], {}), '(x, 1)\n', (1234, 1240), False, 'import torch\n'), ((1307, 1331), 'torch.randn_like', 'torch.randn_like', (['output'], {}), '(output)\n', (1323, 1331), False, 'import torch\n'), ((1336, 1376), 'torch.autograd.grad', 'torch.autograd.grad', (['output', 'x', 'grad_fft'], {}), '(output, x, grad_fft)\n', (1355, 1376), False, 'import torch\n'), ((1381, 1405), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1403, 1405), False, 'import torch\n'), ((1418, 1437), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1435, 1437), False, 'import time\n'), ((1553, 1577), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1575, 1577), False, 'import torch\n'), ((1588, 1607), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1605, 1607), False, 'import time\n'), ((1684, 1730), 'butterfly_factor.butterfly_factor_mult_intermediate', 'butterfly_factor_mult_intermediate', (['twiddle', 'x'], {}), '(twiddle, x)\n', (1718, 1730), False, 'from butterfly_factor import butterfly_factor_mult_intermediate\n'), ((1735, 1782), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(twiddle, x)', 'grad'], {}), '(output, (twiddle, x), grad)\n', (1754, 1782), False, 'import torch\n'), ((1787, 1811), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1809, 1811), False, 'import torch\n'), ((1824, 1843), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1841, 1843), False, 'import time\n'), ((1996, 2020), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2018, 2020), False, 'import torch\n'), ((2031, 2050), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2048, 2050), False, 'import time\n'), ((2431, 2451), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (2442, 2451), False, 'import pickle\n'), ((1044, 1092), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(L.weight, x)', 'grad'], {}), '(output, (L.weight, x), grad)\n', (1063, 1092), False, 'import torch\n'), ((1483, 1499), 'torch.rfft', 'torch.rfft', (['x', '(1)'], {}), '(x, 1)\n', (1493, 1499), False, 'import torch\n'), ((1508, 1548), 'torch.autograd.grad', 'torch.autograd.grad', (['output', 'x', 'grad_fft'], {}), '(output, x, 
grad_fft)\n', (1527, 1548), False, 'import torch\n'), ((1889, 1935), 'butterfly_factor.butterfly_factor_mult_intermediate', 'butterfly_factor_mult_intermediate', (['twiddle', 'x'], {}), '(twiddle, x)\n', (1923, 1935), False, 'from butterfly_factor import butterfly_factor_mult_intermediate\n'), ((1944, 1991), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(twiddle, x)', 'grad'], {}), '(output, (twiddle, x), grad)\n', (1963, 1991), False, 'import torch\n'), ((600, 633), 'torch.nn.Linear', 'torch.nn.Linear', (['n', 'n'], {'bias': '(False)'}), '(n, n, bias=False)\n', (615, 633), False, 'import torch\n'), ((653, 699), 'torch.randn', 'torch.randn', (['batch_size', 'n'], {'requires_grad': '(True)'}), '(batch_size, n, requires_grad=True)\n', (664, 699), False, 'import torch\n')]
|
from __future__ import annotations
import csv
import io
import json
import tarfile
import tempfile
from dataclasses import dataclass
from datetime import datetime
from http import HTTPStatus
from pprint import pprint
from typing import Iterator, Dict, Any
from dcp.data_format import Records
from dcp.utils.common import utcnow
import snapflow_crunchbase as crunchbase
from dcp.data_format import CsvFileFormat
from snapflow import Function, Context, DataBlock, DataFunctionContext, datafunction
from snapflow.helpers.connectors.connection import HttpApiConnection
CRUNCHBASE_API_BASE_URL = "https://api.crunchbase.com/bulk/v4/bulk_export.tar.gz"
CRUNCHBASE_BULK_CSV_URL = (
"http://static.crunchbase.com/data_crunchbase/bulk_export_sample.tar.gz"
)
CRUNCHBASE_CSV_TO_SCHEMA_MAP = {""}
@dataclass
class ImportCrunchbaseCSVState:
latest_imported_at: datetime
@datafunction(
"bulk_import",
namespace="crunchbase",
# state_class=ImportCrunchbaseCSVState,
display_name="Import Crunchbase data",
required_storage_classes=["file"],
)
def bulk_import(ctx: DataFunctionContext, user_key: str):
params = {
"user_key": user_key,
}
# while ctx.should_continue():
# ctx.emit_state_value("latest_imported_at", utcnow())
resp = HttpApiConnection().get(url=CRUNCHBASE_BULK_CSV_URL, params=params,)
print("------")
print(resp)
# tf = tempfile.TemporaryFile()
# tf = open("/Users/rootx/Projects/SnapData/test.tar.gz", "wb")
# tf.write(resp.content)
# tf.close()
ib = io.BytesIO(resp.content)
# tar = tarfile.open("/Users/rootx/Projects/SnapData/test.tar.gz", "r:gz")
with tarfile.open(fileobj=ib) as csv_files:
        raw = csv_files.extractfile("funding_rounds.csv")
print("----------")
with io.TextIOWrapper(raw) as raw_str:
print(list(csv.DictReader(raw_str)))
print("----------")
# tar.extractall("/Users/rootx/Projects/SnapData/test/")
# tar.close()
#
# raw = open("/Users/rootx/Projects/SnapData/test/organizations.csv", "r")
#
# dr = csv.DictReader(open("/Users/rootx/Projects/SnapData/test/organizations.csv", "r"))
# # print(list(dr))
# print("------")
# ctx.emit_state_value("imported", True)
# ctx.emit(raw, data_format=CsvFileFormat, schema="crunchbase.CrunchbasePerson")
# ctx.emit_state_value("imported", True)
# ctx.emit(raw, storage=ctx.execution_context.target_storage, data_format=CsvFileFormat)
# # check if there is anything left to process
# if resp.status_code == HTTPStatus.NO_CONTENT:
# break
#
# json_resp = resp.json()
#
# assert isinstance(json_resp, list)
#
# yield resp.json()
|
[
"io.BytesIO",
"snapflow.datafunction",
"csv.DictReader",
"io.TextIOWrapper",
"tarfile.open",
"snapflow.helpers.connectors.connection.HttpApiConnection"
] |
[((875, 1005), 'snapflow.datafunction', 'datafunction', (['"""bulk_import"""'], {'namespace': '"""crunchbase"""', 'display_name': '"""Import Crunchbase data"""', 'required_storage_classes': "['file']"}), "('bulk_import', namespace='crunchbase', display_name=\n 'Import Crunchbase data', required_storage_classes=['file'])\n", (887, 1005), False, 'from snapflow import Function, Context, DataBlock, DataFunctionContext, datafunction\n'), ((1547, 1571), 'io.BytesIO', 'io.BytesIO', (['resp.content'], {}), '(resp.content)\n', (1557, 1571), False, 'import io\n'), ((1661, 1685), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'ib'}), '(fileobj=ib)\n', (1673, 1685), False, 'import tarfile\n'), ((1280, 1299), 'snapflow.helpers.connectors.connection.HttpApiConnection', 'HttpApiConnection', ([], {}), '()\n', (1297, 1299), False, 'from snapflow.helpers.connectors.connection import HttpApiConnection\n'), ((1819, 1840), 'io.TextIOWrapper', 'io.TextIOWrapper', (['raw'], {}), '(raw)\n', (1835, 1840), False, 'import io\n'), ((1876, 1899), 'csv.DictReader', 'csv.DictReader', (['raw_str'], {}), '(raw_str)\n', (1890, 1899), False, 'import csv\n')]
|
"""@package config
Contains all config files necessary for simulator
Rignumber body configuration data
"""
from kinematics import Kinematics
import threading
from inc import *
from gui import Gui
from communication import ComminucationModule
from rigid_body_system_parser import RigidBodySystemParser
class Simulator:
def __init__(self, rigid_body_system=RigidBodySystem(), scene=Scene()):
self.rigid_body_system = rigid_body_system
try:
self.rigid_body_system_parser = RigidBodySystemParser(self.rigid_body_system)
self.kinematics = Kinematics(
root=self.rigid_body_system_parser.get_tree(),
joint_index_dict=self.rigid_body_system_parser.get_joint_index_dict(),
joint_value_dict=self.rigid_body_system_parser.get_joint_value_dict())
self.com = ComminucationModule(
joint_index_dict=self.rigid_body_system_parser.get_joint_index_dict(),
joint_value_dict=self.rigid_body_system_parser.get_joint_value_dict())
self.gui = Gui(root=self.rigid_body_system_parser.get_tree(), scene=scene)
except Exception as error:
log('Error: ' + repr(error))
raise Exception('Simulator Error!', 'Simulator')
|
[
"rigid_body_system_parser.RigidBodySystemParser"
] |
[((506, 551), 'rigid_body_system_parser.RigidBodySystemParser', 'RigidBodySystemParser', (['self.rigid_body_system'], {}), '(self.rigid_body_system)\n', (527, 551), False, 'from rigid_body_system_parser import RigidBodySystemParser\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters to provide an Experiment instance to pare down the amount of data
to look at.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import six
from six.moves import (zip, filter, map, reduce, input, range)
import math
def summary_lifetime_minimum(threshold):
"""
Returns a function that filters summary blob data by a minimum time,
*threshold*.
"""
def f(summary_data):
lifetimes = summary_data['died_t'] - summary_data['born_t']
return summary_data[lifetimes >= threshold]
return f
def exists_in_frame(frame):
"""
Returns a function that filters summary blob data by requiring it to
exist on a specific *frame*.
"""
def f(summary_data):
born_before = summary_data['born_f'] <= frame
died_after = summary_data['died_f'] >= frame
return summary_data[born_before & died_after]
return f
def exists_at_time(time):
"""
Returns a function that filters summary blob data by requiring it to
exist at a specific *time*.
"""
def f(summary_data):
born_before = summary_data['born_t'] <= time
died_after = summary_data['died_t'] >= time
return summary_data[born_before & died_after]
return f
def _midline_length(points):
"""
Calculates the length of a path connecting *points*.
"""
dist = 0
ipoints = iter(points)
a = six.next(ipoints) # prime loop
for b in ipoints:
dist += math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)
a = b
return dist
def relative_move_minimum(threshold):
"""
Returns a function that filters parsed blob data by a minimum amount
of movement. The sum of the blob's centroid bounding box must exceed
*threshold* times the average length of the midline.
"""
def f(blob):
xcent, ycent = tuple(zip(*blob['centroid']))
move_px = (max(xcent) - min(xcent)) + (max(ycent) - min(ycent))
size_px = (
sum(_midline_length(p) for p in blob['midline'] if p)
/ len(blob['midline']))
return move_px >= size_px * threshold
return f
def area_minimum(threshold): # pragma: no cover # TODO
"""
Returns a function that filters parsed blob data by a minimum ...
"""
def f(blob):
return bool
return f
def aspect_ratio_minimum(threshold): # pragma: no cover # TODO
"""
Returns a function that filters parsed blob data by a minimum ...
"""
def f(blob):
return bool
return f
|
[
"six.moves.zip",
"six.next",
"math.sqrt"
] |
[((1477, 1494), 'six.next', 'six.next', (['ipoints'], {}), '(ipoints)\n', (1485, 1494), False, 'import six\n'), ((1546, 1596), 'math.sqrt', 'math.sqrt', (['((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)'], {}), '((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n', (1555, 1596), False, 'import math\n'), ((1929, 1951), 'six.moves.zip', 'zip', (["*blob['centroid']"], {}), "(*blob['centroid'])\n", (1932, 1951), False, 'from six.moves import zip, filter, map, reduce, input, range\n')]
|
from pandas import Series
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold, ShuffleSplit
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
def transform_labels(y) -> Series:
if type(next(iter(y))) is str:
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
return Series(y)
def calc_auc(clf, test_x, test_y):
y_pred = clf.predict(test_x)
return metrics.roc_auc_score(
transform_labels(test_y),
transform_labels(y_pred.tolist())
)
def roc_plot(classifier, X, y, n_splits=3, title='', labeller=None):
cv = StratifiedKFold(n_splits=n_splits)
#if labeller:
# y = [labeller(i) for i in y]
y = transform_labels(y)
#cv = ShuffleSplit(n_splits=n_splits)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
for train, test in cv.split(X, y):
probas_ = classifier.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y.iloc[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic ' + title)
plt.legend(loc="lower right")
plt.show()
return plt
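if __name__ == '__main__':
    # Minimal usage sketch on synthetic data; the classifier choice is an
    # illustrative assumption, not something this module prescribes.
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X_arr, y_arr = make_classification(n_samples=300, n_features=10, random_state=0)
    roc_plot(LogisticRegression(max_iter=1000),
             pd.DataFrame(X_arr), Series(y_arr),
             n_splits=3, title='(synthetic)')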
|
[
"matplotlib.pyplot.title",
"numpy.maximum",
"numpy.mean",
"matplotlib.pyplot.fill_between",
"numpy.std",
"sklearn.preprocessing.LabelEncoder",
"numpy.linspace",
"numpy.minimum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"pandas.Series",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"sklearn.model_selection.StratifiedKFold",
"scipy.interp",
"matplotlib.pyplot.xlabel"
] |
[((444, 453), 'pandas.Series', 'Series', (['y'], {}), '(y)\n', (450, 453), False, 'from pandas import Series\n'), ((720, 754), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (735, 754), False, 'from sklearn.model_selection import StratifiedKFold, ShuffleSplit\n'), ((924, 946), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (935, 946), True, 'import numpy as np\n'), ((1471, 1559), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'linestyle': '"""--"""', 'lw': '(2)', 'color': '"""r"""', 'label': '"""Chance"""', 'alpha': '(0.8)'}), "([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance',\n alpha=0.8)\n", (1479, 1559), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1605), 'numpy.mean', 'np.mean', (['tprs'], {'axis': '(0)'}), '(tprs, axis=0)\n', (1591, 1605), True, 'import numpy as np\n'), ((1644, 1667), 'sklearn.metrics.auc', 'auc', (['mean_fpr', 'mean_tpr'], {}), '(mean_fpr, mean_tpr)\n', (1647, 1667), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1682, 1694), 'numpy.std', 'np.std', (['aucs'], {}), '(aucs)\n', (1688, 1694), True, 'import numpy as np\n'), ((1699, 1831), 'matplotlib.pyplot.plot', 'plt.plot', (['mean_fpr', 'mean_tpr'], {'color': '"""b"""', 'label': "('Mean ROC (AUC = %0.2f $\\\\pm$ %0.2f)' % (mean_auc, std_auc))", 'lw': '(2)', 'alpha': '(0.8)'}), "(mean_fpr, mean_tpr, color='b', label=\n 'Mean ROC (AUC = %0.2f $\\\\pm$ %0.2f)' % (mean_auc, std_auc), lw=2,\n alpha=0.8)\n", (1707, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1883), 'numpy.std', 'np.std', (['tprs'], {'axis': '(0)'}), '(tprs, axis=0)\n', (1869, 1883), True, 'import numpy as np\n'), ((1901, 1934), 'numpy.minimum', 'np.minimum', (['(mean_tpr + std_tpr)', '(1)'], {}), '(mean_tpr + std_tpr, 1)\n', (1911, 1934), True, 'import numpy as np\n'), ((1952, 1985), 'numpy.maximum', 'np.maximum', (['(mean_tpr - std_tpr)', '(0)'], {}), '(mean_tpr - std_tpr, 0)\n', (1962, 1985), True, 'import numpy as np\n'), ((1990, 2097), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['mean_fpr', 'tprs_lower', 'tprs_upper'], {'color': '"""grey"""', 'alpha': '(0.2)', 'label': '"""$\\\\pm$ 1 std. dev."""'}), "(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.2,\n label='$\\\\pm$ 1 std. 
dev.')\n", (2006, 2097), True, 'import matplotlib.pyplot as plt\n'), ((2119, 2142), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (2127, 2142), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2170), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (2155, 2170), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2185, 2208), True, 'import matplotlib.pyplot as plt\n'), ((2213, 2245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2223, 2245), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2305), 'matplotlib.pyplot.title', 'plt.title', (["('Receiver operating characteristic ' + title)"], {}), "('Receiver operating characteristic ' + title)\n", (2259, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2310, 2339), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2320, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2352, 2354), True, 'import matplotlib.pyplot as plt\n'), ((372, 386), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (384, 386), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1166, 1204), 'sklearn.metrics.roc_curve', 'roc_curve', (['y.iloc[test]', 'probas_[:, 1]'], {}), '(y.iloc[test], probas_[:, 1])\n', (1175, 1204), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1297, 1310), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1300, 1310), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1348, 1437), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'lw': '(1)', 'alpha': '(0.3)', 'label': "('ROC fold %d (AUC = %0.2f)' % (i, roc_auc))"}), "(fpr, tpr, lw=1, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i,\n roc_auc))\n", (1356, 1437), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1251), 'scipy.interp', 'interp', (['mean_fpr', 'fpr', 'tpr'], {}), '(mean_fpr, fpr, tpr)\n', (1231, 1251), False, 'from scipy import interp\n')]
|
from random import randint
class Product():
    def __init__(self, name, price=10, weight=20, flammability=0.5, identifier=None):
        self.name = name
        self.price = price
        self.weight = weight
        self.flammability = flammability
        # Generate the id in the body: a randint() default argument is evaluated
        # only once, so every instance would otherwise share one identifier.
        self.identifier = identifier if identifier is not None else randint(1000000, 9999999)
def stealability(self):
x = self.price/self.weight
if x < 0.5:
message = 'not so stealable'
else:
if x >= 1.0:
message = "very stealable"
else:
message = 'kinda stealable'
return message
def explode(self):
x = self.flammability * self.weight
if x < 10:
message = '...fizzle.'
else:
if x >= 50:
message = "....BABOOM!!"
else:
message = '...boom!'
return message
class BoxingGlove(Product):
    def __init__(self, name, price=10,
                 weight=10, flammability=0.5,
                 identifier=None):
super().__init__(name, price, weight, flammability, identifier)
def explode(self):
print("...it's a glove")
def punch(self):
x = self.weight
if x < 5:
message = 'That tickles!'
else:
if x > 15:
message = "OUCH!!"
else:
message = 'Hey that hurts!'
return message
if __name__ == "__main__":
prod = Product('A cool toy')
print(prod.name)
print(prod.identifier)
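    # Quick check of the threshold logic: price/weight = 10/20 = 0.5 gives
    # 'kinda stealable'; flammability * weight = 0.5 * 20 = 10 gives '...boom!'.
    print(prod.stealability())
    print(prod.explode())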
|
[
"random.randint"
] |
[((126, 151), 'random.randint', 'randint', (['(1000000)', '(9999999)'], {}), '(1000000, 9999999)\n', (133, 151), False, 'from random import randint\n'), ((1018, 1043), 'random.randint', 'randint', (['(1000000)', '(9999999)'], {}), '(1000000, 9999999)\n', (1025, 1043), False, 'from random import randint\n')]
|
from multiprocessing import Process
from json import loads
from json.decoder import JSONDecodeError
from time import sleep
class ProcessReceive(Process):
def __init__(self, queue, socket, client_status):
self.queue = queue
self.socket = socket
self.client_status = client_status
super().__init__(target=self._process_receive)
def _process_receive(self):
while True:
if not bool(self.client_status.value):
self.socket.close()
print("\t\tEnd of Process Receive")
return
try:
response = self.socket.recv(1024)
except ConnectionResetError:
break
            if not response:
                # recv() returns empty bytes once the peer closes the socket.
                break
aux = response.decode('utf-8')
try:
jdata = loads(aux)
except JSONDecodeError:
continue
self.queue.put(jdata)
|
[
"json.loads"
] |
[((856, 866), 'json.loads', 'loads', (['aux'], {}), '(aux)\n', (861, 866), False, 'from json import loads\n')]
|
import os
async def get_extension_models():
l = []
list_of_files = [x for x in os.listdir("/code/external_modules/") if x not in ['__init__.py', '__pycache__']]
for m in list_of_files:
# Check if model not empty
extension_model_files = [
x for x in os.listdir(f"/code/external_modules/{m}/models") if x not in ['__init__.py', '__pycache__']
]
if len(extension_model_files) > 0:
l.append(f"external_modules.{m}.models")
return l
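if __name__ == "__main__":
    # Minimal usage sketch; assumes the /code/external_modules directory layout
    # this coroutine expects actually exists on disk.
    import asyncio
    print(asyncio.run(get_extension_models()))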
|
[
"os.listdir"
] |
[((89, 126), 'os.listdir', 'os.listdir', (['"""/code/external_modules/"""'], {}), "('/code/external_modules/')\n", (99, 126), False, 'import os\n'), ((291, 339), 'os.listdir', 'os.listdir', (['f"""/code/external_modules/{m}/models"""'], {}), "(f'/code/external_modules/{m}/models')\n", (301, 339), False, 'import os\n')]
|
"""!Tokenizer for the produtil.testing.parser module."""
import re
import produtil.testing.utilities
__all__=[ 'Token', 'end_of_line_type', 'end_of_text_type', 'Tokenizer',
'TokenizeFile' ]
class Token(object):
"""!Represents one token in the tokenized version of a file."""
##@var token_type
# The type of token, a string
##@var token_value
# The text that was tokenized, a string.
##@var filename
# The file from which this token originates, a string. The
# special value produtil.testing.utilities.unknown_file indicates
# the file is unknown or the token is not from a file.
##@var lineno
# The line from file filename fron which this token originates, an integer.
# The special value -1 means the line is unknown.
def __init__(self,token_type,token_value,filename,lineno):
"""!Constructor for Token
@param token_type The type of token, a string
@param token_value The text this token represents, a string.
@param filename The name of the file from which this token
originates or produtil.testing.utilities.unknown_file if
unknown.
@param lineno The integer line number, counting from 1, from
which this token originates. Multi-line tokens should have a
line number representative of the region the token originates,
preferably on its first line. If the token is not from a
file, the value should be -1."""
super(Token,self).__init__()
self.token_type=token_type
self.filename=filename
self.lineno=lineno
self.token_value=token_value
def __repr__(self):
"""!A string representation of this token suitable for debugging.
@returns Python code that would construct this token."""
return 'Token(%s,%s,%s,%s)'%(
repr(self.token_type),repr(self.token_value),
repr(self.filename),repr(self.lineno))
def __str__(self):
"""!A human-readable string representation of this token.
@returns Python code that would construct this token."""
return 'Token(%s,%s,%s,%s)'%(
repr(self.token_type),repr(self.token_value),
repr(self.filename),repr(self.lineno))
##@var end_of_line_type
# The token_type parameter to send to Token.__init__() to indicate the
# end of a line
end_of_line_type='\n'
##@var end_of_text_type
# The token_type parameter to send to Token.__init__() to indicate the
# end of a file or string.
end_of_text_type=''
class Tokenizer(object):
"""!Tokenizes a file, turning it into a stream of Token objects
for parsing."""
##@var re
# A compiled regular expression used to tokenize the file.
def copy(self):
"""!Duplicates this object
At present, a Tokenizer has no internal state information.
Hence, this is equivalent to Tokenizer(). This may change in
the future. Hence, if you want to copy a Tokenizer, you
should use the copy() function.
@returns A new empty Tokenizer."""
return Tokenizer()
def __init__(self):
"""!Constructor for Tokenizer"""
super(Tokenizer,self).__init__()
#yell('compile\n')
self.re=re.compile(r'''(?xs)
(
(?P<comment> \# [^\r\n]+ (?: \r | \n )+ )
| (?P<commentend> \# [^\r\n]+ | \# ) $
| (?P<varname> [A-Za-z_] [A-Za-z_0-9.@]*
(?: % [A-Za-z_][A-Za-z_0-9.@]* )* )
| (?P<hash>\#)
| (?P<number>
[+-]? [0-9]+\.[0-9]+ (?: [eE] [+-]? [0-9]+ )?
| [+-]? \.[0-9]+ (?: [eE] [+-]? [0-9]+ )?
| [+-]? [0-9]+\. (?: [eE] [+-]? [0-9]+ )?
| [+-]? [0-9]+ (?: [eE] [+-]? [0-9]+ )?
)
| (?P<empty_qstring> '' )
| (?P<empty_dqstring> "" )
| ' (?P<qstring> (?:
[^'\\]
| ( \\ . )+ ) * ) '
| " (?P<dqstring> (?:
[^"\\]
| ( \\ . )+ ) * ) "
| \[\[\[ (?P<bracestring> (?:
[^\]@]
| @ (?!\[)
| @ \[ @ \]
| @ \[ ' [^']+ ' \]
| @ \[ [^\]]+ \]
| \]\] (?!\])
| \] (?!\])
) *? ) \]\]\]
| (?P<endline>[ \t]* [\r\n]+)
| (?P<equalequal> == )
| (?P<equal> = )
| (?P<astrisk> \* )
| (?P<whitespace> [ \t]+ )
| (?P<lset>\{)
| (?P<rset>\})
| (?P<lfort>\(/)
| (?P<rfort>/\))
| (?P<lparen>\()
| (?P<rparen>\))
| (?P<comma>,)
| (?P<colon>:)
| (?P<at>@)
| (?P<oper>\.[a-zA-Z_][a-zA-Z0-9_.]*\.)
| <=+ (?P<filter>[a-zA-Z_][a-zA-Z0-9_.]*) =+
| (?P<error> . )
)''')
def tokenize(self,text,filename=produtil.testing.utilities.unknown_file,
first_line=1):
"""!Tokenizes the specified file, acting as an iterator over Token objects.
Loops over the text of the given file, creating Token objects
and yielding them.
@param text The text to tokenize.
@param filename The file from which the text originates. This may be used
for two purposes. The first is error reporting, and the second is
"load" statements, which load files relative to the path to the
current file.
@param first_line The line number for the first line of the file."""
lineno=first_line
for m in self.re.finditer(text):
if m is None:
                raise ValueError('SHOULD NOT GET HERE: no match on "%s"'%(text,))
# else:
# for dkey,dval in m.groupdict().iteritems():
# if dval is not None:
# yell("%10s = %s\n"%(dkey,repr(dval)))
if m.group('comment'):
yield Token(end_of_line_type,m.group('comment'),
filename,lineno)
elif m.group('commentend'):
yield Token(end_of_line_type,m.group('commentend'),
filename,lineno)
            elif m.group('hash'):
                yield Token(end_of_line_type,m.group('hash'),
                            filename,lineno)
elif m.group('endline'):
yield Token(end_of_line_type,m.group('endline'),
filename,lineno)
elif m.group('oper'):
yield Token('oper',m.group('oper'),filename,lineno)
elif m.group('filter'):
yield Token('oper','.'+m.group('filter')+'.',filename,lineno)
elif m.group('varname'):
yield Token('varname',m.group('varname'),filename,lineno)
elif m.group('number'):
yield Token('number',m.group('number'),filename,lineno)
elif m.group('empty_qstring'):
yield Token('qstring','',filename,lineno)
elif m.group('empty_dqstring'):
yield Token('dqstring','',filename,lineno)
elif m.group('qstring'):
yield Token('qstring',m.group('qstring'),filename,lineno)
elif m.group('dqstring'):
yield Token('dqstring',m.group('dqstring'),filename,lineno)
elif m.group('bracestring'):
yield Token('bracestring',m.group('bracestring'),
filename,lineno)
elif m.group('at'):
yield Token('@','@',filename,lineno)
elif m.group('equalequal'):
yield Token('==','==',filename,lineno)
elif m.group('equal'):
yield Token('=','=',filename,lineno)
elif m.group('comma'):
yield Token(',',',',filename,lineno)
elif m.group('colon'):
yield Token(':',':',filename,lineno)
elif m.group('lset'):
yield Token('{','{',filename,lineno)
elif m.group('rset'):
yield Token('}','}',filename,lineno)
elif m.group('lparen'):
yield Token('(','(',filename,lineno)
elif m.group('rparen'):
yield Token(')',')',filename,lineno)
elif m.group('lfort'):
yield Token('(/','(/',filename,lineno)
elif m.group('rfort'):
yield Token('/)','/)',filename,lineno)
elif m.group('whitespace'):
pass # Ignore whitespace outside strings
else:
raise ValueError('%s:%d: invalid text %s'%(
filename,lineno,repr(m.group(0))))
lineno+=m.group(0).count('\n')
yield Token(end_of_text_type,'',filename,lineno)
class TokenizeFile(object):
"""!Wrapper around a Tokenizer for a specified file.
This is a convenience class; it is a wrapper around a Tokenizer,
but also knows how to create new TokenizeFile objects for the same
    type of underlying Tokenizer objects (for_file())."""
##@var tokenizer
# The Tokenizer object that turns text into sequences of Token objects.
##@var fileobj
# A file-like object that produces text for the tokenizer
##@var filename
# The name of the file that fileobj reads.
##@var first_line
# The integer first line of the file, usually 1.
def __init__(self,tokenizer,fileobj,
filename=produtil.testing.utilities.unknown_file,
first_line=1):
"""!Constructor for TokenizeFile
@param tokenizer The Tokenizer-like object to parse.
@param fileobj The opened file-like object to read.
@param filename The file from which the text originates. This may be used
for two purposes. The first is error reporting, and the second is
"load" statements, which load files relative to the path to the
current file.
@param first_line The line number for the first line of the file."""
self.tokenizer=tokenizer
self.fileobj=fileobj
self.filename=filename
self.first_line=first_line
def for_file(self,fileobj,filename,first_line=1):
"""!Creates a new TokenizeFile object for the specified file.
@param fileobj The file-like object to read.
@param filename The file from which the text originates. This
may be used for two purposes. The first is error reporting,
and the second is "load" statements, which load files
relative to the path to the current file.
@param first_line The line number for the first line of the file."""
return TokenizeFile(self.tokenizer.copy(),fileobj,filename,first_line)
def __iter__(self):
"""!Iterates over tokens in self.fileobj."""
text=self.fileobj.read()
for token in self.tokenizer.tokenize(
text,self.filename,self.first_line):
yield token
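# Usage sketch (token types follow the regex groups above; the input string
# is made up for illustration):
#
#   tok = Tokenizer()
#   for token in tok.tokenize('x = 3 # set x\n'):
#       print(token)
#   # yields a 'varname' token for x, an '=' token, a 'number' token for 3,
#   # an end-of-line token for the trailing comment, then an end-of-text token.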
|
[
"re.compile"
] |
[((3259, 5331), 're.compile', 're.compile', (['"""(?xs)\n (\n (?P<comment> \\\\# [^\\\\r\\\\n]+ (?: \\\\r | \\\\n )+ )\n | (?P<commentend> \\\\# [^\\\\r\\\\n]+ | \\\\# ) $\n | (?P<varname> [A-Za-z_] [A-Za-z_0-9.@]*\n (?: % [A-Za-z_][A-Za-z_0-9.@]* )* )\n | (?P<hash>\\\\#)\n | (?P<number>\n [+-]? [0-9]+\\\\.[0-9]+ (?: [eE] [+-]? [0-9]+ )?\n | [+-]? \\\\.[0-9]+ (?: [eE] [+-]? [0-9]+ )?\n | [+-]? [0-9]+\\\\. (?: [eE] [+-]? [0-9]+ )?\n | [+-]? [0-9]+ (?: [eE] [+-]? [0-9]+ )?\n )\n | (?P<empty_qstring> \'\' )\n | (?P<empty_dqstring> "" )\n | \' (?P<qstring> (?:\n [^\'\\\\\\\\]\n | ( \\\\\\\\ . )+ ) * ) \'\n | " (?P<dqstring> (?:\n [^"\\\\\\\\]\n | ( \\\\\\\\ . )+ ) * ) "\n | \\\\[\\\\[\\\\[ (?P<bracestring> (?:\n [^\\\\]@]\n | @ (?!\\\\[)\n | @ \\\\[ @ \\\\]\n | @ \\\\[ \' [^\']+ \' \\\\]\n | @ \\\\[ [^\\\\]]+ \\\\]\n | \\\\]\\\\] (?!\\\\])\n | \\\\] (?!\\\\])\n ) *? ) \\\\]\\\\]\\\\]\n | (?P<endline>[ \\\\t]* [\\\\r\\\\n]+)\n | (?P<equalequal> == )\n | (?P<equal> = )\n | (?P<astrisk> \\\\* )\n | (?P<whitespace> [ \\\\t]+ )\n | (?P<lset>\\\\{)\n | (?P<rset>\\\\})\n | (?P<lfort>\\\\(/)\n | (?P<rfort>/\\\\))\n | (?P<lparen>\\\\()\n | (?P<rparen>\\\\))\n | (?P<comma>,)\n | (?P<colon>:)\n | (?P<at>@)\n | (?P<oper>\\\\.[a-zA-Z_][a-zA-Z0-9_.]*\\\\.)\n | <=+ (?P<filter>[a-zA-Z_][a-zA-Z0-9_.]*) =+\n | (?P<error> . )\n )"""'], {}), '(\n """(?xs)\n (\n (?P<comment> \\\\# [^\\\\r\\\\n]+ (?: \\\\r | \\\\n )+ )\n | (?P<commentend> \\\\# [^\\\\r\\\\n]+ | \\\\# ) $\n | (?P<varname> [A-Za-z_] [A-Za-z_0-9.@]*\n (?: % [A-Za-z_][A-Za-z_0-9.@]* )* )\n | (?P<hash>\\\\#)\n | (?P<number>\n [+-]? [0-9]+\\\\.[0-9]+ (?: [eE] [+-]? [0-9]+ )?\n | [+-]? \\\\.[0-9]+ (?: [eE] [+-]? [0-9]+ )?\n | [+-]? [0-9]+\\\\. (?: [eE] [+-]? [0-9]+ )?\n | [+-]? [0-9]+ (?: [eE] [+-]? [0-9]+ )?\n )\n | (?P<empty_qstring> \'\' )\n | (?P<empty_dqstring> "" )\n | \' (?P<qstring> (?:\n [^\'\\\\\\\\]\n | ( \\\\\\\\ . )+ ) * ) \'\n | " (?P<dqstring> (?:\n [^"\\\\\\\\]\n | ( \\\\\\\\ . )+ ) * ) "\n | \\\\[\\\\[\\\\[ (?P<bracestring> (?:\n [^\\\\]@]\n | @ (?!\\\\[)\n | @ \\\\[ @ \\\\]\n | @ \\\\[ \' [^\']+ \' \\\\]\n | @ \\\\[ [^\\\\]]+ \\\\]\n | \\\\]\\\\] (?!\\\\])\n | \\\\] (?!\\\\])\n ) *? ) \\\\]\\\\]\\\\]\n | (?P<endline>[ \\\\t]* [\\\\r\\\\n]+)\n | (?P<equalequal> == )\n | (?P<equal> = )\n | (?P<astrisk> \\\\* )\n | (?P<whitespace> [ \\\\t]+ )\n | (?P<lset>\\\\{)\n | (?P<rset>\\\\})\n | (?P<lfort>\\\\(/)\n | (?P<rfort>/\\\\))\n | (?P<lparen>\\\\()\n | (?P<rparen>\\\\))\n | (?P<comma>,)\n | (?P<colon>:)\n | (?P<at>@)\n | (?P<oper>\\\\.[a-zA-Z_][a-zA-Z0-9_.]*\\\\.)\n | <=+ (?P<filter>[a-zA-Z_][a-zA-Z0-9_.]*) =+\n | (?P<error> . )\n )"""\n )\n', (3269, 5331), False, 'import re\n')]
|
import machine
from machine import Pin
import pycom
from utime import sleep
# Config pin:
configPin = Pin('P21', Pin.IN, Pin.PULL_UP)
pycom.heartbeat(False)
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
print('Woke from a deep sleep')
else:
print('Power on or hard reset')
# Check for config mode:
if configPin():
# Do something
for i in range(0, 10):
pycom.rgbled(0x0000FF)
sleep(0.2)
pycom.rgbled(0x000000)
sleep(0.2)
pycom.rgbled(0xFF0000)
sleep(0.2)
pycom.rgbled(0x000000)
sleep(0.2)
    print('Config Mode')
    # Go to sleep for 10 seconds (deepsleep resets the board, so nothing after it runs)
    machine.deepsleep(10000)
|
[
"machine.deepsleep",
"utime.sleep",
"machine.reset_cause",
"pycom.heartbeat",
"pycom.rgbled",
"machine.Pin"
] |
[((103, 134), 'machine.Pin', 'Pin', (['"""P21"""', 'Pin.IN', 'Pin.PULL_UP'], {}), "('P21', Pin.IN, Pin.PULL_UP)\n", (106, 134), False, 'from machine import Pin\n'), ((136, 158), 'pycom.heartbeat', 'pycom.heartbeat', (['(False)'], {}), '(False)\n', (151, 158), False, 'import pycom\n'), ((163, 184), 'machine.reset_cause', 'machine.reset_cause', ([], {}), '()\n', (182, 184), False, 'import machine\n'), ((636, 660), 'machine.deepsleep', 'machine.deepsleep', (['(10000)'], {}), '(10000)\n', (653, 660), False, 'import machine\n'), ((401, 418), 'pycom.rgbled', 'pycom.rgbled', (['(255)'], {}), '(255)\n', (413, 418), False, 'import pycom\n'), ((432, 442), 'utime.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (437, 442), False, 'from utime import sleep\n'), ((451, 466), 'pycom.rgbled', 'pycom.rgbled', (['(0)'], {}), '(0)\n', (463, 466), False, 'import pycom\n'), ((482, 492), 'utime.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (487, 492), False, 'from utime import sleep\n'), ((501, 523), 'pycom.rgbled', 'pycom.rgbled', (['(16711680)'], {}), '(16711680)\n', (513, 523), False, 'import pycom\n'), ((532, 542), 'utime.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (537, 542), False, 'from utime import sleep\n'), ((551, 566), 'pycom.rgbled', 'pycom.rgbled', (['(0)'], {}), '(0)\n', (563, 566), False, 'import pycom\n'), ((582, 592), 'utime.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (587, 592), False, 'from utime import sleep\n')]
|
import datetime
from dateutil.parser import parse
from dateutil.relativedelta import MO, SU, relativedelta
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from django.views.generic.base import TemplateView
from homeschool.schools.models import SchoolYear
from homeschool.students.models import Coursework
class IndexView(TemplateView):
template_name = "core/index.html"
class AppView(LoginRequiredMixin, TemplateView):
template_name = "core/app.html"
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
# This is UTC so it is not localized to the user's timezone.
# That may lead to funny results in the evening.
today = timezone.now().date()
context["today"] = today
week = self.get_week_boundaries(today)
context["monday"], context["sunday"] = week
school_year = (
SchoolYear.objects.filter(
school=self.request.user.school,
start_date__lte=today,
end_date__gte=today,
)
.prefetch_related("grade_levels", "grade_levels__courses")
.first()
)
week_dates = []
if school_year:
week_dates = school_year.get_week_dates_for(week)
context["week_dates"] = week_dates
context["schedules"] = self.get_schedules(school_year, week, week_dates)
return context
def get_week_boundaries(self, today):
"""Get the Monday and Sunday that bound today."""
monday = today + relativedelta(weekday=MO(-1))
sunday = today + relativedelta(weekday=SU(+1))
return monday, sunday
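    # Example (dateutil semantics): for today == datetime.date(2020, 5, 20),
    # a Wednesday, relativedelta(weekday=MO(-1)) resolves to Monday 2020-05-18
    # and relativedelta(weekday=SU(+1)) to Sunday 2020-05-24.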
def get_schedules(self, school_year, week, week_dates):
"""Get the schedules for each student."""
schedules = []
if school_year is None:
return schedules
for student in self.request.user.school.students.all():
courses = student.get_courses(school_year)
week_coursework = student.get_week_coursework(week)
schedule = self.get_student_schedule(
student, week_dates, courses, week_coursework
)
schedules.append(schedule)
return schedules
def get_student_schedule(self, student, week_dates, courses, week_coursework):
"""Get the schedule.
Each student will get a list of courses, filled with each day.
Empty slots will contain None.
"""
completed_task_ids = list(
Coursework.objects.filter(
student=student, course_task__course__in=courses
).values_list("course_task_id", flat=True)
)
task_limit = len(week_dates)
schedule = {"student": student, "courses": []}
for course in courses:
course_schedule = {"course": course, "days": []}
# Doing this query in a loop is definitely an N+1 bug.
# If it's possible to do a single query of all tasks
# that groups by course then that would be better.
# No need to over-optimize until that's a real issue.
# I brought this up on the forum. It doesn't look like it's easy to fix.
# https://forum.djangoproject.com/t/grouping-by-foreignkey-with-a-limit-per-group/979
course_tasks = list(
course.course_tasks.exclude(id__in=completed_task_ids)[:task_limit]
)
course_tasks.reverse()
for week_date in week_dates:
course_schedule_item = {"week_date": week_date}
if (
course.id in week_coursework
and week_date in week_coursework[course.id]
):
coursework_list = week_coursework[course.id][week_date]
course_schedule_item["coursework"] = coursework_list
elif course.runs_on(week_date) and course_tasks:
course_schedule_item["task"] = course_tasks.pop()
course_schedule["days"].append(course_schedule_item)
schedule["courses"].append(course_schedule)
return schedule
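    # Shape sketch of the returned schedule (field values are illustrative):
    #
    #   {
    #       "student": <Student>,
    #       "courses": [
    #           {"course": <Course>,
    #            "days": [
    #                {"week_date": date(2020, 5, 18), "task": <CourseTask>},
    #                {"week_date": date(2020, 5, 19), "coursework": [...]},
    #                {"week_date": date(2020, 5, 20)},  # empty slot
    #            ]},
    #       ],
    #   }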
class DailyView(LoginRequiredMixin, TemplateView):
template_name = "core/daily.html"
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
year = self.kwargs.get("year")
month = self.kwargs.get("month")
day = self.kwargs.get("day")
if year and month and day:
day = datetime.date(year, month, day)
else:
# This is UTC so it is not localized to the user's timezone.
# That may lead to funny results in the evening.
day = timezone.now().date()
context["day"] = day
school_year = (
SchoolYear.objects.filter(
school=self.request.user.school, start_date__lte=day, end_date__gte=day
)
.prefetch_related("grade_levels", "grade_levels__courses")
.first()
)
# Set previous and next days navigation.
if school_year:
context["yesterday"] = school_year.get_previous_day_from(day)
context["ereyesterday"] = school_year.get_previous_day_from(
context["yesterday"]
)
context["tomorrow"] = school_year.get_next_day_from(day)
context["overmorrow"] = school_year.get_next_day_from(context["tomorrow"])
else:
context["ereyesterday"] = day - datetime.timedelta(days=2)
context["yesterday"] = day - datetime.timedelta(days=1)
context["tomorrow"] = day + datetime.timedelta(days=1)
context["overmorrow"] = day + datetime.timedelta(days=2)
context["schedules"] = self.get_schedules(school_year, day)
return context
def get_schedules(self, school_year, day):
"""Get the schedules for each student."""
schedules = []
if not school_year:
return schedules
if not school_year.runs_on(day):
return schedules
for student in self.request.user.school.students.all():
courses = student.get_courses(school_year)
schedule = self.get_student_schedule(student, day, courses)
schedules.append(schedule)
return schedules
def get_student_schedule(self, student, day, courses):
"""Get the daily schedule for the student."""
day_coursework = student.get_day_coursework(day)
completed_task_ids = list(
Coursework.objects.filter(
student=student, course_task__course__in=courses
).values_list("course_task_id", flat=True)
)
schedule = {"student": student, "courses": []}
for course in courses:
course_schedule = {"course": course}
if course.id in day_coursework:
course_schedule["coursework"] = day_coursework[course.id]
elif course.runs_on(day):
# Doing this query in a loop is definitely an N+1 bug.
# If it's possible to do a single query of all tasks
# that groups by course then that would be better.
# No need to over-optimize until that's a real issue.
# I brought this up on the forum. It doesn't look like it's easy to fix.
# https://forum.djangoproject.com/t/grouping-by-foreignkey-with-a-limit-per-group/979
course_task = course.course_tasks.exclude(
id__in=completed_task_ids
).first()
course_schedule["task"] = course_task
schedule["courses"].append(course_schedule)
return schedule
def post(self, request, *args, **kwargs):
"""Process students' work."""
completed_date = timezone.now().date()
if "completed_date" in request.POST:
completed_date = parse(request.POST["completed_date"])
tasks_by_student = self.get_task_completions_by_student(request.POST)
if tasks_by_student:
for student_id, tasks in tasks_by_student.items():
student = request.user.school.students.filter(id=student_id).first()
self.mark_completion(student, tasks, completed_date)
success_url = request.GET.get("next", reverse("core:daily"))
return HttpResponseRedirect(success_url)
def get_task_completions_by_student(self, post_data):
"""Parse out the tasks."""
tasks = {}
for key, value in post_data.items():
if not key.startswith("task"):
continue
parts = key.split("-")
student_id = int(parts[1])
task_id = int(parts[2])
if student_id not in tasks:
tasks[student_id] = {"complete": [], "incomplete": []}
category = "complete" if value == "on" else "incomplete"
tasks[student_id][category].append(task_id)
return tasks
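    # Example: POST data {"task-7-42": "on", "task-7-43": ""} parses to
    #   {7: {"complete": [42], "incomplete": [43]}}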
def mark_completion(self, student, tasks, completed_date):
"""Mark completed tasks or clear already complete tasks."""
if not student:
return
self.process_complete_tasks(student, tasks["complete"], completed_date)
self.process_incomplete_tasks(student, tasks["incomplete"])
def process_complete_tasks(self, student, complete_task_ids, completed_date):
"""Add coursework for any tasks that do not have it."""
existing_complete_task_ids = set(
Coursework.objects.filter(
student=student, course_task__in=complete_task_ids
).values_list("course_task_id", flat=True)
)
newly_complete_task_ids = set(complete_task_ids) - existing_complete_task_ids
if newly_complete_task_ids:
new_coursework = []
for task_id in newly_complete_task_ids:
new_coursework.append(
Coursework(
student=student,
course_task_id=task_id,
completed_date=completed_date,
)
)
Coursework.objects.bulk_create(new_coursework)
def process_incomplete_tasks(self, student, incomplete_task_ids):
"""Remove any coursework for tasks that are marked as incomplete."""
Coursework.objects.filter(
student=student, course_task__in=incomplete_task_ids
).delete()
|
[
"dateutil.relativedelta.MO",
"dateutil.parser.parse",
"homeschool.students.models.Coursework.objects.filter",
"django.utils.timezone.now",
"homeschool.students.models.Coursework",
"datetime.date",
"django.urls.reverse",
"homeschool.schools.models.SchoolYear.objects.filter",
"datetime.timedelta",
"homeschool.students.models.Coursework.objects.bulk_create",
"django.http.HttpResponseRedirect",
"dateutil.relativedelta.SU"
] |
[((8549, 8582), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['success_url'], {}), '(success_url)\n', (8569, 8582), False, 'from django.http import HttpResponseRedirect\n'), ((4665, 4696), 'datetime.date', 'datetime.date', (['year', 'month', 'day'], {}), '(year, month, day)\n', (4678, 4696), False, 'import datetime\n'), ((8102, 8139), 'dateutil.parser.parse', 'parse', (["request.POST['completed_date']"], {}), "(request.POST['completed_date'])\n", (8107, 8139), False, 'from dateutil.parser import parse\n'), ((8511, 8532), 'django.urls.reverse', 'reverse', (['"""core:daily"""'], {}), "('core:daily')\n", (8518, 8532), False, 'from django.urls import reverse\n'), ((10335, 10381), 'homeschool.students.models.Coursework.objects.bulk_create', 'Coursework.objects.bulk_create', (['new_coursework'], {}), '(new_coursework)\n', (10365, 10381), False, 'from homeschool.students.models import Coursework\n'), ((839, 853), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (851, 853), False, 'from django.utils import timezone\n'), ((5668, 5694), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (5686, 5694), False, 'import datetime\n'), ((5736, 5762), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5754, 5762), False, 'import datetime\n'), ((5803, 5829), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5821, 5829), False, 'import datetime\n'), ((5872, 5898), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (5890, 5898), False, 'import datetime\n'), ((8006, 8020), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (8018, 8020), False, 'from django.utils import timezone\n'), ((10538, 10617), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', ([], {'student': 'student', 'course_task__in': 'incomplete_task_ids'}), '(student=student, course_task__in=incomplete_task_ids)\n', (10563, 10617), False, 'from homeschool.students.models import Coursework\n'), ((1706, 1712), 'dateutil.relativedelta.MO', 'MO', (['(-1)'], {}), '(-1)\n', (1708, 1712), False, 'from dateutil.relativedelta import MO, SU, relativedelta\n'), ((1761, 1767), 'dateutil.relativedelta.SU', 'SU', (['(+1)'], {}), '(+1)\n', (1763, 1767), False, 'from dateutil.relativedelta import MO, SU, relativedelta\n'), ((2651, 2726), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', ([], {'student': 'student', 'course_task__course__in': 'courses'}), '(student=student, course_task__course__in=courses)\n', (2676, 2726), False, 'from homeschool.students.models import Coursework\n'), ((4863, 4877), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4875, 4877), False, 'from django.utils import timezone\n'), ((6715, 6790), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', ([], {'student': 'student', 'course_task__course__in': 'courses'}), '(student=student, course_task__course__in=courses)\n', (6740, 6790), False, 'from homeschool.students.models import Coursework\n'), ((9703, 9780), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', ([], {'student': 'student', 'course_task__in': 'complete_task_ids'}), '(student=student, course_task__in=complete_task_ids)\n', (9728, 9780), False, 'from homeschool.students.models import Coursework\n'), ((10127, 10214), 'homeschool.students.models.Coursework', 'Coursework', ([], {'student': 'student', 'course_task_id': 'task_id', 'completed_date': 'completed_date'}), '(student=student, course_task_id=task_id, completed_date=\n completed_date)\n', (10137, 10214), False, 'from homeschool.students.models import Coursework\n'), ((1031, 1138), 'homeschool.schools.models.SchoolYear.objects.filter', 'SchoolYear.objects.filter', ([], {'school': 'self.request.user.school', 'start_date__lte': 'today', 'end_date__gte': 'today'}), '(school=self.request.user.school, start_date__lte=\n today, end_date__gte=today)\n', (1056, 1138), False, 'from homeschool.schools.models import SchoolYear\n'), ((4951, 5054), 'homeschool.schools.models.SchoolYear.objects.filter', 'SchoolYear.objects.filter', ([], {'school': 'self.request.user.school', 'start_date__lte': 'day', 'end_date__gte': 'day'}), '(school=self.request.user.school, start_date__lte=\n day, end_date__gte=day)\n', (4976, 5054), False, 'from homeschool.schools.models import SchoolYear\n')]
|
########################################################################################################################
# Module: tests/test_core.py
# Description: Tests for core and Sampler
#
# Web: https://github.com/SamDuffield/mocat
########################################################################################################################
import unittest
import jax.numpy as jnp
import mocat.src.sample
import numpy.testing as npt
from mocat.src import core
from mocat.src import sample
class Testcdict(unittest.TestCase):
cdict = core.cdict(test_arr=jnp.ones((10, 3)),
test_float=3.)
def test_init(self):
npt.assert_(hasattr(self.cdict, 'test_arr'))
npt.assert_array_equal(self.cdict.test_arr, jnp.ones((10, 3)))
npt.assert_(hasattr(self.cdict, 'test_float'))
npt.assert_equal(self.cdict.test_float, 3.)
def test_copy(self):
cdict2 = self.cdict.copy()
npt.assert_(isinstance(cdict2, core.cdict))
npt.assert_(isinstance(cdict2.test_arr, jnp.DeviceArray))
npt.assert_array_equal(cdict2.test_arr, jnp.ones((10, 3)))
npt.assert_(isinstance(cdict2.test_float, float))
npt.assert_equal(cdict2.test_float, 3.)
cdict2.test_arr = jnp.zeros(5)
npt.assert_array_equal(self.cdict.test_arr, jnp.ones((10, 3)))
cdict2.test_float = 9.
npt.assert_equal(self.cdict.test_float, 3.)
def test_getitem(self):
cdict_0get = self.cdict[0]
npt.assert_(isinstance(cdict_0get, core.cdict))
npt.assert_(isinstance(cdict_0get.test_arr, jnp.DeviceArray))
npt.assert_array_equal(cdict_0get.test_arr, jnp.ones(3))
npt.assert_(isinstance(cdict_0get.test_float, float))
npt.assert_equal(cdict_0get.test_float, 3.)
def test_additem(self):
cdict_other = core.cdict(test_arr=jnp.ones((2, 3)),
test_float=7.,
time=25.)
self.cdict.time = 10.
cdict_add = self.cdict + cdict_other
npt.assert_(isinstance(cdict_add, core.cdict))
npt.assert_(isinstance(cdict_add.test_arr, jnp.DeviceArray))
npt.assert_array_equal(cdict_add.test_arr, jnp.ones((12, 3)))
npt.assert_array_equal(cdict_add.time, 35.)
npt.assert_(isinstance(cdict_add.test_float, float))
npt.assert_equal(cdict_add.test_float, 3.)
npt.assert_array_equal(self.cdict.test_arr, jnp.ones((10, 3)))
npt.assert_equal(self.cdict.test_float, 3.)
npt.assert_equal(self.cdict.time, 10.)
del self.cdict.time
class TestSampler(unittest.TestCase):
sampler = sample.Sampler(name='test', other=jnp.zeros(2))
def test_init(self):
npt.assert_equal(self.sampler.name, 'test')
npt.assert_(hasattr(self.sampler, 'parameters'))
npt.assert_array_equal(self.sampler.parameters.other, jnp.zeros(2))
def test_copy(self):
sampler2 = self.sampler.deepcopy()
npt.assert_(isinstance(sampler2, sample.Sampler))
sampler2.name = 'other'
npt.assert_equal(self.sampler.name, 'test')
sampler2.parameters.other = 10.
npt.assert_array_equal(self.sampler.parameters.other, jnp.zeros(2))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_equal",
"jax.numpy.ones",
"jax.numpy.zeros"
] |
[((3308, 3323), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3321, 3323), False, 'import unittest\n'), ((851, 895), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.test_float', '(3.0)'], {}), '(self.cdict.test_float, 3.0)\n', (867, 895), True, 'import numpy.testing as npt\n'), ((1209, 1249), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cdict2.test_float', '(3.0)'], {}), '(cdict2.test_float, 3.0)\n', (1225, 1249), True, 'import numpy.testing as npt\n'), ((1276, 1288), 'jax.numpy.zeros', 'jnp.zeros', (['(5)'], {}), '(5)\n', (1285, 1288), True, 'import jax.numpy as jnp\n'), ((1400, 1444), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.test_float', '(3.0)'], {}), '(self.cdict.test_float, 3.0)\n', (1416, 1444), True, 'import numpy.testing as npt\n'), ((1771, 1815), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cdict_0get.test_float', '(3.0)'], {}), '(cdict_0get.test_float, 3.0)\n', (1787, 1815), True, 'import numpy.testing as npt\n'), ((2276, 2320), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['cdict_add.time', '(35.0)'], {}), '(cdict_add.time, 35.0)\n', (2298, 2320), True, 'import numpy.testing as npt\n'), ((2390, 2433), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cdict_add.test_float', '(3.0)'], {}), '(cdict_add.test_float, 3.0)\n', (2406, 2433), True, 'import numpy.testing as npt\n'), ((2513, 2557), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.test_float', '(3.0)'], {}), '(self.cdict.test_float, 3.0)\n', (2529, 2557), True, 'import numpy.testing as npt\n'), ((2565, 2604), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.time', '(10.0)'], {}), '(self.cdict.time, 10.0)\n', (2581, 2604), True, 'import numpy.testing as npt\n'), ((2768, 2811), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.sampler.name', '"""test"""'], {}), "(self.sampler.name, 'test')\n", (2784, 2811), True, 'import numpy.testing as npt\n'), ((3114, 3157), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.sampler.name', '"""test"""'], {}), "(self.sampler.name, 'test')\n", (3130, 3157), True, 'import numpy.testing as npt\n'), ((580, 597), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (588, 597), True, 'import jax.numpy as jnp\n'), ((768, 785), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (776, 785), True, 'import jax.numpy as jnp\n'), ((1123, 1140), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (1131, 1140), True, 'import jax.numpy as jnp\n'), ((1341, 1358), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (1349, 1358), True, 'import jax.numpy as jnp\n'), ((1687, 1698), 'jax.numpy.ones', 'jnp.ones', (['(3)'], {}), '(3)\n', (1695, 1698), True, 'import jax.numpy as jnp\n'), ((2249, 2266), 'jax.numpy.ones', 'jnp.ones', (['(12, 3)'], {}), '((12, 3))\n', (2257, 2266), True, 'import jax.numpy as jnp\n'), ((2486, 2503), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (2494, 2503), True, 'import jax.numpy as jnp\n'), ((2720, 2732), 'jax.numpy.zeros', 'jnp.zeros', (['(2)'], {}), '(2)\n', (2729, 2732), True, 'import jax.numpy as jnp\n'), ((2931, 2943), 'jax.numpy.zeros', 'jnp.zeros', (['(2)'], {}), '(2)\n', (2940, 2943), True, 'import jax.numpy as jnp\n'), ((3261, 3273), 'jax.numpy.zeros', 'jnp.zeros', (['(2)'], {}), '(2)\n', (3270, 3273), True, 'import jax.numpy as jnp\n'), ((1886, 1902), 'jax.numpy.ones', 'jnp.ones', (['(2, 3)'], {}), '((2, 3))\n', (1894, 1902), True, 'import jax.numpy as jnp\n')]
|
from itertools import product
import pytest
from openbrewerydb_api_tests import configuration as CONF
TEST_DATA = {
'endpoints': [
# city endpoints
'breweries?by_city=portland',
'breweries?by_city=san%20diego',
'breweries?by_city=san_diego',
# name endpoints
'breweries?by_name=company',
'breweries?by_name=gordon_biersch',
'breweries?by_name=granite%20city',
# state endpoints
'breweries?by_state=california',
'breweries?by_state=new_york',
'breweries?by_state=north%20carolina',
# postal code endpoints
'breweries?by_postal=44107',
'breweries?by_postal=44107-4020',
'breweries?by_postal=44107_4020',
# type endpoints
'breweries?by_type=planning',
'breweries?by_type=micro',
# tag
'breweries?by_tag=patio',
# todo uncomment when data appears in the database
# tags
# 'breweries?by_tags=patio,dog-friendly',
# page
'breweries?page=15',
'breweries?page=42',
],
'fields': [field for field in CONF.FIELD_NAMES if field != 'tag_list'],
'signs': ['', '-', '+'],
}
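# The fixture below pairs every endpoint with every sign and field via
# itertools.product; e.g. ('breweries?by_city=portland', '-', 'name')
# becomes the request 'breweries?by_city=portland&sort=-name'.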
class TestSortingResponse:
"""the class provides a set of tests for checking the correctness of sorting
in api responses and invariance of returned data"""
@pytest.fixture(
scope='class',
params=product(TEST_DATA['endpoints'], TEST_DATA['signs'], TEST_DATA['fields'], ))
def dataset(self, api_client, request):
endpoint, sign, field = request.param
        reverse = sign == '-'
response = api_client.get(endpoint).json()
new_endpoint = f'{endpoint}&sort={sign}{field}'
response_sort = api_client.get(new_endpoint).json()
return reverse, field, response, response_sort, new_endpoint
def test_field_sorting(self, dataset):
"""check sorting"""
reverse, field, _, response_sort, endpoint = dataset
# Note - ignore empty lines and None
fields = [item[field] for item in response_sort if item[field]]
if field == 'id':
expected = sorted(fields, reverse=reverse, key=lambda x: int(x))
        elif field in ('longitude', 'latitude'):
expected = sorted(fields, reverse=reverse, key=lambda x: float(x))
else:
expected = sorted(fields, reverse=reverse)
assert fields == expected, f'endpoint: {endpoint}\nfields: {fields}\n'
def test_data_persistence(self, dataset):
"""check data persistence"""
reverse, field, response, response_sort, endpoint = dataset
fields = {item['id'] for item in response}
fields_sort = {item['id'] for item in response_sort}
assert fields == fields_sort, f'endpoint: {endpoint}\nfields: {fields}\n'
    # todo: sorting by a non-existent field -- how should the API respond?
|
[
"itertools.product"
] |
[((1420, 1492), 'itertools.product', 'product', (["TEST_DATA['endpoints']", "TEST_DATA['signs']", "TEST_DATA['fields']"], {}), "(TEST_DATA['endpoints'], TEST_DATA['signs'], TEST_DATA['fields'])\n", (1427, 1492), False, 'from itertools import product\n')]
|
import logging
from collections import defaultdict
from django.contrib.auth.models import User
from django.core.exceptions import EmptyResultSet
from django.db import connection
from django.db.models.query_utils import Q
from annotation.models.damage_enums import PathogenicityImpact
from classification.enums import ClinicalSignificance
from classification.models import Classification, GenomeBuild
from library.database_utils import get_queryset_select_from_where_parts, dictfetchall
from snpdb.models.models_enums import BuiltInFilters
# Add the necessary fields to qs to create join:
REQUIRED_FIELDS = [
"clinvar__highest_pathogenicity",
"variantannotation__gene__geneannotation__omim_terms",
"variantannotation__impact"
]
CLASSIFICATION_COUNT_SQL = """
select 1
from classification_classification
where classification_classification.variant_id in (
select snpdb_variantallele.variant_id
from snpdb_variantallele
where allele_id in (
select allele_id
from snpdb_variantallele
where variant_id = snpdb_variant.id
)
)
"""
COUNTS = {
BuiltInFilters.TOTAL: "count(*)",
BuiltInFilters.CLINVAR: "sum(case when %(annotation_clinvar)s.highest_pathogenicity >= 4 then 1 else 0 end)",
BuiltInFilters.OMIM: "sum(case when %(annotation_geneannotation)s.omim_terms is not null then 1 else 0 end)",
BuiltInFilters.IMPACT_HIGH_OR_MODERATE: "sum(case when %(annotation_variantannotation)s.impact in ('H', 'M') then 1 else 0 end)",
BuiltInFilters.COSMIC: "sum(case when %(annotation_variantannotation)s.cosmic_id is not null then 1 else 0 end)",
BuiltInFilters.CLASSIFIED: f"sum(case when exists ({CLASSIFICATION_COUNT_SQL}) then 1 else 0 end)",
BuiltInFilters.CLASSIFIED_PATHOGENIC: f"sum(case when exists ({CLASSIFICATION_COUNT_SQL} AND classification_classification.clinical_significance in ('4', '5')) then 1 else 0 end)"
}
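# The %(...)s placeholders above are filled with partition table names from
# annotation_version.get_partition_names() before the SQL is executed (see
# get_node_counts_and_labels_dict below).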
def get_extra_filters_q(user: User, genome_build: GenomeBuild, extra_filters):
if extra_filters == BuiltInFilters.CLINVAR:
q = Q(clinvar__highest_pathogenicity__gte=4)
elif extra_filters == BuiltInFilters.OMIM:
q = Q(variantannotation__gene__geneannotation__omim_terms__isnull=False)
elif extra_filters in [BuiltInFilters.CLASSIFIED, BuiltInFilters.CLASSIFIED_PATHOGENIC]:
clinical_significance_list = None
if extra_filters == BuiltInFilters.CLASSIFIED_PATHOGENIC:
clinical_significance_list = [ClinicalSignificance.LIKELY_PATHOGENIC, ClinicalSignificance.PATHOGENIC]
q = Classification.get_variant_q(user, genome_build, clinical_significance_list)
elif extra_filters == BuiltInFilters.IMPACT_HIGH_OR_MODERATE:
q = Q(variantannotation__impact__in=(PathogenicityImpact.HIGH, PathogenicityImpact.MODERATE))
elif extra_filters == BuiltInFilters.COSMIC:
q = Q(variantannotation__cosmic_id__isnull=False)
else:
logging.warning("get_extra_filters_q, unknown filter '%s'", extra_filters)
q = Q(pk__isnull=False) # No op
return q
def get_node_count_colors(css_property):
""" Returns a list of tuples with last element being a dict,
css_property of "color" = [('ClinVar', {color: #ff0000}), etc] """
node_count_colors = []
for label, color in BuiltInFilters.COLORS:
node_count_colors.append((label, {css_property: color}))
return node_count_colors
def get_node_counts_mine_and_available(analysis):
node_count_types = analysis.get_node_count_types()
labels = dict(BuiltInFilters.CHOICES)
my_choices = [x[0] for x in node_count_types]
all_choices = [x[0] for x in BuiltInFilters.CHOICES]
# Needs to stay in order.
available_choices = []
for c in all_choices:
if c not in my_choices:
available_choices.append(c)
my_node_counts_list = []
for node_count in my_choices:
my_node_counts_list.append({"pk": node_count,
"css_classes": 'node-count-legend-' + node_count,
"description": labels[node_count]})
available_node_counts_list = []
for node_count in available_choices:
available_node_counts_list.append({"pk": node_count,
"css_classes": 'node-count-legend-' + node_count,
"description": labels[node_count]})
return my_node_counts_list, available_node_counts_list
def get_node_counts_and_labels_dict(node):
# TODO: We should pass in the labels we want, only join to the appropriate tables and retrieve what we want
# so if we only want clinvar or classified we only have to scan short tables
# Need to do inner query as distinct needs to be applied
# before aggregate functions
qs = node.get_queryset(inner_query_distinct=True)
qs = qs.values(*REQUIRED_FIELDS)
def get_count_alias(count_type):
return f"{count_type}_count".lower()
try:
_, from_str, where_str = get_queryset_select_from_where_parts(qs)
partition_names = node.analysis.annotation_version.get_partition_names()
select_columns = []
for count_type, column_string in COUNTS.items():
column_string %= partition_names
column_string += " as " + get_count_alias(count_type)
select_columns.append(column_string)
select_str = 'SELECT ' + ',\n'.join(select_columns)
sql = '\n'.join([select_str, from_str, where_str])
# logging.info("NODE COUNT sql was:")
# logging.info(sql)
try:
cursor = connection.cursor()
cursor.execute(sql)
except Exception as e:
logging.error(e)
logging.error(sql)
raise
data = dictfetchall(cursor)
if len(data) != 1:
msg = f"Expected single row! Was {len(data)} rows"
raise ValueError(msg)
data = data[0]
except EmptyResultSet:
data = defaultdict(int)
node_counts = {}
for count_type in COUNTS:
count_alias = get_count_alias(count_type)
node_counts[count_type] = data[count_alias] or 0
return node_counts
|
[
"logging.error",
"logging.warning",
"django.db.models.query_utils.Q",
"django.db.connection.cursor",
"collections.defaultdict",
"library.database_utils.dictfetchall",
"classification.models.Classification.get_variant_q",
"library.database_utils.get_queryset_select_from_where_parts"
] |
[((2041, 2081), 'django.db.models.query_utils.Q', 'Q', ([], {'clinvar__highest_pathogenicity__gte': '(4)'}), '(clinvar__highest_pathogenicity__gte=4)\n', (2042, 2081), False, 'from django.db.models.query_utils import Q\n'), ((4999, 5039), 'library.database_utils.get_queryset_select_from_where_parts', 'get_queryset_select_from_where_parts', (['qs'], {}), '(qs)\n', (5035, 5039), False, 'from library.database_utils import get_queryset_select_from_where_parts, dictfetchall\n'), ((5774, 5794), 'library.database_utils.dictfetchall', 'dictfetchall', (['cursor'], {}), '(cursor)\n', (5786, 5794), False, 'from library.database_utils import get_queryset_select_from_where_parts, dictfetchall\n'), ((2141, 2209), 'django.db.models.query_utils.Q', 'Q', ([], {'variantannotation__gene__geneannotation__omim_terms__isnull': '(False)'}), '(variantannotation__gene__geneannotation__omim_terms__isnull=False)\n', (2142, 2209), False, 'from django.db.models.query_utils import Q\n'), ((5597, 5616), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (5614, 5616), False, 'from django.db import connection\n'), ((5984, 6000), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5995, 6000), False, 'from collections import defaultdict\n'), ((2538, 2614), 'classification.models.Classification.get_variant_q', 'Classification.get_variant_q', (['user', 'genome_build', 'clinical_significance_list'], {}), '(user, genome_build, clinical_significance_list)\n', (2566, 2614), False, 'from classification.models import Classification, GenomeBuild\n'), ((5692, 5708), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (5705, 5708), False, 'import logging\n'), ((5721, 5739), 'logging.error', 'logging.error', (['sql'], {}), '(sql)\n', (5734, 5739), False, 'import logging\n'), ((2693, 2786), 'django.db.models.query_utils.Q', 'Q', ([], {'variantannotation__impact__in': '(PathogenicityImpact.HIGH, PathogenicityImpact.MODERATE)'}), '(variantannotation__impact__in=(PathogenicityImpact.HIGH,\n PathogenicityImpact.MODERATE))\n', (2694, 2786), False, 'from django.db.models.query_utils import Q\n'), ((2844, 2889), 'django.db.models.query_utils.Q', 'Q', ([], {'variantannotation__cosmic_id__isnull': '(False)'}), '(variantannotation__cosmic_id__isnull=False)\n', (2845, 2889), False, 'from django.db.models.query_utils import Q\n'), ((2908, 2982), 'logging.warning', 'logging.warning', (['"""get_extra_filters_q, unknown filter \'%s\'"""', 'extra_filters'], {}), '("get_extra_filters_q, unknown filter \'%s\'", extra_filters)\n', (2923, 2982), False, 'import logging\n'), ((2995, 3014), 'django.db.models.query_utils.Q', 'Q', ([], {'pk__isnull': '(False)'}), '(pk__isnull=False)\n', (2996, 3014), False, 'from django.db.models.query_utils import Q\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Copyright 2013 <NAME>
Copyright 2017 The Graphite Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Import the precompiled protobuffer. It can be recompiled with:
# $ protoc --python_out=. carbon.proto
from carbon.carbon_pb2 import Payload
import os
import sys
import time
import socket
import struct
CARBON_SERVER = '127.0.0.1'
CARBON_PROTOBUF_PORT = 2005
DELAY = 60
def run(sock, delay):
"""Make the client go go go"""
while True:
# Epoch, timestamp in seconds since 1970
now = int(time.time())
# Initialize the protobuf payload
payload_pb = Payload()
labels = ['1min', '5min', '15min']
for name, value in zip(labels, os.getloadavg()):
m = payload_pb.metrics.add()
m.metric = 'system.loadavg_' + name
p = m.points.add()
p.timestamp = now
p.value = value
print("sending message")
        print('-' * 80)
print(payload_pb)
package = payload_pb.SerializeToString()
# The message must be prepended with its size
size = struct.pack('!L', len(package))
sock.sendall(size)
# Then send the actual payload
sock.sendall(package)
time.sleep(delay)
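# Framing sketch: a 10-byte payload goes over the wire as
#   struct.pack('!L', 10) + payload
# i.e. a 4-byte big-endian unsigned length header followed by the serialized
# protobuf bytes, matching the size-prefixed format used above.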
def main():
"""Wrap it all up together"""
delay = DELAY
if len(sys.argv) > 1:
arg = sys.argv[1]
if arg.isdigit():
delay = int(arg)
else:
sys.stderr.write(
"Ignoring non-integer argument. Using default: %ss\n"
% delay)
sock = socket.socket()
try:
sock.connect((CARBON_SERVER, CARBON_PROTOBUF_PORT))
except socket.error:
raise SystemExit("Couldn't connect to %(server)s on port %(port)d, "
"is carbon-cache.py running?" %
{'server': CARBON_SERVER,
'port': CARBON_PROTOBUF_PORT})
try:
run(sock, delay)
except KeyboardInterrupt:
sys.stderr.write("\nExiting on CTRL-c\n")
sys.exit(0)
if __name__ == "__main__":
main()
|
[
"os.getloadavg",
"socket.socket",
"time.time",
"time.sleep",
"sys.stderr.write",
"carbon.carbon_pb2.Payload",
"sys.exit"
] |
[((2110, 2125), 'socket.socket', 'socket.socket', ([], {}), '()\n', (2123, 2125), False, 'import socket\n'), ((1127, 1136), 'carbon.carbon_pb2.Payload', 'Payload', ([], {}), '()\n', (1134, 1136), False, 'from carbon.carbon_pb2 import Payload\n'), ((1760, 1777), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1770, 1777), False, 'import time\n'), ((1050, 1061), 'time.time', 'time.time', ([], {}), '()\n', (1059, 1061), False, 'import time\n'), ((1220, 1235), 'os.getloadavg', 'os.getloadavg', ([], {}), '()\n', (1233, 1235), False, 'import os\n'), ((1977, 2056), 'sys.stderr.write', 'sys.stderr.write', (["('Ignoring non-integer argument. Using default: %ss\\n' % delay)"], {}), "('Ignoring non-integer argument. Using default: %ss\\n' % delay)\n", (1993, 2056), False, 'import sys\n'), ((2535, 2578), 'sys.stderr.write', 'sys.stderr.write', (['"""\nExiting on CTRL-c\n"""'], {}), '("""\nExiting on CTRL-c\n""")\n', (2551, 2578), False, 'import sys\n'), ((2585, 2596), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2593, 2596), False, 'import sys\n')]
|
import numpy as np
import diversipy
def test_distance_to_boundary():
points = np.array([[0.1, 0.2], [0.3, 0.9]])
np.testing.assert_almost_equal(
diversipy.distance.distance_to_boundary(points), np.array([0.1, 0.1])
)
np.testing.assert_almost_equal(
diversipy.distance.distance_to_boundary(points, cuboid=((-1, -1), (2, 2))),
np.array([1.1, 1.1]),
)
def test_distance_matrix():
points1 = np.array([[0.1, 0.2], [0.3, 0.9], [0.6, 0.1]])
points2 = np.array([[0.2, 0.2]])
# test L1 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=1),
[[0.1], [0.1 + 0.7], [0.4 + 0.1]],
)
# test L2 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=2),
[[0.1], [(0.1 ** 2 + 0.7 ** 2) ** 0.5], [(0.4 ** 2 + 0.1 ** 2) ** 0.5]],
)
# test toridal L1 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=1, max_dist=[1, 1]),
[[0.1], [0.1 + (1 - 0.7)], [0.4 + 0.1]],
)
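# Toroidal L1 check for the last case above, point [0.3, 0.9] vs [0.2, 0.2]:
# along x, |0.3 - 0.2| = 0.1 is already minimal; along y, the wrap-around
# distance is min(0.7, 1 - 0.7) = 0.3, giving the expected 0.1 + (1 - 0.7) = 0.4.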
|
[
"diversipy.distance.distance_to_boundary",
"numpy.array",
"diversipy.distance.distance_matrix"
] |
[((84, 118), 'numpy.array', 'np.array', (['[[0.1, 0.2], [0.3, 0.9]]'], {}), '([[0.1, 0.2], [0.3, 0.9]])\n', (92, 118), True, 'import numpy as np\n'), ((439, 485), 'numpy.array', 'np.array', (['[[0.1, 0.2], [0.3, 0.9], [0.6, 0.1]]'], {}), '([[0.1, 0.2], [0.3, 0.9], [0.6, 0.1]])\n', (447, 485), True, 'import numpy as np\n'), ((500, 522), 'numpy.array', 'np.array', (['[[0.2, 0.2]]'], {}), '([[0.2, 0.2]])\n', (508, 522), True, 'import numpy as np\n'), ((163, 210), 'diversipy.distance.distance_to_boundary', 'diversipy.distance.distance_to_boundary', (['points'], {}), '(points)\n', (202, 210), False, 'import diversipy\n'), ((212, 232), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (220, 232), True, 'import numpy as np\n'), ((283, 357), 'diversipy.distance.distance_to_boundary', 'diversipy.distance.distance_to_boundary', (['points'], {'cuboid': '((-1, -1), (2, 2))'}), '(points, cuboid=((-1, -1), (2, 2)))\n', (322, 357), False, 'import diversipy\n'), ((367, 387), 'numpy.array', 'np.array', (['[1.1, 1.1]'], {}), '([1.1, 1.1])\n', (375, 387), True, 'import numpy as np\n'), ((590, 650), 'diversipy.distance.distance_matrix', 'diversipy.distance.distance_matrix', (['points1', 'points2'], {'norm': '(1)'}), '(points1, points2, norm=1)\n', (624, 650), False, 'import diversipy\n'), ((768, 828), 'diversipy.distance.distance_matrix', 'diversipy.distance.distance_matrix', (['points1', 'points2'], {'norm': '(2)'}), '(points1, points2, norm=2)\n', (802, 828), False, 'import diversipy\n'), ((992, 1069), 'diversipy.distance.distance_matrix', 'diversipy.distance.distance_matrix', (['points1', 'points2'], {'norm': '(1)', 'max_dist': '[1, 1]'}), '(points1, points2, norm=1, max_dist=[1, 1])\n', (1026, 1069), False, 'import diversipy\n')]
|
import numpy as np
import os
import configparser
import tensorflow as tf
from pkg_resources import resource_filename
from pyniel.python_tools.path_tools import make_dir_if_not_exists
import crowd_sim # adds CrowdSim-v0 to gym # noqa
from crowd_sim.envs.crowd_sim import CrowdSim # reference to env code # noqa
from crowd_sim.envs.utils.robot import Robot # next line fails otherwise # noqa
from crowd_nav.policy.network_om import SDOADRL
from crowd_sim.envs.utils.state import JointState, FullState, ObservableState
from crowd_sim.envs.utils.action import ActionRot
from navrep.scripts.cross_test_navreptrain_in_ianenv import run_test_episodes
from navrep.tools.commonargs import parse_common_args
from navrep.envs.ianenv import IANEnv
TODO = None
class LuciaRawPolicy(object):
""" legacy SOADRL policy from lucia's paper, takes in agents state, local map
The problem is that in the original implementation, policy and environment are intertwined.
this class goes further into separating them by reimplementing methods from
agents.py, robots.py """
def __init__(self):
self._make_policy()
def _make_policy(self):
# Config
config_dir = resource_filename('crowd_nav', 'config')
config_file = os.path.join(config_dir, 'test_soadrl_static.config')
config = configparser.RawConfigParser()
config.read(config_file)
sess = tf.Session()
policy = SDOADRL()
policy.configure(sess, 'global', config)
policy.set_phase('test')
self.model_path = os.path.expanduser('~/soadrl/Final_models/angular_map_full_FOV/rl_model')
policy.load_model(self.model_path)
self.policy = policy
def act(self, obs):
robot_state, humans_state, local_map = obs
state = JointState(robot_state, humans_state)
action = self.policy.predict(state, local_map, None)
action = ActionRot(robot_state.v_pref * action.v, action.r) # de-normalize
return action
class IANEnvWithLegacySOADRLObs(object):
def __init__(self,
silent=False, max_episode_length=1000, collect_trajectories=False):
# Get lidar values from the SOADRL config
config_dir = resource_filename('crowd_nav', 'config')
config_file = os.path.join(config_dir, 'test_soadrl_static.config')
config = configparser.RawConfigParser()
config.read(config_file)
self.v_pref = config.getfloat('humans', 'v_pref')
# lidar scan expected by SOADRL
self.angular_map_max_range = config.getfloat('map', 'angular_map_max_range')
self.angular_map_dim = config.getint('map', 'angular_map_dim')
self.angular_map_min_angle = config.getfloat('map', 'angle_min') * np.pi
self.angular_map_max_angle = config.getfloat('map', 'angle_max') * np.pi
self.angular_map_angle_increment = (
self.angular_map_max_angle - self.angular_map_min_angle) / self.angular_map_dim
self.lidar_upsampling = 15
# create env
self.env = IANEnv(
silent=silent, max_episode_length=max_episode_length, collect_trajectories=collect_trajectories)
self.reset()
def reset(self):
""" IANEnv destroys and re-creates its iarlenv at every reset, so apply our changes here """
self.env.reset()
# we raytrace at a higher resolution, then downsample back to the original soadrl resolution
# this avoids missing small obstacles due to the small soadrl resolution
self.env.iarlenv.rlenv.virtual_peppers[0].kLidarMergedMaxAngle = self.angular_map_max_angle
self.env.iarlenv.rlenv.virtual_peppers[0].kLidarMergedMinAngle = self.angular_map_min_angle
self.env.iarlenv.rlenv.virtual_peppers[0].kLidarAngleIncrement = \
self.angular_map_angle_increment / self.lidar_upsampling
self.env.iarlenv.rlenv.kMergedScanSize = self.angular_map_dim * self.lidar_upsampling
self.episode_statistics = self.env.episode_statistics
obs, _, _, _ = self.step(ActionRot(0.,0.))
return obs
def step(self, action):
# convert lucia action to IANEnv action
ianenv_action = np.array([0., 0., 0.])
# SOADRL - rotation is dtheta
# IAN - rotation is dtheta/dt
ianenv_action[2] = action.r / self.env._get_dt()
# SOADRL - instant rot, then vel
# IAN - vel, then rot
action_vy = 0. # SOADRL outputs non-holonomic by default
ianenv_action[0] = action.v * np.cos(action.r) - action_vy * np.sin(action.r)
ianenv_action[1] = action.v * np.sin(action.r) + action_vy * np.cos(action.r)
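        # Worked example (illustrative numbers): ActionRot(v=0.5, r=pi/2) with
        # dt = 0.1 gives a rotation speed of (pi/2)/0.1, and since action_vy is 0
        # the velocity becomes vx = 0.5*cos(pi/2) ~ 0, vy = 0.5*sin(pi/2) = 0.5.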
# get obs from IANEnv
obs, rew, done, info = self.env.step(ianenv_action)
# convert to SOADRL style
robot_state = FullState(
self.env.iarlenv.rlenv.virtual_peppers[0].pos[0],
self.env.iarlenv.rlenv.virtual_peppers[0].pos[1],
self.env.iarlenv.rlenv.virtual_peppers[0].vel[0],
self.env.iarlenv.rlenv.virtual_peppers[0].vel[1],
self.env.iarlenv.rlenv.vp_radii[0],
self.env.iarlenv.rlenv.agent_goals[0][0],
self.env.iarlenv.rlenv.agent_goals[0][1],
self.v_pref,
self.env.iarlenv.rlenv.virtual_peppers[0].pos[2],)
humans_state = [ObservableState(
human.pos[0],
human.pos[1],
human.vel[0],
human.vel[1],
r,) for human, r in zip(
self.env.iarlenv.rlenv.virtual_peppers[1:], self.env.iarlenv.rlenv.vp_radii[1:])]
scan = obs[0]
# for each angular section we take the min of the returns
downsampled_scan = scan.reshape((-1, self.lidar_upsampling))
downsampled_scan = np.min(downsampled_scan, axis=1)
self.last_downsampled_scan = downsampled_scan
local_map = np.clip(downsampled_scan / self.angular_map_max_range, 0., 1.)
obs = (robot_state, humans_state, local_map)
return obs, rew, done, info
def _get_dt(self):
return self.env._get_dt()
def render(self, *args, **kwargs):
_, lidar_angles = self.env.iarlenv.rlenv.virtual_peppers[0].get_lidar_update_ijangles(
"merged", self.env.iarlenv.rlenv.kMergedScanSize
)
lidar_angles_downsampled = lidar_angles[::self.lidar_upsampling]
kwargs["lidar_angles_override"] = lidar_angles_downsampled
kwargs["lidar_scan_override"] = self.last_downsampled_scan
return self.env.render(*args, **kwargs)
if __name__ == '__main__':
args, _ = parse_common_args()
if args.n is None:
args.n = 1000
collect_trajectories = False
env = IANEnvWithLegacySOADRLObs(silent=True, collect_trajectories=collect_trajectories)
policy = LuciaRawPolicy()
S = run_test_episodes(env, policy, render=args.render, num_episodes=args.n)
DIR = os.path.expanduser("~/navrep/eval/crosstest")
if args.dry_run:
DIR = "/tmp/navrep/eval/crosstest"
make_dir_if_not_exists(DIR)
if collect_trajectories:
NAME = "lucianavreptrain_in_ianenv_{}.pckl".format(len(S))
PATH = os.path.join(DIR, NAME)
S.to_pickle(PATH)
else:
NAME = "lucianavreptrain_in_ianenv_{}.csv".format(len(S))
PATH = os.path.join(DIR, NAME)
S.to_csv(PATH)
print("{} written.".format(PATH))
|
[
"pkg_resources.resource_filename",
"numpy.clip",
"numpy.sin",
"crowd_sim.envs.utils.action.ActionRot",
"os.path.join",
"pyniel.python_tools.path_tools.make_dir_if_not_exists",
"configparser.RawConfigParser",
"navrep.envs.ianenv.IANEnv",
"crowd_nav.policy.network_om.SDOADRL",
"crowd_sim.envs.utils.state.JointState",
"navrep.scripts.cross_test_navreptrain_in_ianenv.run_test_episodes",
"navrep.tools.commonargs.parse_common_args",
"tensorflow.Session",
"numpy.min",
"crowd_sim.envs.utils.state.ObservableState",
"numpy.cos",
"crowd_sim.envs.utils.state.FullState",
"numpy.array",
"os.path.expanduser"
] |
[((6596, 6615), 'navrep.tools.commonargs.parse_common_args', 'parse_common_args', ([], {}), '()\n', (6613, 6615), False, 'from navrep.tools.commonargs import parse_common_args\n'), ((6827, 6898), 'navrep.scripts.cross_test_navreptrain_in_ianenv.run_test_episodes', 'run_test_episodes', (['env', 'policy'], {'render': 'args.render', 'num_episodes': 'args.n'}), '(env, policy, render=args.render, num_episodes=args.n)\n', (6844, 6898), False, 'from navrep.scripts.cross_test_navreptrain_in_ianenv import run_test_episodes\n'), ((6910, 6955), 'os.path.expanduser', 'os.path.expanduser', (['"""~/navrep/eval/crosstest"""'], {}), "('~/navrep/eval/crosstest')\n", (6928, 6955), False, 'import os\n'), ((7024, 7051), 'pyniel.python_tools.path_tools.make_dir_if_not_exists', 'make_dir_if_not_exists', (['DIR'], {}), '(DIR)\n', (7046, 7051), False, 'from pyniel.python_tools.path_tools import make_dir_if_not_exists\n'), ((1194, 1234), 'pkg_resources.resource_filename', 'resource_filename', (['"""crowd_nav"""', '"""config"""'], {}), "('crowd_nav', 'config')\n", (1211, 1234), False, 'from pkg_resources import resource_filename\n'), ((1257, 1310), 'os.path.join', 'os.path.join', (['config_dir', '"""test_soadrl_static.config"""'], {}), "(config_dir, 'test_soadrl_static.config')\n", (1269, 1310), False, 'import os\n'), ((1328, 1358), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (1356, 1358), False, 'import configparser\n'), ((1408, 1420), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1418, 1420), True, 'import tensorflow as tf\n'), ((1438, 1447), 'crowd_nav.policy.network_om.SDOADRL', 'SDOADRL', ([], {}), '()\n', (1445, 1447), False, 'from crowd_nav.policy.network_om import SDOADRL\n'), ((1556, 1629), 'os.path.expanduser', 'os.path.expanduser', (['"""~/soadrl/Final_models/angular_map_full_FOV/rl_model"""'], {}), "('~/soadrl/Final_models/angular_map_full_FOV/rl_model')\n", (1574, 1629), False, 'import os\n'), ((1795, 1832), 'crowd_sim.envs.utils.state.JointState', 'JointState', (['robot_state', 'humans_state'], {}), '(robot_state, humans_state)\n', (1805, 1832), False, 'from crowd_sim.envs.utils.state import JointState, FullState, ObservableState\n'), ((1911, 1961), 'crowd_sim.envs.utils.action.ActionRot', 'ActionRot', (['(robot_state.v_pref * action.v)', 'action.r'], {}), '(robot_state.v_pref * action.v, action.r)\n', (1920, 1961), False, 'from crowd_sim.envs.utils.action import ActionRot\n'), ((2221, 2261), 'pkg_resources.resource_filename', 'resource_filename', (['"""crowd_nav"""', '"""config"""'], {}), "('crowd_nav', 'config')\n", (2238, 2261), False, 'from pkg_resources import resource_filename\n'), ((2284, 2337), 'os.path.join', 'os.path.join', (['config_dir', '"""test_soadrl_static.config"""'], {}), "(config_dir, 'test_soadrl_static.config')\n", (2296, 2337), False, 'import os\n'), ((2355, 2385), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (2383, 2385), False, 'import configparser\n'), ((3048, 3155), 'navrep.envs.ianenv.IANEnv', 'IANEnv', ([], {'silent': 'silent', 'max_episode_length': 'max_episode_length', 'collect_trajectories': 'collect_trajectories'}), '(silent=silent, max_episode_length=max_episode_length,\n collect_trajectories=collect_trajectories)\n', (3054, 3155), False, 'from navrep.envs.ianenv import IANEnv\n'), ((4190, 4215), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4198, 4215), True, 'import numpy as np\n'), ((4808, 5225), 'crowd_sim.envs.utils.state.FullState', 'FullState', (['self.env.iarlenv.rlenv.virtual_peppers[0].pos[0]', 'self.env.iarlenv.rlenv.virtual_peppers[0].pos[1]', 'self.env.iarlenv.rlenv.virtual_peppers[0].vel[0]', 'self.env.iarlenv.rlenv.virtual_peppers[0].vel[1]', 'self.env.iarlenv.rlenv.vp_radii[0]', 'self.env.iarlenv.rlenv.agent_goals[0][0]', 'self.env.iarlenv.rlenv.agent_goals[0][1]', 'self.v_pref', 'self.env.iarlenv.rlenv.virtual_peppers[0].pos[2]'], {}), '(self.env.iarlenv.rlenv.virtual_peppers[0].pos[0], self.env.\n iarlenv.rlenv.virtual_peppers[0].pos[1], self.env.iarlenv.rlenv.\n virtual_peppers[0].vel[0], self.env.iarlenv.rlenv.virtual_peppers[0].\n vel[1], self.env.iarlenv.rlenv.vp_radii[0], self.env.iarlenv.rlenv.\n agent_goals[0][0], self.env.iarlenv.rlenv.agent_goals[0][1], self.\n v_pref, self.env.iarlenv.rlenv.virtual_peppers[0].pos[2])\n', (4817, 5225), False, 'from crowd_sim.envs.utils.state import JointState, FullState, ObservableState\n'), ((5775, 5807), 'numpy.min', 'np.min', (['downsampled_scan'], {'axis': '(1)'}), '(downsampled_scan, axis=1)\n', (5781, 5807), True, 'import numpy as np\n'), ((5882, 5946), 'numpy.clip', 'np.clip', (['(downsampled_scan / self.angular_map_max_range)', '(0.0)', '(1.0)'], {}), '(downsampled_scan / self.angular_map_max_range, 0.0, 1.0)\n', (5889, 5946), True, 'import numpy as np\n'), ((7163, 7186), 'os.path.join', 'os.path.join', (['DIR', 'NAME'], {}), '(DIR, NAME)\n', (7175, 7186), False, 'import os\n'), ((7304, 7327), 'os.path.join', 'os.path.join', (['DIR', 'NAME'], {}), '(DIR, NAME)\n', (7316, 7327), False, 'import os\n'), ((4052, 4071), 'crowd_sim.envs.utils.action.ActionRot', 'ActionRot', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (4061, 4071), False, 'from crowd_sim.envs.utils.action import ActionRot\n'), ((5335, 5409), 'crowd_sim.envs.utils.state.ObservableState', 'ObservableState', (['human.pos[0]', 'human.pos[1]', 'human.vel[0]', 'human.vel[1]', 'r'], {}), '(human.pos[0], human.pos[1], human.vel[0], human.vel[1], r)\n', (5350, 5409), False, 'from crowd_sim.envs.utils.state import JointState, FullState, ObservableState\n'), ((4528, 4544), 'numpy.cos', 'np.cos', (['action.r'], {}), '(action.r)\n', (4534, 4544), True, 'import numpy as np\n'), ((4559, 4575), 'numpy.sin', 'np.sin', (['action.r'], {}), '(action.r)\n', (4565, 4575), True, 'import numpy as np\n'), ((4614, 4630), 'numpy.sin', 'np.sin', (['action.r'], {}), '(action.r)\n', (4620, 4630), True, 'import numpy as np\n'), ((4645, 4661), 'numpy.cos', 'np.cos', (['action.r'], {}), '(action.r)\n', (4651, 4661), True, 'import numpy as np\n')]
|
import json, requests, sys
URL='https://api.groupme.com/v3'
# Attempt to load the config file
try:
config_file = open('config.json')
except IOError:
print("Cannot open config file, exiting")
sys.exit(1)
CONFIG = json.loads(config_file.read())
config_file.close()
if 'token' not in CONFIG:
    print("Invalid config file, see readme")
sys.exit(1)
TOKEN = CONFIG["token"]
# Rewrites the config file with the given config dict
def write_config(config):
    if 'token' not in config:
print("Config needs a 'token' field")
return
config_file = open('config.json', 'w')
config_file.write(json.dumps(config))
config_file.close()
# hit a get rest endpoint with the given params,
# returning the result as a json object
def get_rest(endpoint, params={}):
params['token'] = TOKEN
res = requests.get(URL + '/' + endpoint, params=params)
if (res.status_code == 200):
return json.loads(res.text)['response']
else:
print(res.text)
return None
# hit a post rest endpoint with the given params,
# returning the result as a json object
def post_rest(endpoint, data={}, params={}, headers={}):
params['token'] = TOKEN
res = requests.post(URL + '/' + endpoint, data=json.dumps(data), params=params, headers=headers)
print(res.text)
if (res.status_code == 200 or res.status_code == 201):
return json.loads(res.text)['response']
else:
return None
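# Hypothetical usage sketch (endpoint paths shown for illustration only):
#   me = get_rest('users/me')
#   post_rest('bots/post', data={'bot_id': '...', 'text': 'hello'})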
|
[
"requests.get",
"json.loads",
"sys.exit",
"json.dumps"
] |
[((354, 365), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (362, 365), False, 'import json, requests, sys\n'), ((840, 889), 'requests.get', 'requests.get', (["(URL + '/' + endpoint)"], {'params': 'params'}), "(URL + '/' + endpoint, params=params)\n", (852, 889), False, 'import json, requests, sys\n'), ((205, 216), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (213, 216), False, 'import json, requests, sys\n'), ((629, 647), 'json.dumps', 'json.dumps', (['config'], {}), '(config)\n', (639, 647), False, 'import json, requests, sys\n'), ((938, 958), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (948, 958), False, 'import json, requests, sys\n'), ((1252, 1268), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1262, 1268), False, 'import json, requests, sys\n'), ((1396, 1416), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (1406, 1416), False, 'import json, requests, sys\n')]
|
from thetis import *
import time as time_mod
from model_config import *
# Setup solver
solver_obj, start_time, update_forcings = construct_solver(
output_directory="outputs_spinup",
spinup=True,
start_date=datetime.datetime(2022, 1, 1, tzinfo=sim_tz),
end_date=datetime.datetime(2022, 1, 15, tzinfo=sim_tz),
fields_to_export=[],
fields_to_export_hdf5=["elev_2d", "uv_2d"],
simulation_export_time=24 * 3600.0,
)
output_dir = solver_obj.options.output_directory
mesh2d = solver_obj.mesh2d
solver_obj.assign_initial_conditions()
update_forcings(0.0)
# Time integrate
tic = time_mod.perf_counter()
solver_obj.iterate(update_forcings=update_forcings)
toc = time_mod.perf_counter()
print_output(f"Total duration: {toc-tic:.2f} seconds")
|
[
"time.perf_counter"
] |
[((601, 624), 'time.perf_counter', 'time_mod.perf_counter', ([], {}), '()\n', (622, 624), True, 'import time as time_mod\n'), ((683, 706), 'time.perf_counter', 'time_mod.perf_counter', ([], {}), '()\n', (704, 706), True, 'import time as time_mod\n')]
|
from django.conf.urls import patterns, url
def get_sitegate_urls():
"""Returns sitegate urlpatterns, that can be attached
to urlpatterns of a project:
# Example from urls.py.
from sitegate.toolbox import get_sitegate_urls
urlpatterns = patterns('',
...
url(r'^login/$', 'apps.views.login', name='login'),
...
) + get_sitegate_urls() # Attach.
"""
return patterns(
'',
url(r'^verify_email/(?P<code>\S+)/$', 'sitegate.views.verify_email', name='verify_email'),
        url(r'^verify/(?P<what>[\w_-]+)/(?P<code>\S+)/$', 'sitegate.views.generic_confirmation', name='generic_confirmation')
)
|
[
"django.conf.urls.url"
] |
[((475, 569), 'django.conf.urls.url', 'url', (['"""^verify_email/(?P<code>\\\\S+)/$"""', '"""sitegate.views.verify_email"""'], {'name': '"""verify_email"""'}), "('^verify_email/(?P<code>\\\\S+)/$', 'sitegate.views.verify_email', name=\n 'verify_email')\n", (478, 569), False, 'from django.conf.urls import patterns, url\n'), ((574, 696), 'django.conf.urls.url', 'url', (['"""^verify/(?P<what>[\\\\w_-]+)/(?P<code>\\\\S+)/$"""', '"""sitegate.views.generic_confirmation"""'], {'name': '"""generic_confirmation"""'}), "('^verify/(?P<what>[\\\\w_-]+)/(?P<code>\\\\S+)/$',\n 'sitegate.views.generic_confirmation', name='generic_confirmation')\n", (577, 696), False, 'from django.conf.urls import patterns, url\n')]
|
import asyncio
from aiodag import task
@task
async def processA():
await asyncio.sleep(1)
print('Done processA')
async def processB():
await asyncio.sleep(5)
print('Done processB')
@task
async def processC():
await asyncio.sleep(1)
print('Done processC')
@task
async def processD(f):
await asyncio.sleep(f / 2)
print('Done processD')
@task
async def processE():
await asyncio.sleep(1)
print('Done processE')
@task
async def processF():
val = 2
await asyncio.sleep(val)
print('Done processF')
return val
async def main():
# ok to redecorate tasks
# pass explicit dependencies to the task decorator
# these are explicit because they are not implied through the func params
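    # resulting edges: F->D, E->D (processD waits on both), D->C, E->B, {B, C}->A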
tF = processF()
tE = processE()
tD = task(processD, tE)(tF) # you can see this one has endogenous and exogenous deps
tC = task(processC, tD)()
tB = task(processB, tE)()
tA = task(processA, tB, tC)()
await asyncio.gather(tA)
if __name__ == '__main__':
loop = asyncio.new_event_loop()
loop.run_until_complete(main())
|
[
"asyncio.gather",
"asyncio.sleep",
"aiodag.task",
"asyncio.new_event_loop"
] |
[((1041, 1065), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (1063, 1065), False, 'import asyncio\n'), ((79, 95), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (92, 95), False, 'import asyncio\n'), ((156, 172), 'asyncio.sleep', 'asyncio.sleep', (['(5)'], {}), '(5)\n', (169, 172), False, 'import asyncio\n'), ((239, 255), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (252, 255), False, 'import asyncio\n'), ((323, 343), 'asyncio.sleep', 'asyncio.sleep', (['(f / 2)'], {}), '(f / 2)\n', (336, 343), False, 'import asyncio\n'), ((410, 426), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (423, 426), False, 'import asyncio\n'), ((505, 523), 'asyncio.sleep', 'asyncio.sleep', (['val'], {}), '(val)\n', (518, 523), False, 'import asyncio\n'), ((797, 815), 'aiodag.task', 'task', (['processD', 'tE'], {}), '(processD, tE)\n', (801, 815), False, 'from aiodag import task\n'), ((887, 905), 'aiodag.task', 'task', (['processC', 'tD'], {}), '(processC, tD)\n', (891, 905), False, 'from aiodag import task\n'), ((917, 935), 'aiodag.task', 'task', (['processB', 'tE'], {}), '(processB, tE)\n', (921, 935), False, 'from aiodag import task\n'), ((947, 969), 'aiodag.task', 'task', (['processA', 'tB', 'tC'], {}), '(processA, tB, tC)\n', (951, 969), False, 'from aiodag import task\n'), ((982, 1000), 'asyncio.gather', 'asyncio.gather', (['tA'], {}), '(tA)\n', (996, 1000), False, 'import asyncio\n')]
|
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import numpy as np
import os
from BatchTransNorm import BatchTransNorm2d
from datasets import (Chest_few_shot, CropDisease_few_shot, EuroSAT_few_shot,
ISIC_few_shot, miniImageNet_few_shot)
def get_visual_domain(BN_list, dataloader_list, dataset_names_list):
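    # Push one batch from each dataset through its BatchNorm layer and
    # collect per-image spatial means/variances plus dataset labels.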
label_dataset = []
spatial_mean = []
spatial_var = []
with torch.no_grad():
for i, loader in enumerate(dataloader_list):
# loader_iter = iter(loader)
# x, _ = loader_iter.next()
for x, _ in loader:
out = BN_list[i](x)
spatial_mean += out.mean([2, 3]).tolist()
spatial_var += out.var([2, 3]).tolist()
label_dataset += [dataset_names_list[i]]*len(x)
break
return np.array(spatial_mean), np.array(spatial_var), label_dataset
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
dataset_class_list = [miniImageNet_few_shot, EuroSAT_few_shot]#, CropDisease_few_shot, Chest_few_shot, ISIC_few_shot]
dataset_names_list = ['miniImageNet', 'EuroSAT',
'CropDisease', 'ChestX', 'ISIC']
dataloader_list = []
for i, dataset_class in enumerate(dataset_class_list):
transform = dataset_class.TransformLoader(
224).get_composed_transform(aug=True)
transform_test = dataset_class.TransformLoader(
224).get_composed_transform(aug=False)
# split = 'datasets/split_seed_1/{0}_labeled_20.csv'.format(
# dataset_names_list[i])
# if dataset_names_list[i] == 'miniImageNet':
split = None
dataset = dataset_class.SimpleDataset(
transform, split=split)
loader = torch.utils.data.DataLoader(dataset, batch_size=128,
num_workers=0,
shuffle=True, drop_last=True)
dataloader_list.append(loader)
BN_list = []
btn = BatchTransNorm2d(num_features=3)
with torch.no_grad():
for i, loader in enumerate(dataloader_list):
BN_list.append(nn.BatchNorm2d(num_features=3))
BN_list[-1].train()
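        # a few epochs of forward passes in train mode let each BatchNorm
        # accumulate running statistics for its dataset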
        for epoch in range(3): # number of epochs
for x, _ in loader:
BN_list[-1](x)
# break
print('dataset {0} epoch {1}'.format(dataset_names_list[i], epoch))
btn.load_state_dict(BN_list[0].state_dict())
vd_mean, vd_var, labels = get_visual_domain(BN_list, dataloader_list, dataset_names_list)
tvd_mean, tvd_var, labels = get_visual_domain([btn]*len(BN_list), dataloader_list, dataset_names_list)
color = sns.color_palette(n_colors=len(dataloader_list))
fig = plt.figure(figsize=(20, 10))
ax = fig.subplots(1,2)
sns.kdeplot(x=vd_mean[:, 0], y=vd_var[:, 0],
hue=labels, ax=ax[0], palette=color)
sns.kdeplot(x=tvd_mean[:, 0], y=tvd_var[:, 0],
hue=labels, ax=ax[1], palette=color)
title = 'Left visual domain, Right transnormed visual domain.'
fig.suptitle(title)
plt.savefig('./lab/visual_domain/{0}.png'.format(title))
plt.savefig('./lab/visual_domain/{0}.svg'.format(title))
print(title)
|
[
"seaborn.kdeplot",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.figure",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"numpy.array",
"BatchTransNorm.BatchTransNorm2d",
"torch.device",
"torch.no_grad"
] |
[((1082, 1107), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1105, 1107), False, 'import torch\n'), ((1159, 1176), 'torch.device', 'torch.device', (['dev'], {}), '(dev)\n', (1171, 1176), False, 'import torch\n'), ((2159, 2191), 'BatchTransNorm.BatchTransNorm2d', 'BatchTransNorm2d', ([], {'num_features': '(3)'}), '(num_features=3)\n', (2175, 2191), False, 'from BatchTransNorm import BatchTransNorm2d\n'), ((2871, 2899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2881, 2899), True, 'import matplotlib.pyplot as plt\n'), ((2927, 3013), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': 'vd_mean[:, 0]', 'y': 'vd_var[:, 0]', 'hue': 'labels', 'ax': 'ax[0]', 'palette': 'color'}), '(x=vd_mean[:, 0], y=vd_var[:, 0], hue=labels, ax=ax[0], palette=\n color)\n', (2938, 3013), True, 'import seaborn as sns\n'), ((3022, 3109), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': 'tvd_mean[:, 0]', 'y': 'tvd_var[:, 0]', 'hue': 'labels', 'ax': 'ax[1]', 'palette': 'color'}), '(x=tvd_mean[:, 0], y=tvd_var[:, 0], hue=labels, ax=ax[1],\n palette=color)\n', (3033, 3109), True, 'import seaborn as sns\n'), ((1924, 2026), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(128)', 'num_workers': '(0)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset, batch_size=128, num_workers=0, shuffle\n =True, drop_last=True)\n', (1951, 2026), False, 'import torch\n'), ((2197, 2212), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2210, 2212), False, 'import torch\n'), ((566, 581), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (579, 581), False, 'import torch\n'), ((1016, 1038), 'numpy.array', 'np.array', (['spatial_mean'], {}), '(spatial_mean)\n', (1024, 1038), True, 'import numpy as np\n'), ((1040, 1061), 'numpy.array', 'np.array', (['spatial_var'], {}), '(spatial_var)\n', (1048, 1061), True, 'import numpy as np\n'), ((2290, 2320), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(3)'}), '(num_features=3)\n', (2304, 2320), True, 'import torch.nn as nn\n')]
|
# encoding:utf-8
import lxml
"""
lxml.etree.HTML() parses an HTML text string
lxml.etree.parse() parses file contents
"""
import lxml.etree
html = lxml.etree.parse("1.html") # parse the file
print(html)
print(type(html))
print(lxml.etree.tostring(html))
"""
Error raised:
lxml.etree.XMLSyntaxError: Opening and ending tag mismatch: meta line 4 and head, line 6, column 8
This is mainly caused by mismatched tags; removing the meta tag from the HTML fixes it.
"""
"""
Key point: lxml.etree.parse(html_file_path, parser); the data returned by tostring() is bytes, so decode it to inspect
from lxml import etree
html = etree.parse('./test.html', etree.HTMLParser())
result = etree.tostring(html)
print(result.decode('utf-8'))
"""
|
[
"lxml.etree.parse",
"lxml.etree.tostring"
] |
[((125, 151), 'lxml.etree.parse', 'lxml.etree.parse', (['"""1.html"""'], {}), "('1.html')\n", (141, 151), False, 'import lxml\n'), ((196, 221), 'lxml.etree.tostring', 'lxml.etree.tostring', (['html'], {}), '(html)\n', (215, 221), False, 'import lxml\n')]
|
import networkx as nx
import random as rnd
import math
def writeNodes(basename, number, caps, lista, f):
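    # Writes `number` Prolog facts of the form node(<basename><i>, <caps>).
    # and appends each generated node name to `lista`.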
for i in range(number):
name = basename + str(i)
node = "node(" + name +"," + caps +").\n"
lista.append(name)
f.write(node)
def printLinks(list1, list2, qos, f):
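    # Writes a directed link(<n1>, <n2>, <qos>). fact for every ordered
    # pair of distinct nodes drawn from list1 x list2.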
for n1 in list1:
for n2 in list2:
if n1 != n2:
link = "link(" + n1 + ", " + n2 + ", " + qos + ").\n"
f.write(link)
def builder(nodesnumber, path="infra.pl"):
f = open(path, "w+")
f.write(":-dynamic link/4.\n:-dynamic node/4.\n\n")
CLOUDS = nodesnumber
ISPS = nodesnumber
CABINETS = nodesnumber
ACCESSPOINTS = nodesnumber
SMARTPHONES = nodesnumber
clouds = []
isps = []
cabinets = []
accesspoints = []
smartphones = []
writeNodes("cloud", CLOUDS, "[ubuntu, mySQL, gcc, make, nodejs, rabbitmq, go, java, mongodb, dotnet], inf, []", clouds, f)
writeNodes("ispdatacentre", ISPS, "[ubuntu, mySQL, nodejs, rabbitmq, go, java, mongodb, dotnet], 50, []", isps, f)
writeNodes("cabinetserver", CABINETS, "[ubuntu, mySQL, nodejs, rabbitmq, go, java, mongodb], 20, []", cabinets, f)
writeNodes("accesspoint", ACCESSPOINTS, "[ubuntu, gcc, make, java, nodejs, mongodb], 4, [vrViewer,user]", accesspoints, f)
writeNodes("smartphone", SMARTPHONES, "[android, gcc, make, java], 8, [vrViewer,user]", smartphones, f)
f.write("\n")
printLinks(clouds, clouds, "20, 1000", f)
printLinks(clouds, isps, "110, 1000", f)
printLinks(clouds, cabinets, "135, 100", f)
    printLinks(clouds, accesspoints, "148, 20", f)
printLinks(clouds, smartphones, "150, 18", f)
f.write("\n")
printLinks(isps, clouds, "110, 1000", f)
printLinks(isps, isps, "20, 1000", f)
printLinks(isps, cabinets, "25, 500", f)
printLinks(isps, accesspoints, "38, 50", f)
printLinks(isps, smartphones, "20, 1000", f)
f.write("\n")
printLinks(cabinets, clouds, "135, 100", f)
printLinks(cabinets, isps, "25, 500", f)
printLinks(cabinets, cabinets, "20, 1000", f)
printLinks(cabinets, accesspoints, "13, 50", f)
printLinks(cabinets, smartphones, "15, 35", f)
f.write("\n")
printLinks(accesspoints, clouds, "148, 3", f)
printLinks(accesspoints, isps, "38, 4", f)
printLinks(accesspoints, cabinets, "13, 4", f)
printLinks(accesspoints, accesspoints, "10, 50", f)
printLinks(accesspoints, smartphones, "2, 70", f)
f.write("\n")
printLinks(smartphones, clouds, "150, 2", f)
printLinks(smartphones, isps, "40, 2.5", f)
printLinks(smartphones, cabinets, "15, 3", f)
printLinks(smartphones, accesspoints, "2, 70", f)
printLinks(smartphones, smartphones, "15, 50", f)
f.close()
def set_node_as_cloud(node):
rand = rnd.random()
if rand > 0.9:
node["software"] = "[]"
elif rand > 0.7:
node["software"] = "[ubuntu]"
else:
node["software"] = "[ubuntu, mySQL, gcc, make]"
rand = rnd.random()
if rand > 0.9:
node["hardware"] = "0"
else:
node["hardware"] = "inf"
node["iot"] = "[sensor1, sensor2, sensor3]"
node["handler"] = set_node_as_cloud
return node
def set_node_as_ispdatacentre(node):
rand = rnd.random()
if rand > 0.9:
node["software"] = "[]"
elif rand > 0.7:
node["software"] = "[ubuntu]"
else:
node["software"] = "[ubuntu, mySQL]"
rand = rnd.random()
if rand > 0.9:
node["hardware"] = "0"
elif rand > 0.7:
node["hardware"] = "25"
else:
node["hardware"] = "50"
node["iot"] = "[sensor2]"
node["handler"] = set_node_as_ispdatacentre
return node
def set_node_as_cabinetserver(node):
rand = rnd.random()
if rand > 0.9:
node["software"] = "[]"
elif rand > 0.7:
node["software"] = "[ubuntu]"
else:
node["software"] = "[ubuntu, mySQL]"
rand = rnd.random()
if rand > 0.9:
node["hardware"] = "0"
elif rand > 0.7:
node["hardware"] = "10"
else:
node["hardware"] = "20"
node["iot"] = "[sensor1, sensor3]"
node["handler"] = set_node_as_cabinetserver
return node
def set_node_as_accesspoint(node):
rand = rnd.random()
if rand > 0.9:
node["software"] = "[]"
elif rand > 0.7:
node["software"] = "[ubuntu]"
else:
node["software"] = "[ubuntu, gcc, make]"
rand = rnd.random()
if rand > 0.9:
node["hardware"] = "0"
elif rand > 0.7:
node["hardware"] = "2"
else:
node["hardware"] = "4"
    if rnd.random() > 0.9: #10%
node["iot"] = "[vrViewer]"
else:
node["iot"] = "[sensor4]"
node["handler"] = set_node_as_accesspoint
return node
def set_node_as_smartphone(node):
rand = rnd.random()
if rand > 0.9:
node["software"] = "[]"
elif rand > 0.7:
node["software"] = "[android]"
else:
node["software"] = "[android, gcc, make]"
rand = rnd.random()
if rand > 0.9:
node["hardware"] = "0"
elif rand > 0.7:
node["hardware"] = "4"
else:
node["hardware"] = "8"
if rnd.random() > 0.95: #5%
node["iot"] = "[vrViewer]"
else:
node["iot"] = "[ac, lamp]"
node["handler"] = set_node_as_smartphone
return node
def set_link(link):
link['latency'] = rnd.choice([5,10,25,50,100])
link['bandwidth'] = rnd.choice([5,10,25,50,100])
link["handler"] = set_link
def generate_graph_infrastructure(n,m,seed = None):
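    # NOTE: the m and seed parameters are currently unused; a complete
    # graph on n nodes is always generated.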
G = nx.generators.complete_graph(n)
for i in G.nodes:
rand = rnd.random()
if rand > 0.9: #10%
set_node_as_cloud(G.nodes[i])
elif rand > 0.7: #20%
set_node_as_ispdatacentre(G.nodes[i])
elif rand > 0.4: #30%
set_node_as_cabinetserver(G.nodes[i])
elif rand > 0.2: #20%
set_node_as_accesspoint(G.nodes[i])
else: #20%
set_node_as_smartphone(G.nodes[i])
for (i,j) in G.edges():
set_link(G.edges[i,j])
return G
def change_graph_infrastructure(G):
for i in G.nodes:
node = G.nodes[i]
node["handler"](node)
for (i,j) in G.edges():
link=G.edges[i,j]
link["handler"](link)
return G
def print_graph_infrastructure(G):
f = open("./infra.pl","w+")
f.write(":-dynamic link/4.\n:-dynamic node/4.\n\n")
for i in G.nodes:
node = G.nodes[i]
newnode = 'node(node'+str(i)+', '+node['software']+', '+node['hardware']+', '+node['iot']+').\n'
f.write(newnode)
for (i,j) in G.edges():
link=G.edges[i,j]
newlink='link(node'+str(i)+', node'+str(j)+', '+str(link['latency'])+', '+str(link['bandwidth'])+').\n'
f.write(newlink)
newlink='link(node'+str(j)+', node'+str(i)+', '+str(link['latency'])+', '+str(link['bandwidth'])+').\n'
f.write(newlink)
f.close()
if __name__ == "__main__":
builder(3)
nodes = 1024
G = generate_graph_infrastructure(nodes, (int(math.log2(nodes))))
print_graph_infrastructure(G)
input()
while True:
change_graph_infrastructure(G)
print_graph_infrastructure(G)
input()
|
[
"networkx.generators.complete_graph",
"random.random",
"math.log2",
"random.choice"
] |
[((2818, 2830), 'random.random', 'rnd.random', ([], {}), '()\n', (2828, 2830), True, 'import random as rnd\n'), ((3019, 3031), 'random.random', 'rnd.random', ([], {}), '()\n', (3029, 3031), True, 'import random as rnd\n'), ((3279, 3291), 'random.random', 'rnd.random', ([], {}), '()\n', (3289, 3291), True, 'import random as rnd\n'), ((3469, 3481), 'random.random', 'rnd.random', ([], {}), '()\n', (3479, 3481), True, 'import random as rnd\n'), ((3779, 3791), 'random.random', 'rnd.random', ([], {}), '()\n', (3789, 3791), True, 'import random as rnd\n'), ((3969, 3981), 'random.random', 'rnd.random', ([], {}), '()\n', (3979, 3981), True, 'import random as rnd\n'), ((4278, 4290), 'random.random', 'rnd.random', ([], {}), '()\n', (4288, 4290), True, 'import random as rnd\n'), ((4472, 4484), 'random.random', 'rnd.random', ([], {}), '()\n', (4482, 4484), True, 'import random as rnd\n'), ((4852, 4864), 'random.random', 'rnd.random', ([], {}), '()\n', (4862, 4864), True, 'import random as rnd\n'), ((5048, 5060), 'random.random', 'rnd.random', ([], {}), '()\n', (5058, 5060), True, 'import random as rnd\n'), ((5423, 5455), 'random.choice', 'rnd.choice', (['[5, 10, 25, 50, 100]'], {}), '([5, 10, 25, 50, 100])\n', (5433, 5455), True, 'import random as rnd\n'), ((5476, 5508), 'random.choice', 'rnd.choice', (['[5, 10, 25, 50, 100]'], {}), '([5, 10, 25, 50, 100])\n', (5486, 5508), True, 'import random as rnd\n'), ((5598, 5629), 'networkx.generators.complete_graph', 'nx.generators.complete_graph', (['n'], {}), '(n)\n', (5626, 5629), True, 'import networkx as nx\n'), ((4640, 4652), 'random.random', 'rnd.random', ([], {}), '()\n', (4650, 4652), True, 'import random as rnd\n'), ((5212, 5224), 'random.random', 'rnd.random', ([], {}), '()\n', (5222, 5224), True, 'import random as rnd\n'), ((5668, 5680), 'random.random', 'rnd.random', ([], {}), '()\n', (5678, 5680), True, 'import random as rnd\n'), ((7101, 7117), 'math.log2', 'math.log2', (['nodes'], {}), '(nodes)\n', (7110, 7117), False, 'import math\n')]
|
# Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by <NAME> based on contributions by <NAME>,
# <NAME>, <NAME> and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json, abc
from datetime import datetime, timedelta
from xml.sax.saxutils import escape
from app.view.asp_set import AspectSetH
from app.config.a_config import AnfisaConfig
from app.config.view_tune import tuneAspects
from app.config.flt_tune import tuneUnits
from app.config.solutions import completeDsModes
from app.eval.condition import ConditionMaker
from app.eval.filter import FilterEval
from app.eval.dtree import DTreeEval
from app.eval.code_works import cmpTrees
from app.eval.dtree_parse import ParsedDTree
from app.eval.dtree_mod import modifyDTreeCode
from app.prepare.sec_ws import SecondaryWsCreation
from .ds_disk import DataDiskStorage
from .ds_favor import FavorStorage
from .sol_broker import SolutionBroker
from .family import FamilyInfo
from .zygosity import ZygositySupport
from .rest_api import RestAPI
from .rec_list import RecListTask
from .tab_report import reportCSV
#===============================================
class DataSet(SolutionBroker):
sStatRqCount = 0
sTimeCoeff = AnfisaConfig.configOption("tm.coeff")
sMaxTabRqSize = AnfisaConfig.configOption("max.tab.rq.size")
sMaxExportSize = AnfisaConfig.configOption("export.max.count")
#===============================================
def __init__(self, data_vault, dataset_info, dataset_path,
sol_pack_name = None, add_modes = None):
SolutionBroker.__init__(self,
dataset_info["meta"].get("data_schema", "CASE"),
dataset_info.get("modes"))
self.addModes(data_vault.getApp().getRunModes())
if add_modes:
self.addModes(add_modes)
self.mDataVault = data_vault
self.mDataInfo = dataset_info
self.mName = dataset_info["name"]
self.mDSKind = dataset_info["kind"]
self.mTotal = dataset_info["total"]
self.mMongoAgent = (data_vault.getApp().getMongoConnector().
getDSAgent(dataset_info["mongo"], dataset_info["kind"]))
self.mAspects = AspectSetH.load(dataset_info["view_schema"])
self.mFltSchema = dataset_info["flt_schema"]
self.mPath = dataset_path
self.mFInfo = self.mDataVault.checkFileStat(
self.mPath + "/dsinfo.json")
self.mCondVisitorTypes = []
if self.getDataSchema() == "FAVOR" and self.mDSKind == "xl":
self.mRecStorage = FavorStorage(
self.getApp().getOption("favor-url"))
else:
self.mRecStorage = DataDiskStorage(self, self.mPath)
self.mFamilyInfo = FamilyInfo(dataset_info["meta"])
if (self.mDataInfo.get("zygosity_var")
and 1 <= len(self.mFamilyInfo) <= 10):
self.addModes({"ZYG"})
self.mZygSupport = None
self.mViewContext = dict()
if self.mFamilyInfo.getCohortList():
self.mViewContext["cohorts"] = self.mFamilyInfo.getCohortMap()
completeDsModes(self)
tuneAspects(self, self.mAspects)
def startService(self):
self.mZygSupport = ZygositySupport(self)
tuneUnits(self)
self.mDataVault.getVarRegistry().relax(self.mName)
self.setSolEnv(self.mDataVault.makeSolutionEnv(self))
def isUpToDate(self, fstat_info):
return fstat_info == self.mFInfo
def descrContext(self, rq_args, rq_descr):
rq_descr.append("kind=" + self.mDSKind)
rq_descr.append("dataset=" + self.mName)
def addConditionVisitorType(self, visitor_type):
self.mCondVisitorTypes.append(visitor_type)
@abc.abstractmethod
def getEvalSpace(self):
assert False, "Abstract eval space"
def getApp(self):
return self.mDataVault.getApp()
def getDataVault(self):
return self.mDataVault
def getName(self):
return self.mName
def getDSKind(self):
return self.mDSKind
def getTotal(self):
return self.mTotal
def getMongoAgent(self):
return self.mMongoAgent
def getFltSchema(self):
return self.mFltSchema
def getDataInfo(self):
return self.mDataInfo
def getFamilyInfo(self):
return self.mFamilyInfo
def getRecStorage(self):
return self.mRecStorage
#===============================================
def getViewSchema(self):
return self.mAspects.dump()
def getRecordData(self, rec_no):
return self.mRecStorage.getRecordData(rec_no)
def getFirstAspectID(self):
return self.mAspects.getFirstAspectID()
def getViewRepr(self, rec_no, details = None, active_samples = None):
rec_data = self.mRecStorage.getRecordData(rec_no)
v_context = self.mViewContext.copy()
if details is not None:
v_context["details"] = details
if active_samples:
if active_samples.strip().startswith('['):
v_context["active-samples"] = set(json.parse(active_samples))
else:
v_context["active-samples"] = set(map(int,
active_samples.split(',')))
v_context["data"] = rec_data
v_context["rec_no"] = rec_no
return self.mAspects.getViewRepr(rec_data, v_context)
def getSourceVersions(self):
if "versions" in self.mDataInfo["meta"]:
versions = self.mDataInfo["meta"]["versions"]
return [[key, str(versions[key])]
for key in sorted(versions.keys())]
return []
def getBaseDSName(self):
return self.mDataInfo.get("base")
def getRootDSName(self):
return self.mDataInfo.get("root")
def getTagsMan(self):
return None
def getZygositySupport(self):
return self.mZygSupport
def getZygUnitNames(self):
if self.testRequirements({"ZYG"}):
var_name = self.mDataInfo["zygosity_var"]
return ["%s_%d" % (var_name, idx)
for idx in range(len(self.mFamilyInfo))]
return []
def makeSolEntry(self, key, entry_data, name,
updated_time = None, updated_from = None):
if key == "filter":
return FilterEval(self.getEvalSpace(), entry_data,
name, updated_time, updated_from)
if key == "dtree":
return DTreeEval(self.getEvalSpace(), entry_data,
name, updated_time, updated_from)
assert False, "Bad solution entry kind: " + key
return None
def getDocsInfo(self):
ret = [None, [["Info", "info.html"]]]
if self.mDataInfo.get("doc"):
ret[-1] += self.mDataInfo["doc"]
return ret
def getMaxExportSize(self):
return self.sMaxExportSize
#===============================================
@classmethod
def shortPDataReport(cls, rec_no, rec_data):
return {
"no": rec_no,
"lb": escape(rec_data.get("_label")),
"cl": AnfisaConfig.normalizeColorCode(
rec_data.get("_color"))}
#===============================================
def dumpDSInfo(self, navigation_mode = False):
note, time_label = self.getMongoAgent().getNote()
ret = {
"name": self.mName,
"upd-time": self.getMongoAgent().getCreationDate(),
"create-time": self.mDataVault.getTimeOfStat(self.mFInfo),
"kind": self.mDSKind,
"note": note,
"doc": self.getDocsInfo(),
"total": self.getTotal(),
"date-note": time_label}
ancestors = []
base_name = self.getBaseDSName()
while base_name is not None:
base_h = self.mDataVault.getDS(base_name)
if base_h is None:
ancestors.append([base_name, None])
break
ancestors.append([base_name, base_h.getDocsInfo()])
base_name = base_h.getBaseDSName()
if self.getRootDSName() and self.getRootDSName() != self.getName():
if len(ancestors) == 0 or ancestors[-1][0] != self.getRootDSName():
root_h = self.mDataVault.getDS(self.getRootDSName())
ancestors.append([self.getRootDSName(),
None if root_h is None else root_h.getDocsInfo()])
ret["ancestors"] = ancestors
if navigation_mode:
secondary_seq = self.mDataVault.getSecondaryWSNames(self)
if secondary_seq:
ret["secondary"] = [ws_h.getName() for ws_h in secondary_seq]
else:
ret["meta"] = self.mDataInfo["meta"]
ret["cohorts"] = self.mFamilyInfo.getCohortList()
ret["unit-classes"] = (
self.mDataVault.getVarRegistry().getClassificationDescr())
ret["export-max-count"] = self.sMaxExportSize
if not navigation_mode:
cur_v_group = None
unit_groups = []
for unit_h in self.getEvalSpace().iterUnits():
if unit_h.isScreened():
continue
if unit_h.getVGroup() != cur_v_group:
cur_v_group = unit_h.getVGroup()
if not cur_v_group:
cur_v_group = ""
if (len(unit_groups) == 0
or unit_groups[-1][0] != cur_v_group):
unit_groups.append([cur_v_group, []])
unit_groups[-1][1].append(unit_h.getName())
ret["unit-groups"] = unit_groups
return ret
#===============================================
def prepareAllUnitStat(self, condition, eval_h,
time_end, point_no = None):
ret = []
for unit_h in self.getEvalSpace().iterUnits():
if unit_h.isScreened():
continue
if unit_h.getUnitKind() == "func":
ret.append(unit_h.makeInfoStat(eval_h, point_no))
continue
if point_no is not None and not unit_h.isInDTrees():
continue
if time_end is False:
ret.append(unit_h.prepareStat(incomplete_mode = True))
continue
ret.append(unit_h.makeStat(condition, eval_h))
if time_end is not None and datetime.now() > time_end:
time_end = False
return ret
def prepareSelectedUnitStat(self, unit_names, condition,
eval_h, time_end = None, point_no = None):
ret = []
for unit_name in unit_names:
unit_h = self.getEvalSpace().getUnit(unit_name)
            assert not unit_h.isScreened() and unit_h.getUnitKind() != "func", (
                "No function provided in DS: " + unit_name)
assert point_no is None or unit_h.isInDTrees(), (
"Unit is inaccessible in Decision Trees: " + unit_name)
ret.append(unit_h.makeStat(condition, eval_h))
if time_end is not None and datetime.now() > time_end:
break
return ret
#===============================================
def prepareDTreePointCounts(self, dtree_h, rq_id,
point_idxs = None, time_end = None):
counts = [None] * len(dtree_h)
needs_more = point_idxs is not None
zero_idx = None
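        # if a point's count is zero and checkZeroAfter() allows it, the
        # zero counts are copied to all following points instead of being
        # re-evaluated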
if point_idxs is None:
point_idxs = range(len(dtree_h))
for idx in point_idxs:
if dtree_h.pointNotActive(idx):
counts[idx] = self.getEvalSpace().makeEmptyCounts()
continue
if (not needs_more and time_end is not None
and datetime.now() > time_end):
break
if zero_idx is not None and idx >= zero_idx:
continue
counts[idx] = self.getEvalSpace().evalTotalCounts(
dtree_h.getActualCondition(idx))
needs_more = False
if counts[idx][0] == 0 and dtree_h.checkZeroAfter(idx):
zero_idx = idx
for idx1 in range(zero_idx, len(dtree_h)):
counts[idx1] = counts[idx][:]
return counts
#===============================================
def visitCondition(self, condition, ret_handle):
if condition is None:
return
for cond_visitor_type in self.mCondVisitorTypes:
visitor = cond_visitor_type(self)
condition.visit(visitor)
ret = visitor.makeResult()
if ret:
ret_handle[visitor.getName()] = ret
#===============================================
def _getArgCondFilter(self, rq_args,
activate_it = True, join_cond_data = None):
filter_h, cond_data = None, None
if rq_args.get("filter"):
filter_h = self.pickSolEntry("filter", rq_args["filter"])
assert filter_h is not None, "No filter for: " + rq_args["filter"]
if join_cond_data is not None:
cond_data = filter_h.getCondDataSeq()
filter_h = None
if filter_h is None and cond_data is None:
if "conditions" in rq_args:
cond_data = json.loads(rq_args["conditions"])
else:
cond_data = ConditionMaker.condAll()
if join_cond_data is not None:
assert filter_h is None, "Filter&join collision"
cond_data = cond_data[:] + join_cond_data[:]
if filter_h is None:
filter_h = FilterEval(self.getEvalSpace(), cond_data)
filter_h = self.updateSolEntry("filter", filter_h)
if activate_it:
filter_h.activate()
return filter_h
def _getArgDTree(self, rq_args, activate_it = True,
use_dtree = True, dtree_h = None):
if dtree_h is None:
if use_dtree and "dtree" in rq_args:
dtree_h = self.pickSolEntry("dtree", rq_args["dtree"])
assert dtree_h is not None, (
"No decision tree: " + rq_args["dtree"])
else:
assert "code" in rq_args, (
'Missing request argument: "dtree" or "code"')
dtree_h = DTreeEval(self.getEvalSpace(), rq_args["code"])
dtree_h = self.updateSolEntry("dtree", dtree_h)
if activate_it:
dtree_h.activate()
return dtree_h
def _getArgTimeEnd(self, rq_args):
if self.getEvalSpace().heavyMode() and "tm" in rq_args:
return datetime.now() + timedelta(
seconds = self.sTimeCoeff * float(rq_args["tm"]) + 1E-5)
return None
def _makeRqId(self):
self.sStatRqCount += 1
return str(self.sStatRqCount) + '/' + str(datetime.now())
#===============================================
@RestAPI.ds_request
def rq__ds_stat(self, rq_args):
time_end = self._getArgTimeEnd(rq_args)
join_cond_data = None
if "instr" in rq_args:
instr_info = json.loads(rq_args["instr"])
if instr_info[0] == "JOIN":
join_cond_data = self.pickSolEntry(
"filter", instr_info[1]).getCondDataSeq()
else:
if instr_info[0] == "DELETE":
instr_cond_data = None
else:
instr_cond_data = self._getArgCondFilter(
rq_args, activate_it = False).getCondDataSeq()
if not self.modifySolEntry("filter",
instr_info, instr_cond_data):
assert False, ("Bad instruction kind: "
+ json.dumps(instr_info))
filter_h = self._getArgCondFilter(rq_args,
join_cond_data = join_cond_data)
condition = filter_h.getCondition()
ret_handle = {
"kind": self.mDSKind,
"total-counts": self.getEvalSpace().getTotalCounts(),
"filtered-counts": self.getEvalSpace().evalTotalCounts(condition),
"stat-list": self.prepareAllUnitStat(condition,
filter_h, time_end),
"filter-list": self.getSolEntryList("filter"),
"cur-filter": filter_h.getFilterName(),
"rq-id": self._makeRqId()}
ret_handle.update(filter_h.reportInfo())
return ret_handle
#===============================================
@RestAPI.ds_request
def rq__dtree_stat(self, rq_args):
        time_end = self._getArgTimeEnd(rq_args)
dtree_h = self._getArgDTree(rq_args)
assert "no" in rq_args, 'Missing request argument "no"'
point_no = int(rq_args["no"])
condition = dtree_h.getActualCondition(point_no)
ret_handle = {
"total-counts": self.getEvalSpace().getTotalCounts(),
"filtered-counts": self.getEvalSpace().evalTotalCounts(condition),
"stat-list": self.prepareAllUnitStat(condition,
dtree_h, time_end, point_no),
"rq-id": self._makeRqId()}
return ret_handle
#===============================================
@RestAPI.ds_request
def rq__statunits(self, rq_args):
        time_end = self._getArgTimeEnd(rq_args)
if "dtree" in rq_args or "code" in rq_args:
eval_h = self._getArgDTree(rq_args)
assert "no" in rq_args, 'Missing request argument "no"'
point_no = int(rq_args["no"])
condition = eval_h.getActualCondition(point_no)
else:
eval_h = self._getArgCondFilter(rq_args)
condition, point_no = eval_h.getCondition(), None
assert "units" in rq_args, 'Missing request argument "units"'
ret_handle = {
"rq-id": rq_args.get("rq_id"),
"units": self.prepareSelectedUnitStat(
json.loads(rq_args["units"]), condition,
eval_h, time_end, point_no)}
return ret_handle
#===============================================
@RestAPI.ds_request
def rq__statfunc(self, rq_args):
if "dtree" in rq_args or "code" in rq_args:
eval_h = self._getArgDTree(rq_args)
            assert "no" in rq_args, 'Missing request argument "no"'
            point_no = int(rq_args["no"])
condition = eval_h.getActualCondition(point_no)
else:
eval_h = self._getArgCondFilter(rq_args)
condition = eval_h.getCondition()
point_no = int(rq_args["no"]) if "no" in rq_args else None
assert "unit" in rq_args, 'Missing request argument "unit"'
unit_h = self.getEvalSpace().getUnit(rq_args["unit"])
assert "param" in rq_args, 'Missing request argument "param"'
parameters = json.loads(rq_args["param"])
ret = unit_h.makeParamStat(condition, parameters, eval_h, point_no)
if rq_args.get("rq_id"):
ret["rq-id"] = rq_args.get("rq_id")
if rq_args.get("no"):
ret["no"] = rq_args.get("no")
return ret
#===============================================
@RestAPI.ds_request
def rq__dtree_set(self, rq_args):
time_end = self._getArgTimeEnd(rq_args)
instr = rq_args.get("instr")
if instr is not None:
instr = json.loads(instr)
if instr and instr[0] == "DTREE":
dtree_proc_h = self._getArgDTree(
rq_args, activate_it = False)
if not self.modifySolEntry("dtree", instr[1:],
dtree_proc_h.getCode()):
assert False, (
"Failed to modify DTREE: " + json.dumps(instr[1:]))
instr = None
dtree_h = None
if instr:
assert "code" in rq_args, 'Missing request argument "code"'
parsed = ParsedDTree(self.getEvalSpace(), rq_args["code"])
dtree_code = modifyDTreeCode(parsed, instr)
dtree_h = DTreeEval(self.getEvalSpace(), dtree_code)
dtree_h = self._getArgDTree(rq_args, dtree_h = dtree_h)
rq_id = self._makeRqId()
ret_handle = {
"kind": self.mDSKind,
"total-counts": self.getEvalSpace().getTotalCounts(),
"point-counts": self.prepareDTreePointCounts(
dtree_h, rq_id, time_end = time_end),
"dtree-list": self.getSolEntryList("dtree"),
"rq-id": rq_id}
ret_handle.update(dtree_h.reportInfo())
return ret_handle
#===============================================
@RestAPI.ds_request
def rq__dtree_counts(self, rq_args):
        time_end = self._getArgTimeEnd(rq_args)
dtree_h = self._getArgDTree(rq_args)
assert "rq_id" in rq_args, 'Missing request argument "rq_id"'
assert "points" in rq_args, 'Missing request argument "points"'
rq_id = rq_args["rq_id"]
return {
"point-counts": self.prepareDTreePointCounts(dtree_h,
rq_id, json.loads(rq_args["points"]), time_end),
"rq-id": rq_id}
#===============================================
@RestAPI.ds_request
def rq__dtree_check(self, rq_args):
dtree_h = self._getArgDTree(rq_args,
use_dtree = False, activate_it = False)
ret_handle = {"code": dtree_h.getCode()}
if dtree_h.getErrorInfo() is not None:
ret_handle.update(dtree_h.getErrorInfo())
return ret_handle
#===============================================
@RestAPI.ds_request
def rq__dtree_cmp(self, rq_args):
        dtree_h = self._getArgDTree(rq_args, activate_it = False)
assert "other" in rq_args, 'Missing request argument "other"'
other_dtree_h = self.pickSolEntry("dtree", rq_args["other"])
assert other_dtree_h is not None, (
"Not found decision tree :" + rq_args["other"])
return {"cmp": cmpTrees(
dtree_h.getCode(), other_dtree_h.getCode())}
#===============================================
@RestAPI.ds_request
def rq__recdata(self, rq_args):
assert "rec" in rq_args, 'Missing request argument "rec"'
return self.mRecStorage.getRecordData(int(rq_args.get("rec")))
#===============================================
@RestAPI.ds_request
def rq__reccnt(self, rq_args):
assert "rec" in rq_args, 'Missing request argument "rec"'
return self.getViewRepr(int(rq_args["rec"]),
details = rq_args.get("details"),
active_samples = rq_args.get("samples"))
#===============================================
@RestAPI.ds_request
def rq__dsinfo(self, rq_args):
note = rq_args.get("note")
if note is not None:
with self:
self.getMongoAgent().setNote(note)
with self.mDataVault:
return self.dumpDSInfo(navigation_mode = False)
#===============================================
@RestAPI.ds_request
def rq__ds2ws(self, rq_args):
assert "ws" in rq_args, 'Missing request argument "ws"'
if "dtree" in rq_args or "code" in rq_args:
eval_h = self._getArgDTree(rq_args)
else:
eval_h = self._getArgCondFilter(rq_args)
task = SecondaryWsCreation(self, rq_args["ws"], eval_h,
force_mode = rq_args.get("force"))
return {"task_id": self.getApp().runTask(task)}
#===============================================
@RestAPI.ds_request
def rq__ds_list(self, rq_args):
if "dtree" in rq_args or "code" in rq_args:
eval_h = self._getArgDTree(rq_args)
assert "no" in rq_args, 'Missing request argument "no"'
condition = eval_h.getActualCondition(int(rq_args["no"]))
else:
eval_h = self._getArgCondFilter(rq_args)
condition = eval_h.getCondition()
return {"task_id": self.getApp().runTask(
RecListTask(self, condition, rq_args.get("smpcnt")))}
#===============================================
@RestAPI.ds_request
def rq__tab_report(self, rq_args):
assert "seq" in rq_args, 'Missing request argument "seq"'
assert "schema" in rq_args, 'Missing request argument "schema"'
seq_rec_no = json.loads(rq_args["seq"])
tab_schema = self.getStdItem("tab-schema", rq_args["schema"]).getData()
return [tab_schema.reportRecord(self, rec_no)
for rec_no in seq_rec_no[:self.sMaxTabRqSize]]
#===============================================
@RestAPI.ds_request
def rq__export(self, rq_args):
filter_h = self._getArgCondFilter(rq_args)
rec_no_seq = self.fiterRecords(filter_h.getCondition(),
zone_data = rq_args.get("zone"))
fname = self.getApp().makeExcelExport(
self.getName(), self, rec_no_seq, self.getTagsMan())
return {"kind": "excel", "fname": fname}
#===============================================
@RestAPI.ds_request
def rq__csv_export(self, rq_args):
filter_h = self._getArgCondFilter(rq_args)
rec_no_seq = self.fiterRecords(filter_h.getCondition(),
zone_data = rq_args.get("zone"))
assert "schema" in rq_args, 'Missing request argument "schema"'
tab_schema = self.getStdItem("tab-schema", rq_args["schema"]).getData()
return ["!", "csv", reportCSV(self, tab_schema, rec_no_seq),
[("Content-Disposition", "attachment;filename=anfisa_export.csv")]]
#===============================================
@RestAPI.ds_request
def rq__solutions(self, rq_args):
return self.reportSolutions()
#===============================================
@RestAPI.ds_request
def rq__vsetup(self, rq_args):
return {"aspects": self.mAspects.dump()}
|
[
"json.parse",
"json.loads",
"app.eval.dtree_mod.modifyDTreeCode",
"app.config.solutions.completeDsModes",
"json.dumps",
"app.eval.condition.ConditionMaker.condAll",
"app.config.a_config.AnfisaConfig.configOption",
"app.view.asp_set.AspectSetH.load",
"app.config.view_tune.tuneAspects",
"datetime.datetime.now",
"app.config.flt_tune.tuneUnits"
] |
[((1816, 1853), 'app.config.a_config.AnfisaConfig.configOption', 'AnfisaConfig.configOption', (['"""tm.coeff"""'], {}), "('tm.coeff')\n", (1841, 1853), False, 'from app.config.a_config import AnfisaConfig\n'), ((1874, 1918), 'app.config.a_config.AnfisaConfig.configOption', 'AnfisaConfig.configOption', (['"""max.tab.rq.size"""'], {}), "('max.tab.rq.size')\n", (1899, 1918), False, 'from app.config.a_config import AnfisaConfig\n'), ((1940, 1985), 'app.config.a_config.AnfisaConfig.configOption', 'AnfisaConfig.configOption', (['"""export.max.count"""'], {}), "('export.max.count')\n", (1965, 1985), False, 'from app.config.a_config import AnfisaConfig\n'), ((2777, 2821), 'app.view.asp_set.AspectSetH.load', 'AspectSetH.load', (["dataset_info['view_schema']"], {}), "(dataset_info['view_schema'])\n", (2792, 2821), False, 'from app.view.asp_set import AspectSetH\n'), ((3681, 3702), 'app.config.solutions.completeDsModes', 'completeDsModes', (['self'], {}), '(self)\n', (3696, 3702), False, 'from app.config.solutions import completeDsModes\n'), ((3712, 3744), 'app.config.view_tune.tuneAspects', 'tuneAspects', (['self', 'self.mAspects'], {}), '(self, self.mAspects)\n', (3723, 3744), False, 'from app.config.view_tune import tuneAspects\n'), ((3831, 3846), 'app.config.flt_tune.tuneUnits', 'tuneUnits', (['self'], {}), '(self)\n', (3840, 3846), False, 'from app.config.flt_tune import tuneUnits\n'), ((19330, 19358), 'json.loads', 'json.loads', (["rq_args['param']"], {}), "(rq_args['param'])\n", (19340, 19358), False, 'import json, abc\n'), ((24792, 24818), 'json.loads', 'json.loads', (["rq_args['seq']"], {}), "(rq_args['seq'])\n", (24802, 24818), False, 'import json, abc\n'), ((15625, 15653), 'json.loads', 'json.loads', (["rq_args['instr']"], {}), "(rq_args['instr'])\n", (15635, 15653), False, 'import json, abc\n'), ((19859, 19876), 'json.loads', 'json.loads', (['instr'], {}), '(instr)\n', (19869, 19876), False, 'import json, abc\n'), ((20453, 20483), 'app.eval.dtree_mod.modifyDTreeCode', 'modifyDTreeCode', (['parsed', 'instr'], {}), '(parsed, instr)\n', (20468, 20483), False, 'from app.eval.dtree_mod import modifyDTreeCode\n'), ((13818, 13851), 'json.loads', 'json.loads', (["rq_args['conditions']"], {}), "(rq_args['conditions'])\n", (13828, 13851), False, 'import json, abc\n'), ((13898, 13922), 'app.eval.condition.ConditionMaker.condAll', 'ConditionMaker.condAll', ([], {}), '()\n', (13920, 13922), False, 'from app.eval.condition import ConditionMaker\n'), ((15133, 15147), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15145, 15147), False, 'from datetime import datetime, timedelta\n'), ((15361, 15375), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15373, 15375), False, 'from datetime import datetime, timedelta\n'), ((18427, 18455), 'json.loads', 'json.loads', (["rq_args['units']"], {}), "(rq_args['units'])\n", (18437, 18455), False, 'import json, abc\n'), ((21535, 21564), 'json.loads', 'json.loads', (["rq_args['points']"], {}), "(rq_args['points'])\n", (21545, 21564), False, 'import json, abc\n'), ((5656, 5682), 'json.parse', 'json.parse', (['active_samples'], {}), '(active_samples)\n', (5666, 5682), False, 'import json, abc\n'), ((10945, 10959), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10957, 10959), False, 'from datetime import datetime, timedelta\n'), ((11627, 11641), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11639, 11641), False, 'from datetime import datetime, timedelta\n'), ((12283, 12297), 'datetime.datetime.now', 'datetime.now', ([], 
{}), '()\n', (12295, 12297), False, 'from datetime import datetime, timedelta\n'), ((20196, 20217), 'json.dumps', 'json.dumps', (['instr[1:]'], {}), '(instr[1:])\n', (20206, 20217), False, 'import json, abc\n'), ((16263, 16285), 'json.dumps', 'json.dumps', (['instr_info'], {}), '(instr_info)\n', (16273, 16285), False, 'import json, abc\n')]
|
import os
from os.path import join, isfile, isdir
import errno
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import random
random.seed(5)
class Options:
light, dark, none = range(3)
def get_random_font():
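    # Tries up to five times to pick a random .ttf from the allowed
    # truetype directories; falls back to DejaVuSans at a random size.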
allowed_fonts = [
'dejavu',
'freefont',
]
for i in range(5):
base_dir = '/usr/share/fonts/truetype'
font_dirs = [d for d in os.listdir(base_dir) if isdir(join(base_dir, d)) and d in allowed_fonts]
font_dir = font_dirs[random.randint(0,len(font_dirs)-1)]
fonts = [f for f in os.listdir(join(base_dir, font_dir)) if f.endswith('.ttf')]
if len(fonts) > 0:
font_name = fonts[random.randint(0, len(fonts)-1)]
font_path = join(join(base_dir, font_dir), font_name)
font = ImageFont.truetype(font_path, random.randint(12,55))
return font
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",random.randint(10,45))
return font
def get_random_options():
if random.randint(0,1) == 0:
return Options.light, Options.dark
return Options.dark, Options.light
def get_random_color(option=Options.none):
if option == Options.none:
return(random.randint(1, 255), random.randint(1, 255), random.randint(1, 255))
elif option == Options.light:
return (random.randint(150, 255), random.randint(150, 255), random.randint(150, 255))
return (random.randint(0,105), random.randint(0,105), random.randint(0,105))
def get_random_pos():
return (random.randint(1, 150),random.randint(1, 150))
def get_random_line():
return (get_random_pos(), get_random_pos())
def generate_random_img(filename, text):
font = get_random_font()
back_option, front_option = get_random_options()
back_colour = get_random_color(option=back_option)
front_colour = get_random_color(option=front_option)
img = Image.new("RGBA", (200,200), back_colour)
draw = ImageDraw.Draw(img)
nb_lines = random.randint(0, 5)
for i in range(nb_lines):
x1 = random.randint(1,200)
x2 = random.randint(1,200)
y1 = random.randint(1,200)
y2 = random.randint(1,200)
draw.line((x1, y1, x2, y2), fill=get_random_color(option=back_option),width=random.randint(1,20))
nb_circles = random.randint(0, 5)
for i in range(nb_circles):
x1 = random.randint(1,150)
x2 = random.randint(x1, 200)
y1 = random.randint(1,150)
y2 = random.randint(y1, 200)
draw.ellipse((x1, y1, x2, y2), fill=get_random_color(option=back_option))
    draw.text(get_random_pos(), text, front_colour, font=font)
img.save(filename)
def generate_batch(directory, basename, nb_images=1000):
path = os.path.join(directory, basename)
make_sure_path_exists(path)
for i in range(0,nb_images):
        filename = '{}.{}.jpg'.format(basename, i)
filename = os.path.join(path, filename)
generate_random_img(filename, basename)
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def main():
generate_batch('data/train','0',10000)
generate_batch('data/train','1',10000)
#generate_batch('data/train','2',10000)
#generate_batch('data/train','3',10000)
#generate_batch('data/train','4',10000)
#generate_batch('data/train','5',10000)
#generate_batch('data/train','6',10000)
#generate_batch('data/train','7',10000)
#generate_batch('data/train','8',10000)
#generate_batch('data/train','9',10000)
generate_batch('data/validation','0',1000)
generate_batch('data/validation','1',1000)
#generate_batch('data/validation','2',2000)
#generate_batch('data/validation','3',2000)
#generate_batch('data/validation','4',2000)
#generate_batch('data/validation','5',2000)
#generate_batch('data/validation','6',2000)
#generate_batch('data/validation','7',2000)
#generate_batch('data/validation','8',2000)
#generate_batch('data/validation','9',2000)
if __name__ == '__main__':
main()
|
[
"PIL.Image.new",
"random.randint",
"os.makedirs",
"random.seed",
"PIL.ImageDraw.Draw",
"os.path.join",
"os.listdir"
] |
[((163, 177), 'random.seed', 'random.seed', (['(5)'], {}), '(5)\n', (174, 177), False, 'import random\n'), ((1932, 1974), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(200, 200)', 'back_colour'], {}), "('RGBA', (200, 200), back_colour)\n", (1941, 1974), False, 'from PIL import Image\n'), ((1985, 2004), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1999, 2004), False, 'from PIL import ImageDraw\n'), ((2025, 2045), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2039, 2045), False, 'import random\n'), ((2345, 2365), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2359, 2365), False, 'import random\n'), ((2702, 2721), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2716, 2721), False, 'from PIL import ImageDraw\n'), ((2812, 2845), 'os.path.join', 'os.path.join', (['directory', 'basename'], {}), '(directory, basename)\n', (2824, 2845), False, 'import os\n'), ((979, 1001), 'random.randint', 'random.randint', (['(10)', '(45)'], {}), '(10, 45)\n', (993, 1001), False, 'import random\n'), ((1053, 1073), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1067, 1073), False, 'import random\n'), ((1463, 1485), 'random.randint', 'random.randint', (['(0)', '(105)'], {}), '(0, 105)\n', (1477, 1485), False, 'import random\n'), ((1486, 1508), 'random.randint', 'random.randint', (['(0)', '(105)'], {}), '(0, 105)\n', (1500, 1508), False, 'import random\n'), ((1509, 1531), 'random.randint', 'random.randint', (['(0)', '(105)'], {}), '(0, 105)\n', (1523, 1531), False, 'import random\n'), ((1567, 1589), 'random.randint', 'random.randint', (['(1)', '(150)'], {}), '(1, 150)\n', (1581, 1589), False, 'import random\n'), ((1590, 1612), 'random.randint', 'random.randint', (['(1)', '(150)'], {}), '(1, 150)\n', (1604, 1612), False, 'import random\n'), ((2094, 2116), 'random.randint', 'random.randint', (['(1)', '(200)'], {}), '(1, 200)\n', (2108, 2116), False, 'import random\n'), ((2129, 2151), 'random.randint', 'random.randint', (['(1)', '(200)'], {}), '(1, 200)\n', (2143, 2151), False, 'import random\n'), ((2164, 2186), 'random.randint', 'random.randint', (['(1)', '(200)'], {}), '(1, 200)\n', (2178, 2186), False, 'import random\n'), ((2199, 2221), 'random.randint', 'random.randint', (['(1)', '(200)'], {}), '(1, 200)\n', (2213, 2221), False, 'import random\n'), ((2412, 2434), 'random.randint', 'random.randint', (['(1)', '(150)'], {}), '(1, 150)\n', (2426, 2434), False, 'import random\n'), ((2447, 2470), 'random.randint', 'random.randint', (['x1', '(200)'], {}), '(x1, 200)\n', (2461, 2470), False, 'import random\n'), ((2484, 2506), 'random.randint', 'random.randint', (['(1)', '(150)'], {}), '(1, 150)\n', (2498, 2506), False, 'import random\n'), ((2519, 2542), 'random.randint', 'random.randint', (['y1', '(200)'], {}), '(y1, 200)\n', (2533, 2542), False, 'import random\n'), ((2966, 2994), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (2978, 2994), False, 'import os\n'), ((3082, 3099), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3093, 3099), False, 'import os\n'), ((1251, 1273), 'random.randint', 'random.randint', (['(1)', '(255)'], {}), '(1, 255)\n', (1265, 1273), False, 'import random\n'), ((1275, 1297), 'random.randint', 'random.randint', (['(1)', '(255)'], {}), '(1, 255)\n', (1289, 1297), False, 'import random\n'), ((1299, 1321), 'random.randint', 'random.randint', (['(1)', '(255)'], {}), '(1, 255)\n', (1313, 1321), False, 'import random\n'), ((419, 439), 
'os.listdir', 'os.listdir', (['base_dir'], {}), '(base_dir)\n', (429, 439), False, 'import os\n'), ((764, 788), 'os.path.join', 'join', (['base_dir', 'font_dir'], {}), '(base_dir, font_dir)\n', (768, 788), False, 'from os.path import join, isfile, isdir\n'), ((850, 872), 'random.randint', 'random.randint', (['(12)', '(55)'], {}), '(12, 55)\n', (864, 872), False, 'import random\n'), ((1373, 1397), 'random.randint', 'random.randint', (['(150)', '(255)'], {}), '(150, 255)\n', (1387, 1397), False, 'import random\n'), ((1399, 1423), 'random.randint', 'random.randint', (['(150)', '(255)'], {}), '(150, 255)\n', (1413, 1423), False, 'import random\n'), ((1425, 1449), 'random.randint', 'random.randint', (['(150)', '(255)'], {}), '(150, 255)\n', (1439, 1449), False, 'import random\n'), ((2305, 2326), 'random.randint', 'random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (2319, 2326), False, 'import random\n'), ((596, 620), 'os.path.join', 'join', (['base_dir', 'font_dir'], {}), '(base_dir, font_dir)\n', (600, 620), False, 'from os.path import join, isfile, isdir\n'), ((449, 466), 'os.path.join', 'join', (['base_dir', 'd'], {}), '(base_dir, d)\n', (453, 466), False, 'from os.path import join, isfile, isdir\n')]
|
import json
import logging
from collections import OrderedDict
from pathlib import Path
from sira.modelling.component import Component, ConnectionValues
from sira.modelling.infrastructure import InfrastructureFactory
from sira.tools.convert_excel_files_to_json import (read_excel_to_json,
update_json_structure)
rootLogger = logging.getLogger(__name__)
def ingest_model(config):
"""
Reads a model file into python objects
:param config: path to json or xlsx file containing system model
:return: -List of algorithms for each component in particular damage state
-Object of class infrastructure
"""
extension = Path(config.INPUT_MODEL_PATH).suffix[1:].lower()
if extension == 'json':
with open(config.INPUT_MODEL_PATH, 'r') as f:
# ensure that damage states are ordered
model = json.load(f, object_pairs_hook=OrderedDict)
return read_model_from_json(config, model)
elif extension == 'xlsx':
json_obj = json.loads(
read_excel_to_json(config.INPUT_MODEL_PATH),
object_pairs_hook=OrderedDict)
model = update_json_structure(json_obj)
return read_model_from_json(config, model)
else:
rootLogger.critical(
"Invalid model file type! "
"Accepted types are json or xlsx.")
raise ValueError(
"Invalid model file type! "
"Accepted types are json or xlsx. "
"File supplied: " + config.SYS_CONF_FILE)
def read_model_from_json(config, model):
"""
Create an infrastructure_model and AlgorithmFactory from the
infrastructure model in json file
:param config:
:return:
"""
system_class = config.SYSTEM_CLASS
system_subclass = config.SYSTEM_SUBCLASS
# read the lists from json
system_meta = model['system_meta']
component_list = model['component_list']
node_conn_df = model['node_conn_df']
sysinp_setup = model['sysinp_setup']
sysout_setup = model['sysout_setup']
system_components = {}
for component_id in component_list:
component_values = {}
component_values['component_id'] = component_id
for param in component_list[component_id].keys():
component_values[param] = component_list[component_id][param]
# list of damage states with a function assignment!
system_components[component_id] = Component(**component_values)
# TODO refactor code below, combine the two high level variables
# in input json and make corresponding changes in code below
# now we add children!
for index in node_conn_df:
component_id = node_conn_df[index]['origin']
system_component = system_components[component_id]
if not system_component.destination_components:
system_component.destination_components = {}
edge_values = {}
edge_values['link_capacity'] \
= float(node_conn_df[index]['link_capacity'])
edge_values['weight'] = float(node_conn_df[index]['weight'])
system_component.\
destination_components[node_conn_df[index]['destination']] \
= ConnectionValues(**edge_values)
infrastructure_system_constructor = dict()
infrastructure_system_constructor['name'] = \
system_class + " : " + system_subclass
infrastructure_system_constructor['components'] = system_components
infrastructure_system_constructor['system_meta'] = dict(system_meta)
# create the supply and output node dictionaries
supply_nodes = {}
for index in sysinp_setup:
sv_dict = {}
sv_dict['input_capacity'] \
= sysinp_setup[index]['input_capacity']
sv_dict['capacity_fraction'] \
= float(sysinp_setup[index]['capacity_fraction'])
sv_dict['commodity_type'] \
= sysinp_setup[index]['commodity_type']
supply_nodes[index] = sv_dict
infrastructure_system_constructor['supply_nodes'] = supply_nodes
output_nodes = {}
for index in sysout_setup:
op_dict = {}
op_dict['production_node'] \
= sysout_setup[index]['production_node']
op_dict['output_node_capacity'] \
= sysout_setup[index]['output_node_capacity']
op_dict['capacity_fraction'] \
= float(sysout_setup[index]['capacity_fraction'])
op_dict['priority'] = sysout_setup[index]['priority']
output_nodes[index] = op_dict
infrastructure_system_constructor['sys_dmg_states'] = []
for key in component_list:
for damages_state in component_list[key]["damages_states_constructor"]:
if damages_state not in \
infrastructure_system_constructor['sys_dmg_states']:
infrastructure_system_constructor['sys_dmg_states'].\
append(damages_state)
infrastructure_system_constructor['output_nodes'] = output_nodes
# set the system class
infrastructure_system_constructor['system_class'] = system_class
return InfrastructureFactory.create_model(infrastructure_system_constructor)
|
[
"json.load",
"sira.tools.convert_excel_files_to_json.read_excel_to_json",
"sira.modelling.component.Component",
"logging.getLogger",
"sira.modelling.infrastructure.InfrastructureFactory.create_model",
"pathlib.Path",
"sira.tools.convert_excel_files_to_json.update_json_structure",
"sira.modelling.component.ConnectionValues"
] |
[((379, 406), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (396, 406), False, 'import logging\n'), ((5101, 5170), 'sira.modelling.infrastructure.InfrastructureFactory.create_model', 'InfrastructureFactory.create_model', (['infrastructure_system_constructor'], {}), '(infrastructure_system_constructor)\n', (5135, 5170), False, 'from sira.modelling.infrastructure import InfrastructureFactory\n'), ((2469, 2498), 'sira.modelling.component.Component', 'Component', ([], {}), '(**component_values)\n', (2478, 2498), False, 'from sira.modelling.component import Component, ConnectionValues\n'), ((3224, 3255), 'sira.modelling.component.ConnectionValues', 'ConnectionValues', ([], {}), '(**edge_values)\n', (3240, 3255), False, 'from sira.modelling.component import Component, ConnectionValues\n'), ((909, 952), 'json.load', 'json.load', (['f'], {'object_pairs_hook': 'OrderedDict'}), '(f, object_pairs_hook=OrderedDict)\n', (918, 952), False, 'import json\n'), ((1182, 1213), 'sira.tools.convert_excel_files_to_json.update_json_structure', 'update_json_structure', (['json_obj'], {}), '(json_obj)\n', (1203, 1213), False, 'from sira.tools.convert_excel_files_to_json import read_excel_to_json, update_json_structure\n'), ((1078, 1121), 'sira.tools.convert_excel_files_to_json.read_excel_to_json', 'read_excel_to_json', (['config.INPUT_MODEL_PATH'], {}), '(config.INPUT_MODEL_PATH)\n', (1096, 1121), False, 'from sira.tools.convert_excel_files_to_json import read_excel_to_json, update_json_structure\n'), ((705, 734), 'pathlib.Path', 'Path', (['config.INPUT_MODEL_PATH'], {}), '(config.INPUT_MODEL_PATH)\n', (709, 734), False, 'from pathlib import Path\n')]
|
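The model reader above passes object_pairs_hook=OrderedDict to json.load so the damage states keep the order they have in the model file rather than relying on plain-dict ordering guarantees. A minimal standalone sketch of that behavior (the state names here are made up):

import json
from collections import OrderedDict

raw = '{"DS0 None": {}, "DS1 Slight": {}, "DS2 Extensive": {}}'

# object_pairs_hook receives the key/value pairs in document order,
# so iteration order matches the model file exactly
model = json.loads(raw, object_pairs_hook=OrderedDict)
print(list(model))  # ['DS0 None', 'DS1 Slight', 'DS2 Extensive']
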
import sys
import pdb
pdb.set_trace();
print("\n Enter the numbers:");
a=input();
b=input();
c=int(a)+int(b);
print("\n the first number:");
print(a);
print("\n the second number:");
print(b);
print("\n The addition of two numbers:");
print(c);
|
[
"pdb.set_trace"
] |
[((22, 37), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (35, 37), False, 'import pdb\n')]
|
# Generated by Django 2.2.2 on 2019-06-15 21:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myfiles', '0010_auto_20190615_2244'),
]
operations = [
migrations.RenameField(
model_name='folder',
old_name='owner_id',
new_name='owner',
),
migrations.RenameField(
model_name='folder',
old_name='parent_folder_id',
new_name='parent_folder',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((227, 314), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""folder"""', 'old_name': '"""owner_id"""', 'new_name': '"""owner"""'}), "(model_name='folder', old_name='owner_id', new_name=\n 'owner')\n", (249, 314), False, 'from django.db import migrations\n'), ((366, 468), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""folder"""', 'old_name': '"""parent_folder_id"""', 'new_name': '"""parent_folder"""'}), "(model_name='folder', old_name='parent_folder_id',\n new_name='parent_folder')\n", (388, 468), False, 'from django.db import migrations\n')]
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Entry/exit point for pulse simulation specified through PulseSimulator backend
"""
from warnings import warn
import numpy as np
from ..system_models.string_model_parser.string_model_parser import NoiseParser
from ..qutip_extra_lite import qobj_generators as qobj_gen
from .digest_pulse_qobj import digest_pulse_qobj
from ..de_solvers.pulse_de_options import OPoptions
from .unitary_controller import run_unitary_experiments
from .mc_controller import run_monte_carlo_experiments
def pulse_controller(qobj, system_model, backend_options):
""" Interprets PulseQobj input, runs simulations, and returns results
Parameters:
qobj (qobj): pulse qobj containing a list of pulse schedules
system_model (PulseSystemModel): contains system model information
backend_options (dict): dict of options, which overrides other parameters
Returns:
list: simulation results
Raises:
ValueError: if input is of incorrect format
Exception: for invalid ODE options
"""
pulse_sim_desc = PulseSimDescription()
if backend_options is None:
backend_options = {}
noise_model = backend_options.get('noise_model', None)
# post warnings for unsupported features
_unsupported_warnings(noise_model)
# ###############################
# ### Extract model parameters
# ###############################
# Get qubit list and number
qubit_list = system_model.subsystem_list
if qubit_list is None:
raise ValueError('Model must have a qubit list to simulate.')
n_qubits = len(qubit_list)
# get Hamiltonian
if system_model.hamiltonian is None:
raise ValueError('Model must have a Hamiltonian to simulate.')
ham_model = system_model.hamiltonian
# For now we dump this into OpSystem, though that should be refactored
pulse_sim_desc.system = ham_model._system
pulse_sim_desc.vars = ham_model._variables
pulse_sim_desc.channels = ham_model._channels
pulse_sim_desc.h_diag = ham_model._h_diag
pulse_sim_desc.evals = ham_model._evals
pulse_sim_desc.estates = ham_model._estates
dim_qub = ham_model._subsystem_dims
dim_osc = {}
# convert estates into a Qutip qobj
estates = [qobj_gen.state(state) for state in ham_model._estates.T[:]]
pulse_sim_desc.initial_state = estates[0]
pulse_sim_desc.global_data['vars'] = list(pulse_sim_desc.vars.values())
# Need this info for evaluating the hamiltonian vars in the c++ solver
pulse_sim_desc.global_data['vars_names'] = list(pulse_sim_desc.vars.keys())
# Get dt
if system_model.dt is None:
raise ValueError('Qobj must have a dt value to simulate.')
pulse_sim_desc.dt = system_model.dt
# Parse noise
if noise_model:
noise = NoiseParser(noise_dict=noise_model, dim_osc=dim_osc, dim_qub=dim_qub)
noise.parse()
pulse_sim_desc.noise = noise.compiled
if any(pulse_sim_desc.noise):
pulse_sim_desc.can_sample = False
# ###############################
# ### Parse qobj_config settings
# ###############################
digested_qobj = digest_pulse_qobj(qobj,
pulse_sim_desc.channels,
pulse_sim_desc.dt,
qubit_list,
backend_options)
# does this even need to be extracted here, or can the relevant info just be passed to the
# relevant functions?
pulse_sim_desc.global_data['shots'] = digested_qobj.shots
pulse_sim_desc.global_data['meas_level'] = digested_qobj.meas_level
pulse_sim_desc.global_data['meas_return'] = digested_qobj.meas_return
pulse_sim_desc.global_data['memory_slots'] = digested_qobj.memory_slots
pulse_sim_desc.global_data['memory'] = digested_qobj.memory
pulse_sim_desc.global_data['n_registers'] = digested_qobj.n_registers
pulse_sim_desc.global_data['pulse_array'] = digested_qobj.pulse_array
pulse_sim_desc.global_data['pulse_indices'] = digested_qobj.pulse_indices
pulse_sim_desc.pulse_to_int = digested_qobj.pulse_to_int
pulse_sim_desc.experiments = digested_qobj.experiments
# Handle qubit_lo_freq
qubit_lo_freq = digested_qobj.qubit_lo_freq
# if it wasn't specified in the PulseQobj, draw from system_model
if qubit_lo_freq is None:
qubit_lo_freq = system_model._qubit_freq_est
# if still None draw from the Hamiltonian
if qubit_lo_freq is None:
qubit_lo_freq = system_model.hamiltonian.get_qubit_lo_from_drift()
warn('Warning: qubit_lo_freq was not specified in PulseQobj or in PulseSystemModel, ' +
             'so it is being automatically determined from the drift Hamiltonian.')
pulse_sim_desc.freqs = system_model.calculate_channel_frequencies(qubit_lo_freq=qubit_lo_freq)
pulse_sim_desc.global_data['freqs'] = list(pulse_sim_desc.freqs.values())
# ###############################
# ### Parse backend_options
# # solver-specific information should be extracted in the solver
# ###############################
pulse_sim_desc.global_data['seed'] = (int(backend_options['seed']) if 'seed' in backend_options
else None)
pulse_sim_desc.global_data['q_level_meas'] = int(backend_options.get('q_level_meas', 1))
# solver options
allowed_ode_options = ['atol', 'rtol', 'nsteps', 'max_step',
'num_cpus', 'norm_tol', 'norm_steps',
'rhs_reuse', 'rhs_filename']
ode_options = backend_options.get('ode_options', {})
for key in ode_options:
if key not in allowed_ode_options:
raise Exception('Invalid ode_option: {}'.format(key))
pulse_sim_desc.ode_options = OPoptions(**ode_options)
# Set the ODE solver max step to be the half the
# width of the smallest pulse
min_width = np.iinfo(np.int32).max
for key, val in pulse_sim_desc.pulse_to_int.items():
if key != 'pv':
stop = pulse_sim_desc.global_data['pulse_indices'][val + 1]
start = pulse_sim_desc.global_data['pulse_indices'][val]
min_width = min(min_width, stop - start)
pulse_sim_desc.ode_options.max_step = min_width / 2 * pulse_sim_desc.dt
# ########################################
# Determination of measurement operators.
# ########################################
pulse_sim_desc.global_data['measurement_ops'] = [None] * n_qubits
for exp in pulse_sim_desc.experiments:
# Add in measurement operators
# Not sure if this will work for multiple measurements
# Note: the extraction of multiple measurements works, but the simulator itself
# implicitly assumes there is only one measurement at the end
if any(exp['acquire']):
for acq in exp['acquire']:
for jj in acq[1]:
if jj > qubit_list[-1]:
continue
if not pulse_sim_desc.global_data['measurement_ops'][qubit_list.index(jj)]:
q_level_meas = pulse_sim_desc.global_data['q_level_meas']
pulse_sim_desc.global_data['measurement_ops'][qubit_list.index(jj)] = \
qobj_gen.qubit_occ_oper_dressed(jj,
estates,
h_osc=dim_osc,
h_qub=dim_qub,
level=q_level_meas
)
if not exp['can_sample']:
pulse_sim_desc.can_sample = False
op_data_config(pulse_sim_desc)
run_experiments = (run_unitary_experiments if pulse_sim_desc.can_sample
else run_monte_carlo_experiments)
exp_results, exp_times = run_experiments(pulse_sim_desc)
return format_exp_results(exp_results, exp_times, pulse_sim_desc)
def op_data_config(op_system):
""" Preps the data for the opsolver.
This should eventually be replaced by functions that construct different types of DEs
in standard formats
Everything is stored in the passed op_system.
Args:
op_system (OPSystem): An openpulse system.
"""
num_h_terms = len(op_system.system)
H = [hpart[0] for hpart in op_system.system]
op_system.global_data['num_h_terms'] = num_h_terms
# take care of collapse operators, if any
op_system.global_data['c_num'] = 0
if op_system.noise:
op_system.global_data['c_num'] = len(op_system.noise)
op_system.global_data['num_h_terms'] += 1
op_system.global_data['c_ops_data'] = []
op_system.global_data['c_ops_ind'] = []
op_system.global_data['c_ops_ptr'] = []
op_system.global_data['n_ops_data'] = []
op_system.global_data['n_ops_ind'] = []
op_system.global_data['n_ops_ptr'] = []
op_system.global_data['h_diag_elems'] = op_system.h_diag
# if there are any collapse operators
H_noise = 0
for kk in range(op_system.global_data['c_num']):
c_op = op_system.noise[kk]
n_op = c_op.dag() * c_op
# collapse ops
op_system.global_data['c_ops_data'].append(c_op.data.data)
op_system.global_data['c_ops_ind'].append(c_op.data.indices)
op_system.global_data['c_ops_ptr'].append(c_op.data.indptr)
# norm ops
op_system.global_data['n_ops_data'].append(n_op.data.data)
op_system.global_data['n_ops_ind'].append(n_op.data.indices)
op_system.global_data['n_ops_ptr'].append(n_op.data.indptr)
# Norm ops added to time-independent part of
# Hamiltonian to decrease norm
H_noise -= 0.5j * n_op
if H_noise:
H = H + [H_noise]
# construct data sets
op_system.global_data['h_ops_data'] = [-1.0j * hpart.data.data
for hpart in H]
op_system.global_data['h_ops_ind'] = [hpart.data.indices for hpart in H]
op_system.global_data['h_ops_ptr'] = [hpart.data.indptr for hpart in H]
# Convert inital state to flat array in global_data
op_system.global_data['initial_state'] = \
op_system.initial_state.full().ravel()
def format_exp_results(exp_results, exp_times, op_system):
""" format simulation results
Parameters:
exp_results (list): simulation results
exp_times (list): simulation times
op_system (PulseSimDescription): object containing all simulation information
Returns:
list: formatted simulation results
"""
# format the data into the proper output
all_results = []
for idx_exp, exp in enumerate(op_system.experiments):
m_lev = op_system.global_data['meas_level']
m_ret = op_system.global_data['meas_return']
# populate the results dictionary
results = {'seed_simulator': exp['seed'],
'shots': op_system.global_data['shots'],
'status': 'DONE',
'success': True,
'time_taken': exp_times[idx_exp],
'header': exp['header'],
'meas_level': m_lev,
'meas_return': m_ret,
'data': {}}
if op_system.can_sample:
memory = exp_results[idx_exp][0]
results['data']['statevector'] = []
for coef in exp_results[idx_exp][1]:
results['data']['statevector'].append([np.real(coef),
np.imag(coef)])
results['header']['ode_t'] = exp_results[idx_exp][2]
else:
memory = exp_results[idx_exp]
# meas_level 2 return the shots
if m_lev == 2:
            # convert the memory **array** into an
# integer
# e.g. [1,0] -> 2
int_mem = memory.dot(np.power(2.0,
np.arange(memory.shape[1]))).astype(int)
# if the memory flag is set return each shot
if op_system.global_data['memory']:
hex_mem = [hex(val) for val in int_mem]
results['data']['memory'] = hex_mem
# Get hex counts dict
unique = np.unique(int_mem, return_counts=True)
hex_dict = {}
for kk in range(unique[0].shape[0]):
key = hex(unique[0][kk])
hex_dict[key] = unique[1][kk]
results['data']['counts'] = hex_dict
# meas_level 1 returns the <n>
elif m_lev == 1:
if m_ret == 'avg':
memory = [np.mean(memory, 0)]
# convert into the right [real, complex] pair form for json
# this should be cython?
results['data']['memory'] = []
for mem_shot in memory:
results['data']['memory'].append([])
for mem_slot in mem_shot:
results['data']['memory'][-1].append(
[np.real(mem_slot), np.imag(mem_slot)])
if m_ret == 'avg':
results['data']['memory'] = results['data']['memory'][0]
all_results.append(results)
return all_results
def _unsupported_warnings(noise_model):
""" Warns the user about untested/unsupported features.
Parameters:
noise_model (dict): backend_options for simulation
Returns:
Raises:
AerError: for unsupported features
"""
# Warnings that don't stop execution
warning_str = '{} are an untested feature, and therefore may not behave as expected.'
if noise_model is not None:
warn(warning_str.format('Noise models'))
class PulseSimDescription():
""" Object for holding any/all information required for simulation.
Needs to be refactored into different pieces.
"""
def __init__(self):
# The system Hamiltonian in numerical format
self.system = None
# The noise (if any) in numerical format
self.noise = None
# System variables
self.vars = None
# The initial state of the system
self.initial_state = None
# Channels in the Hamiltonian string
# these tell the order in which the channels
# are evaluated in the RHS solver.
self.channels = None
# options of the ODE solver
self.ode_options = None
# time between pulse sample points.
self.dt = None
# Array containing all pulse samples
self.pulse_array = None
# Array of indices indicating where a pulse starts in the self.pulse_array
self.pulse_indices = None
# A dict that translates pulse names to integers for use in self.pulse_indices
self.pulse_to_int = None
# Holds the parsed experiments
self.experiments = []
# Can experiments be simulated once then sampled
self.can_sample = True
# holds global data
self.global_data = {}
# holds frequencies for the channels
self.freqs = {}
# diagonal elements of the hamiltonian
self.h_diag = None
# eigenvalues of the time-independent hamiltonian
self.evals = None
# eigenstates of the time-independent hamiltonian
self.estates = None
|
[
"numpy.iinfo",
"numpy.imag",
"numpy.mean",
"numpy.arange",
"numpy.real",
"warnings.warn",
"numpy.unique"
] |
[((5144, 5312), 'warnings.warn', 'warn', (["('Warning: qubit_lo_freq was not specified in PulseQobj or in PulseSystemModel, '\n + 'so it is being automatically determined from the drift Hamiltonian.')"], {}), "(\n 'Warning: qubit_lo_freq was not specified in PulseQobj or in PulseSystemModel, '\n + 'so it is being automatically determined from the drift Hamiltonian.')\n", (5148, 5312), False, 'from warnings import warn\n'), ((6483, 6501), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (6491, 6501), True, 'import numpy as np\n'), ((12920, 12958), 'numpy.unique', 'np.unique', (['int_mem'], {'return_counts': '(True)'}), '(int_mem, return_counts=True)\n', (12929, 12958), True, 'import numpy as np\n'), ((12145, 12158), 'numpy.real', 'np.real', (['coef'], {}), '(coef)\n', (12152, 12158), True, 'import numpy as np\n'), ((12215, 12228), 'numpy.imag', 'np.imag', (['coef'], {}), '(coef)\n', (12222, 12228), True, 'import numpy as np\n'), ((13294, 13312), 'numpy.mean', 'np.mean', (['memory', '(0)'], {}), '(memory, 0)\n', (13301, 13312), True, 'import numpy as np\n'), ((12609, 12635), 'numpy.arange', 'np.arange', (['memory.shape[1]'], {}), '(memory.shape[1])\n', (12618, 12635), True, 'import numpy as np\n'), ((13682, 13699), 'numpy.real', 'np.real', (['mem_slot'], {}), '(mem_slot)\n', (13689, 13699), True, 'import numpy as np\n'), ((13701, 13718), 'numpy.imag', 'np.imag', (['mem_slot'], {}), '(mem_slot)\n', (13708, 13718), True, 'import numpy as np\n')]
|
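For meas_level 2 the controller above packs each shot's memory-slot bits into a single integer by dotting the row with powers of two (slot 0 lands in the least-significant bit), then builds a hex-keyed counts dict with np.unique. A standalone sketch of that packing, with made-up shot data:

import numpy as np

# three shots over two memory slots: rows are shots, columns are slots
memory = np.array([[1, 0], [1, 0], [0, 1]])

# slot 0 is the least-significant bit: [1, 0] -> 1, [0, 1] -> 2
int_mem = memory.dot(np.power(2.0, np.arange(memory.shape[1]))).astype(int)

vals, counts = np.unique(int_mem, return_counts=True)
print({hex(k): c for k, c in zip(vals, counts)})  # {'0x1': 2, '0x2': 1}
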
import os
def get_subfolder_names(path):
return [f.name for f in os.scandir(path) if f.is_dir()]
|
[
"os.scandir"
] |
[((71, 87), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (81, 87), False, 'import os\n')]
|
import datetime
import math
from collections import defaultdict
import dateutil.parser
from elasticmagic.types import instantiate
from elasticmagic.types import Type
from elasticmagic.compat import force_unicode
from elasticmagic.compat import int_types
TIME_ATTRS = {'hour', 'minute', 'second', 'microsecond', 'tzinfo'}
class TypeCodec(object):
def decode(self, value, es_type=None):
raise NotImplementedError
def encode(self, value, es_type=None):
raise NotImplementedError
class StringCodec(TypeCodec):
def decode(self, value, es_type=None):
return force_unicode(value)
def encode(self, value, es_type=None):
return force_unicode(value)
class FloatCodec(TypeCodec):
def decode(self, value, es_type=None):
v = float(value)
if math.isnan(v) or math.isinf(v):
raise ValueError('NaN or Inf is not supported')
return v
def encode(self, value, es_type=None):
return value
class IntCodec(TypeCodec):
def encode(self, value, es_type=None):
if isinstance(value, int_types):
return force_unicode(value)
return force_unicode(int(value))
def decode(self, value, es_type=None):
v = int(value)
if (
es_type is not None and
(v < es_type.MIN_VALUE or v > es_type.MAX_VALUE)
):
raise ValueError(
'Value must be between {} and {}'.format(
es_type.MIN_VALUE, es_type.MAX_VALUE
)
)
return v
class BoolCodec(TypeCodec):
def encode(self, value, es_type=None):
if value is True:
return 'true'
if value is False:
return 'false'
return bool(value)
def decode(self, value, es_type=None):
if isinstance(value, bool):
return value
if value == 'true':
return True
if value == 'false':
return False
raise ValueError('Cannot decode boolean value: {}'.format(value))
class DateCodec(TypeCodec):
def encode(self, value, es_type=None):
if isinstance(value, datetime.datetime):
return value.strftime('%Y-%m-%dT%H:%M:%S.%f')
if isinstance(value, datetime.date):
return value.strftime('%Y-%m-%d')
raise ValueError('Value must be date or datetime: {}'.format(value))
def decode(self, value, es_type=None):
if isinstance(value, (datetime.datetime, datetime.date)):
return value
return dateutil.parser.parse(value)
def wrap_list(v):
if not isinstance(v, (list, tuple)):
return [v]
return v
class BaseCodec(object):
def decode_value(self, value, es_type=None):
raise NotImplementedError()
def decode(self, params, types=None):
raise NotImplementedError()
def encode_value(self, value, es_type=None):
raise NotImplementedError()
def encode(self, values, types=None):
raise NotImplementedError()
class SimpleCodec(BaseCodec):
OP_SEP = '__'
NULL_VAL = 'null'
DEFAULT_OP = 'exact'
CODECS = {
None: StringCodec,
float: FloatCodec,
int: IntCodec,
bool: BoolCodec,
datetime.datetime: DateCodec,
}
@staticmethod
def _normalize_params(params):
if hasattr(params, 'getall'):
# Webob
return params.dict_of_lists()
if hasattr(params, 'getlist'):
# Django
return dict(params.lists())
if isinstance(params, (list, tuple)):
# list, tuple
new_params = defaultdict(list)
for p, v in params:
new_params[p].extend(v)
return new_params
if isinstance(params, dict):
# dict
return params
raise TypeError("'params' must be Webob MultiDict, "
"Django QueryDict, list, tuple or dict")
@staticmethod
def _get_es_type_class(es_type):
if es_type is not None and isinstance(es_type, Type):
if es_type.sub_type:
return SimpleCodec._get_es_type_class(es_type.sub_type)
return es_type.__class__
return es_type
@staticmethod
def _get_es_and_python_types(es_type):
if es_type is None:
return None, None
es_type = instantiate(es_type)
if es_type.sub_type:
es_type = es_type.sub_type
return es_type, es_type.python_type
def decode_value(self, value, es_type=None):
if value is None or value == self.NULL_VAL:
return None
es_type, python_type = self._get_es_and_python_types(es_type)
value_codec = self.CODECS.get(python_type, StringCodec)()
return value_codec.decode(value, es_type=es_type)
def decode(self, params, types=None):
params = self._normalize_params(params)
types = types or {}
decoded_params = {}
for name, v in params.items():
name, _, op = name.partition(self.OP_SEP)
if not op:
op = self.DEFAULT_OP
es_type = types.get(name)
for w in wrap_list(v):
try:
decoded_value = self.decode_value(w, es_type=es_type)
decoded_params \
.setdefault(name, {}) \
.setdefault(op, []) \
.append(decoded_value)
except ValueError:
# just ignore values we cannot decode
pass
return decoded_params
def encode_value(self, value, es_type=None):
if value is None:
return self.NULL_VAL
es_type, python_type = self._get_es_and_python_types(es_type)
value_codec = self.CODECS.get(python_type, StringCodec)()
return value_codec.encode(value, es_type=es_type)
def encode(self, values, types=None):
params = {}
for name, ops in values.items():
for op, vals in ops.items():
if op == self.DEFAULT_OP:
key = name
else:
key = '{}__{}'.format(name, op)
if types:
es_type = types.get(name)
else:
es_type = None
params[key] = [
self.encode_value(v, es_type=es_type)
for v in vals
]
return params
|
[
"elasticmagic.types.instantiate",
"math.isnan",
"math.isinf",
"collections.defaultdict",
"elasticmagic.compat.force_unicode"
] |
[((597, 617), 'elasticmagic.compat.force_unicode', 'force_unicode', (['value'], {}), '(value)\n', (610, 617), False, 'from elasticmagic.compat import force_unicode\n'), ((677, 697), 'elasticmagic.compat.force_unicode', 'force_unicode', (['value'], {}), '(value)\n', (690, 697), False, 'from elasticmagic.compat import force_unicode\n'), ((4395, 4415), 'elasticmagic.types.instantiate', 'instantiate', (['es_type'], {}), '(es_type)\n', (4406, 4415), False, 'from elasticmagic.types import instantiate\n'), ((808, 821), 'math.isnan', 'math.isnan', (['v'], {}), '(v)\n', (818, 821), False, 'import math\n'), ((825, 838), 'math.isinf', 'math.isinf', (['v'], {}), '(v)\n', (835, 838), False, 'import math\n'), ((1114, 1134), 'elasticmagic.compat.force_unicode', 'force_unicode', (['value'], {}), '(value)\n', (1127, 1134), False, 'from elasticmagic.compat import force_unicode\n'), ((3645, 3662), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3656, 3662), False, 'from collections import defaultdict\n')]
|
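The OP_SEP convention above means a key such as price__lte splits into a field name and an operation, with 'exact' filled in when no operation is given; decoded values always come back wrapped in lists. A small usage sketch derived from the code, with invented field names and no types mapping (so every value decodes through StringCodec):

codec = SimpleCodec()

decoded = codec.decode({'brand': 'apple', 'price__lte': '100'})
print(decoded)
# {'brand': {'exact': ['apple']}, 'price': {'lte': ['100']}}

# encode() maps the nested structure back to flat query-style params
print(codec.encode(decoded))
# {'brand': ['apple'], 'price__lte': ['100']}
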
import uuid
from datetime import datetime
from flask import current_app
from sqlalchemy.orm.exc import NoResultFound
from app.models import ServiceSmsSender, Service
from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, \
create_service_with_inbound_number
def test_add_service_sms_sender_calls_dao_method(admin_request, mocker):
added_service_sms_sender = ServiceSmsSender(created_at=datetime.utcnow())
dao_add_sms_sender_for_service = mocker.patch(
'app.service.sms_sender_rest.dao_add_sms_sender_for_service',
return_value=added_service_sms_sender
)
service_id = uuid.uuid4()
mocker.patch(
'app.service.sms_sender_rest.dao_fetch_service_by_id',
return_value=Service()
)
response_json = admin_request.post(
'service_sms_sender.add_service_sms_sender',
service_id=service_id,
_data={
"sms_sender": 'second',
"is_default": False,
},
_expected_status=201
)
dao_add_sms_sender_for_service.assert_called_with(service_id=service_id, sms_sender='second', is_default=False)
assert response_json == added_service_sms_sender.serialize()
def test_add_service_sms_sender_return_404_when_service_does_not_exist(admin_request, mocker):
mocker.patch('app.service.sms_sender_rest.dao_fetch_service_by_id', side_effect=NoResultFound())
response_json = admin_request.post(
'service_sms_sender.add_service_sms_sender',
service_id=uuid.uuid4(),
_expected_status=404
)
assert response_json['result'] == 'error'
assert response_json['message'] == 'No result found'
def test_add_service_sms_sender_return_400_when_rate_limit_too_small(admin_request, mocker):
added_service_sms_sender = ServiceSmsSender(created_at=datetime.utcnow(), rate_limit=1)
mocker.patch(
'app.service.sms_sender_rest.dao_add_sms_sender_for_service',
return_value=added_service_sms_sender
)
mocker.patch(
'app.service.sms_sender_rest.dao_fetch_service_by_id',
return_value=Service()
)
response_json = admin_request.post(
'service_sms_sender.add_service_sms_sender',
service_id=uuid.uuid4(),
_data={
"sms_sender": 'second',
"is_default": False,
"rate_limit": 0,
},
_expected_status=400
)
assert response_json['errors'][0]['error'] == 'ValidationError'
assert response_json['errors'][0]['message'] == 'rate_limit 0 is less than the minimum of 1'
def test_update_service_sms_sender(admin_request, notify_db_session):
service = create_service()
service_sms_sender = create_service_sms_sender(service=service, sms_sender='1235', is_default=False)
response_json = admin_request.post(
'service_sms_sender.update_service_sms_sender',
service_id=service.id,
sms_sender_id=service_sms_sender.id,
_data={
"sms_sender": 'second',
"is_default": False,
},
_expected_status=200
)
assert response_json['sms_sender'] == 'second'
assert not response_json['inbound_number_id']
assert not response_json['is_default']
def test_update_service_sms_sender_does_not_allow_sender_update_for_inbound_number(admin_request, notify_db_session):
service = create_service()
inbound_number = create_inbound_number('12345', service_id=service.id)
service_sms_sender = create_service_sms_sender(
service=service,
sms_sender='1235',
is_default=False,
inbound_number_id=inbound_number.id
)
payload = {
"sms_sender": 'second',
"is_default": True,
"inbound_number_id": str(inbound_number.id)
}
admin_request.post(
'service_sms_sender.update_service_sms_sender',
service_id=service.id,
sms_sender_id=service_sms_sender.id,
_data=payload,
_expected_status=400
)
def test_update_service_sms_sender_return_404_when_service_does_not_exist(admin_request, mocker):
mocker.patch(
'app.service.sms_sender_rest.dao_fetch_service_by_id',
side_effect=NoResultFound()
)
response = admin_request.post(
'service_sms_sender.update_service_sms_sender',
service_id=uuid.uuid4(),
sms_sender_id=uuid.uuid4(),
_expected_status=404
)
assert response['result'] == 'error'
assert response['message'] == 'No result found'
def test_delete_service_sms_sender_can_archive_sms_sender(admin_request, notify_db_session):
service = create_service()
service_sms_sender = create_service_sms_sender(
service=service,
sms_sender='5678',
is_default=False
)
admin_request.post(
'service_sms_sender.delete_service_sms_sender',
service_id=service.id,
sms_sender_id=service_sms_sender.id,
)
assert service_sms_sender.archived is True
def test_delete_service_sms_sender_returns_400_if_archiving_inbound_number(admin_request, notify_db_session):
service = create_service_with_inbound_number(inbound_number='7654321')
inbound_number = service.service_sms_senders[0]
response = admin_request.post(
'service_sms_sender.delete_service_sms_sender',
service_id=service.id,
sms_sender_id=service.service_sms_senders[0].id,
_expected_status=400
)
assert response == {'message': 'You cannot delete an inbound number', 'result': 'error'}
assert inbound_number.archived is False
def test_get_service_sms_sender_by_id(admin_request, notify_db_session):
service_sms_sender = create_service_sms_sender(
service=create_service(),
sms_sender='1235',
is_default=False
)
response_json = admin_request.get(
'service_sms_sender.get_service_sms_sender_by_id',
service_id=service_sms_sender.service_id,
sms_sender_id=service_sms_sender.id,
_expected_status=200
)
assert response_json == service_sms_sender.serialize()
def test_get_service_sms_sender_by_id_returns_404_when_service_sms_sender_does_not_exist(admin_request, mocker):
mocker.patch('app.service.sms_sender_rest.dao_get_service_sms_sender_by_id', side_effect=NoResultFound())
admin_request.get(
'service_sms_sender.get_service_sms_sender_by_id',
service_id=uuid.uuid4(),
sms_sender_id=uuid.uuid4(),
_expected_status=404
)
def test_get_service_sms_senders_for_service(admin_request, notify_db_session):
service_sms_sender = create_service_sms_sender(
service=create_service(),
sms_sender='second',
is_default=False
)
response_json = admin_request.get(
'service_sms_sender.get_service_sms_senders_for_service',
service_id=service_sms_sender.service_id,
_expected_status=200
)
assert len(response_json) == 2
assert response_json[0]['is_default']
assert response_json[0]['sms_sender'] == current_app.config['FROM_NUMBER']
assert not response_json[1]['is_default']
assert response_json[1]['sms_sender'] == 'second'
def test_get_service_sms_senders_for_service_returns_404_when_service_does_not_exist(admin_request, mocker):
# mocker.patch('app.service.sms_sender_rest.dao_fetch_service_by_id', side_effect=NoResultFound())
admin_request.get(
'service_sms_sender.get_service_sms_senders_for_service',
service_id=uuid.uuid4(),
_expected_status=404
)
|
[
"uuid.uuid4",
"tests.app.db.create_service_sms_sender",
"tests.app.db.create_service_with_inbound_number",
"app.models.Service",
"tests.app.db.create_service",
"datetime.datetime.utcnow",
"tests.app.db.create_inbound_number",
"sqlalchemy.orm.exc.NoResultFound"
] |
[((643, 655), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (653, 655), False, 'import uuid\n'), ((2662, 2678), 'tests.app.db.create_service', 'create_service', ([], {}), '()\n', (2676, 2678), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((2704, 2783), 'tests.app.db.create_service_sms_sender', 'create_service_sms_sender', ([], {'service': 'service', 'sms_sender': '"""1235"""', 'is_default': '(False)'}), "(service=service, sms_sender='1235', is_default=False)\n", (2729, 2783), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((3367, 3383), 'tests.app.db.create_service', 'create_service', ([], {}), '()\n', (3381, 3383), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((3405, 3458), 'tests.app.db.create_inbound_number', 'create_inbound_number', (['"""12345"""'], {'service_id': 'service.id'}), "('12345', service_id=service.id)\n", (3426, 3458), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((3484, 3605), 'tests.app.db.create_service_sms_sender', 'create_service_sms_sender', ([], {'service': 'service', 'sms_sender': '"""1235"""', 'is_default': '(False)', 'inbound_number_id': 'inbound_number.id'}), "(service=service, sms_sender='1235', is_default=\n False, inbound_number_id=inbound_number.id)\n", (3509, 3605), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((4609, 4625), 'tests.app.db.create_service', 'create_service', ([], {}), '()\n', (4623, 4625), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((4651, 4730), 'tests.app.db.create_service_sms_sender', 'create_service_sms_sender', ([], {'service': 'service', 'sms_sender': '"""5678"""', 'is_default': '(False)'}), "(service=service, sms_sender='5678', is_default=False)\n", (4676, 4730), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((5098, 5158), 'tests.app.db.create_service_with_inbound_number', 'create_service_with_inbound_number', ([], {'inbound_number': '"""7654321"""'}), "(inbound_number='7654321')\n", (5132, 5158), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((434, 451), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (449, 451), False, 'from datetime import datetime\n'), ((759, 768), 'app.models.Service', 'Service', ([], {}), '()\n', (766, 768), False, 'from app.models import ServiceSmsSender, Service\n'), ((1395, 1410), 'sqlalchemy.orm.exc.NoResultFound', 'NoResultFound', ([], {}), '()\n', (1408, 1410), False, 'from sqlalchemy.orm.exc import NoResultFound\n'), ((1525, 1537), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1535, 1537), False, 'import uuid\n'), ((1832, 1849), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1847, 1849), False, 'from datetime import datetime\n'), ((2107, 2116), 'app.models.Service', 'Service', ([], {}), '()\n', (2114, 2116), False, 'from app.models import ServiceSmsSender, Service\n'), ((2236, 2248), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2246, 2248), False, 'import uuid\n'), ((4188, 4203), 'sqlalchemy.orm.exc.NoResultFound', 'NoResultFound', ([], {}), '()\n', (4201, 4203), False, 'from sqlalchemy.orm.exc import NoResultFound\n'), ((4321, 4333), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4331, 4333), False, 'import uuid\n'), ((4357, 4369), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4367, 4369), False, 'import uuid\n'), ((5706, 5722), 'tests.app.db.create_service', 'create_service', ([], {}), '()\n', (5720, 5722), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((6279, 6294), 'sqlalchemy.orm.exc.NoResultFound', 'NoResultFound', ([], {}), '()\n', (6292, 6294), False, 'from sqlalchemy.orm.exc import NoResultFound\n'), ((6398, 6410), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6408, 6410), False, 'import uuid\n'), ((6434, 6446), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6444, 6446), False, 'import uuid\n'), ((6633, 6649), 'tests.app.db.create_service', 'create_service', ([], {}), '()\n', (6647, 6649), False, 'from tests.app.db import create_service, create_service_sms_sender, create_inbound_number, create_service_with_inbound_number\n'), ((7482, 7494), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7492, 7494), False, 'import uuid\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
atmospheres.py includes functions to calculate atmospheric quantities.
Created on Tue Nov 29 11:45:15 2016
@author: tr1010 (<NAME>)
"""
import sys
sys.path.append('atmosphere_models/Python-NRLMSISE-00-master')
from nrlmsise_00_header import *
from nrlmsise_00 import *
import numpy as np
def nrlmsise00(doy,year,sec,alt,g_lat,g_long,lst,f107A,f107,ap):
"""
nrlmsise00 calculates atmospheric quantities using the NRLMSISE-00
atmosphere published in 2001 by <NAME>, <NAME>, and <NAME>.
Originally written in FORTRAN, it was later implemented in C by Dominik
Brodowski.
This function calls a Python port of Brodowski's C implementation originally
written by <NAME> in 2013. This software was released under an MIT
license (see the license file in the atmosphere_models directory).
The NRLMSISE-00 model uses a number of switches (contained in the flags
class) to modify the model output. At the moment, these defaults are hard-
wired into PETra. Later revisions will give the user the ability to select
these switches. For more detailed information about the inputs/outputs/switches
    used in this model, the user is directed to the docstrings of the functions
    contained in the model files (nrlmsise_00_header.py and nrlmsise_00.py).
Inputs:
doy: day of year
year: year (currently ignored)
sec: seconds in day
alt: altitude
g_lat: geodetic latitude
g_long: geodetic longitude
lst: local apparent solar time (hours)
f107A: 81 day average of F10.7 flux (centred on doy)
f107: daily f10.7 flux (for previous day)
ap: magnetic index (daily)
Outputs:
rho: density at the requested altitude
pressure_mixture: pressure at the requested altitude
temperature: temperature at the requested altitude
R_mixture: the gas constant of the mixture
mean_free_path: mean free path of the air at the requested altitude.
In contrast to the other outputs of this function, the
mean free path calculation assumes a single molecule
gas (assumed to be an 'average' air molecule)
        eta: viscosity (calculated using Sutherland's law)
molecular_weight_mixture: the molecular weight of the air at the
requested altitude
SoS: speed of sound (assume ratio of specific heats is constant 1.4
everywhere in the atmosphere)
"""
output = nrlmsise_output()
Input = nrlmsise_input()
# output = [nrlmsise_output() for _ in range(17)]
# Input = [nrlmsise_input() for _ in range(17)]
flags = nrlmsise_flags()
aph = ap_array() # For more detailed ap data (i.e more than daily)
flags.switches[0] = 1 # to have results in m rather than cm
for i in range(1,24):
flags.switches[i]=1
# below 80 km solar & magnetic effects not well established so set to defaults
if alt < 80e3:
f107 = 150.
f107A = 150.
ap = 4.
# fill out Input class
Input.year=year
Input.doy=doy
Input.sec=sec
Input.alt=alt*1e-3 #change input to km
Input.g_lat=g_lat*180/np.pi
Input.g_long=g_long*180/np.pi
Input.lst=lst
Input.f107A=f107A
Input.f107=f107
Input.ap=ap
if alt > 500e3:
gtd7d(Input, flags, output)
else:
gtd7(Input, flags, output)
d = output.d
t = output.t
"""
DEFAULT OUTPUT VARIABLES:
d[0] - HE NUMBER DENSITY(CM-3)
d[1] - O NUMBER DENSITY(CM-3)
d[2] - N2 NUMBER DENSITY(CM-3)
d[3] - O2 NUMBER DENSITY(CM-3)
d[4] - AR NUMBER DENSITY(CM-3)
d[5] - TOTAL MASS DENSITY(GM/CM3) [includes d[8] in td7d]
d[6] - H NUMBER DENSITY(CM-3)
d[7] - N NUMBER DENSITY(CM-3)
d[8] - Anomalous oxygen NUMBER DENSITY(CM-3)
t[0] - EXOSPHERIC TEMPERATURE
t[1] - TEMPERATURE AT ALT
"""
#Now process output to get required values
kb = 1.38064852e-23 # Boltzmann constant (m**2 kg)/(s**2 K)
Na = 6.022140857e26 # avogadro number (molecules per kilomole)
R0 = kb * Na # universal gas constant
#Molecular weights of different components (kg/kmole)
molecular_weights = np.zeros(8)
molecular_weights[0] = 4.002602 #He
molecular_weights[1] = 15.9994 #O
molecular_weights[2] = 28.0134 #N2
molecular_weights[3] = 31.9988 #O2
molecular_weights[4] = 39.948 #AR
molecular_weights[5] = 1.00794 #H
molecular_weights[6] = 14.0067 #N
molecular_weights[7] = 15.9994 #anomalous O
# Calculate partial pressures
partial_p = np.zeros(8)
partial_p[0] = d[0]*kb*t[1] #He
partial_p[1] = d[1]*kb*t[1] #O
partial_p[2] = d[2]*kb*t[1] #N2
partial_p[3] = d[3]*kb*t[1] #O2
partial_p[4] = d[4]*kb*t[1] #AR
partial_p[5] = d[6]*kb*t[1] #H
partial_p[6] = d[7]*kb*t[1] #N
partial_p[7] = d[8]*kb*t[1] #anomalous O
#Assuming perfect gas, calculate atmospheric pressure
pressure_mixture = np.sum(partial_p)
temperature = t[1]
mole_fraction = np.divide(partial_p,pressure_mixture)
molecular_weight_mixture = np.sum(np.multiply(mole_fraction,molecular_weights)) #kg/kmol
mass_fractions = np.multiply(mole_fraction,
np.divide(molecular_weights,molecular_weight_mixture))
specific_gas_constants = R0/molecular_weights
R_mixture = np.sum(np.multiply(mass_fractions,specific_gas_constants))
number_density_mixture = np.sum(d) - d[5]
mean_free_path = (np.sqrt(2)*np.pi*4.15e-10**2*number_density_mixture)**-1
eta = np.float64(1.458e-6*temperature**1.5/(temperature + 110.4)) # dynamic viscosity via sutherland law
SoS = np.float64(np.sqrt(1.4*R_mixture*temperature))
rho = d[5]
return rho, pressure_mixture, temperature, R_mixture, mean_free_path, eta, molecular_weight_mixture, SoS
# US mutant Atmosphere
def US62_76(r,RE):
"""
US62_76 is a very simple atmosphere model that uses the US76 standard
atmosphere below 80 km and the US62 standard atmosphere above 80km
Inputs:
r: altitude
RE: radius of the Earth
Outputs:
rho: density
P: pressure
T: temperature
mfp: mean free path
eta: viscosity (sutherland's law)
MolW: molecular weight
SoS: speed of sound
"""
#Some constants:
#RE = 6378.137e3
Na = np.float64(6.0220978e23)
sig = np.float64(3.65e-10)
# Sea level standard values:
P0 = 101325.0 #Pa
T0 = 288.15 #K
M = np.array([28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.962, 28.962,
28.88, 28.56, 28.07, 26.92, 26.66, 26.4, 25.85,
24.70, 22.66, 19.94, 17.94, 16.84, 16.17]) # Molecular masses with altitude g/mol
R0 = 8.31432 # J/mol-K
g0 = 9.806658 # m/s2
GM_R = g0*M/R0 # GM/R K/km
Z = (r - RE)*1e-3 # convert radius in m to altitude in km
H = me2po(RE,Z) # geopotential altitude
BLH = np.array([0., 11., 20., 32., 47., 51., 71., me2po(RE,86.),
me2po(RE,100.), me2po(RE,110.), me2po(RE,120.), me2po(RE,150.),
me2po(RE,160.), me2po(RE,170.), me2po(RE,190.), me2po(RE,230.),
me2po(RE,300.), me2po(RE,400.), me2po(RE,500.), me2po(RE,600.),
me2po(RE,700.)])
L = np.array([0., -6.5, 0., 1., 2.8, 0., -2.8, -2., 1.693, 5., 10., 20., 15.,
10., 7., 5., 4., 3.3, 2.6, 1.7, 1.1])
BLT = np.zeros((21,))
BLP = np.zeros((21,))
BLT[0] = T0
BLP[0] = P0
for i in range(0, 20):
# Calculate base temperatures
BLT[i+1] = BLT[i] + L[i+1]*(BLH[i+1] - BLH[i])
# Calculate base pressures
if (i+1 == 0) or (i+1 == 2) or (i+1 == 5):
BLP[i+1] = BLP[i]*np.exp(-GM_R[i+1]*(BLH[i+1] - BLH[i])/BLT[i])
else:
BLP[i+1] = BLP[i]*((BLT[i] + L[i+1]*(BLH[i+1] - BLH[i]))/BLT[i])**(-GM_R[i+1]/L[i+1])
# Calculate values at requested altitude
if H > BLH[i] and H <= BLH[i+1]:
# Molecular weight (interpolate)]
MolW = M[i] + (M[i+1] - M[i])*(H - BLH[i])/(BLH[i+1] - BLH[i])
gmrtemp = g0*MolW/R0
# Molecular scale Temperature
T = np.float64(BLT[i] + L[i+1]*(H - BLH[i]))
T = MolW*T/M[0] # Convert molecular scale temperature to kinetic temperature
# Pressure
if i+1 == 0 or i+1 == 2 or i+1 == 5:
P = np.float64(BLP[i]*np.exp(-gmrtemp*(H - BLH[i])/BLT[i]))
else:
P = np.float64(BLP[i]*((BLT[i] + L[i+1]*(H - BLH[i]))/BLT[i])**(-gmrtemp/L[i+1]))
# Density
rho = np.float64(MolW*1e-3*P/(R0*T))
mfp = np.float64(MolW*1e-3/(2**0.5*np.pi*sig**2*rho*Na)) # mean free path
eta = np.float64(1.458e-6*T**1.5/(T + 110.4)) # dynamic viscosity via sutherland law
SoS = np.float64(np.sqrt(1.4*287.085*T))
return rho, P, T, mfp, eta, MolW, SoS
def me2po(RE,Z):
"""
me2po converts geometric altitude to geopotential altitude -- the US
standard atmosphere works in geopotential altitudes, which approximates the
altitude of a pressure surface above the mean sea level.
The reasoning for this is as follows: A change in geometric altitude will
create a change in gravitational potential energy per unit mass (as the
effects of gravity become smaller as two objects move away from each other)
Inputs:
RE: Earth radius
Z: Geometric altitude
Outputs:
H: Geopotential altitude
"""
H = RE*Z/(RE + Z)
return H
|
[
"sys.path.append",
"numpy.divide",
"numpy.sum",
"numpy.multiply",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"numpy.float64",
"numpy.sqrt"
] |
[((202, 264), 'sys.path.append', 'sys.path.append', (['"""atmosphere_models/Python-NRLMSISE-00-master"""'], {}), "('atmosphere_models/Python-NRLMSISE-00-master')\n", (217, 264), False, 'import sys\n'), ((4385, 4396), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (4393, 4396), True, 'import numpy as np\n'), ((4770, 4781), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (4778, 4781), True, 'import numpy as np\n'), ((5158, 5175), 'numpy.sum', 'np.sum', (['partial_p'], {}), '(partial_p)\n', (5164, 5175), True, 'import numpy as np\n'), ((5229, 5267), 'numpy.divide', 'np.divide', (['partial_p', 'pressure_mixture'], {}), '(partial_p, pressure_mixture)\n', (5238, 5267), True, 'import numpy as np\n'), ((5792, 5858), 'numpy.float64', 'np.float64', (['(1.458e-06 * temperature ** 1.5 / (temperature + 110.4))'], {}), '(1.458e-06 * temperature ** 1.5 / (temperature + 110.4))\n', (5802, 5858), True, 'import numpy as np\n'), ((6628, 6653), 'numpy.float64', 'np.float64', (['(6.0220978e+23)'], {}), '(6.0220978e+23)\n', (6638, 6653), True, 'import numpy as np\n'), ((6663, 6683), 'numpy.float64', 'np.float64', (['(3.65e-10)'], {}), '(3.65e-10)\n', (6673, 6683), True, 'import numpy as np\n'), ((6771, 6950), 'numpy.array', 'np.array', (['[28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.962, 28.962, \n 28.88, 28.56, 28.07, 26.92, 26.66, 26.4, 25.85, 24.7, 22.66, 19.94, \n 17.94, 16.84, 16.17]'], {}), '([28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.962, \n 28.962, 28.88, 28.56, 28.07, 26.92, 26.66, 26.4, 25.85, 24.7, 22.66, \n 19.94, 17.94, 16.84, 16.17])\n', (6779, 6950), True, 'import numpy as np\n'), ((7583, 7711), 'numpy.array', 'np.array', (['[0.0, -6.5, 0.0, 1.0, 2.8, 0.0, -2.8, -2.0, 1.693, 5.0, 10.0, 20.0, 15.0, \n 10.0, 7.0, 5.0, 4.0, 3.3, 2.6, 1.7, 1.1]'], {}), '([0.0, -6.5, 0.0, 1.0, 2.8, 0.0, -2.8, -2.0, 1.693, 5.0, 10.0, 20.0,\n 15.0, 10.0, 7.0, 5.0, 4.0, 3.3, 2.6, 1.7, 1.1])\n', (7591, 7711), True, 'import numpy as np\n'), ((7724, 7739), 'numpy.zeros', 'np.zeros', (['(21,)'], {}), '((21,))\n', (7732, 7739), True, 'import numpy as np\n'), ((7750, 7765), 'numpy.zeros', 'np.zeros', (['(21,)'], {}), '((21,))\n', (7758, 7765), True, 'import numpy as np\n'), ((5310, 5355), 'numpy.multiply', 'np.multiply', (['mole_fraction', 'molecular_weights'], {}), '(mole_fraction, molecular_weights)\n', (5321, 5355), True, 'import numpy as np\n'), ((5451, 5505), 'numpy.divide', 'np.divide', (['molecular_weights', 'molecular_weight_mixture'], {}), '(molecular_weights, molecular_weight_mixture)\n', (5460, 5505), True, 'import numpy as np\n'), ((5589, 5640), 'numpy.multiply', 'np.multiply', (['mass_fractions', 'specific_gas_constants'], {}), '(mass_fractions, specific_gas_constants)\n', (5600, 5640), True, 'import numpy as np\n'), ((5675, 5684), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (5681, 5684), True, 'import numpy as np\n'), ((5917, 5955), 'numpy.sqrt', 'np.sqrt', (['(1.4 * R_mixture * temperature)'], {}), '(1.4 * R_mixture * temperature)\n', (5924, 5955), True, 'import numpy as np\n'), ((8527, 8571), 'numpy.float64', 'np.float64', (['(BLT[i] + L[i + 1] * (H - BLH[i]))'], {}), '(BLT[i] + L[i + 1] * (H - BLH[i]))\n', (8537, 8571), True, 'import numpy as np\n'), ((8976, 9015), 'numpy.float64', 'np.float64', (['(MolW * 0.001 * P / (R0 * T))'], {}), '(MolW * 0.001 * P / (R0 * T))\n', (8986, 9015), True, 'import numpy as np\n'), ((9025, 9092), 'numpy.float64', 'np.float64', (['(MolW * 0.001 / (2 ** 0.5 * np.pi * sig ** 2 * rho * Na))'], {}), '(MolW * 0.001 / (2 ** 0.5 * np.pi * sig ** 2 * rho * Na))\n', (9035, 9092), True, 'import numpy as np\n'), ((9111, 9157), 'numpy.float64', 'np.float64', (['(1.458e-06 * T ** 1.5 / (T + 110.4))'], {}), '(1.458e-06 * T ** 1.5 / (T + 110.4))\n', (9121, 9157), True, 'import numpy as np\n'), ((8045, 8098), 'numpy.exp', 'np.exp', (['(-GM_R[i + 1] * (BLH[i + 1] - BLH[i]) / BLT[i])'], {}), '(-GM_R[i + 1] * (BLH[i + 1] - BLH[i]) / BLT[i])\n', (8051, 8098), True, 'import numpy as np\n'), ((8844, 8940), 'numpy.float64', 'np.float64', (['(BLP[i] * ((BLT[i] + L[i + 1] * (H - BLH[i])) / BLT[i]) ** (-gmrtemp / L[i +\n 1]))'], {}), '(BLP[i] * ((BLT[i] + L[i + 1] * (H - BLH[i])) / BLT[i]) ** (-\n gmrtemp / L[i + 1]))\n', (8854, 8940), True, 'import numpy as np\n'), ((9219, 9245), 'numpy.sqrt', 'np.sqrt', (['(1.4 * 287.085 * T)'], {}), '(1.4 * 287.085 * T)\n', (9226, 9245), True, 'import numpy as np\n'), ((5720, 5730), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5727, 5730), True, 'import numpy as np\n'), ((8768, 8808), 'numpy.exp', 'np.exp', (['(-gmrtemp * (H - BLH[i]) / BLT[i])'], {}), '(-gmrtemp * (H - BLH[i]) / BLT[i])\n', (8774, 8808), True, 'import numpy as np\n')]
|
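A quick numeric check of the me2po formula above at the 86 km layer boundary, using the RE value commented out in US62_76: the geometric-to-geopotential correction there is roughly 1.15 km, and the result is close to the 84.852 km geopotential height tabulated in US76 (which uses a slightly smaller effective Earth radius):

RE = 6378.137e3          # m
Z = 86e3                 # geometric altitude, m
H = RE * Z / (RE + Z)    # same formula as me2po
print(H / 1e3)           # ~84.86 km
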
import numpy as np
import matplotlib.pyplot as plt
from nt_toolbox.signal import imageplot
def plot_levelset(Z, level=0, f=[]):
"""
f is supposed to be of the same shape as Z
"""
if len(f) == 0:
f = np.copy(Z)
n,p = np.shape(Z)
X,Y = np.meshgrid(np.arange(0,n),np.arange(0,p))
plt.contour(X, Y, Z,[level],linewidths=2, colors="red")
imageplot(f)
|
[
"numpy.copy",
"nt_toolbox.signal.imageplot",
"numpy.shape",
"matplotlib.pyplot.contour",
"numpy.arange"
] |
[((258, 269), 'numpy.shape', 'np.shape', (['Z'], {}), '(Z)\n', (266, 269), True, 'import numpy as np\n'), ((327, 384), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z', '[level]'], {'linewidths': '(2)', 'colors': '"""red"""'}), "(X, Y, Z, [level], linewidths=2, colors='red')\n", (338, 384), True, 'import matplotlib.pyplot as plt\n'), ((387, 399), 'nt_toolbox.signal.imageplot', 'imageplot', (['f'], {}), '(f)\n', (396, 399), False, 'from nt_toolbox.signal import imageplot\n'), ((228, 238), 'numpy.copy', 'np.copy', (['Z'], {}), '(Z)\n', (235, 238), True, 'import numpy as np\n'), ((292, 307), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (301, 307), True, 'import numpy as np\n'), ((307, 322), 'numpy.arange', 'np.arange', (['(0)', 'p'], {}), '(0, p)\n', (316, 322), True, 'import numpy as np\n')]
|
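A usage sketch for plot_levelset above: because np.meshgrid defaults to 'xy' indexing, X and Y come out with shape (p, n), so the contour call only lines up with Z for square arrays. The example below therefore uses a signed distance to a circle on a square grid (grid size and radius are arbitrary):

import numpy as np
import matplotlib.pyplot as plt

n = 100
x = np.arange(n)
X, Y = np.meshgrid(x, x)
Z = np.sqrt((X - n/2)**2 + (Y - n/2)**2) - 30  # negative inside the circle

plot_levelset(Z)  # red contour at the zero level, drawn over imageplot(Z)
plt.show()
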
import sys
from pathlib import Path
def document_glossary(outfile: str) -> None:
path_to_qcdb = Path("../qcdb").resolve().parent
sys.path.append(str(path_to_qcdb))
import qcdb
from qcdb.qcvars.glossary import qcvardefs
rst = []
rst.append(".. _`apdx:qcvariables_alpha`:")
rst.append("")
rst.append("QCVariables by Alpha")
rst.append("====================")
rst.append("")
for qcvar, info in sorted(qcvardefs.items()):
rst.append(f".. qcvar:: {qcvar}\n")
for line in info["glossary"].split("\n"):
if line.strip():
rst.append(f" {line.strip().replace('???', ' ')}")
rst.append(f" units: [{info['units']}]")
if "dimension" in info:
rst.append(f" dimension: [{info['dimension']}]")
rst.append("")
with open(outfile, "w") as fp:
fp.write("\n".join(rst))
# print("\n".join(rst))
if __name__ == "__main__":
document_glossary("source/autodoc_glossary_qcvars.rst")
|
[
"pathlib.Path",
"qcdb.qcvars.glossary.qcvardefs.items"
] |
[((446, 463), 'qcdb.qcvars.glossary.qcvardefs.items', 'qcvardefs.items', ([], {}), '()\n', (461, 463), False, 'from qcdb.qcvars.glossary import qcvardefs\n'), ((102, 117), 'pathlib.Path', 'Path', (['"""../qcdb"""'], {}), "('../qcdb')\n", (106, 117), False, 'from pathlib import Path\n')]
|
# python3
#
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example using TF Lite to classify objects with the Raspberry Pi camera."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import argparse
#import io
#import time
import numpy as np
#import picamera
from PIL import Image
from tflite_runtime.interpreter import Interpreter
#from datetime import datetime
#from time import sleep
def saveImageSimple(cropImage):
filePath = "./test/224.jpg"
cropImage.save(filePath, quality=100, subsampling=0)
print("saved", filePath) # log DEBUG
return True
def load_labels(path):
with open(path, 'r') as f:
return {i: line.strip() for i, line in enumerate(f.readlines())}
def set_input_tensor(interpreter, image):
tensor_index = interpreter.get_input_details()[0]['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
def classify_image(interpreter, image, top_k=1):
"""Returns a sorted array of classification results."""
set_input_tensor(interpreter, image)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = np.squeeze(interpreter.get_tensor(output_details['index']))
# If the model is quantized (uint8 data), then dequantize the results
if output_details['dtype'] == np.uint8:
scale, zero_point = output_details['quantization']
# "output" is list of probablilities, in the same order as labels are in dict.txt
output = scale * (output - zero_point)
# "ordered" is list of numbers that show the order of each probability in "output"
ordered = np.argpartition(-output, top_k)
# print("ordered ", ordered)
# print("output", output)
# best = ordered[0]
# all = [(labels[i], output[i]) for i in ordered[:top_k]]
# print(best, all)
return output
# return ordered # labels
# return output
def formatOutput(output, labels):
all = {}
labelNumber = 0
for i in output:
all[labels[labelNumber]] = i
labelNumber = labelNumber + 1
bestKey = max(all, key=lambda key: all[key])
bestVal = all[bestKey]
# print("best", best)
# TODO: return best key and value as second return value
return bestKey, bestVal, all
# Main function
def classify(cropFrame):
print("Here")
# width = 224
# height = 224
# Hardcoded args
# model = './models/tflite-plumps1_20210328/model.tflite'
# labels = './models/tflite-plumps1_20210328/dict.txt'
model = './models/tflite-plumps2_20210330/model.tflite'
labels = './models/tflite-plumps2_20210330/dict.txt'
# TODO: Do this only once, pass to the function?
labels = load_labels(labels)
interpreter = Interpreter(model)
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
cropImage = Image.fromarray(cropFrame)
cropImage = cropImage.resize((width, height), Image.ANTIALIAS)
# success = saveImageSimple(cropImage) # test
results = classify_image(interpreter, cropImage, 1)
# print("Results array ", results)
bestKey, bestVal, all = formatOutput(results, labels)
print("res: ", bestKey, bestVal, all)
# label_id, prob = results[0]
# print(labels[label_id], prob)
# return labels[label_id], prob
return bestKey, bestVal, all
|
[
"PIL.Image.fromarray",
"numpy.argpartition",
"tflite_runtime.interpreter.Interpreter"
] |
[((2208, 2239), 'numpy.argpartition', 'np.argpartition', (['(-output)', 'top_k'], {}), '(-output, top_k)\n', (2223, 2239), True, 'import numpy as np\n'), ((3261, 3279), 'tflite_runtime.interpreter.Interpreter', 'Interpreter', (['model'], {}), '(model)\n', (3272, 3279), False, 'from tflite_runtime.interpreter import Interpreter\n'), ((3402, 3428), 'PIL.Image.fromarray', 'Image.fromarray', (['cropFrame'], {}), '(cropFrame)\n', (3417, 3428), False, 'from PIL import Image\n')]
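A minimal driver sketch for the classify entry point above; it assumes the hard-coded model files exist, and the image path is hypothetical:
import numpy as np
from PIL import Image
frame = np.asarray(Image.open("test/example.jpg").convert("RGB"))  # hypothetical path
best_key, best_val, all_probs = classify(frame)
print(f"{best_key}: {best_val:.3f}")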
|
"""
Used to reduce long chains of simple conditional if-branches in code,
implementing factory-style routing similar to what Java Spring achieves
through application context lifecycle callbacks.
See the test under __main__ below for a usage example.
"""
import functools
def router(func):
"""
    Decorator for the entry method
    :param func: the entry method
    :return: the decorated method
"""
    # routing table
route_table = {}
@functools.wraps(func)
def wrapper(arg0, *args, **kwargs):
"""获取分支方法,获取失败则使用入口方法做兜底"""
try:
branch_func = route_table[arg0]
except KeyError:
pass
else:
return branch_func(arg0, *args, **kwargs)
return func(arg0, *args, **kwargs)
def route(key):
        # registers a concrete branch method in the routing table
def wrap(branch_func):
"""分支方法路由注册"""
if key in route_table:
raise ValueError(f'@route: ambiguous branch func for {key!r}')
route_table[key] = branch_func
return branch_func
return wrap
wrapper.route = route
return wrapper
if __name__ == '__main__':
# pylint: disable = E, W, R, C
@router
def fun(key):
raise ValueError(f'key error, key: {key}')
@fun.route(1)
def __fun1(key):
return 1 + key
@fun.route(2)
def __fun2(key):
return 2 + key
@fun.route(3)
@fun.route(4)
def __fun34(key):
return 3 + key
print(f'result:{fun(3)}')
print(f'result:{fun(5)}')
|
[
"functools.wraps"
] |
[((240, 261), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (255, 261), False, 'import functools\n')]
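The test above dispatches on integers; because the routing table is a plain dict, any hashable key works. A small sketch with string keys (all names below are made up):
@router
def handle(kind, payload):
    raise ValueError(f'no handler for {kind!r}')
@handle.route('text')
def _handle_text(kind, payload):
    return payload.upper()
@handle.route('json')
def _handle_json(kind, payload):
    import json
    return json.loads(payload)
print(handle('text', 'hello'))    # -> HELLO
print(handle('json', '{"a": 1}'))  # -> {'a': 1}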
|
import json
import traceback
from datetime import datetime, timedelta
from typing import List
from parse import parse
from anubis.models import db, Submission, Assignment, Course
from anubis.utils.data import with_context
from anubis.utils.lms.autograde import bulk_autograde
from anubis.utils.lms.submissions import init_submission
from anubis.utils.services.github import fix_github_broken_repos
from anubis.utils.services.logger import logger
from anubis.utils.services.rpc import enqueue_ide_reap_stale, enqueue_autograde_pipeline
def reap_stale_submissions():
"""
    This will find stale submissions and set them to processed. A stale
    submission is one that has not been updated in 60 minutes and is still
in a processing state.
Flask app context is required before calling this function.
:return:
"""
print("Reaping stale submissions")
# Find and update stale submissions
Submission.query.filter(
Submission.last_updated < datetime.now() - timedelta(minutes=60),
Submission.processed == False,
Submission.state != 'regrading',
).update({
'processed': True,
'state': "Reaped after timeout",
}, False)
# Commit any changes
db.session.commit()
def reap_recent_assignments():
"""
    Re-process recent assignments: initialize and re-enqueue any submission
    that never built, then bulk autograde (recalculate stats for) each
    assignment.
:return:
"""
from anubis.config import config
recent_assignments = Assignment.query.filter(
Assignment.release_date > datetime.now(),
Assignment.due_date > datetime.now() - config.STATS_REAP_DURATION,
).all()
print(json.dumps({
'reaping assignments:': [assignment.data for assignment in recent_assignments]
}, indent=2))
for assignment in recent_assignments:
for submission in Submission.query.filter(
Submission.assignment_id == assignment.id,
Submission.build == None,
).all():
if submission.build is None:
init_submission(submission)
enqueue_autograde_pipeline(submission.id)
for assignment in recent_assignments:
bulk_autograde(assignment.id)
def reap_broken_repos():
"""
For reasons not clear to me yet, the webhooks are sometimes missing
on the first commit. The result is that repos will be created on
github without anubis seeing them.
This function should be the fix for this. It will call out to the
github api to list all the repos under the organization then try to
create repos for each listed repo.
:return:
"""
# Pull all courses
courses: List[Course] = Course.query.all()
# Iterate over all course attempting to fix issues
# on each github org.
for course in courses:
# Get the admin specified github org url
org_url = (course.github_org_url or '').rstrip('/')
# Try to parse out the org name from the expected structure
# of the org url.
match = parse('https://github.com/{}', org_url)
# If a match for the org name was not found, then we note in the logs and continue
if match is None:
logger.info('Could not find org_name for reaper.reap_broken_repos')
continue
# Get the org_name from the matches values
org_name = match[0]
# Attempt to fix any broken or lost repos for the course org.
try:
fix_github_broken_repos(org_name)
except Exception as e:
logger.error('reaper.reap_broken_repos failed', org_name, e)
logger.error(traceback.format_exc())
logger.error('continuing')
continue
@with_context
def reap():
# Enqueue a job to reap stale ide k8s resources
enqueue_ide_reap_stale()
# Reap the stale submissions
reap_stale_submissions()
# Reap broken repos
reap_broken_repos()
# Reap broken submissions in recent assignments
reap_recent_assignments()
if __name__ == "__main__":
print("")
print("""
___
/ \\\\
/\\\\ | . . \\\\
////\\\\| ||
//// \\\\\\ ___//\\
/// \\\\ \\
/// |\\\\ |
// | \\\\ \\ \\
/ | \\\\ \\ \\
| \\\\ / /
| \\/ /
| \\\\/|
| \\\\|
| \\\\
| |
|_________\\
""")
reap()
|
[
"anubis.utils.services.github.fix_github_broken_repos",
"anubis.utils.lms.submissions.init_submission",
"anubis.models.Submission.query.filter",
"json.dumps",
"anubis.utils.services.logger.logger.info",
"anubis.utils.services.rpc.enqueue_autograde_pipeline",
"anubis.utils.services.rpc.enqueue_ide_reap_stale",
"anubis.models.db.session.commit",
"anubis.utils.services.logger.logger.error",
"traceback.format_exc",
"anubis.utils.lms.autograde.bulk_autograde",
"datetime.timedelta",
"datetime.datetime.now",
"parse.parse",
"anubis.models.Course.query.all"
] |
[((1232, 1251), 'anubis.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1249, 1251), False, 'from anubis.models import db, Submission, Assignment, Course\n'), ((2619, 2637), 'anubis.models.Course.query.all', 'Course.query.all', ([], {}), '()\n', (2635, 2637), False, 'from anubis.models import db, Submission, Assignment, Course\n'), ((3733, 3757), 'anubis.utils.services.rpc.enqueue_ide_reap_stale', 'enqueue_ide_reap_stale', ([], {}), '()\n', (3755, 3757), False, 'from anubis.utils.services.rpc import enqueue_ide_reap_stale, enqueue_autograde_pipeline\n'), ((1594, 1700), 'json.dumps', 'json.dumps', (["{'reaping assignments:': [assignment.data for assignment in recent_assignments]\n }"], {'indent': '(2)'}), "({'reaping assignments:': [assignment.data for assignment in\n recent_assignments]}, indent=2)\n", (1604, 1700), False, 'import json\n'), ((2118, 2147), 'anubis.utils.lms.autograde.bulk_autograde', 'bulk_autograde', (['assignment.id'], {}), '(assignment.id)\n', (2132, 2147), False, 'from anubis.utils.lms.autograde import bulk_autograde\n'), ((2967, 3006), 'parse.parse', 'parse', (['"""https://github.com/{}"""', 'org_url'], {}), "('https://github.com/{}', org_url)\n", (2972, 3006), False, 'from parse import parse\n'), ((3137, 3204), 'anubis.utils.services.logger.logger.info', 'logger.info', (['"""Could not find org_name for reaper.reap_broken_repos"""'], {}), "('Could not find org_name for reaper.reap_broken_repos')\n", (3148, 3204), False, 'from anubis.utils.services.logger import logger\n'), ((3402, 3435), 'anubis.utils.services.github.fix_github_broken_repos', 'fix_github_broken_repos', (['org_name'], {}), '(org_name)\n', (3425, 3435), False, 'from anubis.utils.services.github import fix_github_broken_repos\n'), ((1781, 1878), 'anubis.models.Submission.query.filter', 'Submission.query.filter', (['(Submission.assignment_id == assignment.id)', '(Submission.build == None)'], {}), '(Submission.assignment_id == assignment.id, \n Submission.build == None)\n', (1804, 1878), False, 'from anubis.models import db, Submission, Assignment, Course\n'), ((1981, 2008), 'anubis.utils.lms.submissions.init_submission', 'init_submission', (['submission'], {}), '(submission)\n', (1996, 2008), False, 'from anubis.utils.lms.submissions import init_submission\n'), ((2025, 2066), 'anubis.utils.services.rpc.enqueue_autograde_pipeline', 'enqueue_autograde_pipeline', (['submission.id'], {}), '(submission.id)\n', (2051, 2066), False, 'from anubis.utils.services.rpc import enqueue_ide_reap_stale, enqueue_autograde_pipeline\n'), ((3479, 3539), 'anubis.utils.services.logger.logger.error', 'logger.error', (['"""reaper.reap_broken_repos failed"""', 'org_name', 'e'], {}), "('reaper.reap_broken_repos failed', org_name, e)\n", (3491, 3539), False, 'from anubis.utils.services.logger import logger\n'), ((3601, 3627), 'anubis.utils.services.logger.logger.error', 'logger.error', (['"""continuing"""'], {}), "('continuing')\n", (3613, 3627), False, 'from anubis.utils.services.logger import logger\n'), ((1480, 1494), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1492, 1494), False, 'from datetime import datetime, timedelta\n'), ((3565, 3587), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3585, 3587), False, 'import traceback\n'), ((985, 999), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (997, 999), False, 'from datetime import datetime, timedelta\n'), ((1002, 1023), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (1011, 1023), False, 'from datetime import datetime, timedelta\n'), ((1526, 1540), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1538, 1540), False, 'from datetime import datetime, timedelta\n')]
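The staleness cutoff in reap_stale_submissions reduces to a plain time comparison; a standalone sketch of that predicate (the 60-minute window mirrors the query above):
from datetime import datetime, timedelta
def is_stale(last_updated: datetime, processed: bool, state: str) -> bool:
    # Mirrors the Submission filter: older than 60 minutes, not yet
    # processed, and not currently being regraded.
    cutoff = datetime.now() - timedelta(minutes=60)
    return last_updated < cutoff and not processed and state != 'regrading'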
|
import glob
import itertools
import os
import pickle # For saving the vocabulary.
import re # For regex.
from collections import Counter
from functools import partial
from nltk.tokenize import TweetTokenizer
# Local imports.
from twitter_analysis_tools.text import ngrams, stopwords
from twitter_analysis_tools.twitter import common_pipelines, file_mgmt
def get_vocab_file_substring(include_retweets, max_ngram_len):
"""Get the substring with parameters for the saved data filename.
>>> get_vocab_file_substring(False, 2)
'vocab-retweets-False-ngrams-1-to-2'
"""
return "-".join(
[
"vocab",
"retweets",
str(include_retweets),
"ngrams",
str(1),
"to",
str(max_ngram_len),
]
)
class Vocabulary:
"""Class corresponding to a vocabulary with given parameters."""
def __init__(self, data_dir="", include_retweets=True, max_ngram_len=2):
"""Store parameters for vocabulary instance."""
self.data_dir = data_dir
self.include_retweets = include_retweets
self.max_ngram_len = max_ngram_len
self.vocab_file_substring = get_vocab_file_substring(
include_retweets, max_ngram_len
)
def build_vocabulary_for_file(self, filepath, save_vocab=False):
"""Get the vocabulary for each tweet file in filepaths.
Args:
filepath (str): The file containing the tweet data.
include_retweets (bool): Whether retweets should be included.
max_ngram_len (int): The max length of ngrams to include.
save_vocab (bool): Whether to save the vocabulary.
"""
# Parameters to save with vocabulary.
vocab_dict = {
"include_retweets": self.include_retweets,
"stopwords": stopwords.stopwords,
"max_ngram_len": self.max_ngram_len,
}
# Get text from English tweets.
tweet_terms = common_pipelines.get_tweet_text_pipeline(
filepath, self.include_retweets
)
# Tokenize tweets.
tokenizer = TweetTokenizer(preserve_case=False)
tweet_terms.add_map(tokenizer.tokenize)
# Remove stopwords.
tweet_terms.add_map(stopwords.remove_stopword_tokens)
# Collect ngrams from the tokens for each tweet.
tweet_terms.add_map(partial(ngrams.get_ngrams, self.max_ngram_len))
# Flatten tokens to single list.
terms = itertools.chain.from_iterable(tweet_terms)
        # Build vocabulary of terms and count occurrences.
term_counts = Counter(terms)
# Save vocabulary.
vocab_dict["term_counts"] = term_counts
if save_vocab:
# Pickle the vocabulary and associated data.
filepath = re.sub(
"-id-", "-{}-".format(self.vocab_file_substring), filepath
)
filepath = re.sub(".jsonl.gz", ".pickle", filepath)
with open(filepath, "wb") as file:
pickle.dump(vocab_dict, file)
return vocab_dict
def vocab_exists(self, tweets_filepath):
"""Return whether the vocab for filepath already exists.
Args:
filepath: filepath for data file.
"""
date_hour = file_mgmt.extract_date_hour(tweets_filepath)
year_month = file_mgmt.year_month_from_date_hour(date_hour)
# Form of the corresponding vocab file.
vocab_file_form = "{}/*{}-{}.pickle".format(
year_month, self.vocab_file_substring, date_hour
)
match_vocab_file = os.path.join(self.data_dir, vocab_file_form)
# Return True if a matching vocabulary file exists.
if glob.glob(match_vocab_file, recursive=True):
return True
return False
def vocab_does_not_exist(self, tweets_filepath):
"""Return whether the vocab for filepath does not already exist."""
return not self.vocab_exists(tweets_filepath)
|
[
"functools.partial",
"pickle.dump",
"twitter_analysis_tools.twitter.file_mgmt.year_month_from_date_hour",
"os.path.join",
"twitter_analysis_tools.twitter.file_mgmt.extract_date_hour",
"collections.Counter",
"nltk.tokenize.TweetTokenizer",
"glob.glob",
"twitter_analysis_tools.twitter.common_pipelines.get_tweet_text_pipeline",
"itertools.chain.from_iterable",
"re.sub"
] |
[((1993, 2066), 'twitter_analysis_tools.twitter.common_pipelines.get_tweet_text_pipeline', 'common_pipelines.get_tweet_text_pipeline', (['filepath', 'self.include_retweets'], {}), '(filepath, self.include_retweets)\n', (2033, 2066), False, 'from twitter_analysis_tools.twitter import common_pipelines, file_mgmt\n'), ((2137, 2172), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {'preserve_case': '(False)'}), '(preserve_case=False)\n', (2151, 2172), False, 'from nltk.tokenize import TweetTokenizer\n'), ((2504, 2546), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['tweet_terms'], {}), '(tweet_terms)\n', (2533, 2546), False, 'import itertools\n'), ((2628, 2642), 'collections.Counter', 'Counter', (['terms'], {}), '(terms)\n', (2635, 2642), False, 'from collections import Counter\n'), ((3308, 3352), 'twitter_analysis_tools.twitter.file_mgmt.extract_date_hour', 'file_mgmt.extract_date_hour', (['tweets_filepath'], {}), '(tweets_filepath)\n', (3335, 3352), False, 'from twitter_analysis_tools.twitter import common_pipelines, file_mgmt\n'), ((3374, 3420), 'twitter_analysis_tools.twitter.file_mgmt.year_month_from_date_hour', 'file_mgmt.year_month_from_date_hour', (['date_hour'], {}), '(date_hour)\n', (3409, 3420), False, 'from twitter_analysis_tools.twitter import common_pipelines, file_mgmt\n'), ((3621, 3665), 'os.path.join', 'os.path.join', (['self.data_dir', 'vocab_file_form'], {}), '(self.data_dir, vocab_file_form)\n', (3633, 3665), False, 'import os\n'), ((3738, 3781), 'glob.glob', 'glob.glob', (['match_vocab_file'], {'recursive': '(True)'}), '(match_vocab_file, recursive=True)\n', (3747, 3781), False, 'import glob\n'), ((2398, 2444), 'functools.partial', 'partial', (['ngrams.get_ngrams', 'self.max_ngram_len'], {}), '(ngrams.get_ngrams, self.max_ngram_len)\n', (2405, 2444), False, 'from functools import partial\n'), ((2943, 2983), 're.sub', 're.sub', (['""".jsonl.gz"""', '""".pickle"""', 'filepath'], {}), "('.jsonl.gz', '.pickle', filepath)\n", (2949, 2983), False, 'import re\n'), ((3047, 3076), 'pickle.dump', 'pickle.dump', (['vocab_dict', 'file'], {}), '(vocab_dict, file)\n', (3058, 3076), False, 'import pickle\n')]
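A minimal usage sketch for the Vocabulary class above; the data directory and the tweet file path are hypothetical (the '-id-' infix matches the re.sub in build_vocabulary_for_file):
vocab = Vocabulary(data_dir="data/vocabs", include_retweets=False, max_ngram_len=2)
tweets_file = "data/2020-01/tweets-id-2020-01-21-22.jsonl.gz"  # hypothetical path
if vocab.vocab_does_not_exist(tweets_file):
    vocab_dict = vocab.build_vocabulary_for_file(tweets_file, save_vocab=True)
    print(vocab_dict["term_counts"].most_common(10))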
|
"""
Context class for the pushing task as used in the paper
"How to Train Your Differentiable Filter".
"""
# this code only works with tensorflow 1
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_probability as tfp
import numpy as np
import os
import csv
from matplotlib.patches import Ellipse
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
import pickle
from differentiable_filters.contexts import paper_base_context as base
from differentiable_filters.utils.base_layer import BaseLayer
from differentiable_filters.utils import recordio as tfr
from differentiable_filters.utils import push_utils as utils
from differentiable_filters.utils import tensorflow_compatability as compat
class Context(base.PaperBaseContext):
def __init__(self, param, mode):
"""
Context class for the pushing task as used in the paper.
Parameters
----------
param : dict
A dictionary of arguments
mode : string
determines which parts of the model are trained. Use "filter" for
the whole model, "pretrain_obs" for pretraining the observation
related functions of the context in isolation or "pretrain_proc"
for pretrainign the process-related functions of the context.
"""
base.PaperBaseContext.__init__(self, param, mode)
if 'normalize' in param.keys():
self.normalize = param['normalize']
else:
self.normalize = 'layer'
# the state size
self.dim_x = 10
self.dim_u = 2
self.dim_z = 8
# dimension names
self.x_names = ['x', 'y', 'theta', 'l', 'mu', 'rx', 'ry', 'nx', 'ny',
's']
self.z_names = ['x', 'y', 'theta', 'rx', 'ry', 'nx', 'ny', 's']
# load the points on the outline of the butter object for visualization
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
with open(os.path.join(path, 'resources',
'butter_points.pkl'), 'rb') as bf:
butter_points = pickle.load(bf)
self.butter_points = np.array(butter_points)
# define initial values for the process noise q and observation noise r
# diagonals
# Important: All values are standard-deviations, so they are
# squared for forming the covariance matrices
if param['q_diag'] is not None:
cov_string = param['q_diag']
cov = list(map(lambda x: float(x), cov_string.split(' ')))
self.q_diag = np.array(cov).astype(np.float32)
else:
self.q_diag = np.ones((self.dim_x)).astype(np.float32)
self.q_diag = self.q_diag.astype(np.float32) / self.scale
if param['r_diag'] is not None:
cov_string = param['r_diag']
cov = list(map(lambda x: float(x), cov_string.split(' ')))
self.r_diag = np.array(cov).astype(np.float32)
else:
self.r_diag = np.ones((self.dim_z)).astype(np.float32)
self.r_diag = self.r_diag.astype(np.float32) / self.scale
# if the noise matrices are not learned, we construct the fixed
# covariance matrices here
q = np.diag(np.square(self.q_diag))
self.Q = tf.convert_to_tensor(q, dtype=tf.float32)
r = np.diag(np.square(self.r_diag))
self.R = tf.convert_to_tensor(r, dtype=tf.float32)
# for state in mm/deg,
# c = np.array([50, 50, 1e-2, 5, 5, 50, 50, 0.5, 0.5, 0.5])
self.noise_list = \
[np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
np.array([49.8394116, -2.3510439, 0, 2.5196417, 1.93745247,
27.6656989, 67.1287098, 0.03124815, -0.18917632,
-0.14730855]),
np.array([27.9914853, -30.3366791, 0, -4.6963326, -2.96631439,
3.6698755, -14.5376077, -0.49956926, 0.56362964,
0.54478971])]
for i, n in enumerate(self.noise_list):
self.noise_list[i] = n.astype(np.float32)
if mode == 'filter':
train_sensor_model = param['train_sensor_model']
train_process_model = param['train_process_model']
train_q = param['train_q']
train_r = param['train_r']
if param['filter'] == 'lstm':
train_process_model = False
train_q = False
train_r = False
# tensorflow does not allow summaries inside rnn-loops
summary = False
else:
train_sensor_model = True
train_process_model = True
train_q = True
train_r = True
summary = True
# all layers used in the context need to be instantiated here, but we
# cannot instantiate layers that will not be used
if mode == 'filter' or mode == 'pretrain_obs':
            # don't train the segmentation model if we use a pretrained
# sensor network
self.segmentation_layer = \
SegmentationLayer(self.batch_size, self.normalize, summary,
train_sensor_model)
self.sensor_model_layer = \
SensorLayer(self.batch_size, self.normalize, self.scale,
summary, train_sensor_model)
self.observation_model_layer = ObservationModel(self.dim_z,
self.batch_size)
# group the layers for easier checkpoint restoring
self.observation_models = {'sensor': [self.segmentation_layer,
self.sensor_model_layer],
'obs': self.observation_model_layer}
self.update_ops += self.segmentation_layer.updateable
self.update_ops += self.sensor_model_layer.updateable
else:
self.observation_models = {}
lstm_no_noise = param['filter'] == 'lstm' and \
not param['lstm_structure'] == 'full'
self.observation_noise_models = {}
if param['learn_r'] and param['hetero_r'] and \
param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_hetero_diag = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale,
hetero=True, diag=True, trainable=train_r,
summary=summary)
self.observation_noise_models['het_diag'] = \
self.observation_noise_hetero_diag
if param['learn_r'] and param['hetero_r'] and \
not param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_hetero_full = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale, hetero=True, diag=False,
trainable=train_r, summary=summary)
self.observation_noise_models['het_full'] = \
self.observation_noise_hetero_full
if param['learn_r'] and not param['hetero_r'] and \
param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_const_diag = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale, hetero=False, diag=True,
trainable=train_r, summary=summary)
self.observation_noise_models['const_diag'] = \
self.observation_noise_const_diag
if param['learn_r'] and not param['hetero_r'] and \
not param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_const_full = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale, hetero=False, diag=False,
trainable=train_r, summary=summary)
self.observation_noise_models['const_full'] = \
self.observation_noise_const_full
if param['learned_likelihood'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.likelihood_layer = Likelihood(self.dim_z, trainable=train_r,
summary=summary)
self.observation_noise_models['like'] = self.likelihood_layer
self.process_models = {}
lstm_unstructured = param['filter'] == 'lstm' and \
(param['lstm_structure'] == 'none' or
param['lstm_structure'] == 'lstm' or
param['lstm_structure'] == 'lstm1')
if mode == 'filter' and not lstm_unstructured and \
param['learn_process'] or mode == 'pretrain_process':
self.process_model_learned_layer = \
ProcessModel(self.batch_size, self.dim_x, self.scale,
learned=True, jacobian=param['filter'] == 'ekf',
trainable=train_process_model, summary=summary)
self.process_models['learned'] = self.process_model_learned_layer
if mode == 'filter' and not lstm_unstructured and \
not param['learn_process'] or mode == 'pretrain_process':
self.process_model_analytical_layer = \
ProcessModel(self.batch_size, self.dim_x, self.scale,
learned=False, jacobian=param['filter'] == 'ekf',
trainable=train_process_model, summary=summary)
self.process_models['ana'] = self.process_model_analytical_layer
self.process_noise_models = {}
process_noise = (param['learn_q'] and not lstm_no_noise and
mode == 'filter')
if process_noise and param['learn_process'] and param['hetero_q'] and \
param['diagonal_covar'] or mode == 'pretrain_process':
self.process_noise_hetero_diag_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=True, learned=True,
trainable=train_q, summary=summary)
self.process_noise_models['het_diag_lrn'] = \
self.process_noise_hetero_diag_lrn
if process_noise and param['learn_process'] and param['hetero_q'] and \
not param['diagonal_covar'] or mode == 'pretrain_process':
self.process_noise_hetero_full_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=False, learned=True,
trainable=train_q, summary=summary)
self.process_noise_models['het_full_lrn'] = \
self.process_noise_hetero_full_lrn
if process_noise and param['learn_process'] and \
not param['hetero_q'] and param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_diag_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=True, learned=True,
trainable=train_q, summary=summary)
self.process_noise_models['const_diag_lrn'] = \
self.process_noise_const_diag_lrn
if process_noise and param['learn_process'] and \
not param['hetero_q'] and not param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_full_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=False,
learned=True, trainable=train_q, summary=summary)
self.process_noise_models['const_full_lrn'] = \
self.process_noise_const_full_lrn
if process_noise and not param['learn_process'] and \
param['hetero_q'] and param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_hetero_diag_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=True, learned=False,
trainable=train_q, summary=summary)
self.process_noise_models['het_diag_ana'] = \
self.process_noise_hetero_diag_ana
if process_noise and not param['learn_process'] and \
param['hetero_q'] and not param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_hetero_full_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=False,
learned=False, trainable=train_q, summary=summary)
self.process_noise_models['het_full_ana'] = \
self.process_noise_hetero_full_ana
if process_noise and not param['learn_process'] and \
not param['hetero_q'] and param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_diag_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=True,
learned=False, trainable=train_q, summary=summary)
self.process_noise_models['const_diag_ana'] = \
self.process_noise_const_diag_ana
if process_noise and not param['learn_process'] and \
not param['hetero_q'] and not param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_full_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=False,
learned=False, trainable=train_q, summary=summary)
self.process_noise_models['const_full_ana'] = \
self.process_noise_const_full_ana
###########################################################################
# observation models
###########################################################################
def run_sensor_model(self, raw_observations, training):
"""
Process raw observations and return an encoding and
predicted observations z for the filter
"""
images, tip_pos, tip_pos_pix, tip_end_pix, start_glimpse = \
raw_observations
seg_out, pix = self.segmentation_layer(images, training)
z, enc = self.sensor_model_layer([images, tip_pos, tip_pos_pix,
tip_end_pix, start_glimpse] +
seg_out, training)
enc = list(enc) + [pix]
return z, enc
def run_process_model(self, old_state, action, learned, training):
"""
        Predict the next state from the old state and the action, and return
        the Jacobian
"""
if learned:
new_state, F = \
self.process_model_learned_layer([old_state, action, self.ob],
training)
else:
new_state, F = \
self.process_model_analytical_layer([old_state, action,
self.ob], training)
new_state = self.correct_state(new_state, diff=False)
return new_state, F
def get_initial_glimpse(self, image, training):
"""
Process the observations for the initial state and return a segmented
glimpse of the object in its initial position
"""
seg_out, pix = self.segmentation_layer(image, training)
mask, pos, glimpse_rot = seg_out
return glimpse_rot, pix, mask
def initial_from_observed(self, base_state, init_z, base_covar, init_R):
state = tf.concat([init_z[:, :3], base_state[:, 3:5], init_z[:, 3:]],
axis=-1)
covar = \
tf.concat([tf.concat([base_covar[:, :3, :3], init_R[:, :3, :3]],
axis=-1),
base_covar[:, 3:5, :],
tf.concat([base_covar[:, 5:, 5:], init_R[:, 3:, 3:]],
axis=-1)],
axis=1)
return state, covar
###########################################################################
# loss functions
###########################################################################
def get_filter_loss(self, prediction, label, step, training):
"""
Compute the loss for the filtering application - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: boolean tensor, indicates if we compute a loss for
training or testing
Returns:
loss: the total loss for training the filtering application
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
particles, weights, states, covars, init_s, init_c, z, r, q = \
prediction
states = tf.reshape(states, [self.batch_size, -1, self.dim_x])
covars = tf.reshape(covars, [self.batch_size, -1, self.dim_x,
self.dim_x])
seq_label, mv_tr, mv_rot, vis = label
diff = seq_label - states
diff = self.correct_state(diff)
# get the likelihood
if self.param['filter'] == 'pf' and self.param['mixture_likelihood']:
num = particles.get_shape()[2].value
seq_label_tiled = tf.tile(seq_label[:, :, None, :], [1, 1, num, 1])
particle_diff = self.correct_state(seq_label_tiled - particles)
likelihood = self._mixture_likelihood(particle_diff, weights)
else:
likelihood = self._likelihood(diff, covars, reduce_mean=False)
# compensate for scaling
offset = tf.ones_like(likelihood)*tf.math.log(self.scale)*2*self.dim_x
likelihood += 0.5 * offset
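        # Derivation note (added): dividing the state by `scale` shrinks the
        # covariance by scale**2, so log|Sigma| drops by 2*dim_x*log(scale)
        # and the negative log-likelihood by dim_x*log(scale); adding
        # 0.5 * offset = dim_x*log(scale) restores comparability across scales.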
# compute the errors of the predicted states
total_mse, total_dist = self._mse(diff, reduce_mean=False)
total_mse *= self.scale**2
total_dist *= self.scale
# compute component-wise distances
dists = []
for i in range(self.dim_x):
_, dist = self._mse(diff[:, :, i:i+1], reduce_mean=False)
dists += [dist*self.scale]
# position and orientation error
_, dist_tr = self._mse(diff[:, :, 0:2], reduce_mean=False)
_, dist_rot = self._mse(diff[:, :, 2:3], reduce_mean=False)
# compute the error in the predicted observations (only for monitoring)
diff_obs = tf.concat([seq_label[:, :, :3] - z[:, :, 0:3],
seq_label[:, :, 5:] - z[:, :, 3:]], axis=-1)
diff_obs = self.correct_observation_diff(diff_obs)
# rsme
_, dist_ob = self._mse(diff_obs, reduce_mean=False)
dist_ob *= self.scale
# component-wise
dist_obs = []
for i in range(self.dim_z):
_, dist = self._mse(diff_obs[:, :, i:i+1], reduce_mean=False)
dist = dist*self.scale
dist_obs += [dist]
# compute the correlation between predicted observation noise and
# the number of visible object pixels
# this only makes sense for the heteroscedastic noise
diag_r = tf.linalg.diag_part(r)
diag_r = tf.sqrt(tf.abs(diag_r + 1e-5))
diag_r = tf.reshape(diag_r, [-1, self.dim_z])
corr = []
for i in range(self.dim_z):
corr += \
[tfp.stats.correlation(diag_r[:, i:i+1],
tf.reshape(vis, [-1, 1]),
sample_axis=0, event_axis=-1)]
corr_r = tf.add_n(corr)/self.dim_z
# correlation between noise and contact
corr_r_cont = []
for i in range(self.dim_z):
crs = \
tfp.stats.correlation(diag_r[:, i:i+1],
tf.reshape(seq_label[:, :, 9:], [-1, 1]),
sample_axis=0, event_axis=-1)
corr_r_cont += [crs]
corr_r_cont = tf.add_n(corr_r_cont)/self.dim_z
# same for q
diag_q = tf.linalg.diag_part(q)
diag_q = tf.sqrt(tf.abs(diag_q + 1e-5))
diag_q = tf.reshape(diag_q, [-1, self.dim_x])
corr_q = []
for i in range(self.dim_x-1):
cqs = \
tfp.stats.correlation(diag_q[:, i:i+1],
tf.reshape(seq_label[:, :, 9:], [-1, 1]),
sample_axis=0, event_axis=-1)
corr_q += [cqs]
corr_q = tf.add_n(corr_q)/(self.dim_x-1)
# compute the output metric
m_per_tr, deg_per_deg = \
self._output_loss(states[:, :, :3], seq_label[:, :, :3],
mv_tr, mv_rot)
tf.summary.scalar('out/m_per_tr', m_per_tr)
tf.summary.scalar('out/deg_per_deg', deg_per_deg)
tf.summary.scalar('out/tr_total', tf.reduce_mean(mv_tr))
tf.summary.scalar('out/rot_total', tf.reduce_mean(mv_rot))
tf.summary.scalar('out/tr_error', tf.reduce_mean(dist_tr))
tf.summary.scalar('out/rot_error', tf.reduce_mean(dist_rot))
# get the weight decay
wd = []
for la in self.observation_models.values():
wd += la.losses
for la in self.observation_noise_models.values():
wd += la.losses
for la in self.process_models.values():
wd += la.losses
for la in self.process_noise_models.values():
wd += la.losses
wd = tf.add_n(wd)
# add a bias to all losses that use the likelihood, to set off
# possible negative values of the likelihood
total_tracking = tf.reduce_mean(total_mse)
total_obs = tf.reduce_mean(dist_ob)
if self.loss == 'like':
total_loss = tf.reduce_mean(likelihood)
elif self.loss == 'error':
total_loss = total_tracking
elif self.loss == 'mixed':
total_loss = (total_tracking + tf.reduce_mean(likelihood)) / 2.
elif self.loss == 'mixed_error':
total_loss = total_tracking * 0.75 + \
tf.reduce_mean(likelihood) * 0.25
elif self.loss == 'mixed_like':
total_loss = total_tracking * 0.25 + \
tf.reduce_mean(likelihood) * 0.75
elif self.loss == 'mixed_curr':
total_loss = tf.cond(tf.less(step, self.epoch_size * 3),
lambda: total_tracking,
lambda: tf.reduce_mean(likelihood))
if self.loss == 'mixed_curr':
total_loss_val = tf.reduce_mean(likelihood)
else:
total_loss_val = total_loss
if self.loss != 'error':
total_loss_val += 1000
total = tf.cond(training,
lambda: total_loss + wd, lambda: total_loss_val)
# add summaries
tf.summary.scalar('loss/total', total)
tf.summary.scalar('loss/wd', wd)
tf.summary.scalar('loss/likelihood', tf.reduce_mean(likelihood))
tf.summary.scalar('loss/tracking', total_tracking)
tf.summary.scalar('loss/observations', total_obs)
tf.summary.scalar('loss/corr_r_vis', tf.squeeze(corr_r))
tf.summary.scalar('loss/corr_r_cont', tf.squeeze(corr_r_cont))
tf.summary.scalar('loss/corr_q_cont', tf.squeeze(corr_q))
for i, name in enumerate(self.x_names):
tf.summary.scalar('tracking_loss/' + name,
tf.reduce_mean(dists[i]))
for i, name in enumerate(self.z_names):
tf.summary.scalar('observation_loss/' + name,
tf.reduce_mean(dist_obs[i]))
return total, [likelihood, total_dist, dist_ob, total_mse,
dist_tr, dist_rot, m_per_tr, deg_per_deg, vis,
seq_label[:, :, 9], diag_r, diag_q, wd] +\
dists, ['likelihood', 'dist', 'dist_obs', 'mse', 'dist_tr',
'dist_rot', 'm_tr', 'deg_rot', 'vis', 'cont', 'r_pred',
'q_pred', 'wd'] + \
self.x_names
def _output_loss(self, pred, label, mv_tr, mv_rot):
endpoint_error = self._compute_sq_distance(pred[:, -1, 0:2],
label[:, -1, 0:2])
endpoint_error_rot = self._compute_sq_distance(pred[:, -1, 2:3],
label[:, -1, 2:3], True)
m_per_tr = tf.where(tf.greater(mv_tr, 0),
endpoint_error**0.5/mv_tr, endpoint_error)
deg_per_deg = tf.where(tf.greater(mv_rot, 0),
endpoint_error_rot**0.5/mv_rot,
endpoint_error_rot)
return tf.reduce_mean(m_per_tr), tf.reduce_mean(deg_per_deg)
def _compute_sq_distance(self, pred, label, rotation=False):
diff = pred - label
if rotation:
diff = self._adapt_orientation(diff, self.ob, 1)
diff = tf.square(diff)
diff = tf.reduce_sum(diff, axis=-1)
diff = tf.where(tf.greater(diff, 0), tf.sqrt(diff), diff)
return diff
def get_observation_loss(self, prediction, labels, step, training):
"""
Compute the loss for the observation functions - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: are we doing training or validation
Returns:
loss: the total loss for training the observation preprocessing
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
z, pix_pred, seg_pred, initial_pix_pred, initial_seg_pred, \
R_const_diag, R_const_tri, R_het_diag, R_het_tri, \
like_good, like_bad = prediction
label, pix_pos, initial_pix_pos, seg, initial_seg, vis = labels
diff = self.correct_observation_diff(label - z)
likelihood_const_diag = self._likelihood(tf.stop_gradient(diff),
R_const_diag,
reduce_mean=False)
likelihood_const_tri = self._likelihood(tf.stop_gradient(diff),
R_const_tri,
reduce_mean=False)
likelihood_het_diag = self._likelihood(diff, R_het_diag,
reduce_mean=False)
likelihood_het_tri = self._likelihood(diff, R_het_tri,
reduce_mean=False)
likelihood = (likelihood_const_diag + likelihood_const_tri +
likelihood_het_diag + likelihood_het_tri) / 4.
# compute the correlation between predicted observation noise and
# the number of visible object pixels
# this only makes sense for the heteroscedastic noise
diag_r_het_diag = tf.linalg.diag_part(R_het_diag)
diag_r_het_diag = tf.sqrt(tf.abs(diag_r_het_diag + 1e-5))
diag_r_het_diag = tf.reshape(diag_r_het_diag, [-1, self.dim_z])
diag_r_het_tri = tf.linalg.diag_part(R_het_tri)
diag_r_het_tri = tf.sqrt(tf.abs(diag_r_het_tri + 1e-5))
diag_r_het_tri = tf.reshape(diag_r_het_tri, [-1, self.dim_z])
corr_diag = []
corr_full = []
for i in range(self.dim_z):
corr_diag += \
[tfp.stats.correlation(diag_r_het_diag[:, i:i+1],
tf.reshape(vis, [-1, 1]),
sample_axis=0, event_axis=-1)]
corr_full += \
[tfp.stats.correlation(diag_r_het_tri[:, i:i+1],
tf.reshape(vis, [-1, 1]),
sample_axis=0, event_axis=-1)]
corr_r_diag = tf.add_n(corr_diag)/self.dim_z
corr_r_full = tf.add_n(corr_full)/self.dim_z
# compute the errors of the predicted observations
dist_obs = []
mses = []
cont = label[:, 7:8]
for i in range(self.dim_z):
mse, dist = self._mse(diff[:, i:i+1], reduce_mean=False)
# undo the overall scaling for dist and mse, but only undo the
# component-wise scaling for dist
scale_dist = self.scale
scale_mse = self.scale**2
# mask out non-contact cases for contact point and normal
if i in [3, 4, 5, 6]:
dist_obs += [tf.reduce_mean(dist*scale_dist*cont)]
mses += [tf.reduce_sum(mse*scale_mse*cont)]
else:
dist_obs += [tf.reduce_mean(dist*scale_dist)]
mses += [tf.reduce_sum(mse*scale_mse)]
mse = tf.add_n(mses)
        # segmentation error
height = seg.get_shape()[1]
width = seg.get_shape()[2]
seg_pred = tf.image.resize(seg_pred, [height, width])
initial_seg_pred = tf.image.resize(initial_seg_pred, [height, width])
seg_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=tf.squeeze(seg_pred, axis=-1),
labels=tf.squeeze(seg, axis=-1))
seg_loss = tf.reduce_mean(tf.reduce_sum(seg_loss, axis=[1, 2]))
seg_loss2 = tf.nn.sigmoid_cross_entropy_with_logits(
logits=tf.squeeze(initial_seg_pred, axis=-1),
labels=tf.squeeze(initial_seg, axis=-1))
seg_loss += tf.reduce_mean(tf.reduce_sum(seg_loss2, axis=[1, 2]))
# get the pixel prediction error for the position
pix_diff = pix_pred - pix_pos
pix_mse, pix_dist = self._mse(pix_diff, reduce_mean=False)
pix_mse = tf.reduce_mean(pix_mse)
_, dist_3d = self._mse(diff[:, :2], reduce_mean=False)
initial_pix_diff = initial_pix_pred - initial_pix_pos
initial_pix_mse, initial_pix_dist = self._mse(initial_pix_diff,
reduce_mean=False)
initial_pix_mse = tf.reduce_mean(initial_pix_mse)
# compute the angle-loss of the normals
norm_pred = z[:, 5:7]
norm_label = label[:, 5:7]
normal_ang = self.normal_loss(norm_pred, norm_label)
# compute the contact loss
contact_loss, ce = self.contact_loss(z[:, 7:8], label[:, 7:8])
# compute the loss for the learned likelihood model of the pf
good_loss = tf.reduce_mean(-tf.math.log(tf.maximum(like_good, 1e-6)))
bad_loss = \
tf.reduce_mean(-tf.math.log(tf.maximum(1.0 - like_bad, 1e-6)))
like_loss = good_loss + bad_loss
# add a penalty term for predicted rotation values greater than pi
rot_pred = tf.abs(z[:, 2])
rot_penalty = tf.where(tf.greater(rot_pred, 180),
tf.square(rot_pred - 180),
tf.zeros_like(rot_pred))
rot_penalty = tf.reduce_mean(rot_penalty)
wd = []
for la in self.observation_models.values():
wd += la.losses
for la in self.observation_noise_models.values():
wd += la.losses
wd = tf.add_n(wd)
# start by training only the localization for two epochs
total_train = \
tf.cond(tf.less(step, self.epoch_size*2),
lambda: 10 * (pix_mse + initial_pix_mse) + seg_loss,
lambda: (10 * tf.add_n(mses) +
10 * (pix_mse + initial_pix_mse) +
100 * tf.reduce_mean(normal_ang) +
100 * tf.reduce_mean(contact_loss) +
1e-4 * tf.reduce_mean(likelihood) +
1e-3 * like_loss +
rot_penalty + 0.01 * seg_loss + 0.01 * wd))
total_train = \
tf.cond(tf.less(step, self.epoch_size*5),
lambda: total_train,
lambda: (10 * tf.add_n(mses) +
10 * (pix_mse + initial_pix_mse) +
100 * tf.reduce_mean(normal_ang) +
100 * tf.reduce_mean(contact_loss) +
0.1 * (tf.reduce_mean(likelihood) + like_loss) +
rot_penalty + 0.001 * seg_loss + wd))
total_val = 10 * tf.add_n(mses) + 10 * tf.reduce_mean(normal_ang) + \
100 * tf.reduce_mean(contact_loss) + \
tf.reduce_mean(likelihood) + like_loss + 100
total = tf.cond(training, lambda: total_train, lambda: total_val)
# add summaries
tf.summary.scalar('loss/total', total)
tf.summary.scalar('loss/wd', wd)
tf.summary.scalar('loss/likelihood_const_diag',
tf.reduce_mean(likelihood_const_diag))
tf.summary.scalar('loss/likelihood_const_tri',
tf.reduce_mean(likelihood_const_tri))
tf.summary.scalar('loss/likelihood_het_diag',
tf.reduce_mean(likelihood_het_diag))
tf.summary.scalar('loss/likelihood_het_tri',
tf.reduce_mean(likelihood_het_tri))
for i, name in enumerate(self.z_names):
tf.summary.scalar('label/' + name, label[0, i])
for i, name in enumerate(self.z_names):
tf.summary.scalar('observation_loss/' + name,
tf.reduce_mean(dist_obs[i]))
for i, name in enumerate(self.z_names):
tf.summary.scalar('noise_loss/diag_' + name,
tf.reduce_mean(corr_diag[i]))
tf.summary.scalar('noise_loss/full_' + name,
tf.reduce_mean(corr_full[i]))
tf.summary.scalar('noise_loss/corr_diag', tf.reduce_mean(corr_r_diag))
tf.summary.scalar('noise_loss/corr_full', tf.reduce_mean(corr_r_full))
tf.summary.scalar('observation_loss/normal_ang',
tf.reduce_mean(normal_ang))
tf.summary.scalar('observation_loss/mean_vis',
tf.reduce_mean(vis))
tf.summary.scalar('observation_loss/dist_pix',
tf.reduce_mean(pix_dist))
tf.summary.scalar('observation_loss/dist_3d',
tf.reduce_mean(dist_3d))
tf.summary.scalar('observation_loss/contact_cross',
tf.reduce_mean(ce))
tf.summary.scalar('observation_loss/rot_penalty', rot_penalty)
tf.summary.scalar('loss/like_good', good_loss)
tf.summary.scalar('loss/like_bad', bad_loss)
tf.summary.scalar('loss/like_loss', like_loss)
tf.summary.scalar('loss/segmentation', seg_loss)
tf.summary.image('loss/seg_label', seg)
tf.summary.image('loss/seg_pred', seg_pred)
tf.summary.image('loss/initial_seg_label', initial_seg)
tf.summary.image('loss/inital_seg_pred', initial_seg_pred)
return total, [likelihood_const_diag, likelihood_const_tri,
likelihood_het_diag, likelihood_het_tri,
mse, like_loss, tf.reduce_mean(normal_ang),
tf.reduce_mean(ce), tf.reshape(vis, [-1, 1]),
diag_r_het_diag, diag_r_het_tri, wd] + dist_obs, \
['likelihood_const_diag', 'likelihood_const_tri',
'likelihood_het_diag', 'likelihood_het_tri', 'mse', 'like',
'normal_ang', 'contact_cross', 'vis', 'r_het_diag',
'r_het_tri', 'wd'] + self.z_names
def get_process_loss(self, prediction, labels, step, training):
"""
Compute the loss for the process functions - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: boolean tensor, indicates if we compute a loss for
training or testing
Returns:
loss: the total loss for training the process model
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
state, Q_const_diag, Q_const_tri, Q_het_diag, Q_het_tri, \
state_ana, Q_const_diag_ana, Q_const_tri_ana, Q_het_diag_ana, \
Q_het_tri_ana = prediction
label, start = labels
diff = label - state
diff = self.correct_state(diff)
likelihood_const_diag = self._likelihood(diff, Q_const_diag,
reduce_mean=False)
likelihood_const_tri = self._likelihood(diff, Q_const_tri,
reduce_mean=False)
likelihood_het_diag = self._likelihood(diff, Q_het_diag,
reduce_mean=False)
likelihood_het_tri = self._likelihood(diff, Q_het_tri,
reduce_mean=False)
likelihood = (likelihood_const_diag + likelihood_const_tri +
likelihood_het_diag + likelihood_het_tri) / 4.
diff_ana = label - state_ana
diff_ana = self.correct_state(diff_ana)
likelihood_const_diag_ana = self._likelihood(diff_ana,
Q_const_diag_ana,
reduce_mean=False)
likelihood_const_tri_ana = self._likelihood(diff_ana, Q_const_tri_ana,
reduce_mean=False)
likelihood_het_diag_ana = self._likelihood(diff_ana, Q_het_diag_ana,
reduce_mean=False)
likelihood_het_tri_ana = self._likelihood(diff_ana, Q_het_tri_ana,
reduce_mean=False)
likelihood_ana = \
(likelihood_const_diag_ana + likelihood_const_tri_ana +
likelihood_het_diag_ana + likelihood_het_tri_ana) / 4.
# compute the errors of the predicted states from the learned model
mses = []
dists = []
for i in range(self.dim_x):
mse, dist = self._mse(diff[:, i:i+1], reduce_mean=False)
# undo the overall scaling for dist and mse
mses += [tf.reduce_mean(mse*self.scale**2)]
dists += [tf.reduce_mean(dist*self.scale)]
mse = tf.add_n(mses)
# compute the errors of the predicted states from the analytical model
dists_ana = []
for i in range(self.dim_x):
_, dist = self._mse(diff_ana[:, i:i+1], reduce_mean=False)
dists_ana += [tf.reduce_mean(dist*self.scale)]
wd = []
for la in self.process_models.values():
wd += la.losses
for la in self.process_noise_models.values():
wd += la.losses
wd = tf.add_n(wd)
total_loss = \
tf.cond(tf.less(step, self.epoch_size*5),
lambda: (1000 * tf.reduce_mean(mse) +
1e-5 * tf.reduce_mean(likelihood) +
1e-5 * tf.reduce_mean(likelihood_ana)),
lambda: (tf.reduce_mean(likelihood) +
tf.reduce_mean(likelihood_ana) +
1000 * tf.reduce_mean(mse)))
total = \
tf.cond(training,
lambda: total_loss + wd,
lambda: (tf.reduce_mean(likelihood) + 100 +
tf.reduce_mean(likelihood_ana) +
10 * tf.reduce_mean(mse)))
# add summaries
tf.summary.scalar('loss/total', total)
tf.summary.scalar('loss/wd', wd)
tf.summary.scalar('loss/likelihood_const_diag',
tf.reduce_mean(likelihood_const_diag))
tf.summary.scalar('loss/likelihood_const_tri',
tf.reduce_mean(likelihood_const_tri))
tf.summary.scalar('loss/likelihood_het_diag',
tf.reduce_mean(likelihood_het_diag))
tf.summary.scalar('loss/likelihood_het_tri',
tf.reduce_mean(likelihood_het_tri))
tf.summary.scalar('loss/likelihood_const_diag_ana',
tf.reduce_mean(likelihood_const_diag_ana))
tf.summary.scalar('loss/likelihood_const_tri_ana',
tf.reduce_mean(likelihood_const_tri_ana))
tf.summary.scalar('loss/likelihood_het_diag_ana',
tf.reduce_mean(likelihood_het_diag_ana))
tf.summary.scalar('loss/likelihood_het_tri_ana',
tf.reduce_mean(likelihood_het_tri_ana))
tf.summary.scalar('loss/tracking', tf.reduce_mean(mse))
for i, name in enumerate(self.x_names):
tf.summary.scalar('tracking_loss/' + name,
tf.reduce_mean(dists[i]))
tf.summary.scalar('tracking_loss/' + name + '_ana',
tf.reduce_mean(dists_ana[i]))
for i in range(min(self.batch_size, 1)):
tf.summary.scalar('label/x_' + str(i), label[i, 0])
tf.summary.scalar('label/y_' + str(i), label[i, 1])
tf.summary.scalar('label/theta_' + str(i), label[i, 2])
tf.summary.scalar('label/l_' + str(i), label[i, 3])
tf.summary.scalar('label/mu_' + str(i), label[i, 4])
tf.summary.scalar('label/rx_' + str(i), label[i, 5])
tf.summary.scalar('label/ry_' + str(i), label[i, 6])
tf.summary.scalar('label/nx_' + str(i), label[i, 7])
tf.summary.scalar('label/ny_' + str(i), label[i, 8])
tf.summary.scalar('label/s_' + str(i), label[i, 9])
tf.summary.scalar('start/x_' + str(i), start[i, 0])
tf.summary.scalar('start/y_' + str(i), start[i, 1])
tf.summary.scalar('start/theta_' + str(i), start[i, 2])
tf.summary.scalar('start/l_' + str(i), start[i, 3])
tf.summary.scalar('start/mu_' + str(i), start[i, 4])
tf.summary.scalar('start/rx_' + str(i), start[i, 5])
tf.summary.scalar('start/ry_' + str(i), start[i, 6])
tf.summary.scalar('start/nx_' + str(i), start[i, 7])
tf.summary.scalar('start/ny_' + str(i), start[i, 8])
tf.summary.scalar('start/s_' + str(i), start[i, 9])
tf.summary.scalar('pred/x_ana_' + str(i), state_ana[i, 0])
tf.summary.scalar('pred/y_ana_' + str(i), state_ana[i, 1])
tf.summary.scalar('pred/theta_ana_' + str(i), state_ana[i, 2])
tf.summary.scalar('pred/l_ana_' + str(i), state_ana[i, 3])
tf.summary.scalar('pred/mu_ana_' + str(i), state_ana[i, 4])
tf.summary.scalar('pred/rx_ana_' + str(i), state_ana[i, 5])
tf.summary.scalar('pred/ry_ana_' + str(i), state_ana[i, 6])
tf.summary.scalar('pred/nx_ana_' + str(i), state_ana[i, 7])
tf.summary.scalar('pred/ny_ana_' + str(i), state_ana[i, 8])
tf.summary.scalar('pred/s_ana_' + str(i), state_ana[i, 9])
tf.summary.scalar('pred/x_' + str(i), state[i, 0])
tf.summary.scalar('pred/y_' + str(i), state[i, 1])
tf.summary.scalar('pred/theta_' + str(i), state[i, 2])
tf.summary.scalar('pred/l_' + str(i), state[i, 3])
tf.summary.scalar('pred/mu_' + str(i), state[i, 4])
tf.summary.scalar('pred/rx_' + str(i), state[i, 5])
tf.summary.scalar('pred/ry_' + str(i), state[i, 6])
tf.summary.scalar('pred/nx_' + str(i), state[i, 7])
tf.summary.scalar('pred/ny_' + str(i), state[i, 8])
tf.summary.scalar('pred/s_' + str(i), state[i, 9])
return total, \
[likelihood_const_diag, likelihood_const_tri,
likelihood_het_diag, likelihood_het_tri,
likelihood_const_diag_ana, likelihood_const_tri_ana,
likelihood_het_diag_ana, likelihood_het_tri_ana, wd] + dists + \
dists_ana, \
['likelihood_const_diag', 'likelihood_const_tri',
'likelihood_het_diag', 'likelihood_het_tri',
'likelihood_const_diag_ana', 'likelihood_const_tri_ana',
'likelihood_het_diag_ana', 'likelihood_het_tri_ana', 'wd'] + \
self.x_names + list(map(lambda x: x + '_ana', self.x_names))
def normal_loss(self, pred, label, name=""):
# normalize both
pred_norm = tf.norm(pred, axis=-1, keep_dims=True)
label_norm = tf.norm(label, axis=-1, keep_dims=True)
pred = tf.nn.l2_normalize(pred, -1)
label = tf.nn.l2_normalize(label, -1)
# calculate the angles between them
if len(pred.get_shape().as_list()) == 3:
prod = tf.matmul(tf.reshape(pred, [self.batch_size, -1, 1, 2]),
tf.reshape(label, [self.batch_size, -1, 2, 1]))
prod = tf.clip_by_value(prod, -0.999999999, 0.999999999)
prod = tf.acos(tf.reshape(prod, [self.batch_size, -1, 1]))
else:
prod = tf.matmul(tf.reshape(pred, [self.batch_size, 1, 2]),
tf.reshape(label, [self.batch_size, 2, 1]))
prod = tf.clip_by_value(prod, -0.999999999, 0.999999999)
prod = tf.acos(tf.reshape(prod, [self.batch_size, 1]))
# mask out invalid values and non-contact cases
greater = tf.logical_and(tf.greater(pred_norm, 1e-6),
tf.greater(label_norm, 1e-6))
ang_mask = tf.logical_and(greater, tf.math.is_finite(prod))
ang = tf.where(ang_mask, tf.abs(prod), tf.zeros_like(prod))
# correct values over 180 deg.
ang = tf.where(tf.greater(tf.abs(ang), np.pi),
2*np.pi - tf.abs(ang), tf.abs(ang))*180./np.pi
return ang
def contact_loss(self, pred, label, name=""):
# calculate the error
label = tf.reshape(label, [self.batch_size, -1, 1])
pred = tf.reshape(pred, [self.batch_size, -1, 1])
# limit pred to [0..1]
pred = tf.clip_by_value(pred, 0, 1.)
# slightly downweight the loss for in-contact-cases to reduce the
# amount of false-positives
loss = (1 - label) * -tf.math.log(tf.maximum(1 - pred, 1e-7)) + \
label * -tf.math.log(tf.maximum(pred, 1e-7))
ce = (1 - label) * -tf.math.log(tf.maximum(1 - pred, 1e-7)) + \
label * -tf.math.log(tf.maximum(pred, 1e-7))
return loss, ce
###########################################################################
# keeping the state correct
###########################################################################
def correct_state(self, state, diff=True):
"""
Correct the state to make sure theta is in the right interval
Args:
state: The current state
Returns:
state: The corrected state
"""
shape = state.get_shape().as_list()
if len(shape) > 2:
state = tf.reshape(state, [-1, self.dim_x])
sc = self.scale
if diff:
state = \
tf.concat([state[:, :2],
self._adapt_orientation(state[:, 2:3], self.ob, sc),
state[:, 3:]], axis=-1)
else:
state = \
tf.concat([state[:, :2],
self._adapt_orientation(state[:, 2:3], self.ob, sc),
self._adapt_fr(state[:, 3:4]),
self._adapt_m(state[:, 4:5]),
state[:, 5:7],
self._adapt_n(state[:, 7:9], state[:, 5:7],
state[:, 0:2]),
self._adapt_s(state[:, 9:])], axis=-1)
if len(shape) > 2:
state = tf.reshape(state, shape[:-1] + [self.dim_x])
return state
def correct_observation_diff(self, diff):
"""
Correct a difference in observations to account for angle intervals
Args:
state: The difference
Returns:
state: The corrected difference
"""
shape = diff.get_shape().as_list()
if len(shape) > 2:
diff = tf.reshape(diff, [-1, self.dim_z])
sc = 1 * self.scale
diff = tf.concat([diff[:, :2],
self._adapt_orientation(diff[:, 2:3], self.ob, sc),
diff[:, 3:]], axis=-1)
if len(shape) > 2:
diff = tf.reshape(diff, shape[:-1] + [self.dim_z])
return diff
def weighted_state_mean_with_angles(self, points, weights):
ps = tf.concat([points[:, :, :2],
tf.sin(points[:, :, 2:3]*self.scale*np.pi/180.0),
tf.cos(points[:, :, 2:3]*self.scale*np.pi/180.0),
points[:, :, 3:]], axis=-1)
mult = tf.multiply(ps, weights)
mean = tf.reduce_sum(mult, axis=1)
ang1 = tf.math.atan2(mean[:, 2:3], mean[:, 3:4])*180.0/np.pi
out = tf.concat([mean[:, :2], ang1/self.scale, mean[:, 4:]], axis=-1)
return out
def weighted_observation_mean_with_angles(self, points, weights, axis=1):
ps = tf.concat([points[:, :, :2],
tf.sin(points[:, :, 2:3]*self.scale*np.pi/180.0),
tf.cos(points[:, :, 2:3]*self.scale*np.pi/180.0),
points[:, :, 3:]], axis=-1)
mult = tf.multiply(ps, weights)
mean = tf.reduce_sum(mult, axis=axis)
ang = tf.math.atan2(mean[:, 2:3], mean[:, 3:4])*180.0/np.pi
out = tf.concat([mean[:, :2], ang/self.scale, mean[:, 4:]], axis=-1)
return out
def _adapt_fr(self, fr):
# prevent l from getting too small or too big
fr = tf.clip_by_value(fr, 0.1/self.scale, 5e3/self.scale)
return fr
def _adapt_m(self, m):
# prevent m from getting negative or too large
m = tf.clip_by_value(m, 0.1/self.scale, 90./self.scale)
return m
def _adapt_s(self, s):
# keep the contact indicator between 0 and 1
s = tf.clip_by_value(s, 0., 1.)
return s
def _adapt_n(self, n, r, o):
# normalize -- not good at all!
# n_norm = tf.linalg.norm(n, axis=-1, keepdims=True)
# n = tf.where(tf.greater(tf.squeeze(n_norm), 1e-6), n/n_norm, n)
# # make sure the normal points towards the object
# dir_center = o[:, :2] - r[:, :2]
# dir_center_norm = tf.linalg.norm(dir_center, axis=-1, keepdims=True)
# dir_center = tf.where(tf.greater(tf.squeeze(dir_center_norm), 0.),
# dir_center/dir_center_norm, dir_center)
# prod = tf.matmul(tf.reshape(dir_center, [bs, 1, 2]),
# tf.reshape(n, [bs, 2, 1]))
# ang = tf.acos(tf.reshape(prod, [bs]))
# # correct values over 180 deg.
# ang = tf.where(tf.greater(tf.abs(ang), np.pi),
# 2*np.pi - tf.abs(ang), tf.abs(ang))*180./np.pi
# # if the angle is greater than 90 degree, we need to flip the
# # normal
# n = tf.where(tf.greater(ang, np.pi/2.), n, -1 * n)
return n
def _adapt_orientation(self, rot, ob, sc):
rot = rot * sc
# in most cases the maximum rotation range is 180 deg, but some
# objects have more or fewer symmetries
# we first apply a modulo operation to make sure that no value is
# larger than the maximum rotation range. Then we have to deal with the
# periodicity of the interval
rot_max = tf.ones_like(rot) * 180
ob = tf.squeeze(ob)
ob = tf.strings.regex_replace(ob, "\000", "")
ob = tf.strings.regex_replace(ob, "\00", "")
if len(ob.get_shape()) < 1:
rot_max = \
tf.case({tf.equal(ob, 'ellip1'): lambda: tf.zeros_like(rot),
tf.equal(ob, 'rect1'): lambda: tf.ones_like(rot)*90.,
tf.equal(ob, 'tri1'): lambda: tf.ones_like(rot)*360.,
tf.equal(ob, 'tri2'): lambda: tf.ones_like(rot)*360.,
tf.equal(ob, 'tri3'): lambda: tf.ones_like(rot)*360.,
tf.equal(ob, 'hex'): lambda: tf.ones_like(rot)*60.},
default=lambda: rot_max, exclusive=True)
rot_new = \
tf.cond(tf.equal(ob, 'ellip1'), lambda: tf.zeros_like(rot),
lambda: tf.math.mod(tf.abs(rot), rot_max)*tf.sign(rot))
# now make sure that the measured rotation is the smallest
# possible value in the interval [-rot_max/2, rot_max/2]
rot_add = tf.where(tf.greater(rot_new, rot_max/2.),
rot_new - rot_max, rot_new)
rot_add = tf.where(tf.less(rot_add, -rot_max/2.),
rot_add + rot_max, rot_add)
else:
if ob.get_shape()[0].value < rot.get_shape()[0].value:
mult = rot.get_shape()[0].value // ob.get_shape()[0].value
ob = tf.reshape(ob, [-1, 1])
ob = tf.reshape(tf.tile(ob, [1, mult]), [-1])
rot_max = tf.where(tf.equal(ob, 'ellip1'), tf.zeros_like(rot),
rot_max)
rot_max = tf.where(tf.equal(ob, 'rect1'), tf.ones_like(rot)*90,
rot_max)
rot_max = tf.where(tf.equal(ob, 'tri1'), tf.ones_like(rot)*360,
rot_max)
rot_max = tf.where(tf.equal(ob, 'tri2'), tf.ones_like(rot)*360,
rot_max)
rot_max = tf.where(tf.equal(ob, 'tri3'), tf.ones_like(rot)*360,
rot_max)
rot_max = tf.where(tf.equal(ob, 'hex'), tf.ones_like(rot)*60,
rot_max)
rot_new = tf.where(tf.equal(ob, 'ellip1'), tf.zeros_like(rot),
tf.math.mod(tf.abs(rot), rot_max)*tf.sign(rot))
# now make sure that the measured rotation is the smallest
# possible value in the interval [-rot_max/2, rot_max/2]
rot_add = tf.where(tf.greater(rot_new, rot_max/2.),
rot_new - rot_max, rot_new)
rot_add = tf.where(tf.less(rot_add, -rot_max/2.),
rot_add + rot_max, rot_add)
rot_add /= sc
return rot_add
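# a minimal sketch of the wrapping above (plain numpy, sc == 1,
# rot_max == 180, i.e. an object with 180 deg symmetry):
# >>> import numpy as np
# >>> rot, rot_max = 350., 180.
# >>> r = np.mod(np.abs(rot), rot_max) * np.sign(rot)   # -> 170.0
# >>> r = r - rot_max if r > rot_max / 2. else r        # -> -10.0
# >>> r = r + rot_max if r < -rot_max / 2. else r       # unchanged
# >>> r
# -10.0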
###########################################################################
# data loading
###########################################################################
def tf_record_map(self, path, name, dataset, data_mode, train_mode,
num_threads=5):
"""
Defines how to read in the data from a tf record
"""
keys = ['pos', 'object', 'contact_point', 'normal', 'contact',
'tip', 'friction', 'coord', 'image', 'material', 'pix_tip',
'pix_pos', 'segmentation']
record_meta = tfr.RecordMeta.load(path, name + '_' + data_mode + '_')
if train_mode == 'filter':
dataset = dataset.map(
lambda x: self._parse_function(x, keys, record_meta,
data_mode),
num_parallel_calls=num_threads)
elif train_mode == 'pretrain_obs':
dataset = dataset.map(
lambda x: self._parse_function_obs(x, keys, record_meta,
data_mode),
num_parallel_calls=num_threads)
elif train_mode == 'pretrain_process':
dataset = dataset.map(
lambda x: self._parse_function_process(x, keys, record_meta,
data_mode),
num_parallel_calls=num_threads)
else:
self.log.error('unknown training mode: ' + train_mode)
dataset = \
dataset.flat_map(lambda x, y:
tf.data.Dataset.from_tensor_slices((x, y)))
return dataset
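# usage sketch for tf_record_map (the file pattern, dataset name and
# record layout are hypothetical assumptions; tfr.RecordMeta is loaded
# from the same path as the records):
# files = tf.io.matching_files(path + '/push_train_*.tfrecord')
# dataset = tf.data.TFRecordDataset(files)
# dataset = self.tf_record_map(path, 'push', dataset,
#                              data_mode='train', train_mode='filter')
# dataset = dataset.shuffle(1000).batch(batch_size)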
def _parse_example(self, example_proto, keys, record_meta):
features = {}
for key in keys:
record_meta.add_tf_feature(key, features)
parsed_features = tf.io.parse_single_example(example_proto,
features)
for key in keys:
features[key] = record_meta.reshape_and_cast(key,
parsed_features)
return features
def _parse_function_obs(self, example_proto, keys, record_meta, data_mode):
features = self._parse_example(example_proto, keys, record_meta)
pose = features['pos']
ori = self._adapt_orientation(pose[:, 3:]*(180.0/np.pi),
features['object'], 1)
pose = tf.concat([pose[:, 0:1]*1000/self.scale,
pose[:, 1:2]*1000/self.scale,
ori/self.scale], axis=1)
n = tf.squeeze(features['normal'])/self.scale
con = tf.cast(features['contact'], tf.float32)
con = tf.reshape(con, [-1, 1])/self.scale
tips = features['tip']
cp = features['contact_point'][:, :2]*1000
con_norm = tf.linalg.norm(cp, axis=-1)
cp = tf.where(tf.less(con_norm, 1e-6),
tips[:, :2]*1000, cp)/self.scale
pix_tip = features['pix_tip']
im = features['image']
coord = features['coord']
mask = features['segmentation']
mask = tf.cast(tf.where(tf.greater(mask, 2.5), tf.ones_like(mask),
tf.zeros_like(mask)), tf.float32)
vis = tf.reduce_sum(mask, axis=[1, 2, 3])
seq_len = im.get_shape()[0].value
im = tf.concat([im, coord], axis=-1)
pix = features['pix_pos'][:, :2]
ob = tf.reshape(features['object'], [1])
mat = tf.reshape(features['material'], [1])
# sanity check for the reprojection between pixels and 3d
# # load a plane image for reprojecting
# path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# path = os.path.join(path, 'resources', 'plane_image.npy')
# print('loading plane image from: ', path)
# plane_depth = tf.convert_to_tensor(np.load(path))[None, :, :, None]
# pix_pos = features['pix_pos'][1:2]
# pos_3d = features['pos'][1:2, :3]
# projected1 = utils._to_3d(pix_pos, im[1:2, :, :, -1:])
# projected2 = utils._to_3d(pix_pos, plane_depth)
# pix_pro = utils._to_2d(pos_3d)
# cp = tf.print(cp, [pix_pos, pix_pro],
# summarize=1000, message='pix, pix_pro\n')
# cp = tf.print(cp, [pos_3d, projected1, projected2],
# summarize=1000, message='3d, pro_d, pro_plane \n')
# we use several steps of the sequence
if data_mode == 'train':
start_inds = np.random.randint(2, seq_len-2, 5)
self.train_multiplier = len(start_inds)
else:
# use every eighth data point
start_inds = np.arange(2, seq_len-2, 8)
num = len(start_inds)
# prepare the lists of output tensors
viss = []
ims = []
start_ims = []
start_ts = []
tes = []
labels = []
good_zs = []
bad_zs = []
pixs = []
pixts = []
pixte = []
start_pixs = []
segs = []
start_segs = []
for si in start_inds:
start_ts += [tips[1]]
start_ims += [im[1]]
start_pixs += [pix[1]]
start_segs += [mask[1]]
viss += [vis[si]]
segs += [mask[si]]
ims += [im[si]]
pixs += [pix[si]]
pixts += [pix_tip[si]]
pixte += [pix_tip[si+1]]
tes += [tips[si]]
relative_rot = \
self._adapt_orientation(pose[si, 2:3] - pose[1, 2:3], ob,
self.scale)
label = tf.concat([pose[si, :2], relative_rot, cp[si], n[si],
con[si]], axis=0)
labels += [label]
good_noise = np.random.normal(loc=0, scale=1e-1, size=(24, 8))
good_noise[0, :] = 0
bad_noise = np.random.normal(loc=10, scale=5, size=(24, 8))
bad_noise[12:] = np.random.normal(loc=-10, scale=5,
size=(12, 8))
# downscale noise for normal and contact
good_noise[:, 5:] /= 10
bad_noise[:, 5:] /= 10
# upscale noise for position and orientation
bad_noise[:, :2] *= 10
bad_noise[:, 2:3] *= 2
good_noise[:, :2] *= 10
good_noise[:, 2:3] *= 2
# adapt to scaling
bad_noise /= self.scale
good_noise /= self.scale
bad_zs += [tf.tile(label[None, :], [24, 1]) + bad_noise]
good_zs += [tf.tile(label[None, :], [24, 1]) + good_noise]
ims = tf.stack(ims)
start_ims = tf.stack(start_ims)
start_ts = tf.stack(start_ts)
tes = tf.stack(tes)
pixts = tf.stack(pixts)
pixte = tf.stack(pixte)
ob = tf.tile(ob, [num])
mat = tf.tile(mat, [num])
values = [(ims, tes, pixts, pixte), tf.stack(labels),
tf.stack(good_zs),
tf.stack(bad_zs), (start_ims, start_ts), (ob, mat)]
labels = [tf.stack(labels), tf.stack(pixs), tf.stack(start_pixs),
tf.stack(segs), tf.stack(start_segs), tf.stack(viss)]
return tuple(values), tuple(labels)
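# shape note for the pretraining samples assembled above: each label
# has 8 entries (x, y, relative rot, cp_x, cp_y, n_x, n_y, contact);
# good_zs and bad_zs stack 24 perturbed copies per selected step, so
# tf.stack(good_zs) has shape [num, 24, 8]. the first good sample is
# noise-free (good_noise[0, :] = 0), so the true observation is
# always among the positives.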
def _parse_function_process(self, example_proto, keys, record_meta,
data_mode):
features = self._parse_example(example_proto, keys, record_meta)
pose = features['pos']
ori = self._adapt_orientation(pose[:, 3:]*180./np.pi,
features['object'], 1)
pose = tf.concat([pose[:, 0:1]*1000, pose[:, 1:2]*1000, ori],
axis=1)/self.scale
n = tf.squeeze(features['normal'])/self.scale
con = tf.cast(features['contact'], tf.float32)
con = tf.reshape(con, [-1, 1])/self.scale
tips = features['tip']
cp = features['contact_point'][:, :2]
con_norm = tf.linalg.norm(cp, axis=-1)
cp = tf.where(tf.less(con_norm, 1e-6),
tips[:, :2], cp)*1000/self.scale
friction = \
tf.square(tf.reshape(features['friction'], [1]) * 1000.)
friction = friction/(100*self.scale)
mu = tf.atan(tf.ones([1], dtype=tf.float32) * 0.25)*180./np.pi
mu = mu/self.scale
ob = tf.reshape(features['object'], [1])
mat = tf.reshape(features['material'], [1])
seq_len = features['pos'].get_shape()[0].value
# calculate the actions - scale them by the same amount as the
# position
t_end = tips[1:, :2]
t_start = tips[:-1, :2]
u = (t_end - t_start) * 1000./self.scale
# we use several steps of the sequence
if data_mode == 'train':
start_inds = np.random.randint(2, seq_len-1, 10)
self.train_multiplier = len(start_inds)
else:
# use every eighth data point
start_inds = np.arange(2, seq_len-1, 8)
num = len(start_inds)
# prepare the lists of output tensors
start_state = []
us = []
labels = []
for si in start_inds:
p_start = pose[si-1][:2]
s_start = tf.concat([p_start, tf.zeros([1]), friction, mu,
cp[si-1], n[si-1], con[si-1]], axis=0)
start_state += [s_start]
us += [u[si-1]]
relative_rot = pose[si, 2:3] - pose[si-1, 2:3]
relative_rot = \
self._adapt_orientation(relative_rot, ob, self.scale)
label = tf.concat([pose[si, :2], relative_rot, friction, mu,
cp[si], n[si], con[si]], axis=0)
labels += [label]
start_state = tf.stack(start_state)
us = tf.stack(us)
ob = tf.tile(ob, [num])
mat = tf.tile(mat, [num])
values = [start_state, us, (ob, mat)]
labels = [labels, start_state]
return tuple(values), tuple(labels)
def _parse_function(self, example_proto, keys, record_meta, data_mode):
features = self._parse_example(example_proto, keys, record_meta)
pose = features['pos']
ori = self._adapt_orientation(pose[:, 3:]*180./np.pi,
features['object'], 1)
pose = tf.concat([pose[:, 0:1]*1000, pose[:, 1:2]*1000, ori],
axis=1)/self.scale
n = tf.squeeze(features['normal'])/self.scale
con = tf.cast(features['contact'], tf.float32)
con = tf.reshape(con, [-1, 1])/self.scale
tips = features['tip']
cp = features['contact_point'][:, :2]
con_norm = tf.linalg.norm(cp, axis=-1)
cp = tf.where(tf.less(con_norm, 1e-6),
tips[:, :2], cp)*1000/self.scale
friction = \
tf.square(tf.reshape(features['friction'], [1]) * 1000.)
friction = friction/(100*self.scale)
mu = tf.atan(tf.ones([1], dtype=tf.float32) * 0.25)*180./np.pi
mu = mu/self.scale
# calculate the actions - scale them by the same amount as the
# position
t_end = tips[1:, :2]
t_start = tips[:-1, :2]
u = (t_end - t_start) * 1000./self.scale
im = features['image']
coord = features['coord']
mask = features['segmentation']
mask = tf.cast(tf.where(tf.greater(mask, 2.5), tf.ones_like(mask),
tf.zeros_like(mask)), tf.float32)
vis = tf.reduce_sum(mask, axis=[1, 2, 3])
im = tf.concat([im, coord], axis=-1)
pix_tip = features['pix_tip']
ob = tf.reshape(features['object'], [1])
mat = tf.reshape(features['material'], [1])
seq_len = features['pos'].get_shape()[0].value
# we use several steps of the sequence
if data_mode == 'train':
num = 1
start_inds = np.random.randint(1, seq_len-self.sl-2, num)
elif data_mode == 'val':
num = 1
# take sub-sequences of the validation sequence (num limits how many)
start_inds = np.arange(1, seq_len-self.sl-2, (self.sl+1)//2)
start_inds = start_inds[:num]
else:
if self.sl > seq_len//2:
start_inds = [1]
else:
start_inds = np.arange(1, seq_len-self.sl-2, 20)
num = len(start_inds)
self.test_multiplier = num
# prepare the lists of output tensors
ims = []
start_ims = []
start_ts = []
start_state = []
us = []
tes = []
pixts = []
pixte = []
labels = []
mv_trs = []
mv_rots = []
viss = []
for si in start_inds:
p_start = pose[si][:2]
s_start = tf.concat([p_start, tf.zeros([1]), friction, mu,
cp[si], n[si], con[si]], axis=0)
start_state += [s_start]
start_ts += [tips[si]]
start_ims += [im[si]]
start = si + 1
end = si + 1 + self.sl
ims += [im[start:end]]
us += [u[start:end]]
tes += [tips[start:end]]
pixts += [pix_tip[start:end]]
pixte += [pix_tip[start+1:end+1]]
relative_rot = pose[start:end, 2:3] - \
tf.tile(pose[si:si+1, 2:3], [self.sl, 1])
relative_rot = \
self._adapt_orientation(relative_rot, ob, self.scale)
label = tf.concat([pose[start:end, :2], relative_rot,
tf.tile(friction[None, :], [self.sl, 1]),
tf.tile(mu[None, :], [self.sl, 1]),
cp[start:end], n[start:end],
con[start:end]], axis=-1)
labels += [label]
viss += [vis[start:end]]
mv = pose[start:end] - pose[si:end-1]
mv_trs += [tf.reduce_sum(tf.norm(mv[:, :2], axis=-1))]
mvr = self._adapt_orientation(mv[:, 2], ob, self.scale)
mv_rots += [tf.reduce_sum(tf.abs(mvr))]
ims = tf.stack(ims)
start_ims = tf.stack(start_ims)
start_ts = tf.stack(start_ts)
start_state = tf.stack(start_state)
us = tf.stack(us)
tes = tf.stack(tes)
pixts = tf.stack(pixts)
pixte = tf.stack(pixte)
mv_trs = tf.stack(mv_trs)
mv_rots = tf.stack(mv_rots)
viss = tf.stack(viss)
ob = tf.tile(ob, [num])
mat = tf.tile(mat, [num])
values = [(ims, tes, pixts, pixte), us, (start_ims, start_ts),
start_state, (ob, mat)]
labels = [labels, mv_trs, mv_rots, viss]
return tuple(values), tuple(labels)
######################################
# Evaluation
######################################
def save_log(self, log_dict, out_dir, step, num, mode):
if mode == 'filter':
keys = ['noise_num', 'likelihood', 'likelihood_std', 'dist_tr',
'dist_tr_std', 'dist_rot', 'dist_rot_std', 'corr_r_vis',
'corr_r_cont', 'corr_q_cont',
'm_tr', 'm_tr_std', 'deg_rot', 'deg_rot_std', 'dist',
'dist_std', 'dist_obs', 'dist_obs_std']
keys += self.x_names + list(map(lambda x: x + '_std',
self.x_names))
keys_corr = ['noise_num']
keys_corr += list(map(lambda x: 'cq_cont_' + x, self.x_names))
keys_corr += list(map(lambda x: 'cr_cont_' + x, self.z_names))
keys_corr += list(map(lambda x: 'cr_vis_' + x, self.z_names))
log_file = open(os.path.join(out_dir, str(step) + '_res.csv'), 'a')
log = csv.DictWriter(log_file, keys)
if num == 0:
log.writeheader()
log_file_corr = open(os.path.join(out_dir,
str(step) + '_corr.csv'), 'a')
log_corr = csv.DictWriter(log_file_corr, keys_corr)
if num == 0:
log_corr.writeheader()
row = {}
for k, v in log_dict.items():
if k in keys and type(v[0]) not in [str, bool, np.str_,
np.bool_]:
row[k] = np.mean(v)
row[k + '_std'] = np.std(v)
# corr_r cannot be properly evaluated per-example when batch size
# is 1, so we have to evaluate it here before outputting it
row_corr = {}
r_pred = log_dict['r_pred'].reshape(-1, self.dim_z).T
vis = log_dict['vis'].reshape(-1, 1).T
cont = log_dict['cont'].reshape(-1, 1).T
corr_vis = []
corr_cont = []
for i, n in enumerate(self.z_names):
r_c = np.corrcoef(r_pred[i:i+1], cont)[0, 1]
r_v = np.corrcoef(r_pred[i:i+1], vis)[0, 1]
corr_vis += [r_v]
corr_cont += [r_c]
row_corr['cr_cont_' + n] = r_c
row_corr['cr_vis_' + n] = r_v
row['corr_r_vis'] = np.mean(corr_vis)
row['corr_r_cont'] = np.mean(corr_cont)
q_pred = log_dict['q_pred'].reshape(-1, self.dim_x).T
corr_cont = []
for i, n in enumerate(self.x_names):
q_c = np.corrcoef(q_pred[i:i+1], cont)[0, 1]
corr_cont += [q_c]
row_corr['cq_cont_' + n] = q_c
row['corr_q_cont'] = np.mean(corr_cont)
row['noise_num'] = num
log.writerow(row)
log_file.close()
row_corr['noise_num'] = num
log_corr.writerow(row_corr)
log_file_corr.close()
else:
row = {}
for k, v in log_dict.items():
if type(v[0]) not in [str, bool, np.str_, np.bool_]:
row[k] = np.mean(v)
row[k + '_std'] = np.std(v)
if mode == 'pretrain_obs':
# corr_r cannot be properly evaluated per-example when batch
# size is 1, so we have to evaluate it here
r_het_diag = log_dict['r_het_diag'].reshape(-1, self.dim_z).T
r_het_tri = log_dict['r_het_tri'].reshape(-1, self.dim_z).T
vis = log_dict['vis'].reshape(-1, 1).T
corr_diags = []
corr_fulls = []
for i in range(self.dim_z):
corr_diags += [np.corrcoef(r_het_diag[i:i+1], vis)[0, 1]]
corr_fulls += [np.corrcoef(r_het_tri[i:i+1], vis)[0, 1]]
row['corr_r_het_diag'] = np.mean(corr_diags)
row['corr_r_het_tri'] = np.mean(corr_fulls)
for i, n in enumerate(self.z_names):
row['corr_' + n + '_diag'] = corr_diags[i]
row['corr_' + n + '_full'] = corr_fulls[i]
log_file = open(os.path.join(out_dir, str(step) + '_res.csv'),
'w')
log = csv.DictWriter(log_file, sorted(row.keys()))
log.writeheader()
log.writerow(row)
log_file.close()
return
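# reading the result logs back for analysis -- a hedged sketch
# (pandas is an assumption, not a dependency of this module; '100'
# stands for a hypothetical checkpoint step):
# import pandas as pd
# res = pd.read_csv(os.path.join(out_dir, '100_res.csv'))
# print(res[['dist_tr', 'dist_rot', 'likelihood']].describe())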
def _eigsorted(self, cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:, order]
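# sketch of how _eigsorted feeds the confidence ellipses below: for a
# diagonal covariance, the eigenvectors are the coordinate axes and
# the ellipse axes scale with the square roots of the eigenvalues
# >>> vals, vecs = self._eigsorted(np.diag([4., 1.]))
# >>> vals                      # sorted in descending order
# array([4., 1.])
# >>> 4 * np.sqrt(vals)         # width, height as in plot_tracking
# array([8., 4.])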
def plot_tracking(self, seq_pred, cov_pred, z, seq, q_pred, r_pred, vis,
out_dir, num, diffs, likes, actions, ob, init,
full_out=False):
pos_pred = np.squeeze(seq_pred[:, :2])
or_pred = np.squeeze(seq_pred[:, 2])
l_pred = np.squeeze(seq_pred[:, 3])
mu_pred = np.squeeze(seq_pred[:, 4])
cp_pred = np.squeeze(seq_pred[:, 5:7])
n_pred = np.squeeze(seq_pred[:, 7:9])
s_pred = np.squeeze(seq_pred[:, 9])
vis = vis / np.max(vis)
if z is not None:
pos_obs = np.squeeze(z[:, :2])
or_obs = np.squeeze(z[:, 2])
r_obs = np.squeeze(z[:, 3:5])
n_obs = np.squeeze(z[:, 5:7])
s_obs = np.squeeze(z[:, 7])
if cov_pred is not None:
cov_pred = cov_pred.reshape(self.sl, self.dim_x, self.dim_x)
cx = np.sqrt(np.squeeze(cov_pred[:, 0, 0]))
cy = np.sqrt(np.squeeze(cov_pred[:, 1, 1]))
ct = np.sqrt(np.squeeze(cov_pred[:, 2, 2]))
cl = np.sqrt(np.squeeze(cov_pred[:, 3, 3]))
cmu = np.sqrt(np.squeeze(cov_pred[:, 4, 4]))
crx = np.sqrt(np.squeeze(cov_pred[:, 5, 5]))
cry = np.sqrt(np.squeeze(cov_pred[:, 6, 6]))
cnx = np.sqrt(np.squeeze(cov_pred[:, 7, 7]))
cny = np.sqrt(np.squeeze(cov_pred[:, 8, 8]))
cs = np.sqrt(np.squeeze(cov_pred[:, 9, 9]))
q_pred = q_pred.reshape(self.sl, self.dim_x, self.dim_x)
r_pred = r_pred.reshape(self.sl, self.dim_z, self.dim_z)
qx = np.sqrt(np.squeeze(q_pred[:, 0, 0]))
qy = np.sqrt(np.squeeze(q_pred[:, 1, 1]))
qt = np.sqrt(np.squeeze(q_pred[:, 2, 2]))
ql = np.sqrt(np.squeeze(q_pred[:, 3, 3]))
qmu = np.sqrt(np.squeeze(q_pred[:, 4, 4]))
qrx = np.sqrt(np.squeeze(q_pred[:, 5, 5]))
qry = np.sqrt(np.squeeze(q_pred[:, 6, 6]))
qnx = np.sqrt(np.squeeze(q_pred[:, 7, 7]))
qny = np.sqrt(np.squeeze(q_pred[:, 8, 8]))
qs = np.sqrt(np.squeeze(q_pred[:, 9, 9]))
rx = np.sqrt(np.squeeze(r_pred[:, 0, 0]))
ry = np.sqrt(np.squeeze(r_pred[:, 1, 1]))
rt = np.sqrt(np.squeeze(r_pred[:, 2, 2]))
rrx = np.sqrt(np.squeeze(r_pred[:, 3, 3]))
rry = np.sqrt(np.squeeze(r_pred[:, 4, 4]))
rnx = np.sqrt(np.squeeze(r_pred[:, 5, 5]))
rny = np.sqrt(np.squeeze(r_pred[:, 6, 6]))
rs = np.sqrt(np.squeeze(r_pred[:, 7, 7]))
fig, ax = plt.subplots(2, 3, figsize=[20, 15])
ts = np.arange(pos_pred.shape[0])
ax[0, 0].plot(ts, pos_pred[:, 0], '-r', label='x predicted')
ax[0, 0].plot(ts, seq[:, 0], '--g', label='x true')
ax[0, 0].plot(ts, pos_obs[:, 0], 'kx', label='x observed')
ax[0, 0].plot(ts, pos_pred[:, 1], '-m', label='y predicted')
ax[0, 0].plot(ts, seq[:, 1], '--c', label='y true')
ax[0, 0].plot(ts, pos_obs[:, 1], 'ko', label='y observed')
ax[0, 0].set_title('position')
ax[0, 0].legend()
ax[0, 1].plot(ts, or_pred, '-r', label='predicted')
ax[0, 1].plot(ts, seq[:, 2], '--g', label='true')
ax[0, 1].plot(ts, or_obs, 'kx', label='observed')
ax[0, 1].set_title('heading')
ax[0, 1].legend()
ax[0, 2].plot(ts, cp_pred[:, 0], '-r', label='x predicted')
ax[0, 2].plot(ts, seq[:, 5], '--g', label='x true')
ax[0, 2].plot(ts, r_obs[:, 0], 'kx', label='x observed')
ax[0, 2].plot(ts, cp_pred[:, 1], '-m', label='y predicted')
ax[0, 2].plot(ts, seq[:, 6], '--c', label='y true')
ax[0, 2].plot(ts, r_obs[:, 1], 'ko', label='y observed')
ax[0, 2].set_title('contact point')
ax[0, 2].legend()
ax[1, 2].plot(ts, n_pred[:, 0], '-r', label='x predicted')
ax[1, 2].plot(ts, seq[:, 7], '--g', label='x true')
ax[1, 2].plot(ts, n_obs[:, 0], 'kx', label='x observed')
ax[1, 2].plot(ts, n_pred[:, 1], '-m', label='y predicted')
ax[1, 2].plot(ts, seq[:, 8], '--c', label='y true')
ax[1, 2].plot(ts, n_obs[:, 1], 'ko', label='y observed')
ax[1, 2].set_title('normal')
ax[1, 2].legend()
ax[1, 0].plot(ts, mu_pred, '-r', label='mu predicted')
ax[1, 0].plot(ts, seq[:, 4], '--g', label='mu true')
ax[1, 0].plot(ts, l_pred, '-m', label='l predicted')
ax[1, 0].plot(ts, seq[:, 3], '--c', label='l true')
ax[1, 0].set_title('friction')
ax[1, 0].legend()
ax[1, 1].plot(ts, s_pred, '-r', label='predicted')
ax[1, 1].plot(ts, seq[:, 9], '--g', label='true')
ax[1, 1].plot(ts, s_obs, 'kx', label='observed')
ax[1, 1].plot(ts, vis, '-b', label='visibility')
ax[1, 1].set_title('contact')
ax[1, 1].legend()
if cov_pred is not None:
ax[0, 0].fill_between(ts, pos_pred[:, 0] - cx,
pos_pred[:, 0] + cx, color="lightblue")
ax[0, 0].fill_between(ts, pos_pred[:, 1] - cy,
pos_pred[:, 1] + cy, color="lightblue")
ax[0, 1].fill_between(ts, (or_pred - ct), (or_pred + ct),
color="lightblue")
ax[0, 2].fill_between(ts, cp_pred[:, 0] - crx,
cp_pred[:, 0] + crx, color="lightblue")
ax[0, 2].fill_between(ts, cp_pred[:, 1] - cry,
cp_pred[:, 1] + cry, color="lightblue")
ax[1, 0].fill_between(ts, (l_pred - cl),
(l_pred + cl), color="lightblue")
ax[1, 0].fill_between(ts, mu_pred - cmu,
mu_pred + cmu, color="lightblue")
ax[1, 1].fill_between(ts, (s_pred - cs), (s_pred + cs),
color="lightblue")
ax[1, 2].fill_between(ts, n_pred[:, 0] - cnx,
n_pred[:, 0] + cnx, color="lightblue")
ax[1, 2].fill_between(ts, n_pred[:, 1] - cny,
n_pred[:, 1] + cny, color="lightblue")
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.85,
wspace=0.1, hspace=0.3)
fig.savefig(os.path.join(out_dir, str(num) + "_tracking"),
bbox_inches="tight")
# plot the noise estimates
fig, ax = plt.subplots(2, 3, figsize=[20, 15])
ts = np.arange(pos_pred.shape[0])
sc = np.max([np.max(qx), np.max(qy), np.max(rx), np.max(ry)])
sc = max(1., sc)
ax[0, 0].plot(ts, qx, '-r', label='qx')
ax[0, 0].plot(ts, rx, '--g', label='rx')
ax[0, 0].plot(ts, qy, '-m', label='qy')
ax[0, 0].plot(ts, ry, '--c', label='ry')
ax[0, 0].plot(ts, vis*sc, '-b', label='visibility')
ax[0, 0].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[0, 0].set_title('position')
ax[0, 0].legend()
sc = np.max([np.max(qt), np.max(rt)])
sc = max(1., sc)
ax[0, 1].plot(ts, qt, '-r', label='q')
ax[0, 1].plot(ts, rt, '--g', label='r')
ax[0, 1].plot(ts, vis*sc, '-b', label='visibility')
ax[0, 1].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[0, 1].set_title('heading')
ax[0, 1].legend()
sc = np.max([np.max(qrx), np.max(qry), np.max(rrx), np.max(rry)])
sc = max(1., sc)
ax[0, 2].plot(ts, qrx, '-r', label='qx')
ax[0, 2].plot(ts, rrx, '--g', label='rx')
ax[0, 2].plot(ts, qry, '-m', label='qy')
ax[0, 2].plot(ts, rry, '--c', label='ry')
ax[0, 2].plot(ts, vis*sc, '-b', label='visibility')
ax[0, 2].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[0, 2].set_title('contact point')
ax[0, 2].legend()
sc = np.max([np.max(qnx), np.max(qny), np.max(rnx), np.max(rny)])
sc = max(1., sc)
ax[1, 2].plot(ts, qnx, '-r', label='qx')
ax[1, 2].plot(ts, rnx, '--g', label='rx')
ax[1, 2].plot(ts, qny, '-m', label='qy')
ax[1, 2].plot(ts, rny, '--c', label='ry')
ax[1, 2].plot(ts, vis*sc, '-b', label='visibility')
ax[1, 2].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[1, 2].set_title('normal')
ax[1, 2].legend()
sc = np.max([np.max(qmu), np.max(ql)])
sc = max(1., sc)
ax[1, 0].plot(ts, qmu, '-r', label='qmu')
ax[1, 0].plot(ts, ql, '-m', label='ql')
ax[1, 0].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[1, 0].set_title('friction')
ax[1, 0].legend()
sc = np.max([np.max(qs), np.max(rs)])
sc = max(1., sc)
ax[1, 1].plot(ts, qs, '-r', label='q')
ax[1, 1].plot(ts, rs, '--g', label='r')
ax[1, 1].plot(ts, vis*sc, '-b', label='visibility')
ax[1, 1].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[1, 1].set_title('contact')
ax[1, 1].legend()
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.85,
wspace=0.1, hspace=0.3)
fig.savefig(os.path.join(out_dir, str(num) + "_noise"),
bbox_inches="tight")
log_file = open(os.path.join(out_dir, str(num) + '_seq.csv'), 'w')
keys = ['t', 'x', 'y', 'or', 'l', 'mu', 'rx', 'ry', 'nx', 'ny', 's',
'x_p', 'y_p', 'or_p', 'l_p', 'mu_p', 'rx_p', 'ry_p', 'nx_p',
'ny_p', 's_p']
if cov_pred is not None and z is not None:
keys += ['x_c', 'y_c', 'or_c', 'l_c', 'mu_c', 'rx_c', 'ry_c',
'nx_c', 'ny_c', 's_c', 'x_ob', 'y_ob', 'or_ob', 'rx_ob',
'ry_ob', 'nx_ob', 'ny_ob', 's_ob']
log = csv.DictWriter(log_file, keys)
log.writeheader()
for t in ts:
row = {'t': t, 'x': seq[t, 0], 'y': seq[t, 1], 'or': seq[t, 2],
'l': seq[t, 3], 'mu': seq[t, 4], 'rx': seq[t, 5],
'ry': seq[t, 6], 'nx': seq[t, 7], 'ny': seq[t, 8],
's': seq[t, 9],
'x_p': seq_pred[t, 0], 'y_p': seq_pred[t, 1],
'or_p': seq_pred[t, 2], 'l_p': seq_pred[t, 3],
'mu_p': seq_pred[t, 4], 'rx_p': seq_pred[t, 5],
'ry_p': seq_pred[t, 6], 'nx_p': seq_pred[t, 7],
'ny_p': seq_pred[t, 8], 's_p': seq_pred[t, 9],
'x_c': cx[t], 'y_c': cy[t], 'or_c': ct[t], 'l_c': cl[t],
'mu_c': cmu[t], 'rx_c': crx[t], 'ry_c': cry[t],
'nx_c': cnx[t], 'ny_c': cny[t], 's_c': cs[t],
'x_ob': pos_obs[t, 0], 'y_ob': pos_obs[t, 1],
'or_ob': or_obs[t], 'rx_ob': r_obs[t, 0],
'ry_ob': r_obs[t, 1], 'nx_ob': n_obs[t, 0],
'ny_ob': n_obs[t, 1], 's_ob': s_obs[t]}
log.writerow(row)
else:
log = csv.DictWriter(log_file, keys)
log.writeheader()
for t in ts:
row = {'t': t, 'x': seq[t, 0], 'y': seq[t, 1], 'or': seq[t, 2],
'l': seq[t, 3], 'mu': seq[t, 4], 'rx': seq[t, 5],
'ry': seq[t, 6], 'nx': seq[t, 7], 'ny': seq[t, 8],
's': seq[t, 9],
'x_p': seq_pred[t, 0], 'y_p': seq_pred[t, 1],
'or_p': seq_pred[t, 2], 'l_p': seq_pred[t, 3],
'mu_p': seq_pred[t, 4], 'rx_p': seq_pred[t, 5],
'ry_p': seq_pred[t, 6], 'nx_p': seq_pred[t, 7],
'ny_p': seq_pred[t, 8], 's_p': seq_pred[t, 9]}
log.writerow(row)
log_file.close()
# save debug output
if full_out:
name = os.path.join(out_dir, str(num))
np.save(name + '_init', init)
np.save(name + '_true', seq)
np.save(name + '_pred', seq_pred)
np.save(name + '_obs', z)
np.save(name + '_c', cov_pred)
np.save(name + '_q', q_pred)
np.save(name + '_r', r_pred)
np.save(name + '_vis', vis)
np.save(name + '_u', actions)
np.save(name + '_ob', ob)
def plot_trajectory(self, particles, weights, seq, cov_pred, seq_pred,
ob, out_dir, num):
if particles is not None:
particles = particles.reshape(self.sl, -1, self.dim_x)
weights = weights.reshape(self.sl, -1)
if cov_pred is not None:
cov_pred = cov_pred.reshape(self.sl, self.dim_x, self.dim_x)
# get the object shape (deal with some encoding problems)
ob = ob.item().decode("utf-8").replace('\0', '')
if 'rect' in ob:
# c-----d
# | |
# a-----b
# get the positions of the corner points
if '1' in ob:
points = [[-0.045, -0.045], [0.045, -0.045],
[0.045, 0.045], [-0.045, 0.045]]
if '2' in ob:
points = [[-0.044955, -0.05629], [0.044955, -0.05629],
[0.044955, 0.05629], [-0.044955, 0.05629]]
if '3' in ob:
points = [[-0.067505, -0.04497], [0.067505, -0.04497],
[0.067505, 0.04497], [-0.067505, 0.04497]]
elif 'tri' in ob:
# b ----- a
# |
# |
# c
# get the positions of the points
if '1' in ob:
points = [[0.045, 0.045], [-0.0809, 0.045], [0.045, -0.08087]]
if '2' in ob:
points = [[0.045, 0.045], [-0.106, 0.045], [0.045, -0.08087]]
if '3' in ob:
points = [[0.045, 0.045], [-0.1315, 0.045], [0.045, -0.08061]]
elif 'ellip' in ob:
if '1' in ob:
a = 0.0525
b = 0.0525
elif '2' in ob:
a = 0.0525
b = 0.065445
elif '3' in ob:
a = 0.0525
b = 0.0785
elif 'hex' in ob:
points = []
for i in range(6):
theta = (np.pi/3)*i
points += [[0.06050*np.cos(theta),
0.06050*np.sin(theta)]]
elif 'butter' in ob:
points = self.butter_points[:]
pos_pred = np.squeeze(seq_pred[:, :2])
minx = min(np.min(seq[:, 0]), np.min(pos_pred[:, 0]))
miny = min(np.min(seq[:, 1]), np.min(pos_pred[:, 1]))
maxx = max(np.max(seq[:, 0]), np.max(pos_pred[:, 0]))
maxy = max(np.max(seq[:, 1]), np.max(pos_pred[:, 1]))
fig, ax = plt.subplots(figsize=[15, 15])
ax.set_aspect('equal')
fig2, ax2 = plt.subplots(figsize=[17, 17])
ax2.set_aspect('equal')
for i in range(self.sl - 1):
if cov_pred is not None:
# plot the confidence ellipse
vals, vecs = self._eigsorted(cov_pred[i, :2, :2])
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
width, height = 4 * np.sqrt(vals)
ellip = Ellipse(xy=pos_pred[i], width=width, height=height,
angle=theta, alpha=0.1)
ax.add_artist(ellip)
if particles is not None:
# sort the particles by weight
p = weights[i].argsort()
par = particles[i][p]
wei = weights[i][p]
# plot the 20 best weighted particles with colour depending on
# weight
if i == 0:
ax.scatter(par[:20, 0], par[:20, 1],
c=wei[:20], cmap='jet', marker='x',
alpha=0.5, label='particles')
else:
ax.scatter(par[:20, 0], par[:20, 1],
c=wei[:20], cmap='jet', marker='x',
alpha=0.5)
# plot a marker for the starting point of the sequence
if i == 0:
ax.plot(seq[i, 0], seq[i, 1], 'cx', markersize=15.,
label='start')
# plot the mean trajectory
ax.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r',
label='predicted')
# plot the real trajectory
ax.plot([seq[i, 0], seq[i+1, 0]], [seq[i, 1], seq[i+1, 1]],
'-g', label='true')
ax2.plot(seq[i, 0], seq[i, 1], 'cx', markersize=15.,
label='start')
# plot the mean trajectory
ax2.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r',
label='predicted')
# plot the real trajectory
ax2.plot([seq[i, 0], seq[i+1, 0]], [seq[i, 1], seq[i+1, 1]],
'-g', label='true')
else:
# plot the mean trajectory
ax.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r')
# plot the real trajectory
ax.plot([seq[i, 0], seq[i+1, 0]],
[seq[i, 1], seq[i+1, 1]], '-g')
# plot the mean trajectory
ax2.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r')
# plot the real trajectory
ax2.plot([seq[i, 0], seq[i+1, 0]],
[seq[i, 1], seq[i+1, 1]], '-g')
# plot the mean trajectory
ax.plot(pos_pred[i, 0], pos_pred[i, 1], 'ro')
ax.plot(seq[i, 0], seq[i, 1], 'go')
if i % 5 == 0:
if 'ellip' in ob:
ax2.add_artist(Ellipse((pos_pred[i, 0], pos_pred[i, 1]),
2*a*1000, 2*b*1000, seq_pred[i, 2],
alpha=0.1, facecolor='r',
edgecolor='r'))
ax2.add_artist(Ellipse((seq[i, 0], seq[i, 1]),
2*a*1000, 2*b*1000, seq[i, 2],
alpha=0.1, facecolor='g',
edgecolor='g'))
else:
r_p = np.zeros((2, 2))
r_pred = seq_pred[i, 2]*np.pi/180.
r_p[0, 0] = np.cos(r_pred)
r_p[0, 1] = -np.sin(r_pred)
r_p[1, 0] = np.sin(r_pred)
r_p[1, 1] = np.cos(r_pred)
r_l = np.zeros((2, 2))
r_la = seq[i, 2]*np.pi/180.
r_l[0, 0] = np.cos(r_la)
r_l[0, 1] = -np.sin(r_la)
r_l[1, 0] = np.sin(r_la)
r_l[1, 1] = np.cos(r_la)
points_p = []
points_l = []
for p in points:
# rotate and translate the points according to the
# object's pose
pt = np.array(p).reshape(2, 1) * 1000
points_p += [np.dot(r_p, pt).reshape(2)+pos_pred[i]]
points_l += [np.dot(r_l, pt).reshape(2)+seq[i, :2]]
ax2.add_artist(Polygon(points_p, alpha=0.1, facecolor='r',
edgecolor='r'))
ax2.add_artist(Polygon(points_l, alpha=0.1, facecolor='g',
edgecolor='g'))
ax.legend()
# plot the last step
if cov_pred is not None:
vals, vecs = self._eigsorted(cov_pred[-1, :2, :2])
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
width, height = 4 * np.sqrt(vals)
ellip = Ellipse(xy=pos_pred[-1], width=width, height=height,
angle=theta, alpha=0.1)
ax.add_artist(ellip)
# plot the mean trajectory
ax.plot(pos_pred[-1, 0], pos_pred[-1, 1], 'ro')
# plot the real trajectory
ax.plot(seq[-1, 0], seq[-1, 1], 'go')
if particles is not None:
p = weights[-1].argsort()
par = particles[-1][p]
wei = weights[-1][p]
# plot the particles with colour depending on weight
ax.scatter(par[:20, 0], par[:20, 1],
c=wei[:20], cmap='jet', marker='x', alpha=0.5)
fig.savefig(os.path.join(out_dir, str(num) + "_tracking_2d"),
bbox_inches="tight")
ax2.set_xlim([minx-100, maxx+100])
ax2.set_ylim([miny-100, maxy+100])
fig2.savefig(os.path.join(out_dir, str(num) + "_tracking_vis"),
bbox_inches="tight")
class SegmentationLayer(BaseLayer):
def __init__(self, batch_size, normalize, summary, trainable):
super(SegmentationLayer, self).__init__()
self.summary = summary
self.batch_size = batch_size
self.normalize = normalize
# load a plane image for reprojecting
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
path = os.path.join(path, 'resources', 'plane_image.npy')
self.plane_depth = \
tf.convert_to_tensor(np.load(path))[None, :, :, None]
self.plane_depth = tf.tile(self.plane_depth,
[self.batch_size, 1, 1, 1])
# segmenting the image
self.im_c1 = self._conv_layer('segment/conv1', 7, 8,
trainable=trainable)
self.im_c2 = self._conv_layer('segment/conv2', 5, 16,
trainable=trainable)
self.im_c3 = self._conv_layer('segment/conv3', 3, 32,
trainable=trainable)
self.im_d1 = self._deconv_layer('segment/deconv1', 13, 16,
trainable=trainable)
self.im_d2 = self._deconv_layer('segment/deconv2', 3, 8,
trainable=trainable)
self.im_d3 = self._deconv_layer('segment/deconv3', 3, 1,
activation=None, trainable=trainable)
if self.normalize == 'layer':
self.im_n1 =\
tf.keras.layers.LayerNormalization(name='segment/norm1',
trainable=trainable)
self.im_n2 =\
tf.keras.layers.LayerNormalization(name='segment/norm2',
trainable=trainable)
self.im_n3 =\
tf.keras.layers.LayerNormalization(name='segment/norm3',
trainable=trainable)
self.im_n4 = \
tf.keras.layers.LayerNormalization(name='segment/norm4',
trainable=trainable)
self.im_n5 = \
tf.keras.layers.LayerNormalization(name='segment/norm5',
trainable=trainable)
elif self.normalize == 'batch':
self.im_n1 =\
tf.keras.layers.BatchNormalization(name='segment/norm1',
trainable=trainable)
self.im_n2 =\
tf.keras.layers.BatchNormalization(name='segment/norm2',
trainable=trainable)
self.im_n3 =\
tf.keras.layers.BatchNormalization(name='segment/norm3',
trainable=trainable)
self.im_n4 = \
tf.keras.layers.BatchNormalization(name='segment/norm4',
trainable=trainable)
self.im_n5 = \
tf.keras.layers.BatchNormalization(name='segment/norm5',
trainable=trainable)
self.updateable = [self.im_n1, self.im_n2, self.im_n3, self.im_n4,
self.im_n5]
def call(self, inputs, training):
# unpack the inputs
images = inputs[:, :, :, 0:3]
coords = inputs[:, :, :, 3:]
height = images.get_shape()[1].value
width = images.get_shape()[2].value
# disable the topmost name scope so that the summaries don't all end
# up under one tab in tensorboard
with tf.name_scope(""):
# segment the image
with tf.name_scope('segment'):
conv1 = self.im_c1(inputs)
conv1 = tf.nn.max_pool2d(conv1, 3, 2, padding='SAME')
if self.normalize == 'layer':
conv1 = self.im_n1(conv1)
elif self.normalize == 'batch':
conv1 = self.im_n1(conv1, training)
conv2 = self.im_c2(conv1)
conv2 = tf.nn.max_pool2d(conv2, 3, 2, padding='SAME')
if self.normalize == 'layer':
conv2 = self.im_n2(conv2)
elif self.normalize == 'batch':
conv2 = self.im_n2(conv2, training)
conv3 = self.im_c3(conv2)
conv3 = tf.nn.max_pool2d(conv3, 5, 4, padding='SAME')
if self.normalize == 'layer':
conv3 = self.im_n3(conv3)
elif self.normalize == 'batch':
conv3 = self.im_n3(conv3, training)
deconv1 = self.im_d1(conv3)
deconv1 = tf.image.resize(deconv1, conv2.get_shape()[1:3])
deconv1 = deconv1 + conv2
if self.normalize == 'layer':
deconv1 = self.im_n4(deconv1)
elif self.normalize == 'batch':
deconv1 = self.im_n4(deconv1, training)
deconv2 = self.im_d2(deconv1)
deconv2 = tf.image.resize(deconv2, [height // 2, width // 2])
if self.normalize == 'layer':
deconv2 = self.im_n5(deconv2)
elif self.normalize == 'batch':
deconv2 = self.im_n5(deconv2, training)
mask_out = self.im_d3(deconv2)
mask = tf.image.resize(mask_out, [height, width])
if self.summary:
if self.normalize == 'batch':
tf.summary.histogram('n1_mean', self.im_n1.moving_mean)
tf.summary.histogram('n1_var',
self.im_n1.moving_variance)
tf.summary.image('rgb', images[:, :, :, :3])
tf.summary.image('depth', coords[:, :, :, -1:])
tf.summary.image('conv1_im', conv1[0:1, :, :, 0:1])
tf.summary.histogram('conv1_out', conv1)
tf.summary.image('conv2_im', conv2[0:1, :, :, 0:1])
tf.summary.histogram('conv2_out', conv2)
tf.summary.image('conv3_im', conv3[0:1, :, :, 0:1])
tf.summary.histogram('conv3_out', conv3)
tf.summary.image('deconv1_im', deconv1[0:1, :, :, 0:1])
tf.summary.histogram('deconv1_out', deconv1)
tf.summary.image('deconv2_im', deconv2[0:1, :, :, 0:1])
tf.summary.histogram('deconv2_out', deconv2)
tf.summary.image('mask', mask_out[0:1])
# predict the object position
pos_pix = self._spatial_softmax(mask, 'pos', method='softmax',
summary=self.summary)
pos_pix = tf.reshape(pos_pix, [self.batch_size, 2])
pos = utils._to_3d(pos_pix, self.plane_depth)
# extract the glimpses for rotation estimation and parameter
# estimation
coords_rot = tf.concat([pos_pix[:, 1:2] * 2, pos_pix[:, 0:1] * 2],
axis=1)
glimpse_rot = \
tf.image.extract_glimpse(images, size=[72, 72],
offsets=coords_rot,
centered=True, normalized=False)
return [mask_out, pos, glimpse_rot], pos_pix
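# sketch of the spatial softmax used above (the BaseLayer helper is
# not shown here; this is the standard formulation and an assumption
# about its internals):
# B, H, W = self.batch_size, mask.shape[1], mask.shape[2]
# w = tf.nn.softmax(tf.reshape(mask, [B, H * W]), axis=-1)
# w = tf.reshape(w, [B, H, W])
# xs = tf.linspace(0., tf.cast(W - 1, tf.float32), W)[None, None, :]
# ys = tf.linspace(0., tf.cast(H - 1, tf.float32), H)[None, :, None]
# pos_x = tf.reduce_sum(w * xs, axis=[1, 2])  # expected pixel column
# pos_y = tf.reduce_sum(w * ys, axis=[1, 2])  # expected pixel row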
class SensorLayer(BaseLayer):
def __init__(self, batch_size, normalize, scale, summary, trainable):
super(SensorLayer, self).__init__()
self.summary = summary
self.batch_size = batch_size
self.scale = scale
self.normalize = normalize
# load a plane image for reprojecting
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
path = os.path.join(path, 'resources', 'plane_image.npy')
self.plane_depth = \
tf.convert_to_tensor(np.load(path))[None, :, :, None]
self.plane_depth = tf.tile(self.plane_depth,
[self.batch_size, 1, 1, 1])
# processing the glimpse
self.g_c1 = self._conv_layer('glimpse/conv1', 3, 8,
trainable=trainable)
self.g_c2 = self._conv_layer('glimpse/conv2', 3, 16,
trainable=trainable)
self.g_c3 = self._conv_layer('glimpse/conv3', 3, 32,
trainable=trainable)
self.g_fc1 = self._fc_layer('glimpse/r_fc1', 128, trainable=trainable)
self.g_rfc2 = self._fc_layer('glimpse/r_fc2', 64, trainable=trainable)
self.g_r = self._fc_layer('glimpse/r', 2, activation=None,
trainable=trainable)
self.g_nfc2 = self._fc_layer('glimpse/n_fc2', 64, trainable=trainable)
self.g_n = self._fc_layer('glimpse/n', 2, activation=None,
trainable=trainable)
self.g_s = self._fc_layer('glimpse/s', 1, activation=None, bias=-0.1,
trainable=trainable)
# get the rotation
self.r_c1 = self._conv_layer('rot/conv1', 3, 32, trainable=trainable)
self.r_c2 = self._conv_layer('rot/conv2', 3, 64, trainable=trainable)
self.r_fc1 = self._fc_layer('rot/fc1', 128, trainable=trainable)
self.r_fc2 = self._fc_layer('rot/fc2', 64, trainable=trainable)
self.r_rot = self._fc_layer('rot/rot', 1, activation=None,
trainable=trainable)
if self.normalize == 'layer':
self.g_n1 = \
tf.keras.layers.LayerNormalization(name='glimpse/norm1',
trainable=trainable)
self.g_n2 = \
tf.keras.layers.LayerNormalization(name='glimpse/norm2',
trainable=trainable)
self.g_n3 = \
tf.keras.layers.LayerNormalization(name='glimpse/norm3',
trainable=trainable)
self.r_n1 = \
tf.keras.layers.LayerNormalization(name='rot/norm1',
trainable=trainable)
self.r_n2 = \
tf.keras.layers.LayerNormalization(name='rot/norm2',
trainable=trainable)
elif self.normalize == 'batch':
self.g_n1 = \
tf.keras.layers.BatchNormalization(name='glimpse/norm1',
trainable=trainable)
self.g_n2 = \
tf.keras.layers.BatchNormalization(name='glimpse/norm2',
trainable=trainable)
self.g_n3 = \
tf.keras.layers.BatchNormalization(name='glimpse/norm3',
trainable=trainable)
self.r_n1 = \
tf.keras.layers.BatchNormalization(name='rot/norm1',
trainable=trainable)
self.r_n2 = \
tf.keras.layers.BatchNormalization(name='rot/norm2',
trainable=trainable)
self.updateable = [self.g_n1, self.g_n2, self.g_n3, self.r_n1,
self.r_n2]
def call(self, inputs, training):
# unpack the inputs
pc, tip_pos, tip_pix, tip_pix_end, start_glimpse, mask, pos, \
glimpse_rot = inputs
# split the point cloud into rgb image and coordinate channels
image = pc[:, :, :, 0:3]
coord = pc[:, :, :, 3:]
# disable the topmost name scope so that the summaries don't all end
# up under one tab in tensorboard
with tf.name_scope(""):
# predict the orientation
with tf.name_scope('rot'):
# in_data = tf.concat([glimpse_rot, start_glimpse], axis=-1)
in_data = start_glimpse - glimpse_rot
rot_conv1 = self.r_c1(in_data)
if self.normalize == 'layer':
rot_conv1 = self.r_n1(rot_conv1)
elif self.normalize == 'batch':
rot_conv1 = self.r_n1(rot_conv1, training)
rot_conv1 = tf.nn.max_pool2d(rot_conv1, 3, 2, padding='VALID')
rot_conv2 = self.r_c2(rot_conv1)
if self.normalize == 'layer':
rot_conv2 = self.r_n2(rot_conv2)
elif self.normalize == 'batch':
rot_conv2 = self.r_n2(rot_conv2, training)
rot_conv2 = tf.nn.max_pool2d(rot_conv2, 3, 2, padding='VALID')
rot_fc1 = self.r_fc1(tf.reshape(rot_conv2,
[self.batch_size, -1]))
rot_fc2 = self.r_fc2(rot_fc1)
rot = self.r_rot(rot_fc2)
if self.summary:
tf.summary.image('glimpse_rot',
glimpse_rot[0:1, :, :, :3])
tf.summary.image('glimpse_start',
start_glimpse[0:1, :, :, :3])
tf.summary.image('conv1_im', rot_conv1[0:1, :, :, 0:1])
tf.summary.histogram('conv1_out', rot_conv1)
tf.summary.image('conv2_im', rot_conv2[0:1, :, :, 0:1])
tf.summary.histogram('conv2_out', rot_conv2)
tf.summary.histogram('fc1_out', rot_fc1)
tf.summary.histogram('fc2_out', rot_fc2)
tf.summary.histogram('rot_out', rot)
# process the glimpse
with tf.name_scope('glimpse'):
tip_pix_x = tf.slice(tip_pix, [0, 0], [-1, 1]) * 2
tip_pix_y = tf.slice(tip_pix, [0, 1], [-1, 1]) * 2
coords = tf.concat([tip_pix_y, tip_pix_x], axis=1)
glimpse = \
tf.image.extract_glimpse(coord, size=[64, 64],
offsets=coords,
centered=True, normalized=False)
im_glimpse = \
tf.image.extract_glimpse(image, size=[64, 64],
offsets=coords,
centered=True, normalized=False)
# subtract the tip pose to normalize the z coordinates
glimpse -= tip_pos[:, None, None, :]
in_g = tf.concat([im_glimpse, glimpse], axis=-1)
g_conv1 = self.g_c1(in_g)
g_conv1 = tf.nn.max_pool2d(g_conv1, 3, 2, padding='VALID')
if self.normalize == 'layer':
g_conv1 = self.g_n1(g_conv1)
elif self.normalize == 'batch':
g_conv1 = self.g_n1(g_conv1, training)
g_conv2 = self.g_c2(g_conv1)
g_conv2 = tf.nn.max_pool2d(g_conv2, 3, 2, padding='VALID')
if self.normalize == 'layer':
g_conv2 = self.g_n2(g_conv2)
elif self.normalize == 'batch':
g_conv2 = self.g_n2(g_conv2, training)
g_conv3 = self.g_c3(g_conv2)
# g_conv3 = tf.nn.max_pool2d(g_conv3, 3, 2, padding='VALID')
if self.normalize == 'layer':
g_conv3 = self.g_n3(g_conv3)
elif self.normalize == 'batch':
g_conv3 = self.g_n3(g_conv3, training)
glimpse_encoding = tf.reshape(g_conv3, [self.batch_size, -1])
# add the action
pix_u = tf.concat([tip_pix_end - tip_pix, tip_pix], axis=1)
glimpse_encoding = tf.concat([glimpse_encoding, pix_u],
axis=-1)
# extract contact point and push velocity from the glimpse
g_fc1 = self.g_fc1(glimpse_encoding)
g_rfc2 = self.g_rfc2(g_fc1)
r_pix = self.g_r(g_rfc2)
# add the tip's global pixel position to the local estimate and
# project back to 3d (using the tip's depth where necessary)
r_pix = r_pix + tip_pix
# r = utils._to_3d(r_pix, self.plane_depth)
r = utils._to_3d_d(r_pix, coord[:, :, :, -1:], tip_pos)
g_nfc2 = self.g_nfc2(g_fc1)
n_pix = self.g_n(g_nfc2)
# calculate the pixel end point to get the z-value
# for projecting the predicted normal from pixels to 3d
n_end_pix = tf.stop_gradient(r_pix) + n_pix
# n_end = utils._to_3d(n_end_pix, self.plane_depth)
n_end = utils._to_3d_d(n_end_pix, coord[:, :, :, -1:],
tip_pos)
n = n_end - tf.stop_gradient(r)
# get the contact annotation
s = self.g_s(glimpse_encoding)
s = tf.nn.sigmoid(s)
# here we have to adapt the observations to the scale, since
# the network can't learn it itself due to the sigmoid
s = s / self.scale
if self.summary:
tf.summary.image('glimpse_z', glimpse[0:1, :, :, -1:])
tf.summary.image('glimpse_rgb', im_glimpse[0:1])
tf.summary.image('conv1_im', g_conv1[0:1, :, :, 0:1])
tf.summary.histogram('conv1_out', g_conv1)
tf.summary.image('conv2_im', g_conv2[0:1, :, :, 0:1])
tf.summary.histogram('conv2_out', g_conv2)
tf.summary.image('conv3_im', g_conv3[0:1, :, :, 0:1])
tf.summary.histogram('g_fc1_out', g_fc1)
tf.summary.histogram('g_rfc2_out', g_rfc2)
tf.summary.histogram('r_pix_out', r_pix)
tf.summary.histogram('g_nfc2_out', g_nfc2)
tf.summary.histogram('n_pix_out', n_pix)
tf.summary.histogram('n_end_pix_out', n_end_pix)
# assemble the observations: remove the z (up) coordinate,
# convert to millimeters, normalize
n_norm = tf.linalg.norm(n[:, :2], axis=1, keepdims=True)
n = tf.where(tf.greater(tf.squeeze(n_norm), 1e-5),
n[:, :2] / n_norm, n[:, :2])
n = tf.where(tf.greater_equal(tf.tile(s, [1, 2]), 0.5), n, 0 * n)
# we only care about the position in the table plane
r = r[:, :2] * 1000. / self.scale
n = n[:, :2] / self.scale
pos = pos[:, :2] * 1000. / self.scale
z = tf.concat([pos, rot, r, n, s], axis=-1)
if self.summary:
tf.summary.scalar('r_x', r[0, 0])
tf.summary.scalar('r_y', r[0, 1])
tf.summary.scalar('n_x', n[0, 0])
tf.summary.scalar('n_y', n[0, 1])
tf.summary.scalar('o_x', pos[0, 0])
tf.summary.scalar('o_y', pos[0, 1])
tf.summary.scalar('t_x', tip_pos[0, 0])
tf.summary.scalar('t_y', tip_pos[0, 1])
tf.summary.scalar('s', s[0, 0])
tf.summary.scalar('rot', rot[0, 0])
return z, [mask, rot_fc2, g_fc1]
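# layout of the observation vector z assembled above (dim_z == 8):
# [pos_x, pos_y, rot, r_x, r_y, n_x, n_y, s] -- object position and
# contact point in scaled millimeters, rot in scaled degrees, the
# contact indicator s in [0, 1/scale]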
class ObservationNoise(BaseLayer):
def __init__(self, batch_size, dim_z, r_diag, scale, hetero, diag,
trainable, summary):
super(ObservationNoise, self).__init__()
self.hetero = hetero
self.diag = diag
self.batch_size = batch_size
self.dim_z = dim_z
self.scale = scale
self.r_diag = r_diag
self.summary = summary
self.trainable = trainable
def build(self, input_shape):
init_const = np.ones(self.dim_z) * 1e-3 / self.scale**2
init = np.sqrt(np.maximum(np.square(self.r_diag) - init_const, 0))
# the constant bias keeps the predicted covariance away from zero
self.bias_fixed = \
self.add_weight(name='bias_fixed', shape=[self.dim_z],
trainable=False,
initializer=tf.constant_initializer(init_const))
num = self.dim_z * (self.dim_z + 1) // 2
wd = 1e-3 * self.scale**2
if self.hetero and self.diag:
# for heteroscedastic noise with diagonal covariance matrix
# position
self.het_diag_pos_c1 = self._conv_layer('het_diag_pos_c1', 5, 16,
stride=[2, 2],
trainable=self.trainable)
self.het_diag_pos_c2 = self._conv_layer('het_diag_pos_c2', 3, 32,
stride=[2, 2],
trainable=self.trainable)
self.het_diag_pos_fc1 = self._fc_layer('het_diag_pos_fc1', 64,
trainable=self.trainable)
self.het_diag_pos_fc2 = self._fc_layer('het_diag_pos_fc2', 2,
mean=0, std=1e-3,
activation=None,
trainable=self.trainable)
# rotation, normal, contact point and contact
self.het_diag_rot_fc = self._fc_layer('het_diag_rot_fc', 1,
mean=0, std=1e-3,
activation=None,
trainable=self.trainable)
self.het_diag_fc1 = self._fc_layer('het_diag_fc1', 64, std=1e-4,
trainable=self.trainable)
self.het_diag_fc2 = self._fc_layer('het_diag_fc2', 32, std=1e-3,
trainable=self.trainable)
self.het_diag_fc3 = self._fc_layer('het_diag_fc3', 5, std=1e-2,
activation=None,
trainable=self.trainable)
self.het_diag_init_bias = \
self.add_weight(name='het_diag_init_bias',
shape=[self.dim_z],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and self.diag:
# for constant noise with diagonal covariance matrix
self.const_diag = \
self.add_weight(name='const_diag',
shape=[self.dim_z],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and not self.diag:
# for heteroscedastic noise with full covariance matrix
self.het_full_pos_c1 = self._conv_layer('het_full_pos_c1', 5, 16,
stride=[2, 2],
trainable=self.trainable)
self.het_full_pos_c2 = self._conv_layer('het_full_pos_c2', 3, 32,
stride=[2, 2],
trainable=self.trainable)
self.het_full_pos_fc = self._fc_layer('het_full_pos_fc',
self.dim_z,
trainable=self.trainable)
# rotation, normal, contact point and contact
self.het_full_rot_fc = self._fc_layer('het_full_rot_fc',
self.dim_z,
trainable=self.trainable)
self.het_full_g_fc1 = self._fc_layer('het_full_g_fc1', 64,
std=1e-3,
trainable=self.trainable)
self.het_full_g_fc2 = self._fc_layer('het_full_g_fc2', 32,
trainable=self.trainable)
self.het_full_fc1 = self._fc_layer('het_full_fc1', 64, std=1e-3,
trainable=self.trainable)
self.het_full_fc2 = \
self._fc_layer('het_full_fc2', num,
activation=None, trainable=self.trainable)
self.het_full_init_bias = \
self.add_weight(name='het_full_init_bias',
shape=[self.dim_z], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
else:
# for constant noise with full covariance matrix
self.const_full = \
self.add_weight(name='const_tri', shape=[num],
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(0.),
trainable=self.trainable)
self.const_full_init_bias = \
self.add_weight(name='const_full_init_bias',
shape=[self.dim_z],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
def call(self, inputs, training):
mask, rot_encoding, glimpse_encoding, pix = inputs
if self.hetero and self.diag:
het_diag_pos_c1 = self.het_diag_pos_c1(mask)
het_diag_pos_c2 = self.het_diag_pos_c2(het_diag_pos_c1)
het_diag_pos_c2 = tf.reshape(het_diag_pos_c2,
[self.batch_size, -1])
het_diag_pos_fc1 = self.het_diag_pos_fc1(het_diag_pos_c2)
het_diag_pos = self.het_diag_pos_fc2(het_diag_pos_fc1)
# rotation, normal, contact point and contact
het_diag_rot = self.het_diag_rot_fc(rot_encoding)
het_diag_fc1 = self.het_diag_fc1(glimpse_encoding)
het_diag_fc2 = self.het_diag_fc2(het_diag_fc1)
het_diag_rns = self.het_diag_fc3(het_diag_fc2)
diag = tf.concat([het_diag_pos, het_diag_rot, het_diag_rns],
axis=-1)
if self.summary:
tf.summary.image('het_diag_pos_c1_im',
het_diag_pos_c1[0:1, :, :, 0:1])
tf.summary.histogram('het_diag_pos_c1_out', het_diag_pos_c1)
tf.summary.histogram('het_diag_pos_c2_out', het_diag_pos_c2)
tf.summary.histogram('het_diag_pos_fc1_out', het_diag_pos_fc1)
tf.summary.histogram('het_diag_pos_fc2_out', het_diag_pos)
tf.summary.histogram('het_diag_rot_fc_out', het_diag_rot)
tf.summary.histogram('het_diag_rns_fc1_out', het_diag_fc1)
tf.summary.histogram('het_diag_rns_fc2_out', het_diag_fc2)
tf.summary.histogram('het_diag_rns_fc3_out', het_diag_rns)
tf.summary.histogram('het_diag_out', diag)
diag = tf.square(diag + self.het_diag_init_bias)
diag += self.bias_fixed
R = tf.linalg.diag(diag)
elif not self.hetero and self.diag:
diag = self.const_diag
diag = tf.square(diag) + self.bias_fixed
R = tf.linalg.tensor_diag(diag)
R = tf.tile(R[None, :, :], [self.batch_size, 1, 1])
elif self.hetero and not self.diag:
het_full_pos_c1 = self.het_full_pos_c1(mask)
het_full_pos_c2 = self.het_full_pos_c2(het_full_pos_c1)
het_full_pos_c2 = tf.reshape(het_full_pos_c2,
[self.batch_size, -1])
het_full_pos = self.het_full_pos_fc(het_full_pos_c2)
# rotation, normal, contact point and contact
het_full_rot = self.het_full_rot_fc(rot_encoding)
het_full_g1 = self.het_full_g_fc1(glimpse_encoding)
het_full_g2 = self.het_full_g_fc2(het_full_g1)
input_data = tf.concat([het_full_pos, het_full_rot, het_full_g2],
axis=-1)
het_full_fc1 = self.het_full_fc1(input_data)
tri = self.het_full_fc2(het_full_fc1)
if self.summary:
tf.summary.image('het_full_pos_c1_im',
het_full_pos_c1[0:1, :, :, 0:1])
tf.summary.histogram('het_full_pos_c1_out', het_full_pos_c1)
tf.summary.histogram('het_full_pos_c2_out', het_full_pos_c2)
tf.summary.histogram('het_full_pos_fc_out', het_full_pos)
tf.summary.histogram('het_full_rot_fc_out', het_full_rot)
tf.summary.histogram('het_full_g_fc1_out', het_full_g1)
tf.summary.histogram('het_full_g_fc2_out', het_full_g2)
tf.summary.histogram('het_full_fc1_out', het_full_fc1)
tf.summary.histogram('het_tri_out', tri)
R = compat.fill_triangular(tri)
R += tf.linalg.diag(self.het_full_init_bias)
R = tf.matmul(R, tf.linalg.matrix_transpose(R))
R = R + tf.linalg.diag(self.bias_fixed)
else:
tri = self.const_full
R = compat.fill_triangular(tri)
R += tf.linalg.diag(self.const_full_init_bias)
R = tf.matmul(R, tf.linalg.matrix_transpose(R))
R = R + tf.linalg.diag(self.bias_fixed)
R = tf.tile(R[None, :, :], [self.batch_size, 1, 1])
return R
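# note on the full-covariance branches above: the network predicts
# the dim_z * (dim_z + 1) // 2 entries of a triangular factor L (via
# compat.fill_triangular), and
#     R = L L^T + diag(bias_fixed)
# is symmetric with a strictly positive diagonal by construction, so
# the filter never receives a degenerate observation covariance.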
class Likelihood(BaseLayer):
def __init__(self, dim_z, trainable, summary):
super(Likelihood, self).__init__()
self.summary = summary
self.dim_z = dim_z
self.trainable = trainable
self.like_pos_c1 = self._conv_layer('like_pos_c1', 5, 16,
stride=[2, 2],
trainable=self.trainable)
self.like_pos_c2 = self._conv_layer('like_pos_c2', 3, 32,
trainable=self.trainable)
self.like_pos_fc = self._fc_layer('like_pos_fc', 2*self.dim_z,
trainable=self.trainable)
# rotation, normal, contact point and contact
self.like_rot_fc = self._fc_layer('like_rot_fc', self.dim_z,
trainable=self.trainable)
self.like_rns_fc1 = self._fc_layer('like_rns_fc1', 128,
trainable=self.trainable)
self.like_rns_fc2 = self._fc_layer('like_rns_fc2', 5*self.dim_z,
trainable=self.trainable)
self.fc1 = self._fc_layer('fc1', 128, trainable=trainable)
self.fc2 = self._fc_layer('fc2', 128, trainable=trainable)
self.fc3 = self._fc_layer('fc3', 1, trainable=trainable,
activation=tf.nn.sigmoid)
def call(self, inputs, training):
# unpack the inputs
particles, encoding = inputs
bs = particles.get_shape()[0].value
num_pred = particles.get_shape()[1].value
# diff, encoding = inputs
mask, rot_encoding, glimpse_encoding, pix = encoding
# preprocess the encodings
# mask
pos_c1 = self.like_pos_c1(mask)
pos_c2 = self.like_pos_c2(pos_c1)
pos_c2 = tf.reshape(pos_c2, [bs, -1])
pos_fc = self.like_pos_fc(pos_c2)
# rotation, normal, contact point and contact
rot_fc = self.like_rot_fc(rot_encoding)
rns_fc1 = self.like_rns_fc1(glimpse_encoding)
rns_fc2 = self.like_rns_fc2(rns_fc1)
# concatenate and tile the preprocessed encoding
encoding = tf.concat([pos_fc, rot_fc, rns_fc2], axis=-1)
encoding = tf.tile(encoding[:, None, :], [1, num_pred, 1])
input_data = tf.concat([encoding, particles], axis=-1)
input_data = tf.reshape(input_data, [bs * num_pred, -1])
fc1 = self.fc1(input_data)
if self.summary:
tf.summary.histogram('fc1_out', fc1)
fc2 = self.fc2(fc1)
if self.summary:
tf.summary.histogram('fc2_out', fc2)
like = self.fc3(fc2)
if self.summary:
tf.summary.histogram('pos_c1_out', pos_c1)
tf.summary.histogram('pos_c2_out', pos_c2)
tf.summary.histogram('pos_fc_out', pos_fc)
tf.summary.histogram('rot_fc_out', rot_fc)
tf.summary.histogram('rns_fc1_out', rns_fc1)
tf.summary.histogram('rns_fc2_out', rns_fc2)
tf.summary.histogram('fc1_out', fc1)
tf.summary.histogram('fc2_out', fc2)
tf.summary.histogram('like', like)
return like
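# hedged usage sketch (how such a learned likelihood is typically
# turned into particle weights; the actual filter code lives
# elsewhere in the package):
# like = tf.reshape(like, [bs, num_pred])
# log_w = tf.math.log(tf.maximum(like, 1e-7))
# weights = tf.nn.softmax(log_w, axis=-1)   # normalized weights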
class ObservationModel(BaseLayer):
def __init__(self, dim_z, batch_size):
super(ObservationModel, self).__init__()
self.dim_z = dim_z
self.batch_size = batch_size
def call(self, inputs, training):
H = tf.concat(
[tf.tile(np.array([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]],
dtype=np.float32), [self.batch_size, 1, 1])],
axis=1)
z_pred = tf.concat([inputs[:, :3], inputs[:, 5:]], axis=1)
return z_pred, H
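# the constant H above selects the observable part of the 10-dim
# state [x, y, rot, fr, mu, cp_x, cp_y, n_x, n_y, s]: its rows pick
# indices (0, 1, 2, 5, 6, 7, 8, 9), i.e. the friction parameters fr
# and mu are not observed. an equivalent gather-based sketch:
# z_pred = tf.gather(inputs, [0, 1, 2, 5, 6, 7, 8, 9], axis=1)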
class ProcessModel(BaseLayer):
def __init__(self, batch_size, dim_x, scale, learned, jacobian,
trainable, summary):
super(ProcessModel, self).__init__()
self.summary = summary
self.batch_size = batch_size
self.dim_x = dim_x
self.learned = learned
self.jacobian = jacobian
self.scale = scale
if learned:
self.fc1 = self._fc_layer('fc1', 256, std=1e-4,
trainable=trainable)
self.fc2 = self._fc_layer('fc2', 128, trainable=trainable)
self.fc3 = self._fc_layer('fc3', 128, trainable=trainable)
self.update = self._fc_layer('fc4', self.dim_x, activation=None,
trainable=trainable)
def call(self, inputs, training):
# unpack the inputs
last_state, actions, ob = inputs
if self.learned:
fc1 = self.fc1(tf.concat([last_state, actions[:, :2]], axis=-1))
fc2 = self.fc2(fc1)
fc3 = self.fc3(fc2)
update = self.update(fc3)
# for the circular object, the orientation is always zero,
# so we have to set the prediction to 0 and adapt the
# jacobian
ob = tf.reshape(ob, [self.batch_size, 1])
bs = last_state.get_shape()[0]
ob = tf.tile(ob, [1, bs // self.batch_size])
ob = tf.reshape(ob, [-1])
ob = tf.strings.regex_replace(ob, "\000", "")
ob = tf.strings.regex_replace(ob, "\00", "")
rot_pred = update[:, 2:3]
rot_pred = tf.where(tf.equal(ob, 'ellip1'),
tf.zeros_like(rot_pred), rot_pred)
update = tf.concat([update[:, :2], rot_pred, update[:, 3:]],
axis=-1)
new_state = last_state + update
if self.summary:
tf.summary.histogram('fc1_out', fc1)
tf.summary.histogram('fc2_out', fc2)
tf.summary.histogram('fc3_out', fc3)
tf.summary.histogram('update_out', update)
if self.jacobian:
F = self._compute_jacobian(new_state, last_state)
else:
F = None
else:
if self.jacobian:
# with tf.GradientTape() as tape:
# tape.watch(last_state)
# # split the state into parts and undo the scaling
# last_state *= self.scale
# pos = last_state[:, :2]
# ori = last_state[:, 2:3]
# fr = last_state[:, 3:4]
# fr_mu = last_state[:, 4:5]
# cp = last_state[:, 5:7]
# n = last_state[:, 7:9]
# s = last_state[:, 9:]
# # undo the scaling for the actions as well
# actions *= self.scale
# # apply the analytical model to get predicted translation
# # and rotation
# tr_pred, rot_pred, keep_contact = \
# utils.physical_model(pos, cp, n, actions, fr, fr_mu, s)
# pos_pred = pos + tr_pred
# ori_pred = ori + rot_pred * 180.0/np.pi
# fr_pred = fr
# fr_mu_pred = fr_mu
# cp_pred = cp + actions
# keep_contact = tf.cast(keep_contact, tf.float32)
# n_pred = n * keep_contact
# s_pred = s * keep_contact
# # piece together the new state and apply scaling again
# new_state = \
# tf.concat([pos_pred, ori_pred, fr_pred,
# fr_mu_pred, cp_pred, n_pred, s_pred],
# axis=1) / self.scale
# # block vectorization to avoid excessive memory usage for
# # long sequences
# F = tape.batch_jacobian(new_state, last_state,
# experimental_use_pfor=False)
# split the state into parts and undo the scaling
last_state *= self.scale
pos = last_state[:, :2]
ori = last_state[:, 2:3]
fr = last_state[:, 3:4]
fr_mu = last_state[:, 4:5]
cp = last_state[:, 5:7]
n = last_state[:, 7:9]
s = last_state[:, 9:]
# undo the scaling for the actions as well
actions *= self.scale
# apply the analytical model to get predicted translation and
# rotation
tr_pred, rot_pred, keep_contact, dx, dy, dom = \
utils.physical_model_derivative(pos, cp, n, actions, fr,
fr_mu, s)
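                # dx, dy and dom are the analytical derivatives of the
                # predicted position and orientation w.r.t. the state; they
                # form the first three rows of the Jacobian F below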
# for the circular object, the orientation is always zero,
# so we have to set the prediction to 0 and adapt the
# jacobian
ob = tf.squeeze(ob)
ob = tf.strings.regex_replace(ob, "\000", "")
ob = tf.strings.regex_replace(ob, "\00", "")
rot_pred = tf.where(tf.equal(ob, 'ellip1'),
tf.zeros_like(rot_pred), rot_pred)
dom = tf.where(tf.equal(ob, 'ellip1'),
tf.zeros_like(dom), dom)
pos_pred = pos + tr_pred
ori_pred = ori + rot_pred * 180.0 / np.pi
fr_pred = fr
fr_mu_pred = fr_mu
cp_pred = cp + actions
keep_contact = tf.cast(keep_contact, tf.float32)
n_pred = n * keep_contact
s_pred = s * keep_contact
# piece together the new state and apply scaling again
new_state = \
tf.concat([pos_pred, ori_pred, fr_pred,
fr_mu_pred, cp_pred, n_pred, s_pred],
axis=1) / self.scale
# piece together the jacobian (I found this to work better than
# getting the whole jacobian from tensorflow)
dom *= 180.0 / np.pi
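                # friction parameters and contact point get identity rows;
                # the rows for the contact normal and side parameter are
                # scaled by keep_contact, so they vanish when contact is lost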
dnx = tf.concat([tf.zeros([self.batch_size, 7]),
tf.cast(keep_contact, tf.float32),
tf.zeros([self.batch_size, 2])],
axis=-1)
dny = tf.concat([tf.zeros([self.batch_size, 8]),
tf.cast(keep_contact, tf.float32),
tf.zeros([self.batch_size, 1])],
axis=-1)
ds = tf.concat([tf.zeros([self.batch_size, 9]),
tf.cast(keep_contact, tf.float32)],
axis=-1)
F = tf.concat(
[dx + np.array([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
dy + np.array([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
dom + np.array([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
tf.tile(np.array([[[0, 0, 0, 1, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 1, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.reshape(dnx, [-1, 1, self.dim_x]),
tf.reshape(dny, [-1, 1, self.dim_x]),
tf.reshape(ds, [-1, 1, self.dim_x])], axis=1)
else:
# split the state into parts and undo the scaling
last_state *= self.scale
pos = last_state[:, :2]
ori = last_state[:, 2:3]
fr = last_state[:, 3:4]
fr_mu = last_state[:, 4:5]
cp = last_state[:, 5:7]
n = last_state[:, 7:9]
s = last_state[:, 9:]
# undo the scaling for the actions as well
actions *= self.scale
# apply the analytical model to get predicted translation and
# rotation
tr_pred, rot_pred, keep_contact = \
utils.physical_model(pos, cp, n, actions, fr, fr_mu, s)
pos_pred = pos + tr_pred
ori_pred = ori + rot_pred * 180.0 / np.pi
fr_pred = fr
fr_mu_pred = fr_mu
cp_pred = cp + actions
keep_contact = tf.cast(keep_contact, tf.float32)
n_pred = n * keep_contact
s_pred = s * keep_contact
# piece together the new state and apply scaling again
new_state = \
tf.concat([pos_pred, ori_pred, fr_pred,
fr_mu_pred, cp_pred, n_pred, s_pred],
axis=1) / self.scale
F = None
if self.jacobian:
F = tf.stop_gradient(F)
return new_state, F
class ProcessNoise(BaseLayer):
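    """Predicts the process noise covariance Q.

    Q can be heteroscedastic (predicted from the last state and action by
    fully connected layers) or constant (a learned weight), with either a
    diagonal or a full covariance matrix; separate weights are kept for
    the learned and the analytical process model. Full covariances are
    formed as L L^T from a triangular factor, and the fixed bias added to
    the diagonal keeps Q away from zero.
    """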
def __init__(self, batch_size, dim_x, q_diag, scale, hetero, diag, learned,
trainable, summary):
super(ProcessNoise, self).__init__()
self.hetero = hetero
self.diag = diag
self.learned = learned
self.trainable = trainable
self.dim_x = dim_x
self.q_diag = q_diag
self.scale = scale
self.batch_size = batch_size
self.summary = summary
def build(self, input_shape):
init_const = np.ones(self.dim_x) * 1e-5 / self.scale**2
init = np.sqrt(np.square(self.q_diag) - init_const)
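        # init is chosen such that square(init) + init_const == q_diag**2,
        # i.e. the predicted noise starts out at the configured diagonal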
# the constant bias keeps the predicted covariance away from zero
self.bias_fixed = \
self.add_weight(name='bias_fixed', shape=[self.dim_x],
trainable=False,
initializer=tf.constant_initializer(init_const))
num = self.dim_x * (self.dim_x + 1) // 2
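        # number of free parameters of a triangular dim_x x dim_x matrix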
wd = 1e-3 * self.scale**2
if self.hetero and self.diag and self.learned:
# for heteroscedastic noise with diagonal covariance matrix
self.het_diag_lrn_fc1 = self._fc_layer('het_diag_lrn_fc1', 128,
trainable=self.trainable)
self.het_diag_lrn_fc2 = self._fc_layer('het_diag_lrn_fc2', 64,
trainable=self.trainable)
self.het_diag_lrn_fc3 = \
self._fc_layer('het_diag_lrn_fc3', self.dim_x, mean=0,
std=1e-3, activation=None,
trainable=self.trainable)
self.het_diag_lrn_init_bias = \
self.add_weight(name='het_diag_lrn_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and self.diag and self.learned:
# for constant noise with diagonal covariance matrix
self.const_diag_lrn = \
self.add_weight(name='const_diag_lrn', shape=[self.dim_x],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and not self.diag and self.learned:
# for heteroscedastic noise with full covariance matrix
self.het_full_lrn_fc1 = self._fc_layer('het_full_lrn_fc1', 128,
trainable=self.trainable)
self.het_full_lrn_fc2 = self._fc_layer('het_full_lrn_fc2', 64,
trainable=self.trainable)
self.het_full_lrn_fc3 = \
self._fc_layer('het_full_lrn_fc3', num, mean=0, std=1e-3,
activation=None, trainable=self.trainable)
self.het_full_lrn_init_bias = \
self.add_weight(name='het_full_lrn_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and not self.diag and self.learned:
# for constant noise with full covariance matrix
self.const_full_lrn = \
self.add_weight(name='const_tri_lrn', shape=[num],
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(0.),
trainable=self.trainable)
self.const_full_lrn_init_bias = \
self.add_weight(name='const_full_lrn_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and self.diag and not self.learned:
# for heteroscedastic noise with diagonal covariance matrix
self.het_diag_ana_fc1 = self._fc_layer('het_diag_ana_fc1', 128,
std=1e-3,
trainable=self.trainable)
self.het_diag_ana_fc2 = self._fc_layer('het_diag_ana_fc2', 64,
trainable=self.trainable)
self.het_diag_ana_fc3 = \
self._fc_layer('het_diag_ana_fc3', self.dim_x, mean=0,
std=1e-3, activation=None,
trainable=self.trainable)
self.het_diag_ana_init_bias = \
self.add_weight(name='het_diag_ana_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and self.diag and not self.learned:
# for constant noise with diagonal covariance matrix
self.const_diag_ana = \
self.add_weight(name='const_diag_ana', shape=[self.dim_x],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and not self.diag and not self.learned:
# for heteroscedastic noise with full covariance matrix
self.het_full_ana_fc1 = self._fc_layer('het_full_ana_fc1', 128,
std=1e-3,
trainable=self.trainable)
self.het_full_ana_fc2 = self._fc_layer('het_full_ana_fc2', 64,
trainable=self.trainable)
self.het_full_ana_fc3 = \
self._fc_layer('het_full_ana_fc3', num, mean=0, std=1e-3,
activation=None, trainable=self.trainable)
self.het_full_ana_init_bias = \
self.add_weight(name='het_full_ana_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and not self.diag and not self.learned:
# for constant noise with full covariance matrix
self.const_full_ana = \
self.add_weight(name='const_tri_ana', shape=[num],
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(0.),
trainable=self.trainable)
self.const_full_ana_init_bias = \
self.add_weight(name='const_full_ana_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
def call(self, inputs, training):
old_state, actions = inputs
        # exclude l (the friction parameter fr at state index 3) from the
        # inputs for stability
input_data = tf.concat([old_state[:, :3], old_state[:, 4:], actions],
axis=-1)
# input_data = tf.concat([old_state, actions], axis=-1)
if self.learned:
if self.hetero and self.diag:
fc1 = self.het_diag_lrn_fc1(input_data)
fc2 = self.het_diag_lrn_fc2(fc1)
diag = self.het_diag_lrn_fc3(fc2)
if self.summary:
tf.summary.histogram('het_diag_lrn_fc1_out', fc1)
tf.summary.histogram('het_diag_lrn_fc2_out', fc2)
tf.summary.histogram('het_diag_lrn_fc3_out', diag)
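                # squaring keeps the predicted variances non-negative; the
                # learned bias sets their initial scale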
diag = tf.square(diag + self.het_diag_lrn_init_bias)
diag += self.bias_fixed
Q = tf.linalg.diag(diag)
elif not self.hetero and self.diag:
diag = self.const_diag_lrn
diag = tf.square(diag) + self.bias_fixed
Q = tf.linalg.tensor_diag(diag)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
elif self.hetero and not self.diag:
fc1 = self.het_full_lrn_fc1(input_data)
fc2 = self.het_full_lrn_fc2(fc1)
tri = self.het_full_lrn_fc3(fc2)
if self.summary:
tf.summary.histogram('het_full_lrn_fc1_out', fc1)
tf.summary.histogram('het_full_lrn_fc2_out', fc2)
tf.summary.histogram('het_full_lrn_out', tri)
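                # build a lower-triangular factor L from the flat vector;
                # Q = L L^T plus the fixed diagonal bias is then symmetric
                # positive definite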
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.het_full_lrn_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
else:
tri = self.const_full_lrn
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.const_full_lrn_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
else:
if self.hetero and self.diag:
fc1 = self.het_diag_ana_fc1(input_data)
fc2 = self.het_diag_ana_fc2(fc1)
diag = self.het_diag_ana_fc3(fc2)
if self.summary:
tf.summary.histogram('het_diag_ana_fc1_out', fc1)
tf.summary.histogram('het_diag_ana_fc2_out', fc2)
tf.summary.histogram('het_diag_ana_fc3_out', diag)
diag = tf.square(diag + self.het_diag_ana_init_bias)
diag += self.bias_fixed
Q = tf.linalg.diag(diag)
elif not self.hetero and self.diag:
diag = self.const_diag_ana
diag = tf.square(diag) + self.bias_fixed
Q = tf.linalg.tensor_diag(diag)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
elif self.hetero and not self.diag:
fc1 = self.het_full_ana_fc1(input_data)
fc2 = self.het_full_ana_fc2(fc1)
tri = self.het_full_ana_fc3(fc2)
if self.summary:
tf.summary.histogram('het_full_ana_fc1_out', fc1)
tf.summary.histogram('het_full_ana_fc2_out', fc2)
tf.summary.histogram('het_full_ana_out', tri)
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.het_full_ana_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
else:
tri = self.const_full_ana
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.const_full_ana_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
return Q
|
[
"tensorflow.compat.v1.zeros",
"numpy.arctan2",
"numpy.ones",
"tensorflow.compat.v1.summary.histogram",
"matplotlib.patches.Polygon",
"numpy.arange",
"differentiable_filters.utils.push_utils.physical_model",
"tensorflow.compat.v1.name_scope",
"os.path.dirname",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.multiply",
"differentiable_filters.utils.push_utils._to_3d_d",
"numpy.max",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.norm",
"matplotlib.pyplot.subplots",
"numpy.save",
"tensorflow.compat.v1.linalg.matrix_transpose",
"tensorflow.compat.v1.stop_gradient",
"numpy.min",
"numpy.squeeze",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.nn.sigmoid",
"numpy.linalg.eigh",
"tensorflow.compat.v1.abs",
"numpy.array",
"tensorflow.compat.v1.math.atan2",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.keras.layers.BatchNormalization",
"tensorflow.compat.v1.sign",
"tensorflow.compat.v1.linalg.norm",
"numpy.asscalar",
"tensorflow.compat.v1.nn.max_pool2d",
"tensorflow.compat.v1.strings.regex_replace",
"differentiable_filters.utils.push_utils._to_3d",
"differentiable_filters.utils.push_utils.physical_model_derivative",
"numpy.square",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.greater",
"tensorflow.compat.v1.sin",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.keras.layers.LayerNormalization",
"differentiable_filters.utils.tensorflow_compatability.fill_triangular",
"tensorflow.compat.v1.equal",
"pickle.load",
"numpy.mean",
"numpy.sin",
"os.path.join",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.cast",
"numpy.corrcoef",
"tensorflow.compat.v1.cos",
"numpy.dot",
"tensorflow.compat.v1.linalg.diag_part",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.math.log",
"differentiable_filters.contexts.paper_base_context.PaperBaseContext.__init__",
"numpy.zeros",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.image.extract_glimpse",
"tensorflow.compat.v1.sqrt",
"numpy.load",
"tensorflow.compat.v1.nn.l2_normalize",
"tensorflow.compat.v1.io.parse_single_example",
"tensorflow.compat.v1.linalg.tensor_diag",
"numpy.random.randint",
"numpy.random.normal",
"tensorflow.compat.v1.image.resize",
"tensorflow.compat.v1.cond",
"csv.DictWriter",
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.ones",
"numpy.std",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.linalg.diag",
"tensorflow.compat.v1.summary.image",
"numpy.cos",
"matplotlib.patches.Ellipse",
"tensorflow.compat.v1.math.is_finite",
"differentiable_filters.utils.recordio.RecordMeta.load",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.keras.regularizers.l2",
"numpy.sqrt"
] |
[((183, 207), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (205, 207), True, 'import tensorflow.compat.v1 as tf\n'), ((1337, 1386), 'differentiable_filters.contexts.paper_base_context.PaperBaseContext.__init__', 'base.PaperBaseContext.__init__', (['self', 'param', 'mode'], {}), '(self, param, mode)\n', (1367, 1386), True, 'from differentiable_filters.contexts import paper_base_context as base\n'), ((2174, 2197), 'numpy.array', 'np.array', (['butter_points'], {}), '(butter_points)\n', (2182, 2197), True, 'import numpy as np\n'), ((3308, 3349), 'tensorflow.compat.v1.convert_to_tensor', 'tf.convert_to_tensor', (['q'], {'dtype': 'tf.float32'}), '(q, dtype=tf.float32)\n', (3328, 3349), True, 'import tensorflow.compat.v1 as tf\n'), ((3411, 3452), 'tensorflow.compat.v1.convert_to_tensor', 'tf.convert_to_tensor', (['r'], {'dtype': 'tf.float32'}), '(r, dtype=tf.float32)\n', (3431, 3452), True, 'import tensorflow.compat.v1 as tf\n'), ((16446, 16516), 'tensorflow.compat.v1.concat', 'tf.concat', (['[init_z[:, :3], base_state[:, 3:5], init_z[:, 3:]]'], {'axis': '(-1)'}), '([init_z[:, :3], base_state[:, 3:5], init_z[:, 3:]], axis=-1)\n', (16455, 16516), True, 'import tensorflow.compat.v1 as tf\n'), ((17833, 17886), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['states', '[self.batch_size, -1, self.dim_x]'], {}), '(states, [self.batch_size, -1, self.dim_x])\n', (17843, 17886), True, 'import tensorflow.compat.v1 as tf\n'), ((17904, 17969), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['covars', '[self.batch_size, -1, self.dim_x, self.dim_x]'], {}), '(covars, [self.batch_size, -1, self.dim_x, self.dim_x])\n', (17914, 17969), True, 'import tensorflow.compat.v1 as tf\n'), ((19428, 19523), 'tensorflow.compat.v1.concat', 'tf.concat', (['[seq_label[:, :, :3] - z[:, :, 0:3], seq_label[:, :, 5:] - z[:, :, 3:]]'], {'axis': '(-1)'}), '([seq_label[:, :, :3] - z[:, :, 0:3], seq_label[:, :, 5:] - z[:, :,\n 3:]], axis=-1)\n', (19437, 19523), True, 'import tensorflow.compat.v1 as tf\n'), ((20139, 20161), 'tensorflow.compat.v1.linalg.diag_part', 'tf.linalg.diag_part', (['r'], {}), '(r)\n', (20158, 20161), True, 'import tensorflow.compat.v1 as tf\n'), ((20227, 20263), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diag_r', '[-1, self.dim_z]'], {}), '(diag_r, [-1, self.dim_z])\n', (20237, 20263), True, 'import tensorflow.compat.v1 as tf\n'), ((21036, 21058), 'tensorflow.compat.v1.linalg.diag_part', 'tf.linalg.diag_part', (['q'], {}), '(q)\n', (21055, 21058), True, 'import tensorflow.compat.v1 as tf\n'), ((21124, 21160), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diag_q', '[-1, self.dim_x]'], {}), '(diag_q, [-1, self.dim_x])\n', (21134, 21160), True, 'import tensorflow.compat.v1 as tf\n'), ((21713, 21756), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""out/m_per_tr"""', 'm_per_tr'], {}), "('out/m_per_tr', m_per_tr)\n", (21730, 21756), True, 'import tensorflow.compat.v1 as tf\n'), ((21765, 21814), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""out/deg_per_deg"""', 'deg_per_deg'], {}), "('out/deg_per_deg', deg_per_deg)\n", (21782, 21814), True, 'import tensorflow.compat.v1 as tf\n'), ((22468, 22480), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['wd'], {}), '(wd)\n', (22476, 22480), True, 'import tensorflow.compat.v1 as tf\n'), ((22631, 22656), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['total_mse'], {}), '(total_mse)\n', (22645, 22656), True, 'import tensorflow.compat.v1 as tf\n'), ((22677, 22700), 
'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dist_ob'], {}), '(dist_ob)\n', (22691, 22700), True, 'import tensorflow.compat.v1 as tf\n'), ((23724, 23792), 'tensorflow.compat.v1.cond', 'tf.cond', (['training', '(lambda : total_loss + wd)', '(lambda : total_loss_val)'], {}), '(training, lambda : total_loss + wd, lambda : total_loss_val)\n', (23731, 23792), True, 'import tensorflow.compat.v1 as tf\n'), ((23848, 23886), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/total"""', 'total'], {}), "('loss/total', total)\n", (23865, 23886), True, 'import tensorflow.compat.v1 as tf\n'), ((23895, 23927), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/wd"""', 'wd'], {}), "('loss/wd', wd)\n", (23912, 23927), True, 'import tensorflow.compat.v1 as tf\n'), ((24009, 24059), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/tracking"""', 'total_tracking'], {}), "('loss/tracking', total_tracking)\n", (24026, 24059), True, 'import tensorflow.compat.v1 as tf\n'), ((24068, 24117), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/observations"""', 'total_obs'], {}), "('loss/observations', total_obs)\n", (24085, 24117), True, 'import tensorflow.compat.v1 as tf\n'), ((25960, 25975), 'tensorflow.compat.v1.square', 'tf.square', (['diff'], {}), '(diff)\n', (25969, 25975), True, 'import tensorflow.compat.v1 as tf\n'), ((25991, 26019), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['diff'], {'axis': '(-1)'}), '(diff, axis=-1)\n', (26004, 26019), True, 'import tensorflow.compat.v1 as tf\n'), ((28024, 28055), 'tensorflow.compat.v1.linalg.diag_part', 'tf.linalg.diag_part', (['R_het_diag'], {}), '(R_het_diag)\n', (28043, 28055), True, 'import tensorflow.compat.v1 as tf\n'), ((28148, 28193), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diag_r_het_diag', '[-1, self.dim_z]'], {}), '(diag_r_het_diag, [-1, self.dim_z])\n', (28158, 28193), True, 'import tensorflow.compat.v1 as tf\n'), ((28219, 28249), 'tensorflow.compat.v1.linalg.diag_part', 'tf.linalg.diag_part', (['R_het_tri'], {}), '(R_het_tri)\n', (28238, 28249), True, 'import tensorflow.compat.v1 as tf\n'), ((28339, 28383), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diag_r_het_tri', '[-1, self.dim_z]'], {}), '(diag_r_het_tri, [-1, self.dim_z])\n', (28349, 28383), True, 'import tensorflow.compat.v1 as tf\n'), ((29836, 29850), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (29844, 29850), True, 'import tensorflow.compat.v1 as tf\n'), ((29971, 30013), 'tensorflow.compat.v1.image.resize', 'tf.image.resize', (['seg_pred', '[height, width]'], {}), '(seg_pred, [height, width])\n', (29986, 30013), True, 'import tensorflow.compat.v1 as tf\n'), ((30041, 30091), 'tensorflow.compat.v1.image.resize', 'tf.image.resize', (['initial_seg_pred', '[height, width]'], {}), '(initial_seg_pred, [height, width])\n', (30056, 30091), True, 'import tensorflow.compat.v1 as tf\n'), ((30747, 30770), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['pix_mse'], {}), '(pix_mse)\n', (30761, 30770), True, 'import tensorflow.compat.v1 as tf\n'), ((31068, 31099), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['initial_pix_mse'], {}), '(initial_pix_mse)\n', (31082, 31099), True, 'import tensorflow.compat.v1 as tf\n'), ((31763, 31778), 'tensorflow.compat.v1.abs', 'tf.abs', (['z[:, 2]'], {}), '(z[:, 2])\n', (31769, 31778), True, 'import tensorflow.compat.v1 as tf\n'), ((31973, 32000), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['rot_penalty'], 
{}), '(rot_penalty)\n', (31987, 32000), True, 'import tensorflow.compat.v1 as tf\n'), ((32197, 32209), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['wd'], {}), '(wd)\n', (32205, 32209), True, 'import tensorflow.compat.v1 as tf\n'), ((33571, 33630), 'tensorflow.compat.v1.cond', 'tf.cond', (['training', '(lambda : total_train)', '(lambda : total_val)'], {}), '(training, lambda : total_train, lambda : total_val)\n', (33578, 33630), True, 'import tensorflow.compat.v1 as tf\n'), ((33662, 33700), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/total"""', 'total'], {}), "('loss/total', total)\n", (33679, 33700), True, 'import tensorflow.compat.v1 as tf\n'), ((33709, 33741), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/wd"""', 'wd'], {}), "('loss/wd', wd)\n", (33726, 33741), True, 'import tensorflow.compat.v1 as tf\n'), ((35466, 35528), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""observation_loss/rot_penalty"""', 'rot_penalty'], {}), "('observation_loss/rot_penalty', rot_penalty)\n", (35483, 35528), True, 'import tensorflow.compat.v1 as tf\n'), ((35537, 35583), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/like_good"""', 'good_loss'], {}), "('loss/like_good', good_loss)\n", (35554, 35583), True, 'import tensorflow.compat.v1 as tf\n'), ((35592, 35636), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/like_bad"""', 'bad_loss'], {}), "('loss/like_bad', bad_loss)\n", (35609, 35636), True, 'import tensorflow.compat.v1 as tf\n'), ((35645, 35691), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/like_loss"""', 'like_loss'], {}), "('loss/like_loss', like_loss)\n", (35662, 35691), True, 'import tensorflow.compat.v1 as tf\n'), ((35701, 35749), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/segmentation"""', 'seg_loss'], {}), "('loss/segmentation', seg_loss)\n", (35718, 35749), True, 'import tensorflow.compat.v1 as tf\n'), ((35758, 35797), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""loss/seg_label"""', 'seg'], {}), "('loss/seg_label', seg)\n", (35774, 35797), True, 'import tensorflow.compat.v1 as tf\n'), ((35806, 35849), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""loss/seg_pred"""', 'seg_pred'], {}), "('loss/seg_pred', seg_pred)\n", (35822, 35849), True, 'import tensorflow.compat.v1 as tf\n'), ((35858, 35913), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""loss/initial_seg_label"""', 'initial_seg'], {}), "('loss/initial_seg_label', initial_seg)\n", (35874, 35913), True, 'import tensorflow.compat.v1 as tf\n'), ((35922, 35980), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""loss/inital_seg_pred"""', 'initial_seg_pred'], {}), "('loss/inital_seg_pred', initial_seg_pred)\n", (35938, 35980), True, 'import tensorflow.compat.v1 as tf\n'), ((39445, 39459), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (39453, 39459), True, 'import tensorflow.compat.v1 as tf\n'), ((39917, 39929), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['wd'], {}), '(wd)\n', (39925, 39929), True, 'import tensorflow.compat.v1 as tf\n'), ((40687, 40725), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/total"""', 'total'], {}), "('loss/total', total)\n", (40704, 40725), True, 'import tensorflow.compat.v1 as tf\n'), ((40734, 40766), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""loss/wd"""', 'wd'], {}), "('loss/wd', wd)\n", (40751, 40766), True, 'import 
tensorflow.compat.v1 as tf\n'), ((45536, 45574), 'tensorflow.compat.v1.norm', 'tf.norm', (['pred'], {'axis': '(-1)', 'keep_dims': '(True)'}), '(pred, axis=-1, keep_dims=True)\n', (45543, 45574), True, 'import tensorflow.compat.v1 as tf\n'), ((45596, 45635), 'tensorflow.compat.v1.norm', 'tf.norm', (['label'], {'axis': '(-1)', 'keep_dims': '(True)'}), '(label, axis=-1, keep_dims=True)\n', (45603, 45635), True, 'import tensorflow.compat.v1 as tf\n'), ((45651, 45679), 'tensorflow.compat.v1.nn.l2_normalize', 'tf.nn.l2_normalize', (['pred', '(-1)'], {}), '(pred, -1)\n', (45669, 45679), True, 'import tensorflow.compat.v1 as tf\n'), ((45696, 45725), 'tensorflow.compat.v1.nn.l2_normalize', 'tf.nn.l2_normalize', (['label', '(-1)'], {}), '(label, -1)\n', (45714, 45725), True, 'import tensorflow.compat.v1 as tf\n'), ((47007, 47050), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['label', '[self.batch_size, -1, 1]'], {}), '(label, [self.batch_size, -1, 1])\n', (47017, 47050), True, 'import tensorflow.compat.v1 as tf\n'), ((47066, 47108), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['pred', '[self.batch_size, -1, 1]'], {}), '(pred, [self.batch_size, -1, 1])\n', (47076, 47108), True, 'import tensorflow.compat.v1 as tf\n'), ((47155, 47185), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['pred', '(0)', '(1.0)'], {}), '(pred, 0, 1.0)\n', (47171, 47185), True, 'import tensorflow.compat.v1 as tf\n'), ((50010, 50034), 'tensorflow.compat.v1.multiply', 'tf.multiply', (['ps', 'weights'], {}), '(ps, weights)\n', (50021, 50034), True, 'import tensorflow.compat.v1 as tf\n'), ((50050, 50077), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['mult'], {'axis': '(1)'}), '(mult, axis=1)\n', (50063, 50077), True, 'import tensorflow.compat.v1 as tf\n'), ((50163, 50228), 'tensorflow.compat.v1.concat', 'tf.concat', (['[mean[:, :2], ang1 / self.scale, mean[:, 4:]]'], {'axis': '(-1)'}), '([mean[:, :2], ang1 / self.scale, mean[:, 4:]], axis=-1)\n', (50172, 50228), True, 'import tensorflow.compat.v1 as tf\n'), ((50582, 50606), 'tensorflow.compat.v1.multiply', 'tf.multiply', (['ps', 'weights'], {}), '(ps, weights)\n', (50593, 50606), True, 'import tensorflow.compat.v1 as tf\n'), ((50622, 50652), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['mult'], {'axis': 'axis'}), '(mult, axis=axis)\n', (50635, 50652), True, 'import tensorflow.compat.v1 as tf\n'), ((50737, 50801), 'tensorflow.compat.v1.concat', 'tf.concat', (['[mean[:, :2], ang / self.scale, mean[:, 4:]]'], {'axis': '(-1)'}), '([mean[:, :2], ang / self.scale, mean[:, 4:]], axis=-1)\n', (50746, 50801), True, 'import tensorflow.compat.v1 as tf\n'), ((50916, 50975), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['fr', '(0.1 / self.scale)', '(5000.0 / self.scale)'], {}), '(fr, 0.1 / self.scale, 5000.0 / self.scale)\n', (50932, 50975), True, 'import tensorflow.compat.v1 as tf\n'), ((51082, 51138), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['m', '(0.1 / self.scale)', '(90.0 / self.scale)'], {}), '(m, 0.1 / self.scale, 90.0 / self.scale)\n', (51098, 51138), True, 'import tensorflow.compat.v1 as tf\n'), ((51244, 51273), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['s', '(0.0)', '(1.0)'], {}), '(s, 0.0, 1.0)\n', (51260, 51273), True, 'import tensorflow.compat.v1 as tf\n'), ((52768, 52782), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['ob'], {}), '(ob)\n', (52778, 52782), True, 'import tensorflow.compat.v1 as tf\n'), ((52796, 52836), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', 
(['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (52820, 52836), True, 'import tensorflow.compat.v1 as tf\n'), ((52850, 52890), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', (['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (52874, 52890), True, 'import tensorflow.compat.v1 as tf\n'), ((56178, 56233), 'differentiable_filters.utils.recordio.RecordMeta.load', 'tfr.RecordMeta.load', (['path', "(name + '_' + data_mode + '_')"], {}), "(path, name + '_' + data_mode + '_')\n", (56197, 56233), True, 'from differentiable_filters.utils import recordio as tfr\n'), ((57451, 57502), 'tensorflow.compat.v1.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (57477, 57502), True, 'import tensorflow.compat.v1 as tf\n'), ((58068, 58178), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pose[:, 0:1] * 1000 / self.scale, pose[:, 1:2] * 1000 / self.scale, ori /\n self.scale]'], {'axis': '(1)'}), '([pose[:, 0:1] * 1000 / self.scale, pose[:, 1:2] * 1000 / self.\n scale, ori / self.scale], axis=1)\n', (58077, 58178), True, 'import tensorflow.compat.v1 as tf\n'), ((58284, 58324), 'tensorflow.compat.v1.cast', 'tf.cast', (["features['contact']", 'tf.float32'], {}), "(features['contact'], tf.float32)\n", (58291, 58324), True, 'import tensorflow.compat.v1 as tf\n'), ((58476, 58503), 'tensorflow.compat.v1.linalg.norm', 'tf.linalg.norm', (['cp'], {'axis': '(-1)'}), '(cp, axis=-1)\n', (58490, 58503), True, 'import tensorflow.compat.v1 as tf\n'), ((58905, 58940), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '[1, 2, 3]'}), '(mask, axis=[1, 2, 3])\n', (58918, 58940), True, 'import tensorflow.compat.v1 as tf\n'), ((58997, 59028), 'tensorflow.compat.v1.concat', 'tf.concat', (['[im, coord]'], {'axis': '(-1)'}), '([im, coord], axis=-1)\n', (59006, 59028), True, 'import tensorflow.compat.v1 as tf\n'), ((59083, 59118), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['object']", '[1]'], {}), "(features['object'], [1])\n", (59093, 59118), True, 'import tensorflow.compat.v1 as tf\n'), ((59133, 59170), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['material']", '[1]'], {}), "(features['material'], [1])\n", (59143, 59170), True, 'import tensorflow.compat.v1 as tf\n'), ((62285, 62298), 'tensorflow.compat.v1.stack', 'tf.stack', (['ims'], {}), '(ims)\n', (62293, 62298), True, 'import tensorflow.compat.v1 as tf\n'), ((62319, 62338), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_ims'], {}), '(start_ims)\n', (62327, 62338), True, 'import tensorflow.compat.v1 as tf\n'), ((62358, 62376), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_ts'], {}), '(start_ts)\n', (62366, 62376), True, 'import tensorflow.compat.v1 as tf\n'), ((62391, 62404), 'tensorflow.compat.v1.stack', 'tf.stack', (['tes'], {}), '(tes)\n', (62399, 62404), True, 'import tensorflow.compat.v1 as tf\n'), ((62421, 62436), 'tensorflow.compat.v1.stack', 'tf.stack', (['pixts'], {}), '(pixts)\n', (62429, 62436), True, 'import tensorflow.compat.v1 as tf\n'), ((62453, 62468), 'tensorflow.compat.v1.stack', 'tf.stack', (['pixte'], {}), '(pixte)\n', (62461, 62468), True, 'import tensorflow.compat.v1 as tf\n'), ((62482, 62500), 'tensorflow.compat.v1.tile', 'tf.tile', (['ob', '[num]'], {}), '(ob, [num])\n', (62489, 62500), True, 'import tensorflow.compat.v1 as tf\n'), ((62515, 62534), 'tensorflow.compat.v1.tile', 'tf.tile', (['mat', '[num]'], {}), '(mat, [num])\n', (62522, 62534), True, 'import tensorflow.compat.v1 as 
tf\n'), ((63422, 63462), 'tensorflow.compat.v1.cast', 'tf.cast', (["features['contact']", 'tf.float32'], {}), "(features['contact'], tf.float32)\n", (63429, 63462), True, 'import tensorflow.compat.v1 as tf\n'), ((63609, 63636), 'tensorflow.compat.v1.linalg.norm', 'tf.linalg.norm', (['cp'], {'axis': '(-1)'}), '(cp, axis=-1)\n', (63623, 63636), True, 'import tensorflow.compat.v1 as tf\n'), ((63985, 64020), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['object']", '[1]'], {}), "(features['object'], [1])\n", (63995, 64020), True, 'import tensorflow.compat.v1 as tf\n'), ((64035, 64072), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['material']", '[1]'], {}), "(features['material'], [1])\n", (64045, 64072), True, 'import tensorflow.compat.v1 as tf\n'), ((65392, 65413), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_state'], {}), '(start_state)\n', (65400, 65413), True, 'import tensorflow.compat.v1 as tf\n'), ((65427, 65439), 'tensorflow.compat.v1.stack', 'tf.stack', (['us'], {}), '(us)\n', (65435, 65439), True, 'import tensorflow.compat.v1 as tf\n'), ((65453, 65471), 'tensorflow.compat.v1.tile', 'tf.tile', (['ob', '[num]'], {}), '(ob, [num])\n', (65460, 65471), True, 'import tensorflow.compat.v1 as tf\n'), ((65486, 65505), 'tensorflow.compat.v1.tile', 'tf.tile', (['mat', '[num]'], {}), '(mat, [num])\n', (65493, 65505), True, 'import tensorflow.compat.v1 as tf\n'), ((66122, 66162), 'tensorflow.compat.v1.cast', 'tf.cast', (["features['contact']", 'tf.float32'], {}), "(features['contact'], tf.float32)\n", (66129, 66162), True, 'import tensorflow.compat.v1 as tf\n'), ((66309, 66336), 'tensorflow.compat.v1.linalg.norm', 'tf.linalg.norm', (['cp'], {'axis': '(-1)'}), '(cp, axis=-1)\n', (66323, 66336), True, 'import tensorflow.compat.v1 as tf\n'), ((67134, 67169), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '[1, 2, 3]'}), '(mask, axis=[1, 2, 3])\n', (67147, 67169), True, 'import tensorflow.compat.v1 as tf\n'), ((67183, 67214), 'tensorflow.compat.v1.concat', 'tf.concat', (['[im, coord]'], {'axis': '(-1)'}), '([im, coord], axis=-1)\n', (67192, 67214), True, 'import tensorflow.compat.v1 as tf\n'), ((67267, 67302), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['object']", '[1]'], {}), "(features['object'], [1])\n", (67277, 67302), True, 'import tensorflow.compat.v1 as tf\n'), ((67317, 67354), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['material']", '[1]'], {}), "(features['material'], [1])\n", (67327, 67354), True, 'import tensorflow.compat.v1 as tf\n'), ((69761, 69774), 'tensorflow.compat.v1.stack', 'tf.stack', (['ims'], {}), '(ims)\n', (69769, 69774), True, 'import tensorflow.compat.v1 as tf\n'), ((69795, 69814), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_ims'], {}), '(start_ims)\n', (69803, 69814), True, 'import tensorflow.compat.v1 as tf\n'), ((69834, 69852), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_ts'], {}), '(start_ts)\n', (69842, 69852), True, 'import tensorflow.compat.v1 as tf\n'), ((69875, 69896), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_state'], {}), '(start_state)\n', (69883, 69896), True, 'import tensorflow.compat.v1 as tf\n'), ((69910, 69922), 'tensorflow.compat.v1.stack', 'tf.stack', (['us'], {}), '(us)\n', (69918, 69922), True, 'import tensorflow.compat.v1 as tf\n'), ((69937, 69950), 'tensorflow.compat.v1.stack', 'tf.stack', (['tes'], {}), '(tes)\n', (69945, 69950), True, 'import tensorflow.compat.v1 as tf\n'), ((69967, 69982), 'tensorflow.compat.v1.stack', 'tf.stack', (['pixts'], 
{}), '(pixts)\n', (69975, 69982), True, 'import tensorflow.compat.v1 as tf\n'), ((69999, 70014), 'tensorflow.compat.v1.stack', 'tf.stack', (['pixte'], {}), '(pixte)\n', (70007, 70014), True, 'import tensorflow.compat.v1 as tf\n'), ((70032, 70048), 'tensorflow.compat.v1.stack', 'tf.stack', (['mv_trs'], {}), '(mv_trs)\n', (70040, 70048), True, 'import tensorflow.compat.v1 as tf\n'), ((70067, 70084), 'tensorflow.compat.v1.stack', 'tf.stack', (['mv_rots'], {}), '(mv_rots)\n', (70075, 70084), True, 'import tensorflow.compat.v1 as tf\n'), ((70100, 70114), 'tensorflow.compat.v1.stack', 'tf.stack', (['viss'], {}), '(viss)\n', (70108, 70114), True, 'import tensorflow.compat.v1 as tf\n'), ((70129, 70147), 'tensorflow.compat.v1.tile', 'tf.tile', (['ob', '[num]'], {}), '(ob, [num])\n', (70136, 70147), True, 'import tensorflow.compat.v1 as tf\n'), ((70162, 70181), 'tensorflow.compat.v1.tile', 'tf.tile', (['mat', '[num]'], {}), '(mat, [num])\n', (70169, 70181), True, 'import tensorflow.compat.v1 as tf\n'), ((74936, 74955), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (74950, 74955), True, 'import numpy as np\n'), ((75241, 75268), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, :2]'], {}), '(seq_pred[:, :2])\n', (75251, 75268), True, 'import numpy as np\n'), ((75287, 75313), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, 2]'], {}), '(seq_pred[:, 2])\n', (75297, 75313), True, 'import numpy as np\n'), ((75331, 75357), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, 3]'], {}), '(seq_pred[:, 3])\n', (75341, 75357), True, 'import numpy as np\n'), ((75376, 75402), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, 4]'], {}), '(seq_pred[:, 4])\n', (75386, 75402), True, 'import numpy as np\n'), ((75421, 75449), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, 5:7]'], {}), '(seq_pred[:, 5:7])\n', (75431, 75449), True, 'import numpy as np\n'), ((75467, 75495), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, 7:9]'], {}), '(seq_pred[:, 7:9])\n', (75477, 75495), True, 'import numpy as np\n'), ((75513, 75539), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, 9]'], {}), '(seq_pred[:, 9])\n', (75523, 75539), True, 'import numpy as np\n'), ((77617, 77653), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '[20, 15]'}), '(2, 3, figsize=[20, 15])\n', (77629, 77653), True, 'import matplotlib.pyplot as plt\n'), ((77667, 77695), 'numpy.arange', 'np.arange', (['pos_pred.shape[0]'], {}), '(pos_pred.shape[0])\n', (77676, 77695), True, 'import numpy as np\n'), ((81505, 81541), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '[20, 15]'}), '(2, 3, figsize=[20, 15])\n', (81517, 81541), True, 'import matplotlib.pyplot as plt\n'), ((81555, 81583), 'numpy.arange', 'np.arange', (['pos_pred.shape[0]'], {}), '(pos_pred.shape[0])\n', (81564, 81583), True, 'import numpy as np\n'), ((89522, 89549), 'numpy.squeeze', 'np.squeeze', (['seq_pred[:, :2]'], {}), '(seq_pred[:, :2])\n', (89532, 89549), True, 'import numpy as np\n'), ((89817, 89847), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[15, 15]'}), '(figsize=[15, 15])\n', (89829, 89847), True, 'import matplotlib.pyplot as plt\n'), ((89899, 89929), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[17, 17]'}), '(figsize=[17, 17])\n', (89911, 89929), True, 'import matplotlib.pyplot as plt\n'), ((96536, 96586), 'os.path.join', 'os.path.join', (['path', '"""resources"""', '"""plane_image.npy"""'], {}), "(path, 'resources', 'plane_image.npy')\n", (96548, 96586), False, 'import os\n'), ((96710, 96763), 
'tensorflow.compat.v1.tile', 'tf.tile', (['self.plane_depth', '[self.batch_size, 1, 1, 1]'], {}), '(self.plane_depth, [self.batch_size, 1, 1, 1])\n', (96717, 96763), True, 'import tensorflow.compat.v1 as tf\n'), ((104084, 104134), 'os.path.join', 'os.path.join', (['path', '"""resources"""', '"""plane_image.npy"""'], {}), "(path, 'resources', 'plane_image.npy')\n", (104096, 104134), False, 'import os\n'), ((104258, 104311), 'tensorflow.compat.v1.tile', 'tf.tile', (['self.plane_depth', '[self.batch_size, 1, 1, 1]'], {}), '(self.plane_depth, [self.batch_size, 1, 1, 1])\n', (104265, 104311), True, 'import tensorflow.compat.v1 as tf\n'), ((128109, 128137), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['pos_c2', '[bs, -1]'], {}), '(pos_c2, [bs, -1])\n', (128119, 128137), True, 'import tensorflow.compat.v1 as tf\n'), ((128459, 128504), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pos_fc, rot_fc, rns_fc2]'], {'axis': '(-1)'}), '([pos_fc, rot_fc, rns_fc2], axis=-1)\n', (128468, 128504), True, 'import tensorflow.compat.v1 as tf\n'), ((128524, 128571), 'tensorflow.compat.v1.tile', 'tf.tile', (['encoding[:, None, :]', '[1, num_pred, 1]'], {}), '(encoding[:, None, :], [1, num_pred, 1])\n', (128531, 128571), True, 'import tensorflow.compat.v1 as tf\n'), ((128594, 128635), 'tensorflow.compat.v1.concat', 'tf.concat', (['[encoding, particles]'], {'axis': '(-1)'}), '([encoding, particles], axis=-1)\n', (128603, 128635), True, 'import tensorflow.compat.v1 as tf\n'), ((128657, 128700), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['input_data', '[bs * num_pred, -1]'], {}), '(input_data, [bs * num_pred, -1])\n', (128667, 128700), True, 'import tensorflow.compat.v1 as tf\n'), ((130890, 130939), 'tensorflow.compat.v1.concat', 'tf.concat', (['[inputs[:, :3], inputs[:, 5:]]'], {'axis': '(1)'}), '([inputs[:, :3], inputs[:, 5:]], axis=1)\n', (130899, 130939), True, 'import tensorflow.compat.v1 as tf\n'), ((148450, 148515), 'tensorflow.compat.v1.concat', 'tf.concat', (['[old_state[:, :3], old_state[:, 4:], actions]'], {'axis': '(-1)'}), '([old_state[:, :3], old_state[:, 4:], actions], axis=-1)\n', (148459, 148515), True, 'import tensorflow.compat.v1 as tf\n'), ((2129, 2144), 'pickle.load', 'pickle.load', (['bf'], {}), '(bf)\n', (2140, 2144), False, 'import pickle\n'), ((3267, 3289), 'numpy.square', 'np.square', (['self.q_diag'], {}), '(self.q_diag)\n', (3276, 3289), True, 'import numpy as np\n'), ((3370, 3392), 'numpy.square', 'np.square', (['self.r_diag'], {}), '(self.r_diag)\n', (3379, 3392), True, 'import numpy as np\n'), ((3594, 3654), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (3602, 3654), True, 'import numpy as np\n'), ((3659, 3786), 'numpy.array', 'np.array', (['[49.8394116, -2.3510439, 0, 2.5196417, 1.93745247, 27.6656989, 67.1287098, \n 0.03124815, -0.18917632, -0.14730855]'], {}), '([49.8394116, -2.3510439, 0, 2.5196417, 1.93745247, 27.6656989, \n 67.1287098, 0.03124815, -0.18917632, -0.14730855])\n', (3667, 3786), True, 'import numpy as np\n'), ((3842, 3971), 'numpy.array', 'np.array', (['[27.9914853, -30.3366791, 0, -4.6963326, -2.96631439, 3.6698755, -\n 14.5376077, -0.49956926, 0.56362964, 0.54478971]'], {}), '([27.9914853, -30.3366791, 0, -4.6963326, -2.96631439, 3.6698755, -\n 14.5376077, -0.49956926, 0.56362964, 0.54478971])\n', (3850, 3971), True, 'import numpy as np\n'), ((18316, 18365), 'tensorflow.compat.v1.tile', 'tf.tile', (['seq_label[:, :, None, :]', '[1, 1, num, 1]'], {}), 
'(seq_label[:, :, None, :], [1, 1, num, 1])\n', (18323, 18365), True, 'import tensorflow.compat.v1 as tf\n'), ((20187, 20209), 'tensorflow.compat.v1.abs', 'tf.abs', (['(diag_r + 1e-05)'], {}), '(diag_r + 1e-05)\n', (20193, 20209), True, 'import tensorflow.compat.v1 as tf\n'), ((20549, 20563), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['corr'], {}), '(corr)\n', (20557, 20563), True, 'import tensorflow.compat.v1 as tf\n'), ((20964, 20985), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['corr_r_cont'], {}), '(corr_r_cont)\n', (20972, 20985), True, 'import tensorflow.compat.v1 as tf\n'), ((21084, 21106), 'tensorflow.compat.v1.abs', 'tf.abs', (['(diag_q + 1e-05)'], {}), '(diag_q + 1e-05)\n', (21090, 21106), True, 'import tensorflow.compat.v1 as tf\n'), ((21488, 21504), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['corr_q'], {}), '(corr_q)\n', (21496, 21504), True, 'import tensorflow.compat.v1 as tf\n'), ((21857, 21878), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mv_tr'], {}), '(mv_tr)\n', (21871, 21878), True, 'import tensorflow.compat.v1 as tf\n'), ((21923, 21945), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mv_rot'], {}), '(mv_rot)\n', (21937, 21945), True, 'import tensorflow.compat.v1 as tf\n'), ((21989, 22012), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dist_tr'], {}), '(dist_tr)\n', (22003, 22012), True, 'import tensorflow.compat.v1 as tf\n'), ((22057, 22081), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dist_rot'], {}), '(dist_rot)\n', (22071, 22081), True, 'import tensorflow.compat.v1 as tf\n'), ((22758, 22784), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (22772, 22784), True, 'import tensorflow.compat.v1 as tf\n'), ((23557, 23583), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23571, 23583), True, 'import tensorflow.compat.v1 as tf\n'), ((23973, 23999), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23987, 23999), True, 'import tensorflow.compat.v1 as tf\n'), ((24163, 24181), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['corr_r'], {}), '(corr_r)\n', (24173, 24181), True, 'import tensorflow.compat.v1 as tf\n'), ((24229, 24252), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['corr_r_cont'], {}), '(corr_r_cont)\n', (24239, 24252), True, 'import tensorflow.compat.v1 as tf\n'), ((24300, 24318), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['corr_q'], {}), '(corr_q)\n', (24310, 24318), True, 'import tensorflow.compat.v1 as tf\n'), ((25438, 25458), 'tensorflow.compat.v1.greater', 'tf.greater', (['mv_tr', '(0)'], {}), '(mv_tr, 0)\n', (25448, 25458), True, 'import tensorflow.compat.v1 as tf\n'), ((25562, 25583), 'tensorflow.compat.v1.greater', 'tf.greater', (['mv_rot', '(0)'], {}), '(mv_rot, 0)\n', (25572, 25583), True, 'import tensorflow.compat.v1 as tf\n'), ((25715, 25739), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['m_per_tr'], {}), '(m_per_tr)\n', (25729, 25739), True, 'import tensorflow.compat.v1 as tf\n'), ((25741, 25768), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['deg_per_deg'], {}), '(deg_per_deg)\n', (25755, 25768), True, 'import tensorflow.compat.v1 as tf\n'), ((26045, 26064), 'tensorflow.compat.v1.greater', 'tf.greater', (['diff', '(0)'], {}), '(diff, 0)\n', (26055, 26064), True, 'import tensorflow.compat.v1 as tf\n'), ((26066, 26079), 'tensorflow.compat.v1.sqrt', 'tf.sqrt', (['diff'], {}), '(diff)\n', (26073, 26079), True, 'import tensorflow.compat.v1 as 
tf\n'), ((27062, 27084), 'tensorflow.compat.v1.stop_gradient', 'tf.stop_gradient', (['diff'], {}), '(diff)\n', (27078, 27084), True, 'import tensorflow.compat.v1 as tf\n'), ((27265, 27287), 'tensorflow.compat.v1.stop_gradient', 'tf.stop_gradient', (['diff'], {}), '(diff)\n', (27281, 27287), True, 'import tensorflow.compat.v1 as tf\n'), ((28090, 28121), 'tensorflow.compat.v1.abs', 'tf.abs', (['(diag_r_het_diag + 1e-05)'], {}), '(diag_r_het_diag + 1e-05)\n', (28096, 28121), True, 'import tensorflow.compat.v1 as tf\n'), ((28283, 28313), 'tensorflow.compat.v1.abs', 'tf.abs', (['(diag_r_het_tri + 1e-05)'], {}), '(diag_r_het_tri + 1e-05)\n', (28289, 28313), True, 'import tensorflow.compat.v1 as tf\n'), ((28943, 28962), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['corr_diag'], {}), '(corr_diag)\n', (28951, 28962), True, 'import tensorflow.compat.v1 as tf\n'), ((28996, 29015), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['corr_full'], {}), '(corr_full)\n', (29004, 29015), True, 'import tensorflow.compat.v1 as tf\n'), ((30281, 30317), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['seg_loss'], {'axis': '[1, 2]'}), '(seg_loss, axis=[1, 2])\n', (30294, 30317), True, 'import tensorflow.compat.v1 as tf\n'), ((30526, 30563), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['seg_loss2'], {'axis': '[1, 2]'}), '(seg_loss2, axis=[1, 2])\n', (30539, 30563), True, 'import tensorflow.compat.v1 as tf\n'), ((31810, 31835), 'tensorflow.compat.v1.greater', 'tf.greater', (['rot_pred', '(180)'], {}), '(rot_pred, 180)\n', (31820, 31835), True, 'import tensorflow.compat.v1 as tf\n'), ((31868, 31893), 'tensorflow.compat.v1.square', 'tf.square', (['(rot_pred - 180)'], {}), '(rot_pred - 180)\n', (31877, 31893), True, 'import tensorflow.compat.v1 as tf\n'), ((31926, 31949), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot_pred'], {}), '(rot_pred)\n', (31939, 31949), True, 'import tensorflow.compat.v1 as tf\n'), ((32320, 32354), 'tensorflow.compat.v1.less', 'tf.less', (['step', '(self.epoch_size * 2)'], {}), '(step, self.epoch_size * 2)\n', (32327, 32354), True, 'import tensorflow.compat.v1 as tf\n'), ((32902, 32936), 'tensorflow.compat.v1.less', 'tf.less', (['step', '(self.epoch_size * 5)'], {}), '(step, self.epoch_size * 5)\n', (32909, 32936), True, 'import tensorflow.compat.v1 as tf\n'), ((33824, 33861), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_const_diag'], {}), '(likelihood_const_diag)\n', (33838, 33861), True, 'import tensorflow.compat.v1 as tf\n'), ((33944, 33980), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_const_tri'], {}), '(likelihood_const_tri)\n', (33958, 33980), True, 'import tensorflow.compat.v1 as tf\n'), ((34062, 34097), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_het_diag'], {}), '(likelihood_het_diag)\n', (34076, 34097), True, 'import tensorflow.compat.v1 as tf\n'), ((34178, 34212), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_het_tri'], {}), '(likelihood_het_tri)\n', (34192, 34212), True, 'import tensorflow.compat.v1 as tf\n'), ((34274, 34321), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["('label/' + name)", 'label[0, i]'], {}), "('label/' + name, label[0, i])\n", (34291, 34321), True, 'import tensorflow.compat.v1 as tf\n'), ((34819, 34846), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['corr_r_diag'], {}), '(corr_r_diag)\n', (34833, 34846), True, 'import tensorflow.compat.v1 as tf\n'), ((34898, 34925), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', 
(['corr_r_full'], {}), '(corr_r_full)\n', (34912, 34925), True, 'import tensorflow.compat.v1 as tf\n'), ((35010, 35036), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (35024, 35036), True, 'import tensorflow.compat.v1 as tf\n'), ((35119, 35138), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['vis'], {}), '(vis)\n', (35133, 35138), True, 'import tensorflow.compat.v1 as tf\n'), ((35221, 35245), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['pix_dist'], {}), '(pix_dist)\n', (35235, 35245), True, 'import tensorflow.compat.v1 as tf\n'), ((35327, 35350), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dist_3d'], {}), '(dist_3d)\n', (35341, 35350), True, 'import tensorflow.compat.v1 as tf\n'), ((35438, 35456), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['ce'], {}), '(ce)\n', (35452, 35456), True, 'import tensorflow.compat.v1 as tf\n'), ((39974, 40008), 'tensorflow.compat.v1.less', 'tf.less', (['step', '(self.epoch_size * 5)'], {}), '(step, self.epoch_size * 5)\n', (39981, 40008), True, 'import tensorflow.compat.v1 as tf\n'), ((40849, 40886), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_const_diag'], {}), '(likelihood_const_diag)\n', (40863, 40886), True, 'import tensorflow.compat.v1 as tf\n'), ((40969, 41005), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_const_tri'], {}), '(likelihood_const_tri)\n', (40983, 41005), True, 'import tensorflow.compat.v1 as tf\n'), ((41087, 41122), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_het_diag'], {}), '(likelihood_het_diag)\n', (41101, 41122), True, 'import tensorflow.compat.v1 as tf\n'), ((41203, 41237), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_het_tri'], {}), '(likelihood_het_tri)\n', (41217, 41237), True, 'import tensorflow.compat.v1 as tf\n'), ((41325, 41366), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_const_diag_ana'], {}), '(likelihood_const_diag_ana)\n', (41339, 41366), True, 'import tensorflow.compat.v1 as tf\n'), ((41453, 41493), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_const_tri_ana'], {}), '(likelihood_const_tri_ana)\n', (41467, 41493), True, 'import tensorflow.compat.v1 as tf\n'), ((41579, 41618), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_het_diag_ana'], {}), '(likelihood_het_diag_ana)\n', (41593, 41618), True, 'import tensorflow.compat.v1 as tf\n'), ((41703, 41741), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_het_tri_ana'], {}), '(likelihood_het_tri_ana)\n', (41717, 41741), True, 'import tensorflow.compat.v1 as tf\n'), ((41786, 41805), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mse'], {}), '(mse)\n', (41800, 41805), True, 'import tensorflow.compat.v1 as tf\n'), ((45992, 46041), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['prod', '(-0.999999999)', '(0.999999999)'], {}), '(prod, -0.999999999, 0.999999999)\n', (46008, 46041), True, 'import tensorflow.compat.v1 as tf\n'), ((46291, 46340), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['prod', '(-0.999999999)', '(0.999999999)'], {}), '(prod, -0.999999999, 0.999999999)\n', (46307, 46340), True, 'import tensorflow.compat.v1 as tf\n'), ((46498, 46526), 'tensorflow.compat.v1.greater', 'tf.greater', (['pred_norm', '(1e-06)'], {}), '(pred_norm, 1e-06)\n', (46508, 46526), True, 'import tensorflow.compat.v1 as tf\n'), ((46560, 46589), 'tensorflow.compat.v1.greater', 'tf.greater', 
(['label_norm', '(1e-06)'], {}), '(label_norm, 1e-06)\n', (46570, 46589), True, 'import tensorflow.compat.v1 as tf\n'), ((46633, 46656), 'tensorflow.compat.v1.math.is_finite', 'tf.math.is_finite', (['prod'], {}), '(prod)\n', (46650, 46656), True, 'import tensorflow.compat.v1 as tf\n'), ((46691, 46703), 'tensorflow.compat.v1.abs', 'tf.abs', (['prod'], {}), '(prod)\n', (46697, 46703), True, 'import tensorflow.compat.v1 as tf\n'), ((46705, 46724), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['prod'], {}), '(prod)\n', (46718, 46724), True, 'import tensorflow.compat.v1 as tf\n'), ((48112, 48147), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['state', '[-1, self.dim_x]'], {}), '(state, [-1, self.dim_x])\n', (48122, 48147), True, 'import tensorflow.compat.v1 as tf\n'), ((48938, 48982), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['state', '(shape[:-1] + [self.dim_x])'], {}), '(state, shape[:-1] + [self.dim_x])\n', (48948, 48982), True, 'import tensorflow.compat.v1 as tf\n'), ((49349, 49383), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diff', '[-1, self.dim_z]'], {}), '(diff, [-1, self.dim_z])\n', (49359, 49383), True, 'import tensorflow.compat.v1 as tf\n'), ((49624, 49667), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diff', '(shape[:-1] + [self.dim_z])'], {}), '(diff, shape[:-1] + [self.dim_z])\n', (49634, 49667), True, 'import tensorflow.compat.v1 as tf\n'), ((52730, 52747), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (52742, 52747), True, 'import tensorflow.compat.v1 as tf\n'), ((58228, 58258), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (["features['normal']"], {}), "(features['normal'])\n", (58238, 58258), True, 'import tensorflow.compat.v1 as tf\n'), ((58339, 58363), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['con', '[-1, 1]'], {}), '(con, [-1, 1])\n', (58349, 58363), True, 'import tensorflow.compat.v1 as tf\n'), ((60169, 60205), 'numpy.random.randint', 'np.random.randint', (['(2)', '(seq_len - 2)', '(5)'], {}), '(2, seq_len - 2, 5)\n', (60186, 60205), True, 'import numpy as np\n'), ((60337, 60365), 'numpy.arange', 'np.arange', (['(2)', '(seq_len - 2)', '(8)'], {}), '(2, seq_len - 2, 8)\n', (60346, 60365), True, 'import numpy as np\n'), ((61285, 61356), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pose[si, :2], relative_rot, cp[si], n[si], con[si]]'], {'axis': '(0)'}), '([pose[si, :2], relative_rot, cp[si], n[si], con[si]], axis=0)\n', (61294, 61356), True, 'import tensorflow.compat.v1 as tf\n'), ((61443, 61491), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(0.1)', 'size': '(24, 8)'}), '(loc=0, scale=0.1, size=(24, 8))\n', (61459, 61491), True, 'import numpy as np\n'), ((61550, 61597), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(10)', 'scale': '(5)', 'size': '(24, 8)'}), '(loc=10, scale=5, size=(24, 8))\n', (61566, 61597), True, 'import numpy as np\n'), ((61627, 61675), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(-10)', 'scale': '(5)', 'size': '(12, 8)'}), '(loc=-10, scale=5, size=(12, 8))\n', (61643, 61675), True, 'import numpy as np\n'), ((62580, 62596), 'tensorflow.compat.v1.stack', 'tf.stack', (['labels'], {}), '(labels)\n', (62588, 62596), True, 'import tensorflow.compat.v1 as tf\n'), ((62616, 62633), 'tensorflow.compat.v1.stack', 'tf.stack', (['good_zs'], {}), '(good_zs)\n', (62624, 62633), True, 'import tensorflow.compat.v1 as tf\n'), ((62653, 62669), 'tensorflow.compat.v1.stack', 'tf.stack', (['bad_zs'], {}), '(bad_zs)\n', (62661, 62669), True, 'import 
tensorflow.compat.v1 as tf\n'), ((62723, 62739), 'tensorflow.compat.v1.stack', 'tf.stack', (['labels'], {}), '(labels)\n', (62731, 62739), True, 'import tensorflow.compat.v1 as tf\n'), ((62741, 62755), 'tensorflow.compat.v1.stack', 'tf.stack', (['pixs'], {}), '(pixs)\n', (62749, 62755), True, 'import tensorflow.compat.v1 as tf\n'), ((62757, 62777), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_pixs'], {}), '(start_pixs)\n', (62765, 62777), True, 'import tensorflow.compat.v1 as tf\n'), ((62797, 62811), 'tensorflow.compat.v1.stack', 'tf.stack', (['segs'], {}), '(segs)\n', (62805, 62811), True, 'import tensorflow.compat.v1 as tf\n'), ((62813, 62833), 'tensorflow.compat.v1.stack', 'tf.stack', (['start_segs'], {}), '(start_segs)\n', (62821, 62833), True, 'import tensorflow.compat.v1 as tf\n'), ((62835, 62849), 'tensorflow.compat.v1.stack', 'tf.stack', (['viss'], {}), '(viss)\n', (62843, 62849), True, 'import tensorflow.compat.v1 as tf\n'), ((63255, 63321), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pose[:, 0:1] * 1000, pose[:, 1:2] * 1000, ori]'], {'axis': '(1)'}), '([pose[:, 0:1] * 1000, pose[:, 1:2] * 1000, ori], axis=1)\n', (63264, 63321), True, 'import tensorflow.compat.v1 as tf\n'), ((63366, 63396), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (["features['normal']"], {}), "(features['normal'])\n", (63376, 63396), True, 'import tensorflow.compat.v1 as tf\n'), ((63477, 63501), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['con', '[-1, 1]'], {}), '(con, [-1, 1])\n', (63487, 63501), True, 'import tensorflow.compat.v1 as tf\n'), ((64435, 64472), 'numpy.random.randint', 'np.random.randint', (['(2)', '(seq_len - 1)', '(10)'], {}), '(2, seq_len - 1, 10)\n', (64452, 64472), True, 'import numpy as np\n'), ((64603, 64631), 'numpy.arange', 'np.arange', (['(2)', '(seq_len - 1)', '(8)'], {}), '(2, seq_len - 1, 8)\n', (64612, 64631), True, 'import numpy as np\n'), ((65222, 65312), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pose[si, :2], relative_rot, friction, mu, cp[si], n[si], con[si]]'], {'axis': '(0)'}), '([pose[si, :2], relative_rot, friction, mu, cp[si], n[si], con[si]\n ], axis=0)\n', (65231, 65312), True, 'import tensorflow.compat.v1 as tf\n'), ((65955, 66021), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pose[:, 0:1] * 1000, pose[:, 1:2] * 1000, ori]'], {'axis': '(1)'}), '([pose[:, 0:1] * 1000, pose[:, 1:2] * 1000, ori], axis=1)\n', (65964, 66021), True, 'import tensorflow.compat.v1 as tf\n'), ((66066, 66096), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (["features['normal']"], {}), "(features['normal'])\n", (66076, 66096), True, 'import tensorflow.compat.v1 as tf\n'), ((66177, 66201), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['con', '[-1, 1]'], {}), '(con, [-1, 1])\n', (66187, 66201), True, 'import tensorflow.compat.v1 as tf\n'), ((67536, 67584), 'numpy.random.randint', 'np.random.randint', (['(1)', '(seq_len - self.sl - 2)', 'num'], {}), '(1, seq_len - self.sl - 2, num)\n', (67553, 67584), True, 'import numpy as np\n'), ((71406, 71436), 'csv.DictWriter', 'csv.DictWriter', (['log_file', 'keys'], {}), '(log_file, keys)\n', (71420, 71436), False, 'import csv\n'), ((71652, 71692), 'csv.DictWriter', 'csv.DictWriter', (['log_file_corr', 'keys_corr'], {}), '(log_file_corr, keys_corr)\n', (71666, 71692), False, 'import csv\n'), ((72806, 72823), 'numpy.mean', 'np.mean', (['corr_vis'], {}), '(corr_vis)\n', (72813, 72823), True, 'import numpy as np\n'), ((72857, 72875), 'numpy.mean', 'np.mean', (['corr_cont'], {}), '(corr_cont)\n', (72864, 72875), True, 'import numpy as 
np\n'), ((73195, 73213), 'numpy.mean', 'np.mean', (['corr_cont'], {}), '(corr_cont)\n', (73202, 73213), True, 'import numpy as np\n'), ((75560, 75571), 'numpy.max', 'np.max', (['vis'], {}), '(vis)\n', (75566, 75571), True, 'import numpy as np\n'), ((75621, 75641), 'numpy.squeeze', 'np.squeeze', (['z[:, :2]'], {}), '(z[:, :2])\n', (75631, 75641), True, 'import numpy as np\n'), ((75663, 75682), 'numpy.squeeze', 'np.squeeze', (['z[:, 2]'], {}), '(z[:, 2])\n', (75673, 75682), True, 'import numpy as np\n'), ((75703, 75724), 'numpy.squeeze', 'np.squeeze', (['z[:, 3:5]'], {}), '(z[:, 3:5])\n', (75713, 75724), True, 'import numpy as np\n'), ((75745, 75766), 'numpy.squeeze', 'np.squeeze', (['z[:, 5:7]'], {}), '(z[:, 5:7])\n', (75755, 75766), True, 'import numpy as np\n'), ((75787, 75806), 'numpy.squeeze', 'np.squeeze', (['z[:, 7]'], {}), '(z[:, 7])\n', (75797, 75806), True, 'import numpy as np\n'), ((84813, 84843), 'csv.DictWriter', 'csv.DictWriter', (['log_file', 'keys'], {}), '(log_file, keys)\n', (84827, 84843), False, 'import csv\n'), ((86058, 86088), 'csv.DictWriter', 'csv.DictWriter', (['log_file', 'keys'], {}), '(log_file, keys)\n', (86072, 86088), False, 'import csv\n'), ((86925, 86954), 'numpy.save', 'np.save', (["(name + '_init')", 'init'], {}), "(name + '_init', init)\n", (86932, 86954), True, 'import numpy as np\n'), ((86967, 86995), 'numpy.save', 'np.save', (["(name + '_true')", 'seq'], {}), "(name + '_true', seq)\n", (86974, 86995), True, 'import numpy as np\n'), ((87008, 87041), 'numpy.save', 'np.save', (["(name + '_pred')", 'seq_pred'], {}), "(name + '_pred', seq_pred)\n", (87015, 87041), True, 'import numpy as np\n'), ((87054, 87079), 'numpy.save', 'np.save', (["(name + '_obs')", 'z'], {}), "(name + '_obs', z)\n", (87061, 87079), True, 'import numpy as np\n'), ((87092, 87122), 'numpy.save', 'np.save', (["(name + '_c')", 'cov_pred'], {}), "(name + '_c', cov_pred)\n", (87099, 87122), True, 'import numpy as np\n'), ((87135, 87163), 'numpy.save', 'np.save', (["(name + '_q')", 'q_pred'], {}), "(name + '_q', q_pred)\n", (87142, 87163), True, 'import numpy as np\n'), ((87176, 87204), 'numpy.save', 'np.save', (["(name + '_r')", 'r_pred'], {}), "(name + '_r', r_pred)\n", (87183, 87204), True, 'import numpy as np\n'), ((87217, 87244), 'numpy.save', 'np.save', (["(name + '_vis')", 'vis'], {}), "(name + '_vis', vis)\n", (87224, 87244), True, 'import numpy as np\n'), ((87257, 87286), 'numpy.save', 'np.save', (["(name + '_u')", 'actions'], {}), "(name + '_u', actions)\n", (87264, 87286), True, 'import numpy as np\n'), ((87299, 87324), 'numpy.save', 'np.save', (["(name + '_ob')", 'ob'], {}), "(name + '_ob', ob)\n", (87306, 87324), True, 'import numpy as np\n'), ((89569, 89586), 'numpy.min', 'np.min', (['seq[:, 0]'], {}), '(seq[:, 0])\n', (89575, 89586), True, 'import numpy as np\n'), ((89588, 89610), 'numpy.min', 'np.min', (['pos_pred[:, 0]'], {}), '(pos_pred[:, 0])\n', (89594, 89610), True, 'import numpy as np\n'), ((89631, 89648), 'numpy.min', 'np.min', (['seq[:, 1]'], {}), '(seq[:, 1])\n', (89637, 89648), True, 'import numpy as np\n'), ((89650, 89672), 'numpy.min', 'np.min', (['pos_pred[:, 1]'], {}), '(pos_pred[:, 1])\n', (89656, 89672), True, 'import numpy as np\n'), ((89693, 89710), 'numpy.max', 'np.max', (['seq[:, 0]'], {}), '(seq[:, 0])\n', (89699, 89710), True, 'import numpy as np\n'), ((89712, 89734), 'numpy.max', 'np.max', (['pos_pred[:, 0]'], {}), '(pos_pred[:, 0])\n', (89718, 89734), True, 'import numpy as np\n'), ((89755, 89772), 'numpy.max', 'np.max', (['seq[:, 1]'], {}), '(seq[:, 
1])\n', (89761, 89772), True, 'import numpy as np\n'), ((89774, 89796), 'numpy.max', 'np.max', (['pos_pred[:, 1]'], {}), '(pos_pred[:, 1])\n', (89780, 89796), True, 'import numpy as np\n'), ((95192, 95268), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos_pred[-1]', 'width': 'width', 'height': 'height', 'angle': 'theta', 'alpha': '(0.1)'}), '(xy=pos_pred[-1], width=width, height=height, angle=theta, alpha=0.1)\n', (95199, 95268), False, 'from matplotlib.patches import Ellipse\n'), ((97671, 97748), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""segment/norm1"""', 'trainable': 'trainable'}), "(name='segment/norm1', trainable=trainable)\n", (97705, 97748), True, 'import tensorflow.compat.v1 as tf\n'), ((97842, 97919), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""segment/norm2"""', 'trainable': 'trainable'}), "(name='segment/norm2', trainable=trainable)\n", (97876, 97919), True, 'import tensorflow.compat.v1 as tf\n'), ((98013, 98090), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""segment/norm3"""', 'trainable': 'trainable'}), "(name='segment/norm3', trainable=trainable)\n", (98047, 98090), True, 'import tensorflow.compat.v1 as tf\n'), ((98185, 98262), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""segment/norm4"""', 'trainable': 'trainable'}), "(name='segment/norm4', trainable=trainable)\n", (98219, 98262), True, 'import tensorflow.compat.v1 as tf\n'), ((98357, 98434), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""segment/norm5"""', 'trainable': 'trainable'}), "(name='segment/norm5', trainable=trainable)\n", (98391, 98434), True, 'import tensorflow.compat.v1 as tf\n'), ((99869, 99886), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['""""""'], {}), "('')\n", (99882, 99886), True, 'import tensorflow.compat.v1 as tf\n'), ((103065, 103106), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['pos_pix', '[self.batch_size, 2]'], {}), '(pos_pix, [self.batch_size, 2])\n', (103075, 103106), True, 'import tensorflow.compat.v1 as tf\n'), ((103125, 103164), 'differentiable_filters.utils.push_utils._to_3d', 'utils._to_3d', (['pos_pix', 'self.plane_depth'], {}), '(pos_pix, self.plane_depth)\n', (103137, 103164), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((103289, 103350), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pos_pix[:, 1:2] * 2, pos_pix[:, 0:1] * 2]'], {'axis': '(1)'}), '([pos_pix[:, 1:2] * 2, pos_pix[:, 0:1] * 2], axis=1)\n', (103298, 103350), True, 'import tensorflow.compat.v1 as tf\n'), ((103430, 103534), 'tensorflow.compat.v1.image.extract_glimpse', 'tf.image.extract_glimpse', (['images'], {'size': '[72, 72]', 'offsets': 'coords_rot', 'centered': '(True)', 'normalized': '(False)'}), '(images, size=[72, 72], offsets=coords_rot,\n centered=True, normalized=False)\n', (103454, 103534), True, 'import tensorflow.compat.v1 as tf\n'), ((105886, 105963), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""glimpse/norm1"""', 'trainable': 'trainable'}), "(name='glimpse/norm1', trainable=trainable)\n", (105920, 105963), True, 'import tensorflow.compat.v1 as tf\n'), ((106057, 106134), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', 
([], {'name': '"""glimpse/norm2"""', 'trainable': 'trainable'}), "(name='glimpse/norm2', trainable=trainable)\n", (106091, 106134), True, 'import tensorflow.compat.v1 as tf\n'), ((106228, 106305), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""glimpse/norm3"""', 'trainable': 'trainable'}), "(name='glimpse/norm3', trainable=trainable)\n", (106262, 106305), True, 'import tensorflow.compat.v1 as tf\n'), ((106399, 106472), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""rot/norm1"""', 'trainable': 'trainable'}), "(name='rot/norm1', trainable=trainable)\n", (106433, 106472), True, 'import tensorflow.compat.v1 as tf\n'), ((106566, 106639), 'tensorflow.compat.v1.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'name': '"""rot/norm2"""', 'trainable': 'trainable'}), "(name='rot/norm2', trainable=trainable)\n", (106600, 106639), True, 'import tensorflow.compat.v1 as tf\n'), ((108092, 108109), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['""""""'], {}), "('')\n", (108105, 108109), True, 'import tensorflow.compat.v1 as tf\n'), ((114573, 114620), 'tensorflow.compat.v1.linalg.norm', 'tf.linalg.norm', (['n[:, :2]'], {'axis': '(1)', 'keepdims': '(True)'}), '(n[:, :2], axis=1, keepdims=True)\n', (114587, 114620), True, 'import tensorflow.compat.v1 as tf\n'), ((115030, 115069), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pos, rot, r, n, s]'], {'axis': '(-1)'}), '([pos, rot, r, n, s], axis=-1)\n', (115039, 115069), True, 'import tensorflow.compat.v1 as tf\n'), ((115108, 115141), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""r_x"""', 'r[0, 0]'], {}), "('r_x', r[0, 0])\n", (115125, 115141), True, 'import tensorflow.compat.v1 as tf\n'), ((115154, 115187), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""r_y"""', 'r[0, 1]'], {}), "('r_y', r[0, 1])\n", (115171, 115187), True, 'import tensorflow.compat.v1 as tf\n'), ((115200, 115233), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""n_x"""', 'n[0, 0]'], {}), "('n_x', n[0, 0])\n", (115217, 115233), True, 'import tensorflow.compat.v1 as tf\n'), ((115246, 115279), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""n_y"""', 'n[0, 1]'], {}), "('n_y', n[0, 1])\n", (115263, 115279), True, 'import tensorflow.compat.v1 as tf\n'), ((115292, 115327), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""o_x"""', 'pos[0, 0]'], {}), "('o_x', pos[0, 0])\n", (115309, 115327), True, 'import tensorflow.compat.v1 as tf\n'), ((115340, 115375), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""o_y"""', 'pos[0, 1]'], {}), "('o_y', pos[0, 1])\n", (115357, 115375), True, 'import tensorflow.compat.v1 as tf\n'), ((115388, 115427), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""t_x"""', 'tip_pos[0, 0]'], {}), "('t_x', tip_pos[0, 0])\n", (115405, 115427), True, 'import tensorflow.compat.v1 as tf\n'), ((115440, 115479), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""t_y"""', 'tip_pos[0, 1]'], {}), "('t_y', tip_pos[0, 1])\n", (115457, 115479), True, 'import tensorflow.compat.v1 as tf\n'), ((115492, 115523), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""s"""', 's[0, 0]'], {}), "('s', s[0, 0])\n", (115509, 115523), True, 'import tensorflow.compat.v1 as tf\n'), ((115536, 115571), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""rot"""', 'rot[0, 0]'], {}), 
"('rot', rot[0, 0])\n", (115553, 115571), True, 'import tensorflow.compat.v1 as tf\n'), ((122340, 122390), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['het_diag_pos_c2', '[self.batch_size, -1]'], {}), '(het_diag_pos_c2, [self.batch_size, -1])\n', (122350, 122390), True, 'import tensorflow.compat.v1 as tf\n'), ((122891, 122953), 'tensorflow.compat.v1.concat', 'tf.concat', (['[het_diag_pos, het_diag_rot, het_diag_rns]'], {'axis': '(-1)'}), '([het_diag_pos, het_diag_rot, het_diag_rns], axis=-1)\n', (122900, 122953), True, 'import tensorflow.compat.v1 as tf\n'), ((123818, 123859), 'tensorflow.compat.v1.square', 'tf.square', (['(diag + self.het_diag_init_bias)'], {}), '(diag + self.het_diag_init_bias)\n', (123827, 123859), True, 'import tensorflow.compat.v1 as tf\n'), ((123912, 123932), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['diag'], {}), '(diag)\n', (123926, 123932), True, 'import tensorflow.compat.v1 as tf\n'), ((128774, 128810), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc1_out"""', 'fc1'], {}), "('fc1_out', fc1)\n", (128794, 128810), True, 'import tensorflow.compat.v1 as tf\n'), ((128876, 128912), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc2_out"""', 'fc2'], {}), "('fc2_out', fc2)\n", (128896, 128912), True, 'import tensorflow.compat.v1 as tf\n'), ((128980, 129022), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""pos_c1_out"""', 'pos_c1'], {}), "('pos_c1_out', pos_c1)\n", (129000, 129022), True, 'import tensorflow.compat.v1 as tf\n'), ((129035, 129077), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""pos_c2_out"""', 'pos_c2'], {}), "('pos_c2_out', pos_c2)\n", (129055, 129077), True, 'import tensorflow.compat.v1 as tf\n'), ((129090, 129132), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""pos_fc_out"""', 'pos_fc'], {}), "('pos_fc_out', pos_fc)\n", (129110, 129132), True, 'import tensorflow.compat.v1 as tf\n'), ((129145, 129187), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""rot_fc_out"""', 'rot_fc'], {}), "('rot_fc_out', rot_fc)\n", (129165, 129187), True, 'import tensorflow.compat.v1 as tf\n'), ((129200, 129244), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""rns_fc1_out"""', 'rns_fc1'], {}), "('rns_fc1_out', rns_fc1)\n", (129220, 129244), True, 'import tensorflow.compat.v1 as tf\n'), ((129257, 129301), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""rns_fc2_out"""', 'rns_fc2'], {}), "('rns_fc2_out', rns_fc2)\n", (129277, 129301), True, 'import tensorflow.compat.v1 as tf\n'), ((129314, 129350), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc1_out"""', 'fc1'], {}), "('fc1_out', fc1)\n", (129334, 129350), True, 'import tensorflow.compat.v1 as tf\n'), ((129363, 129399), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc2_out"""', 'fc2'], {}), "('fc2_out', fc2)\n", (129383, 129399), True, 'import tensorflow.compat.v1 as tf\n'), ((129412, 129446), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""like"""', 'like'], {}), "('like', like)\n", (129432, 129446), True, 'import tensorflow.compat.v1 as tf\n'), ((132247, 132283), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['ob', '[self.batch_size, 1]'], {}), '(ob, [self.batch_size, 1])\n', (132257, 132283), True, 'import tensorflow.compat.v1 as tf\n'), ((132344, 132383), 'tensorflow.compat.v1.tile', 'tf.tile', (['ob', '[1, bs // self.batch_size]'], {}), 
'(ob, [1, bs // self.batch_size])\n', (132351, 132383), True, 'import tensorflow.compat.v1 as tf\n'), ((132401, 132421), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['ob', '[-1]'], {}), '(ob, [-1])\n', (132411, 132421), True, 'import tensorflow.compat.v1 as tf\n'), ((132439, 132479), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', (['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (132463, 132479), True, 'import tensorflow.compat.v1 as tf\n'), ((132497, 132537), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', (['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (132521, 132537), True, 'import tensorflow.compat.v1 as tf\n'), ((132720, 132780), 'tensorflow.compat.v1.concat', 'tf.concat', (['[update[:, :2], rot_pred, update[:, 3:]]'], {'axis': '(-1)'}), '([update[:, :2], rot_pred, update[:, 3:]], axis=-1)\n', (132729, 132780), True, 'import tensorflow.compat.v1 as tf\n'), ((140773, 140792), 'tensorflow.compat.v1.stop_gradient', 'tf.stop_gradient', (['F'], {}), '(F)\n', (140789, 140792), True, 'import tensorflow.compat.v1 as tf\n'), ((1957, 1982), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1972, 1982), False, 'import os\n'), ((2003, 2055), 'os.path.join', 'os.path.join', (['path', '"""resources"""', '"""butter_points.pkl"""'], {}), "(path, 'resources', 'butter_points.pkl')\n", (2015, 2055), False, 'import os\n'), ((16584, 16646), 'tensorflow.compat.v1.concat', 'tf.concat', (['[base_covar[:, :3, :3], init_R[:, :3, :3]]'], {'axis': '(-1)'}), '([base_covar[:, :3, :3], init_R[:, :3, :3]], axis=-1)\n', (16593, 16646), True, 'import tensorflow.compat.v1 as tf\n'), ((16750, 16812), 'tensorflow.compat.v1.concat', 'tf.concat', (['[base_covar[:, 5:, 5:], init_R[:, 3:, 3:]]'], {'axis': '(-1)'}), '([base_covar[:, 5:, 5:], init_R[:, 3:, 3:]], axis=-1)\n', (16759, 16812), True, 'import tensorflow.compat.v1 as tf\n'), ((20799, 20839), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['seq_label[:, :, 9:]', '[-1, 1]'], {}), '(seq_label[:, :, 9:], [-1, 1])\n', (20809, 20839), True, 'import tensorflow.compat.v1 as tf\n'), ((21333, 21373), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['seq_label[:, :, 9:]', '[-1, 1]'], {}), '(seq_label[:, :, 9:], [-1, 1])\n', (21343, 21373), True, 'import tensorflow.compat.v1 as tf\n'), ((24453, 24477), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dists[i]'], {}), '(dists[i])\n', (24467, 24477), True, 'import tensorflow.compat.v1 as tf\n'), ((24615, 24642), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dist_obs[i]'], {}), '(dist_obs[i])\n', (24629, 24642), True, 'import tensorflow.compat.v1 as tf\n'), ((30171, 30200), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['seg_pred'], {'axis': '(-1)'}), '(seg_pred, axis=-1)\n', (30181, 30200), True, 'import tensorflow.compat.v1 as tf\n'), ((30221, 30245), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['seg'], {'axis': '(-1)'}), '(seg, axis=-1)\n', (30231, 30245), True, 'import tensorflow.compat.v1 as tf\n'), ((30399, 30436), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['initial_seg_pred'], {'axis': '(-1)'}), '(initial_seg_pred, axis=-1)\n', (30409, 30436), True, 'import tensorflow.compat.v1 as tf\n'), ((30457, 30489), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['initial_seg'], {'axis': '(-1)'}), '(initial_seg, axis=-1)\n', (30467, 30489), True, 'import tensorflow.compat.v1 as tf\n'), ((34458, 34485), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dist_obs[i]'], {}), '(dist_obs[i])\n', (34472, 
34485), True, 'import tensorflow.compat.v1 as tf\n'), ((34622, 34650), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['corr_diag[i]'], {}), '(corr_diag[i])\n', (34636, 34650), True, 'import tensorflow.compat.v1 as tf\n'), ((34739, 34767), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['corr_full[i]'], {}), '(corr_full[i])\n', (34753, 34767), True, 'import tensorflow.compat.v1 as tf\n'), ((39341, 39378), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(mse * self.scale ** 2)'], {}), '(mse * self.scale ** 2)\n', (39355, 39378), True, 'import tensorflow.compat.v1 as tf\n'), ((39398, 39431), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(dist * self.scale)'], {}), '(dist * self.scale)\n', (39412, 39431), True, 'import tensorflow.compat.v1 as tf\n'), ((39696, 39729), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(dist * self.scale)'], {}), '(dist * self.scale)\n', (39710, 39729), True, 'import tensorflow.compat.v1 as tf\n'), ((41940, 41964), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dists[i]'], {}), '(dists[i])\n', (41954, 41964), True, 'import tensorflow.compat.v1 as tf\n'), ((42060, 42088), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['dists_ana[i]'], {}), '(dists_ana[i])\n', (42074, 42088), True, 'import tensorflow.compat.v1 as tf\n'), ((45849, 45894), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['pred', '[self.batch_size, -1, 1, 2]'], {}), '(pred, [self.batch_size, -1, 1, 2])\n', (45859, 45894), True, 'import tensorflow.compat.v1 as tf\n'), ((45925, 45971), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['label', '[self.batch_size, -1, 2, 1]'], {}), '(label, [self.batch_size, -1, 2, 1])\n', (45935, 45971), True, 'import tensorflow.compat.v1 as tf\n'), ((46069, 46111), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['prod', '[self.batch_size, -1, 1]'], {}), '(prod, [self.batch_size, -1, 1])\n', (46079, 46111), True, 'import tensorflow.compat.v1 as tf\n'), ((46156, 46197), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['pred', '[self.batch_size, 1, 2]'], {}), '(pred, [self.batch_size, 1, 2])\n', (46166, 46197), True, 'import tensorflow.compat.v1 as tf\n'), ((46228, 46270), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['label', '[self.batch_size, 2, 1]'], {}), '(label, [self.batch_size, 2, 1])\n', (46238, 46270), True, 'import tensorflow.compat.v1 as tf\n'), ((46368, 46406), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['prod', '[self.batch_size, 1]'], {}), '(prod, [self.batch_size, 1])\n', (46378, 46406), True, 'import tensorflow.compat.v1 as tf\n'), ((49819, 49873), 'tensorflow.compat.v1.sin', 'tf.sin', (['(points[:, :, 2:3] * self.scale * np.pi / 180.0)'], {}), '(points[:, :, 2:3] * self.scale * np.pi / 180.0)\n', (49825, 49873), True, 'import tensorflow.compat.v1 as tf\n'), ((49893, 49947), 'tensorflow.compat.v1.cos', 'tf.cos', (['(points[:, :, 2:3] * self.scale * np.pi / 180.0)'], {}), '(points[:, :, 2:3] * self.scale * np.pi / 180.0)\n', (49899, 49947), True, 'import tensorflow.compat.v1 as tf\n'), ((50094, 50135), 'tensorflow.compat.v1.math.atan2', 'tf.math.atan2', (['mean[:, 2:3]', 'mean[:, 3:4]'], {}), '(mean[:, 2:3], mean[:, 3:4])\n', (50107, 50135), True, 'import tensorflow.compat.v1 as tf\n'), ((50391, 50445), 'tensorflow.compat.v1.sin', 'tf.sin', (['(points[:, :, 2:3] * self.scale * np.pi / 180.0)'], {}), '(points[:, :, 2:3] * self.scale * np.pi / 180.0)\n', (50397, 50445), True, 'import tensorflow.compat.v1 as tf\n'), ((50465, 50519), 'tensorflow.compat.v1.cos', 'tf.cos', (['(points[:, :, 2:3] * 
self.scale * np.pi / 180.0)'], {}), '(points[:, :, 2:3] * self.scale * np.pi / 180.0)\n', (50471, 50519), True, 'import tensorflow.compat.v1 as tf\n'), ((50668, 50709), 'tensorflow.compat.v1.math.atan2', 'tf.math.atan2', (['mean[:, 2:3]', 'mean[:, 3:4]'], {}), '(mean[:, 2:3], mean[:, 3:4])\n', (50681, 50709), True, 'import tensorflow.compat.v1 as tf\n'), ((53535, 53557), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (53543, 53557), True, 'import tensorflow.compat.v1 as tf\n'), ((53839, 53873), 'tensorflow.compat.v1.greater', 'tf.greater', (['rot_new', '(rot_max / 2.0)'], {}), '(rot_new, rot_max / 2.0)\n', (53849, 53873), True, 'import tensorflow.compat.v1 as tf\n'), ((53962, 53994), 'tensorflow.compat.v1.less', 'tf.less', (['rot_add', '(-rot_max / 2.0)'], {}), '(rot_add, -rot_max / 2.0)\n', (53969, 53994), True, 'import tensorflow.compat.v1 as tf\n'), ((54229, 54252), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['ob', '[-1, 1]'], {}), '(ob, [-1, 1])\n', (54239, 54252), True, 'import tensorflow.compat.v1 as tf\n'), ((54346, 54368), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (54354, 54368), True, 'import tensorflow.compat.v1 as tf\n'), ((54370, 54388), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot'], {}), '(rot)\n', (54383, 54388), True, 'import tensorflow.compat.v1 as tf\n'), ((54461, 54482), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""rect1"""'], {}), "(ob, 'rect1')\n", (54469, 54482), True, 'import tensorflow.compat.v1 as tf\n'), ((54577, 54597), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""tri1"""'], {}), "(ob, 'tri1')\n", (54585, 54597), True, 'import tensorflow.compat.v1 as tf\n'), ((54693, 54713), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""tri2"""'], {}), "(ob, 'tri2')\n", (54701, 54713), True, 'import tensorflow.compat.v1 as tf\n'), ((54809, 54829), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""tri3"""'], {}), "(ob, 'tri3')\n", (54817, 54829), True, 'import tensorflow.compat.v1 as tf\n'), ((54925, 54944), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""hex"""'], {}), "(ob, 'hex')\n", (54933, 54944), True, 'import tensorflow.compat.v1 as tf\n'), ((55040, 55062), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (55048, 55062), True, 'import tensorflow.compat.v1 as tf\n'), ((55064, 55082), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot'], {}), '(rot)\n', (55077, 55082), True, 'import tensorflow.compat.v1 as tf\n'), ((55335, 55369), 'tensorflow.compat.v1.greater', 'tf.greater', (['rot_new', '(rot_max / 2.0)'], {}), '(rot_new, rot_max / 2.0)\n', (55345, 55369), True, 'import tensorflow.compat.v1 as tf\n'), ((55458, 55490), 'tensorflow.compat.v1.less', 'tf.less', (['rot_add', '(-rot_max / 2.0)'], {}), '(rot_add, -rot_max / 2.0)\n', (55465, 55490), True, 'import tensorflow.compat.v1 as tf\n'), ((57190, 57232), 'tensorflow.compat.v1.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x, y)'], {}), '((x, y))\n', (57224, 57232), True, 'import tensorflow.compat.v1 as tf\n'), ((58526, 58550), 'tensorflow.compat.v1.less', 'tf.less', (['con_norm', '(1e-06)'], {}), '(con_norm, 1e-06)\n', (58533, 58550), True, 'import tensorflow.compat.v1 as tf\n'), ((58782, 58803), 'tensorflow.compat.v1.greater', 'tf.greater', (['mask', '(2.5)'], {}), '(mask, 2.5)\n', (58792, 58803), True, 'import tensorflow.compat.v1 as tf\n'), ((58805, 58823), 'tensorflow.compat.v1.ones_like', 
'tf.ones_like', (['mask'], {}), '(mask)\n', (58817, 58823), True, 'import tensorflow.compat.v1 as tf\n'), ((58857, 58876), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['mask'], {}), '(mask)\n', (58870, 58876), True, 'import tensorflow.compat.v1 as tf\n'), ((63782, 63819), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['friction']", '[1]'], {}), "(features['friction'], [1])\n", (63792, 63819), True, 'import tensorflow.compat.v1 as tf\n'), ((66482, 66519), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['friction']", '[1]'], {}), "(features['friction'], [1])\n", (66492, 66519), True, 'import tensorflow.compat.v1 as tf\n'), ((67011, 67032), 'tensorflow.compat.v1.greater', 'tf.greater', (['mask', '(2.5)'], {}), '(mask, 2.5)\n', (67021, 67032), True, 'import tensorflow.compat.v1 as tf\n'), ((67034, 67052), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['mask'], {}), '(mask)\n', (67046, 67052), True, 'import tensorflow.compat.v1 as tf\n'), ((67086, 67105), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['mask'], {}), '(mask)\n', (67099, 67105), True, 'import tensorflow.compat.v1 as tf\n'), ((67729, 67784), 'numpy.arange', 'np.arange', (['(1)', '(seq_len - self.sl - 2)', '((self.sl + 1) // 2)'], {}), '(1, seq_len - self.sl - 2, (self.sl + 1) // 2)\n', (67738, 67784), True, 'import numpy as np\n'), ((68977, 69020), 'tensorflow.compat.v1.tile', 'tf.tile', (['pose[si:si + 1, 2:3]', '[self.sl, 1]'], {}), '(pose[si:si + 1, 2:3], [self.sl, 1])\n', (68984, 69020), True, 'import tensorflow.compat.v1 as tf\n'), ((74347, 74366), 'numpy.mean', 'np.mean', (['corr_diags'], {}), '(corr_diags)\n', (74354, 74366), True, 'import numpy as np\n'), ((74407, 74426), 'numpy.mean', 'np.mean', (['corr_fulls'], {}), '(corr_fulls)\n', (74414, 74426), True, 'import numpy as np\n'), ((75939, 75968), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 0, 0]'], {}), '(cov_pred[:, 0, 0])\n', (75949, 75968), True, 'import numpy as np\n'), ((75995, 76024), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 1, 1]'], {}), '(cov_pred[:, 1, 1])\n', (76005, 76024), True, 'import numpy as np\n'), ((76051, 76080), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 2, 2]'], {}), '(cov_pred[:, 2, 2])\n', (76061, 76080), True, 'import numpy as np\n'), ((76107, 76136), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 3, 3]'], {}), '(cov_pred[:, 3, 3])\n', (76117, 76136), True, 'import numpy as np\n'), ((76164, 76193), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 4, 4]'], {}), '(cov_pred[:, 4, 4])\n', (76174, 76193), True, 'import numpy as np\n'), ((76221, 76250), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 5, 5]'], {}), '(cov_pred[:, 5, 5])\n', (76231, 76250), True, 'import numpy as np\n'), ((76278, 76307), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 6, 6]'], {}), '(cov_pred[:, 6, 6])\n', (76288, 76307), True, 'import numpy as np\n'), ((76335, 76364), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 7, 7]'], {}), '(cov_pred[:, 7, 7])\n', (76345, 76364), True, 'import numpy as np\n'), ((76392, 76421), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 8, 8]'], {}), '(cov_pred[:, 8, 8])\n', (76402, 76421), True, 'import numpy as np\n'), ((76448, 76477), 'numpy.squeeze', 'np.squeeze', (['cov_pred[:, 9, 9]'], {}), '(cov_pred[:, 9, 9])\n', (76458, 76477), True, 'import numpy as np\n'), ((76642, 76669), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 0, 0]'], {}), '(q_pred[:, 0, 0])\n', (76652, 76669), True, 'import numpy as np\n'), ((76696, 76723), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 1, 1]'], {}), '(q_pred[:, 1, 1])\n', 
(76706, 76723), True, 'import numpy as np\n'), ((76750, 76777), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 2, 2]'], {}), '(q_pred[:, 2, 2])\n', (76760, 76777), True, 'import numpy as np\n'), ((76804, 76831), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 3, 3]'], {}), '(q_pred[:, 3, 3])\n', (76814, 76831), True, 'import numpy as np\n'), ((76859, 76886), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 4, 4]'], {}), '(q_pred[:, 4, 4])\n', (76869, 76886), True, 'import numpy as np\n'), ((76914, 76941), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 5, 5]'], {}), '(q_pred[:, 5, 5])\n', (76924, 76941), True, 'import numpy as np\n'), ((76969, 76996), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 6, 6]'], {}), '(q_pred[:, 6, 6])\n', (76979, 76996), True, 'import numpy as np\n'), ((77024, 77051), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 7, 7]'], {}), '(q_pred[:, 7, 7])\n', (77034, 77051), True, 'import numpy as np\n'), ((77079, 77106), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 8, 8]'], {}), '(q_pred[:, 8, 8])\n', (77089, 77106), True, 'import numpy as np\n'), ((77133, 77160), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 9, 9]'], {}), '(q_pred[:, 9, 9])\n', (77143, 77160), True, 'import numpy as np\n'), ((77187, 77214), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 0, 0]'], {}), '(r_pred[:, 0, 0])\n', (77197, 77214), True, 'import numpy as np\n'), ((77241, 77268), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 1, 1]'], {}), '(r_pred[:, 1, 1])\n', (77251, 77268), True, 'import numpy as np\n'), ((77295, 77322), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 2, 2]'], {}), '(r_pred[:, 2, 2])\n', (77305, 77322), True, 'import numpy as np\n'), ((77350, 77377), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 3, 3]'], {}), '(r_pred[:, 3, 3])\n', (77360, 77377), True, 'import numpy as np\n'), ((77405, 77432), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 4, 4]'], {}), '(r_pred[:, 4, 4])\n', (77415, 77432), True, 'import numpy as np\n'), ((77460, 77487), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 5, 5]'], {}), '(r_pred[:, 5, 5])\n', (77470, 77487), True, 'import numpy as np\n'), ((77515, 77542), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 6, 6]'], {}), '(r_pred[:, 6, 6])\n', (77525, 77542), True, 'import numpy as np\n'), ((77569, 77596), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 7, 7]'], {}), '(r_pred[:, 7, 7])\n', (77579, 77596), True, 'import numpy as np\n'), ((81605, 81615), 'numpy.max', 'np.max', (['qx'], {}), '(qx)\n', (81611, 81615), True, 'import numpy as np\n'), ((81617, 81627), 'numpy.max', 'np.max', (['qy'], {}), '(qy)\n', (81623, 81627), True, 'import numpy as np\n'), ((81629, 81639), 'numpy.max', 'np.max', (['rx'], {}), '(rx)\n', (81635, 81639), True, 'import numpy as np\n'), ((81641, 81651), 'numpy.max', 'np.max', (['ry'], {}), '(ry)\n', (81647, 81651), True, 'import numpy as np\n'), ((82083, 82093), 'numpy.max', 'np.max', (['qt'], {}), '(qt)\n', (82089, 82093), True, 'import numpy as np\n'), ((82095, 82105), 'numpy.max', 'np.max', (['rt'], {}), '(rt)\n', (82101, 82105), True, 'import numpy as np\n'), ((82437, 82448), 'numpy.max', 'np.max', (['qrx'], {}), '(qrx)\n', (82443, 82448), True, 'import numpy as np\n'), ((82450, 82461), 'numpy.max', 'np.max', (['qry'], {}), '(qry)\n', (82456, 82461), True, 'import numpy as np\n'), ((82463, 82474), 'numpy.max', 'np.max', (['rrx'], {}), '(rrx)\n', (82469, 82474), True, 'import numpy as np\n'), ((82476, 82487), 'numpy.max', 'np.max', (['rry'], {}), '(rry)\n', (82482, 82487), True, 'import numpy as np\n'), ((82928, 82939), 'numpy.max', 'np.max', (['qnx'], {}), '(qnx)\n', 
(82934, 82939), True, 'import numpy as np\n'), ((82941, 82952), 'numpy.max', 'np.max', (['qny'], {}), '(qny)\n', (82947, 82952), True, 'import numpy as np\n'), ((82954, 82965), 'numpy.max', 'np.max', (['rnx'], {}), '(rnx)\n', (82960, 82965), True, 'import numpy as np\n'), ((82967, 82978), 'numpy.max', 'np.max', (['rny'], {}), '(rny)\n', (82973, 82978), True, 'import numpy as np\n'), ((83412, 83423), 'numpy.max', 'np.max', (['qmu'], {}), '(qmu)\n', (83418, 83423), True, 'import numpy as np\n'), ((83425, 83435), 'numpy.max', 'np.max', (['ql'], {}), '(ql)\n', (83431, 83435), True, 'import numpy as np\n'), ((83711, 83721), 'numpy.max', 'np.max', (['qs'], {}), '(qs)\n', (83717, 83721), True, 'import numpy as np\n'), ((83723, 83733), 'numpy.max', 'np.max', (['rs'], {}), '(rs)\n', (83729, 83733), True, 'import numpy as np\n'), ((90288, 90363), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos_pred[i]', 'width': 'width', 'height': 'height', 'angle': 'theta', 'alpha': '(0.1)'}), '(xy=pos_pred[i], width=width, height=height, angle=theta, alpha=0.1)\n', (90295, 90363), False, 'from matplotlib.patches import Ellipse\n'), ((95091, 95120), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (95101, 95120), True, 'import numpy as np\n'), ((95158, 95171), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (95165, 95171), True, 'import numpy as np\n'), ((96493, 96518), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (96508, 96518), False, 'import os\n'), ((96650, 96663), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (96657, 96663), True, 'import numpy as np\n'), ((98568, 98645), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm1"""', 'trainable': 'trainable'}), "(name='segment/norm1', trainable=trainable)\n", (98602, 98645), True, 'import tensorflow.compat.v1 as tf\n'), ((98739, 98816), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm2"""', 'trainable': 'trainable'}), "(name='segment/norm2', trainable=trainable)\n", (98773, 98816), True, 'import tensorflow.compat.v1 as tf\n'), ((98910, 98987), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm3"""', 'trainable': 'trainable'}), "(name='segment/norm3', trainable=trainable)\n", (98944, 98987), True, 'import tensorflow.compat.v1 as tf\n'), ((99082, 99159), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm4"""', 'trainable': 'trainable'}), "(name='segment/norm4', trainable=trainable)\n", (99116, 99159), True, 'import tensorflow.compat.v1 as tf\n'), ((99254, 99331), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm5"""', 'trainable': 'trainable'}), "(name='segment/norm5', trainable=trainable)\n", (99288, 99331), True, 'import tensorflow.compat.v1 as tf\n'), ((99937, 99961), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""segment"""'], {}), "('segment')\n", (99950, 99961), True, 'import tensorflow.compat.v1 as tf\n'), ((100030, 100075), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['conv1', '(3)', '(2)'], {'padding': '"""SAME"""'}), "(conv1, 3, 2, padding='SAME')\n", (100046, 100075), True, 'import tensorflow.compat.v1 as tf\n'), ((100338, 100383), 'tensorflow.compat.v1.nn.max_pool2d', 
'tf.nn.max_pool2d', (['conv2', '(3)', '(2)'], {'padding': '"""SAME"""'}), "(conv2, 3, 2, padding='SAME')\n", (100354, 100383), True, 'import tensorflow.compat.v1 as tf\n'), ((100646, 100691), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['conv3', '(5)', '(4)'], {'padding': '"""SAME"""'}), "(conv3, 5, 4, padding='SAME')\n", (100662, 100691), True, 'import tensorflow.compat.v1 as tf\n'), ((101325, 101376), 'tensorflow.compat.v1.image.resize', 'tf.image.resize', (['deconv2', '[height // 2, width // 2]'], {}), '(deconv2, [height // 2, width // 2])\n', (101340, 101376), True, 'import tensorflow.compat.v1 as tf\n'), ((101651, 101693), 'tensorflow.compat.v1.image.resize', 'tf.image.resize', (['mask_out', '[height, width]'], {}), '(mask_out, [height, width])\n', (101666, 101693), True, 'import tensorflow.compat.v1 as tf\n'), ((104041, 104066), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (104056, 104066), False, 'import os\n'), ((104198, 104211), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (104205, 104211), True, 'import numpy as np\n'), ((106773, 106850), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""glimpse/norm1"""', 'trainable': 'trainable'}), "(name='glimpse/norm1', trainable=trainable)\n", (106807, 106850), True, 'import tensorflow.compat.v1 as tf\n'), ((106944, 107021), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""glimpse/norm2"""', 'trainable': 'trainable'}), "(name='glimpse/norm2', trainable=trainable)\n", (106978, 107021), True, 'import tensorflow.compat.v1 as tf\n'), ((107115, 107192), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""glimpse/norm3"""', 'trainable': 'trainable'}), "(name='glimpse/norm3', trainable=trainable)\n", (107149, 107192), True, 'import tensorflow.compat.v1 as tf\n'), ((107286, 107359), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""rot/norm1"""', 'trainable': 'trainable'}), "(name='rot/norm1', trainable=trainable)\n", (107320, 107359), True, 'import tensorflow.compat.v1 as tf\n'), ((107453, 107526), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""rot/norm2"""', 'trainable': 'trainable'}), "(name='rot/norm2', trainable=trainable)\n", (107487, 107526), True, 'import tensorflow.compat.v1 as tf\n'), ((108166, 108186), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""rot"""'], {}), "('rot')\n", (108179, 108186), True, 'import tensorflow.compat.v1 as tf\n'), ((108604, 108654), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['rot_conv1', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(rot_conv1, 3, 2, padding='VALID')\n", (108620, 108654), True, 'import tensorflow.compat.v1 as tf\n'), ((108943, 108993), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['rot_conv2', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(rot_conv2, 3, 2, padding='VALID')\n", (108959, 108993), True, 'import tensorflow.compat.v1 as tf\n'), ((109997, 110021), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""glimpse"""'], {}), "('glimpse')\n", (110010, 110021), True, 'import tensorflow.compat.v1 as tf\n'), ((110182, 110223), 'tensorflow.compat.v1.concat', 'tf.concat', (['[tip_pix_y, tip_pix_x]'], {'axis': '(1)'}), '([tip_pix_y, tip_pix_x], axis=1)\n', (110191, 110223), True, 'import 
tensorflow.compat.v1 as tf\n'), ((110272, 110372), 'tensorflow.compat.v1.image.extract_glimpse', 'tf.image.extract_glimpse', (['coord'], {'size': '[64, 64]', 'offsets': 'coords', 'centered': '(True)', 'normalized': '(False)'}), '(coord, size=[64, 64], offsets=coords, centered=\n True, normalized=False)\n', (110296, 110372), True, 'import tensorflow.compat.v1 as tf\n'), ((110509, 110609), 'tensorflow.compat.v1.image.extract_glimpse', 'tf.image.extract_glimpse', (['image'], {'size': '[64, 64]', 'offsets': 'coords', 'centered': '(True)', 'normalized': '(False)'}), '(image, size=[64, 64], offsets=coords, centered=\n True, normalized=False)\n', (110533, 110609), True, 'import tensorflow.compat.v1 as tf\n'), ((110842, 110883), 'tensorflow.compat.v1.concat', 'tf.concat', (['[im_glimpse, glimpse]'], {'axis': '(-1)'}), '([im_glimpse, glimpse], axis=-1)\n', (110851, 110883), True, 'import tensorflow.compat.v1 as tf\n'), ((110953, 111001), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['g_conv1', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(g_conv1, 3, 2, padding='VALID')\n", (110969, 111001), True, 'import tensorflow.compat.v1 as tf\n'), ((111275, 111323), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['g_conv2', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(g_conv2, 3, 2, padding='VALID')\n", (111291, 111323), True, 'import tensorflow.compat.v1 as tf\n'), ((111885, 111927), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['g_conv3', '[self.batch_size, -1]'], {}), '(g_conv3, [self.batch_size, -1])\n', (111895, 111927), True, 'import tensorflow.compat.v1 as tf\n'), ((111986, 112037), 'tensorflow.compat.v1.concat', 'tf.concat', (['[tip_pix_end - tip_pix, tip_pix]'], {'axis': '(1)'}), '([tip_pix_end - tip_pix, tip_pix], axis=1)\n', (111995, 112037), True, 'import tensorflow.compat.v1 as tf\n'), ((112073, 112118), 'tensorflow.compat.v1.concat', 'tf.concat', (['[glimpse_encoding, pix_u]'], {'axis': '(-1)'}), '([glimpse_encoding, pix_u], axis=-1)\n', (112082, 112118), True, 'import tensorflow.compat.v1 as tf\n'), ((112644, 112695), 'differentiable_filters.utils.push_utils._to_3d_d', 'utils._to_3d_d', (['r_pix', 'coord[:, :, :, -1:]', 'tip_pos'], {}), '(r_pix, coord[:, :, :, -1:], tip_pos)\n', (112658, 112695), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((113073, 113128), 'differentiable_filters.utils.push_utils._to_3d_d', 'utils._to_3d_d', (['n_end_pix', 'coord[:, :, :, -1:]', 'tip_pos'], {}), '(n_end_pix, coord[:, :, :, -1:], tip_pos)\n', (113087, 113128), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((113329, 113345), 'tensorflow.compat.v1.nn.sigmoid', 'tf.nn.sigmoid', (['s'], {}), '(s)\n', (113342, 113345), True, 'import tensorflow.compat.v1 as tf\n'), ((116105, 116124), 'numpy.ones', 'np.ones', (['self.dim_z'], {}), '(self.dim_z)\n', (116112, 116124), True, 'import numpy as np\n'), ((116478, 116513), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init_const'], {}), '(init_const)\n', (116501, 116513), True, 'import tensorflow.compat.v1 as tf\n'), ((123028, 123099), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""het_diag_pos_c1_im"""', 'het_diag_pos_c1[0:1, :, :, 0:1]'], {}), "('het_diag_pos_c1_im', het_diag_pos_c1[0:1, :, :, 0:1])\n", (123044, 123099), True, 'import tensorflow.compat.v1 as tf\n'), ((123149, 123209), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_c1_out"""', 'het_diag_pos_c1'], {}), "('het_diag_pos_c1_out', 
het_diag_pos_c1)\n", (123169, 123209), True, 'import tensorflow.compat.v1 as tf\n'), ((123226, 123286), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_c2_out"""', 'het_diag_pos_c2'], {}), "('het_diag_pos_c2_out', het_diag_pos_c2)\n", (123246, 123286), True, 'import tensorflow.compat.v1 as tf\n'), ((123303, 123365), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_fc1_out"""', 'het_diag_pos_fc1'], {}), "('het_diag_pos_fc1_out', het_diag_pos_fc1)\n", (123323, 123365), True, 'import tensorflow.compat.v1 as tf\n'), ((123382, 123440), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_fc2_out"""', 'het_diag_pos'], {}), "('het_diag_pos_fc2_out', het_diag_pos)\n", (123402, 123440), True, 'import tensorflow.compat.v1 as tf\n'), ((123457, 123514), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rot_fc_out"""', 'het_diag_rot'], {}), "('het_diag_rot_fc_out', het_diag_rot)\n", (123477, 123514), True, 'import tensorflow.compat.v1 as tf\n'), ((123531, 123589), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rns_fc1_out"""', 'het_diag_fc1'], {}), "('het_diag_rns_fc1_out', het_diag_fc1)\n", (123551, 123589), True, 'import tensorflow.compat.v1 as tf\n'), ((123606, 123664), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rns_fc2_out"""', 'het_diag_fc2'], {}), "('het_diag_rns_fc2_out', het_diag_fc2)\n", (123626, 123664), True, 'import tensorflow.compat.v1 as tf\n'), ((123681, 123739), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rns_fc3_out"""', 'het_diag_rns'], {}), "('het_diag_rns_fc3_out', het_diag_rns)\n", (123701, 123739), True, 'import tensorflow.compat.v1 as tf\n'), ((123756, 123798), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_out"""', 'diag'], {}), "('het_diag_out', diag)\n", (123776, 123798), True, 'import tensorflow.compat.v1 as tf\n'), ((124081, 124108), 'tensorflow.compat.v1.linalg.tensor_diag', 'tf.linalg.tensor_diag', (['diag'], {}), '(diag)\n', (124102, 124108), True, 'import tensorflow.compat.v1 as tf\n'), ((124125, 124172), 'tensorflow.compat.v1.tile', 'tf.tile', (['R[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(R[None, :, :], [self.batch_size, 1, 1])\n', (124132, 124172), True, 'import tensorflow.compat.v1 as tf\n'), ((131917, 131965), 'tensorflow.compat.v1.concat', 'tf.concat', (['[last_state, actions[:, :2]]'], {'axis': '(-1)'}), '([last_state, actions[:, :2]], axis=-1)\n', (131926, 131965), True, 'import tensorflow.compat.v1 as tf\n'), ((132608, 132630), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (132616, 132630), True, 'import tensorflow.compat.v1 as tf\n'), ((132664, 132687), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot_pred'], {}), '(rot_pred)\n', (132677, 132687), True, 'import tensorflow.compat.v1 as tf\n'), ((132902, 132938), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc1_out"""', 'fc1'], {}), "('fc1_out', fc1)\n", (132922, 132938), True, 'import tensorflow.compat.v1 as tf\n'), ((132955, 132991), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc2_out"""', 'fc2'], {}), "('fc2_out', fc2)\n", (132975, 132991), True, 'import tensorflow.compat.v1 as tf\n'), ((133008, 133044), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc3_out"""', 'fc3'], {}), "('fc3_out', 
fc3)\n", (133028, 133044), True, 'import tensorflow.compat.v1 as tf\n'), ((133061, 133103), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""update_out"""', 'update'], {}), "('update_out', update)\n", (133081, 133103), True, 'import tensorflow.compat.v1 as tf\n'), ((135830, 135896), 'differentiable_filters.utils.push_utils.physical_model_derivative', 'utils.physical_model_derivative', (['pos', 'cp', 'n', 'actions', 'fr', 'fr_mu', 's'], {}), '(pos, cp, n, actions, fr, fr_mu, s)\n', (135861, 135896), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((136143, 136157), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['ob'], {}), '(ob)\n', (136153, 136157), True, 'import tensorflow.compat.v1 as tf\n'), ((136179, 136219), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', (['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (136203, 136219), True, 'import tensorflow.compat.v1 as tf\n'), ((136241, 136281), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', (['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (136265, 136281), True, 'import tensorflow.compat.v1 as tf\n'), ((136758, 136791), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (136765, 136791), True, 'import tensorflow.compat.v1 as tf\n'), ((140015, 140070), 'differentiable_filters.utils.push_utils.physical_model', 'utils.physical_model', (['pos', 'cp', 'n', 'actions', 'fr', 'fr_mu', 's'], {}), '(pos, cp, n, actions, fr, fr_mu, s)\n', (140035, 140070), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((140305, 140338), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (140312, 140338), True, 'import tensorflow.compat.v1 as tf\n'), ((141345, 141364), 'numpy.ones', 'np.ones', (['self.dim_x'], {}), '(self.dim_x)\n', (141352, 141364), True, 'import numpy as np\n'), ((141411, 141433), 'numpy.square', 'np.square', (['self.q_diag'], {}), '(self.q_diag)\n', (141420, 141433), True, 'import numpy as np\n'), ((141702, 141737), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init_const'], {}), '(init_const)\n', (141725, 141737), True, 'import tensorflow.compat.v1 as tf\n'), ((149100, 149145), 'tensorflow.compat.v1.square', 'tf.square', (['(diag + self.het_diag_lrn_init_bias)'], {}), '(diag + self.het_diag_lrn_init_bias)\n', (149109, 149145), True, 'import tensorflow.compat.v1 as tf\n'), ((149206, 149226), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['diag'], {}), '(diag)\n', (149220, 149226), True, 'import tensorflow.compat.v1 as tf\n'), ((151007, 151052), 'tensorflow.compat.v1.square', 'tf.square', (['(diag + self.het_diag_ana_init_bias)'], {}), '(diag + self.het_diag_ana_init_bias)\n', (151016, 151052), True, 'import tensorflow.compat.v1 as tf\n'), ((151113, 151133), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['diag'], {}), '(diag)\n', (151127, 151133), True, 'import tensorflow.compat.v1 as tf\n'), ((2600, 2613), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (2608, 2613), True, 'import numpy as np\n'), ((2673, 2692), 'numpy.ones', 'np.ones', (['self.dim_x'], {}), '(self.dim_x)\n', (2680, 2692), True, 'import numpy as np\n'), ((2959, 2972), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (2967, 2972), True, 'import numpy as np\n'), ((3032, 3051), 'numpy.ones', 'np.ones', (['self.dim_z'], {}), '(self.dim_z)\n', (3039, 3051), True, 'import numpy 
as np\n'), ((18657, 18681), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['likelihood'], {}), '(likelihood)\n', (18669, 18681), True, 'import tensorflow.compat.v1 as tf\n'), ((18682, 18705), 'tensorflow.compat.v1.math.log', 'tf.math.log', (['self.scale'], {}), '(self.scale)\n', (18693, 18705), True, 'import tensorflow.compat.v1 as tf\n'), ((20436, 20460), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (20446, 20460), True, 'import tensorflow.compat.v1 as tf\n'), ((28598, 28622), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (28608, 28622), True, 'import tensorflow.compat.v1 as tf\n'), ((28825, 28849), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (28835, 28849), True, 'import tensorflow.compat.v1 as tf\n'), ((29589, 29629), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(dist * scale_dist * cont)'], {}), '(dist * scale_dist * cont)\n', (29603, 29629), True, 'import tensorflow.compat.v1 as tf\n'), ((29652, 29689), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['(mse * scale_mse * cont)'], {}), '(mse * scale_mse * cont)\n', (29665, 29689), True, 'import tensorflow.compat.v1 as tf\n'), ((29734, 29767), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(dist * scale_dist)'], {}), '(dist * scale_dist)\n', (29748, 29767), True, 'import tensorflow.compat.v1 as tf\n'), ((29792, 29822), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['(mse * scale_mse)'], {}), '(mse * scale_mse)\n', (29805, 29822), True, 'import tensorflow.compat.v1 as tf\n'), ((31501, 31529), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['like_good', '(1e-06)'], {}), '(like_good, 1e-06)\n', (31511, 31529), True, 'import tensorflow.compat.v1 as tf\n'), ((31592, 31625), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(1.0 - like_bad)', '(1e-06)'], {}), '(1.0 - like_bad, 1e-06)\n', (31602, 31625), True, 'import tensorflow.compat.v1 as tf\n'), ((33509, 33535), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (33523, 33535), True, 'import tensorflow.compat.v1 as tf\n'), ((36153, 36179), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (36167, 36179), True, 'import tensorflow.compat.v1 as tf\n'), ((36204, 36222), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['ce'], {}), '(ce)\n', (36218, 36222), True, 'import tensorflow.compat.v1 as tf\n'), ((36224, 36248), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (36234, 36248), True, 'import tensorflow.compat.v1 as tf\n'), ((46867, 46878), 'tensorflow.compat.v1.abs', 'tf.abs', (['ang'], {}), '(ang)\n', (46873, 46878), True, 'import tensorflow.compat.v1 as tf\n'), ((52975, 52997), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (52983, 52997), True, 'import tensorflow.compat.v1 as tf\n'), ((53052, 53073), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""rect1"""'], {}), "(ob, 'rect1')\n", (53060, 53073), True, 'import tensorflow.compat.v1 as tf\n'), ((53131, 53151), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""tri1"""'], {}), "(ob, 'tri1')\n", (53139, 53151), True, 'import tensorflow.compat.v1 as tf\n'), ((53210, 53230), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""tri2"""'], {}), "(ob, 'tri2')\n", (53218, 53230), True, 'import tensorflow.compat.v1 as tf\n'), ((53289, 53309), 'tensorflow.compat.v1.equal', 
'tf.equal', (['ob', '"""tri3"""'], {}), "(ob, 'tri3')\n", (53297, 53309), True, 'import tensorflow.compat.v1 as tf\n'), ((53368, 53387), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""hex"""'], {}), "(ob, 'hex')\n", (53376, 53387), True, 'import tensorflow.compat.v1 as tf\n'), ((53567, 53585), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot'], {}), '(rot)\n', (53580, 53585), True, 'import tensorflow.compat.v1 as tf\n'), ((54285, 54307), 'tensorflow.compat.v1.tile', 'tf.tile', (['ob', '[1, mult]'], {}), '(ob, [1, mult])\n', (54292, 54307), True, 'import tensorflow.compat.v1 as tf\n'), ((54484, 54501), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54496, 54501), True, 'import tensorflow.compat.v1 as tf\n'), ((54599, 54616), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54611, 54616), True, 'import tensorflow.compat.v1 as tf\n'), ((54715, 54732), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54727, 54732), True, 'import tensorflow.compat.v1 as tf\n'), ((54831, 54848), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54843, 54848), True, 'import tensorflow.compat.v1 as tf\n'), ((54946, 54963), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54958, 54963), True, 'import tensorflow.compat.v1 as tf\n'), ((55149, 55161), 'tensorflow.compat.v1.sign', 'tf.sign', (['rot'], {}), '(rot)\n', (55156, 55161), True, 'import tensorflow.compat.v1 as tf\n'), ((62153, 62185), 'tensorflow.compat.v1.tile', 'tf.tile', (['label[None, :]', '[24, 1]'], {}), '(label[None, :], [24, 1])\n', (62160, 62185), True, 'import tensorflow.compat.v1 as tf\n'), ((62223, 62255), 'tensorflow.compat.v1.tile', 'tf.tile', (['label[None, :]', '[24, 1]'], {}), '(label[None, :], [24, 1])\n', (62230, 62255), True, 'import tensorflow.compat.v1 as tf\n'), ((63659, 63683), 'tensorflow.compat.v1.less', 'tf.less', (['con_norm', '(1e-06)'], {}), '(con_norm, 1e-06)\n', (63666, 63683), True, 'import tensorflow.compat.v1 as tf\n'), ((64877, 64890), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (64885, 64890), True, 'import tensorflow.compat.v1 as tf\n'), ((66359, 66383), 'tensorflow.compat.v1.less', 'tf.less', (['con_norm', '(1e-06)'], {}), '(con_norm, 1e-06)\n', (66366, 66383), True, 'import tensorflow.compat.v1 as tf\n'), ((67950, 67989), 'numpy.arange', 'np.arange', (['(1)', '(seq_len - self.sl - 2)', '(20)'], {}), '(1, seq_len - self.sl - 2, 20)\n', (67959, 67989), True, 'import numpy as np\n'), ((68451, 68464), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (68459, 68464), True, 'import tensorflow.compat.v1 as tf\n'), ((69215, 69255), 'tensorflow.compat.v1.tile', 'tf.tile', (['friction[None, :]', '[self.sl, 1]'], {}), '(friction[None, :], [self.sl, 1])\n', (69222, 69255), True, 'import tensorflow.compat.v1 as tf\n'), ((69288, 69322), 'tensorflow.compat.v1.tile', 'tf.tile', (['mu[None, :]', '[self.sl, 1]'], {}), '(mu[None, :], [self.sl, 1])\n', (69295, 69322), True, 'import tensorflow.compat.v1 as tf\n'), ((69596, 69623), 'tensorflow.compat.v1.norm', 'tf.norm', (['mv[:, :2]'], {'axis': '(-1)'}), '(mv[:, :2], axis=-1)\n', (69603, 69623), True, 'import tensorflow.compat.v1 as tf\n'), ((69732, 69743), 'tensorflow.compat.v1.abs', 'tf.abs', (['mvr'], {}), '(mvr)\n', (69738, 69743), True, 'import tensorflow.compat.v1 as tf\n'), ((71983, 71993), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (71990, 71993), True, 'import numpy as np\n'), 
((72032, 72041), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (72038, 72041), True, 'import numpy as np\n'), ((72513, 72547), 'numpy.corrcoef', 'np.corrcoef', (['r_pred[i:i + 1]', 'cont'], {}), '(r_pred[i:i + 1], cont)\n', (72524, 72547), True, 'import numpy as np\n'), ((72574, 72607), 'numpy.corrcoef', 'np.corrcoef', (['r_pred[i:i + 1]', 'vis'], {}), '(r_pred[i:i + 1], vis)\n', (72585, 72607), True, 'import numpy as np\n'), ((73041, 73075), 'numpy.corrcoef', 'np.corrcoef', (['q_pred[i:i + 1]', 'cont'], {}), '(q_pred[i:i + 1], cont)\n', (73052, 73075), True, 'import numpy as np\n'), ((73597, 73607), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (73604, 73607), True, 'import numpy as np\n'), ((73646, 73655), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (73652, 73655), True, 'import numpy as np\n'), ((90183, 90212), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (90193, 90212), True, 'import numpy as np\n'), ((90250, 90263), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (90257, 90263), True, 'import numpy as np\n'), ((93669, 93685), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (93677, 93685), True, 'import numpy as np\n'), ((93773, 93787), 'numpy.cos', 'np.cos', (['r_pred'], {}), '(r_pred)\n', (93779, 93787), True, 'import numpy as np\n'), ((93868, 93882), 'numpy.sin', 'np.sin', (['r_pred'], {}), '(r_pred)\n', (93874, 93882), True, 'import numpy as np\n'), ((93915, 93929), 'numpy.cos', 'np.cos', (['r_pred'], {}), '(r_pred)\n', (93921, 93929), True, 'import numpy as np\n'), ((93956, 93972), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (93964, 93972), True, 'import numpy as np\n'), ((94053, 94065), 'numpy.cos', 'np.cos', (['r_la'], {}), '(r_la)\n', (94059, 94065), True, 'import numpy as np\n'), ((94144, 94156), 'numpy.sin', 'np.sin', (['r_la'], {}), '(r_la)\n', (94150, 94156), True, 'import numpy as np\n'), ((94189, 94201), 'numpy.cos', 'np.cos', (['r_la'], {}), '(r_la)\n', (94195, 94201), True, 'import numpy as np\n'), ((102005, 102049), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""rgb"""', 'images[:, :, :, :3]'], {}), "('rgb', images[:, :, :, :3])\n", (102021, 102049), True, 'import tensorflow.compat.v1 as tf\n'), ((102070, 102117), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""depth"""', 'coords[:, :, :, -1:]'], {}), "('depth', coords[:, :, :, -1:])\n", (102086, 102117), True, 'import tensorflow.compat.v1 as tf\n'), ((102138, 102189), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv1_im"""', 'conv1[0:1, :, :, 0:1]'], {}), "('conv1_im', conv1[0:1, :, :, 0:1])\n", (102154, 102189), True, 'import tensorflow.compat.v1 as tf\n'), ((102210, 102250), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv1_out"""', 'conv1'], {}), "('conv1_out', conv1)\n", (102230, 102250), True, 'import tensorflow.compat.v1 as tf\n'), ((102271, 102322), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv2_im"""', 'conv2[0:1, :, :, 0:1]'], {}), "('conv2_im', conv2[0:1, :, :, 0:1])\n", (102287, 102322), True, 'import tensorflow.compat.v1 as tf\n'), ((102343, 102383), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv2_out"""', 'conv2'], {}), "('conv2_out', conv2)\n", (102363, 102383), True, 'import tensorflow.compat.v1 as tf\n'), ((102404, 102455), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv3_im"""', 'conv3[0:1, :, :, 0:1]'], {}), "('conv3_im', conv3[0:1, :, :, 0:1])\n", (102420, 102455), True, 
'import tensorflow.compat.v1 as tf\n'), ((102476, 102516), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv3_out"""', 'conv3'], {}), "('conv3_out', conv3)\n", (102496, 102516), True, 'import tensorflow.compat.v1 as tf\n'), ((102537, 102592), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""deconv1_im"""', 'deconv1[0:1, :, :, 0:1]'], {}), "('deconv1_im', deconv1[0:1, :, :, 0:1])\n", (102553, 102592), True, 'import tensorflow.compat.v1 as tf\n'), ((102613, 102657), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""deconv1_out"""', 'deconv1'], {}), "('deconv1_out', deconv1)\n", (102633, 102657), True, 'import tensorflow.compat.v1 as tf\n'), ((102678, 102733), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""deconv2_im"""', 'deconv2[0:1, :, :, 0:1]'], {}), "('deconv2_im', deconv2[0:1, :, :, 0:1])\n", (102694, 102733), True, 'import tensorflow.compat.v1 as tf\n'), ((102754, 102798), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""deconv2_out"""', 'deconv2'], {}), "('deconv2_out', deconv2)\n", (102774, 102798), True, 'import tensorflow.compat.v1 as tf\n'), ((102819, 102858), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""mask"""', 'mask_out[0:1]'], {}), "('mask', mask_out[0:1])\n", (102835, 102858), True, 'import tensorflow.compat.v1 as tf\n'), ((109031, 109075), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['rot_conv2', '[self.batch_size, -1]'], {}), '(rot_conv2, [self.batch_size, -1])\n', (109041, 109075), True, 'import tensorflow.compat.v1 as tf\n'), ((109266, 109325), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_rot"""', 'glimpse_rot[0:1, :, :, :3]'], {}), "('glimpse_rot', glimpse_rot[0:1, :, :, :3])\n", (109282, 109325), True, 'import tensorflow.compat.v1 as tf\n'), ((109383, 109446), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_start"""', 'start_glimpse[0:1, :, :, :3]'], {}), "('glimpse_start', start_glimpse[0:1, :, :, :3])\n", (109399, 109446), True, 'import tensorflow.compat.v1 as tf\n'), ((109504, 109559), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv1_im"""', 'rot_conv1[0:1, :, :, 0:1]'], {}), "('conv1_im', rot_conv1[0:1, :, :, 0:1])\n", (109520, 109559), True, 'import tensorflow.compat.v1 as tf\n'), ((109580, 109624), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv1_out"""', 'rot_conv1'], {}), "('conv1_out', rot_conv1)\n", (109600, 109624), True, 'import tensorflow.compat.v1 as tf\n'), ((109645, 109700), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv2_im"""', 'rot_conv2[0:1, :, :, 0:1]'], {}), "('conv2_im', rot_conv2[0:1, :, :, 0:1])\n", (109661, 109700), True, 'import tensorflow.compat.v1 as tf\n'), ((109721, 109765), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv2_out"""', 'rot_conv2'], {}), "('conv2_out', rot_conv2)\n", (109741, 109765), True, 'import tensorflow.compat.v1 as tf\n'), ((109786, 109826), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc1_out"""', 'rot_fc1'], {}), "('fc1_out', rot_fc1)\n", (109806, 109826), True, 'import tensorflow.compat.v1 as tf\n'), ((109847, 109887), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc2_out"""', 'rot_fc2'], {}), "('fc2_out', rot_fc2)\n", (109867, 109887), True, 'import tensorflow.compat.v1 as tf\n'), ((109908, 109944), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""rot_out"""', 'rot'], 
{}), "('rot_out', rot)\n", (109928, 109944), True, 'import tensorflow.compat.v1 as tf\n'), ((110051, 110085), 'tensorflow.compat.v1.slice', 'tf.slice', (['tip_pix', '[0, 0]', '[-1, 1]'], {}), '(tip_pix, [0, 0], [-1, 1])\n', (110059, 110085), True, 'import tensorflow.compat.v1 as tf\n'), ((110118, 110152), 'tensorflow.compat.v1.slice', 'tf.slice', (['tip_pix', '[0, 1]', '[-1, 1]'], {}), '(tip_pix, [0, 1], [-1, 1])\n', (110126, 110152), True, 'import tensorflow.compat.v1 as tf\n'), ((112949, 112972), 'tensorflow.compat.v1.stop_gradient', 'tf.stop_gradient', (['r_pix'], {}), '(r_pix)\n', (112965, 112972), True, 'import tensorflow.compat.v1 as tf\n'), ((113196, 113215), 'tensorflow.compat.v1.stop_gradient', 'tf.stop_gradient', (['r'], {}), '(r)\n', (113212, 113215), True, 'import tensorflow.compat.v1 as tf\n'), ((113583, 113637), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_z"""', 'glimpse[0:1, :, :, -1:]'], {}), "('glimpse_z', glimpse[0:1, :, :, -1:])\n", (113599, 113637), True, 'import tensorflow.compat.v1 as tf\n'), ((113658, 113706), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_rgb"""', 'im_glimpse[0:1]'], {}), "('glimpse_rgb', im_glimpse[0:1])\n", (113674, 113706), True, 'import tensorflow.compat.v1 as tf\n'), ((113727, 113780), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv1_im"""', 'g_conv1[0:1, :, :, 0:1]'], {}), "('conv1_im', g_conv1[0:1, :, :, 0:1])\n", (113743, 113780), True, 'import tensorflow.compat.v1 as tf\n'), ((113801, 113843), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv1_out"""', 'g_conv1'], {}), "('conv1_out', g_conv1)\n", (113821, 113843), True, 'import tensorflow.compat.v1 as tf\n'), ((113864, 113917), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv2_im"""', 'g_conv2[0:1, :, :, 0:1]'], {}), "('conv2_im', g_conv2[0:1, :, :, 0:1])\n", (113880, 113917), True, 'import tensorflow.compat.v1 as tf\n'), ((113938, 113980), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv2_out"""', 'g_conv2'], {}), "('conv2_out', g_conv2)\n", (113958, 113980), True, 'import tensorflow.compat.v1 as tf\n'), ((114001, 114054), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv3_im"""', 'g_conv3[0:1, :, :, 0:1]'], {}), "('conv3_im', g_conv3[0:1, :, :, 0:1])\n", (114017, 114054), True, 'import tensorflow.compat.v1 as tf\n'), ((114075, 114115), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""g_fc1_out"""', 'g_fc1'], {}), "('g_fc1_out', g_fc1)\n", (114095, 114115), True, 'import tensorflow.compat.v1 as tf\n'), ((114136, 114178), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""g_rfc2_out"""', 'g_rfc2'], {}), "('g_rfc2_out', g_rfc2)\n", (114156, 114178), True, 'import tensorflow.compat.v1 as tf\n'), ((114199, 114239), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""r_pix_out"""', 'r_pix'], {}), "('r_pix_out', r_pix)\n", (114219, 114239), True, 'import tensorflow.compat.v1 as tf\n'), ((114260, 114302), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""g_nfc2_out"""', 'g_nfc2'], {}), "('g_nfc2_out', g_nfc2)\n", (114280, 114302), True, 'import tensorflow.compat.v1 as tf\n'), ((114323, 114363), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""n_pix_out"""', 'n_pix'], {}), "('n_pix_out', n_pix)\n", (114343, 114363), True, 'import tensorflow.compat.v1 as tf\n'), ((114384, 114432), 'tensorflow.compat.v1.summary.histogram', 
'tf.summary.histogram', (['"""n_end_pix_out"""', 'n_end_pix'], {}), "('n_end_pix_out', n_end_pix)\n", (114404, 114432), True, 'import tensorflow.compat.v1 as tf\n'), ((114657, 114675), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['n_norm'], {}), '(n_norm)\n', (114667, 114675), True, 'import tensorflow.compat.v1 as tf\n'), ((114780, 114798), 'tensorflow.compat.v1.tile', 'tf.tile', (['s', '[1, 2]'], {}), '(s, [1, 2])\n', (114787, 114798), True, 'import tensorflow.compat.v1 as tf\n'), ((116183, 116205), 'numpy.square', 'np.square', (['self.r_diag'], {}), '(self.r_diag)\n', (116192, 116205), True, 'import numpy as np\n'), ((118726, 118756), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (118750, 118756), True, 'import tensorflow.compat.v1 as tf\n'), ((118802, 118831), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (118825, 118831), True, 'import tensorflow.compat.v1 as tf\n'), ((124031, 124046), 'tensorflow.compat.v1.square', 'tf.square', (['diag'], {}), '(diag)\n', (124040, 124046), True, 'import tensorflow.compat.v1 as tf\n'), ((124372, 124422), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['het_full_pos_c2', '[self.batch_size, -1]'], {}), '(het_full_pos_c2, [self.batch_size, -1])\n', (124382, 124422), True, 'import tensorflow.compat.v1 as tf\n'), ((124798, 124859), 'tensorflow.compat.v1.concat', 'tf.concat', (['[het_full_pos, het_full_rot, het_full_g2]'], {'axis': '(-1)'}), '([het_full_pos, het_full_rot, het_full_g2], axis=-1)\n', (124807, 124859), True, 'import tensorflow.compat.v1 as tf\n'), ((125743, 125770), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (125765, 125770), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((125788, 125827), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.het_full_init_bias'], {}), '(self.het_full_init_bias)\n', (125802, 125827), True, 'import tensorflow.compat.v1 as tf\n'), ((126004, 126031), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (126026, 126031), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((126049, 126090), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.const_full_init_bias'], {}), '(self.const_full_init_bias)\n', (126063, 126090), True, 'import tensorflow.compat.v1 as tf\n'), ((126219, 126266), 'tensorflow.compat.v1.tile', 'tf.tile', (['R[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(R[None, :, :], [self.batch_size, 1, 1])\n', (126226, 126266), True, 'import tensorflow.compat.v1 as tf\n'), ((129744, 129806), 'numpy.array', 'np.array', (['[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], dtype=np.float32)\n', (129752, 129806), True, 'import numpy as np\n'), ((129885, 129947), 'numpy.array', 'np.array', (['[[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]], dtype=np.float32)\n', (129893, 129947), True, 'import numpy as np\n'), ((130026, 130088), 'numpy.array', 'np.array', (['[[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]]], dtype=np.float32)\n', (130034, 130088), True, 'import numpy as np\n'), ((130167, 130229), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 
0, 0, 0, 1, 0, 0, 0, 0]]], dtype=np.float32)\n', (130175, 130229), True, 'import numpy as np\n'), ((130308, 130370), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]], dtype=np.float32)\n', (130316, 130370), True, 'import numpy as np\n'), ((130449, 130511), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]], dtype=np.float32)\n', (130457, 130511), True, 'import numpy as np\n'), ((130590, 130652), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]], dtype=np.float32)\n', (130598, 130652), True, 'import numpy as np\n'), ((130731, 130793), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]], dtype=np.float32)\n', (130739, 130793), True, 'import numpy as np\n'), ((136318, 136340), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (136326, 136340), True, 'import tensorflow.compat.v1 as tf\n'), ((136378, 136401), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot_pred'], {}), '(rot_pred)\n', (136391, 136401), True, 'import tensorflow.compat.v1 as tf\n'), ((136444, 136466), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (136452, 136466), True, 'import tensorflow.compat.v1 as tf\n'), ((136499, 136517), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['dom'], {}), '(dom)\n', (136512, 136517), True, 'import tensorflow.compat.v1 as tf\n'), ((136998, 137088), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred]'], {'axis': '(1)'}), '([pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred\n ], axis=1)\n', (137007, 137088), True, 'import tensorflow.compat.v1 as tf\n'), ((140545, 140635), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred]'], {'axis': '(1)'}), '([pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred\n ], axis=1)\n', (140554, 140635), True, 'import tensorflow.compat.v1 as tf\n'), ((142708, 142738), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (142732, 142738), True, 'import tensorflow.compat.v1 as tf\n'), ((142784, 142813), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (142807, 142813), True, 'import tensorflow.compat.v1 as tf\n'), ((148886, 148935), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_lrn_fc1_out"""', 'fc1'], {}), "('het_diag_lrn_fc1_out', fc1)\n", (148906, 148935), True, 'import tensorflow.compat.v1 as tf\n'), ((148956, 149005), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_lrn_fc2_out"""', 'fc2'], {}), "('het_diag_lrn_fc2_out', fc2)\n", (148976, 149005), True, 'import tensorflow.compat.v1 as tf\n'), ((149026, 149076), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_lrn_fc3_out"""', 'diag'], {}), "('het_diag_lrn_fc3_out', diag)\n", (149046, 149076), True, 'import tensorflow.compat.v1 as tf\n'), ((149395, 149422), 'tensorflow.compat.v1.linalg.tensor_diag', 'tf.linalg.tensor_diag', (['diag'], {}), '(diag)\n', (149416, 149422), True, 'import tensorflow.compat.v1 as tf\n'), ((149443, 149490), 
'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (149450, 149490), True, 'import tensorflow.compat.v1 as tf\n'), ((150793, 150842), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_ana_fc1_out"""', 'fc1'], {}), "('het_diag_ana_fc1_out', fc1)\n", (150813, 150842), True, 'import tensorflow.compat.v1 as tf\n'), ((150863, 150912), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_ana_fc2_out"""', 'fc2'], {}), "('het_diag_ana_fc2_out', fc2)\n", (150883, 150912), True, 'import tensorflow.compat.v1 as tf\n'), ((150933, 150983), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_ana_fc3_out"""', 'diag'], {}), "('het_diag_ana_fc3_out', diag)\n", (150953, 150983), True, 'import tensorflow.compat.v1 as tf\n'), ((151302, 151329), 'tensorflow.compat.v1.linalg.tensor_diag', 'tf.linalg.tensor_diag', (['diag'], {}), '(diag)\n', (151323, 151329), True, 'import tensorflow.compat.v1 as tf\n'), ((151350, 151397), 'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (151357, 151397), True, 'import tensorflow.compat.v1 as tf\n'), ((40167, 40197), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_ana'], {}), '(likelihood_ana)\n', (40181, 40197), True, 'import tensorflow.compat.v1 as tf\n'), ((40229, 40255), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (40243, 40255), True, 'import tensorflow.compat.v1 as tf\n'), ((40287, 40317), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_ana'], {}), '(likelihood_ana)\n', (40301, 40317), True, 'import tensorflow.compat.v1 as tf\n'), ((40356, 40375), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mse'], {}), '(mse)\n', (40370, 40375), True, 'import tensorflow.compat.v1 as tf\n'), ((40565, 40595), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_ana'], {}), '(likelihood_ana)\n', (40579, 40595), True, 'import tensorflow.compat.v1 as tf\n'), ((40632, 40651), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mse'], {}), '(mse)\n', (40646, 40651), True, 'import tensorflow.compat.v1 as tf\n'), ((46800, 46811), 'tensorflow.compat.v1.abs', 'tf.abs', (['ang'], {}), '(ang)\n', (46806, 46811), True, 'import tensorflow.compat.v1 as tf\n'), ((46854, 46865), 'tensorflow.compat.v1.abs', 'tf.abs', (['ang'], {}), '(ang)\n', (46860, 46865), True, 'import tensorflow.compat.v1 as tf\n'), ((47337, 47364), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(1 - pred)', '(1e-07)'], {}), '(1 - pred, 1e-07)\n', (47347, 47364), True, 'import tensorflow.compat.v1 as tf\n'), ((47402, 47425), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['pred', '(1e-07)'], {}), '(pred, 1e-07)\n', (47412, 47425), True, 'import tensorflow.compat.v1 as tf\n'), ((47466, 47493), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(1 - pred)', '(1e-07)'], {}), '(1 - pred, 1e-07)\n', (47476, 47493), True, 'import tensorflow.compat.v1 as tf\n'), ((47531, 47554), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['pred', '(1e-07)'], {}), '(pred, 1e-07)\n', (47541, 47554), True, 'import tensorflow.compat.v1 as tf\n'), ((53007, 53025), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot'], {}), '(rot)\n', (53020, 53025), True, 'import tensorflow.compat.v1 as tf\n'), ((53653, 53665), 'tensorflow.compat.v1.sign', 'tf.sign', (['rot'], {}), '(rot)\n', 
(53660, 53665), True, 'import tensorflow.compat.v1 as tf\n'), ((55127, 55138), 'tensorflow.compat.v1.abs', 'tf.abs', (['rot'], {}), '(rot)\n', (55133, 55138), True, 'import tensorflow.compat.v1 as tf\n'), ((63895, 63925), 'tensorflow.compat.v1.ones', 'tf.ones', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (63902, 63925), True, 'import tensorflow.compat.v1 as tf\n'), ((66595, 66625), 'tensorflow.compat.v1.ones', 'tf.ones', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (66602, 66625), True, 'import tensorflow.compat.v1 as tf\n'), ((87782, 87797), 'numpy.asscalar', 'np.asscalar', (['ob'], {}), '(ob)\n', (87793, 87797), True, 'import numpy as np\n'), ((93103, 93233), 'matplotlib.patches.Ellipse', 'Ellipse', (['(pos_pred[i, 0], pos_pred[i, 1])', '(2 * a * 1000)', '(2 * b * 1000)', 'seq_pred[i, 2]'], {'alpha': '(0.1)', 'facecolor': '"""r"""', 'edgecolor': '"""r"""'}), "((pos_pred[i, 0], pos_pred[i, 1]), 2 * a * 1000, 2 * b * 1000,\n seq_pred[i, 2], alpha=0.1, facecolor='r', edgecolor='r')\n", (93110, 93233), False, 'from matplotlib.patches import Ellipse\n'), ((93387, 93502), 'matplotlib.patches.Ellipse', 'Ellipse', (['(seq[i, 0], seq[i, 1])', '(2 * a * 1000)', '(2 * b * 1000)', 'seq[i, 2]'], {'alpha': '(0.1)', 'facecolor': '"""g"""', 'edgecolor': '"""g"""'}), "((seq[i, 0], seq[i, 1]), 2 * a * 1000, 2 * b * 1000, seq[i, 2],\n alpha=0.1, facecolor='g', edgecolor='g')\n", (93394, 93502), False, 'from matplotlib.patches import Ellipse\n'), ((93821, 93835), 'numpy.sin', 'np.sin', (['r_pred'], {}), '(r_pred)\n', (93827, 93835), True, 'import numpy as np\n'), ((94099, 94111), 'numpy.sin', 'np.sin', (['r_la'], {}), '(r_la)\n', (94105, 94111), True, 'import numpy as np\n'), ((94672, 94730), 'matplotlib.patches.Polygon', 'Polygon', (['points_p'], {'alpha': '(0.1)', 'facecolor': '"""r"""', 'edgecolor': '"""r"""'}), "(points_p, alpha=0.1, facecolor='r', edgecolor='r')\n", (94679, 94730), False, 'from matplotlib.patches import Polygon\n'), ((94810, 94868), 'matplotlib.patches.Polygon', 'Polygon', (['points_l'], {'alpha': '(0.1)', 'facecolor': '"""g"""', 'edgecolor': '"""g"""'}), "(points_l, alpha=0.1, facecolor='g', edgecolor='g')\n", (94817, 94868), False, 'from matplotlib.patches import Polygon\n'), ((101801, 101856), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""n1_mean"""', 'self.im_n1.moving_mean'], {}), "('n1_mean', self.im_n1.moving_mean)\n", (101821, 101856), True, 'import tensorflow.compat.v1 as tf\n'), ((101881, 101939), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""n1_var"""', 'self.im_n1.moving_variance'], {}), "('n1_var', self.im_n1.moving_variance)\n", (101901, 101939), True, 'import tensorflow.compat.v1 as tf\n'), ((119179, 119209), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (119203, 119209), True, 'import tensorflow.compat.v1 as tf\n'), ((119255, 119284), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (119278, 119284), True, 'import tensorflow.compat.v1 as tf\n'), ((125047, 125118), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""het_full_pos_c1_im"""', 'het_full_pos_c1[0:1, :, :, 0:1]'], {}), "('het_full_pos_c1_im', het_full_pos_c1[0:1, :, :, 0:1])\n", (125063, 125118), True, 'import tensorflow.compat.v1 as tf\n'), ((125168, 125228), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_pos_c1_out"""', 'het_full_pos_c1'], {}), 
"('het_full_pos_c1_out', het_full_pos_c1)\n", (125188, 125228), True, 'import tensorflow.compat.v1 as tf\n'), ((125245, 125305), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_pos_c2_out"""', 'het_full_pos_c2'], {}), "('het_full_pos_c2_out', het_full_pos_c2)\n", (125265, 125305), True, 'import tensorflow.compat.v1 as tf\n'), ((125322, 125379), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_pos_fc_out"""', 'het_full_pos'], {}), "('het_full_pos_fc_out', het_full_pos)\n", (125342, 125379), True, 'import tensorflow.compat.v1 as tf\n'), ((125396, 125453), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_rot_fc_out"""', 'het_full_rot'], {}), "('het_full_rot_fc_out', het_full_rot)\n", (125416, 125453), True, 'import tensorflow.compat.v1 as tf\n'), ((125470, 125525), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_g_fc1_out"""', 'het_full_g1'], {}), "('het_full_g_fc1_out', het_full_g1)\n", (125490, 125525), True, 'import tensorflow.compat.v1 as tf\n'), ((125542, 125597), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_g_fc2_out"""', 'het_full_g2'], {}), "('het_full_g_fc2_out', het_full_g2)\n", (125562, 125597), True, 'import tensorflow.compat.v1 as tf\n'), ((125614, 125668), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_fc1_out"""', 'het_full_fc1'], {}), "('het_full_fc1_out', het_full_fc1)\n", (125634, 125668), True, 'import tensorflow.compat.v1 as tf\n'), ((125685, 125725), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_tri_out"""', 'tri'], {}), "('het_tri_out', tri)\n", (125705, 125725), True, 'import tensorflow.compat.v1 as tf\n'), ((125857, 125886), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['R'], {}), '(R)\n', (125883, 125886), True, 'import tensorflow.compat.v1 as tf\n'), ((125908, 125939), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (125922, 125939), True, 'import tensorflow.compat.v1 as tf\n'), ((126120, 126149), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['R'], {}), '(R)\n', (126146, 126149), True, 'import tensorflow.compat.v1 as tf\n'), ((126171, 126202), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (126185, 126202), True, 'import tensorflow.compat.v1 as tf\n'), ((137371, 137401), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 7]'], {}), '([self.batch_size, 7])\n', (137379, 137401), True, 'import tensorflow.compat.v1 as tf\n'), ((137436, 137469), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (137443, 137469), True, 'import tensorflow.compat.v1 as tf\n'), ((137504, 137534), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 2]'], {}), '([self.batch_size, 2])\n', (137512, 137534), True, 'import tensorflow.compat.v1 as tf\n'), ((137611, 137641), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 8]'], {}), '([self.batch_size, 8])\n', (137619, 137641), True, 'import tensorflow.compat.v1 as tf\n'), ((137676, 137709), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (137683, 137709), True, 'import tensorflow.compat.v1 as tf\n'), ((137744, 137774), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 1]'], 
{}), '([self.batch_size, 1])\n', (137752, 137774), True, 'import tensorflow.compat.v1 as tf\n'), ((137850, 137880), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 9]'], {}), '([self.batch_size, 9])\n', (137858, 137880), True, 'import tensorflow.compat.v1 as tf\n'), ((137914, 137947), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (137921, 137947), True, 'import tensorflow.compat.v1 as tf\n'), ((139169, 139205), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['dnx', '[-1, 1, self.dim_x]'], {}), '(dnx, [-1, 1, self.dim_x])\n', (139179, 139205), True, 'import tensorflow.compat.v1 as tf\n'), ((139228, 139264), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['dny', '[-1, 1, self.dim_x]'], {}), '(dny, [-1, 1, self.dim_x])\n', (139238, 139264), True, 'import tensorflow.compat.v1 as tf\n'), ((139287, 139322), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['ds', '[-1, 1, self.dim_x]'], {}), '(ds, [-1, 1, self.dim_x])\n', (139297, 139322), True, 'import tensorflow.compat.v1 as tf\n'), ((143154, 143184), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (143178, 143184), True, 'import tensorflow.compat.v1 as tf\n'), ((143230, 143259), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (143253, 143259), True, 'import tensorflow.compat.v1 as tf\n'), ((149341, 149356), 'tensorflow.compat.v1.square', 'tf.square', (['diag'], {}), '(diag)\n', (149350, 149356), True, 'import tensorflow.compat.v1 as tf\n'), ((149953, 149980), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (149975, 149980), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((150002, 150045), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.het_full_lrn_init_bias'], {}), '(self.het_full_lrn_init_bias)\n', (150016, 150045), True, 'import tensorflow.compat.v1 as tf\n'), ((150246, 150273), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (150268, 150273), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((150295, 150340), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.const_full_lrn_init_bias'], {}), '(self.const_full_lrn_init_bias)\n', (150309, 150340), True, 'import tensorflow.compat.v1 as tf\n'), ((150481, 150528), 'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (150488, 150528), True, 'import tensorflow.compat.v1 as tf\n'), ((151248, 151263), 'tensorflow.compat.v1.square', 'tf.square', (['diag'], {}), '(diag)\n', (151257, 151263), True, 'import tensorflow.compat.v1 as tf\n'), ((151860, 151887), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (151882, 151887), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((151909, 151952), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.het_full_ana_init_bias'], {}), '(self.het_full_ana_init_bias)\n', (151923, 151952), True, 'import tensorflow.compat.v1 as tf\n'), ((152153, 152180), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (152175, 152180), True, 'from 
differentiable_filters.utils import tensorflow_compatability as compat\n'), ((152202, 152247), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.const_full_ana_init_bias'], {}), '(self.const_full_ana_init_bias)\n', (152216, 152247), True, 'import tensorflow.compat.v1 as tf\n'), ((152388, 152435), 'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (152395, 152435), True, 'import tensorflow.compat.v1 as tf\n'), ((22938, 22964), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (22952, 22964), True, 'import tensorflow.compat.v1 as tf\n'), ((33464, 33492), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['contact_loss'], {}), '(contact_loss)\n', (33478, 33492), True, 'import tensorflow.compat.v1 as tf\n'), ((40044, 40063), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mse'], {}), '(mse)\n', (40058, 40063), True, 'import tensorflow.compat.v1 as tf\n'), ((40102, 40128), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (40116, 40128), True, 'import tensorflow.compat.v1 as tf\n'), ((40501, 40527), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (40515, 40527), True, 'import tensorflow.compat.v1 as tf\n'), ((53083, 53100), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53095, 53100), True, 'import tensorflow.compat.v1 as tf\n'), ((53161, 53178), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53173, 53178), True, 'import tensorflow.compat.v1 as tf\n'), ((53240, 53257), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53252, 53257), True, 'import tensorflow.compat.v1 as tf\n'), ((53319, 53336), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53331, 53336), True, 'import tensorflow.compat.v1 as tf\n'), ((53397, 53414), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53409, 53414), True, 'import tensorflow.compat.v1 as tf\n'), ((53631, 53642), 'tensorflow.compat.v1.abs', 'tf.abs', (['rot'], {}), '(rot)\n', (53637, 53642), True, 'import tensorflow.compat.v1 as tf\n'), ((74185, 74222), 'numpy.corrcoef', 'np.corrcoef', (['r_het_diag[i:i + 1]', 'vis'], {}), '(r_het_diag[i:i + 1], vis)\n', (74196, 74222), True, 'import numpy as np\n'), ((74263, 74299), 'numpy.corrcoef', 'np.corrcoef', (['r_het_tri[i:i + 1]', 'vis'], {}), '(r_het_tri[i:i + 1], vis)\n', (74274, 74299), True, 'import numpy as np\n'), ((121201, 121231), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (121225, 121231), True, 'import tensorflow.compat.v1 as tf\n'), ((121277, 121306), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (121300, 121306), True, 'import tensorflow.compat.v1 as tf\n'), ((121522, 121552), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (121546, 121552), True, 'import tensorflow.compat.v1 as tf\n'), ((121598, 121626), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (121621, 121626), True, 'import tensorflow.compat.v1 as tf\n'), ((121942, 121972), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (121966, 121972), True, 'import tensorflow.compat.v1 as tf\n'), 
((122018, 122047), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (122041, 122047), True, 'import tensorflow.compat.v1 as tf\n'), ((138048, 138112), 'numpy.array', 'np.array', (['[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138056, 138112), True, 'import numpy as np\n'), ((138174, 138238), 'numpy.array', 'np.array', (['[[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138182, 138238), True, 'import numpy as np\n'), ((138301, 138365), 'numpy.array', 'np.array', (['[[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138309, 138365), True, 'import numpy as np\n'), ((138431, 138495), 'numpy.array', 'np.array', (['[[[0, 0, 0, 1, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 1, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138439, 138495), True, 'import numpy as np\n'), ((138618, 138682), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 1, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 1, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138626, 138682), True, 'import numpy as np\n'), ((138805, 138867), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]], dtype=np.float32)\n', (138813, 138867), True, 'import numpy as np\n'), ((138991, 139053), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]], dtype=np.float32)\n', (138999, 139053), True, 'import numpy as np\n'), ((144110, 144140), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (144134, 144140), True, 'import tensorflow.compat.v1 as tf\n'), ((144186, 144215), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (144209, 144215), True, 'import tensorflow.compat.v1 as tf\n'), ((149746, 149795), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_lrn_fc1_out"""', 'fc1'], {}), "('het_full_lrn_fc1_out', fc1)\n", (149766, 149795), True, 'import tensorflow.compat.v1 as tf\n'), ((149816, 149865), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_lrn_fc2_out"""', 'fc2'], {}), "('het_full_lrn_fc2_out', fc2)\n", (149836, 149865), True, 'import tensorflow.compat.v1 as tf\n'), ((149886, 149931), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_lrn_out"""', 'tri'], {}), "('het_full_lrn_out', tri)\n", (149906, 149931), True, 'import tensorflow.compat.v1 as tf\n'), ((150079, 150108), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (150105, 150108), True, 'import tensorflow.compat.v1 as tf\n'), ((150134, 150165), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (150148, 150165), True, 'import tensorflow.compat.v1 as tf\n'), ((150374, 150403), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (150400, 150403), True, 'import tensorflow.compat.v1 as tf\n'), ((150429, 150460), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (150443, 150460), True, 'import tensorflow.compat.v1 as tf\n'), 
((151653, 151702), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_ana_fc1_out"""', 'fc1'], {}), "('het_full_ana_fc1_out', fc1)\n", (151673, 151702), True, 'import tensorflow.compat.v1 as tf\n'), ((151723, 151772), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_ana_fc2_out"""', 'fc2'], {}), "('het_full_ana_fc2_out', fc2)\n", (151743, 151772), True, 'import tensorflow.compat.v1 as tf\n'), ((151793, 151838), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_ana_out"""', 'tri'], {}), "('het_full_ana_out', tri)\n", (151813, 151838), True, 'import tensorflow.compat.v1 as tf\n'), ((151986, 152015), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (152012, 152015), True, 'import tensorflow.compat.v1 as tf\n'), ((152041, 152072), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (152055, 152072), True, 'import tensorflow.compat.v1 as tf\n'), ((152281, 152310), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (152307, 152310), True, 'import tensorflow.compat.v1 as tf\n'), ((152336, 152367), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (152350, 152367), True, 'import tensorflow.compat.v1 as tf\n'), ((23079, 23105), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23093, 23105), True, 'import tensorflow.compat.v1 as tf\n'), ((33393, 33407), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (33401, 33407), True, 'import tensorflow.compat.v1 as tf\n'), ((33415, 33441), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (33429, 33441), True, 'import tensorflow.compat.v1 as tf\n'), ((144490, 144520), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (144514, 144520), True, 'import tensorflow.compat.v1 as tf\n'), ((144566, 144594), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (144589, 144594), True, 'import tensorflow.compat.v1 as tf\n'), ((144886, 144916), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (144910, 144916), True, 'import tensorflow.compat.v1 as tf\n'), ((144962, 144991), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (144985, 144991), True, 'import tensorflow.compat.v1 as tf\n'), ((23220, 23246), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23234, 23246), True, 'import tensorflow.compat.v1 as tf\n'), ((23327, 23361), 'tensorflow.compat.v1.less', 'tf.less', (['step', '(self.epoch_size * 3)'], {}), '(step, self.epoch_size * 3)\n', (23334, 23361), True, 'import tensorflow.compat.v1 as tf\n'), ((94451, 94462), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (94459, 94462), True, 'import numpy as np\n'), ((145945, 145975), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (145969, 145975), True, 'import tensorflow.compat.v1 as tf\n'), ((146021, 146050), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (146044, 146050), True, 'import tensorflow.compat.v1 as tf\n'), ((23461, 23487), 
'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23475, 23487), True, 'import tensorflow.compat.v1 as tf\n'), ((32708, 32734), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (32722, 32734), True, 'import tensorflow.compat.v1 as tf\n'), ((33191, 33219), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['contact_loss'], {}), '(contact_loss)\n', (33205, 33219), True, 'import tensorflow.compat.v1 as tf\n'), ((33258, 33284), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (33272, 33284), True, 'import tensorflow.compat.v1 as tf\n'), ((89363, 89376), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (89369, 89376), True, 'import numpy as np\n'), ((89414, 89427), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (89420, 89427), True, 'import numpy as np\n'), ((94521, 94536), 'numpy.dot', 'np.dot', (['r_p', 'pt'], {}), '(r_p, pt)\n', (94527, 94536), True, 'import numpy as np\n'), ((94598, 94613), 'numpy.dot', 'np.dot', (['r_l', 'pt'], {}), '(r_l, pt)\n', (94604, 94613), True, 'import numpy as np\n'), ((146395, 146425), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (146419, 146425), True, 'import tensorflow.compat.v1 as tf\n'), ((146471, 146500), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (146494, 146500), True, 'import tensorflow.compat.v1 as tf\n'), ((32641, 32669), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['contact_loss'], {}), '(contact_loss)\n', (32655, 32669), True, 'import tensorflow.compat.v1 as tf\n'), ((33127, 33153), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (33141, 33153), True, 'import tensorflow.compat.v1 as tf\n'), ((147416, 147446), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (147440, 147446), True, 'import tensorflow.compat.v1 as tf\n'), ((147492, 147521), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (147515, 147521), True, 'import tensorflow.compat.v1 as tf\n'), ((32577, 32603), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (32591, 32603), True, 'import tensorflow.compat.v1 as tf\n'), ((33011, 33025), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (33019, 33025), True, 'import tensorflow.compat.v1 as tf\n'), ((147800, 147830), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (147824, 147830), True, 'import tensorflow.compat.v1 as tf\n'), ((147876, 147904), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (147899, 147904), True, 'import tensorflow.compat.v1 as tf\n'), ((148196, 148226), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (148220, 148226), True, 'import tensorflow.compat.v1 as tf\n'), ((148272, 148301), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (148295, 148301), True, 'import tensorflow.compat.v1 as tf\n'), ((32461, 32475), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (32469, 32475), True, 'import tensorflow.compat.v1 as tf\n')]
|
from s3_encryption_sdk import EncryptedBucket
def test_object_get(materials_provider, bucket):
crypto_bucket = EncryptedBucket(
bucket=bucket,
materials_provider=materials_provider,
)
body = "foo bar 4711"
crypto_bucket.put_object(
Key="object",
Body=body,
)
encrypted_obj = bucket.Object("object").get()
decrypted_obj = crypto_bucket.Object("object").get()
assert body != encrypted_obj["Body"].read().decode()
assert body == decrypted_obj["Body"].read().decode()
|
[
"s3_encryption_sdk.EncryptedBucket"
] |
[((117, 186), 's3_encryption_sdk.EncryptedBucket', 'EncryptedBucket', ([], {'bucket': 'bucket', 'materials_provider': 'materials_provider'}), '(bucket=bucket, materials_provider=materials_provider)\n', (132, 186), False, 'from s3_encryption_sdk import EncryptedBucket\n')]
|
from django.contrib import admin
from tworaven_apps.solver_interfaces.models import StatisticalModel
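# Show id, creation time, and owning user in the admin changelist.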
class StatisticalModelAdmin(admin.ModelAdmin):
list_display = ('model_id',
'created_on',
'user')
save_on_top = True
admin.site.register(StatisticalModel, StatisticalModelAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((270, 330), 'django.contrib.admin.site.register', 'admin.site.register', (['StatisticalModel', 'StatisticalModelAdmin'], {}), '(StatisticalModel, StatisticalModelAdmin)\n', (289, 330), False, 'from django.contrib import admin\n')]
|
from selenium import webdriver
import time
import json
import shutil
import re
import os
#Need firefox/chrome driver for selenium
#Get path to firefox driver
firefox_path = r"PATH/To/Gecko/Driver"
#This never changes unless you change campaigns
#Path to the journal containing the JSON of the players
path_to_external_journal = r"URL/For/External/Journal"
#Define webdriver with path
driver = webdriver.Firefox(executable_path=firefox_path)
#Define while variable
ShouldRun = True
#Empty dictionary for character data
characterData = {}
def initializeHealth(character, data):
incomingHP = str(data['curr_hp'])
incomingMAX = str(data['max_hp'])
hpfile = open(character + '_hp.txt', 'w')
hpfile.write(incomingHP)
hpfile.close()
maxhpfile = open(character + '_maxhp.txt', 'w')
maxhpfile.write(incomingMAX)
maxhpfile.close()
updateHealthBar(character, incomingHP, incomingMAX)
def initializeAC(character, data):
incomingAC = str(data['ac'])
acfile = open(character + '_ac.txt', 'w')
acfile.write(incomingAC)
acfile.close()
def initializeINI(character, data):
incomingINI = str(data['initiative'])
inifile = open(character + '_ini.txt', 'w')
inifile.write(incomingINI)
inifile.close()
def initializeLVL(character, data):
incomingLVL = str(data['level'])
levelfile = open(character + '_level.txt', 'w+')
levelfile.write(incomingLVL)
levelfile.close()
#Update the txt health items
def updateHealth(character, data):
dataChanged = False
#Grab incoming data from external journal
incomingHP = str(data['curr_hp'])
incomingMAX = str(data['max_hp'])
#Grab locally stored 'previous/old' data
hp = characterData.get(character, {}).get('curr_hp',None)
maxhp = characterData.get(character, {}).get('max_hp',None)
    #if the incoming hp differs from the previously checked data => update data
if hp != incomingHP:
print("Updating Current Health File For ", character, "...")
dataChanged = True
characterData[character].update({'curr_hp': incomingHP})
hpfile = open(character + '_hp.txt', 'w')
hpfile.write(incomingHP)
hpfile.close()
print("Finished Updating Health File For ", character, "...")
    #if the incoming max hp differs from the previously checked data => update data
if maxhp != incomingMAX:
print("Updating Max Health File For ", character, "...")
dataChanged = True
characterData[character].update({'max_hp': incomingMAX})
maxhpfile = open(character + '_maxhp.txt', 'w')
maxhpfile.write(incomingMAX)
maxhpfile.close()
print("Finished Updating Health File For ", character, "...")
#if either curr_hp or max_hp was changed update health bar change
if dataChanged == True:
print("Updating Damage For Health File For ", character, "...")
updateHealthBar(character, incomingHP, incomingMAX)
print("Finished Updating Health File For ", character, "...")
#Write the AC values to a file
def updateAC(character, data):
incomingAC = str(data['ac'])
ac = characterData.get(character, {}).get('ac',None)
#if the incoming ac is not the same as in the file, update the file
if ac != incomingAC:
print("Updating AC File For ", character, "...")
characterData[character].update({'ac': incomingAC})
acfile = open(character + '_ac.txt', 'w')
acfile.write(incomingAC)
acfile.close()
print("Finished Updating AC File For ", character, "...")
#Write the Initiative values to a file
def updateINI(character, data):
incomingINI = str(data['initiative'])
ini = characterData.get(character, {}).get('initiative',None)
#if the incoming initiative is not the same as in the file, update the file
if ini != incomingINI:
print("Updating Initiative File For ", character, "...")
characterData[character].update({'initiative': incomingINI})
inifile = open(character + '_ini.txt', 'w')
inifile.write(incomingINI)
inifile.close()
print("Finished Updating Initiative File For ", character, "...")
def updateHealthBar(character, hp, maxhp):
print("Updating Damage To Health File For ", character, "...")
healthBar = int(maxhp) - int(hp)
updateDamage = open(character + '_damage.txt', 'w')
updateDamage.write(str(healthBar))
updateDamage.close()
print("Finished Updating Damage To Health File For ", character, "...")
#Write the Level values to a file
def updateLevel(character, data):
incomingLVL = str(data['level'])
lvl = characterData.get(character, {}).get('level', None)
if lvl != incomingLVL:
print("Updating Level File For ", character, "...")
characterData[character].update({'level': incomingLVL})
levelfile = open(character + '_level.txt', 'w+')
levelfile.write(incomingLVL)
levelfile.close()
print("Finished Updating Level File For ", character, "...")
def main():
    #Declare the loop flag as global so the except block can actually stop the loop
    global ShouldRun
#Try to run script
try:
roll20search = re.search('Roll20: Online virtual tabletop', driver.title)
#If the title of the page already exists (ie, the window is open), don't open a new one
if roll20search:
#Get the text from HTML element
text = driver.find_element_by_xpath("""//*[@id="openpages"]/div/span""").text
#print(text)
varJSON = json.loads(text)
print("Sleep Mode For 5 Seconds...")
time.sleep(5)
print("Waking Up From Sleep Mode...")
if varJSON:
print("Checking For Updates...")
for character, value in varJSON.items():
updateHealth(character, value)
updateAC(character, value)
updateINI(character, value)
updateLevel(character, value)
print("Finished Checking For Updates...")
#if window is not open
else:
#Open URL to roll20 handout
driver.get("URL/For/External/Journal/On/Roll20")
varJSON = ""
time.sleep(5)
while not varJSON:
#Get the text from HTML element
text = driver.find_element_by_xpath("""//*[@id="openpages"]/div/span""").text
varJSON = json.loads(text)
time.sleep(5)
#print(varJSON)
if varJSON:
print("Initializing Files For Characters...")
for character, value in varJSON.items():
characterData.update({character: value})
initializeHealth(character, value)
initializeAC(character, value)
initializeINI(character, value)
initializeLVL(character, value)
print("Finished Initializing Files...")
#If you can't find the window, raise exception and exit script
except Exception as e:
print(str(e))
#Stop running for loop
ShouldRun = False
#Quit driver
driver.quit()
#Exit script
exit()
print("Running Roll20toPython Character Information Tracker Script...")
print("Do not close this window unless you are finished using this script...")
while ShouldRun:
main()
time.sleep(5)
|
[
"time.sleep",
"re.search",
"json.loads",
"selenium.webdriver.Firefox"
] |
[((410, 429), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (427, 429), False, 'from selenium import webdriver\n'), ((6684, 6697), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6694, 6697), False, 'import time\n'), ((4823, 4881), 're.search', 're.search', (['"""Roll20: Online virtual tabletop"""', 'driver.title'], {}), "('Roll20: Online virtual tabletop', driver.title)\n", (4832, 4881), False, 'import re\n'), ((5142, 5158), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (5152, 5158), False, 'import json\n'), ((5204, 5217), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5214, 5217), False, 'import time\n'), ((5692, 5705), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5702, 5705), False, 'import time\n'), ((5866, 5882), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (5876, 5882), False, 'import json\n'), ((5888, 5901), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5898, 5901), False, 'import time\n')]
|
import numpy as np
from scipy.optimize import leastsq
import matplotlib
matplotlib.use('TkAgg')
import pylab as plt
from process_data import *
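# Starting values for the fit, estimated from the mean and spread of the processed data.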
guess_mean = np.mean(y1)/2
guess_std = 3*np.std(y1)/(2**0.5)
guess_stretch = 0.3
data_first_guess = guess_std*np.sin(np.sin(guess_stretch**-1 * (x1))) + guess_mean
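# Least-squares fit of y = A*sin(sin(x/k)) + m, refining the guesses above.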
optimize_func = lambda x: x[0]*np.sin(np.sin(x[1]**-1 * (x1))) + x[2] - y1
est_std, est_stretch, est_mean = leastsq(optimize_func, [guess_std, guess_stretch, guess_mean])[0]
fig = plt.figure(1, figsize=(9, 5), dpi=150)
fig.suptitle('\\textbf{Torque Felt by Driven Gear vs. Difference in Displacements}', fontweight='bold')
fig.subplots_adjust(left=0.11, top=0.9, right=0.98, bottom=0.1)
plt.plot(x1, y1, '.', label='Processed Data Points', c='black')
plt.plot(x1, est_std*np.sin(np.sin(est_stretch**-1 * (x1))) + est_mean, '--', label='Fitted Sine Wave', c='black')
plt.ylabel('\\textbf{Torque Felt by\\\\Driven Gear (Nm)}')
plt.xlabel('\\textbf{Difference in Displacements (rad)}')
plt.xlim(0, np.pi/2)
plt.legend(numpoints=1)
plt.show()
|
[
"pylab.show",
"numpy.std",
"pylab.ylabel",
"scipy.optimize.leastsq",
"matplotlib.use",
"pylab.figure",
"numpy.mean",
"pylab.xlabel",
"numpy.sin",
"pylab.xlim",
"pylab.legend",
"pylab.plot"
] |
[((72, 95), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (86, 95), False, 'import matplotlib\n'), ((533, 571), 'pylab.figure', 'plt.figure', (['(1)'], {'figsize': '(9, 5)', 'dpi': '(150)'}), '(1, figsize=(9, 5), dpi=150)\n', (543, 571), True, 'import pylab as plt\n'), ((741, 804), 'pylab.plot', 'plt.plot', (['x1', 'y1', '"""."""'], {'label': '"""Processed Data Points"""', 'c': '"""black"""'}), "(x1, y1, '.', label='Processed Data Points', c='black')\n", (749, 804), True, 'import pylab as plt\n'), ((910, 968), 'pylab.ylabel', 'plt.ylabel', (['"""\\\\textbf{Torque Felt by\\\\\\\\Driven Gear (Nm)}"""'], {}), "('\\\\textbf{Torque Felt by\\\\\\\\Driven Gear (Nm)}')\n", (920, 968), True, 'import pylab as plt\n'), ((969, 1026), 'pylab.xlabel', 'plt.xlabel', (['"""\\\\textbf{Difference in Displacements (rad)}"""'], {}), "('\\\\textbf{Difference in Displacements (rad)}')\n", (979, 1026), True, 'import pylab as plt\n'), ((1027, 1049), 'pylab.xlim', 'plt.xlim', (['(0)', '(np.pi / 2)'], {}), '(0, np.pi / 2)\n', (1035, 1049), True, 'import pylab as plt\n'), ((1049, 1072), 'pylab.legend', 'plt.legend', ([], {'numpoints': '(1)'}), '(numpoints=1)\n', (1059, 1072), True, 'import pylab as plt\n'), ((1073, 1083), 'pylab.show', 'plt.show', ([], {}), '()\n', (1081, 1083), True, 'import pylab as plt\n'), ((190, 201), 'numpy.mean', 'np.mean', (['y1'], {}), '(y1)\n', (197, 201), True, 'import numpy as np\n'), ((460, 522), 'scipy.optimize.leastsq', 'leastsq', (['optimize_func', '[guess_std, guess_stretch, guess_mean]'], {}), '(optimize_func, [guess_std, guess_stretch, guess_mean])\n', (467, 522), False, 'from scipy.optimize import leastsq\n'), ((218, 228), 'numpy.std', 'np.std', (['y1'], {}), '(y1)\n', (224, 228), True, 'import numpy as np\n'), ((826, 867), 'numpy.sin', 'np.sin', (['(est_stretch ** -1 * x1 + est_mean)'], {}), '(est_stretch ** -1 * x1 + est_mean)\n', (832, 867), True, 'import numpy as np\n'), ((312, 344), 'numpy.sin', 'np.sin', (['(guess_stretch ** -1 * x1)'], {}), '(guess_stretch ** -1 * x1)\n', (318, 344), True, 'import numpy as np\n'), ((398, 421), 'numpy.sin', 'np.sin', (['(x[1] ** -1 * x1)'], {}), '(x[1] ** -1 * x1)\n', (404, 421), True, 'import numpy as np\n')]
|
import pygame as pg
from collections import defaultdict
cl_hellbraun = (254, 206, 158); cl_dunkelbraun = (209, 139, 71); cl_grün = (0, 50, 0)
cl_weiss = (255, 255, 255); cl_schwarz = (0, 0, 0); cl_dunkelblau = (0, 0, 100)
cl_hellblau = (0, 0, 255); cl_rot = (255, 0, 0)
def bewertung():
    # Static evaluation: sum of the piece values on the board (white counts positive).
    return sum(brett.values())
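# Recursively enumerate every capture sequence for the piece `stein` standing on
# `von`; finished sequences are collected in `sequenzen`, keyed by their origin square.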
def generiere_schläge(spieler, von, stein, sequenz, sequenzen):
dead_end = True
for n in richtungen[stein]:
for i in range(1, abs(stein)+1):
über = von + n*i
zu = über + n
if über not in brett or zu not in brett or \
brett[über] in steine[spieler] or \
(brett[über] != 0 and brett[zu] != 0):
break
if brett[über] in steine[not spieler] and brett[zu] == 0:
dead_end = False
sequenz.extend([zu, von, stein, über, brett[über]])
ziehe(spieler, sequenz[-5:])
generiere_schläge(spieler, zu, stein, sequenz.copy(), sequenzen)
ziehe_rückgängig(spieler, sequenz[-5:])
sequenz = sequenz[:-5]
break
if dead_end and sequenz:
sequenzen[sequenz[1]].append(sequenz)
return sequenzen
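# Build the move list for a player. Captures are mandatory: if any capture
# sequence exists, only captures are returned.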
def generiere_zugliste(spieler):
züge, schläge = defaultdict(list), defaultdict(list)
for von, stein in brett.items():
if stein not in steine[spieler]:
continue
schläge.update(generiere_schläge(
spieler, von, stein, [], defaultdict(list)))
if schläge:
continue
for n in richtungen[stein]:
for i in range(1, abs(stein)+1):
zu = von+n*i
if zu not in brett or brett[zu] != 0:
break
züge[von].append([zu, von, stein, None, None])
return schläge if schläge else züge
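# A move is a flat list of 5 values per step: zu, von, stein, über (jumped-over
# square or None) and the captured piece (or None).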
def ziehe(spieler, zug):
for i in range(0, len(zug), 5):
zu, von, stein, über, _ = zug[i:i + 5]
brett[von] = 0
brett[zu] = stein
if über:
brett[über] = 0
anz_steine[not spieler] -= 1
if zu in umwandlung[spieler] and abs(stein) == 1:
brett[zu] *= 8
return anz_steine[not spieler] == 0
def ziehe_rückgängig(spieler, zug):
for i in reversed(range(0, len(zug), 5)):
zu, von, stein, über, geschlagen = zug[i:i+5]
brett[von] = stein
brett[zu] = 0
if über:
brett[über] = geschlagen
anz_steine[not spieler] += 1
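# Depth-limited minimax with alpha-beta pruning; white (spieler=True) maximises
# the static evaluation, black minimises it.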
def minimax(tiefe, alpha, beta, spieler, win):
if tiefe == 0:
return (bewertung(), None)
if win:
return (-99999+tiefe if spieler else 99999-tiefe, None)
zugliste = generiere_zugliste(spieler)
if not zugliste:
return (-99999+tiefe if spieler else 99999-tiefe, None)
value = -999999 if spieler else 999999
for züge in zugliste.values():
for zug in züge:
win = ziehe(spieler, zug)
score, _ = minimax(tiefe-1, alpha, beta, not spieler, win)
ziehe_rückgängig(spieler, zug)
if spieler:
if score > value:
bester_zug = zug
value = score
alpha = max(value, alpha)
else:
if score < value:
bester_zug = zug
value = score
beta = min(value, beta)
            if alpha >= beta:
                break  # alpha-beta cutoff; note this exits only the inner loop
return value, bester_zug
def feld_zentrum(feld):
s, z = feld % 8, feld // 8
zentrum = ZELLE // 2
return (s * ZELLE + zentrum, z * ZELLE + zentrum)
def xy2cell(pos):
x, y = pos
return y // ZELLE * 8 + x // ZELLE
def cell2xy(i):
return i % 8 * ZELLE, i // 8 * ZELLE
def zeichne_stein(feld_nr):
if feld_nr not in brett or brett[feld_nr] == 0:
return
farbe = cl_weiss if brett[feld_nr] > 0 else cl_schwarz
pg.draw.circle(screen, farbe, feld_zentrum(feld_nr), int(ZELLE * 0.2))
if abs(brett[feld_nr]) == 8:
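        # A Dame (king) is marked with a small dot in the opposing colour.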
farbe = cl_weiss if brett[feld_nr] - 8 else cl_schwarz
pg.draw.circle(screen, farbe, feld_zentrum(feld_nr), int(ZELLE * 0.05))
def zeichne_brett(status):
for i in range(64):
farbe = cl_dunkelbraun if i in brett else cl_hellbraun
pg.draw.rect(screen, farbe, (cell2xy(i), (ZELLE, ZELLE)))
zeichne_stein(i)
if not status:
for i in züge:
pg.draw.rect(screen, cl_grün, (cell2xy(i), (ZELLE, ZELLE)), 7)
if status == 'von ausgewählt':
pg.draw.rect(screen, cl_rot, (cell2xy(sel_von), (ZELLE, ZELLE)), 7)
for zug in züge[sel_von]:
pg.draw.circle(screen, cl_dunkelblau,
feld_zentrum(zug[0]), int(ZELLE * 0.1))
if status == 'zeige computerzug':
for i in range(0, len(computerzug), 5):
pg.draw.line(screen, cl_hellblau, feld_zentrum(
computerzug[i]), feld_zentrum(computerzug[i+1]), 10)
pg.draw.circle(screen, cl_hellblau, feld_zentrum(
computerzug[-5]), int(ZELLE * 0.1))
if numerierung:
for von in brett:
color = cl_weiss if brett[von] in steine[False] else cl_schwarz
font = pg.font.Font(None, 32)
text = font.render(str(von), True, color)
text_rect = text.get_rect(center=(feld_zentrum(von)))
screen.blit(text, text_rect)
pg.display.flip()
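# UI state machine: None (pick a piece) -> 'von ausgewählt' (pick a target square,
# possibly chaining jumps) -> 'computer' (engine searches) -> 'zeige computerzug'
# (display and execute the reply) -> None.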
def state_machine(status, feld):
global züge, sel_von, weiss, computerzug
erster_schlag = None
if not status:
if feld not in züge:
return
sel_von = feld
return 'von ausgewählt'
if status == 'von ausgewählt':
for zug in züge[sel_von]:
if feld == zug[0]:
if len(zug) == 5:
ziehe(weiss, zug)
return 'computer'
else:
erster_schlag = zug[:5]
züge[feld].append(zug[5:])
if not erster_schlag:
return
ziehe(weiss, erster_schlag)
sel_von = feld
return 'von ausgewählt'
if status == 'computer':
weiss = not weiss
_, computerzug = minimax(6, -999999, 999999, weiss, False)
return 'zeige computerzug'
if status == 'zeige computerzug':
ziehe(weiss, computerzug)
weiss = not weiss
züge = generiere_zugliste(weiss)
return
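# Hand-crafted test position; the commented-out loop below would instead fill
# the regular three starting rows for each side.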
brett = {i: 0 for i in range(64) if i % 8 % 2 != i // 8 % 2}
brett[35] = 8
brett[51] = -1
brett[53] = -1
brett[33] = -1
brett[37] = -1
brett[17] = -1
brett[19] = -1
brett[21] = -1
# for i in brett:
# if i < 24:
# brett[i] = -1
# if i > 39:
# brett[i] = 1
richtungen = {1: (-7, -9), -1: (7, 9), -8: (-7, -9, 9, 7), 8: (-7, -9, 9, 7)}
steine = {True: {1, 8}, False: {-1, -8}}
anz_steine = {True: sum([1 for feld in brett.values() if feld > 0]),
False: sum([1 for feld in brett.values() if feld < 0])}
umwandlung = {True: {1, 3, 5, 7}, False: {56, 58, 60, 62}}
weiss = True
züge = generiere_zugliste(weiss)
computerzug = []
sel_von = state = None
numerierung = False
AUFLÖSUNG = 800
ZELLE = AUFLÖSUNG // 8
pg.init()
screen = pg.display.set_mode([AUFLÖSUNG, AUFLÖSUNG])
weitermachen = True
clock = pg.time.Clock()
while weitermachen:
clock.tick(20)
screen.fill((0, 0, 0))
for ereignis in pg.event.get():
if ereignis.type == pg.QUIT:
weitermachen = False
if ereignis.type == pg.MOUSEBUTTONDOWN and pg.mouse.get_pressed()[0]:
state = state_machine(state, xy2cell(pg.mouse.get_pos()))
if ereignis.type == pg.KEYDOWN and ereignis.key == pg.K_SPACE:
numerierung = not numerierung
zeichne_brett(state)
if state == 'computer':
state = state_machine(state, None)
pg.quit()
|
[
"pygame.quit",
"pygame.mouse.get_pressed",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.display.flip",
"collections.defaultdict",
"pygame.font.Font",
"pygame.mouse.get_pos",
"pygame.time.Clock"
] |
[((6468, 6477), 'pygame.init', 'pg.init', ([], {}), '()\n', (6475, 6477), True, 'import pygame as pg\n'), ((6487, 6530), 'pygame.display.set_mode', 'pg.display.set_mode', (['[AUFLÖSUNG, AUFLÖSUNG]'], {}), '([AUFLÖSUNG, AUFLÖSUNG])\n', (6506, 6530), True, 'import pygame as pg\n'), ((6559, 6574), 'pygame.time.Clock', 'pg.time.Clock', ([], {}), '()\n', (6572, 6574), True, 'import pygame as pg\n'), ((7066, 7075), 'pygame.quit', 'pg.quit', ([], {}), '()\n', (7073, 7075), True, 'import pygame as pg\n'), ((4858, 4875), 'pygame.display.flip', 'pg.display.flip', ([], {}), '()\n', (4873, 4875), True, 'import pygame as pg\n'), ((6656, 6670), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (6668, 6670), True, 'import pygame as pg\n'), ((1187, 1204), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1198, 1204), False, 'from collections import defaultdict\n'), ((1206, 1223), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1217, 1223), False, 'from collections import defaultdict\n'), ((4689, 4711), 'pygame.font.Font', 'pg.font.Font', (['None', '(32)'], {}), '(None, 32)\n', (4701, 4711), True, 'import pygame as pg\n'), ((1380, 1397), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1391, 1397), False, 'from collections import defaultdict\n'), ((6779, 6801), 'pygame.mouse.get_pressed', 'pg.mouse.get_pressed', ([], {}), '()\n', (6799, 6801), True, 'import pygame as pg\n'), ((6849, 6867), 'pygame.mouse.get_pos', 'pg.mouse.get_pos', ([], {}), '()\n', (6865, 6867), True, 'import pygame as pg\n')]
|
# SPDX-FileCopyrightText: (c) 2021 <NAME> <github.com/rtmigo>
# SPDX-License-Identifier: MIT
import unittest
from framefile import hash_extract_number, pct_extract_number, \
PatternMismatchError
class TestHashExtractNumber(unittest.TestCase):
def test_match(self):
self.assertEqual(hash_extract_number('file_####.jpg', 'file_1234.jpg'),
1234)
self.assertEqual(hash_extract_number('file_####.jpg', 'file_0000.jpg'),
0)
def test_mismatch(self):
with self.assertRaises(PatternMismatchError):
hash_extract_number('file_####.jpg', 'file_123.jpg')
class TestPctExtractNumber(unittest.TestCase):
def test_match(self):
self.assertEqual(pct_extract_number('file_%04d.jpg', 'file_1234.jpg'),
1234)
self.assertEqual(pct_extract_number('file_%04d.jpg', 'file_0000.jpg'),
0)
def test_mismatch(self):
with self.assertRaises(PatternMismatchError):
pct_extract_number('file_%04d.jpg', 'file_123.jpg')
|
[
"framefile.hash_extract_number",
"framefile.pct_extract_number"
] |
[((302, 355), 'framefile.hash_extract_number', 'hash_extract_number', (['"""file_####.jpg"""', '"""file_1234.jpg"""'], {}), "('file_####.jpg', 'file_1234.jpg')\n", (321, 355), False, 'from framefile import hash_extract_number, pct_extract_number, PatternMismatchError\n'), ((413, 466), 'framefile.hash_extract_number', 'hash_extract_number', (['"""file_####.jpg"""', '"""file_0000.jpg"""'], {}), "('file_####.jpg', 'file_0000.jpg')\n", (432, 466), False, 'from framefile import hash_extract_number, pct_extract_number, PatternMismatchError\n'), ((592, 644), 'framefile.hash_extract_number', 'hash_extract_number', (['"""file_####.jpg"""', '"""file_123.jpg"""'], {}), "('file_####.jpg', 'file_123.jpg')\n", (611, 644), False, 'from framefile import hash_extract_number, pct_extract_number, PatternMismatchError\n'), ((745, 797), 'framefile.pct_extract_number', 'pct_extract_number', (['"""file_%04d.jpg"""', '"""file_1234.jpg"""'], {}), "('file_%04d.jpg', 'file_1234.jpg')\n", (763, 797), False, 'from framefile import hash_extract_number, pct_extract_number, PatternMismatchError\n'), ((855, 907), 'framefile.pct_extract_number', 'pct_extract_number', (['"""file_%04d.jpg"""', '"""file_0000.jpg"""'], {}), "('file_%04d.jpg', 'file_0000.jpg')\n", (873, 907), False, 'from framefile import hash_extract_number, pct_extract_number, PatternMismatchError\n'), ((1033, 1084), 'framefile.pct_extract_number', 'pct_extract_number', (['"""file_%04d.jpg"""', '"""file_123.jpg"""'], {}), "('file_%04d.jpg', 'file_123.jpg')\n", (1051, 1084), False, 'from framefile import hash_extract_number, pct_extract_number, PatternMismatchError\n')]
|
# <NAME>, Github: falfat
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 22 18:07:11 2019
@author: falol
"""
import matplotlib.pyplot as plt
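# Percolation probability vs. dimensionless density, measured for four cell
# shapes: square, hexagon, triangle and octagon.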
square_t =[0.67874999999999996, 0.90500000000000003, 1.1312500000000001, 1.3574999999999999,1.5837500000000002, 1.8100000000000001, 2.0362499999999999, 2.2625000000000002, 2.48875, 2.7149999999999999, 2.9412500000000001, 3.1675000000000004, 3.3937499999999998, 3.6200000000000001, 3.8462500000000004, 4.0724999999999998, 4.2987500000000001, 4.5250000000000004, 4.7512499999999998, 4.9775, 5.2037500000000003, 5.4299999999999997]
square_p =[0.0, 0.02, 0.059999999999999998, 0.084000000000000005, 0.17999999999999999, 0.25, 0.34999999999999998, 0.5, 0.60999999999999999, 0.68999999999999995, 0.80000000000000004, 0.88, 0.92000000000000004, 0.97999999999999998, 0.97999999999999998, 0.98999999999999999, 0.98999999999999999, 1.0, 1.0, 1.0, 1.0, 1.0]
# hexagon
hexagon_t = [0.93562499999999993, 1.2475000000000001, 1.5593750000000002, 1.8712499999999999, 2.1831250000000004, 2.4950000000000001, 2.8068749999999998, 3.1187500000000004, 3.430625, 3.7424999999999997, 4.0543750000000003]
hexagon_p = [0.035999999999999997, 0.048000000000000001, 0.091999999999999998, 0.19600000000000001, 0.32800000000000001, 0.53600000000000003, 0.67600000000000005, 0.84799999999999998, 0.872, 0.94399999999999995, 0.97599999999999998]
# triangle
tri_t=[1.2149999999999999, 1.3500000000000001, 1.4850000000000001, 1.6199999999999999, 1.7550000000000001, 1.8900000000000001, 2.0249999999999999, 2.1600000000000001, 2.2950000000000004, 2.4299999999999997, 2.5649999999999999, 2.7000000000000002, 2.835, 2.9700000000000002, 3.105, 3.2399999999999998, 3.5100000000000002, 3.6450000000000005, 3.7800000000000002, 3.9149999999999996, 4.0499999999999998]
tri_p=[0.040000000000000001, 0.059999999999999998, 0.080000000000000002, 0.13600000000000001, 0.16, 0.22800000000000001, 0.26800000000000002, 0.36799999999999999, 0.48399999999999999, 0.55600000000000005, 0.62, 0.748, 0.82399999999999995, 0.876, 0.90000000000000002, 0.92400000000000004, 0.96799999999999997, 0.97999999999999998, 0.99199999999999999, 0.99199999999999999, 1.0]
# octagon
oct_t = [0.69237499999999996, 1.0385624999999998, 1.3847499999999999, 1.7309375, 2.07375, 2.2120000000000002, 2.35025, 2.4884999999999997,2.7650000000000001, 3.1106249999999998, 3.4562500000000003, 3.8018749999999999, 4.1475, 4.493125]
oct_p = [0.0, 0.032000000000000001, 0.064000000000000001, 0.188, 0.32000000000000001, 0.35199999999999998, 0.47199999999999998, 0.57599999999999996 , 0.65200000000000002, 0.78400000000000003, 0.872, 0.95999999999999996, 0.97599999999999998, 0.996]
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(111)
#ax1.plot(pentagon_t , pentagon_p, 'ro-', label='pentagon')
ax1.plot(square_t, square_p, 'bo-', label='square')
ax1.plot(hexagon_t, hexagon_p, 'go-', label='hexagon')
ax1.plot(tri_t, tri_p, 'ro-', label='triangle')
ax1.plot(oct_t, oct_p, 'yo-', label='octagon')
plt.xlabel("dimensionless density")
plt.ylabel("percolation probability")
plt.title("percolation threshold plot")
# Add a legend
ax1.legend(loc='best', fontsize=14)
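# Save the figure before show(), which blocks until the window is closed.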
plt.savefig('percolation_threshold')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((2650, 2676), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (2660, 2676), True, 'import matplotlib.pyplot as plt\n'), ((2968, 3003), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""dimensionless density"""'], {}), "('dimensionless density')\n", (2978, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3004, 3041), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""percolation probability"""'], {}), "('percolation probability')\n", (3014, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3042, 3081), 'matplotlib.pyplot.title', 'plt.title', (['"""percolation threshold plot"""'], {}), "('percolation threshold plot')\n", (3051, 3081), True, 'import matplotlib.pyplot as plt\n'), ((3133, 3169), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""percolation_threshold"""'], {}), "('percolation_threshold')\n", (3144, 3169), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3180), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3178, 3180), True, 'import matplotlib.pyplot as plt\n')]
|
from cursor import device
from cursor import path
from cursor import filter
import json
import random
from alive_progress import alive_bar
def file_to_paths(pc, file, pen):
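    # Each entry in `file` is one contour: a flat coordinate list [x0, y0, x1, y1, ...].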
# pc = path.PathCollection()
counter = 0
contours = len(file)
with alive_bar(contours) as bar:
for d in file:
c = 0
p = path.Path()
pos = path.TimedPosition()
for current_pos in d:
if c % 2 == 0:
pos.x = current_pos
else:
pos.y = current_pos
p.add(pos.x, pos.y, 0)
pos = path.TimedPosition()
c += 1
p.add(d[0], d[1], 0) # add first one to close shape
# print(p.shannon_direction_changes)
            # p.pen_select = pen  # the pen argument is currently ignored in favour of a random pen
            p.pen_select = random.randint(1, 4)
# print(p.pen_select)
# p.translate(random.randint(0, 400), random.randint(0, 400))
# p.scale(0.1, 0.1)
pc.add(p)
bar()
counter += 1
print(len(pc))
return pc
def split_path_and_three_colors(pc, p):
pass
def make_filled_polygon(pc):
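    # Emit HP-GL directly: SP selects a pen, PM0/PM2 open and close a polygon
    # definition, and FP fills the buffered polygon.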
file = open("g22_18_three_colors.hpgl", "w")
file.write("IN;\n")
for pa in pc:
file.write(f"SP{pa.pen_select};\n")
file.write("PM0;\n") # maybe not close it? 😈
file.write(f"PA{int(pa[0].x)},{int(pa[0].y)};\n")
for point in pa:
file.write(f"PD{int(point.x)},{int(point.y)};\n")
file.write("PM2;\n") # maybe not close it? 😈
file.write("FP;\n") # maybe not close it? 😈
file.close()
if __name__ == "__main__":
# recordings = data.DataDirHandler().recordings()
# _loader = loader.Loader(directory=recordings, limit_files=1)
# pc = _loader.all_paths()
categories = ["broccoli"]
pc_all = path.PathCollection()
for cat in categories:
# cat = "all"
fname = f"{cat}.json"
        with open(fname) as f:
            data = json.load(f)
print(f"done loading {fname}")
file_to_paths(pc_all, data, categories.index(cat) + 1)
# sorter = filter.Sorter(param=filter.Sorter.POINT_COUNT, reverse=True)
# pc_all.sort(sorter)
# rs = pc_all[0]
entropy_filter = filter.EntropyMinFilter(1.5, 1.5)
point_filter1 = filter.MinPointCountFilter(100)
point_filter2 = filter.MaxPointCountFilter(30)
# pc_all.filter(point_filter1)
# pc_all.filter(point_filter2)
pc_all.clean()
# pc_all.filter(entropy_filter)
pc = path.PathCollection()
rows = 3
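    # Lay out rows x rows random drawings; each path is split at two random
    # points into three segments drawn with different pens.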
for i in range(rows * rows):
# p = pc_all.random().copy()
p = pc_all.random()
split1 = random.uniform(0, 0.5)
split2 = random.uniform(0.5, 1.0)
end1 = int(len(p) * split1)
end2 = int(len(p) * split2)
p1 = path.Path(p.vertices[:end1])
p2 = path.Path(p.vertices[end1:end2])
p3 = path.Path(p.vertices[end2:])
p1.pen_select = (((i % 3) + 1) % 3) + 1
p2.pen_select = (((i % 3) + 2) % 3) + 1
p3.pen_select = (((i % 3) + 3) % 3) + 1
p1.is_polygon = True
p2.is_polygon = True
p3.is_polygon = True
x = (i % rows) + 1
y = (int(i / rows)) + 1
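        # Normalise each segment: centre it, fit it into 280x280, then move it
        # into its grid cell.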
center = p1.centeroid()
p1.translate(-center[0], -center[1])
pc1 = path.PathCollection()
pc1.add(p1)
pc1.fit((280, 280))
p1 = pc1[0]
p1.translate(300 * x, 300 * y)
center = p2.centeroid()
p2.translate(-center[0], -center[1])
pc2 = path.PathCollection()
pc2.add(p2)
pc2.fit((280, 280))
p2 = pc2[0]
p2.translate(300 * x, 300 * y)
center = p3.centeroid()
p3.translate(-center[0], -center[1])
pc3 = path.PathCollection()
pc3.add(p3)
pc3.fit((280, 280))
p3 = pc3[0]
p3.translate(300 * x, 300 * y)
pc.add(p1)
pc.add(p2)
pc.add(p3)
# rs2.translate(i * 1, i * 1)
# pc.add(rs2)
# pc.scale(10, 10)
# make_filled_polygon(pc)
device.SimpleExportWrapper().ex(
pc,
device.PlotterType.HP_7595A_A3,
device.PaperSize.LANDSCAPE_A3,
25,
"genuary22_18_three_colors",
f"split_path_{pc.hash()}",
)
|
[
"cursor.path.TimedPosition",
"random.randint",
"random.uniform",
"cursor.path.Path",
"cursor.filter.EntropyMinFilter",
"alive_progress.alive_bar",
"cursor.filter.MaxPointCountFilter",
"cursor.device.SimpleExportWrapper",
"cursor.path.PathCollection",
"cursor.filter.MinPointCountFilter"
] |
[((1880, 1901), 'cursor.path.PathCollection', 'path.PathCollection', ([], {}), '()\n', (1899, 1901), False, 'from cursor import path\n'), ((2267, 2300), 'cursor.filter.EntropyMinFilter', 'filter.EntropyMinFilter', (['(1.5)', '(1.5)'], {}), '(1.5, 1.5)\n', (2290, 2300), False, 'from cursor import filter\n'), ((2321, 2352), 'cursor.filter.MinPointCountFilter', 'filter.MinPointCountFilter', (['(100)'], {}), '(100)\n', (2347, 2352), False, 'from cursor import filter\n'), ((2373, 2403), 'cursor.filter.MaxPointCountFilter', 'filter.MaxPointCountFilter', (['(30)'], {}), '(30)\n', (2399, 2403), False, 'from cursor import filter\n'), ((2539, 2560), 'cursor.path.PathCollection', 'path.PathCollection', ([], {}), '()\n', (2558, 2560), False, 'from cursor import path\n'), ((259, 278), 'alive_progress.alive_bar', 'alive_bar', (['contours'], {}), '(contours)\n', (268, 278), False, 'from alive_progress import alive_bar\n'), ((2690, 2712), 'random.uniform', 'random.uniform', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (2704, 2712), False, 'import random\n'), ((2730, 2754), 'random.uniform', 'random.uniform', (['(0.5)', '(1.0)'], {}), '(0.5, 1.0)\n', (2744, 2754), False, 'import random\n'), ((2842, 2870), 'cursor.path.Path', 'path.Path', (['p.vertices[:end1]'], {}), '(p.vertices[:end1])\n', (2851, 2870), False, 'from cursor import path\n'), ((2884, 2916), 'cursor.path.Path', 'path.Path', (['p.vertices[end1:end2]'], {}), '(p.vertices[end1:end2])\n', (2893, 2916), False, 'from cursor import path\n'), ((2930, 2958), 'cursor.path.Path', 'path.Path', (['p.vertices[end2:]'], {}), '(p.vertices[end2:])\n', (2939, 2958), False, 'from cursor import path\n'), ((3343, 3364), 'cursor.path.PathCollection', 'path.PathCollection', ([], {}), '()\n', (3362, 3364), False, 'from cursor import path\n'), ((3564, 3585), 'cursor.path.PathCollection', 'path.PathCollection', ([], {}), '()\n', (3583, 3585), False, 'from cursor import path\n'), ((3785, 3806), 'cursor.path.PathCollection', 'path.PathCollection', ([], {}), '()\n', (3804, 3806), False, 'from cursor import path\n'), ((344, 355), 'cursor.path.Path', 'path.Path', ([], {}), '()\n', (353, 355), False, 'from cursor import path\n'), ((374, 394), 'cursor.path.TimedPosition', 'path.TimedPosition', ([], {}), '()\n', (392, 394), False, 'from cursor import path\n'), ((851, 871), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (865, 871), False, 'import random\n'), ((4092, 4120), 'cursor.device.SimpleExportWrapper', 'device.SimpleExportWrapper', ([], {}), '()\n', (4118, 4120), False, 'from cursor import device\n'), ((632, 652), 'cursor.path.TimedPosition', 'path.TimedPosition', ([], {}), '()\n', (650, 652), False, 'from cursor import path\n')]
|
import requests
from bs4 import BeautifulSoup
import pandas
import matplotlib.pyplot as plt
addresses = []
prices = []
numBeds = []
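# century21.com paginates results 20 per page via the s= offset parameter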
for i in range(0, 5):
try:
url = 'https://www.century21.com/real-estate/carmel-in/LCINCARMEL/?s={}'.format(i*20)
page = requests.get(url)
soup = BeautifulSoup(page.text, "html.parser")
houses = soup.find_all('div', class_='property-card-primary-info')
for house in houses:
price = house.find('a', class_="listing-price").text
price = price.replace('\n', '')
price = price.strip()
price = price.replace('$', '')
price = price.replace(',', '')
bed = house.find('div', class_="property-beds")
            if bed is None:
                bed = 'N/A'
else:
bed = bed.text
bed = bed.replace('\n', '')
bed = bed.strip()
address = house.find('div', class_="property-address").text
address = address.replace('\n', '')
address = address.strip()
#print(address, price, bed)
addresses.append(address)
prices.append(int(price))
numBeds.append(bed)
    except Exception as e:
        # a bare except would also swallow KeyboardInterrupt; report what went wrong
        print("error:", e)
housesdf = pandas.DataFrame(
{
'Address': addresses,
'Price': prices,
'Beds': numBeds
})
#print(housesdf)
housesdf.to_csv('carmelHouses.csv')
housesdf.hist(column="Price")
housesdf.plot.bar()  # to change graph units or styling, see the pandas plotting documentation
plt.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"requests.get",
"bs4.BeautifulSoup"
] |
[((1318, 1392), 'pandas.DataFrame', 'pandas.DataFrame', (["{'Address': addresses, 'Price': prices, 'Beds': numBeds}"], {}), "({'Address': addresses, 'Price': prices, 'Beds': numBeds})\n", (1334, 1392), False, 'import pandas\n'), ((1630, 1640), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1638, 1640), True, 'import matplotlib.pyplot as plt\n'), ((286, 303), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (298, 303), False, 'import requests\n'), ((320, 359), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.text', '"""html.parser"""'], {}), "(page.text, 'html.parser')\n", (333, 359), False, 'from bs4 import BeautifulSoup\n')]
|
from argparse import ArgumentParser
import pytorch_lightning as pl
from torchvision import transforms
from repalette.constants import DEFAULT_IMAGE_SIZE, DEFAULT_PRETRAIN_BATCH_SIZE
from repalette.datasets import GANDataset, PreTrainDataset
from repalette.datasets.utils import ShuffleDataLoader
class PreTrainDataModule(pl.LightningDataModule):
def __init__(
self,
batch_size=DEFAULT_PRETRAIN_BATCH_SIZE,
multiplier=16,
shuffle=True,
num_workers=15,
transform=None,
image_size=DEFAULT_IMAGE_SIZE,
size=1,
pin_memory=True,
train_batch_from_same_image=False,
val_batch_from_same_image=False,
test_batch_from_same_image=False,
**kwargs,
):
super().__init__()
self.batch_size = batch_size
self.multiplier = multiplier
self.num_workers = num_workers
self.size = size
self.pin_memory = pin_memory
self.shuffle = shuffle
self.train_batch_from_same_image = train_batch_from_same_image
self.val_batch_from_same_image = val_batch_from_same_image
self.test_batch_from_same_image = test_batch_from_same_image
self.train = None
self.val = None
self.test = None
# transform
if transform is None:
transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.Resize(image_size),
]
)
self.transform = transform
def setup(self, stage=None):
data = PreTrainDataset(
multiplier=self.multiplier,
shuffle=self.shuffle,
transform=self.transform,
)
data, _ = data.split(
test_size=(1 - self.size),
shuffle=True,
)
train, val = data.split(test_size=0.2, shuffle=True)
val, test = val.split(test_size=0.5, shuffle=True)
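        # Net effect: an 80/10/10 train/val/test split of the retained data.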
self.train = train
self.val = val
self.test = test
def train_dataloader(self):
train_dataloader = ShuffleDataLoader(
self.train,
shuffle=not self.train_batch_from_same_image,
num_workers=self.num_workers,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
)
# train dataloader should be shuffled!
train_dataloader.shuffle(True) # this will make no difference if self.train_batch_from_same_image == True
return train_dataloader
def val_dataloader(self):
val_dataloader = ShuffleDataLoader(
self.val,
shuffle=not self.val_batch_from_same_image,
num_workers=self.num_workers,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
)
return val_dataloader
def test_dataloader(self):
test_dataloader = ShuffleDataLoader(
self.test,
shuffle=not self.test_batch_from_same_image,
num_workers=self.num_workers,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
)
return test_dataloader
@staticmethod
def add_argparse_args(parent_parser: ArgumentParser) -> ArgumentParser:
hparams_parser = ArgumentParser(parents=[parent_parser], add_help=False)
hparams_parser.add_argument("--batch-size", type=int, default=8)
hparams_parser.add_argument("--multiplier", type=int, default=16)
hparams_parser.add_argument("--num-workers", type=int, default=7)
hparams_parser.add_argument("--shuffle", type=bool, default=True)
hparams_parser.add_argument("--size", type=float, default=1.0)
hparams_parser.add_argument("--pin-memory", type=bool, default=True)
hparams_parser.add_argument("--train-batch-from-same-image", type=bool, default=False)
hparams_parser.add_argument("--val-batch-from-same-image", type=bool, default=True)
hparams_parser.add_argument("--test-batch-from-same-image", type=bool, default=True)
return hparams_parser
# don't uncomment!!!
# def transfer_batch_to_device(self, batch: Any, device: torch.device) -> Any:
# # maybe we want this later
#
# def prepare_data(self, *args, **kwargs):
# # maybe we want this later
class GANDataModule(PreTrainDataModule):
def setup(self, stage=None):
data = GANDataset(
multiplier=self.multiplier,
shuffle=self.shuffle,
transform=self.transform,
)
data, _ = data.split(
test_size=(1 - self.size),
shuffle=True,
)
train, val = data.split(test_size=0.2, shuffle=True)
val, test = val.split(test_size=0.5, shuffle=True)
self.train = train
self.val = val
self.test = test
|
[
"argparse.ArgumentParser",
"torchvision.transforms.RandomHorizontalFlip",
"repalette.datasets.utils.ShuffleDataLoader",
"torchvision.transforms.RandomVerticalFlip",
"repalette.datasets.PreTrainDataset",
"repalette.datasets.GANDataset",
"torchvision.transforms.Resize"
] |
[((1658, 1754), 'repalette.datasets.PreTrainDataset', 'PreTrainDataset', ([], {'multiplier': 'self.multiplier', 'shuffle': 'self.shuffle', 'transform': 'self.transform'}), '(multiplier=self.multiplier, shuffle=self.shuffle, transform\n =self.transform)\n', (1673, 1754), False, 'from repalette.datasets import GANDataset, PreTrainDataset\n'), ((2158, 2328), 'repalette.datasets.utils.ShuffleDataLoader', 'ShuffleDataLoader', (['self.train'], {'shuffle': '(not self.train_batch_from_same_image)', 'num_workers': 'self.num_workers', 'batch_size': 'self.batch_size', 'pin_memory': 'self.pin_memory'}), '(self.train, shuffle=not self.train_batch_from_same_image,\n num_workers=self.num_workers, batch_size=self.batch_size, pin_memory=\n self.pin_memory)\n', (2175, 2328), False, 'from repalette.datasets.utils import ShuffleDataLoader\n'), ((2641, 2807), 'repalette.datasets.utils.ShuffleDataLoader', 'ShuffleDataLoader', (['self.val'], {'shuffle': '(not self.val_batch_from_same_image)', 'num_workers': 'self.num_workers', 'batch_size': 'self.batch_size', 'pin_memory': 'self.pin_memory'}), '(self.val, shuffle=not self.val_batch_from_same_image,\n num_workers=self.num_workers, batch_size=self.batch_size, pin_memory=\n self.pin_memory)\n', (2658, 2807), False, 'from repalette.datasets.utils import ShuffleDataLoader\n'), ((2958, 3126), 'repalette.datasets.utils.ShuffleDataLoader', 'ShuffleDataLoader', (['self.test'], {'shuffle': '(not self.test_batch_from_same_image)', 'num_workers': 'self.num_workers', 'batch_size': 'self.batch_size', 'pin_memory': 'self.pin_memory'}), '(self.test, shuffle=not self.test_batch_from_same_image,\n num_workers=self.num_workers, batch_size=self.batch_size, pin_memory=\n self.pin_memory)\n', (2975, 3126), False, 'from repalette.datasets.utils import ShuffleDataLoader\n'), ((3340, 3395), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': '[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (3354, 3395), False, 'from argparse import ArgumentParser\n'), ((4478, 4569), 'repalette.datasets.GANDataset', 'GANDataset', ([], {'multiplier': 'self.multiplier', 'shuffle': 'self.shuffle', 'transform': 'self.transform'}), '(multiplier=self.multiplier, shuffle=self.shuffle, transform=self\n .transform)\n', (4488, 4569), False, 'from repalette.datasets import GANDataset, PreTrainDataset\n'), ((1403, 1436), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1434, 1436), False, 'from torchvision import transforms\n'), ((1458, 1489), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (1487, 1489), False, 'from torchvision import transforms\n'), ((1511, 1540), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_size'], {}), '(image_size)\n', (1528, 1540), False, 'from torchvision import transforms\n')]
|