| code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
|---|---|---|
import argparse
import os
import pandas as pd
from oaprogression.metadata import most
from oaprogression.metadata import oai
from oaprogression.metadata.utils import data_stats
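# Build (or load cached) progression and clinical metadata for the OAI and MOST cohorts,
# save each table as a CSV under --save_meta, and print summary statistics for both datasets.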
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--oai_meta',
default='/media/lext/FAST/OA_progression_project/Data/X-Ray_Image_Assessments_SAS')
parser.add_argument('--most_meta', default='/media/lext/FAST/OA_progression_project/Data/most_meta')
parser.add_argument('--imgs_dir', default='/media/lext/FAST/OA_progression_project/Data/MOST_OAI_00_0_2')
parser.add_argument('--save_meta', default='/media/lext/FAST/OA_progression_project/workdir/Metadata/')
args = parser.parse_args()
os.makedirs(args.save_meta, exist_ok=True)
if not os.path.isfile(os.path.join(args.save_meta, 'OAI_progression.csv')):
oai_meta = oai.build_img_progression_meta(args.oai_meta)
oai_meta.to_csv(os.path.join(args.save_meta, 'OAI_progression.csv'), index=None)
else:
oai_meta = pd.read_csv(os.path.join(args.save_meta, 'OAI_progression.csv'))
print('OAI progression metadata exists!')
if not os.path.isfile(os.path.join(args.save_meta, 'OAI_participants.csv')):
oai_participants = oai.build_clinical(args.oai_meta)
oai_participants.to_csv(os.path.join(args.save_meta, 'OAI_participants.csv'), index=None)
else:
oai_participants = pd.read_csv(os.path.join(args.save_meta, 'OAI_participants.csv'))
print('OAI participants metadata exists!')
if not os.path.isfile(os.path.join(args.save_meta, 'MOST_progression.csv')):
most_meta = most.build_img_progression_meta(args.most_meta, args.imgs_dir)
most_meta.to_csv(os.path.join(args.save_meta, 'MOST_progression.csv'), index=None)
else:
most_meta = pd.read_csv(os.path.join(args.save_meta, 'MOST_progression.csv'))
print('MOST progression metadata exists!')
if not os.path.isfile(os.path.join(args.save_meta, 'MOST_participants.csv')):
most_participants = most.build_clinical(args.most_meta)
most_participants.to_csv(os.path.join(args.save_meta, 'MOST_participants.csv'), index=None)
else:
most_participants = pd.read_csv(os.path.join(args.save_meta, 'MOST_participants.csv'))
print('MOST participants metadata exists!')
print(" ")
print("# ======== OAI ======== ")
data_stats(oai_meta, oai_participants)
print(" ")
print("# ======== MOST ======== ")
data_stats(most_meta, most_participants)
|
[
"oaprogression.metadata.most.build_clinical",
"oaprogression.metadata.oai.build_img_progression_meta",
"oaprogression.metadata.oai.build_clinical",
"os.makedirs",
"argparse.ArgumentParser",
"oaprogression.metadata.most.build_img_progression_meta",
"oaprogression.metadata.utils.data_stats",
"os.path.join"
] |
[((220, 245), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (243, 245), False, 'import argparse\n'), ((751, 793), 'os.makedirs', 'os.makedirs', (['args.save_meta'], {'exist_ok': '(True)'}), '(args.save_meta, exist_ok=True)\n', (762, 793), False, 'import os\n'), ((2432, 2470), 'oaprogression.metadata.utils.data_stats', 'data_stats', (['oai_meta', 'oai_participants'], {}), '(oai_meta, oai_participants)\n', (2442, 2470), False, 'from oaprogression.metadata.utils import data_stats\n'), ((2529, 2569), 'oaprogression.metadata.utils.data_stats', 'data_stats', (['most_meta', 'most_participants'], {}), '(most_meta, most_participants)\n', (2539, 2569), False, 'from oaprogression.metadata.utils import data_stats\n'), ((893, 938), 'oaprogression.metadata.oai.build_img_progression_meta', 'oai.build_img_progression_meta', (['args.oai_meta'], {}), '(args.oai_meta)\n', (923, 938), False, 'from oaprogression.metadata import oai\n'), ((1281, 1314), 'oaprogression.metadata.oai.build_clinical', 'oai.build_clinical', (['args.oai_meta'], {}), '(args.oai_meta)\n', (1299, 1314), False, 'from oaprogression.metadata import oai\n'), ((1669, 1731), 'oaprogression.metadata.most.build_img_progression_meta', 'most.build_img_progression_meta', (['args.most_meta', 'args.imgs_dir'], {}), '(args.most_meta, args.imgs_dir)\n', (1700, 1731), False, 'from oaprogression.metadata import most\n'), ((2081, 2116), 'oaprogression.metadata.most.build_clinical', 'most.build_clinical', (['args.most_meta'], {}), '(args.most_meta)\n', (2100, 2116), False, 'from oaprogression.metadata import most\n'), ((820, 871), 'os.path.join', 'os.path.join', (['args.save_meta', '"""OAI_progression.csv"""'], {}), "(args.save_meta, 'OAI_progression.csv')\n", (832, 871), False, 'import os\n'), ((963, 1014), 'os.path.join', 'os.path.join', (['args.save_meta', '"""OAI_progression.csv"""'], {}), "(args.save_meta, 'OAI_progression.csv')\n", (975, 1014), False, 'import os\n'), ((1069, 1120), 'os.path.join', 'os.path.join', (['args.save_meta', '"""OAI_progression.csv"""'], {}), "(args.save_meta, 'OAI_progression.csv')\n", (1081, 1120), False, 'import os\n'), ((1199, 1251), 'os.path.join', 'os.path.join', (['args.save_meta', '"""OAI_participants.csv"""'], {}), "(args.save_meta, 'OAI_participants.csv')\n", (1211, 1251), False, 'import os\n'), ((1347, 1399), 'os.path.join', 'os.path.join', (['args.save_meta', '"""OAI_participants.csv"""'], {}), "(args.save_meta, 'OAI_participants.csv')\n", (1359, 1399), False, 'import os\n'), ((1462, 1514), 'os.path.join', 'os.path.join', (['args.save_meta', '"""OAI_participants.csv"""'], {}), "(args.save_meta, 'OAI_participants.csv')\n", (1474, 1514), False, 'import os\n'), ((1594, 1646), 'os.path.join', 'os.path.join', (['args.save_meta', '"""MOST_progression.csv"""'], {}), "(args.save_meta, 'MOST_progression.csv')\n", (1606, 1646), False, 'import os\n'), ((1757, 1809), 'os.path.join', 'os.path.join', (['args.save_meta', '"""MOST_progression.csv"""'], {}), "(args.save_meta, 'MOST_progression.csv')\n", (1769, 1809), False, 'import os\n'), ((1865, 1917), 'os.path.join', 'os.path.join', (['args.save_meta', '"""MOST_progression.csv"""'], {}), "(args.save_meta, 'MOST_progression.csv')\n", (1877, 1917), False, 'import os\n'), ((1997, 2050), 'os.path.join', 'os.path.join', (['args.save_meta', '"""MOST_participants.csv"""'], {}), "(args.save_meta, 'MOST_participants.csv')\n", (2009, 2050), False, 'import os\n'), ((2150, 2203), 'os.path.join', 'os.path.join', (['args.save_meta', '"""MOST_participants.csv"""'], 
{}), "(args.save_meta, 'MOST_participants.csv')\n", (2162, 2203), False, 'import os\n'), ((2267, 2320), 'os.path.join', 'os.path.join', (['args.save_meta', '"""MOST_participants.csv"""'], {}), "(args.save_meta, 'MOST_participants.csv')\n", (2279, 2320), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
import configparser
import csv
import json
import pickle
import webbrowser
import shutil
import tkinter as tk
import requests
import packaging.version
from tkinter.messagebox import showerror, showinfo, askquestion
from cryptography.fernet import Fernet, InvalidToken
from .prestashop import PrestaShopAPI, PrestaShopAPIFilter
from .window import Window
from .log import Log
from .settings import Settings
from .download_latest_update import DownloadLatestUpdate
from .functions import thread_function
from .version import __version__
maxInt = sys.maxsize
while True:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
try:
csv.field_size_limit(maxInt)
break
except OverflowError:
maxInt = int(maxInt/10)
API_KEY_SAVE_FILE = os.path.join(sys.path[0], "api.key")
SETTINGS_FILE = os.path.join(sys.path[0], "settings.ini")
BACKUP_FOLDER = os.path.join(sys.path[0], "backup")
ICON_FILE = os.path.join(sys.path[0], "icon.ico")
GUIDE_LINK = "https://github.com/francis-clairicia/GLS_WinEXP_check/blob/master/README.md"
LICENSE_LINK = "https://github.com/francis-clairicia/GLS_WinEXP_check/blob/master/LICENSE"
ISSUES_LINK = "https://github.com/francis-clairicia/GLS_WinEXP_check/issues"
RELEASE_LINK = f"https://github.com/francis-clairicia/GLS_WinEXP_check/releases/tag/v{__version__}"
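# Main application window: lets the user configure the Prestashop API connection, the
# GLS WinEXPé installation folder and the order selection mode, then exports customer
# and delivery-address data from recent orders to a CSV file consumed by GLS WinEXPé.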
class GLSWinEXPCheck(Window):
def __init__(self):
Window.__init__(self, title=f"Prestashop customer check for GLS Winexpé v{__version__}", width=900, height=600)
if os.path.isfile(ICON_FILE):
self.iconbitmap(ICON_FILE)
self.menu_bar.add_section("Fichier")
self.menu_bar.add_section_command("Fichier", "Quitter", self.stop, accelerator="Ctrl+Q")
self.menu_bar.add_section("Configurer")
self.menu_bar.add_section_command("Configurer", "Général", lambda page=Settings.GENERAL: self.change_settings(page))
self.menu_bar.add_section_command("Configurer", "Commandes", lambda page=Settings.ORDERS: self.change_settings(page))
self.menu_bar.add_section("Aide")
self.menu_bar.add_section_command("Aide", "Guide d'utilisation", lambda link=GUIDE_LINK: webbrowser.open(link, new=2))
self.menu_bar.add_section_command("Aide", "Voir la licence", lambda link=LICENSE_LINK: webbrowser.open(link, new=2))
self.menu_bar.add_section_command("Aide", "Signaler un problème", lambda link=ISSUES_LINK: webbrowser.open(link, new=2))
self.menu_bar.add_section_separator("Aide")
self.menu_bar.add_section_command("Aide", "Note de mise à jour", lambda link=RELEASE_LINK: webbrowser.open(link, new=2))
self.menu_bar.add_section_command("Aide", "Mise à jour", self.launch_application_update)
self.update_app = False
self.auto_check_update = False
self.prestashop = PrestaShopAPI(id_group_shop=1)
self.gls_folder = None
self.api_URL = tk.StringVar()
self.api_key = tk.StringVar()
self.gls_folder_label = tk.StringVar()
self.order_state_list = list()
self.order_select_mode_label = tk.StringVar()
self.order_select_mode = "nb_last_orders"
self.all_order_select_modes = {
"nb_last_orders": "Récupérer les {X} dernières commandes",
"last_gotten_order_id": "Récupérer toutes les commandes effectués depuis la commande {ID}"
}
self.nb_last_orders = 20
self.last_gotten_order_id = 0
self.central_frame = tk.Frame(self)
self.central_frame.grid(row=0, column=0)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
text_font = ("", 12)
tk.Label(self.central_frame, text="API URL:", font=text_font).grid(row=0, column=0, padx=10, pady=10, sticky=tk.W)
tk.Entry(self.central_frame, textvariable=self.api_URL, font=text_font, width=40, state="readonly").grid(row=0, column=1, padx=10, pady=10, sticky=tk.W)
tk.Label(self.central_frame, text="Dossier d'installation GLS WinEXPé:", font=text_font).grid(row=1, column=0, padx=10, pady=10, sticky=tk.W)
tk.Label(self.central_frame, textvariable=self.gls_folder_label, font=text_font).grid(row=1, column=1, padx=10, pady=10, sticky=tk.W)
tk.Label(self.central_frame, text="Mode de sélection des commandes:", font=text_font).grid(row=2, column=0, padx=10, pady=10, sticky=tk.W)
tk.Message(self.central_frame, textvariable=self.order_select_mode_label, font=text_font, aspect=900).grid(row=2, column=1, padx=10, pady=10, sticky=tk.W)
self.update_customers_button = tk.Button(self.central_frame, text="Mettre à jour", font=text_font, command=self.update_customers)
self.update_customers_button.grid(row=3, columnspan=3, padx=10, pady=10)
self.log = Log(self.central_frame, relief=tk.RIDGE, bd=4)
self.log.grid(row=4, columnspan=3, padx=10, pady=10, sticky=tk.NSEW)
self.central_frame.grid_rowconfigure(4, weight=1)
self.load_settings()
self.open_api_key_file()
self.update_stringvars()
self.settings_toplevel = dict()
if self.auto_check_update:
self.after(50, lambda: self.launch_application_update(at_start=True))
self.all_country_codes = dict()
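# Maps each column of the GLS customer CSV to a callable that extracts its value from a
# {"customer": ..., "address": ...} record; columns mapped to None are not recomputed
# (they keep the value from the backup CSV or stay empty).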
self.csv_columns_formatter = {
"Identifiant": lambda param: param["customer"]["id"],
"Nom": lambda param: "{firstname} {lastname}".format(**param["address"]),
"Nom Contact": lambda param: "{firstname} {lastname}".format(**param["address"]),
"Code Produit": None,
"COMPTE GLS": None,
"Chargeur 2": None,
"Adresse 1": lambda param: param["address"]["address1"],
"Adresse 2": lambda param: param["address"]["address2"],
"Adresse 3": None,
"Code Postal": lambda param: param["address"]["postcode"],
"Ville": lambda param: param["address"]["city"],
"Code Pays": lambda param, self=self: self.all_country_codes.get(int(param["address"]["id_country"])),
"TEL": lambda param: param["address"]["phone"],
"Mobile": lambda param: param["address"]["phone_mobile"],
"Note": lambda param: param["customer"]["note"],
"Mail": lambda param: param["customer"]["email"],
"Code NUIT": None
}
def stop(self):
self.prestashop.close()
self.quit()
@thread_function
def launch_application_update(self, at_start=False):
release = self.get_latest_update()
if release is None:
return
tag = str(release["tag_name"])
version = tag[tag.find("v") + 1:]
if release is None or packaging.version.parse(__version__) >= packaging.version.parse(version):
if not at_start:
showinfo("Mise à jour", "Vous êtes sous la dernière version connue")
return
if askquestion("Nouvelle mise à jour", f"Voulez-vous installer la nouvelle version {version} ?") == "no":
return
toplevel = DownloadLatestUpdate(self, release["assets"])
self.wait_window(toplevel)
try:
if not toplevel.error_download:
gls_model = os.path.join(sys.path[0], "Prestashop.ini")
if self.gls_folder and os.path.isdir(os.path.join(self.gls_folder, "DAT", "ConsDscr")):
if os.path.isfile(os.path.join(self.gls_folder, "DAT", "ConsDscr", os.path.basename(gls_model))):
os.remove(os.path.join(self.gls_folder, "DAT", "ConsDscr", os.path.basename(gls_model)))
shutil.move(gls_model, os.path.join(self.gls_folder, "DAT", "ConsDscr"))
else:
os.remove(gls_model)
self.update_app = True
self.stop()
except Exception as e:
showerror(e.__class__.__name__, str(e))
def get_latest_update(self) -> dict:
if self.check_github_api_rate_limit() is False:
return {"tag_name": "v0.0.0"}
url = "https://api.github.com/repos/francis-clairicia/GLS_WinEXP_check/releases/latest"
headers = {
"Accept": "application/vnd.github.v3+json"
}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except Exception as e:
showerror(e.__class__.__name__, str(e))
return None
return response.json()
def check_github_api_rate_limit(self) -> bool:
url = "https://api.github.com/rate_limit"
headers = {
"Accept": "application/vnd.github.v3+json"
}
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except Exception:
return False
return bool(response.json()["resources"]["core"]["remaining"] > 0)
def update_stringvars(self):
self.api_URL.set(self.prestashop.url or "No API URL")
self.api_key.set(self.prestashop.key or str())
self.gls_folder_label.set(self.gls_folder or "No Folder")
self.order_select_mode_label.set(self.all_order_select_modes[self.order_select_mode].format(X=self.nb_last_orders, ID=self.last_gotten_order_id))
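# The Prestashop API key is persisted in api.key as a pickled (fernet_key, encrypted_key)
# tuple; open_api_key_file() and save_api_key() below handle decryption and encryption.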
def open_api_key_file(self):
if not os.path.isfile(API_KEY_SAVE_FILE):
return
try:
with open(API_KEY_SAVE_FILE, "rb") as file:
data = pickle.load(file)
except (IOError, pickle.UnpicklingError):
return
if not isinstance(data, tuple) or len(data) != 2:
return
fernet = Fernet(data[0])
try:
api_key = fernet.decrypt(data[1]).decode()
except InvalidToken:
return
self.prestashop.key = api_key
def save_api_key(self):
if self.prestashop.key:
key = Fernet.generate_key()
fernet = Fernet(key)
encrypted_api_key = fernet.encrypt(self.prestashop.key.encode())
with open(API_KEY_SAVE_FILE, "wb") as file:
pickle.dump((key, encrypted_api_key), file)
elif os.path.isfile(API_KEY_SAVE_FILE):
os.remove(API_KEY_SAVE_FILE)
def load_settings(self):
config = configparser.ConfigParser()
config.read(SETTINGS_FILE)
self.prestashop.url = config.get("API", "url", fallback=None)
try:
self.order_state_list = json.loads(config.get("ORDERS", "order_states", fallback="[]"))
except json.JSONDecodeError:
pass
order_select_mode = config.get("ORDERS", "order_select_mode", fallback=self.order_select_mode)
if order_select_mode in self.all_order_select_modes:
self.order_select_mode = order_select_mode
try:
self.nb_last_orders = config.getint("ORDERS", "nb_last_orders_to_get", fallback=self.nb_last_orders)
except:
pass
try:
self.last_gotten_order_id = config.getint("ORDERS", "last_gotten_order_id", fallback=self.last_gotten_order_id)
except:
pass
self.gls_folder = config.get("GLS WINEXPE", "location", fallback=None)
try:
self.auto_check_update = config.getboolean("UPDATER", "auto_check_update", fallback=self.auto_check_update)
except:
pass
def save_settings(self):
settings = {
"API": {
"url": self.prestashop.url
},
"GLS WINEXPE": {
"location": self.gls_folder if self.gls_folder else str(),
},
"ORDERS": {
"order_select_mode": self.order_select_mode,
"order_states": json.dumps(self.order_state_list),
"nb_last_orders_to_get": self.nb_last_orders,
"last_gotten_order_id": self.last_gotten_order_id
},
"UPDATER": {
"auto_check_update": "yes" if self.auto_check_update else "no"
}
}
config = configparser.ConfigParser()
config.read_dict(settings)
with open(SETTINGS_FILE, "w") as file:
config.write(file, space_around_delimiters=False)
def change_settings(self, page: int):
if page not in self.settings_toplevel:
try:
self.settings_toplevel[page] = Settings(self, page)
except Exception as e:
showerror(e.__class__.__name__, str(e))
else:
self.settings_toplevel[page].focus_set()
@thread_function
def update_customers(self):
self.log.clear()
self.update_customers_button.configure(state="disabled")
self.protocol("WM_DELETE_WINDOW", lambda: None)
try:
prestashop = self.prestashop
self.log.print("Checking GLS Folder...")
if not os.path.isdir(self.gls_folder):
raise FileNotFoundError(f"Can't find '{self.gls_folder_label.get()}' folder")
output_folder = os.path.join(self.gls_folder.replace("/", "\\"), "DAT", "CsIMP")
if not os.path.isdir(output_folder):
raise FileNotFoundError(f"Can't find '{output_folder}' folder")
self.log.print("Reading backup of previous update...")
csv_filename = "Client_Prestashop.csv"
csv_file = os.path.join(BACKUP_FOLDER, csv_filename)
csv_customers = dict()
if not os.path.isdir(BACKUP_FOLDER):
os.mkdir(BACKUP_FOLDER)
if not os.path.isfile(csv_file):
self.log.print("No Backup")
else:
lines_with_errors = 0
with open(csv_file, "r", newline="") as file:
reader = csv.DictReader(file, delimiter=";", quoting=csv.QUOTE_NONE)
for i, row in enumerate(reader):
if i == 0:
continue
try:
csv_customers[int(row["Identifiant"])] = {
key: value.strip() if isinstance(value, str) else None
for key, value in row.items() if key in self.csv_columns_formatter
}
except (KeyError, ValueError):
lines_with_errors += 1
self.log.print(f"{reader.line_num - 2} lines read, removing the duplicates")
self.log.print(f"{len(csv_customers)} lines saved ({lines_with_errors} lines not valid)")
if self.order_select_mode == "nb_last_orders":
self.log.print(f"Getting the last {self.nb_last_orders} orders...")
orders = prestashop.get_all(
resource="orders",
display=["id", "id_customer", "id_address_delivery"],
filters={"current_state": PrestaShopAPIFilter.field_in_list(self.order_state_list)},
sort={"id": "DESC"},
limit=self.nb_last_orders
)
orders.sort(key=lambda order: order["id"])
elif self.order_select_mode == "last_gotten_order_id":
self.log.print(f"Getting orders with ID greater than {self.last_gotten_order_id}")
orders = prestashop.get_all(
resource="orders",
display=["id", "id_customer", "id_address_delivery"],
filters={
"id": PrestaShopAPIFilter.field_greater_than(self.last_gotten_order_id),
"current_state": PrestaShopAPIFilter.field_in_list(self.order_state_list)
},
sort={"id": "ASC"}
)
else:
orders = list()
self.log.print(f"{len(orders)} orders selected")
if orders:
self.log.print("Getting the delivery addresses list according to the order list...")
addresses = prestashop.get_all(
resource="addresses",
display=["id", "firstname", "lastname", "address1", "address2", "postcode", "city", "id_country", "phone", "phone_mobile"],
filters={"id": PrestaShopAPIFilter.field_in_list(orders, key=lambda order: order["id_address_delivery"])}
)
self.log.print(f"{len(addresses)} addresses gotten")
self.log.print("Getting the customers infos...")
customers = prestashop.get_all(
resource="customers",
display=["id", "note", "email"],
filters={"id": PrestaShopAPIFilter.field_in_list(orders, key=lambda order: order["id_customer"])}
)
self.log.print("Getting all country codes...")
self.all_country_codes = {
country["id"]: country["iso_code"] for country in prestashop.get_all("countries", display=["id", "iso_code"])
}
self.log.print("Linking addresses and customers infos...")
customer_address_list = [
{
"customer": list(filter(lambda customer: int(customer["id"]) == int(order["id_customer"]), customers))[0],
"address": list(filter(lambda address: int(address["id"]) == int(order["id_address_delivery"]), addresses))[0]
} for order in orders
]
self.log.print("Updating customers...")
for customer_address in customer_address_list:
customer_id = customer_address["customer"]["id"]
if customer_id not in csv_customers:
csv_customers[customer_id] = dict.fromkeys(self.csv_columns_formatter.keys())
row = csv_customers[customer_id]
for column in row:
updater = self.csv_columns_formatter[column]
if callable(updater):
row[column] = str(updater(customer_address)).strip()
output = os.path.join(output_folder, csv_filename)
self.log.print(f"Save customers in '{output}'")
for filepath in (output, csv_file):
with open(filepath, "w", newline="") as file:
writer = csv.DictWriter(file, fieldnames=list(self.csv_columns_formatter.keys()), delimiter=";")
writer.writeheader()
writer.writerows(csv_customers.values())
self.last_gotten_order_id = max(orders, key=lambda order: order["id"])["id"]
self.log.print("Update successful")
except Exception as e:
error_name = e.__class__.__name__
error_message = str(e)
self.log.print(f"{error_name}: {error_message}")
showerror(error_name, error_message)
else:
self.save_settings()
self.update_stringvars()
if not orders:
showinfo("Mise à jour terminée", "Rien à mettre à jour :)")
else:
showinfo("Mise à jour réussie", "La mise à jour des clients a été effectuée")
finally:
self.update_customers_button.configure(state="normal")
self.protocol("WM_DELETE_WINDOW", self.stop)
|
[
"tkinter.StringVar",
"os.mkdir",
"pickle.dump",
"os.remove",
"csv.field_size_limit",
"json.dumps",
"os.path.isfile",
"pickle.load",
"tkinter.Frame",
"tkinter.Label",
"os.path.join",
"tkinter.Message",
"tkinter.Button",
"tkinter.Entry",
"requests.get",
"configparser.ConfigParser",
"cryptography.fernet.Fernet",
"os.path.basename",
"csv.DictReader",
"tkinter.messagebox.showinfo",
"tkinter.messagebox.showerror",
"webbrowser.open",
"tkinter.messagebox.askquestion",
"os.path.isdir",
"cryptography.fernet.Fernet.generate_key"
] |
[((843, 879), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""api.key"""'], {}), "(sys.path[0], 'api.key')\n", (855, 879), False, 'import os\n'), ((896, 937), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""settings.ini"""'], {}), "(sys.path[0], 'settings.ini')\n", (908, 937), False, 'import os\n'), ((954, 989), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""backup"""'], {}), "(sys.path[0], 'backup')\n", (966, 989), False, 'import os\n'), ((1002, 1039), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""icon.ico"""'], {}), "(sys.path[0], 'icon.ico')\n", (1014, 1039), False, 'import os\n'), ((721, 749), 'csv.field_size_limit', 'csv.field_size_limit', (['maxInt'], {}), '(maxInt)\n', (741, 749), False, 'import csv\n'), ((1587, 1612), 'os.path.isfile', 'os.path.isfile', (['ICON_FILE'], {}), '(ICON_FILE)\n', (1601, 1612), False, 'import os\n'), ((2980, 2994), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2992, 2994), True, 'import tkinter as tk\n'), ((3018, 3032), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3030, 3032), True, 'import tkinter as tk\n'), ((3065, 3079), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3077, 3079), True, 'import tkinter as tk\n'), ((3158, 3172), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3170, 3172), True, 'import tkinter as tk\n'), ((3548, 3562), 'tkinter.Frame', 'tk.Frame', (['self'], {}), '(self)\n', (3556, 3562), True, 'import tkinter as tk\n'), ((4657, 4760), 'tkinter.Button', 'tk.Button', (['self.central_frame'], {'text': '"""Mettre à jour"""', 'font': 'text_font', 'command': 'self.update_customers'}), "(self.central_frame, text='Mettre à jour', font=text_font, command\n =self.update_customers)\n", (4666, 4760), True, 'import tkinter as tk\n'), ((9729, 9744), 'cryptography.fernet.Fernet', 'Fernet', (['data[0]'], {}), '(data[0])\n', (9735, 9744), False, 'from cryptography.fernet import Fernet, InvalidToken\n'), ((10362, 10389), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (10387, 10389), False, 'import configparser\n'), ((12149, 12176), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (12174, 12176), False, 'import configparser\n'), ((6996, 7093), 'tkinter.messagebox.askquestion', 'askquestion', (['"""Nouvelle mise à jour"""', 'f"""Voulez-vous installer la nouvelle version {version} ?"""'], {}), "('Nouvelle mise à jour',\n f'Voulez-vous installer la nouvelle version {version} ?')\n", (7007, 7093), False, 'from tkinter.messagebox import showerror, showinfo, askquestion\n'), ((8345, 8379), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (8357, 8379), False, 'import requests\n'), ((8781, 8815), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (8793, 8815), False, 'import requests\n'), ((9402, 9435), 'os.path.isfile', 'os.path.isfile', (['API_KEY_SAVE_FILE'], {}), '(API_KEY_SAVE_FILE)\n', (9416, 9435), False, 'import os\n'), ((9978, 9999), 'cryptography.fernet.Fernet.generate_key', 'Fernet.generate_key', ([], {}), '()\n', (9997, 9999), False, 'from cryptography.fernet import Fernet, InvalidToken\n'), ((10021, 10032), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (10027, 10032), False, 'from cryptography.fernet import Fernet, InvalidToken\n'), ((10239, 10272), 'os.path.isfile', 'os.path.isfile', (['API_KEY_SAVE_FILE'], {}), '(API_KEY_SAVE_FILE)\n', (10253, 10272), False, 'import os\n'), ((13469, 13510), 'os.path.join', 'os.path.join', 
(['BACKUP_FOLDER', 'csv_filename'], {}), '(BACKUP_FOLDER, csv_filename)\n', (13481, 13510), False, 'import os\n'), ((2233, 2261), 'webbrowser.open', 'webbrowser.open', (['link'], {'new': '(2)'}), '(link, new=2)\n', (2248, 2261), False, 'import webbrowser\n'), ((2358, 2386), 'webbrowser.open', 'webbrowser.open', (['link'], {'new': '(2)'}), '(link, new=2)\n', (2373, 2386), False, 'import webbrowser\n'), ((2488, 2516), 'webbrowser.open', 'webbrowser.open', (['link'], {'new': '(2)'}), '(link, new=2)\n', (2503, 2516), False, 'import webbrowser\n'), ((2669, 2697), 'webbrowser.open', 'webbrowser.open', (['link'], {'new': '(2)'}), '(link, new=2)\n', (2684, 2697), False, 'import webbrowser\n'), ((3740, 3801), 'tkinter.Label', 'tk.Label', (['self.central_frame'], {'text': '"""API URL:"""', 'font': 'text_font'}), "(self.central_frame, text='API URL:', font=text_font)\n", (3748, 3801), True, 'import tkinter as tk\n'), ((3863, 3966), 'tkinter.Entry', 'tk.Entry', (['self.central_frame'], {'textvariable': 'self.api_URL', 'font': 'text_font', 'width': '(40)', 'state': '"""readonly"""'}), "(self.central_frame, textvariable=self.api_URL, font=text_font,\n width=40, state='readonly')\n", (3871, 3966), True, 'import tkinter as tk\n'), ((4024, 4116), 'tkinter.Label', 'tk.Label', (['self.central_frame'], {'text': '"""Dossier d\'installation GLS WinEXPé:"""', 'font': 'text_font'}), '(self.central_frame, text="Dossier d\'installation GLS WinEXPé:",\n font=text_font)\n', (4032, 4116), True, 'import tkinter as tk\n'), ((4174, 4259), 'tkinter.Label', 'tk.Label', (['self.central_frame'], {'textvariable': 'self.gls_folder_label', 'font': 'text_font'}), '(self.central_frame, textvariable=self.gls_folder_label, font=text_font\n )\n', (4182, 4259), True, 'import tkinter as tk\n'), ((4316, 4406), 'tkinter.Label', 'tk.Label', (['self.central_frame'], {'text': '"""Mode de sélection des commandes:"""', 'font': 'text_font'}), "(self.central_frame, text='Mode de sélection des commandes:', font=\n text_font)\n", (4324, 4406), True, 'import tkinter as tk\n'), ((4463, 4568), 'tkinter.Message', 'tk.Message', (['self.central_frame'], {'textvariable': 'self.order_select_mode_label', 'font': 'text_font', 'aspect': '(900)'}), '(self.central_frame, textvariable=self.order_select_mode_label,\n font=text_font, aspect=900)\n', (4473, 4568), True, 'import tkinter as tk\n'), ((6897, 6965), 'tkinter.messagebox.showinfo', 'showinfo', (['"""Mise à jour"""', '"""Vous êtes sous la dernière version connue"""'], {}), "('Mise à jour', 'Vous êtes sous la dernière version connue')\n", (6905, 6965), False, 'from tkinter.messagebox import showerror, showinfo, askquestion\n'), ((7303, 7346), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""Prestashop.ini"""'], {}), "(sys.path[0], 'Prestashop.ini')\n", (7315, 7346), False, 'import os\n'), ((9548, 9565), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (9559, 9565), False, 'import pickle\n'), ((10182, 10225), 'pickle.dump', 'pickle.dump', (['(key, encrypted_api_key)', 'file'], {}), '((key, encrypted_api_key), file)\n', (10193, 10225), False, 'import pickle\n'), ((10286, 10314), 'os.remove', 'os.remove', (['API_KEY_SAVE_FILE'], {}), '(API_KEY_SAVE_FILE)\n', (10295, 10314), False, 'import os\n'), ((11826, 11859), 'json.dumps', 'json.dumps', (['self.order_state_list'], {}), '(self.order_state_list)\n', (11836, 11859), False, 'import json\n'), ((12980, 13010), 'os.path.isdir', 'os.path.isdir', (['self.gls_folder'], {}), '(self.gls_folder)\n', (12993, 13010), False, 'import os\n'), ((13218, 
13246), 'os.path.isdir', 'os.path.isdir', (['output_folder'], {}), '(output_folder)\n', (13231, 13246), False, 'import os\n'), ((13565, 13593), 'os.path.isdir', 'os.path.isdir', (['BACKUP_FOLDER'], {}), '(BACKUP_FOLDER)\n', (13578, 13593), False, 'import os\n'), ((13611, 13634), 'os.mkdir', 'os.mkdir', (['BACKUP_FOLDER'], {}), '(BACKUP_FOLDER)\n', (13619, 13634), False, 'import os\n'), ((13654, 13678), 'os.path.isfile', 'os.path.isfile', (['csv_file'], {}), '(csv_file)\n', (13668, 13678), False, 'import os\n'), ((18281, 18322), 'os.path.join', 'os.path.join', (['output_folder', 'csv_filename'], {}), '(output_folder, csv_filename)\n', (18293, 18322), False, 'import os\n'), ((19066, 19102), 'tkinter.messagebox.showerror', 'showerror', (['error_name', 'error_message'], {}), '(error_name, error_message)\n', (19075, 19102), False, 'from tkinter.messagebox import showerror, showinfo, askquestion\n'), ((19230, 19289), 'tkinter.messagebox.showinfo', 'showinfo', (['"""Mise à jour terminée"""', '"""Rien à mettre à jour :)"""'], {}), "('Mise à jour terminée', 'Rien à mettre à jour :)')\n", (19238, 19289), False, 'from tkinter.messagebox import showerror, showinfo, askquestion\n'), ((19324, 19401), 'tkinter.messagebox.showinfo', 'showinfo', (['"""Mise à jour réussie"""', '"""La mise à jour des clients a été effectuée"""'], {}), "('Mise à jour réussie', 'La mise à jour des clients a été effectuée')\n", (19332, 19401), False, 'from tkinter.messagebox import showerror, showinfo, askquestion\n'), ((7817, 7837), 'os.remove', 'os.remove', (['gls_model'], {}), '(gls_model)\n', (7826, 7837), False, 'import os\n'), ((13871, 13930), 'csv.DictReader', 'csv.DictReader', (['file'], {'delimiter': '""";"""', 'quoting': 'csv.QUOTE_NONE'}), "(file, delimiter=';', quoting=csv.QUOTE_NONE)\n", (13885, 13930), False, 'import csv\n'), ((7400, 7448), 'os.path.join', 'os.path.join', (['self.gls_folder', '"""DAT"""', '"""ConsDscr"""'], {}), "(self.gls_folder, 'DAT', 'ConsDscr')\n", (7412, 7448), False, 'import os\n'), ((7725, 7773), 'os.path.join', 'os.path.join', (['self.gls_folder', '"""DAT"""', '"""ConsDscr"""'], {}), "(self.gls_folder, 'DAT', 'ConsDscr')\n", (7737, 7773), False, 'import os\n'), ((7538, 7565), 'os.path.basename', 'os.path.basename', (['gls_model'], {}), '(gls_model)\n', (7554, 7565), False, 'import os\n'), ((7652, 7679), 'os.path.basename', 'os.path.basename', (['gls_model'], {}), '(gls_model)\n', (7668, 7679), False, 'import os\n')]
|
# Generated by Django 2.1.1 on 2019-06-29 17:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customers', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='date',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 29, 10, 43, 49, 222212), null=True),
),
]
|
[
"datetime.datetime"
] |
[((372, 422), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(6)', '(29)', '(10)', '(43)', '(49)', '(222212)'], {}), '(2019, 6, 29, 10, 43, 49, 222212)\n', (389, 422), False, 'import datetime\n')]
|
import abc
import os
from collections import namedtuple
from dagster import check
from dagster.config import Field
from dagster.config.source import IntSource
from dagster.core.definitions.run_request import InstigatorType
from dagster.core.errors import DagsterError
from dagster.core.host_representation import ExternalSchedule
from dagster.core.instance import DagsterInstance
from dagster.core.scheduler.instigation import (
InstigatorState,
InstigatorStatus,
ScheduleInstigatorData,
)
from dagster.serdes import ConfigurableClass
from dagster.seven import get_current_datetime_in_utc
from dagster.utils import mkdir_p
class DagsterSchedulerError(DagsterError):
"""Base class for all Dagster Scheduler errors"""
class DagsterScheduleDoesNotExist(DagsterSchedulerError):
"""Errors raised when ending a job for a schedule."""
class SchedulerDebugInfo(
namedtuple("SchedulerDebugInfo", "errors scheduler_config_info scheduler_info schedule_storage")
):
def __new__(cls, errors, scheduler_config_info, scheduler_info, schedule_storage):
return super(SchedulerDebugInfo, cls).__new__(
cls,
errors=check.list_param(errors, "errors", of_type=str),
scheduler_config_info=check.str_param(scheduler_config_info, "scheduler_config_info"),
scheduler_info=check.str_param(scheduler_info, "scheduler_info"),
schedule_storage=check.list_param(schedule_storage, "schedule_storage", of_type=str),
)
class Scheduler(abc.ABC):
"""Abstract base class for a scheduler. This component is responsible for interfacing with
an external system such as cron to ensure scheduled repeated execution according to the schedule definition.
"""
def start_schedule(self, instance, external_schedule):
"""
Updates the status of the given schedule to `InstigatorStatus.RUNNING` in schedule storage.
This should not be overridden by subclasses.
Args:
instance (DagsterInstance): The current instance.
external_schedule (ExternalSchedule): The schedule to start
"""
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(external_schedule, "external_schedule", ExternalSchedule)
schedule_state = instance.get_job_state(external_schedule.get_external_origin_id())
if external_schedule.get_current_instigator_state(schedule_state).is_running:
raise DagsterSchedulerError(
"You have attempted to start schedule {name}, but it is already running".format(
name=external_schedule.name
)
)
new_instigator_data = ScheduleInstigatorData(
external_schedule.cron_schedule,
get_current_datetime_in_utc().timestamp(),
)
if not schedule_state:
started_schedule = InstigatorState(
external_schedule.get_external_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.RUNNING,
new_instigator_data,
)
instance.add_job_state(started_schedule)
else:
started_schedule = schedule_state.with_status(InstigatorStatus.RUNNING).with_data(
new_instigator_data
)
instance.update_job_state(started_schedule)
return started_schedule
def stop_schedule(self, instance, schedule_origin_id, external_schedule):
"""
Updates the status of the given schedule to `InstigatorStatus.STOPPED` in schedule storage.
This should not be overridden by subclasses.
Args:
schedule_origin_id (string): The id of the schedule target to stop running.
"""
check.str_param(schedule_origin_id, "schedule_origin_id")
check.opt_inst_param(external_schedule, "external_schedule", ExternalSchedule)
schedule_state = instance.get_job_state(schedule_origin_id)
if (
external_schedule
and not external_schedule.get_current_instigator_state(schedule_state).is_running
) or (schedule_state and not schedule_state.is_running):
raise DagsterSchedulerError(
"You have attempted to stop schedule {name}, but it is already stopped".format(
name=external_schedule.name
)
)
if not schedule_state:
stopped_schedule = InstigatorState(
external_schedule.get_external_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.STOPPED,
ScheduleInstigatorData(
external_schedule.cron_schedule,
),
)
instance.add_job_state(stopped_schedule)
else:
stopped_schedule = schedule_state.with_status(InstigatorStatus.STOPPED).with_data(
ScheduleInstigatorData(
cron_schedule=schedule_state.job_specific_data.cron_schedule,
)
)
instance.update_job_state(stopped_schedule)
return stopped_schedule
@abc.abstractmethod
def debug_info(self):
"""Returns debug information about the scheduler"""
@abc.abstractmethod
def get_logs_path(self, instance, schedule_origin_id):
"""Get path to store logs for schedule
Args:
schedule_origin_id (string): The id of the schedule target to retrieve the log path for
"""
DEFAULT_MAX_CATCHUP_RUNS = 5
class DagsterDaemonScheduler(Scheduler, ConfigurableClass):
"""Default scheduler implementation that submits runs from the `dagster-daemon`
long-lived process. Periodically checks each running schedule for execution times that don't
have runs yet and launches them.
"""
def __init__(
self, max_catchup_runs=DEFAULT_MAX_CATCHUP_RUNS, max_tick_retries=0, inst_data=None
):
self.max_catchup_runs = check.opt_int_param(
max_catchup_runs, "max_catchup_runs", DEFAULT_MAX_CATCHUP_RUNS
)
self.max_tick_retries = check.opt_int_param(max_tick_retries, "max_tick_retries", 0)
self._inst_data = inst_data
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {
"max_catchup_runs": Field(
IntSource,
is_required=False,
default_value=DEFAULT_MAX_CATCHUP_RUNS,
description="""For partitioned schedules, controls the maximum number of past
partitions for each schedule that will be considered when looking for missing
runs . Generally this parameter will only come into play if the scheduler
falls behind or launches after experiencing downtime. This parameter will not be checked for
schedules without partition sets (for example, schedules created using the @schedule
decorator) - only the most recent execution time will be considered for those schedules.
Note that no matter what this value is, the scheduler will never launch a run from a time
before the schedule was turned on (even if the start_date on the schedule is earlier) - if
you want to launch runs for earlier partitions, launch a backfill.
""",
),
"max_tick_retries": Field(
IntSource,
default_value=0,
is_required=False,
description="For each schedule tick that raises an error, how many times to retry that tick",
),
}
@staticmethod
def from_config_value(inst_data, config_value):
return DagsterDaemonScheduler(inst_data=inst_data, **config_value)
def debug_info(self):
return ""
def wipe(self, instance):
pass
def _get_or_create_logs_directory(self, instance, schedule_origin_id):
check.inst_param(instance, "instance", DagsterInstance)
check.str_param(schedule_origin_id, "schedule_origin_id")
logs_directory = os.path.join(instance.schedules_directory(), "logs", schedule_origin_id)
if not os.path.isdir(logs_directory):
mkdir_p(logs_directory)
return logs_directory
def get_logs_path(self, instance, schedule_origin_id):
check.inst_param(instance, "instance", DagsterInstance)
check.str_param(schedule_origin_id, "schedule_origin_id")
logs_directory = self._get_or_create_logs_directory(instance, schedule_origin_id)
return os.path.join(logs_directory, "scheduler.log")
|
[
"dagster.check.opt_inst_param",
"dagster.check.str_param",
"dagster.config.Field",
"os.path.isdir",
"dagster.check.inst_param",
"dagster.core.scheduler.instigation.ScheduleInstigatorData",
"dagster.seven.get_current_datetime_in_utc",
"dagster.check.opt_int_param",
"dagster.utils.mkdir_p",
"collections.namedtuple",
"os.path.join",
"dagster.check.list_param"
] |
[((885, 985), 'collections.namedtuple', 'namedtuple', (['"""SchedulerDebugInfo"""', '"""errors scheduler_config_info scheduler_info schedule_storage"""'], {}), "('SchedulerDebugInfo',\n 'errors scheduler_config_info scheduler_info schedule_storage')\n", (895, 985), False, 'from collections import namedtuple\n'), ((2111, 2166), 'dagster.check.inst_param', 'check.inst_param', (['instance', '"""instance"""', 'DagsterInstance'], {}), "(instance, 'instance', DagsterInstance)\n", (2127, 2166), False, 'from dagster import check\n'), ((2175, 2249), 'dagster.check.inst_param', 'check.inst_param', (['external_schedule', '"""external_schedule"""', 'ExternalSchedule'], {}), "(external_schedule, 'external_schedule', ExternalSchedule)\n", (2191, 2249), False, 'from dagster import check\n'), ((3752, 3809), 'dagster.check.str_param', 'check.str_param', (['schedule_origin_id', '"""schedule_origin_id"""'], {}), "(schedule_origin_id, 'schedule_origin_id')\n", (3767, 3809), False, 'from dagster import check\n'), ((3818, 3896), 'dagster.check.opt_inst_param', 'check.opt_inst_param', (['external_schedule', '"""external_schedule"""', 'ExternalSchedule'], {}), "(external_schedule, 'external_schedule', ExternalSchedule)\n", (3838, 3896), False, 'from dagster import check\n'), ((5974, 6061), 'dagster.check.opt_int_param', 'check.opt_int_param', (['max_catchup_runs', '"""max_catchup_runs"""', 'DEFAULT_MAX_CATCHUP_RUNS'], {}), "(max_catchup_runs, 'max_catchup_runs',\n DEFAULT_MAX_CATCHUP_RUNS)\n", (5993, 6061), False, 'from dagster import check\n'), ((6112, 6172), 'dagster.check.opt_int_param', 'check.opt_int_param', (['max_tick_retries', '"""max_tick_retries"""', '(0)'], {}), "(max_tick_retries, 'max_tick_retries', 0)\n", (6131, 6172), False, 'from dagster import check\n'), ((7976, 8031), 'dagster.check.inst_param', 'check.inst_param', (['instance', '"""instance"""', 'DagsterInstance'], {}), "(instance, 'instance', DagsterInstance)\n", (7992, 8031), False, 'from dagster import check\n'), ((8040, 8097), 'dagster.check.str_param', 'check.str_param', (['schedule_origin_id', '"""schedule_origin_id"""'], {}), "(schedule_origin_id, 'schedule_origin_id')\n", (8055, 8097), False, 'from dagster import check\n'), ((8378, 8433), 'dagster.check.inst_param', 'check.inst_param', (['instance', '"""instance"""', 'DagsterInstance'], {}), "(instance, 'instance', DagsterInstance)\n", (8394, 8433), False, 'from dagster import check\n'), ((8442, 8499), 'dagster.check.str_param', 'check.str_param', (['schedule_origin_id', '"""schedule_origin_id"""'], {}), "(schedule_origin_id, 'schedule_origin_id')\n", (8457, 8499), False, 'from dagster import check\n'), ((8606, 8651), 'os.path.join', 'os.path.join', (['logs_directory', '"""scheduler.log"""'], {}), "(logs_directory, 'scheduler.log')\n", (8618, 8651), False, 'import os\n'), ((6373, 7321), 'dagster.config.Field', 'Field', (['IntSource'], {'is_required': '(False)', 'default_value': 'DEFAULT_MAX_CATCHUP_RUNS', 'description': '"""For partitioned schedules, controls the maximum number of past\n partitions for each schedule that will be considered when looking for missing\n runs . Generally this parameter will only come into play if the scheduler\n falls behind or launches after experiencing downtime. 
This parameter will not be checked for\n schedules without partition sets (for example, schedules created using the @schedule\n decorator) - only the most recent execution time will be considered for those schedules.\n\n Note that no matter what this value is, the scheduler will never launch a run from a time\n before the schedule was turned on (even if the start_date on the schedule is earlier) - if\n you want to launch runs for earlier partitions, launch a backfill.\n """'}), '(IntSource, is_required=False, default_value=DEFAULT_MAX_CATCHUP_RUNS,\n description=\n """For partitioned schedules, controls the maximum number of past\n partitions for each schedule that will be considered when looking for missing\n runs . Generally this parameter will only come into play if the scheduler\n falls behind or launches after experiencing downtime. This parameter will not be checked for\n schedules without partition sets (for example, schedules created using the @schedule\n decorator) - only the most recent execution time will be considered for those schedules.\n\n Note that no matter what this value is, the scheduler will never launch a run from a time\n before the schedule was turned on (even if the start_date on the schedule is earlier) - if\n you want to launch runs for earlier partitions, launch a backfill.\n """\n )\n', (6378, 7321), False, 'from dagster.config import Field\n'), ((7420, 7576), 'dagster.config.Field', 'Field', (['IntSource'], {'default_value': '(0)', 'is_required': '(False)', 'description': '"""For each schedule tick that raises an error, how many times to retry that tick"""'}), "(IntSource, default_value=0, is_required=False, description=\n 'For each schedule tick that raises an error, how many times to retry that tick'\n )\n", (7425, 7576), False, 'from dagster.config import Field\n'), ((8212, 8241), 'os.path.isdir', 'os.path.isdir', (['logs_directory'], {}), '(logs_directory)\n', (8225, 8241), False, 'import os\n'), ((8255, 8278), 'dagster.utils.mkdir_p', 'mkdir_p', (['logs_directory'], {}), '(logs_directory)\n', (8262, 8278), False, 'from dagster.utils import mkdir_p\n'), ((1163, 1210), 'dagster.check.list_param', 'check.list_param', (['errors', '"""errors"""'], {'of_type': 'str'}), "(errors, 'errors', of_type=str)\n", (1179, 1210), False, 'from dagster import check\n'), ((1246, 1309), 'dagster.check.str_param', 'check.str_param', (['scheduler_config_info', '"""scheduler_config_info"""'], {}), "(scheduler_config_info, 'scheduler_config_info')\n", (1261, 1309), False, 'from dagster import check\n'), ((1338, 1387), 'dagster.check.str_param', 'check.str_param', (['scheduler_info', '"""scheduler_info"""'], {}), "(scheduler_info, 'scheduler_info')\n", (1353, 1387), False, 'from dagster import check\n'), ((1418, 1485), 'dagster.check.list_param', 'check.list_param', (['schedule_storage', '"""schedule_storage"""'], {'of_type': 'str'}), "(schedule_storage, 'schedule_storage', of_type=str)\n", (1434, 1485), False, 'from dagster import check\n'), ((4621, 4676), 'dagster.core.scheduler.instigation.ScheduleInstigatorData', 'ScheduleInstigatorData', (['external_schedule.cron_schedule'], {}), '(external_schedule.cron_schedule)\n', (4643, 4676), False, 'from dagster.core.scheduler.instigation import InstigatorState, InstigatorStatus, ScheduleInstigatorData\n'), ((4909, 4998), 'dagster.core.scheduler.instigation.ScheduleInstigatorData', 'ScheduleInstigatorData', ([], {'cron_schedule': 'schedule_state.job_specific_data.cron_schedule'}), '(cron_schedule=schedule_state.job_specific_data.\n 
cron_schedule)\n', (4931, 4998), False, 'from dagster.core.scheduler.instigation import InstigatorState, InstigatorStatus, ScheduleInstigatorData\n'), ((2759, 2788), 'dagster.seven.get_current_datetime_in_utc', 'get_current_datetime_in_utc', ([], {}), '()\n', (2786, 2788), False, 'from dagster.seven import get_current_datetime_in_utc\n')]
|
from turtle import Screen, Turtle
from paddle import Paddle
from ball import Ball
from scoreboard import Scoreboard
import time
screen = Screen()
screen.bgcolor("black")
screen.setup(width=800, height=600)
screen.title("Pong")
screen.tracer(0)
r_paddle = Paddle((350, 0))
l_paddle = Paddle((-350, 0))
ball = Ball()
scoreboard = Scoreboard()
screen.listen()
screen.onkeypress(r_paddle.go_up, "Up")
screen.onkeypress(r_paddle.go_down, "Down")
screen.onkeypress(l_paddle.go_up, "w")
screen.onkeypress(l_paddle.go_down, "s")
game_is_on = True
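# Main game loop: redraw the screen, move the ball, and handle wall bounces,
# paddle bounces and missed balls (a miss scores a point for the opponent).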
while game_is_on:
screen.update()
ball.move()
#Detect collision with wall
if ball.ycor() > 280 or ball.ycor() < -280:
ball.bounce_y()
#Detect collision with paddle
if ball.distance(r_paddle) < 50 and ball.xcor() > 320 or ball.distance(l_paddle) < 50 and ball.xcor() < -320:
ball.bounce_x()
#Detect R paddle misses
if ball.xcor() > 380:
ball.reset_position()
scoreboard.l_point()
#Detect L paddle misses:
if ball.xcor() < -380:
ball.reset_position()
scoreboard.r_point()
screen.exitonclick()
|
[
"scoreboard.Scoreboard",
"turtle.Screen",
"ball.Ball",
"paddle.Paddle"
] |
[((138, 146), 'turtle.Screen', 'Screen', ([], {}), '()\n', (144, 146), False, 'from turtle import Screen, Turtle\n'), ((257, 273), 'paddle.Paddle', 'Paddle', (['(350, 0)'], {}), '((350, 0))\n', (263, 273), False, 'from paddle import Paddle\n'), ((285, 302), 'paddle.Paddle', 'Paddle', (['(-350, 0)'], {}), '((-350, 0))\n', (291, 302), False, 'from paddle import Paddle\n'), ((310, 316), 'ball.Ball', 'Ball', ([], {}), '()\n', (314, 316), False, 'from ball import Ball\n'), ((330, 342), 'scoreboard.Scoreboard', 'Scoreboard', ([], {}), '()\n', (340, 342), False, 'from scoreboard import Scoreboard\n')]
|
from os.path import join
from dataset import DataSetFromFolder
import torchvision.transforms as transforms
def get_training_set(root_dir):
train_dir = join(root_dir, "train")
return DataSetFromFolder(train_dir)
def get_test_set(root_dir):
test_dir = join(root_dir, "test")
return DataSetFromFolder(test_dir)
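# Illustrative usage (assuming root_dir contains "train" and "test" subfolders):
#   train_set = get_training_set("/path/to/dataset")
#   test_set = get_test_set("/path/to/dataset")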
|
[
"dataset.DataSetFromFolder",
"os.path.join"
] |
[((157, 180), 'os.path.join', 'join', (['root_dir', '"""train"""'], {}), "(root_dir, 'train')\n", (161, 180), False, 'from os.path import join\n'), ((197, 225), 'dataset.DataSetFromFolder', 'DataSetFromFolder', (['train_dir'], {}), '(train_dir)\n', (214, 225), False, 'from dataset import DataSetFromFolder\n'), ((276, 298), 'os.path.join', 'join', (['root_dir', '"""test"""'], {}), "(root_dir, 'test')\n", (280, 298), False, 'from os.path import join\n'), ((315, 342), 'dataset.DataSetFromFolder', 'DataSetFromFolder', (['test_dir'], {}), '(test_dir)\n', (332, 342), False, 'from dataset import DataSetFromFolder\n')]
|
from team29.analizer.abstract.expression import Expression
from team29.analizer.reports import Nodo
class CheckValue(Expression):
"""
Class that represents a value of the condition to be evaluated
in the CHECK
"""
def __init__(self, value, type_, row, column):
self.value = value
self.type = type_
self.row = row
self.column = column
def execute(self, environment):
return self
def dot(self):
new = Nodo.Nodo("CHECK")
new.addNode(Nodo.Nodo(str(self.value)))
return new
|
[
"team29.analizer.reports.Nodo.Nodo"
] |
[((476, 494), 'team29.analizer.reports.Nodo.Nodo', 'Nodo.Nodo', (['"""CHECK"""'], {}), "('CHECK')\n", (485, 494), False, 'from team29.analizer.reports import Nodo\n')]
|
"""
Author: <NAME>
Contact: <EMAIL>
"""
import torch
import torch.nn as nn
from .blocks import ShuffleV2Block
class ShuffleNetV2(nn.Module):
"""
Reference:
https://github.com/megvii-model/ShuffleNet-Series/tree/master/ShuffleNetV2
"""
def __init__(self, input_size=224, n_class=1000, model_size='1.5x'):
super(ShuffleNetV2, self).__init__()
print('model size is ', model_size)
self.stage_repeats = [4, 8, 4]
self.model_size = model_size
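# Output channels per stage for each supported width multiplier; index 0 is a placeholder
# and the last entry is the channel count of the final 1x1 conv before the classifier.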
if model_size == '0.5x':
self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif model_size == '1.0x':
self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif model_size == '1.5x':
self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif model_size == '2.0x':
self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
else:
raise NotImplementedError
# building first layer
input_channel = self.stage_out_channels[1]
self.first_conv = nn.Sequential(
nn.Conv2d(3, input_channel, 3, 2, 1, bias=False),
nn.BatchNorm2d(input_channel),
nn.ReLU(inplace=True),
)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.features = []
for idxstage in range(len(self.stage_repeats)):
numrepeat = self.stage_repeats[idxstage]
output_channel = self.stage_out_channels[idxstage + 2]
for i in range(numrepeat):
if i == 0:
self.features.append(ShuffleV2Block(input_channel, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=2))
else:
self.features.append(ShuffleV2Block(input_channel // 2, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=1))
input_channel = output_channel
self.features = nn.Sequential(*self.features)
self.conv_last = nn.Sequential(
nn.Conv2d(input_channel, self.stage_out_channels[-1], 1, 1, 0, bias=False),
nn.BatchNorm2d(self.stage_out_channels[-1]),
nn.ReLU(inplace=True)
)
self.globalpool = nn.AvgPool2d(7)
if self.model_size == '2.0x':
self.dropout = nn.Dropout(0.2)
self.classifier = nn.Sequential(nn.Linear(self.stage_out_channels[-1], n_class, bias=False))
self._initialize_weights()
def forward(self, x):
x = self.first_conv(x)
x = self.maxpool(x)
x = self.features(x)
x = self.conv_last(x)
x = self.globalpool(x)
if self.model_size == '2.0x':
x = self.dropout(x)
x = x.contiguous().view(-1, self.stage_out_channels[-1])
x = self.classifier(x)
return x
def _initialize_weights(self):
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
if 'first' in name:
nn.init.normal_(m.weight, 0, 0.01)
else:
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if __name__ == "__main__":
model = ShuffleNetV2()
# print(model)
test_data = torch.rand(5, 3, 224, 224)
test_outputs = model(test_data)
print(test_outputs.size())
|
[
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.init.normal_",
"torch.nn.init.constant_",
"torch.rand",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d"
] |
[((4102, 4128), 'torch.rand', 'torch.rand', (['(5)', '(3)', '(224)', '(224)'], {}), '(5, 3, 224, 224)\n', (4112, 4128), False, 'import torch\n'), ((1256, 1304), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (1268, 1304), True, 'import torch.nn as nn\n'), ((2070, 2099), 'torch.nn.Sequential', 'nn.Sequential', (['*self.features'], {}), '(*self.features)\n', (2083, 2099), True, 'import torch.nn as nn\n'), ((2356, 2371), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(7)'], {}), '(7)\n', (2368, 2371), True, 'import torch.nn as nn\n'), ((1094, 1142), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'input_channel', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(3, input_channel, 3, 2, 1, bias=False)\n', (1103, 1142), True, 'import torch.nn as nn\n'), ((1156, 1185), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['input_channel'], {}), '(input_channel)\n', (1170, 1185), True, 'import torch.nn as nn\n'), ((1199, 1220), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1206, 1220), True, 'import torch.nn as nn\n'), ((2153, 2227), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channel', 'self.stage_out_channels[-1]', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(input_channel, self.stage_out_channels[-1], 1, 1, 0, bias=False)\n', (2162, 2227), True, 'import torch.nn as nn\n'), ((2241, 2284), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.stage_out_channels[-1]'], {}), '(self.stage_out_channels[-1])\n', (2255, 2284), True, 'import torch.nn as nn\n'), ((2298, 2319), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2305, 2319), True, 'import torch.nn as nn\n'), ((2437, 2452), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (2447, 2452), True, 'import torch.nn as nn\n'), ((2493, 2552), 'torch.nn.Linear', 'nn.Linear', (['self.stage_out_channels[-1]', 'n_class'], {'bias': '(False)'}), '(self.stage_out_channels[-1], n_class, bias=False)\n', (2502, 2552), True, 'import torch.nn as nn\n'), ((3127, 3161), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (3142, 3161), True, 'import torch.nn as nn\n'), ((3204, 3257), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(1.0 / m.weight.shape[1])'], {}), '(m.weight, 0, 1.0 / m.weight.shape[1])\n', (3219, 3257), True, 'import torch.nn as nn\n'), ((3317, 3345), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3334, 3345), True, 'import torch.nn as nn\n'), ((3410, 3440), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (3427, 3440), True, 'import torch.nn as nn\n'), ((3550, 3586), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.running_mean', '(0)'], {}), '(m.running_mean, 0)\n', (3567, 3586), True, 'import torch.nn as nn\n'), ((3500, 3533), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.0001)'], {}), '(m.bias, 0.0001)\n', (3517, 3533), True, 'import torch.nn as nn\n'), ((3651, 3681), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (3668, 3681), True, 'import torch.nn as nn\n'), ((3791, 3827), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.running_mean', '(0)'], {}), '(m.running_mean, 0)\n', (3808, 3827), True, 'import torch.nn as nn\n'), ((3741, 3774), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.0001)'], {}), '(m.bias, 0.0001)\n', (3758, 3774), True, 'import torch.nn as 
nn\n'), ((3887, 3921), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (3902, 3921), True, 'import torch.nn as nn\n'), ((3981, 4009), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3998, 4009), True, 'import torch.nn as nn\n')]
|
import tempfile
import subprocess
import warnings
import os
import codecs
import tempfile
import copy
import sys
import time
from tree import SeqTree, RelativeLevelTreeEncoder
from logging import warning
def rebuild_input_sentence(lines):
if len(lines[0].split("\t")) > 3:
sentence = [ (l.split("\t")[0],
l.split("\t")[1]+"##"+"|".join(feat for feat in l.split("\t")[2:-1]
if feat != "-")+"##")
for l in lines]
else:
sentence = [tuple(l.split("\t")[0:2]) for l in lines]
return sentence
"""
Transforms an encoding of a tree in a relative scale into an
encoding of the tree in an absolute scale.
"""
# def to_absolute_scale(relative_levels):
#
# absolute_sequence = [0]*len(relative_levels)
# current_level = 0
# for j,level in enumerate(relative_levels):
#
# if level in ["-BOS-","-EOS-", "NONE"]:
# absolute_sequence[j] = level
# else:
#
# if level == "ROOT":
# current_level=1
# label_j=str(current_level)
#
# elif "ROOT" in level:
# current_level+=int(level.replace("ROOT",""))
# label_j = str(current_level)
# else:
# current_level+= int(level)
# label_j=str(current_level)
#
# absolute_sequence[j] = label_j
#
# return absolute_sequence
"""
Returns a label as a tuple of 3 elements: (level, label, leaf_unary_chain)
"""
#NEWJOINT
def split_label(label, split_char):
if label in ["-BOS-","-EOS-","NONE"]:
return (label,"-EMPTY-","-EMPTY-")
if len(label.split(split_char)) == 2:
return (label.split(split_char)[0], label.split(split_char)[1],"-EMPTY-")
return tuple(label.split(split_char))
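# Illustrative behaviour of split_label (the label strings below are made up;
# elsewhere in this module "@" is the default split_char):
#   split_label("2@NP", "@")       -> ("2", "NP", "-EMPTY-")
#   split_label("2@NP@VP+S", "@")  -> ("2", "NP", "VP+S")
#   split_label("-BOS-", "@")      -> ("-BOS-", "-EMPTY-", "-EMPTY-")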
# def split_label(label):
# if label in ["-BOS-","-EOS-","NONE"]:
# return (label,"-EMPTY-","-EMPTY-")
# if len(label.split("_")) == 2:
# return (label.split("_")[0], label.split("_")[1],"-EMPTY-")
#
# return tuple(label.split("_"))
"""
Transforms a list of lists into a single, flat list
"""
def flat_list(l):
flat_l = []
for sublist in l:
for item in sublist:
flat_l.append(item)
return flat_l
"""
Determines whether a string x can be converted into an int
"""
def is_int(x):
try:
int(x)
return True
except ValueError:
return False
"""
Auxiliary function to compute the accuracy in a homogeneous way with respect
to the enriched approach and the .seq_lu format
"""
def get_enriched_labels_for_retagger(preds,unary_preds):
#TODO: Update for SPRML corpus
warning("The model will not work well if + is not an unique joint char for collapsed branches (update for SPRML)")
new_preds = []
for zpreds, zunaries in zip(preds, unary_preds):
aux = []
for zpred, zunary in zip(zpreds,zunaries):
if "+" in zunary and zpred not in ["-EOS-","NONE","-BOS-"]:
if zpred == "ROOT":
new_zpred = "+".join(zunary.split("+")[:-1])
else:
new_zpred = zpred+"_"+"+".join(zunary.split("+")[:-1])
else:
new_zpred = zpred
aux.append(new_zpred)
new_preds.append(aux)
return new_preds
"""
Transforms a list of sentences and predictions (labels) into parenthesized trees
@param sentences: A list of list of (word,postag)
@param labels: A list of list of predictions
@return A list of parenthesized trees
"""
#NEWJOINT
def sequence_to_parenthesis(sentences,labels,join_char="~", split_char="@"):
#def sequence_to_parenthesis(sentences,labels,join_char="+"):
parenthesized_trees = []
relative_encoder = RelativeLevelTreeEncoder(join_char=join_char, split_char=split_char)
f_max_in_common = SeqTree.maxincommon_to_tree
f_uncollapse = relative_encoder.uncollapse
total_posprocessing_time = 0
for noutput, output in enumerate(labels):
if output != "": #We reached the end-of-file
init_parenthesized_time = time.time()
sentence = []
preds = []
for ((word,postag), pred) in zip(sentences[noutput][1:-1],output[1:-1]):
if len(pred.split(split_char))==3: #and "+" in pred.split("_")[2]:
sentence.append((word,pred.split(split_char)[2]+join_char+postag))
else:
sentence.append((word,postag))
# if len(pred.split("_"))==3: #and "+" in pred.split("_")[2]:
# sentence.append((word,pred.split("_")[2]+"+"+postag))
# else:
# sentence.append((word,postag))
#
#TODO: This is currently needed as a workaround for the retagging strategy and sentences of length one
# if len(output)==3 and output[1] == "ROOT":
# pred = "NONE"
preds.append(pred)
# print preds
# print sentence
tree = f_max_in_common(preds, sentence, relative_encoder)
#Removing empty label from root
if tree.label() == SeqTree.EMPTY_LABEL:
#If a node has more than two children
#it means that the constituent should have been filled.
if len(tree) > 1:
print ("WARNING: ROOT empty node with more than one child")
else:
while (tree.label() == SeqTree.EMPTY_LABEL) and len(tree) == 1:
tree = tree[0]
            #Uncollapsing the root. Rarely needed
if join_char in tree.label():
aux = SeqTree(tree.label().split(join_char)[0],[])
aux.append(SeqTree(join_char.join(tree.label().split(join_char)[1:]), tree ))
tree = aux
# if "+" in tree.label():
# aux = SeqTree(tree.label().split("+")[0],[])
# aux.append(SeqTree("+".join(tree.label().split("+")[1:]), tree ))
# tree = aux
tree = f_uncollapse(tree)
total_posprocessing_time+= time.time()-init_parenthesized_time
#To avoid problems when dumping the parenthesized tree to a file
aux = tree.pformat(margin=100000000)
if aux.startswith("( ("): #Ad-hoc workarounf for sentences of length 1 in German SPRML
aux = aux[2:-1]
parenthesized_trees.append(aux)
return parenthesized_trees
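# Hedged usage sketch for sequence_to_parenthesis (shapes only; the concrete
# label strings depend on the relative-scale encoder and are not shown here):
#   sentences: one list per sentence of (word, postag) tuples, padded with
#              -BOS-/-EOS- entries.
#   labels:    one list of predicted labels per sentence, of the same length.
#   trees = sequence_to_parenthesis(sentences, labels)  # list of bracketed tree strings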
|
[
"logging.warning",
"tree.RelativeLevelTreeEncoder",
"time.time"
] |
[((2876, 3000), 'logging.warning', 'warning', (['"""The model will not work well if + is not an unique joint char for collapsed branches (update for SPRML)"""'], {}), "(\n 'The model will not work well if + is not an unique joint char for collapsed branches (update for SPRML)'\n )\n", (2883, 3000), False, 'from logging import warning\n'), ((3982, 4050), 'tree.RelativeLevelTreeEncoder', 'RelativeLevelTreeEncoder', ([], {'join_char': 'join_char', 'split_char': 'split_char'}), '(join_char=join_char, split_char=split_char)\n', (4006, 4050), False, 'from tree import SeqTree, RelativeLevelTreeEncoder\n'), ((4335, 4346), 'time.time', 'time.time', ([], {}), '()\n', (4344, 4346), False, 'import time\n'), ((6570, 6581), 'time.time', 'time.time', ([], {}), '()\n', (6579, 6581), False, 'import time\n')]
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from llnl.util.filesystem import install, mkdirp
import llnl.util.tty as tty
from spack.build_systems.cmake import CMakePackage
from spack.package import run_after
def cmake_cache_path(name, value, comment=""):
"""Generate a string for a cmake cache variable"""
return 'set({0} "{1}" CACHE PATH "{2}")\n'.format(name, value, comment)
def cmake_cache_string(name, value, comment=""):
"""Generate a string for a cmake cache variable"""
return 'set({0} "{1}" CACHE STRING "{2}")\n'.format(name, value, comment)
def cmake_cache_option(name, boolean_value, comment=""):
"""Generate a string for a cmake configuration option"""
value = "ON" if boolean_value else "OFF"
return 'set({0} {1} CACHE BOOL "{2}")\n'.format(name, value, comment)
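# For illustration (variable names and paths are made up), the helpers above
# emit CMake cache lines such as:
#   cmake_cache_path("MPI_C_COMPILER", "/usr/bin/mpicc")
#       -> 'set(MPI_C_COMPILER "/usr/bin/mpicc" CACHE PATH "")\n'
#   cmake_cache_option("ENABLE_TESTS", True, "build tests")
#       -> 'set(ENABLE_TESTS ON CACHE BOOL "build tests")\n'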
class CachedCMakePackage(CMakePackage):
"""Specialized class for packages built using CMake initial cache.
This feature of CMake allows packages to increase reproducibility,
especially between Spack- and manual builds. It also allows packages to
sidestep certain parsing bugs in extremely long ``cmake`` commands, and to
avoid system limits on the length of the command line."""
phases = ['initconfig', 'cmake', 'build', 'install']
@property
def cache_name(self):
return "{0}-{1}-{2}@{3}.cmake".format(
self.name,
self.spec.architecture,
self.spec.compiler.name,
self.spec.compiler.version,
)
@property
def cache_path(self):
return os.path.join(self.stage.source_path, self.cache_name)
def flag_handler(self, name, flags):
if name in ('cflags', 'cxxflags', 'cppflags', 'fflags'):
return (None, None, None) # handled in the cmake cache
return (flags, None, None)
def initconfig_compiler_entries(self):
# This will tell cmake to use the Spack compiler wrappers when run
# through Spack, but use the underlying compiler when run outside of
# Spack
spec = self.spec
# Fortran compiler is optional
if "FC" in os.environ:
spack_fc_entry = cmake_cache_path(
"CMAKE_Fortran_COMPILER", os.environ['FC'])
system_fc_entry = cmake_cache_path(
"CMAKE_Fortran_COMPILER", self.compiler.fc)
else:
spack_fc_entry = "# No Fortran compiler defined in spec"
system_fc_entry = "# No Fortran compiler defined in spec"
entries = [
"#------------------{0}".format("-" * 60),
"# Compilers",
"#------------------{0}".format("-" * 60),
"# Compiler Spec: {0}".format(spec.compiler),
"#------------------{0}".format("-" * 60),
'if(DEFINED ENV{SPACK_CC})\n',
' ' + cmake_cache_path(
"CMAKE_C_COMPILER", os.environ['CC']),
' ' + cmake_cache_path(
"CMAKE_CXX_COMPILER", os.environ['CXX']),
' ' + spack_fc_entry,
'else()\n',
' ' + cmake_cache_path(
"CMAKE_C_COMPILER", self.compiler.cc),
' ' + cmake_cache_path(
"CMAKE_CXX_COMPILER", self.compiler.cxx),
' ' + system_fc_entry,
'endif()\n'
]
# use global spack compiler flags
cppflags = ' '.join(spec.compiler_flags['cppflags'])
if cppflags:
# avoid always ending up with ' ' with no flags defined
cppflags += ' '
cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
if cflags:
entries.append(cmake_cache_string("CMAKE_C_FLAGS", cflags))
cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
if cxxflags:
entries.append(cmake_cache_string("CMAKE_CXX_FLAGS", cxxflags))
fflags = ' '.join(spec.compiler_flags['fflags'])
if fflags:
entries.append(cmake_cache_string("CMAKE_Fortran_FLAGS", fflags))
# Override XL compiler family
familymsg = ("Override to proper compiler family for XL")
if "xlf" in (self.compiler.fc or ''): # noqa: F821
entries.append(cmake_cache_string(
"CMAKE_Fortran_COMPILER_ID", "XL",
familymsg))
if "xlc" in self.compiler.cc: # noqa: F821
entries.append(cmake_cache_string(
"CMAKE_C_COMPILER_ID", "XL",
familymsg))
if "xlC" in self.compiler.cxx: # noqa: F821
entries.append(cmake_cache_string(
"CMAKE_CXX_COMPILER_ID", "XL",
familymsg))
return entries
def initconfig_mpi_entries(self):
spec = self.spec
if "+mpi" not in spec:
return []
entries = [
"#------------------{0}".format("-" * 60),
"# MPI",
"#------------------{0}\n".format("-" * 60),
]
entries.append(cmake_cache_path("MPI_C_COMPILER",
spec['mpi'].mpicc))
entries.append(cmake_cache_path("MPI_CXX_COMPILER",
spec['mpi'].mpicxx))
entries.append(cmake_cache_path("MPI_Fortran_COMPILER",
spec['mpi'].mpifc))
# Check for slurm
using_slurm = False
slurm_checks = ['+slurm',
'schedulers=slurm',
'process_managers=slurm']
if any(spec['mpi'].satisfies(variant) for variant in slurm_checks):
using_slurm = True
# Determine MPIEXEC
if using_slurm:
if spec['mpi'].external:
# Heuristic until we have dependents on externals
mpiexec = '/usr/bin/srun'
else:
mpiexec = os.path.join(spec['slurm'].prefix.bin, 'srun')
else:
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpirun')
if not os.path.exists(mpiexec):
mpiexec = os.path.join(spec['mpi'].prefix.bin, 'mpiexec')
if not os.path.exists(mpiexec):
msg = "Unable to determine MPIEXEC, %s tests may fail" % self.name
entries.append("# {0}\n".format(msg))
tty.warn(msg)
else:
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC
if self.spec["cmake"].satisfies('@3.10:'):
entries.append(cmake_cache_path("MPIEXEC_EXECUTABLE",
mpiexec))
else:
entries.append(cmake_cache_path("MPIEXEC", mpiexec))
# Determine MPIEXEC_NUMPROC_FLAG
if using_slurm:
entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-n"))
else:
entries.append(cmake_cache_string("MPIEXEC_NUMPROC_FLAG", "-np"))
return entries
def initconfig_hardware_entries(self):
spec = self.spec
entries = [
"#------------------{0}".format("-" * 60),
"# Hardware",
"#------------------{0}\n".format("-" * 60),
]
if '+cuda' in spec:
entries.append("#------------------{0}".format("-" * 30))
entries.append("# Cuda")
entries.append("#------------------{0}\n".format("-" * 30))
cudatoolkitdir = spec['cuda'].prefix
entries.append(cmake_cache_path("CUDA_TOOLKIT_ROOT_DIR",
cudatoolkitdir))
cudacompiler = "${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc"
entries.append(cmake_cache_path("CMAKE_CUDA_COMPILER",
cudacompiler))
if "+mpi" in spec:
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
"${MPI_CXX_COMPILER}"))
else:
entries.append(cmake_cache_path("CMAKE_CUDA_HOST_COMPILER",
"${CMAKE_CXX_COMPILER}"))
return entries
def std_initconfig_entries(self):
return [
"#------------------{0}".format("-" * 60),
"# !!!! This is a generated file, edit at own risk !!!!",
"#------------------{0}".format("-" * 60),
"# CMake executable path: {0}".format(
self.spec['cmake'].command.path),
"#------------------{0}\n".format("-" * 60),
]
def initconfig(self, spec, prefix):
cache_entries = (self.std_initconfig_entries() +
self.initconfig_compiler_entries() +
self.initconfig_mpi_entries() +
self.initconfig_hardware_entries() +
self.initconfig_package_entries())
with open(self.cache_name, 'w') as f:
for entry in cache_entries:
f.write('%s\n' % entry)
f.write('\n')
@property
def std_cmake_args(self):
args = super(CachedCMakePackage, self).std_cmake_args
args.extend(['-C', self.cache_path])
return args
@run_after('install')
def install_cmake_cache(self):
mkdirp(self.spec.prefix.share.cmake)
install(self.cache_path, self.spec.prefix.share.cmake)
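# Hedged sketch (not part of this file): a package that opts into the cached
# CMake workflow subclasses CachedCMakePackage and provides the package-specific
# entries consumed by initconfig() above, for example:
#
#   class Mylib(CachedCMakePackage):
#       """Hypothetical example package."""
#       def initconfig_package_entries(self):
#           return [cmake_cache_option("ENABLE_TESTS", self.run_tests)]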
|
[
"llnl.util.tty.warn",
"llnl.util.filesystem.mkdirp",
"spack.package.run_after",
"os.path.exists",
"llnl.util.filesystem.install",
"os.path.join"
] |
[((9407, 9427), 'spack.package.run_after', 'run_after', (['"""install"""'], {}), "('install')\n", (9416, 9427), False, 'from spack.package import run_after\n'), ((1727, 1780), 'os.path.join', 'os.path.join', (['self.stage.source_path', 'self.cache_name'], {}), '(self.stage.source_path, self.cache_name)\n', (1739, 1780), False, 'import os\n'), ((9471, 9507), 'llnl.util.filesystem.mkdirp', 'mkdirp', (['self.spec.prefix.share.cmake'], {}), '(self.spec.prefix.share.cmake)\n', (9477, 9507), False, 'from llnl.util.filesystem import install, mkdirp\n'), ((9516, 9570), 'llnl.util.filesystem.install', 'install', (['self.cache_path', 'self.spec.prefix.share.cmake'], {}), '(self.cache_path, self.spec.prefix.share.cmake)\n', (9523, 9570), False, 'from llnl.util.filesystem import install, mkdirp\n'), ((6110, 6156), 'os.path.join', 'os.path.join', (["spec['mpi'].prefix.bin", '"""mpirun"""'], {}), "(spec['mpi'].prefix.bin, 'mpirun')\n", (6122, 6156), False, 'import os\n'), ((6291, 6314), 'os.path.exists', 'os.path.exists', (['mpiexec'], {}), '(mpiexec)\n', (6305, 6314), False, 'import os\n'), ((6457, 6470), 'llnl.util.tty.warn', 'tty.warn', (['msg'], {}), '(msg)\n', (6465, 6470), True, 'import llnl.util.tty as tty\n'), ((6027, 6073), 'os.path.join', 'os.path.join', (["spec['slurm'].prefix.bin", '"""srun"""'], {}), "(spec['slurm'].prefix.bin, 'srun')\n", (6039, 6073), False, 'import os\n'), ((6176, 6199), 'os.path.exists', 'os.path.exists', (['mpiexec'], {}), '(mpiexec)\n', (6190, 6199), False, 'import os\n'), ((6227, 6274), 'os.path.join', 'os.path.join', (["spec['mpi'].prefix.bin", '"""mpiexec"""'], {}), "(spec['mpi'].prefix.bin, 'mpiexec')\n", (6239, 6274), False, 'import os\n')]
|
from parse import parse
from sqlalchemy import text as sql_text
from alembic_utils.exceptions import SQLParseFailure
from alembic_utils.on_entity_mixin import OnEntityMixin
from alembic_utils.replaceable_entity import ReplaceableEntity
from alembic_utils.statement import coerce_to_quoted
class PGPolicy(OnEntityMixin, ReplaceableEntity):
"""A PostgreSQL Policy compatible with `alembic revision --autogenerate`
**Parameters:**
* **schema** - *str*: A SQL schema name
* **signature** - *str*: A SQL policy name and tablename, separated by "."
* **definition** - *str*: The definition of the policy, incl. permissive, for, to, using, with check
* **on_entity** - *str*: fully qualifed entity that the policy applies
"""
@classmethod
def from_sql(cls, sql: str) -> "PGPolicy":
"""Create an instance instance from a SQL string"""
template = "create policy{:s}{signature}{:s}on{:s}{on_entity}{:s}{definition}"
result = parse(template, sql.strip(), case_sensitive=False)
if result is not None:
on_entity = result["on_entity"]
if "." not in on_entity:
schema = "public"
on_entity = schema + "." + on_entity
schema, _, _ = on_entity.partition(".")
return cls(
schema=schema,
signature=result["signature"],
definition=result["definition"],
on_entity=on_entity,
)
raise SQLParseFailure(f'Failed to parse SQL into PGPolicy """{sql}"""')
def to_sql_statement_create(self):
""" Generates a SQL "create poicy" statement for PGPolicy """
return sql_text(f"CREATE POLICY {self.signature} on {self.on_entity} {self.definition}")
def to_sql_statement_drop(self, cascade=False):
"""Generates a SQL "drop policy" statement for PGPolicy"""
cascade = "cascade" if cascade else ""
return sql_text(f"DROP POLICY {self.signature} on {self.on_entity} {cascade}")
def to_sql_statement_create_or_replace(self):
"""Not implemented, postgres policies do not support replace."""
return sql_text(
f"""
DROP POLICY IF EXISTS {self.signature} on {self.on_entity};
CREATE POLICY {self.signature} on {self.on_entity} {self.definition};
"""
)
@classmethod
def from_database(cls, connection, schema):
"""Get a list of all policies defined in the db"""
sql = sql_text(
f"""
select
schemaname,
tablename,
policyname,
permissive,
roles,
cmd,
qual,
with_check
from
pg_policies
where
schemaname = '{schema}'
"""
)
rows = connection.execute(sql).fetchall()
def get_definition(permissive, roles, cmd, qual, with_check):
definition = ""
if permissive is not None:
definition += f"as {permissive} "
if cmd is not None:
definition += f"for {cmd} "
if roles is not None:
definition += f"to {', '.join(roles)} "
if qual is not None:
if qual[0] != "(":
qual = f"({qual})"
definition += f"using {qual} "
if with_check is not None:
if with_check[0] != "(":
with_check = f"({with_check})"
definition += f"with check {with_check} "
return definition
db_policies = []
for schema, table, policy_name, permissive, roles, cmd, qual, with_check in rows:
definition = get_definition(permissive, roles, cmd, qual, with_check)
schema = coerce_to_quoted(schema)
table = coerce_to_quoted(table)
policy_name = coerce_to_quoted(policy_name)
policy = PGPolicy.from_sql(
f"create policy {policy_name} on {schema}.{table} {definition}"
)
db_policies.append(policy)
for policy in db_policies:
assert policy is not None
return db_policies
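# Hedged usage sketch (policy, table and role names are illustrative only):
#   policy = PGPolicy.from_sql(
#       "create policy allow_select on public.account "
#       "as permissive for select to anon_user using (true)"
#   )
#   policy.to_sql_statement_create()
#   # -> CREATE POLICY allow_select on public.account as permissive for select to anon_user using (true)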
|
[
"sqlalchemy.text",
"alembic_utils.exceptions.SQLParseFailure",
"alembic_utils.statement.coerce_to_quoted"
] |
[((1505, 1570), 'alembic_utils.exceptions.SQLParseFailure', 'SQLParseFailure', (['f"""Failed to parse SQL into PGPolicy ""\\"{sql}""\\""""'], {}), '(f\'Failed to parse SQL into PGPolicy """{sql}"""\')\n', (1520, 1570), False, 'from alembic_utils.exceptions import SQLParseFailure\n'), ((1697, 1783), 'sqlalchemy.text', 'sql_text', (['f"""CREATE POLICY {self.signature} on {self.on_entity} {self.definition}"""'], {}), "(\n f'CREATE POLICY {self.signature} on {self.on_entity} {self.definition}')\n", (1705, 1783), True, 'from sqlalchemy import text as sql_text\n'), ((1961, 2032), 'sqlalchemy.text', 'sql_text', (['f"""DROP POLICY {self.signature} on {self.on_entity} {cascade}"""'], {}), "(f'DROP POLICY {self.signature} on {self.on_entity} {cascade}')\n", (1969, 2032), True, 'from sqlalchemy import text as sql_text\n'), ((2172, 2362), 'sqlalchemy.text', 'sql_text', (['f"""\n DROP POLICY IF EXISTS {self.signature} on {self.on_entity};\n CREATE POLICY {self.signature} on {self.on_entity} {self.definition};\n """'], {}), '(\n f"""\n DROP POLICY IF EXISTS {self.signature} on {self.on_entity};\n CREATE POLICY {self.signature} on {self.on_entity} {self.definition};\n """\n )\n', (2180, 2362), True, 'from sqlalchemy import text as sql_text\n'), ((2514, 2824), 'sqlalchemy.text', 'sql_text', (['f"""\n select\n schemaname,\n tablename,\n policyname,\n permissive,\n roles,\n cmd,\n qual,\n with_check\n from\n pg_policies\n where\n schemaname = \'{schema}\'\n """'], {}), '(\n f"""\n select\n schemaname,\n tablename,\n policyname,\n permissive,\n roles,\n cmd,\n qual,\n with_check\n from\n pg_policies\n where\n schemaname = \'{schema}\'\n """\n )\n', (2522, 2824), True, 'from sqlalchemy import text as sql_text\n'), ((3834, 3858), 'alembic_utils.statement.coerce_to_quoted', 'coerce_to_quoted', (['schema'], {}), '(schema)\n', (3850, 3858), False, 'from alembic_utils.statement import coerce_to_quoted\n'), ((3879, 3902), 'alembic_utils.statement.coerce_to_quoted', 'coerce_to_quoted', (['table'], {}), '(table)\n', (3895, 3902), False, 'from alembic_utils.statement import coerce_to_quoted\n'), ((3929, 3958), 'alembic_utils.statement.coerce_to_quoted', 'coerce_to_quoted', (['policy_name'], {}), '(policy_name)\n', (3945, 3958), False, 'from alembic_utils.statement import coerce_to_quoted\n')]
|
# -*- coding: utf-8 -*-
import logging
__all__ = ["get_progress_bar"]
try:
import tqdm
except ImportError:
tqdm = None
class _NoOpPBar(object):
"""This class implements the progress bar interface but does nothing"""
def __init__(self):
pass
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
pass
def update(self, count):
pass
def get_progress_bar(display, total):
"""Get a progress bar interface with given properties
If the tqdm library is not installed, this will always return a "progress
bar" that does nothing.
Args:
display (bool or str): Should the bar actually show the progress? Or a
string to indicate which tqdm bar to use.
total (int): The total size of the progress bar.
"""
if display:
if tqdm is None:
logging.warning(
"You must install the tqdm library to use progress "
"indicators with emcee"
)
return _NoOpPBar()
else:
if display is True:
return tqdm.tqdm(total=total)
else:
return getattr(tqdm, "tqdm_" + display)(total=total)
return _NoOpPBar()
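# Hedged usage sketch: both the tqdm bar and the no-op fallback are used as
# context managers, for example:
#
#   with get_progress_bar(display=True, total=1000) as pbar:
#       for _ in range(1000):
#           pbar.update(1)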
|
[
"logging.warning",
"tqdm.tqdm"
] |
[((920, 1015), 'logging.warning', 'logging.warning', (['"""You must install the tqdm library to use progress indicators with emcee"""'], {}), "(\n 'You must install the tqdm library to use progress indicators with emcee')\n", (935, 1015), False, 'import logging\n'), ((1160, 1182), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'total'}), '(total=total)\n', (1169, 1182), False, 'import tqdm\n')]
|
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import Point, LineString, Polygon
def convert_geo_list_to_geoseries(geo_list):
    out = None
    for i in range(0, len(geo_list)):
        if out is None:
            # the first element initialises the GeoSeries
            out = gpd.GeoSeries(geo_list[i])
        else:
            out = out.append(geo_list[i])
    return out
def make_vertical_segments(scanline_row, step_increment = 0.1, segment_width = 1):
# need to adjust the np.arange to step and not divide....
    # need to draw this out. We have x boxes (given by np.arange and the step increment) and we
# want to offset them by a distance (say 1 m - the window width)
# so we get an index of y_coords and take the first one to the -n one
# where n is floor(window_width/step_increment)
n = int(segment_width/step_increment)
scanline = scanline_row[1].loc['orig_geom']
x_coord = np.unique(scanline.xy[0])
y_coords = np.arange(np.min(scanline.xy[1]), np.max(scanline.xy[1]), step_increment)
seg_start_point = list(zip(np.repeat(x_coord,len(y_coords[0:-n])), y_coords[0:-n]))
seg_end_point = list(zip(np.repeat(x_coord,len(y_coords[n:])), y_coords[n:]))
seg_points = list(zip(seg_start_point,seg_end_point))
scanline_segments = gpd.GeoSeries(map(LineString, seg_points))
name = scanline_row[1]['name']
names = [name + '_seg_' + str(i) for i in np.arange(0,len(scanline_segments))+1]
segment_df = gpd.GeoDataFrame({
'name': names,
'x_coord': np.repeat(x_coord, len(names)),
'y_midpoint': (y_coords[0:-n] + y_coords[n:])/2},
geometry = scanline_segments
)
segment_df['orig_length'] = segment_df.length
segment_df['orig_geom'] = segment_df['geometry']
return segment_df
def make_horizontal_segments(scanline_row, step_increment = 0.1, segment_width = 1):
n = int(segment_width/step_increment)
scanline = scanline_row[1].loc['orig_geom']
y_coord = np.unique(scanline.xy[1])
x_coords = np.arange(np.min(scanline.xy[0]), np.max(scanline.xy[0]), step_increment)
seg_start_point = list(zip(x_coords[0:-n], np.repeat(y_coord,len(x_coords[0:-n]))))
seg_end_point = list(zip(x_coords[n:],np.repeat(y_coord,len(x_coords[n:]))))
seg_points = list(zip(seg_start_point,seg_end_point))
scanline_segments = gpd.GeoSeries(map(LineString, seg_points))
name = scanline_row[1]['name']
names = [name + '_seg_' + str(i) for i in np.arange(0,len(scanline_segments))+1]
segment_df = gpd.GeoDataFrame({
'name': names,
'y_coord': np.repeat(y_coord, len(names)),
'x_midpoint': (x_coords[0:-n] + x_coords[n:])/2},
geometry = scanline_segments
)
segment_df['orig_length'] = segment_df.length
segment_df['orig_geom'] = segment_df['geometry']
return segment_df
def make_polygon_from_tuple(x,y,w):
return Polygon([[x - w/2, y - w/2], [x - w/2, y + w/2],
[x + w/2, y + w/2], [x + w/2, y - w/2]])
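# Hedged usage sketch (the 'name'/'orig_geom' columns follow the conventions
# assumed by make_vertical_segments; the coordinates are made up):
#   scanlines = gpd.GeoDataFrame({
#       'name': ['sl_1'],
#       'orig_geom': [LineString([(10.0, 0.0), (10.0, 5.0)])]})
#   segments = [make_vertical_segments(row) for row in scanlines.iterrows()]
#   segment_df = pd.concat(segments, ignore_index=True)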
|
[
"geopandas.GeoSeries",
"shapely.geometry.Polygon",
"numpy.min",
"numpy.max",
"numpy.unique"
] |
[((890, 915), 'numpy.unique', 'np.unique', (['scanline.xy[0]'], {}), '(scanline.xy[0])\n', (899, 915), True, 'import numpy as np\n'), ((2032, 2057), 'numpy.unique', 'np.unique', (['scanline.xy[1]'], {}), '(scanline.xy[1])\n', (2041, 2057), True, 'import numpy as np\n'), ((3027, 3136), 'shapely.geometry.Polygon', 'Polygon', (['[[x - w / 2, y - w / 2], [x - w / 2, y + w / 2], [x + w / 2, y + w / 2], [x +\n w / 2, y - w / 2]]'], {}), '([[x - w / 2, y - w / 2], [x - w / 2, y + w / 2], [x + w / 2, y + w /\n 2], [x + w / 2, y - w / 2]])\n', (3034, 3136), False, 'from shapely.geometry import Point, LineString, Polygon\n'), ((941, 963), 'numpy.min', 'np.min', (['scanline.xy[1]'], {}), '(scanline.xy[1])\n', (947, 963), True, 'import numpy as np\n'), ((965, 987), 'numpy.max', 'np.max', (['scanline.xy[1]'], {}), '(scanline.xy[1])\n', (971, 987), True, 'import numpy as np\n'), ((2083, 2105), 'numpy.min', 'np.min', (['scanline.xy[0]'], {}), '(scanline.xy[0])\n', (2089, 2105), True, 'import numpy as np\n'), ((2107, 2129), 'numpy.max', 'np.max', (['scanline.xy[0]'], {}), '(scanline.xy[0])\n', (2113, 2129), True, 'import numpy as np\n'), ((294, 320), 'geopandas.GeoSeries', 'gpd.GeoSeries', (['geo_list[i]'], {}), '(geo_list[i])\n', (307, 320), True, 'import geopandas as gpd\n')]
|
import os,copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
def read_spectral_k(filename="tc_dos.dat"):
"""
Reads the spectrial thermal conductivity information
"""
tcdos_labels = [
"wavelength",
"k_xx_raw","k_yy_raw","k_zz_raw",
"k_xx_smooth","k_yy_smooth","k_zz_smooth",
"lifetime_dos1 ","lifetime_dos2"]
def subselect_table_block(i_start,lines):
i = i_start + 1
table = []
while(lines[i].strip() != ""):
args = lines[i].split()
args = [arg.strip() for arg in args]
args = [float(arg) for arg in args]
table.append(args)
i += 1
return np.array(table)
line = None # initialize
with open(filename,'r') as f:
lines = f.readlines()
lines = [s.strip() for s in lines]
temperatures = []
tcdos_dict = OrderedDict()
for il,line in enumerate(lines):
if line.startswith('# Temp:'):
args = line.split(':')
T = int(float(args[1].strip()))
temperatures.append(T)
tcdos_dict[T] = subselect_table_block(il,lines)
tcdos_df_dict = OrderedDict()
for temp in temperatures:
tcdos_df_dict[temp] = pd.DataFrame(
copy.deepcopy(tcdos_dict[temp]),
columns=list(tcdos_labels))
return {k:v.copy() for k,v in tcdos_df_dict.items()}
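# For illustration, read_spectral_k() expects tc_dos.dat to contain one block
# per temperature: a "# Temp:" header followed by 9-column rows (matching
# tcdos_labels) and terminated by a blank line. The numbers below are made up:
#
#   # Temp: 20.0
#   0.5  0.010 0.010 0.010  0.009 0.009 0.009  1.2 1.1
#   1.0  0.020 0.020 0.020  0.019 0.019 0.019  1.3 1.2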
def normalize_tcdos(
data_filename='tc_dos.dat'):
tcdos_df_dict = read_spectral_k(filename=data_filename)
tcdos_df_dict_n = tcdos_df_dict
for k, v in tcdos_df_dict.items():
k_xx_raw = sum(list(tcdos_df_dict[k]['k_xx_raw']))
k_yy_raw = sum(list(tcdos_df_dict[k]['k_yy_raw']))
k_zz_raw = sum(list(tcdos_df_dict[k]['k_zz_raw']))
k_xx_smooth = sum(list(tcdos_df_dict[k]['k_xx_smooth']))
k_yy_smooth = sum(list(tcdos_df_dict[k]['k_yy_smooth']))
k_zz_smooth = sum(list(tcdos_df_dict[k]['k_zz_smooth']))
tcdos_df_dict_n[k]['k_xx_raw'] = tcdos_df_dict[k]['k_xx_raw']/k_xx_raw
tcdos_df_dict_n[k]['k_yy_raw'] = tcdos_df_dict[k]['k_yy_raw']/k_yy_raw
tcdos_df_dict_n[k]['k_zz_raw'] = tcdos_df_dict[k]['k_zz_raw']/k_zz_raw
tcdos_df_dict_n[k]['k_xx_smooth'] = tcdos_df_dict[k]['k_xx_smooth']/k_xx_smooth
tcdos_df_dict_n[k]['k_yy_smooth'] = tcdos_df_dict[k]['k_yy_smooth']/k_yy_smooth
tcdos_df_dict_n[k]['k_zz_smooth'] = tcdos_df_dict[k]['k_zz_smooth']/k_zz_smooth
return {k:v.copy() for k,v in tcdos_df_dict_n.items()}
def make_tcdos_plot(
data_filename='tc_dos.dat',
figure_prefix='tc_dos',
xlim=None,
ylim=None):
tcdos_df_dict = normalize_tcdos(data_filename=data_filename)
for keys in tcdos_df_dict.keys():
        tcdos_figure_filename = figure_prefix + '_' + str(keys) + 'K' + '.png'
figure = plt.figure()
tcdos_plot = figure.add_subplot(111)
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_xx_raw'], label='k_xx_raw', color='g')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_yy_raw'], label='k_yy_raw', color='b')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_zz_raw'], label='k_zz_raw', color='c')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_xx_smooth'], label='k_xx_smooth', color='y')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_yy_smooth'], label='k_yy_smooth', color='m')
tcdos_plot.plot(tcdos_df_dict[keys]['wavelength'],tcdos_df_dict[keys]['k_zz_smooth'], label='k_zz_smooth', color='r')
tcdos_title=plt.title('Spectral thermal conductivity'+ ' at ' + str(keys)+ 'K', fontname='Times New Roman')
tcdos_xlabel=plt.xlabel('Frequency (THz)', fontname='Times New Roman')
tcdos_ylabel=plt.ylabel('Thermal conductivity (W/mK)', fontname='Times New Roman')
tcdos_legend=plt.legend(loc='upper right', prop={'size':8})
tcdos_font=plt.rc('font', family='Times New Roman')
#set axis here
if xlim is not None:
tcdos_plot.set_xlim(xlim)
if ylim is not None:
tcdos_plot.set_ylim(ylim)
figure.savefig(tcdos_figure_filename)
        plt.close(figure)
if __name__ == "__main__":
phonts_sim_dir = 'Ar_result'
tcdos_data_filename = os.path.join(phonts_sim_dir,'tc_dos.dat')
tcdos_figure_prefix = 'tc_dos'
    assert isinstance(tcdos_data_filename, str)
assert os.path.isfile(tcdos_data_filename)
tcdos_df_dict = read_spectral_k(filename=tcdos_data_filename)
    # example of how to use make_tcdos_plot
make_tcdos_plot(
data_filename = tcdos_data_filename,
figure_prefix = tcdos_figure_prefix,
xlim = [0,15],
ylim = [0,0.06])
|
[
"copy.deepcopy",
"matplotlib.pyplot.legend",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.rc",
"collections.OrderedDict",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join"
] |
[((944, 957), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (955, 957), False, 'from collections import OrderedDict\n'), ((1230, 1243), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1241, 1243), False, 'from collections import OrderedDict\n'), ((4479, 4521), 'os.path.join', 'os.path.join', (['phonts_sim_dir', '"""tc_dos.dat"""'], {}), "(phonts_sim_dir, 'tc_dos.dat')\n", (4491, 4521), False, 'import os, copy\n'), ((4609, 4644), 'os.path.isfile', 'os.path.isfile', (['tcdos_data_filename'], {}), '(tcdos_data_filename)\n', (4623, 4644), False, 'import os, copy\n'), ((755, 770), 'numpy.array', 'np.array', (['table'], {}), '(table)\n', (763, 770), True, 'import numpy as np\n'), ((2943, 2955), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2953, 2955), True, 'import matplotlib.pyplot as plt\n'), ((3876, 3933), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (THz)"""'], {'fontname': '"""Times New Roman"""'}), "('Frequency (THz)', fontname='Times New Roman')\n", (3886, 3933), True, 'import matplotlib.pyplot as plt\n'), ((3955, 4024), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Thermal conductivity (W/mK)"""'], {'fontname': '"""Times New Roman"""'}), "('Thermal conductivity (W/mK)', fontname='Times New Roman')\n", (3965, 4024), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4093), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': "{'size': 8}"}), "(loc='upper right', prop={'size': 8})\n", (4056, 4093), True, 'import matplotlib.pyplot as plt\n'), ((4121, 4161), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Times New Roman"""'}), "('font', family='Times New Roman')\n", (4127, 4161), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1361), 'copy.deepcopy', 'copy.deepcopy', (['tcdos_dict[temp]'], {}), '(tcdos_dict[temp])\n', (1343, 1361), False, 'import os, copy\n')]
|
from cnaas_nms.scheduler.scheduler import Scheduler
from cnaas_nms.scheduler.wrapper import job_wrapper
from cnaas_nms.scheduler.jobresult import DictJobResult
from cnaas_nms.tools.printlastjobs import print_jobs
from apscheduler.job import Job
import pprint
import unittest
import pkg_resources
import yaml
import os
import time
@job_wrapper
def testfunc_success(text=''):
print(text)
return DictJobResult(
result = {'status': 'success'}
)
@job_wrapper
def testfunc_exception(text=''):
print(text)
raise Exception("testfunc_exception raised exception")
class InitTests(unittest.TestCase):
def setUp(self):
data_dir = pkg_resources.resource_filename(__name__, 'data')
with open(os.path.join(data_dir, 'testdata.yml'), 'r') as f_testdata:
self.testdata = yaml.safe_load(f_testdata)
scheduler = Scheduler()
scheduler.start()
def tearDown(self):
scheduler = Scheduler()
time.sleep(3)
scheduler.get_scheduler().print_jobs()
print_jobs(2)
scheduler.shutdown()
def test_add_schedule(self):
scheduler = Scheduler()
job1 = scheduler.add_onetime_job(testfunc_success, when=1, kwargs={'text': 'success'})
job2 = scheduler.add_onetime_job(testfunc_exception, when=1, kwargs={'text': 'exception'})
assert isinstance(job1, Job)
assert isinstance(job2, Job)
print(f"Job1 scheduled as ID { job1.id }")
print(f"Job2 scheduled as ID { job2.id }")
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"cnaas_nms.scheduler.jobresult.DictJobResult",
"time.sleep",
"pkg_resources.resource_filename",
"yaml.safe_load",
"cnaas_nms.scheduler.scheduler.Scheduler",
"cnaas_nms.tools.printlastjobs.print_jobs",
"os.path.join"
] |
[((405, 448), 'cnaas_nms.scheduler.jobresult.DictJobResult', 'DictJobResult', ([], {'result': "{'status': 'success'}"}), "(result={'status': 'success'})\n", (418, 448), False, 'from cnaas_nms.scheduler.jobresult import DictJobResult\n'), ((1552, 1567), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1565, 1567), False, 'import unittest\n'), ((664, 713), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""data"""'], {}), "(__name__, 'data')\n", (695, 713), False, 'import pkg_resources\n'), ((868, 879), 'cnaas_nms.scheduler.scheduler.Scheduler', 'Scheduler', ([], {}), '()\n', (877, 879), False, 'from cnaas_nms.scheduler.scheduler import Scheduler\n'), ((951, 962), 'cnaas_nms.scheduler.scheduler.Scheduler', 'Scheduler', ([], {}), '()\n', (960, 962), False, 'from cnaas_nms.scheduler.scheduler import Scheduler\n'), ((971, 984), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (981, 984), False, 'import time\n'), ((1040, 1053), 'cnaas_nms.tools.printlastjobs.print_jobs', 'print_jobs', (['(2)'], {}), '(2)\n', (1050, 1053), False, 'from cnaas_nms.tools.printlastjobs import print_jobs\n'), ((1137, 1148), 'cnaas_nms.scheduler.scheduler.Scheduler', 'Scheduler', ([], {}), '()\n', (1146, 1148), False, 'from cnaas_nms.scheduler.scheduler import Scheduler\n'), ((820, 846), 'yaml.safe_load', 'yaml.safe_load', (['f_testdata'], {}), '(f_testdata)\n', (834, 846), False, 'import yaml\n'), ((732, 770), 'os.path.join', 'os.path.join', (['data_dir', '"""testdata.yml"""'], {}), "(data_dir, 'testdata.yml')\n", (744, 770), False, 'import os\n')]
|
#!/usr/bin/env python3
import signal
import sys
import time
import RPi.GPIO as GPIO
import random
GPIO.setmode(GPIO.BCM)   # BCM pin numbering
GPIO.setup(6, GPIO.OUT) # red
GPIO.setup(13, GPIO.OUT) # red
# GPIO.setup(22, GPIO.OUT) # green
## Catch SIGTERM and turn the LEDs back off!!
def signal_term_handler(signal, frame):
auge1.stop()
auge2.stop()
# GPIO.output(22,False)
GPIO.cleanup()
sys.exit(0)
signal.signal(signal.SIGTERM, signal_term_handler)
## Built-in PWM objects: pin, frequency
auge1=GPIO.PWM(6,100)
auge2=GPIO.PWM(13,100)
## Start with brightness 0
auge1.start(0)
auge2.start(0)
pause_time = 0.02
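## With pause_time = 0.02 s and 101 steps up plus 101 steps down,
## one full fade cycle takes roughly 202 * 0.02 s = ~4 s.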
##
try:
while True:
for i in range(0,101): # Stackoverflow: 101 because it stops when it finishes 100
auge1.ChangeDutyCycle(i)
auge2.ChangeDutyCycle(i)
            ## Ugly workaround for the head LED
# if i % 10 == 0:
# GPIO.output(22, bool( random.getrandbits(1) ) )
time.sleep(pause_time)
for i in range(100,-1,-1): # Stackoverflow: from 100 to zero in steps of -1
auge1.ChangeDutyCycle(i)
auge2.ChangeDutyCycle(i)
# if i % 10 == 0:
# GPIO.output(22, bool( random.getrandbits(1) ) )
time.sleep(pause_time)
## Reset everything if something goes wrong!
except:
auge1.stop()
auge2.stop()
# GPIO.output(22,False)
GPIO.cleanup()
sys.exit(0)
|
[
"RPi.GPIO.setmode",
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"time.sleep",
"RPi.GPIO.PWM",
"signal.signal",
"sys.exit"
] |
[((99, 121), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (111, 121), True, 'import RPi.GPIO as GPIO\n'), ((142, 165), 'RPi.GPIO.setup', 'GPIO.setup', (['(6)', 'GPIO.OUT'], {}), '(6, GPIO.OUT)\n', (152, 165), True, 'import RPi.GPIO as GPIO\n'), ((174, 198), 'RPi.GPIO.setup', 'GPIO.setup', (['(13)', 'GPIO.OUT'], {}), '(13, GPIO.OUT)\n', (184, 198), True, 'import RPi.GPIO as GPIO\n'), ((432, 482), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'signal_term_handler'], {}), '(signal.SIGTERM, signal_term_handler)\n', (445, 482), False, 'import signal\n'), ((530, 546), 'RPi.GPIO.PWM', 'GPIO.PWM', (['(6)', '(100)'], {}), '(6, 100)\n', (538, 546), True, 'import RPi.GPIO as GPIO\n'), ((554, 571), 'RPi.GPIO.PWM', 'GPIO.PWM', (['(13)', '(100)'], {}), '(13, 100)\n', (562, 571), True, 'import RPi.GPIO as GPIO\n'), ((400, 414), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (412, 414), True, 'import RPi.GPIO as GPIO\n'), ((419, 430), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (427, 430), False, 'import sys\n'), ((1463, 1477), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (1475, 1477), True, 'import RPi.GPIO as GPIO\n'), ((1482, 1493), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1490, 1493), False, 'import sys\n'), ((1014, 1036), 'time.sleep', 'time.sleep', (['pause_time'], {}), '(pause_time)\n', (1024, 1036), False, 'import time\n'), ((1317, 1339), 'time.sleep', 'time.sleep', (['pause_time'], {}), '(pause_time)\n', (1327, 1339), False, 'import time\n')]
|
import asyncio
import datetime
import math
import os
import re
import statistics
import time
from types import *
from enum import Enum
from collections import OrderedDict
import aiohttp
import disnake
import utils.drawing.dota as drawdota
import utils.drawing.graph as drawgraph
from disnake.ext import commands
from utils.command.commandargs import *
from utils.other.metastats import get_total_pro_games
from utils.tools.globals import botdata, httpgetter, logger, settings
from utils.tools.helpers import *
from cogs.mangocog import *
class MatchNotParsedError(UserError):
def __init__(self, match_id, action=None):
self.action = action if action else "do that"
super().__init__(f"This match must be parsed before I can {self.action}.\nTry `{{cmdpfx}}parse {match_id}` to request a parse.")
class StratzMatchNotParsedError(UserError):
def __init__(self, match_id):
super().__init__(f"It looks like match `{match_id}` hasn't been parsed by STRATZ. To have your matches parsed by STRATZ a bit faster, you can login to their site: <https://stratz.com>")
class InvalidMatchIdError(UserError):
def __init__(self, match_id):
super().__init__(f"Sorry, looks like `{match_id}` isn't a valid match id")
opendota_html_errors = {
404: "Dats not a valid query. Take a look at the OpenDota API Documentation: https://docs.opendota.com",
521: "[http error 521] Looks like the OpenDota API is down or somethin, so ya gotta wait a sec",
502: "[http error 502] Looks like there was an issue with the OpenDota API. Try again in a bit",
"default": "OpenDota said we did things wrong 😢. http status code: {}"
}
graphtypes = {
"Team Gold/Experience Difference": "teamdiff",
"Player Gold": "playergold"
}
default_steam_icon = "https://steamcdn-a.akamaihd.net/steamcommunity/public/images/avatars/fe/fef49e7fa7e1997310d705b2a6158ff8dc1cdfeb_full.jpg"
# converter for a single dota match
class DotaMatch():
def __init__(self, match, steamid):
self.match = match
self.steamid = steamid
@commands.converter_method
async def convert(cls, inter: disnake.CmdInter, match_string: str):
steamid = None
if match_string.isnumeric():
try:
match = await get_match(match_string)
return cls(match, steamid)
except InvalidMatchIdError as e:
pass # ignore this and continue, they probably passed in a steam id
if match_string in [ "lm", "lastmatch", "last", "me" ]:
match_string = "" # this way the converter will look at the author
try:
matchfilter = await MatchFilter.convert(inter, str(match_string))
if matchfilter.player:
steamid = matchfilter.player.steam_id
match_id = await get_lastmatch_id(matchfilter)
match = await get_match(match_id)
return cls(match, steamid)
except CustomBadArgument as e:
raise UserError(f"Couldn't find a match_id or a player when given '{match_string}'")
def opendota_query_get_url(querystring):
if settings.odota:
if "?" in querystring:
querystring += f"&api_key={settings.odota}"
else:
querystring += f"?api_key={settings.odota}"
return f"https://api.opendota.com/api{querystring}"
async def opendota_query(querystring, cache=False):
url = opendota_query_get_url(querystring)
return await httpgetter.get(url, cache=cache, errors=opendota_html_errors)
async def opendota_query_filter(matchfilter):
matches = await opendota_query(matchfilter.to_query_url())
matches = matchfilter.post_filter(matches)
return matches
# rate_limit = false if this is the only query we're sending
async def get_match(match_id):
url = opendota_query_get_url(f"/matches/{match_id}")
cached_data = httpgetter.cache.get(url, "json")
def check_valid_match(match_data):
if match_data.get('radiant_win', True) is None:
raise InvalidMatchIdError(match_id)
if cached_data:
if is_parsed(cached_data):
check_valid_match(cached_data)
return cached_data
else:
await httpgetter.cache.remove(url)
try:
data = await httpgetter.get(url, cache=True, errors=opendota_html_errors)
check_valid_match(data)
return data
except HttpError as e:
if e.code == 404:
await httpgetter.cache.remove(url)
raise InvalidMatchIdError(match_id)
else:
raise
# rate_limit = false if this is the only query we're sending
async def get_stratz_match(match_id):
if settings.stratz is None:
raise UserError("Stratz not configured properly. The bot owner has gotta put the stratz api key in the config file")
url = f"https://api.stratz.com/api/v1/match/{match_id}"
cached_data = httpgetter.cache.get(url, "json")
if cached_data:
if is_stratz_parsed(cached_data):
return cached_data
else:
await httpgetter.cache.remove(url)
try:
auth_header = { "Authorization": f"Bearer {settings.stratz}" }
return await httpgetter.get(url, cache=True, errors={
500: "Looks like something wrong with the STRATZ api",
204: "STRATZ hasn't recieved this match yet. Try again a bit later"
}, headers=auth_header)
except aiohttp.ClientConnectorError:
logger.info("ClientConnectorError on stratz api result")
raise StratzMatchNotParsedError(match_id)
async def get_lastmatch_id(matchfilter, reverse=False):
no_filter = matchfilter.to_query_args() == ""
matchfilter.set_arg("significant", 0, False)
if not reverse:
matchfilter.set_arg("limit", 1)
matches = await opendota_query_filter(matchfilter)
if matches:
if reverse:
return matches[-1]["match_id"]
else:
return matches[0]["match_id"]
else:
if no_filter:
raise NoMatchHistoryError(matchfilter.player.steam_id)
else:
raise UserError("No matches found using that filter")
def s_if_plural(text, n):
return text + "s" if n > 1 else text
def pretty_list(l, none=None):
if len(l) == 0:
return none
if len(l) == 1:
return l[0]
elif len(l) == 2:
return l[0] + " and " + l[1]
else:
l[-1] = "and " + str(l[-1])
return ", ".join(l)
def get_pretty_time(seconds):
seconds = abs(seconds)
if seconds == 0:
return None
times = [
["{t} second{s}", 60],
["{t} minute{s}", 60],
["{t} hour{s}", 24],
["{t} day{s}", 30.416666666666], # Won't be exactly correct
["{t} month{s}", 12],
["{t} year{s}", 100],
]
result = []
divisor = 1
for time in times:
t = int((seconds // divisor) % time[1])
if t > 0:
result.insert(0, time[0].format(t=t, s="s" if t > 1 else ""))
divisor *= time[1]
return pretty_list(result)
def get_pretty_duration(duration, postfix=True):
if duration == 0:
return "the exact start of the game"
is_after = duration > 0
result = get_pretty_time(duration)
if postfix:
result += " in" if is_after else " before the game started"
return result
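# Worked examples of the helpers above (derived from the code, not live data):
#   pretty_list(["a", "b", "c"])  -> "a, b, and c"
#   get_pretty_time(3661)         -> "1 hour, 1 minute, and 1 second"
#   get_pretty_duration(-90)      -> "1 minute and 30 seconds before the game started"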
def is_parsed(match):
return match.get("version", None) is not None
def is_stratz_parsed(match):
return match.get("parsedDateTime") and match["players"][0].get("playbackData") and match["players"][0].get("playbackData").get("playerUpdatePositionEvents")
def format_teamfight(teamfight):
if teamfight['our_dead'] is None and teamfight['their_dead'] is None:
format_str = "There was a teamfight with no deaths"
elif teamfight['our_dead'] is None:
format_str = "We killed their {their_dead} without losing anyone"
elif teamfight['their_dead'] is None:
format_str = "We lost our {our_dead} and couldn't kill any of them"
else:
format_str = "We traded our {our_dead} for {their_dead}"
format_str += ", resulting in a net {gain_loss} of {net_change:,} gold"
return format_str.format(**teamfight)
def _match_avg(player_matches, key, round_place=0):
x = 0
total_count = 0
for player in player_matches:
if isinstance(key, LambdaType):
val = key(player)
else:
if player.get(key) is None:
continue
val = player.get(key, 0)
x += val
total_count += 1
if total_count == 0:
return None
x = round(x / total_count, round_place)
return int(x) if round_place == 0 else x
def _match_percent(player_matches, key, round_place=0, needs_key=None):
count = 0
total_count = 0
for player in player_matches:
if needs_key and player.get(needs_key) is None:
continue
if isinstance(key, LambdaType):
success = key(player)
else:
success = player.get(key, 0)
if success:
count += 1
total_count += 1
if total_count == 0:
return None
if round_place == "floor":
count = math.floor((count * 100) / total_count)
round_place = 0
else:
count = round((count * 100) / total_count, round_place)
value = int(count) if round_place == 0 else count
return f"{value}%"
class DotaStats(MangoCog):
"""Commands for displaying information about Dota 2 players and matches
Most of the data for this is collected through the [OpenDota API](https://docs.opendota.com/)"""
def __init__(self, bot):
MangoCog.__init__(self, bot)
self.embed_color = disnake.Color.teal()
dotabase = self.bot.get_cog("Dotabase")
if not dotabase:
raise ImportError("The Dotabase cog must be added before the DotaStats cog")
self.dota_game_strings = read_json(settings.resource("json/dota_game_strings.json"))
self.hero_info = dotabase.get_hero_infos()
self.lookup_hero = dotabase.lookup_hero
self.chat_wheel_info = dotabase.get_chat_wheel_infos()
self.dota_gif_lock = asyncio.Lock()
async def get_meta_json(self):
url = 'https://api.opendota.com/api/herostats'
return await httpgetter.get(url)
def sort_meta(self, json, num=10):
"""re-orders the meta json based on pick/ban + winrate.
num = number of top heroes to include """
total_games = get_total_pro_games(json)
json = list(filter(lambda x: x.get('pro_pick', 1) > 0, json))
sorted_json = sorted(
json,
reverse=True,
# sorts by (winrate) + (pick/ban rate)
key=lambda x: (((x.get('pro_pick', 0) + x.get('pro_ban', 0)) / total_games)
+ ((x.get('pro_win', 0) / x.get('pro_pick', 1)))),
)
if num > 0:
return sorted_json[:num]
return sorted_json
def get_pretty_hero(self, player, use_icons=False):
dotabase = self.bot.get_cog("Dotabase")
if player["hero_id"] not in self.hero_info:
return "**Unknown**"
name = self.hero_info[player["hero_id"]]["name"]
if use_icons:
emoji = self.hero_info[player["hero_id"]]["emoji"]
return f"{emoji}**{name}**"
return f"**{name}**"
def get_player_rank(self, playerinfo):
		# gets the player's rank information as a string with a rank emoticon
rank_strings = [ "Unranked", "Herald", "Guardian", "Crusader", "Archon", "Legend", "Ancient", "Divine", "Immortal" ]
base_rank_tier = playerinfo.get("rank_tier")
if base_rank_tier is None:
base_rank_tier = 0
rank_tier = base_rank_tier // 10
leaderboard_rank = playerinfo.get("leaderboard_rank")
rank_string = f"**{rank_strings[rank_tier]}**"
stars = min(base_rank_tier % 10, 7)
if stars > 0:
rank_string += f" [{stars}]"
on_leaderboard = rank_tier >= 7 and leaderboard_rank
if on_leaderboard:
rank_string = f"**Immortal** [Rank {leaderboard_rank}]"
rank_tier = 8
emoji_id = f"rank_{rank_tier}"
if on_leaderboard:
if leaderboard_rank <= 10:
emoji_id += "c"
elif leaderboard_rank <= 100:
emoji_id += "b"
rank_string = self.get_emoji(emoji_id) + " " + rank_string
return rank_string
async def get_player_mention(self, steamid, ctx_inter: InterContext):
# expects that steamid is a valid int
player = await DotaPlayer.convert(ctx_inter, steamid)
return player.mention
async def create_dota_gif(self, match, stratz_match, start_time, end_time, ms_per_second=100):
await self.dota_gif_lock.acquire()
try:
result = await drawdota.create_dota_gif(self.bot, match, stratz_match, start_time, end_time, ms_per_second)
finally:
self.dota_gif_lock.release()
return result
async def get_teamfights(self, game, is_radiant):
teamfights = []
for teamfight in game['teamfights']:
net_gain = 0
our_dead = []
their_dead = []
num_players = min([ len(teamfight['players']), len(game['players']) ])
for i in range(0, num_players):
deadtext = self.get_pretty_hero(game['players'][i])
if teamfight['players'][i]['deaths'] == 0:
deadtext = None
elif teamfight['players'][i]['deaths'] > 1:
deadtext += "(x{})".format(teamfight['players'][i]['deaths'])
if (game['players'][i]['isRadiant'] == is_radiant): # on our team
net_gain += teamfight['players'][i]['gold_delta']
if deadtext:
our_dead.append(deadtext)
else:
net_gain -= teamfight['players'][i]['gold_delta']
if deadtext:
their_dead.append(deadtext)
teamfight_dict = {
"gain_loss": "gain" if net_gain >= 0 else "loss",
"our_dead": pretty_list(our_dead, None),
"their_dead": pretty_list(their_dead, None),
"net_change": abs(net_gain),
"deaths": teamfight['deaths'],
"time": teamfight['start'],
"time_end": teamfight['end']
}
teamfight_dict['formatted'] = format_teamfight(teamfight_dict)
teamfights.append(teamfight_dict)
return teamfights
async def get_firstblood_story(self, game, is_radiant):
fb_objective = next((obj for obj in game['objectives'] if obj['type'] == "CHAT_MESSAGE_FIRSTBLOOD"), None)
if fb_objective is None:
return "" # No first blood this game, or it wasnt reported in objectives log
fb_log = None
fb_killer = next(p for p in game['players'] if p['player_slot'] == fb_objective['player_slot'])
fb_log = next((kill for kill in fb_killer['kills_log'] if kill['time'] == fb_objective['time']), None)
if fb_log is None:
return "" # Can't find the kill log of when first blood happened
dotabase = self.bot.get_cog("Dotabase")
fb_victim_id = next(h for h in self.hero_info if self.hero_info[h]['full_name'] == fb_log['key'])
fb_victim = next(p for p in game['players'] if p['hero_id'] == fb_victim_id)
return "First blood was drawn when {} {} killed {} {} at {}\n\n".format(
"our" if (fb_killer['isRadiant'] == is_radiant) else "their",
self.get_pretty_hero(fb_killer),
"our" if (fb_victim['isRadiant'] == is_radiant) else "their",
self.get_pretty_hero(fb_victim),
get_pretty_duration(fb_objective['time']))
async def get_teamfight_stories(self, game, is_radiant):
teamfights = await self.get_teamfights(game, is_radiant)
teamfights_count = len(teamfights)
story = ""
timeline = []
most_deaths_fights = 2
most_change_fights = 2
if len(teamfights) > most_deaths_fights + most_change_fights:
# do calcs
teamfights = sorted(teamfights, key=lambda t: t['net_change'], reverse=True)
for i in range(0, most_change_fights):
timeline.append(teamfights.pop(0))
teamfights = sorted(teamfights, key=lambda t: t['deaths'], reverse=True)
for i in range(0, most_deaths_fights):
timeline.append(teamfights.pop(0))
else:
timeline.extend(teamfights)
teamfights = []
timeline = sorted(timeline, key=lambda t: t['time'])
return list(map(lambda t: t["formatted"], timeline))
async def get_lane_story(self, players, laneid, is_radiant, use_icons=False):
our_eff = 0
their_eff = 0
our_heroes = []
their_heroes = []
for player in players:
if player['lane'] == laneid and not player.get('is_roaming', False):
if (player['isRadiant'] == is_radiant): #on our team
if player.get('lane_efficiency', 0) > our_eff:
our_eff = player['lane_efficiency']
our_heroes.append(self.get_pretty_hero(player, use_icons))
else: #on their team
if player.get('lane_efficiency', 0) > their_eff:
their_eff = player['lane_efficiency']
their_heroes.append(self.get_pretty_hero(player, use_icons))
return {
"us": pretty_list(our_heroes, "An empty lane"),
"won_lost": "won" if our_eff > their_eff else "lost",
"them": pretty_list(their_heroes, "an empty lane")
}
# gets the story for all of the lanes
async def get_lane_stories(self, game, is_radiant, use_icons=False):
story = ""
lanes = {1: "bottom", 2: "middle", 3: "top"}
for laneid in lanes:
story += "• {0[us]} {0[won_lost]} {1} lane vs {0[them]}\n".format(await self.get_lane_story(game['players'], laneid, is_radiant, use_icons), lanes[laneid])
roamers = [self.get_pretty_hero(p, use_icons) for p in game['players'] if p.get('is_roaming')]
if roamers:
story += f"• {pretty_list(roamers)} roamed\n"
return story
def set_match_footer(self, match, embed):
dotabase = self.bot.get_cog("Dotabase")
footer_text = str(match["match_id"])
patch = dotabase.get_match_patch(match)
if patch:
footer_text += " • " + patch
embed.set_footer(text=footer_text)
embed.timestamp = datetime.datetime.fromtimestamp(match['start_time'], tz=datetime.timezone.utc)
# prints the stats for the given player's latest game
async def player_match_stats(self, steamid, match, inter):
# Finds the player in the game which has our matching steam32 id
match_id = match["match_id"]
player = None
if steamid:
player = next((p for p in match['players'] if p['account_id'] == steamid), None)
if player is None:
await self.print_match_stats(inter, match)
return
hero_name = self.hero_info[player['hero_id']]['name']
duration = get_pretty_duration(match['duration'], postfix=False)
winstatus = "Won" if player["win"] != 0 else "Lost"
game_mode = self.dota_game_strings.get(f"game_mode_{match.get('game_mode')}", "Unknown")
lobby_type = self.dota_game_strings.get(f"lobby_type_{match.get('lobby_type')}", "Unknown") + " "
if lobby_type == "Normal ":
lobby_type = ""
description = (f"{winstatus} a {lobby_type}**{game_mode}** match as {hero_name} in {duration}. "
f"More info at [DotaBuff](https://www.dotabuff.com/matches/{match_id}), "
f"[OpenDota](https://www.opendota.com/matches/{match_id}), or "
f"[STRATZ](https://www.stratz.com/match/{match_id})")
embed = disnake.Embed(description=description, color=self.embed_color)
embed.set_author(name=player.get('personaname') or "Anonymous", icon_url=self.hero_info[player['hero_id']]['icon'], url="https://www.opendota.com/players/{}".format(steamid))
damage_format = "KDA: **{kills}**/**{deaths}**/**{assists}**\n"
if player.get("hero_damage") is not None:
damage_format += "Hero Damage: {hero_damage:,}\n"
if player.get("hero_healing") is not None:
damage_format += "Hero Healing: {hero_healing:,}\n"
if player.get("tower_damage") is not None:
damage_format += "Tower Damage: {tower_damage:,}\n"
embed.add_field(name="Damage", value=damage_format.format(**player))
if not player.get("total_gold"):
player["total_gold"] = 0
embed.add_field(name="Economy", value=(
"Net Worth: {total_gold:,}\n"
"Last Hits: {last_hits:,}\n"
"Denies: {denies}\n"
"Level: {level}\n".format(**player)))
match_image = disnake.File(await drawdota.create_match_image(match), "match.png")
embed.set_image(url=f"attachment://{match_image.filename}")
self.set_match_footer(match, embed)
await inter.send(embed=embed, file=match_image)
@commands.slash_command()
async def lm(self, inter: disnake.CmdInter, matchfilter: MatchFilter = None):
"""Gets info about the player's last dota game
Parameters
----------
matchfilter: Specify how to filter these matches. To learn more, try '/docs Match Filter'
"""
await inter.response.defer()
matchfilter = await MatchFilter.init(matchfilter, inter)
player = matchfilter.player
match_id = await get_lastmatch_id(matchfilter)
match = await get_match(match_id)
await self.player_match_stats(player.steam_id, match, inter)
@commands.slash_command()
async def firstmatch(self, inter: disnake.CmdInter, matchfilter: MatchFilter = None):
"""Gets info about the player's first dota game
Parameters
----------
matchfilter: Specify how to filter these matches. To learn more, try '/docs Match Filter'
"""
await inter.response.defer()
matchfilter = await MatchFilter.init(matchfilter, inter)
player = matchfilter.player
match_id = await get_lastmatch_id(matchfilter, reverse=True)
match = await get_match(match_id)
await self.player_match_stats(player.steam_id, match, inter)
async def print_match_stats(self, inter, match):
match_id = match["match_id"]
duration = get_pretty_duration(match['duration'], postfix=False)
game_mode = self.dota_game_strings.get(f"game_mode_{match.get('game_mode')}", "Unknown")
lobby_type = self.dota_game_strings.get(f"lobby_type_{match.get('lobby_type')}", "Unknown") + " "
if lobby_type == "Normal ":
lobby_type = ""
embed = disnake.Embed(color=self.embed_color)
embed.description = (f"This {lobby_type}**{game_mode}** match ended in {duration} \n"
f"More info at [DotaBuff](https://www.dotabuff.com/matches/{match_id}), "
f"[OpenDota](https://www.opendota.com/matches/{match_id}), or "
f"[STRATZ](https://www.stratz.com/match/{match_id})")
embed.set_author(name="Match {}".format(match_id), url="https://www.opendota.com/matches/{}".format(match_id))
embed.add_field(name="Game Mode", value=game_mode)
embed.add_field(name="Lobby Type", value=game_mode)
match_image = disnake.File(await drawdota.create_match_image(match), filename="matchimage.png")
embed.set_image(url=f"attachment://{match_image.filename}")
self.set_match_footer(match, embed)
await inter.send(embed=embed, file=match_image)
# a header to be used for sub commands
@commands.slash_command()
async def match(self, inter: disnake.CmdInter):
await inter.response.defer()
pass
@match.sub_command(name="info")
async def match_info(self, inter: disnake.CmdInter, match: DotaMatch):
"""Creates a table with some basic stats and information about the dota match
Parameters
----------
match: The ID of the match, a reference to a player, or 'lm'. See '/docs Match Argument` for more info
"""
await self.player_match_stats(match.steamid, match.match, inter)
@match.sub_command(name="story")
async def match_story(self, inter: disnake.CmdInter, match: DotaMatch, perspective: commands.option_enum(OrderedDict({"Radiant": "radiant", "Dire": "dire"})) = "radiant"):
"""Tells the story of the match
Parameters
----------
match: The ID of the match, a reference to a player, or 'lm'. See '/docs Match Argument` for more info
perspective: The team whose perspective we should tell the match from
"""
steamid = match.steamid
match = match.match
if steamid is not None:
player_data = next((p for p in match['players'] if p['account_id'] == steamid), None)
if steamid is not None and player_data is not None:
is_radiant = player_data['isRadiant']
perspective = "{2}({0}, {1})".format(self.get_pretty_hero(player_data), "Radiant" if is_radiant else "Dire", player_data.get("personaname"))
else:
is_radiant = True
elif perspective.lower() == "radiant":
is_radiant = True
perspective = None
elif perspective.lower() == "dire":
is_radiant = False
perspective = None
else:
raise UserError("Perspective must be either radiant or dire")
if not is_parsed(match):
raise MatchNotParsedError(match["match_id"], "create a story")
if not perspective:
perspective = "The Radiant" if is_radiant else "The Dire"
end_perspective = perspective
else:
end_perspective = f"{perspective} and their friends"
story = (f"*Told from the perspective of {perspective}*\n"
f"To see a more extensive story, try the [story tab](https://www.opendota.com/matches/{match['match_id']}/story) on opendota\n\n")
story += await self.get_firstblood_story(match, is_radiant)
story += await self.get_lane_stories(match, is_radiant)
teamfights = await self.get_teamfight_stories(match, is_radiant)
match_ending_state = "won" if (is_radiant == match['radiant_win']) else "lost"
story_end = f"\n{end_perspective} {match_ending_state} the match at { get_pretty_duration(match['duration']) }"
i = 0
while i < len(teamfights) and (len(story) + len(teamfights[i]) + len(story_end)) < 2000:
story += f"\n\n{teamfights[i]}"
i += 1
embed = disnake.Embed(description=story, color=self.embed_color)
embed.title = f"Story of Match {match['match_id']}"
embed.url = f"https://www.opendota.com/matches/{match['match_id']}/story"
self.set_match_footer(match, embed)
await inter.send(embed=embed)
@commands.slash_command()
async def recent(self, inter: disnake.CmdInter, matchfilter: MatchFilter = None):
"""Gets a list of your recent dota matches
Parameters
----------
matchfilter: Specify how to filter these matches. To learn more, try '/docs Match Filter'
"""
await inter.response.defer()
matchfilter = await MatchFilter.init(matchfilter, inter)
steam32 = matchfilter.player.steam_id
matchfilter.set_arg("limit", 10, False)
matchfilter.set_arg("significant", 0, False)
limit_max = 100
if matchfilter.get_arg("limit") > limit_max or matchfilter.has_value("date"):
matchfilter.set_arg("limit", limit_max, True)
if matchfilter.get_arg("limit") < 1:
raise UserError("Limit of matches can't be less than 1")
hero = matchfilter.hero
matchfilter.add_projections([ "kills", "deaths", "assists", "hero_id", "version", "game_mode", "lobby_type", "region", "duration", "start_time" ])
matches = await opendota_query_filter(matchfilter)
if not matches:
raise UserError("I can't find any matches that match that filter")
matches = sorted(matches, key=lambda m: m.get("start_time"), reverse=True)
embed = disnake.Embed()
embed.title = "Recent Matches"
embed.url = f"https://www.opendota.com/players/{steam32}/matches"
if hero:
embed.title += f" as {hero.localized_name}"
embed.url += f"?hero_id={hero.id}"
if hero.color:
embed.color = disnake.Color(int(hero.color[1:], 16))
matches_image = await drawdota.draw_matches_table(matches, self.dota_game_strings)
matches_image = disnake.File(matches_image, "matches.png")
embed.set_image(url=f"attachment://{matches_image.filename}")
embed.set_footer(text=f"Try /matchids to get copy-pastable match ids")
await inter.send(embed=embed, file=matches_image)
@commands.slash_command()
async def matchids(self, inter: disnake.CmdInter, matchfilter: MatchFilter = None):
"""Gets a list of recent matchids that match the given filter
Parameters
----------
matchfilter: Specify how to filter these matches. To learn more, try '/docs Match Filter'"""
await inter.response.defer()
matchfilter = await MatchFilter.init(matchfilter, inter)
steam32 = matchfilter.player.steam_id
matchfilter.set_arg("limit", 10, False)
matchfilter.set_arg("significant", 0, False)
limit_max = 100
if matchfilter.get_arg("limit") > limit_max or matchfilter.has_value("date"):
matchfilter.set_arg("limit", limit_max, True)
if matchfilter.get_arg("limit") < 1:
raise UserError("Limit of matches can't be less than 1")
matchfilter.add_projections([ "kills", "deaths", "assists", "hero_id", "version", "game_mode", "lobby_type", "region", "duration", "start_time" ])
matches = await opendota_query_filter(matchfilter)
if not matches:
raise UserError("I can't find any matches that match that filter")
matches = sorted(matches, key=lambda m: m.get("start_time"), reverse=True)
embed = disnake.Embed()
embed.title = "Recent Matches"
embed.url = f"https://www.opendota.com/players/{steam32}/matches"
embed.description = "```\n"
embed.description += "\n".join(list(map(lambda m: str(m["match_id"]), matches)))
embed.description += "\n```"
embed.set_footer(text=f"Try /recent to get more details about these matches")
await inter.send(embed=embed)
@commands.slash_command()
async def meta(self, inter: disnake.CmdInter, count: commands.Range[1, 120] = 10):
"""Prints the top meta heroes from https://opendota.com/heroes
Parameters
----------
count: The number of heroes to show
"""
await inter.response.defer()
json = await self.get_meta_json()
sorted_json = self.sort_meta(json, count)
description = (f"Top {count} meta hero(s) in professional matches")
embed = disnake.Embed(description = description, color=self.embed_color)
meta_table = disnake.File(await drawdota.draw_meta_table(sorted_json, json), "meta.png")
embed.set_image(url=f"attachment://{meta_table.filename}")
await inter.send(embed=embed, file=meta_table)
@commands.slash_command()
async def profile(self, inter: disnake.CmdInter, player: DotaPlayer = None):
"""Displays information about the player's dota profile
Parameters
----------
player: Either a steam32 id, a steam64 id, or an @mention of a discord user who has a steamid set
"""
if not player:
player = await DotaPlayer.from_author(inter)
steam32 = player.steam_id
await inter.response.defer()
playerinfo = await opendota_query(f"/players/{steam32}")
matches = await opendota_query(f"/players/{steam32}/matches")
matches = list(filter(lambda m: m.get('player_slot') is not None, matches))
rank_string = self.get_player_rank(playerinfo)
gamesplayed = len(matches)
if gamesplayed > 0:
winrate = "{:.2%}".format(len(list(filter(lambda m: m.get('radiant_win', False) == ((m.get('player_slot', 0) or 0) < 128), matches))) / gamesplayed)
else:
winrate = "0%"
heroes = {}
for match in matches:
heroes[match['hero_id']] = heroes.get(match['hero_id'], 0) + 1
heroes = sorted(heroes.items(), key=lambda x: x[1], reverse=True)
favs = ""
for i in range(0,3):
if i < len(heroes):
favs += self.hero_info[heroes[i][0]]['emoji']
# Recent means 2 months / 60 days
timecutoff = time.time() - (86400 * 60)
heroes = {}
for match in matches:
if match['start_time'] > timecutoff:
heroes[match['hero_id']] = heroes.get(match['hero_id'], 0) + 1
heroes = sorted(heroes.items(), key=lambda x: x[1], reverse=True)
recent_favs = ""
for i in range(0,3):
if i < len(heroes):
recent_favs += self.hero_info[heroes[i][0]]['emoji']
recent_count = 0
activity_delta = []
activity_count = []
count = 1
for i in range(0, len(matches) - 1):
delta = matches[i]["start_time"] - (matches[i + 1]["start_time"] + matches[i]["duration"])
if delta < (60 * 60 * 2): # If these are part of the same group
count += 1
continue
else:
activity_count.append(count)
activity_delta.append(delta)
count = 1
if matches[i]["start_time"] > timecutoff:
recent_count += 1
if not activity_delta:
activity_delta = [ 0 ]
activity_count = [ 0 ]
overall_time_played = 0
for match in matches:
overall_time_played += match["duration"]
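# average gap between play sessions, rounded down to whole minutes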
overall_activity_delta = get_pretty_time((int(statistics.mean(activity_delta)) // 60) * 60)
if recent_count:
recent_activity_delta = get_pretty_time((int(statistics.mean(activity_delta[:recent_count])) // 60) * 60)
else:
recent_activity_delta = None
# overall_activity_count = int(statistics.mean(activity_count))
# recent_activity_count = int(statistics.mean(activity_count[:recent_count]))
plus_text = ""
if playerinfo["profile"].get("plus"):
plus_text = f"\n{self.get_emoji('dota_plus')} has Dota Plus"
embed = disnake.Embed(color=self.embed_color)
embed.set_author(
name=playerinfo["profile"]["personaname"] or "Anonymous",
icon_url=playerinfo["profile"]["avatar"] or default_steam_icon,
url=playerinfo["profile"]["profileurl"] or f"https://www.opendota.com/players/{steam32}")
embed.add_field(name="General", value=(
f"Winrate: **{winrate}**\n"
f"Games Played: **{gamesplayed}**\n"
f"Total Hours In Game: **{overall_time_played // 3600:,}**\n"
f"{rank_string}"
f"{plus_text}"))
embed.add_field(name="Profiles", value=(
f"[Steam]({playerinfo['profile']['profileurl']})\n"
f"[OpenDota](https://www.opendota.com/players/{steam32})\n"
f"[DotaBuff](https://www.dotabuff.com/players/{steam32})\n"
f"[STRATZ](https://www.stratz.com/player/{steam32})"))
embed.add_field(name="Heroes", value=(
f"[Recent Favs](https://www.opendota.com/players/{steam32}/heroes?date=60) {recent_favs}\n"
f"[Overall Favs](https://www.opendota.com/players/{steam32}/heroes) {favs}\n"))
embed.add_field(name="Activity", value=(
"*Average time between groups of games*\n"
f"**Recent**: {recent_activity_delta}\n"
f"**Overall**: {overall_activity_delta}\n"), inline=False)
if player.is_author:
player_mention = ""
else:
player_mention = player.steam_id
rank_icon = await drawdota.dota_rank_icon(playerinfo.get("rank_tier"), playerinfo.get("leaderboard_rank"))
rank_icon = disnake.File(rank_icon, "rank.png")
embed.set_thumbnail(url=f"attachment://{rank_icon.filename}")
embed.set_footer(text=f"Steam ID: {steam32}")
await inter.send(embed=embed, file=rank_icon)
@commands.slash_command()
async def twenty(self, inter: disnake.CmdInter, matchfilter: MatchFilter = None):
"""Gets stats from the player's last 20 parsed games
Parameters
----------
matchfilter: Specify how to filter these matches. To learn more, try '/docs Match Filter'
"""
matchfilter = await MatchFilter.init(matchfilter, inter)
matchfilter.set_arg("limit", 20, True)
matchfilter.set_arg("_parsed", True)
await self.do_playerstats(inter, matchfilter, do_downloaded=True)
@commands.slash_command()
async def playerstats(self, inter: disnake.CmdInter, matchfilter: MatchFilter = None):
"""Gets stats about the player's dota matches
Parameters
----------
matchfilter: Specify how to filter these matches. To learn more, try '/docs Match Filter'
"""
matchfilter = await MatchFilter.init(matchfilter, inter)
await self.do_playerstats(inter, matchfilter)
# the main internal logic for the playerstats and twenty commands
async def do_playerstats(self, inter: disnake.CmdInter, matchfilter: MatchFilter, do_downloaded=False):
matchfilter.add_projections([ "kills", "deaths", "assists", "party_size", "version", "hero_id", "lane_role", "is_roaming", "lobby_type", "start_time", "duration" ])
steam32 = matchfilter.player.steam_id
await inter.response.defer()
#
# STEP 1: download all match data
#
playerinfo = await opendota_query(f"/players/{steam32}")
matches_info = await opendota_query_filter(matchfilter)
matches_info = sorted(matches_info, key=lambda m: m["start_time"])
player_matches = []
if do_downloaded:
matches = []
i = 0
while i < len(matches_info) and len(matches) < 20:
if matches_info[i].get('version', None) is not None:
match = await get_match(matches_info[i]['match_id'])
player_match = next((p for p in match['players'] if p['account_id'] == steam32), None)
if player_match is not None:
player_matches.append(player_match)
matches.append(match)
i += 1
else:
player_matches = matches_info
if len(player_matches) == 0:
if do_downloaded:
await inter.send("Not enough parsed matches!")
else:
await inter.send("Not enough matches found!")
return
#
# STEP 2: initialize discord embed, depending on what we filtered for
#
embed = disnake.Embed(color=self.embed_color)
embed_attachment = None
if do_downloaded:
embed.description = f"*The following are averages and percentages based on the last {len(player_matches)} parsed matches*"
else:
embed.description = ""
embed.set_footer(text=f"To see the filtering options for this command, try \"/docs matchfilter\"")
matches_url = f"https://www.opendota.com/players/{steam32}/matches?{matchfilter.to_query_args(for_web_url=True)}"
author_name = playerinfo["profile"]["personaname"] or "Anonymous"
author_icon_url = playerinfo["profile"]["avatar"] or default_steam_icon
# if this is stats for playing as a specific hero
if matchfilter.has_value("hero_id"):
hero = self.lookup_hero(matchfilter.get_arg("hero_id"))
author_icon_url = self.hero_info[hero.id]["icon"]
embed.set_thumbnail(url=self.hero_info[hero.id]['portrait'])
embed.color = disnake.Color(int(hero.color[1:], 16))
# if this is stats for playing with someone
if matchfilter.has_value("included_account_id"):
# make friends image
avatar1 = playerinfo['profile']['avatarfull'] or default_steam_icon
player2_id = matchfilter.get_arg("included_account_id")
player2_info = await opendota_query(f"/players/{player2_id}")
avatar2 = player2_info['profile']['avatarfull'] or default_steam_icon
image = disnake.File(await drawdota.combine_image_halves(avatar1, avatar2), "profile.png")
embed.set_thumbnail(url=f"attachment://{image.filename}")
embed_attachment = image
author_name += f" + {player2_info['profile']['personaname'] or 'Anonymous'}"
# also add the dates of first and last match to description
first_match = player_matches[0]
last_match = player_matches[-1]
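# human readable "time ago", using coarser granularity the older the match is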
def get_time_diff(match):
timediff = time.time() - match["start_time"]
timediff -= timediff % 60 # only show up to minutes level of detail
if timediff > (29 * 60 * 60 * 24): # if was over a month ago
timediff -= (timediff % (60 * 60 * 24)) # only show up to days level of detail
if timediff > (3 * 60 * 60 * 24): # if was over a couple day ago
timediff -= (timediff % (60 * 60)) # only show up to hours level of detail
return get_pretty_time(timediff)
embed.description += f"\n[First Match](https://www.opendota.com/matches/{first_match['match_id']}): {get_time_diff(first_match)} ago"
embed.description += f"\n[Last Match](https://www.opendota.com/matches/{last_match['match_id']}): {get_time_diff(last_match)} ago"
embed.set_author(
name=author_name,
icon_url=author_icon_url,
url=matches_url)
#
# STEP 3: define all stats together
#
def avg(*args, **kwargs):
return _match_avg(player_matches, *args, **kwargs)
def percent(*args, **kwargs):
return _match_percent(player_matches, *args, **kwargs)
# compute favorites
heroes = {}
for match in player_matches:
heroes[match['hero_id']] = heroes.get(match['hero_id'], 0) + 1
heroes = sorted(heroes.items(), key=lambda x: x[1], reverse=True)
favorite_heroes = "".join(map(lambda h: self.hero_info[h[0]]['emoji'], heroes[0:3]))
zeropercent = "0%"
# laning postfix if needed
laning_postfix = ""
parsed_count = len(list(filter(lambda m: m.get("version") is not None, player_matches)))
if parsed_count != len(player_matches) and not do_downloaded:
laning_postfix = f" ({parsed_count} parsed matches)"
class CoolStat():
def __init__(self, caption, value, filter_key=None, ignore_value=None, separator=": ", bold=True):
self.caption = caption
self.value = value
self.filter_key = filter_key
self.ignore_value = ignore_value
self.separator = separator
self.bold = bold
def should_show(self):
return not(self.value == self.ignore_value or matchfilter.has_value(self.filter_key))
def render(self):
if not self.caption:
return str(self.value)
value = self.value
if self.bold:
value = f"**{value}**"
return f"{self.caption}{self.separator}{value}"
stat_sections = [
{
"caption": "General",
"stats": [
CoolStat(f"[Matches]({matches_url})", len(player_matches)),
CoolStat("Winrate", percent(lambda p: p.get('radiant_win') == (p.get('player_slot') < 128)), filter_key="win"),
CoolStat("KDA", f"{avg('kills')}/{avg('deaths')}/{avg('assists')}"),
CoolStat("Duration", format_duration_simple(avg('duration') or 0)),
CoolStat("In a Party", percent(lambda p: p.get('party_size') > 1, needs_key='party_size', round_place="floor")),
CoolStat("Ranked", percent(lambda p: p['lobby_type'] == 7), filter_key="lobby_type")
]
},
{
"caption": "Heroes",
"filter_key": "hero_id",
"stats": [
CoolStat(self.get_emoji('attr_strength'), percent(lambda p: self.hero_info.get(p['hero_id'], {}).get('attr') == 'strength'), separator=" "),
CoolStat(self.get_emoji('attr_agility'), percent(lambda p: self.hero_info.get(p['hero_id'], {}).get('attr') == 'agility'), separator=" "),
CoolStat(self.get_emoji('attr_intelligence'), percent(lambda p: self.hero_info.get(p['hero_id'], {}).get('attr') == 'intelligence'), separator=" "),
CoolStat("Randomed", percent('randomed'), ignore_value=zeropercent),
CoolStat("__Favorites__", f"\n{favorite_heroes}")
]
},
{
"caption": f"Laning{laning_postfix}",
"filter_key": "lane_role",
"stats": [
CoolStat("Safe Lane", percent(lambda p: p.get('lane_role') == 1 and not p.get('is_roaming'), needs_key="lane_role"), ignore_value=zeropercent),
CoolStat("Mid Lane", percent(lambda p: p.get('lane_role') == 2 and not p.get('is_roaming'), needs_key="lane_role"), ignore_value=zeropercent),
CoolStat("Off Lane", percent(lambda p: p.get('lane_role') == 3 and not p.get('is_roaming'), needs_key="lane_role"), ignore_value=zeropercent),
CoolStat("Jungle", percent(lambda p: p.get('lane_role') == 4 and not p.get('is_roaming'), needs_key="lane_role"), ignore_value=zeropercent),
CoolStat("Roaming", percent(lambda p: p.get('is_roaming'), needs_key="is_roaming"), ignore_value=zeropercent),
]
}
]
if do_downloaded:# if we've downloaded all of these matches, compute all the chat history stuff, and add the downloaded stats
chat_wheel_counts = {}
chat_wheel_total = 0
longest_message_heading = "Longest Chat Message"
message_count = 0
longest_message = None
longest_message_match_id = None
for match in matches:
player = next((p for p in match['players'] if p['account_id'] == steam32), None)
match_chat = match.get('chat', None)
if match_chat:
for message in match_chat:
if message.get('player_slot', -1) == player['player_slot']:
if message["type"] == "chat":
message_count += 1
if longest_message is None or len(longest_message) <= len(message['key']):
longest_message = message['key']
longest_message_match_id = match['match_id']
elif message["type"] == "chatwheel":
msg_id = int(message['key'])
if msg_id >= 1000:
continue # skip hero chat wheels
chat_wheel_counts[msg_id] = chat_wheel_counts.get(msg_id, 0) + 1
chat_wheel_total += 1
message_count = int(round(message_count / len(matches)))
if longest_message is not None:
longest_message = f"\"{longest_message}\""
longest_message_heading = f"[{longest_message_heading}](https://www.opendota.com/matches/{longest_message_match_id}/chat)"
chat_wheel_text = "*No chat wheel usage found*"
if chat_wheel_counts != {}:
lines = []
chat_wheel_counts = sorted(chat_wheel_counts.items(), key=lambda m: m[1], reverse=True)
for i in range(0, min(3, len(chat_wheel_counts))):
msg_id, count = chat_wheel_counts[i]
message = self.chat_wheel_info.get(msg_id, { "message": "Unknown" })
icon = self.get_emoji("chat_wheel_sound" if message.get('is_sound') else "chat_wheel_text")
lines.append(f"{icon} {message['message']}")
chat_wheel_text = "\n".join(lines)
def wards_placed(p):
obs = 0 if p.get('obs_placed') is None else p.get('obs_placed')
sents = 0 if p.get('sen_placed') is None else p.get('sen_placed')
return obs + sents
# these are the downloaded_only sections
stat_sections.extend([{
"caption": "Economy",
"stats": [
CoolStat("GPM", avg('gold_per_min')),
CoolStat("XPM", avg('xp_per_min')),
CoolStat("Last Hits/min", avg(lambda p: p['last_hits'] / (1 + (p['duration'] / 60)), 2)),
CoolStat("Neutral Creeps", avg(lambda p: 100 * p.get('neutral_kills', 0) / (1 + p['last_hits'])))
]
},
{
"caption": "Other",
"stats": [
CoolStat("APM", avg('actions_per_min')),
CoolStat("Pings", avg('pings')),
CoolStat("Wards Placed", avg(lambda p: wards_placed(p)))
]
},
{
"caption": "Chat Wheel",
"stats": [
CoolStat(None, chat_wheel_text)
]
},
{
"caption": "All Chat",
"inline": False,
"stats": [
CoolStat("Messages per Game", message_count),
CoolStat(longest_message_heading, longest_message, bold=False)
]
}])
#
# STEP 4: transform these all into embed fields
#
for category in stat_sections:
if category.get("filter_key") and matchfilter.has_value(category.get("filter_key")):
continue # skip this category if its already filtered out by the matchfilter
value = "\n".join(map(lambda s: s.render(), filter(lambda s: s.should_show(), category.get("stats"))))
if value == "":
continue # skip if theres no values to show
embed.add_field(name=category.get("caption"), value=value, inline=category.get("inline", True))
if embed_attachment:
await inter.send(embed=embed, file=image)
else:
await inter.send(embed=embed)
@commands.slash_command()
async def dotagif(self, inter: disnake.CmdInter, match: DotaMatch, start: str, end: str, ms_per_second : int = 100):
"""Creates a gif of a specific part of a dota match
Parameters
----------
match: The ID of the match, a reference to a player, or 'lm'. See '/docs Match Argument` for more info
start: How many minutes into the match to start the gif. ex: 28:37
end: How many minutes into the match to end the gif. ex: 30:30
ms_per_second: How many milliseconds between each frame of the gif (each frame is 1 dota second)
"""
await inter.response.defer()
match = match.match
match_id = match["match_id"]
if not is_parsed(match):
raise MatchNotParsedError(match_id, "get laning info")
stratz_match = await get_stratz_match(match_id)
if not is_stratz_parsed(stratz_match):
raise StratzMatchNotParsedError(match_id)
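# start/end are given as minutes:seconds (e.g. 28:37); get_time turns them into seconds
# so the clip bounds can be compared against the match duration below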
start = int(get_time(start))
end = int(get_time(end))
if end - start > 600:
raise UserError("The length of this clip must be less than 10 minutes")
if ms_per_second < 1 or ms_per_second > 655350:
raise UserError("That is outside the bounds of the `ms_per_second` value")
lastframe = match["duration"] - 1
if start > lastframe and end > lastframe:
raise UserError("The game didn't last that long")
# "https://stratz.com/en-us/match/{match_id}/playback?pb_time={seconds}"
image = disnake.File(await self.create_dota_gif(match, stratz_match, start, end, ms_per_second), "map.gif")
await inter.send(file=image)
@match.sub_command(name="laning")
async def match_laning(self, inter: disnake.CmdInter, match: DotaMatch):
"""Creates gif of the laning stage with a caption
Parameters
----------
match: The ID of the match, a reference to a player, or 'lm'. See '/docs Match Argument` for more info
"""
steamid = match.steamid
match = match.match
match_id = match["match_id"]
if not is_parsed(match):
raise MatchNotParsedError(match_id, "get laning info")
stratz_match = await get_stratz_match(match_id)
if not is_stratz_parsed(stratz_match):
raise StratzMatchNotParsedError(match_id)
player_data = None
if steamid:
player_data = next((p for p in match['players'] if p['account_id'] == steamid), None)
perspective = player_data.get("isRadiant") if player_data else True
embed = disnake.Embed(description=await self.get_lane_stories(match, perspective, True))
embed.title = f"Laning"
embed.url = f"https://stratz.com/en-us/match/{match_id}/playback"
image = disnake.File(await self.create_dota_gif(match, stratz_match, -89, 600, 100), "map.gif")
embed.set_image(url=f"attachment://{image.filename}")
self.set_match_footer(match, embed)
await inter.send(embed=embed, file=image)
@commands.command(aliases=["analyze", "studymatch"])
async def parse(self, ctx, match_id : int = None):
"""Requests that OpenDota parses a match
The input should be the match_id of the match
Note that matches from more than a couple days ago may not be able to be parsed because replay files are not saved that long
Not giving a matchid will make mangobyte attempt to use your last played match"""
if match_id is None:
matchfilter = await MatchFilter.init(None, ctx)
match_id = await get_lastmatch_id(matchfilter)
await ctx.message.add_reaction("⏳")
await ctx.send("⏳ Requesting a parse...", delete_after=5)
try:
data = await httpgetter.post(f"https://api.opendota.com/api/request/{match_id}", errors=opendota_html_errors)
except HttpError as e:
await ctx.message.remove_reaction("⏳", self.bot.user)
if e.code == 400:
await ctx.send("❌ Looks like that's not a valid match id")
return
raise
if data.get("status") == "failed" or data.get("err") is not None:
await ctx.message.remove_reaction("⏳", self.bot.user)
await ctx.send(f"❌ There was an error requesting the parse for match {match_id}")
return
jobId = data["job"]["jobId"]
await asyncio.sleep(3)
seconds_per_check = 20
seconds_till_timeout = 120
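# poll the OpenDota job every 20 seconds; the job endpoint stops returning data once parsing has finished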
while seconds_till_timeout > 0:
data = await opendota_query(f"/request/{jobId}", False)
if data is not None:
await asyncio.sleep(seconds_per_check)
seconds_till_timeout -= seconds_per_check
else:
await ctx.message.remove_reaction("⏳", self.bot.user)
await ctx.message.add_reaction("✅")
await ctx.send(f"✅ Parsing of match {match_id} has completed!", delete_after=10)
return
# if we get to here, timeout
await ctx.message.remove_reaction("⏳", self.bot.user)
await ctx.message.add_reaction("❌")
await ctx.send(f"❌ Parsing of match {match_id} timed out. Try again later or on the opendota site.", delete_after=10)
@commands.slash_command()
async def whoishere(self, inter: disnake.CmdInter, users: str = None, show_ranks: bool = False):
"""Shows the linked steam accounts of anyone who is in voice chat with mango
Parameters
----------
users: Any additional users to show the linked accounts of
show_ranks: Whether or not to show the ranks of the players when showing their steam accounts
"""
if inter.guild is None:
raise UserError("You have to use that command in a server")
logger.info(users)
additional_user_ids = []
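# pull discord user ids out of any raw @mentions passed in the users argument (<@123> or <@!123>)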
if users:
matches = re.findall(r"<@!?(\d+)>", users)
for match in matches:
additional_user_ids.append(int(match))
voice_channel = None
if inter.author.voice and inter.author.voice.channel:
voice_channel = inter.author.voice.channel
else:
audio = self.bot.get_cog("Audio")
audioplayer = await audio.audioplayer(inter, False)
if audioplayer is None or audioplayer.voice_channel is None:
if len(additional_user_ids) == 0:
raise UserError("One of us needs to be in a voice channel for that to work")
else:
voice_channel = audioplayer.voice_channel
members = []
if voice_channel:
members.extend(map(lambda u: u.id, voice_channel.members))
if additional_user_ids:
members.extend(additional_user_ids)
mentions = []
links = []
ranks = []
for user_id in members:
if voice_channel:
if user_id == voice_channel.guild.me.id:
continue
mentions.append(f"<@!{user_id}>")
userinfo = botdata.userinfo(user_id)
if userinfo.steam is None:
links.append("Unknown")
ranks.append("Unknown")
else:
player_info = await opendota_query(f"/players/{userinfo.steam}")
links.append(f"[{player_info['profile']['personaname']}](https://www.opendota.com/players/{userinfo.steam})")
ranks.append(self.get_player_rank(player_info))
if len(mentions) == 0:
raise UserError("There isn't anyone in my voice channel 😢")
#raise UserError("This command is broken right now but my developer is working on fixing it! For now you can mention people manually in the command and it should work.")
embed = disnake.Embed()
embed.add_field(name="Discord", value="\n".join(mentions))
embed.add_field(name="Steam", value="\n".join(links))
if show_ranks:
embed.add_field(name="Rank", value="\n".join(ranks))
await inter.send(embed=embed)
@commands.slash_command()
async def rolesgraph(self, inter: disnake.CmdInter, player: DotaPlayer = None):
"""Gets a graph displaying the dota player's hero roles
Parameters
----------
player: Either a steam32 id, a steam64 id, or an @mention of a discord user who has a steamid set
"""
if not player:
player = await DotaPlayer.from_author(inter)
playerinfo = await opendota_query(f"/players/{player.steam_id}")
matches = await opendota_query(f"/players/{player.steam_id}/matches?limit=30")
if len(matches) == 0:
raise UserError("You haven't played any matches recently")
hero_ids = []
for match in matches:
hero_ids.append(match["hero_id"])
roles = [ "Escape", "Nuker", "Support", "Pusher", "Disabler", "Jungler", "Carry", "Durable", "Initiator" ]
role_scores = dict.fromkeys(roles, 0)
dotabase = self.bot.get_cog("Dotabase")
for heroid in hero_ids:
hero_info = self.hero_info[heroid]
for role, value in hero_info["roles"].items():
role_scores[role] += value
role_scores = [role_scores[role] for role in roles]
# weight it against the biases in the system
role_totals = dict.fromkeys(roles, 0)
for hero_info in self.hero_info.values():
for role, value in hero_info["roles"].items():
role_totals[role] += value
role_totals = role_totals.values()
role_totals_avg = sum(role_totals) / len(role_totals)
role_totals_modifiers = list(map(lambda x: role_totals_avg / x, role_totals))
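# roles that are common across the whole hero pool get scaled down, rare roles get scaled up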
for i in range(len(roles)):
role_scores[i] *= role_totals_modifiers[i]
# normalize so its a percentage based on the highest one
divisor = max(role_scores)
role_scores = list(map(lambda x: x / divisor, role_scores))
embed = disnake.Embed()
embed.set_author(
name=playerinfo["profile"]["personaname"] or "Anonymous",
icon_url=playerinfo["profile"]["avatar"] or default_steam_icon,
url=playerinfo["profile"]["profileurl"] or f"https://www.opendota.com/players/{player.steam_id}")
image = disnake.File(drawdota.draw_polygraph(role_scores, roles), "rolesgraph.png")
embed.set_image(url=f"attachment://{image.filename}")
await inter.send(embed=embed, file=image)
@match.sub_command(name="skillbuild")
async def match_skillbuild(self, inter: disnake.CmdInter, match: DotaMatch):
"""Gets the ability upgrades for a match
Parameters
----------
match: The ID of the match, a reference to a player, or 'lm'. See '/docs Match Argument` for more info
"""
match = match.match
match_id = match["match_id"]
embed = disnake.Embed()
embed.title = f"Match {match_id}"
embed.url = f"https://opendota.com/matches/{match_id}"
embed.description = "Skill Builds"
image = disnake.File(await drawdota.draw_match_ability_upgrades(match), "upgrades.png")
embed.set_image(url=f"attachment://{image.filename}")
self.set_match_footer(match, embed)
await inter.send(embed=embed, file=image)
@match.sub_command(name="graph")
async def match_graph(self, inter: disnake.CmdInter, match: DotaMatch, graphtype: commands.option_enum(OrderedDict(graphtypes)) = "teamdiff"):
"""Creates a graph for a dota match
Parameters
----------
match: The ID of the match, a reference to a player, or 'lm'. See '/docs Match Argument` for more info
graphtype: The type of graph to create
"""
match = match.match
match_id = match["match_id"]
if not is_parsed(match):
raise MatchNotParsedError(match["match_id"], "create a graph")
embed = disnake.Embed()
embed.title = f"Match {match_id}"
embed.url = f"https://opendota.com/matches/{match_id}"
embed.description = next(key for key, value in graphtypes.items() if value == graphtype)
if graphtype == "teamdiff":
lines = [ match["radiant_gold_adv"], match["radiant_xp_adv"] ]
colors = [ "#FFFF00", "#ADD8E6" ]
labels = [ "Gold", "Experience" ]
elif graphtype == "playergold":
playercolors = {
"0": "#3375FF",
"1": "#66FFBF",
"2": "#BF00BF",
"3": "#F3F00B",
"4": "#FF6B00",
"128": "#FE86C2",
"129": "#A1B447",
"130": "#65D9F7",
"131": "#008321",
"132": "#A46900"
}
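# keys are dota player_slot values: 0-4 are the Radiant slots, 128-132 are the Dire slots (in-game player colors)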
lines = []
colors = []
labels = []
for player in match["players"]:
colors.append(playercolors[str(player["player_slot"])] if str(player["player_slot"]) else "#FF0000")
lines.append(player["gold_t"])
labels.append(self.hero_info[player["hero_id"]]["name"] if player["hero_id"] in self.hero_info else "Unknown")
else:
raise UserError("oops, look like thats not implemented yet")
image = disnake.File(drawgraph.drawgraph(lines, colors, labels), "graph.png")
embed.set_image(url=f"attachment://{image.filename}")
self.set_match_footer(match, embed)
await inter.send(embed=embed, file=image)
# @commands.command(aliases=["wrapped"])
async def dotawrapped(self, ctx, player : DotaPlayer = None):
"""Gets the "dota wrapped" summary for the player
This is from the site https://gameishard.gg/dotawrapped/
Yes, I got permission from the guy who made this to include this in mangobyte"""
if not player:
player = await DotaPlayer.from_author(ctx)
wrapped_url = f"https://gameishard.gg/dotawrapped/?id={player.steam_id}"
wrapped_image_url = f"https://gameishard.gg/dotawrapped/assets/images/players/{player.steam_id}.png"
# await thinker.think(ctx.message)
await httpgetter.get(wrapped_url, return_type="text")
# await thinker.stop_thinking(ctx.message)
embed = disnake.Embed()
embed.title = f"Dota 2 Wrapped"
embed.url = wrapped_url
embed.set_image(url=wrapped_image_url)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(DotaStats(bot))
|
[
"utils.drawing.dota.draw_match_ability_upgrades",
"utils.tools.globals.httpgetter.get",
"utils.drawing.dota.create_dota_gif",
"utils.tools.globals.logger.info",
"disnake.ext.commands.slash_command",
"disnake.ext.commands.command",
"utils.drawing.dota.combine_image_halves",
"utils.drawing.dota.create_match_image",
"utils.tools.globals.httpgetter.cache.get",
"utils.other.metastats.get_total_pro_games",
"re.findall",
"utils.drawing.dota.draw_polygraph",
"utils.drawing.dota.draw_matches_table",
"utils.tools.globals.httpgetter.cache.remove",
"disnake.Embed",
"asyncio.sleep",
"utils.drawing.graph.drawgraph",
"utils.tools.globals.botdata.userinfo",
"asyncio.Lock",
"statistics.mean",
"datetime.datetime.fromtimestamp",
"utils.tools.globals.httpgetter.post",
"utils.tools.globals.settings.resource",
"disnake.Color.teal",
"disnake.File",
"math.floor",
"time.time",
"utils.drawing.dota.draw_meta_table",
"collections.OrderedDict"
] |
[((3593, 3626), 'utils.tools.globals.httpgetter.cache.get', 'httpgetter.cache.get', (['url', '"""json"""'], {}), "(url, 'json')\n", (3613, 3626), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((4486, 4519), 'utils.tools.globals.httpgetter.cache.get', 'httpgetter.cache.get', (['url', '"""json"""'], {}), "(url, 'json')\n", (4506, 4519), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((18735, 18759), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (18757, 18759), False, 'from disnake.ext import commands\n'), ((19288, 19312), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (19310, 19312), False, 'from disnake.ext import commands\n'), ((21110, 21134), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (21132, 21134), False, 'from disnake.ext import commands\n'), ((24020, 24044), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (24042, 24044), False, 'from disnake.ext import commands\n'), ((25810, 25834), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (25832, 25834), False, 'from disnake.ext import commands\n'), ((27342, 27366), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (27364, 27366), False, 'from disnake.ext import commands\n'), ((28050, 28074), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (28072, 28074), False, 'from disnake.ext import commands\n'), ((32444, 32468), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (32466, 32468), False, 'from disnake.ext import commands\n'), ((32943, 32967), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (32965, 32967), False, 'from disnake.ext import commands\n'), ((44411, 44435), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (44433, 44435), False, 'from disnake.ext import commands\n'), ((47157, 47208), 'disnake.ext.commands.command', 'commands.command', ([], {'aliases': "['analyze', 'studymatch']"}), "(aliases=['analyze', 'studymatch'])\n", (47173, 47208), False, 'from disnake.ext import commands\n'), ((49091, 49115), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (49113, 49115), False, 'from disnake.ext import commands\n'), ((51450, 51474), 'disnake.ext.commands.slash_command', 'commands.slash_command', ([], {}), '()\n', (51472, 51474), False, 'from disnake.ext import commands\n'), ((3202, 3263), 'utils.tools.globals.httpgetter.get', 'httpgetter.get', (['url'], {'cache': 'cache', 'errors': 'opendota_html_errors'}), '(url, cache=cache, errors=opendota_html_errors)\n', (3216, 3263), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((8212, 8249), 'math.floor', 'math.floor', (['(count * 100 / total_count)'], {}), '(count * 100 / total_count)\n', (8222, 8249), False, 'import math\n'), ((8686, 8706), 'disnake.Color.teal', 'disnake.Color.teal', ([], {}), '()\n', (8704, 8706), False, 'import disnake\n'), ((9102, 9116), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (9114, 9116), False, 'import asyncio\n'), ((9391, 9416), 'utils.other.metastats.get_total_pro_games', 'get_total_pro_games', (['json'], {}), '(json)\n', (9410, 9416), False, 'from utils.other.metastats import get_total_pro_games\n'), ((16362, 16440), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', 
(["match['start_time']"], {'tz': 'datetime.timezone.utc'}), "(match['start_time'], tz=datetime.timezone.utc)\n", (16393, 16440), False, 'import datetime\n'), ((17583, 17645), 'disnake.Embed', 'disnake.Embed', ([], {'description': 'description', 'color': 'self.embed_color'}), '(description=description, color=self.embed_color)\n', (17596, 17645), False, 'import disnake\n'), ((20261, 20298), 'disnake.Embed', 'disnake.Embed', ([], {'color': 'self.embed_color'}), '(color=self.embed_color)\n', (20274, 20298), False, 'import disnake\n'), ((23758, 23814), 'disnake.Embed', 'disnake.Embed', ([], {'description': 'story', 'color': 'self.embed_color'}), '(description=story, color=self.embed_color)\n', (23771, 23814), False, 'import disnake\n'), ((25181, 25196), 'disnake.Embed', 'disnake.Embed', ([], {}), '()\n', (25194, 25196), False, 'import disnake\n'), ((25574, 25616), 'disnake.File', 'disnake.File', (['matches_image', '"""matches.png"""'], {}), "(matches_image, 'matches.png')\n", (25586, 25616), False, 'import disnake\n'), ((26962, 26977), 'disnake.Embed', 'disnake.Embed', ([], {}), '()\n', (26975, 26977), False, 'import disnake\n'), ((27781, 27843), 'disnake.Embed', 'disnake.Embed', ([], {'description': 'description', 'color': 'self.embed_color'}), '(description=description, color=self.embed_color)\n', (27794, 27843), False, 'import disnake\n'), ((30825, 30862), 'disnake.Embed', 'disnake.Embed', ([], {'color': 'self.embed_color'}), '(color=self.embed_color)\n', (30838, 30862), False, 'import disnake\n'), ((32243, 32278), 'disnake.File', 'disnake.File', (['rank_icon', '"""rank.png"""'], {}), "(rank_icon, 'rank.png')\n", (32255, 32278), False, 'import disnake\n'), ((34732, 34769), 'disnake.Embed', 'disnake.Embed', ([], {'color': 'self.embed_color'}), '(color=self.embed_color)\n', (34745, 34769), False, 'import disnake\n'), ((49577, 49595), 'utils.tools.globals.logger.info', 'logger.info', (['users'], {}), '(users)\n', (49588, 49595), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((51208, 51223), 'disnake.Embed', 'disnake.Embed', ([], {}), '()\n', (51221, 51223), False, 'import disnake\n'), ((53142, 53157), 'disnake.Embed', 'disnake.Embed', ([], {}), '()\n', (53155, 53157), False, 'import disnake\n'), ((53959, 53974), 'disnake.Embed', 'disnake.Embed', ([], {}), '()\n', (53972, 53974), False, 'import disnake\n'), ((54892, 54907), 'disnake.Embed', 'disnake.Embed', ([], {}), '()\n', (54905, 54907), False, 'import disnake\n'), ((56856, 56871), 'disnake.Embed', 'disnake.Embed', ([], {}), '()\n', (56869, 56871), False, 'import disnake\n'), ((3925, 3985), 'utils.tools.globals.httpgetter.get', 'httpgetter.get', (['url'], {'cache': '(True)', 'errors': 'opendota_html_errors'}), '(url, cache=True, errors=opendota_html_errors)\n', (3939, 3985), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((4730, 4931), 'utils.tools.globals.httpgetter.get', 'httpgetter.get', (['url'], {'cache': '(True)', 'errors': '{(500): \'Looks like something wrong with the STRATZ api\', (204):\n "STRATZ hasn\'t recieved this match yet. Try again a bit later"}', 'headers': 'auth_header'}), '(url, cache=True, errors={(500):\n \'Looks like something wrong with the STRATZ api\', (204):\n "STRATZ hasn\'t recieved this match yet. 
Try again a bit later"},\n headers=auth_header)\n', (4744, 4931), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((4966, 5022), 'utils.tools.globals.logger.info', 'logger.info', (['"""ClientConnectorError on stratz api result"""'], {}), "('ClientConnectorError on stratz api result')\n", (4977, 5022), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((8885, 8933), 'utils.tools.globals.settings.resource', 'settings.resource', (['"""json/dota_game_strings.json"""'], {}), "('json/dota_game_strings.json')\n", (8902, 8933), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((9215, 9234), 'utils.tools.globals.httpgetter.get', 'httpgetter.get', (['url'], {}), '(url)\n', (9229, 9234), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((25495, 25555), 'utils.drawing.dota.draw_matches_table', 'drawdota.draw_matches_table', (['matches', 'self.dota_game_strings'], {}), '(matches, self.dota_game_strings)\n', (25522, 25555), True, 'import utils.drawing.dota as drawdota\n'), ((29285, 29296), 'time.time', 'time.time', ([], {}), '()\n', (29294, 29296), False, 'import time\n'), ((48359, 48375), 'asyncio.sleep', 'asyncio.sleep', (['(3)'], {}), '(3)\n', (48372, 48375), False, 'import asyncio\n'), ((49649, 49681), 're.findall', 're.findall', (['"""<@!?(\\\\d+)>"""', 'users'], {}), "('<@!?(\\\\d+)>', users)\n", (49659, 49681), False, 'import re\n'), ((50579, 50604), 'utils.tools.globals.botdata.userinfo', 'botdata.userinfo', (['user_id'], {}), '(user_id)\n', (50595, 50604), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((53433, 53476), 'utils.drawing.dota.draw_polygraph', 'drawdota.draw_polygraph', (['role_scores', 'roles'], {}), '(role_scores, roles)\n', (53456, 53476), True, 'import utils.drawing.dota as drawdota\n'), ((55963, 56005), 'utils.drawing.graph.drawgraph', 'drawgraph.drawgraph', (['lines', 'colors', 'labels'], {}), '(lines, colors, labels)\n', (55982, 56005), True, 'import utils.drawing.graph as drawgraph\n'), ((56752, 56799), 'utils.tools.globals.httpgetter.get', 'httpgetter.get', (['wrapped_url'], {'return_type': '"""text"""'}), "(wrapped_url, return_type='text')\n", (56766, 56799), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((3874, 3902), 'utils.tools.globals.httpgetter.cache.remove', 'httpgetter.cache.remove', (['url'], {}), '(url)\n', (3897, 3902), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((4614, 4642), 'utils.tools.globals.httpgetter.cache.remove', 'httpgetter.cache.remove', (['url'], {}), '(url)\n', (4637, 4642), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((11420, 11516), 'utils.drawing.dota.create_dota_gif', 'drawdota.create_dota_gif', (['self.bot', 'match', 'stratz_match', 'start_time', 'end_time', 'ms_per_second'], {}), '(self.bot, match, stratz_match, start_time,\n end_time, ms_per_second)\n', (11444, 11516), True, 'import utils.drawing.dota as drawdota\n'), ((18531, 18565), 'utils.drawing.dota.create_match_image', 'drawdota.create_match_image', (['match'], {}), '(match)\n', (18558, 18565), True, 'import utils.drawing.dota as drawdota\n'), ((20852, 20886), 'utils.drawing.dota.create_match_image', 'drawdota.create_match_image', (['match'], {}), '(match)\n', (20879, 20886), True, 'import utils.drawing.dota as drawdota\n'), ((21755, 21806), 'collections.OrderedDict', 'OrderedDict', (["{'Radiant': 
'radiant', 'Dire': 'dire'}"], {}), "({'Radiant': 'radiant', 'Dire': 'dire'})\n", (21766, 21806), False, 'from collections import OrderedDict\n'), ((27880, 27923), 'utils.drawing.dota.draw_meta_table', 'drawdota.draw_meta_table', (['sorted_json', 'json'], {}), '(sorted_json, json)\n', (27904, 27923), True, 'import utils.drawing.dota as drawdota\n'), ((36487, 36498), 'time.time', 'time.time', ([], {}), '()\n', (36496, 36498), False, 'import time\n'), ((47815, 47916), 'utils.tools.globals.httpgetter.post', 'httpgetter.post', (['f"""https://api.opendota.com/api/request/{match_id}"""'], {'errors': 'opendota_html_errors'}), "(f'https://api.opendota.com/api/request/{match_id}', errors=\n opendota_html_errors)\n", (47830, 47916), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((54137, 54180), 'utils.drawing.dota.draw_match_ability_upgrades', 'drawdota.draw_match_ability_upgrades', (['match'], {}), '(match)\n', (54173, 54180), True, 'import utils.drawing.dota as drawdota\n'), ((54477, 54500), 'collections.OrderedDict', 'OrderedDict', (['graphtypes'], {}), '(graphtypes)\n', (54488, 54500), False, 'from collections import OrderedDict\n'), ((4079, 4107), 'utils.tools.globals.httpgetter.cache.remove', 'httpgetter.cache.remove', (['url'], {}), '(url)\n', (4102, 4107), False, 'from utils.tools.globals import botdata, httpgetter, logger, settings\n'), ((36081, 36128), 'utils.drawing.dota.combine_image_halves', 'drawdota.combine_image_halves', (['avatar1', 'avatar2'], {}), '(avatar1, avatar2)\n', (36110, 36128), True, 'import utils.drawing.dota as drawdota\n'), ((48559, 48591), 'asyncio.sleep', 'asyncio.sleep', (['seconds_per_check'], {}), '(seconds_per_check)\n', (48572, 48591), False, 'import asyncio\n'), ((30332, 30363), 'statistics.mean', 'statistics.mean', (['activity_delta'], {}), '(activity_delta)\n', (30347, 30363), False, 'import statistics\n'), ((30445, 30491), 'statistics.mean', 'statistics.mean', (['activity_delta[:recent_count]'], {}), '(activity_delta[:recent_count])\n', (30460, 30491), False, 'import statistics\n')]
|
# import lookml as lookml
# # import src.lookml.lang as lang
# # import src.lookml.project as Project
# import lookml.lkml as lkml
# from lookml import lookml,lkml
# from lookml.common import project
import lookml
import lookml.lkml as lkml
import unittest, copy, json
from pprint import pprint
import warnings
import configparser
config = configparser.ConfigParser()
import pprint, base64
config.read('lookml/tests/.conf/settings.ini')
#P2: subtraction hooks / warnings
#P3: put colin's recursive sql table finder in the library
#P2: test coverage for sets
#P3: test coverage for manifest files
#P2: organize the tests
class testAllProps(unittest.TestCase):
def setUp(self):
self.proj = lookml.Project(path='.tmp/testAllProps')
self.test_model_file = self.proj.new_file('test_model.model.lkml')
self.test_model_file.write()
def type_for_value(self,t:dict):
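# returns a representative test value for a given pylookml property type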
map = {
'sql':'public.order_items'
,'yesno':'yes'
,'string':'test_string'
,'string_unquoted':'test_string_unquoted'
,'include':''
,'options':''
,'list_unquoted':['a','b','c']
,'html':'<b>{{value}}</b>'
,'options_quoted':''
,'filters':[{'field1':'%val%'},{'field2':'>1'}]
,'expression':''
,'list_quoted':['x','y','z']
,'anonymous_construct':{}
,'named_construct':{'name':'foo'}
,'named_construct_single':{'name':'foo'}
,'anonymous_construct_plural':[{},{}]
,'sorts':''
}
return map[t['type']]
def test_view(self):
v = lookml.View('test')
_allowed_children = lookml.lib.language_data._allowed_children._allowed_children
config = lookml.lib.language_data.config.config
for child in _allowed_children['view']:
try:
v.__setattr__(child,self.type_for_value(config[child]['view']))
except:
pass
v + lookml.Dimension('foo')
for child in _allowed_children['dimension']:
try:
v.foo.__setattr__(child,self.type_for_value(config[child]['dimension']))
except:
pass
self.test_model_file + v
def tearDown(self):
self.test_model_file.write()
class testMain(unittest.TestCase):
'''
Read a view file, test its methods and manipulation
'''
def setUp(self):
pass
def test_model_file(self):
pm = lkml.load(open('.tmp/pylookml/eav_example/eav.model.lkml'))
p = lookml.Model(pm)
print(p)
def test_parse_print(self):
self.parsed_view = lkml.load(open('lookml/tests/files/basic_parsing/basic.view.lkml'))
self.myView = lookml.View(self.parsed_view['views'][0])
print(self.myView)
def test_filters(self):
self.parsed_view = lkml.load(open('lookml/tests/files/basic_parsing/basic.view.lkml'))
self.myView = lookml.View(self.parsed_view['views'][0])
self.myView.sum_foo.filters.foo.contains('test')
self.myView + '''
measure: total_sales {type: sum sql: ${TABLE}.sales ;;}
'''
self.myView.total_sales + 'filters: [foo:">100"]'
self.myView.total_sales.filters + {'bar':'>100'}
self.assertEqual(self.myView.total_sales.filters.bar.value,'>100')
self.assertEqual(self.myView.total_sales.filters.foo.value,'>100')
self.assertEqual(self.myView.sum_foo.filters.foo.value,'%test%')
def test_json_serialization(self):
self.parsed_view = lkml.load(open('lookml/tests/files/basic_parsing/basic.view.lkml'))
self.myView = lookml.View(self.parsed_view['views'][0])
self.assertIsNotNone(self.myView._json())
print(self.myView._json())
def test_refs(self):
self.parsed_view = lkml.load(open('lookml/tests/files/basic_parsing/basic.view.lkml'))
self.myView = lookml.View(self.parsed_view['views'][0])
for f in self.myView._fields():
#full reference
print('full ref __ref__: ',f.__ref__)
#short reference
print('short ref __refs__: ',f.__refs__)
#full reference -- regex escaped
print('full ref regex __refre__: ',f.__refre__)
#Short reference -- regex escaped
print('short ref regex __refsre__: ',f.__refsre__)
#Raw Reference
print('raw full ref __refr__: ',f.__refr__)
#Raw refence short
print('raw ref short __refrs__: ',f.__refrs__)
#Raw Reference regex
print('raw ref regex __refrre__: ',f.__refrre__)
def test_addition(self):
tmp = lookml.View('''
view: foo {
derived_table: {
explore_source: order_items {
column: id { field: id }
column: id_2 { field: id }
}
}
}
''')
tmp.derived_table.explore_source.column + 'column: foo {}'
tmp.derived_table.explore_source + 'limit: 500'
self.assertTrue('column: id_2 ' in str(tmp.derived_table.explore_source))
print(tmp)
def test_tags(self):
self.parsed_view = lkml.load(open('lookml/tests/files/basic_parsing/basic.view.lkml'))
self.myView = lookml.View(self.parsed_view['views'][0])
self.myView.transaction.tags + ['tag5','tag6','tag7','tag8']
self.myView.transaction.tags - 'tag7'
self.myView.transaction.tags - ['tag6','tag4']
self.assertTrue('tag3' in self.myView.transaction.tags)
for tag in self.myView.transaction.tags:
if tag == 'tag8':
self.myView.transaction.tags - tag
self.assertCountEqual(self.myView.transaction.tags,['tag1','tag2','tag3','tag5'])
def test_adhoc_model_test(self):
x = '''
connection: "my_data"
view: foo {
dimension: a {
type: string
primary_key: yes
}
dimension: b {
#primary_key: yes
}
dimension: c {}
}
view: bar {
dimension: x {}
dimension: y {}
dimension: z {}
}
explore: bar {
join: foo {}
join: test12 {}
}
explore: foo {
join: bar {}
aggregate_table: blah {
query: {
dimensions: [dim1,dim2,dim3]
filters: [foo:"%myfoo%",bar:"mybar%"]
sorts: [dim1: asc]
pivots: [dim4]
}
materialization: {
datagroup_trigger: foo
}
}
}
'''
parsed = lkml.load(x)
# print(parsed)
m = lookml.Model(parsed)
# print(str(m.__dict__))
m.views.foo.a.primary_key.value = 'no'
m.views.foo.b.primary_key = 'yes'
print(str(m))
# print(str(m.explores['foo']))
# print(str(m.explores.foo))
def test_file_model_binding(self):
proj = lookml.Project(path='lookml/tests/files/basic_parsing')
x = proj.file('basic.model.lkml')
cool = ['test123','test456','test890']
x.explores['trip'] + ''.join([f'''
join: {item} {{}}
''' for item in cool])
self.assertTrue('test890' in str(x.explores.trip))
def test_other(self):
self.parsed_view = lkml.load(open('lookml/tests/files/basic_parsing/basic.view.lkml'))
self.myView = lookml.View(self.parsed_view['views'][0])
# if isinstance(self.myView.sum_foo.sql,lookml.prop):
# if not isinstance(self.myView.sum_foo.sql,tuple()):
# if (True if self.myView.sum_foo.sql._type() == '' else False):
# print('wow')
# pprint(self.myView.transaction.__dict__)
# print(self.myView.foo.link[0])
# if 'transaction' in self.myView:
# print(self.myView.transaction)
#P1: list size changed during iteration unless call parentheses are added
# for i in self.myView.bar():
# print(i)
self.assertTrue(self.myView.suggestions)
# print(type(self.myView.transaction.tags))
# print(lang.props._allowed_children['dimension_group'])
# print(self.myView.derived_table.sql.value)
# for f in self.myView(type=lookml.Field):
# if f.type.value == 'string':
# self.myView + f'measure: sum_{f.name} {{ type: sum sql: {f.__refs__};; }}'
# print(lang.props._allowed_children['dimension'])
# self.myView.sum_foo.direction = 'row'
# for field in self.myView(type=lookml.Field):
# if '${TABLE}' in field.sql:
# print(field.name,": ", field.sql)
# for f in self.myView._first_order_fields():
# print(f.sql)
# print(self.myView.__dict__)
# print(self.myView.set.__dict__)
# print(self.myView.set.set1)
# self.myView.set.set1.fields + 'foo'
print(self.myView.foo.link[0].url.value)
# print(self.myView)
# for p in self.myView.bar.__iter__(type=lookml.prop):
# for p in self.myView.transaction(type=lookml.prop, sub_type='timeframes', exclude_subtype='timeframes'):
# # if isinstance(p,lookml.prop_list_unquoted):
# print(p)
def test_plural_anonymous_constructs(self):
raw = '''
view: x {
derived_table: {
explore_source: order_items {
column: id { field: order_items.id }
column: date { field: order_items.created_date }
column: status { field: order_items.status }
bind_filters: {
from_field: created_date
to_field: date
}
bind_filters: {
from_field: order_items.id
to_field: id
}
bind_filters: {
from_field: order_items.status
to_field: status
}
}
}
dimension: date { type: date }
dimension: id { type: number }
dimension: status {
type: string
link: {
url: "http://facebook.com"
}
link: {
url: "http://yahoo.com"
}
link: {
url: "http://myspace.com"
}
}
measure: count { type: count }
}
'''
x = lookml.View(raw)
self.assertTrue(x.derived_table.explore_source.bind_filters[0].from_field.value=='created_date')
i = 0
for item in x.derived_table.explore_source.bind_filters:
i +=1
self.assertEqual(i,3)
x.status.link + 'link: { url: "http://foo.com" }'
# print(x.status.link)
self.assertTrue(x.status.link[3].url.value=='http://foo.com')
x.status.link[3].remove()
i=0
#P3: make this a list over the generator and len()
for link in x.status.link:
i += 1
self.assertEqual(i,3)
def test_anonymous_construct(self):
#P1 support addition like lookml objects / pull out common addition functions
# add, insert, add_hook, sub, subhook etc
pass
def test_all_subscriptability(self):
proj = lookml.Project(
path='lookml/tests/files/pylookml_test_project'
)
print(proj['views/01_order_items.view.lkml']['views']['order_items']['id'].primary_key.value)
# print(proj['order_items.view.lkml']['views'])
#test file from project
#test view/explore/prop from file/model
#test field/prop from view
#test prop from field
def test_constructor(self):
a = lookml.View('''
view: foo {
sql_table_name: public.order_items ;;
dimension: foo {}
measure: count {}
}
''')
self.assertTrue(isinstance(a,lookml.View))
self.assertEqual(a.name,'foo')
b = lookml.View('foo')
self.assertTrue(isinstance(b,lookml.View))
self.assertEqual(b.name,'foo')
c = lookml.View({'name':'foo'})
self.assertTrue(isinstance(c,lookml.View))
self.assertEqual(c.name,'foo')
with self.assertRaises(Exception) as context:
lookml.View('''
view: foo {
sql_table_name: public.order_items ;;
dimension: foo {}
measure: count {}
}
view: bar {}
''')
self.assertTrue('contains more than one view' in str(context.exception))
#P3 does not throw the right exception, something is wrong with the ws.view_pattern and checker function
with self.assertRaises(Exception) as context:
lookml.View('explore: foo {}')
# self.assertTrue('contains more than one view' in str(context.exception))
x = lookml.Dimension('foo')
y = lookml.Measure('bar')
z = lookml.Filter('baz')
aa = lookml.Parameter('fizz')
ab = lookml.Dimension_Group('buzz')
print(x,y,z,aa,ab)
x = lookml.Dimension('dimension: foo { type: string }')
y = lookml.Measure('measure: bar { type: count }')
z = lookml.Filter('filter: baz { type: date }')
aa = lookml.Parameter('parameter: fizz { type: unquoted }')
ab = lookml.Dimension_Group('dimension_group: buzz { type: time }')
print(x,y,z,aa,ab)
exp = lookml.Explore('my_explore')
# jn = lookml.Join('my_join')
# mdl = lookml.Model('my_model')
# mnfst = lookml.Manifest('my_manifest')
# print(exp,jn,mdl,mnfst)
print(exp)
exp1 = lookml.Explore('''explore: my_explore {
join: foo {}
}''')
print(exp1)
# mdl1 = lookml.Model('conection: "cool" ')
# mnfst1 = lookml.Manifest('project_name: "example"')
# print(exp1,jn1,mdl1,mnfst1)
# print(exp1,jn1)
x = lookml.Dimension('dimension: +mydim1234567590 {}')
        #P1 current validation allows + to be on the dimension and other non-extendable objects
def test_sql_enahancement_methods(self):
tmp = lookml.View(
'''
view: tmp {
dimension: foo {
sql: ${TABLE}.foo ;;
}
}
'''
)
#P3: add arguments and date casting
tmp.foo.sql.nvl()
self.assertTrue('nvl' in tmp.foo.sql.value)
print(tmp)
def test_more_add_hooks(self):
#P2: other rules that should be run: various coexistence errors
#P3: run coexistence errors and warnings and put to CSV
foo = lookml.View('view: foo {}')
foo + 'dimension: foo {}'
# throws an assertion error:
# foo.foo + 'type: count'
def test_adding_joins_to_explore(self):
#P1 test adding joins to explores
pass
def test_adding_fields_to_view(self):
#P1 test adding fields to a view
pass
def test_set_property(self):
x = lookml.Dimension('''
dimension: x {}
''')
x.setProperty('sql','${TABLE}.foo')
self.assertEqual(x.sql.value,'${TABLE}.foo')
y = lookml.View('y')
y.setProperty('sql_table_name', 'public.order_items')
self.assertEqual(y.sql_table_name.value,'public.order_items')
def test_children_ancestors(self):
        # field.children() -> direct children
        # field.children_all() -> all children, across generations
        # field.dependency_chain(view=self.parent) -> tuple chain
        # view.print_dependency_map(format='human' or 'csv') -> print out of dependency chains in the view
        # field.print_dependency_map() -> print out of the dependency chain at the field level
        # field.ancestors() -> any direct ancestors of the field
        # field.ancestors_all() -> any fields referenced, up to ${TABLE}
        # field.print_ancestor_map() -> prints all fields which are on the ancestor chain
v = lookml.View('''
view: v {
dimension: a {}
dimension: c { sql: ${a} ;; }
dimension: d { sql: ${c} ;; }
dimension: e { sql: ${d}/${a} ;; }
dimension: b { sql: ${a} ;; }
}
''')
# v.print_dependency_map()
for i in v.e.ancestors():
print(i.__refs__)
def test_has_prop(self):
x = lookml.Dimension('''
dimension: x {
description: "hello"
}
''')
self.assertTrue(x.hasProp('description'))
self.assertFalse(x.hasProp('view_label'))
def test_dependency_map(self):
parsed_view = lkml.load(open('lookml/tests/files/the_look/views/01_order_items.view.lkml'))
myView = lookml.View(parsed_view['views'][0])
myView.print_dependency_map()
def test_remove_fields(self):
x = lookml.View('''
view: x {
dimension: a {}
dimension: b { sql: IFNULL(${a},0) ;;}
dimension: c { sql: ${a} ;; }
dimension: x {}
dimension: y {}
dimension: z {}
dimension: zz {}
}
''')
with self.assertWarns(UserWarning) as wrn:
y = ['a','b','c']
for i in y:
x - i
for w in wrn.warnings:
self.assertTrue('a had dependencies:' in str(w))
self.assertTrue('a' not in x)
self.assertTrue('b' not in x)
self.assertTrue('c' not in x)
with self.assertWarns(UserWarning) as wrn:
x - 'sql_table_name'
for w in wrn.warnings:
self.assertTrue('sql_table_name did not exist on x' in str(w))
x + 'sql_table_name: order_items ;;'
x - 'sql_table_name'
x.removeField(x.x)
x.removeField('y')
x - x.z
x - 'zz'
self.assertTrue('x' not in x)
self.assertTrue('y' not in x)
self.assertTrue('z' not in x)
self.assertTrue('zz' not in x)
def test_set_name_safe(self):
x = lookml.View('''
view: x {
dimension: a {}
dimension: b { sql: NVL(${a}) ;;}
dimension: c { sql: ${a} ;; }
}
''')
x.a.setName_safe('foo')
x.foo.setName_replace_references('bar')
self.assertTrue('bar' in x)
self.assertTrue('${bar}' in x.b.sql.value)
self.assertTrue('${bar}' in x.c.sql.value)
#P3: message / comments
# def test_messages_comments(self): pass
def test_legacy_methods(self):
x = lookml.View('''
view: x {
dimension: test456 {
type: number
tags: ["my_tag"]
}
dimension: test123 {
type: string
}
measure: a {}
measure: b {}
dimension: test789 {}
filter: c {}
filter: d {}
parameter: z {}
parameter: y {
tags: ["my_tag"]
}
dimension_group: created {}
}
''')
self.assertTrue('test456' in x)
self.assertFalse('imnotthere' in x)
#
fieldNames = list()
for i in x.fieldNames():
fieldNames.append(i)
result = ['test456','test123','test789','a','b','c','d','z','y','created']
self.assertEqual(fieldNames,result)
#
testSort = list()
for i in x.getFieldsSorted():
testSort.append(i.name)
result = ['c','created','d','test123','test456','test789','y','z','a','b']
self.assertEqual(result, testSort)
#
filterList = list()
for i in x.filters():
filterList.append(i.name)
result = ['c','d']
self.assertEqual(result, filterList)
#
paramList = list()
for i in x.parameters():
paramList.append(i.name)
result = ['z','y']
self.assertEqual(result, paramList)
#
dimList = list()
for i in x.dimensions():
dimList.append(i.name)
result = ['test456','test123','test789']
self.assertEqual(result, dimList)
#
measList = list()
for i in x.measures():
measList.append(i.name)
result = ['a','b']
self.assertEqual(result, measList)
#
dimGroupList = list()
for i in x.dimensionGroups():
dimGroupList.append(i.name)
result = ['created']
self.assertEqual(result, dimGroupList)
#
byTagList = list()
for i in x.getFieldsByTag('my_tag'):
byTagList.append(i.name)
result = ['test456','y']
self.assertEqual(byTagList,result)
#
byTypeList = list()
for i in x.getFieldsByType('string'):
byTypeList.append(i.name)
result = ['test123','test789','c','d','z','y']
self.assertEqual(byTypeList,result)
#
x.addDimension('orderItemID')
self.assertTrue('order_item_id' in x)
testDim = lookml.Dimension('dimension: test_dim {}')
x.addDimension(testDim)
self.assertTrue('test_dim' in x)
#
x.addAverage(x.test456)
self.assertTrue('test456_avg' in x)
self.assertEqual(x.test456_avg.sql.value, '${x.test456}')
#
x.test123.setPrimaryKey()
self.assertTrue(x._View__pk.name == 'test123')
x.test123.setName('new')
self.assertTrue(x._View__pk.name == 'new')
x.new.unSetPrimaryKey()
x.test456.setPrimaryKey()
self.assertTrue(x._View__pk.name == 'test456')
#
x.new.setType('number')
x.new.setDescription('this is my new dimension')
new_view = lookml.View('''
view: new_view {
view_label: "hello"
}
''')
x.new.setViewLabel(new_view.view_label.value)
self.assertEqual('hello',x.new.view_label.value)
x.test456.setDBColumn( 'mycol', changeIdentifier=True)
self.assertTrue('mycol' in x)
self.assertEqual(x.mycol.sql.value,"${TABLE}.`mycol`")
x.y.removeTag('my_tag')
x.z.setString()
x.a.addLink('http://yahoo.com{% condition foo %}{{value | uri_encode }}{% endcondition %}','Go to Yahoo')
def test_refinements(self):
#parse a file with multiple refinements on the same object
#confirm they collapse to a single view file
proj = lookml.Project(path='lookml/tests/files/basic_parsing')
test_file = proj.file('refine.view.lkml')
print(test_file)
self.assertTrue('first' in test_file.views['+test1'])
self.assertTrue('second' in test_file.views['+test1'])
def test_filtered_measure(self):
meas = lookml.Measure('total_money')
meas.setProperty('group_label','foo')
# meas.properties.addProperty('filters',{'field':'order_items.price','value':'>100'})
# meas.setProperty('filters',[{'field':'order_items.price','value':'>100'}])
meas + ('filters: { field: order_items.price value:">' + str(5) + '" }')
meas.setViewLabel('test_viewLabel')
# print(filt)
# meas + filt
print(meas)
def test_add_micro_units(self):
testView = lookml.View('test_view')
testView + 'dimension: id {}'
testView + 'dimension: success {}'
testView + '''
derived_table: {
explore_source: order_items {
column: order_id {field: order_items.order_id_no_actions }
column: items_in_order { field: order_items.count }
column: order_amount { field: order_items.total_sale_price }
column: order_cost { field: inventory_items.total_cost }
column: user_id {field: order_items.user_id }
column: created_at {field: order_items.created_raw}
column: order_gross_margin {field: order_items.total_gross_margin}
derived_column: order_sequence_number {
sql: RANK() OVER (PARTITION BY user_id ORDER BY created_at) ;;
}
}
datagroup_trigger: ecommerce_etl
}
'''
print(testView)
def test_adding_property(self):
v = lookml.View('test')
v + '''
derived_table: {
explore_source: order_items {
column: order_id {field: order_items.order_id_no_actions }
column: items_in_order { field: order_items.count }
column: order_amount { field: order_items.total_sale_price }
column: order_cost { field: inventory_items.total_cost }
column: user_id {field: order_items.user_id }
column: created_at {field: order_items.created_raw}
column: order_gross_margin {field: order_items.total_gross_margin}
derived_column: order_sequence_number {
sql: RANK() OVER (PARTITION BY user_id ORDER BY created_at) ;;
}
}
datagroup_trigger: ecommerce_etl
}
'''
v + 'dimension: id {}'
v.id + 'sql: ${TABLE}.id ;;'
for item in ('a', 'b', 'c'):
v + f'''
dimension: {item}_id {{
sql: {v.id.__refs__} + {item} ;;
}}'''
v + f'''measure: sum_of_{item} {{
type: sum
sql: ${{{item}_id}};;
}}
'''
for f in v.measures():
if f.type.value == 'sum':
f.addTag('my function is to add')
ex = lookml.Explore(v.name)
ex + '''join: test_2 {
from: test
type: left_outer
relationship: one_to_many
sql_on: ${testid} = ${test_2.id};;
}
'''
ex.join.test_2 + 'sql_on: foo ;;'
# F = lookml.File(ex)
# F + v
print(v)
#P2: obtain the real list of timezones from Looker itself
#P3: add CLI support
#P3: option to omit defaults
#P3: add looker version numbers to the lang map and throw warning if prop depreicated or error if not yet supported
#P1: merge refinements that are present in a single file (currently only last)
# see lookml/tests/files/pylookml_test_project/models/queries_for_order_items.view.lkml
class testProjFile(unittest.TestCase):
def setUp(self):
pass
def pylookml_test_add_file(self,proj):
nf = proj.new_file('hello.view.lkml')
nf + lookml.View('''
view: hello {}
''')
nf.write()
def pylookml_test_delete_file(self,proj):
x = proj.new_file('hello.view.lkml')
f = proj.file('hello.view.lkml')
f.delete()
x = proj.new_file('hello2.view.lkml')
x.write()
proj.delete_file('hello2.view.lkml')
def pylookml_test_mutate_file(self,proj):
        #P1: this should use file() rather than new_file(); the method
        # should throw a warning when new_file() is called for an already existing file
nf = proj.file('scratch/subfolder/pylookml_scratch.view.lkml')
if isinstance(proj,lookml.ProjectSSH):
commitMessage = proj._commit_message
t = f'SSH {proj._path}'
elif isinstance(proj,lookml.ProjectGithub):
commitMessage = proj._commit_message
t = f'pyGithub {proj._path}'
else:
commitMessage = 'local filesystem'
t = f'direct filesystem {proj._path}'
nf.views.pylookml_scratch.test.description = f'{t} {commitMessage}'
#write the file
nf.write()
nf.views.pylookml_scratch.test.sql = '${hello}'
nf.write()
#confirm write
abc = proj.file('scratch/subfolder/pylookml_scratch.view.lkml')
self.assertTrue('test' in abc.views.pylookml_scratch)
def pylookml_test_project_routine(self,proj):
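        # Shared routine run against local-path, GitHub and SSH backed projects:
        # exercises deep [] access, the .file() accessor, file mutation, and add/delete round trips.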
# self.assertEqual(len(list(proj.view_files())),19)
#access a file's deep object via [] syntax
# try:
self.pylookml_test_delete_file(proj)
# except:
# pass
val_a = proj['views/01_order_items.view.lkml']['views']['order_items']['order_id'].action[0].url.value
self.assertEqual(val_a,'https://hooks.zapier.com/hooks/catch/1662138/tvc3zj/')
#access a file's deep object via .file() syntax
val_b = proj.file('views/01_order_items.view.lkml').views.order_items.order_id.action[0].url.value
self.assertEqual(val_b,'https://hooks.zapier.com/hooks/catch/1662138/tvc3zj/')
#mutate a file
self.pylookml_test_mutate_file(proj)
self.pylookml_test_add_file(proj)
#proj.dir_list()
self.pylookml_test_delete_file(proj)
#test optional pyyaml
def test_project_from_local_path(self):
#connect
proj = lookml.Project(
path='lookml/tests/files/pylookml_test_project'
)
# pprint.pprint(proj._index)
self.pylookml_test_project_routine(proj)
def test_project_from_github(self):
proj = lookml.Project(
repo= "pythonruss/pylookml_test_project",
access_token=config['project1']['access_token'],
index_whole=True
)
self.pylookml_test_project_routine(proj)
def test_project_from_github_fast(self):
proj = lookml.ProjectGithub(
repo= "pythonruss/pylookml_test_project",
access_token=config['project1']['access_token'],
index_whole=False
)
self.pylookml_test_project_routine(proj)
def test_github_fast_file_exists_on_write(self):
proj = lookml.ProjectGithub(
repo= "pythonruss/pylookml_test_project",
access_token=config['project1']['access_token'],
index_whole=False
)
x = proj.file('views/01_order_items.view.lkml')
x.views.order_items + 'dimension: test_github_fast_file_exists_on_write {}'
x.write()
def test_sha_not_supplied(self):
#P2 reconstruct this test, was adhoc and stopped when it passed
proj = lookml.ProjectGithub(
repo= "llooker/aes_demo_final",
access_token=config['autotune']['access_token'],
branch=config['autotune']['branch'],
index_whole=False
)
if proj._exists(f'pylookml/bike_share_aggs.view.lkml'):
# click.echo('passed exists check')
f = proj.file(f'pylookml/bike_share_aggs.view.lkml')
# click.echo(f.sha)
else:
f = proj.new_file(f'pylookml/bike_share_aggs.view.lkml')
f.write()
def test_project_from_ssh(self):
#connect
proj = lookml.ProjectSSH(
git_url='git@github.com:pythonruss/pylookml_test_project.git'
,looker_project_name='pylookml_test_project'
,looker_host='https://dat.dev.looker.com/'
)
self.pylookml_test_project_routine(proj)
proj._git.add()
proj.commit()
proj._git.pushRemote()
proj.deploy()
def test_orphan_file(self):
x = lookml.File('lookml/tests/files/basic_parsing/basic.view.lkml')
self.assertTrue(isinstance(x,lookml.File))
def test_adhoc_bryan(self):
project = lookml.Project(
repo= "bryan-at-looker/lots_of_access_grants",
# access_token="",
looker_host="https://sandbox.dev.looker.com/",
looker_project_name="lots_of_access_grants",
branch="dev-bryan-weber-ymjr"
)
for pf in project.files():
print(pf.path)
class testOtherFiles(unittest.TestCase):
# Objective / coverage
# Read all file types from filesystem:
# model,
# view,
# other lkml files,
# manifest,
# dashboard,
# js,
# json,
# maplayer
# Read Files with isolated types of special syntax:
# filters: old and new syntax
# materializations
# extensions
# refinements
def setUp(self):
pass
def test_parsing_aggregate_tables(self):
x = lookml.File('lookml/tests/files/basic_parsing/agg.model.lkml')
# x = lkml.load(open('lookml/tests/files/basic_parsing/agg.model.lkml','r', encoding="utf-8"))
# print(str(x))
print(x.explores.foo.aggregate_table.bar)
def test_model_file(self):
self.model_file = lookml.File('lookml/tests/files/basic_parsing/basic.model.lkml')
print(str(self.model_file))
def test_view_refinement(self):
x = lookml.File('lookml/tests/files/basic_parsing/refine.view.lkml')
print(str(x))
def test_other_lkml_file(self):
pass
def test_manifest_file(self):
x = lookml.File('lookml/tests/files/basic_parsing/manifest.lkml')
#works
self.assertEqual(x.remote_dependency['ga360'].url.value,'https://github.com/llooker/google_ga360')
self.assertEqual(x.remote_dependency.ga360.url.value,'https://github.com/llooker/google_ga360')
# print(x.remote_dependency['ga360'])
# print(x.remote_dependency.ga360)
self.assertTrue('ga360' in x.remote_dependency)
for remote in x.remote_dependency:
print(remote.override_constant)
# print(type(x.contents))
#anon: local_dependency, visualization,
#named: constant, application,
#other props / project name etc
# print(str(x.contents))
def test_dashboard_file(self):
lookml.lib.project.LOOKML_DASHBOARDS = True
proj = lookml.Project(path='lookml/tests/files/the_look')
x = proj.file('dashboards/brand_lookup.dashboard.lookml')
self.assertEqual(x.content[0]['dashboard'],'brand_lookup')
#P1 fix writing, currently lookml dashboards should be considered read only
x.content[0]['dashboard'] = 'foo'
# x.write()
def test_js_file(self):
pass
def test_json_file(self):
pass
def test_maplayer_topojson(self):
pass
    #P1: Looping through view.views produces a [None]
# Explore source??
class testExceptions(unittest.TestCase):
def setup(self): pass
def test_invalid_lookml_attribute(self):
with self.assertWarns(UserWarning) as wrn:
testView = lookml.View('test')
testView + '''
dimension: foo {
xxx: "yyy"
}
'''
# Verify
for w in wrn.warnings:
self.assertTrue('xxx skipped. xxx not a valid attribute' in str(w))
def test_duplicate_primary_key(self):
#test to ensure that primary key is added, then if another primary key is set it throws an error, unless
#the original primary key was set to no prior to the operation
lookml.OMIT_DEFAULTS = True
x = lookml.View('''
view: x {
dimension: x {
primary_key: yes
}
dimension: y {}
}
''')
with self.assertRaises(lookml.lib.lang.DuplicatePrimaryKey) as context:
x.y.primary_key = 'yes'
x.x.primary_key = 'no'
# print(x)
x.y.primary_key = 'yes'
#P2: document that this is the way to print and __pk does not return the pk
# print(x)
# print(x._View__pk)
def test_coexistance_error(self):
#checks to ensure the add_hook / exception process is working
x = lookml.View('foo')
x.sql_table_name = 'public.foo'
with self.assertRaises(lookml.lib.lang.CoexistanceError):
x + 'derived_table: { sql: select * from foo ;; }'
del x.sql_table_name
x + 'derived_table: { sql: select * from foo ;; }'
with self.assertRaises(lookml.lib.lang.CoexistanceError):
x.sql_table_name = 'public.foo'
class testWalks(unittest.TestCase):
def setUp(self):
self.proj = lookml.Project(path='lookml/tests/files/basic_parsing')
self.view = self.proj.file('basic.view.lkml')
def test_walking(self):
for explore in self.view.explores:
assert explore.name in ['basic']
assert type(explore.join) == lookml.core.prop_named_construct
def test_walk_explore(self):
explore = self.view.explores.basic
assert type(explore.join.cool) == lookml.core.prop_named_construct_single
def test_walk_join(self):
join = self.view.explores.basic.join.cool
assert type(join) == lookml.core.prop_named_construct_single
assert type(join.relationship) == lookml.core.prop_options
assert join.relationship.value == 'many_to_one'
assert type(join.type) == lookml.core.prop_options
assert join.type.value == 'left_outer'
assert type(join.sql_on) == lookml.core.prop_sql
assert join.sql_on.value == '${basic.cool_id} = ${cool.basic_id}'
# assert join.from == lookml.prop.string_unquoted
def test_walk_view(self):
view = self.view.views.basic
assert type(view.extends) == lookml.core.prop_list_unquoted
assert view.extends.value == ['base']
assert type(view.extension) == lookml.core.prop_options
assert view.extension.value == 'required'
assert type(view.final) == lookml.core.prop_yesno
assert view.final.value == 'no'
assert type(view.label) == lookml.core.prop_string
assert view.label.value == 'basic'
assert type(view.view_label) == lookml.core.prop_string
assert view.view_label.value == 'basic'
assert type(view.required_access_grants) == lookml.core.prop_list_unquoted
assert view.required_access_grants.value == ['a', 'b', 'c']
assert type(view.suggestions) == lookml.core.prop_yesno
assert view.suggestions.value == 'yes'
assert type(view.derived_table) == lookml.core.prop_anonymous_construct
for dim in view._dims():
assert dim.name in ['foo', 'bar']
assert type(view.transaction) == lookml.Dimension_Group
for param in view._params():
assert param.name in ['myparam']
for measure in view._measures():
assert measure.name in ['sum_foo', 'sum_bar']
def test_walk_measure(self):
measure = self.view.views.basic.sum_foo
assert type(measure) == lookml.Measure
assert measure.name == 'sum_foo'
assert type(measure.type) == lookml.core.prop_options
assert measure.type.value == 'sum'
assert type(measure.sql) == lookml.core.prop_sql
assert measure.sql.value == '${foo}'
assert type(measure.filters) == lookml.core.prop_filters
def test_walk_filters(self):
filter = self.view.views.basic.sum_foo.filters.foo
filter2 = self.view.views.basic.sum_bar.filters.foo
assert type(filter) == lookml.core.flt
assert filter.value == '%cool%'
assert type(filter2) == lookml.core.flt
assert filter2.value == '%cool%'
def test_walk_dimension_group(self):
dg = self.view.views.basic.transaction
assert type(dg) == lookml.Dimension_Group
assert dg.name == 'transaction'
assert type(dg.type) == lookml.core.prop_options
assert dg.type.value == 'time'
assert type(dg.tags) == lookml.core.prop_list_quoted
assert dg.tags.value == ['tag1', 'tag2', 'tag3', 'tag4']
assert type(dg.timeframes) == lookml.core.prop_list_unquoted
assert dg.timeframes.value == ['raw', 'time', 'date', 'week', 'month',
'quarter', 'year', 'week_of_year', 'month_num']
assert type(dg.sql) == lookml.core.prop_sql
assert dg.sql.value == '${TABLE}.transaction_timestamp'
def test_walk_dimension(self):
dimension = self.view.views.basic.foo
assert type(dimension) == lookml.core.Dimension
assert type(dimension.type) == lookml.core.prop_options
assert dimension.type.value == 'string'
assert type(dimension.style) == lookml.core.prop_options
assert dimension.style.value == 'classic'
assert type(dimension.sql) == lookml.core.prop_sql
assert dimension.sql.value == '${TABLE}.foo'
assert type(dimension.link) == lookml.core.prop_anonymous_construct_plural
class testMicroUnits(unittest.TestCase):
def test_add_orphan_to_github(self):
proj = lookml.Project(
repo= config['github']['repo']
,access_token=config['github']['access_token']
,looker_host="https://profservices.dev.looker.com/"
,looker_project_name="russ_sanbox"
)
if proj._exists('lookml/tests/files/basic_parsing/refine.view.lkml'):
x = proj.file('lookml/tests/files/basic_parsing/refine.view.lkml')
proj.delete(x)
f = lookml.File('lookml/tests/files/basic_parsing/refine.view.lkml')
proj.put(f)
f.delete()
def test_cool(self):
# x = lkml.load('lookml/tests/files/basic_parsing/wow.view.lkml')
# print(x)
# with open('lookml/tests/files/basic_parsing/wow.view.lkml','r') as z:
# wow = lkml.load(z)
# print(wow)
x = lookml.File('lookml/tests/files/basic_parsing/wow.view.lkml')
print(x)
# def test_join_back_an_ndt(self):
# v = lookml.View('order_items')
# v + f'''
# sql_table_name: public.order_items ;;
# dimension: id {
# primary_key: yes
# }
# dimension: state {}
# dimension: sale_price {}
# parameter: {dynamic_dim_selector} {
# type: unquoted
# # suggestions: ["Brand","Category","Department"]
# allowed_value: {
# label: "Category"
# value: "Category"
# }
# allowed_value: {
# label: "Brand"
# value: "Brand"
# }
# allowed_value: {
# label: "Department"
# value: "Department"
# }
# allowed_value: {
# label: "State"
# value: "State"
# }
# }
# dimension: user_id {}
# dimension: inventory_item_id {
# sql: ${TABLE}.inventory_item_id ;;
# }
# dimension: new_dimension {
# type: string
# sql:
# {% if order_items.dynamic_dim_selector._parameter_value == 'Brand' %} ${products.brand}
# {% elsif order_items.dynamic_dim_selector._parameter_value == 'Category' %} ${products.category}
# {% elsif order_items.dynamic_dim_selector._parameter_value == 'Department' %} ${products.department}
# {% elsif order_items.dynamic_dim_selector._parameter_value == 'State' %} ${users.state}
# {% else %} 'N/A'
# {% endif %}
# ;;
# }
# measure: total_sale_price {
# type: sum
# sql: ${sale_price} ;;
# }
# '''
# ex = lookml.Explore(v.name)
# agg = lookml.View('agg')
# agg + '''
# derived_table: {
# explore_source: order_items {
# column: new_dimension {field: order_items.new_dimension}
# column: total_sale_price {field: order_items.total_sale_price}
# derived_column: rank {
# sql: ROW_NUMBER() OVER (ORDER BY total_sale_price DESC) ;;
# }
# # bind_all_filters: yes
# bind_filters: {
# from_field: order_items.{dynamic_dim_selector}
# to_field: order_items.{dynamic_dim_selector}
# }
# # bind_filters: {
# # from_field: order_items.created_date
# # to_field: order_items.created_date
# # }
# }
# }
# dimension: new_dimension {
# sql: ${TABLE}.new_dimension ;;
# }
# dimension: rank {
# type: number
# hidden: yes
# }
# filter: tail_threshold {
# type: number
# hidden: yes
# }
# dimension: stacked_rank {
# type: string
# sql:
# CASE
# WHEN ${rank} < 10 then '0' || ${rank} || ') '|| ${new_dimension}
# ELSE ${rank} || ') ' || ${new_dimension}
# END
# ;;
# }
# dimension: ranked_brand_with_tail {
# type: string
# sql:
# CASE WHEN {% condition tail_threshold %} ${rank} {% endcondition %} THEN ${stacked_rank}
# ELSE 'x) Other'
# END
# ;;
# }
# dimension: total_sale_price {
# value_format: "$#,##0.00"
# type: number
# }
# '''
# ex + '''
# join: inventory_items {
# type: left_outer
# relationship: one_to_many
# sql_on: ${order_items.inventory_item_id} = ${inventory_items.id} ;;
# }
# join: products {
# type: left_outer
# sql_on: ${inventory_items.product_id} = ${products.id} ;;
# relationship: many_to_one
# }
# join: users {
# type: left_outer
# sql_on: ${order_items.user_id} = ${users.id} ;;
# relationship: many_to_one
# }
# join: agg {
# type: left_outer
# relationship: many_to_one
# sql_on: ${order_items.new_dimension} = ${agg.new_dimension};;
# }
# '''
# myModel = lookml.File(ex)
# myModel + v
# myModel + agg
# myModel.properties.addProperty('connection', 'snowlooker')
# myModel.properties.addProperty('include', 'views/*.lkml')
# myModel.name = 'core2.model.lkml'
# proj = lookml.Project(
# repo= 'russlooker/oi'
# ,access_token=config['github']['access_token']
# ,looker_host="https://profservices.dev.looker.com/"
# ,looker_project_name="test_pylookml"
# )
# myModel
# proj.put(myModel)
# proj.deploy()
def test_one_line_access_github(self):
print(
lookml.Project(**config['project1'])['order_items.view.lkml']['views']['order_items']['id'].primary_key.value
# lookml.Project(**config['project1']).file('order_items.view.lkml').views.order_items.id.primary_key.value
)
def test_local_file(self):
x = lookml.File('lookml/tests/files/kitchenSink/kitchenSink.model.lkml')
for v in x.views:
for f in v.measures():
if f.type.value == 'sum' and not f.name.endswith('_total'):
f.name = f.name + '_total'
#Optionally Change the location
# x.setFolder('.tmp')
#Write the file
x.write()
def test_model_file_creation(self):
#initialize project
#create new model file
#put and check output
pass
def test_parse_references(self):
results = list(lookml.lib.lang.parse_references('''
${test.one_1} - ${test.two}
{% condition test.three %} ${four} {% endcondition %}
{% parameter test.five %}
{{ six }}
{{seven}}
{{test.eight}}
{{ _filters['test.nine'] | url_encode}}
{% _filters['ten10'] %}
'''))
self.assertEqual(results[0]['field'],'test.one_1')
self.assertEqual(results[1]['field'],'test.two')
self.assertEqual(results[2]['field'],'test.three')
self.assertEqual(results[3]['field'],'four')
self.assertEqual(results[4]['field'],'test.five')
self.assertEqual(results[5]['field'],'six')
self.assertEqual(results[6]['field'],'seven')
self.assertEqual(results[7]['field'],'test.eight')
self.assertEqual(results[8]['field'],'test.nine')
self.assertEqual(results[9]['field'],'ten10')
self.assertEqual(results[9]['fully_qualified_reference'],False)
def test_field_deletion(self):
self.proj = lookml.Project(
# repo= config['github']['repo']
# ,access_token=config['github']['access_token']
git_url='<EMAIL>:llooker/russ_sandbox.git'
,looker_host="https://dat.dev.looker.com/"
,looker_project_name="pylookml"
)
viewFile = self.proj.file('01_order_items.view.lkml')
view = viewFile['views']['order_items']
for f in view.fields():
            if f.name not in ('id',):
view - f
print(view)
# or alternatively remove the fields with a loop
# for field in viewFile.fields():
# if field.name in ('count','sales_price'):
# viewFile.removeField(field)
class testWriting(unittest.TestCase):
'''
Objective / coverage
Read all file types from filesystem:
model,
view,
other lkml files,
manifest,
dashboard,
js,
json,
maplayer
Success Criteria:
need coverage of all basic operations: mutating each sub property and asserting the effect
novel convenience methods should be covered in test cases
written object needs to be reparsible to pass
'''
def setUp(self):
pass
#basic objects
def test_model_file(self):
pass
def test_view_file(self):
pass
def test_other_lkml_file(self):
pass
def test_manifest_file(self):
pass
def test_dashboard_file(self):
pass
def test_js_file(self):
pass
def test_json_file(self):
pass
def test_maplayer_topojson(self):
pass
#syntax constructs
def test_refinements(self):
pass
def test_extensions(self):
pass
def test_dimension(self):
pass
def test_measure(self):
pass
def tearDown(self):
pass
class testModel(unittest.TestCase):
def setUp(self):
self.model = lookml.File('lookml/tests/files/basic_parsing/basic.model.lkml')
self.explore_names = ['trip', 'station_weather_forecast', 'station_forecasting']
def test_walking(self):
for explore in self.model.explores:
assert explore.name in self.model.explores
assert isinstance(explore.join, lookml.core.prop_named_construct)
def test_walk_explore(self):
explore = self.model.explores.trip
assert type(explore.join.start_station) == lookml.core.prop_named_construct_single
explore2 = self.model.explores.station_weather_forecast
assert type(explore2.hidden) == lookml.core.prop_yesno
assert explore2.hidden.value == 'yes'
assert type(explore.view_name) == lookml.core.prop_string_unquoted
assert explore2.view_name.value == 'weather_forecast'
# assert explore2.from == lookml.prop.string_unquoted
def test_walk_join(self):
join = self.model.explores.trip.join.start_station
assert type(join) == lookml.core.prop_named_construct_single
assert type(join.relationship) == lookml.core.prop_options
assert join.relationship.value == 'many_to_one'
assert type(join.type) == lookml.core.prop_options
assert join.type.value == 'left_outer'
assert type(join.sql_on) == lookml.core.prop_sql
assert join.sql_on.value == '${trip.from_station_id} = ${start_station.station_id}'
# assert join.from == lookml.prop.string_unquoted
|
[
"lookml.File",
"lookml.Model",
"lookml.View",
"lookml.Measure",
"lookml.Filter",
"lookml.ProjectGithub",
"lookml.lkml.load",
"lookml.Parameter",
"lookml.Dimension_Group",
"lookml.Explore",
"lookml.ProjectSSH",
"lookml.lib.lang.parse_references",
"lookml.Project",
"configparser.ConfigParser",
"lookml.Dimension"
] |
[((340, 367), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (365, 367), False, 'import configparser\n'), ((701, 741), 'lookml.Project', 'lookml.Project', ([], {'path': '""".tmp/testAllProps"""'}), "(path='.tmp/testAllProps')\n", (715, 741), False, 'import lookml\n'), ((1727, 1746), 'lookml.View', 'lookml.View', (['"""test"""'], {}), "('test')\n", (1738, 1746), False, 'import lookml\n'), ((2686, 2702), 'lookml.Model', 'lookml.Model', (['pm'], {}), '(pm)\n', (2698, 2702), False, 'import lookml\n'), ((2870, 2911), 'lookml.View', 'lookml.View', (["self.parsed_view['views'][0]"], {}), "(self.parsed_view['views'][0])\n", (2881, 2911), False, 'import lookml\n'), ((3085, 3126), 'lookml.View', 'lookml.View', (["self.parsed_view['views'][0]"], {}), "(self.parsed_view['views'][0])\n", (3096, 3126), False, 'import lookml\n'), ((3785, 3826), 'lookml.View', 'lookml.View', (["self.parsed_view['views'][0]"], {}), "(self.parsed_view['views'][0])\n", (3796, 3826), False, 'import lookml\n'), ((4055, 4096), 'lookml.View', 'lookml.View', (["self.parsed_view['views'][0]"], {}), "(self.parsed_view['views'][0])\n", (4066, 4096), False, 'import lookml\n'), ((4822, 5121), 'lookml.View', 'lookml.View', (['"""\n view: foo {\n derived_table: {\n explore_source: order_items {\n column: id { field: id }\n column: id_2 { field: id }\n }\n }\n }\n """'], {}), '(\n """\n view: foo {\n derived_table: {\n explore_source: order_items {\n column: id { field: id }\n column: id_2 { field: id }\n }\n }\n }\n """\n )\n', (4833, 5121), False, 'import lookml\n'), ((5479, 5520), 'lookml.View', 'lookml.View', (["self.parsed_view['views'][0]"], {}), "(self.parsed_view['views'][0])\n", (5490, 5520), False, 'import lookml\n'), ((7088, 7100), 'lookml.lkml.load', 'lkml.load', (['x'], {}), '(x)\n', (7097, 7100), True, 'import lookml.lkml as lkml\n'), ((7137, 7157), 'lookml.Model', 'lookml.Model', (['parsed'], {}), '(parsed)\n', (7149, 7157), False, 'import lookml\n'), ((7443, 7498), 'lookml.Project', 'lookml.Project', ([], {'path': '"""lookml/tests/files/basic_parsing"""'}), "(path='lookml/tests/files/basic_parsing')\n", (7457, 7498), False, 'import lookml\n'), ((7893, 7934), 'lookml.View', 'lookml.View', (["self.parsed_view['views'][0]"], {}), "(self.parsed_view['views'][0])\n", (7904, 7934), False, 'import lookml\n'), ((11123, 11139), 'lookml.View', 'lookml.View', (['raw'], {}), '(raw)\n', (11134, 11139), False, 'import lookml\n'), ((11977, 12040), 'lookml.Project', 'lookml.Project', ([], {'path': '"""lookml/tests/files/pylookml_test_project"""'}), "(path='lookml/tests/files/pylookml_test_project')\n", (11991, 12040), False, 'import lookml\n'), ((12429, 12627), 'lookml.View', 'lookml.View', (['"""\n view: foo {\n sql_table_name: public.order_items ;;\n dimension: foo {}\n measure: count {}\n }\n """'], {}), '(\n """\n view: foo {\n sql_table_name: public.order_items ;;\n dimension: foo {}\n measure: count {}\n }\n """\n )\n', (12440, 12627), False, 'import lookml\n'), ((12720, 12738), 'lookml.View', 'lookml.View', (['"""foo"""'], {}), "('foo')\n", (12731, 12738), False, 'import lookml\n'), ((12841, 12869), 'lookml.View', 'lookml.View', (["{'name': 'foo'}"], {}), "({'name': 'foo'})\n", (12852, 12869), False, 'import lookml\n'), ((13629, 13652), 'lookml.Dimension', 'lookml.Dimension', (['"""foo"""'], {}), "('foo')\n", (13645, 13652), False, 'import lookml\n'), ((13665, 13686), 'lookml.Measure', 'lookml.Measure', (['"""bar"""'], {}), "('bar')\n", (13679, 13686), False, 'import lookml\n'), ((13699, 13719), 
'lookml.Filter', 'lookml.Filter', (['"""baz"""'], {}), "('baz')\n", (13712, 13719), False, 'import lookml\n'), ((13733, 13757), 'lookml.Parameter', 'lookml.Parameter', (['"""fizz"""'], {}), "('fizz')\n", (13749, 13757), False, 'import lookml\n'), ((13771, 13801), 'lookml.Dimension_Group', 'lookml.Dimension_Group', (['"""buzz"""'], {}), "('buzz')\n", (13793, 13801), False, 'import lookml\n'), ((13841, 13892), 'lookml.Dimension', 'lookml.Dimension', (['"""dimension: foo { type: string }"""'], {}), "('dimension: foo { type: string }')\n", (13857, 13892), False, 'import lookml\n'), ((13905, 13951), 'lookml.Measure', 'lookml.Measure', (['"""measure: bar { type: count }"""'], {}), "('measure: bar { type: count }')\n", (13919, 13951), False, 'import lookml\n'), ((13964, 14007), 'lookml.Filter', 'lookml.Filter', (['"""filter: baz { type: date }"""'], {}), "('filter: baz { type: date }')\n", (13977, 14007), False, 'import lookml\n'), ((14021, 14075), 'lookml.Parameter', 'lookml.Parameter', (['"""parameter: fizz { type: unquoted }"""'], {}), "('parameter: fizz { type: unquoted }')\n", (14037, 14075), False, 'import lookml\n'), ((14089, 14151), 'lookml.Dimension_Group', 'lookml.Dimension_Group', (['"""dimension_group: buzz { type: time }"""'], {}), "('dimension_group: buzz { type: time }')\n", (14111, 14151), False, 'import lookml\n'), ((14193, 14221), 'lookml.Explore', 'lookml.Explore', (['"""my_explore"""'], {}), "('my_explore')\n", (14207, 14221), False, 'import lookml\n'), ((14418, 14496), 'lookml.Explore', 'lookml.Explore', (['"""explore: my_explore {\n join: foo {}\n }"""'], {}), '("""explore: my_explore {\n join: foo {}\n }""")\n', (14432, 14496), False, 'import lookml\n'), ((14707, 14757), 'lookml.Dimension', 'lookml.Dimension', (['"""dimension: +mydim1234567590 {}"""'], {}), "('dimension: +mydim1234567590 {}')\n", (14723, 14757), False, 'import lookml\n'), ((14913, 15085), 'lookml.View', 'lookml.View', (['"""\n view: tmp {\n dimension: foo {\n sql: ${TABLE}.foo ;;\n }\n }\n """'], {}), '(\n """\n view: tmp {\n dimension: foo {\n sql: ${TABLE}.foo ;;\n }\n }\n """\n )\n', (14924, 15085), False, 'import lookml\n'), ((15426, 15453), 'lookml.View', 'lookml.View', (['"""view: foo {}"""'], {}), "('view: foo {}')\n", (15437, 15453), False, 'import lookml\n'), ((15800, 15857), 'lookml.Dimension', 'lookml.Dimension', (['"""\n dimension: x {}\n """'], {}), '("""\n dimension: x {}\n """)\n', (15816, 15857), False, 'import lookml\n'), ((15967, 15983), 'lookml.View', 'lookml.View', (['"""y"""'], {}), "('y')\n", (15978, 15983), False, 'import lookml\n'), ((16725, 17020), 'lookml.View', 'lookml.View', (['"""\n view: v {\n dimension: a {}\n dimension: c { sql: ${a} ;; }\n dimension: d { sql: ${c} ;; }\n dimension: e { sql: ${d}/${a} ;; }\n dimension: b { sql: ${a} ;; }\n }\n """'], {}), '(\n """\n view: v {\n dimension: a {}\n dimension: c { sql: ${a} ;; }\n dimension: d { sql: ${c} ;; }\n dimension: e { sql: ${d}/${a} ;; }\n dimension: b { sql: ${a} ;; }\n }\n """\n )\n', (16736, 17020), False, 'import lookml\n'), ((17152, 17273), 'lookml.Dimension', 'lookml.Dimension', (['"""\n dimension: x {\n description: "hello"\n }\n """'], {}), '(\n """\n dimension: x {\n description: "hello"\n }\n """\n )\n', (17168, 17273), False, 'import lookml\n'), ((17517, 17553), 'lookml.View', 'lookml.View', (["parsed_view['views'][0]"], {}), "(parsed_view['views'][0])\n", (17528, 17553), False, 'import lookml\n'), ((17643, 17979), 'lookml.View', 'lookml.View', (['"""\n view: x {\n dimension: a {}\n dimension: b { sql: 
IFNULL(${a},0) ;;}\n dimension: c { sql: ${a} ;; }\n dimension: x {}\n dimension: y {}\n dimension: z {}\n dimension: zz {}\n }\n """'], {}), '(\n """\n view: x {\n dimension: a {}\n dimension: b { sql: IFNULL(${a},0) ;;}\n dimension: c { sql: ${a} ;; }\n dimension: x {}\n dimension: y {}\n dimension: z {}\n dimension: zz {}\n }\n """\n )\n', (17654, 17979), False, 'import lookml\n'), ((18870, 19072), 'lookml.View', 'lookml.View', (['"""\n view: x {\n dimension: a {}\n dimension: b { sql: NVL(${a}) ;;}\n dimension: c { sql: ${a} ;; }\n }\n """'], {}), '(\n """\n view: x {\n dimension: a {}\n dimension: b { sql: NVL(${a}) ;;}\n dimension: c { sql: ${a} ;; }\n }\n """\n )\n', (18881, 19072), False, 'import lookml\n'), ((19403, 20008), 'lookml.View', 'lookml.View', (['"""\n view: x {\n dimension: test456 {\n type: number\n tags: ["my_tag"]\n }\n dimension: test123 {\n type: string\n }\n measure: a {}\n measure: b {}\n dimension: test789 {}\n filter: c {}\n filter: d {}\n parameter: z {}\n parameter: y {\n tags: ["my_tag"]\n }\n dimension_group: created {}\n }\n """'], {}), '(\n """\n view: x {\n dimension: test456 {\n type: number\n tags: ["my_tag"]\n }\n dimension: test123 {\n type: string\n }\n measure: a {}\n measure: b {}\n dimension: test789 {}\n filter: c {}\n filter: d {}\n parameter: z {}\n parameter: y {\n tags: ["my_tag"]\n }\n dimension_group: created {}\n }\n """\n )\n', (19414, 20008), False, 'import lookml\n'), ((21995, 22037), 'lookml.Dimension', 'lookml.Dimension', (['"""dimension: test_dim {}"""'], {}), "('dimension: test_dim {}')\n", (22011, 22037), False, 'import lookml\n'), ((22685, 22790), 'lookml.View', 'lookml.View', (['"""\n view: new_view {\n view_label: "hello"\n }\n """'], {}), '(\n """\n view: new_view {\n view_label: "hello"\n }\n """\n )\n', (22696, 22790), False, 'import lookml\n'), ((23394, 23449), 'lookml.Project', 'lookml.Project', ([], {'path': '"""lookml/tests/files/basic_parsing"""'}), "(path='lookml/tests/files/basic_parsing')\n", (23408, 23449), False, 'import lookml\n'), ((23703, 23732), 'lookml.Measure', 'lookml.Measure', (['"""total_money"""'], {}), "('total_money')\n", (23717, 23732), False, 'import lookml\n'), ((24204, 24228), 'lookml.View', 'lookml.View', (['"""test_view"""'], {}), "('test_view')\n", (24215, 24228), False, 'import lookml\n'), ((25297, 25316), 'lookml.View', 'lookml.View', (['"""test"""'], {}), "('test')\n", (25308, 25316), False, 'import lookml\n'), ((26758, 26780), 'lookml.Explore', 'lookml.Explore', (['v.name'], {}), '(v.name)\n', (26772, 26780), False, 'import lookml\n'), ((29985, 30048), 'lookml.Project', 'lookml.Project', ([], {'path': '"""lookml/tests/files/pylookml_test_project"""'}), "(path='lookml/tests/files/pylookml_test_project')\n", (29999, 30048), False, 'import lookml\n'), ((30217, 30344), 'lookml.Project', 'lookml.Project', ([], {'repo': '"""pythonruss/pylookml_test_project"""', 'access_token': "config['project1']['access_token']", 'index_whole': '(True)'}), "(repo='pythonruss/pylookml_test_project', access_token=config\n ['project1']['access_token'], index_whole=True)\n", (30231, 30344), False, 'import lookml\n'), ((30497, 30631), 'lookml.ProjectGithub', 'lookml.ProjectGithub', ([], {'repo': '"""pythonruss/pylookml_test_project"""', 'access_token': "config['project1']['access_token']", 'index_whole': '(False)'}), "(repo='pythonruss/pylookml_test_project', access_token=\n config['project1']['access_token'], index_whole=False)\n", (30517, 30631), False, 'import lookml\n'), ((30793, 30927), 'lookml.ProjectGithub', 
'lookml.ProjectGithub', ([], {'repo': '"""pythonruss/pylookml_test_project"""', 'access_token': "config['project1']['access_token']", 'index_whole': '(False)'}), "(repo='pythonruss/pylookml_test_project', access_token=\n config['project1']['access_token'], index_whole=False)\n", (30813, 30927), False, 'import lookml\n'), ((31253, 31418), 'lookml.ProjectGithub', 'lookml.ProjectGithub', ([], {'repo': '"""llooker/aes_demo_final"""', 'access_token': "config['autotune']['access_token']", 'branch': "config['autotune']['branch']", 'index_whole': '(False)'}), "(repo='llooker/aes_demo_final', access_token=config[\n 'autotune']['access_token'], branch=config['autotune']['branch'],\n index_whole=False)\n", (31273, 31418), False, 'import lookml\n'), ((31849, 32031), 'lookml.ProjectSSH', 'lookml.ProjectSSH', ([], {'git_url': '"""git@github.com:pythonruss/pylookml_test_project.git"""', 'looker_project_name': '"""pylookml_test_project"""', 'looker_host': '"""https://dat.dev.looker.com/"""'}), "(git_url=\n 'git@github.com:pythonruss/pylookml_test_project.git',\n looker_project_name='pylookml_test_project', looker_host=\n 'https://dat.dev.looker.com/')\n", (31866, 32031), False, 'import lookml\n'), ((32266, 32329), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/basic.view.lkml"""'], {}), "('lookml/tests/files/basic_parsing/basic.view.lkml')\n", (32277, 32329), False, 'import lookml\n'), ((32432, 32625), 'lookml.Project', 'lookml.Project', ([], {'repo': '"""bryan-at-looker/lots_of_access_grants"""', 'looker_host': '"""https://sandbox.dev.looker.com/"""', 'looker_project_name': '"""lots_of_access_grants"""', 'branch': '"""dev-bryan-weber-ymjr"""'}), "(repo='bryan-at-looker/lots_of_access_grants', looker_host=\n 'https://sandbox.dev.looker.com/', looker_project_name=\n 'lots_of_access_grants', branch='dev-bryan-weber-ymjr')\n", (32446, 32625), False, 'import lookml\n'), ((33227, 33289), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/agg.model.lkml"""'], {}), "('lookml/tests/files/basic_parsing/agg.model.lkml')\n", (33238, 33289), False, 'import lookml\n'), ((33525, 33589), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/basic.model.lkml"""'], {}), "('lookml/tests/files/basic_parsing/basic.model.lkml')\n", (33536, 33589), False, 'import lookml\n'), ((33675, 33739), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/refine.view.lkml"""'], {}), "('lookml/tests/files/basic_parsing/refine.view.lkml')\n", (33686, 33739), False, 'import lookml\n'), ((33859, 33920), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/manifest.lkml"""'], {}), "('lookml/tests/files/basic_parsing/manifest.lkml')\n", (33870, 33920), False, 'import lookml\n'), ((34697, 34747), 'lookml.Project', 'lookml.Project', ([], {'path': '"""lookml/tests/files/the_look"""'}), "(path='lookml/tests/files/the_look')\n", (34711, 34747), False, 'import lookml\n'), ((35980, 36200), 'lookml.View', 'lookml.View', (['"""\n view: x {\n dimension: x {\n primary_key: yes\n }\n dimension: y {}\n }\n """'], {}), '(\n """\n view: x {\n dimension: x {\n primary_key: yes\n }\n dimension: y {}\n }\n """\n )\n', (35991, 36200), False, 'import lookml\n'), ((36646, 36664), 'lookml.View', 'lookml.View', (['"""foo"""'], {}), "('foo')\n", (36657, 36664), False, 'import lookml\n'), ((37109, 37164), 'lookml.Project', 'lookml.Project', ([], {'path': '"""lookml/tests/files/basic_parsing"""'}), "(path='lookml/tests/files/basic_parsing')\n", (37123, 37164), False, 'import 
lookml\n'), ((41316, 41504), 'lookml.Project', 'lookml.Project', ([], {'repo': "config['github']['repo']", 'access_token': "config['github']['access_token']", 'looker_host': '"""https://profservices.dev.looker.com/"""', 'looker_project_name': '"""russ_sanbox"""'}), "(repo=config['github']['repo'], access_token=config['github']\n ['access_token'], looker_host='https://profservices.dev.looker.com/',\n looker_project_name='russ_sanbox')\n", (41330, 41504), False, 'import lookml\n'), ((41768, 41832), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/refine.view.lkml"""'], {}), "('lookml/tests/files/basic_parsing/refine.view.lkml')\n", (41779, 41832), False, 'import lookml\n'), ((42141, 42202), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/wow.view.lkml"""'], {}), "('lookml/tests/files/basic_parsing/wow.view.lkml')\n", (42152, 42202), False, 'import lookml\n'), ((48238, 48306), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/kitchenSink/kitchenSink.model.lkml"""'], {}), "('lookml/tests/files/kitchenSink/kitchenSink.model.lkml')\n", (48249, 48306), False, 'import lookml\n'), ((49852, 49990), 'lookml.Project', 'lookml.Project', ([], {'git_url': '"""<EMAIL>:llooker/russ_sandbox.git"""', 'looker_host': '"""https://dat.dev.looker.com/"""', 'looker_project_name': '"""pylookml"""'}), "(git_url='<EMAIL>:llooker/russ_sandbox.git', looker_host=\n 'https://dat.dev.looker.com/', looker_project_name='pylookml')\n", (49866, 49990), False, 'import lookml\n'), ((51897, 51961), 'lookml.File', 'lookml.File', (['"""lookml/tests/files/basic_parsing/basic.model.lkml"""'], {}), "('lookml/tests/files/basic_parsing/basic.model.lkml')\n", (51908, 51961), False, 'import lookml\n'), ((2091, 2114), 'lookml.Dimension', 'lookml.Dimension', (['"""foo"""'], {}), "('foo')\n", (2107, 2114), False, 'import lookml\n'), ((13025, 13252), 'lookml.View', 'lookml.View', (['"""\n view: foo {\n sql_table_name: public.order_items ;;\n dimension: foo {}\n measure: count {}\n }\n view: bar {}\n """'], {}), '(\n """\n view: foo {\n sql_table_name: public.order_items ;;\n dimension: foo {}\n measure: count {}\n }\n view: bar {}\n """\n )\n', (13036, 13252), False, 'import lookml\n'), ((13503, 13533), 'lookml.View', 'lookml.View', (['"""explore: foo {}"""'], {}), "('explore: foo {}')\n", (13514, 13533), False, 'import lookml\n'), ((27696, 27747), 'lookml.View', 'lookml.View', (['"""\n view: hello {}\n """'], {}), '("""\n view: hello {}\n """)\n', (27707, 27747), False, 'import lookml\n'), ((35420, 35439), 'lookml.View', 'lookml.View', (['"""test"""'], {}), "('test')\n", (35431, 35439), False, 'import lookml\n'), ((48807, 49169), 'lookml.lib.lang.parse_references', 'lookml.lib.lang.parse_references', (['"""\n ${test.one_1} - ${test.two}\n {% condition test.three %} ${four} {% endcondition %}\n {% parameter test.five %}\n {{ six }}\n {{seven}}\n {{test.eight}}\n {{ _filters[\'test.nine\'] | url_encode}}\n {% _filters[\'ten10\'] %}\n """'], {}), '(\n """\n ${test.one_1} - ${test.two}\n {% condition test.three %} ${four} {% endcondition %}\n {% parameter test.five %}\n {{ six }}\n {{seven}}\n {{test.eight}}\n {{ _filters[\'test.nine\'] | url_encode}}\n {% _filters[\'ten10\'] %}\n """\n )\n', (48839, 49169), False, 'import lookml\n'), ((47958, 47994), 'lookml.Project', 'lookml.Project', ([], {}), "(**config['project1'])\n", (47972, 47994), False, 'import lookml\n')]
|
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from ...classification.resnet import BasicBlock, Bottleneck, resnet18, resnet34, resnet50, resnet101, resnet152
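# LinkNet-style encoder-decoder segmentation models built on ResNet encoders
# (Chaurasia & Culurciello, 2017): decoder outputs are added element-wise to the
# matching encoder feature maps before a final classifier upsamples to the input size.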
__all__= ['LinkNet', 'linknet_resnet18','linknet_resnet34','linknet_resnet50','linknet_resnet101','linknet_resnet152']
model_urls = {
'linknet_resnet18': None,
'linknet_resnet34': None,
'linknet_resnet50': None,
'linknet_resnet101': None,
'linknet_resnet152': None,
}
_arch_dict = {
'linknet_resnet18': ( (2, 2, 2, 2), BasicBlock ),
    'linknet_resnet34': ( (3, 4, 6, 3), BasicBlock ),
'linknet_resnet50': ( (3, 4, 6, 3), Bottleneck ),
'linknet_resnet101': ( (3, 4, 23, 3), Bottleneck ),
'linknet_resnet152': ( (3, 8, 36, 3), Bottleneck ),
}
_backbone_dict = {
'linknet_resnet18': resnet18,
'linknet_resnet34': resnet34,
'linknet_resnet50': resnet50,
'linknet_resnet101': resnet101,
'linknet_resnet152': resnet152,
}
class LinkNetDecoder(nn.Sequential):
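    # Decoder block: a 1x1 conv squeezes channels to in_channels//4, a transposed 3x3
    # conv upsamples by `stride`, and a final 1x1 conv expands to out_channels,
    # each followed by BatchNorm + ReLU.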
def __init__(self, in_channels, out_channels, stride=1):
super(LinkNetDecoder, self).__init__(
nn.Conv2d(in_channels, in_channels//4, kernel_size=1, padding=0, stride=1, bias=False),
nn.BatchNorm2d(in_channels//4),
nn.ReLU(inplace=True),
# upsample
nn.ConvTranspose2d(in_channels//4, in_channels//4, kernel_size=3, stride=stride, padding=1, output_padding=int(stride==2)),
nn.BatchNorm2d(in_channels//4),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels//4, out_channels, kernel_size=1, padding=0, stride=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
class LinkNet(nn.Module):
def __init__(self, arch='linknet_resnet18', num_classes=21, in_channels=3, pretrained_backbone=False, channel_list=(64, 128, 256, 512 ), block=BasicBlock):
super(LinkNet, self).__init__()
# predefined arch
if isinstance(arch, str):
arch_name = arch
            assert arch_name in _arch_dict.keys(), "arch_name for LinkNet should be one of %s"%( _arch_dict.keys() )
arch, block = _arch_dict[arch_name]
# customized arch
elif isinstance( arch, (list, tuple) ):
arch_name = 'customized'
        # Encoder stem
        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.inplanes = 64
# Encoder
self.layer1 = self._make_layer(block, planes=channel_list[0], blocks=arch[0])
self.layer2 = self._make_layer(block, planes=channel_list[1], blocks=arch[1], stride=2)
self.layer3 = self._make_layer(block, planes=channel_list[2], blocks=arch[2], stride=2)
self.layer4 = self._make_layer(block, planes=channel_list[3], blocks=arch[3], stride=2)
decoder_channel_list = [ c*block.expansion for c in channel_list ]
# Decoder
self.decoder4 = LinkNetDecoder(decoder_channel_list[3], decoder_channel_list[2], stride=2)
self.decoder3 = LinkNetDecoder(decoder_channel_list[2], decoder_channel_list[1], stride=2)
self.decoder2 = LinkNetDecoder(decoder_channel_list[1], decoder_channel_list[0], stride=2)
self.decoder1 = LinkNetDecoder(decoder_channel_list[0], decoder_channel_list[0])
# Final Classifier
self.classifier = nn.Sequential(
nn.ConvTranspose2d(decoder_channel_list[0], 32, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(32, num_classes, kernel_size=2, stride=2, padding=0)
)
if pretrained_backbone:
self.load_from_pretrained_resnet(arch_name)
def _make_layer(self, block, planes, blocks, stride=1):
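        # Build one residual stage: the first block may change stride/width (with a
        # 1x1 conv + BN shortcut for the skip path); the remaining `blocks - 1` blocks keep the shape.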
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def load_from_pretrained_resnet(self, resnet):
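        # Copy encoder weights parameter-by-parameter from a pretrained ResNet of the
        # matching depth; a string argument is resolved through _backbone_dict.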
if isinstance(resnet, str):
resnet = _backbone_dict[ resnet ](pretrained=True)
def copy_params(layer1, layer2):
for p1, p2 in zip( layer1.parameters(), layer2.parameters() ):
p1.data = p2.data
linknet_part = [ self.conv1, self.bn1, self.layer1, self.layer2, self.layer3, self.layer4 ]
resnet_part = [ resnet.conv1, resnet.bn1, resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4 ]
for linknet_layer, resnet_layer in zip( linknet_part, resnet_part ):
copy_params( linknet_layer, resnet_layer )
def forward(self, x):
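        # Encoder stages produce e1..e4; each decoder output is fused with the matching
        # encoder feature map by element-wise addition (LinkNet's skip connections).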
# Encoder
out_size = x.shape[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
e1 = self.layer1(x)
e2 = self.layer2(e1)
e3 = self.layer3(e2)
e4 = self.layer4(e3)
d4 = self.decoder4(e4)
d4 = d4+e3
d3 = self.decoder3(d4)
d3 = d3+e2
d2 = self.decoder2(d3)
d2 = d2+e1
d1 = self.decoder1(d2)
logits = self.classifier(d1)
if logits.shape[2:]!=out_size:
logits = nn.functional.interpolate( logits, size=out_size, mode='bilinear', align_corners=True )
return logits
def linknet_resnet18(pretrained=False, progress=True, **kwargs):
model = LinkNet(arch='linknet_resnet18', **kwargs)
if pretrained:
        state_dict = load_state_dict_from_url(model_urls['linknet_resnet18'], progress=progress)
model.load_state_dict(state_dict)
return model
def linknet_resnet34(pretrained=False, progress=True, **kwargs):
model = LinkNet(arch='linknet_resnet34', **kwargs)
if pretrained:
        state_dict = load_state_dict_from_url(model_urls['linknet_resnet34'], progress=progress)
model.load_state_dict(state_dict)
return model
def linknet_resnet50(pretrained=False, progress=True, **kwargs):
model = LinkNet(arch='linknet_resnet50', **kwargs)
if pretrained:
        state_dict = load_state_dict_from_url(model_urls['linknet_resnet50'], progress=progress)
model.load_state_dict(state_dict)
return model
def linknet_resnet101(pretrained=False, progress=True, **kwargs):
model = LinkNet(arch='linknet_resnet101', **kwargs)
if pretrained:
        state_dict = load_state_dict_from_url(model_urls['linknet_resnet101'], progress=progress)
model.load_state_dict(state_dict)
return model
def linknet_resnet152(pretrained=False, progress=True, **kwargs):
model = LinkNet(arch='linknet_resnet152', **kwargs)
if pretrained:
        state_dict = load_state_dict_from_url(model_urls['linknet_resnet152'], progress=progress)
model.load_state_dict(state_dict)
return model
|
[
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.interpolate"
] |
[((2331, 2395), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (2340, 2395), True, 'import torch.nn as nn\n'), ((2446, 2464), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2460, 2464), True, 'import torch.nn as nn\n'), ((2485, 2506), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2492, 2506), True, 'import torch.nn as nn\n'), ((2530, 2578), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (2542, 2578), True, 'import torch.nn as nn\n'), ((4806, 4828), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (4819, 4828), True, 'import torch.nn as nn\n'), ((1103, 1195), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(in_channels // 4)'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'bias': '(False)'}), '(in_channels, in_channels // 4, kernel_size=1, padding=0, stride=1,\n bias=False)\n', (1112, 1195), True, 'import torch.nn as nn\n'), ((1203, 1235), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(in_channels // 4)'], {}), '(in_channels // 4)\n', (1217, 1235), True, 'import torch.nn as nn\n'), ((1247, 1268), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1254, 1268), True, 'import torch.nn as nn\n'), ((1442, 1474), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(in_channels // 4)'], {}), '(in_channels // 4)\n', (1456, 1474), True, 'import torch.nn as nn\n'), ((1486, 1507), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1493, 1507), True, 'import torch.nn as nn\n'), ((1522, 1616), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_channels // 4)', 'out_channels'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)', 'bias': '(False)'}), '(in_channels // 4, out_channels, kernel_size=1, padding=0, stride=\n 1, bias=False)\n', (1531, 1616), True, 'import torch.nn as nn\n'), ((1623, 1651), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1637, 1651), True, 'import torch.nn as nn\n'), ((1665, 1686), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1672, 1686), True, 'import torch.nn as nn\n'), ((3561, 3666), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['decoder_channel_list[0]', '(32)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(decoder_channel_list[0], 32, kernel_size=3, stride=2,\n padding=1, output_padding=1)\n', (3579, 3666), True, 'import torch.nn as nn\n'), ((3676, 3694), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (3690, 3694), True, 'import torch.nn as nn\n'), ((3708, 3729), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3715, 3729), True, 'import torch.nn as nn\n'), ((3744, 3809), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(32, 32, kernel_size=3, stride=1, padding=1, bias=False)\n', (3753, 3809), True, 'import torch.nn as nn\n'), ((3823, 3841), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (3837, 3841), True, 'import torch.nn as nn\n'), ((3855, 3876), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3862, 3876), True, 'import torch.nn as nn\n'), ((3891, 3962), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', 
(['(32)', 'num_classes'], {'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)'}), '(32, num_classes, kernel_size=2, stride=2, padding=0)\n', (3909, 3962), True, 'import torch.nn as nn\n'), ((6066, 6155), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['logits'], {'size': 'out_size', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(logits, size=out_size, mode='bilinear',\n align_corners=True)\n", (6091, 6155), True, 'import torch.nn as nn\n'), ((4274, 4371), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=1, stride=\n stride, bias=False)\n', (4283, 4371), True, 'import torch.nn as nn\n'), ((4503, 4543), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (4517, 4543), True, 'import torch.nn as nn\n')]
|
import pandas as pd
from decisionengine.framework.modules import Transform
_ATTR_TRANSLATION_MAP = {
'GLIDEIN_Supported_VOs': 'AWSProfile',
'INSTANCE_TYPE': 'InstanceType',
'AVAILABILITY_ZONE': 'AvailabilityZone',
'GlideinConfigPerEntryMaxGlideins': 'MaxLimit',
}
@Transform.consumes(Factory_Entries_AWS=pd.DataFrame)
@Transform.produces(aws_instance_limits=pd.DataFrame,
spot_occupancy_config=pd.DataFrame)
class AWSFactoryEntryData(Transform.Transform):
def transform(self, datablock):
# Get the dataframe containing AWS entries
aws_entries = self.Factory_Entries_AWS(datablock)
limits_df = pd.DataFrame()
so_config_dict = {}
if not aws_entries.empty:
# Get relevant columns from the dataframe
sub_df = aws_entries[_ATTR_TRANSLATION_MAP.keys()]
"""
GLIDEIN_Supported_VOs can be list of comma separated strings.
Convert it into flat list of string
"""
vos = [i for sublist in
[x.split(",") for x in list(sub_df.GLIDEIN_Supported_VOs)]
for i in sublist]
# unique VOs
vo_set = set(vos)
# Convert to relevant aws data and config
for vo in vo_set:
if vo:
df = sub_df.loc[sub_df['GLIDEIN_Supported_VOs'].str.contains(vo), ['INSTANCE_TYPE', 'AVAILABILITY_ZONE', 'GlideinConfigPerEntryMaxGlideins']]
df['GLIDEIN_Supported_VOs'] = vo
if limits_df is None:
limits_df = df
else:
limits_df = limits_df.append(df, ignore_index=True)
az_it = sub_df.loc[sub_df['GLIDEIN_Supported_VOs'].str.contains(vo), ['INSTANCE_TYPE', 'AVAILABILITY_ZONE']]
regions = set([az[:-1] for az in az_it.AVAILABILITY_ZONE.unique()])
so_config_dict[vo] = {}
for region in regions:
it = az_it.loc[az_it['AVAILABILITY_ZONE'].str.contains(region)].INSTANCE_TYPE.unique().tolist()
so_config_dict[vo][region] = it
limits_df = limits_df.rename(columns=_ATTR_TRANSLATION_MAP)
return {'aws_instance_limits': limits_df,
'spot_occupancy_config': pd.DataFrame.from_dict(so_config_dict)}
Transform.describe(AWSFactoryEntryData)
|
[
"pandas.DataFrame",
"decisionengine.framework.modules.Transform.produces",
"pandas.DataFrame.from_dict",
"decisionengine.framework.modules.Transform.consumes",
"decisionengine.framework.modules.Transform.describe"
] |
[((284, 336), 'decisionengine.framework.modules.Transform.consumes', 'Transform.consumes', ([], {'Factory_Entries_AWS': 'pd.DataFrame'}), '(Factory_Entries_AWS=pd.DataFrame)\n', (302, 336), False, 'from decisionengine.framework.modules import Transform\n'), ((338, 431), 'decisionengine.framework.modules.Transform.produces', 'Transform.produces', ([], {'aws_instance_limits': 'pd.DataFrame', 'spot_occupancy_config': 'pd.DataFrame'}), '(aws_instance_limits=pd.DataFrame, spot_occupancy_config=\n pd.DataFrame)\n', (356, 431), False, 'from decisionengine.framework.modules import Transform\n'), ((2413, 2452), 'decisionengine.framework.modules.Transform.describe', 'Transform.describe', (['AWSFactoryEntryData'], {}), '(AWSFactoryEntryData)\n', (2431, 2452), False, 'from decisionengine.framework.modules import Transform\n'), ((663, 677), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (675, 677), True, 'import pandas as pd\n'), ((2371, 2409), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['so_config_dict'], {}), '(so_config_dict)\n', (2393, 2409), True, 'import pandas as pd\n')]
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from mpp.lib.gppkg.gppkg import Gppkg
from mpp.models import SQLTestCase
from tinctest.lib import run_shell_command
class SubtLimitTestCase(SQLTestCase):
"""
@optimizer_mode off
@tags gppkg
"""
sql_dir = 'sql/'
ans_dir = 'expected'
out_dir = 'output/'
@classmethod
def setUpClass(cls):
"""
Checking if plperl package installed, otherwise install the package
"""
super(SubtLimitTestCase, cls).setUpClass()
cmd = 'gpssh --version'
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'check product version', res)
gppkg = Gppkg()
product_version = res['stdout']
gppkg.gppkg_install(product_version, 'plperl')
|
[
"tinctest.lib.run_shell_command",
"mpp.lib.gppkg.gppkg.Gppkg"
] |
[((1257, 1309), 'tinctest.lib.run_shell_command', 'run_shell_command', (['cmd', '"""check product version"""', 'res'], {}), "(cmd, 'check product version', res)\n", (1274, 1309), False, 'from tinctest.lib import run_shell_command\n'), ((1327, 1334), 'mpp.lib.gppkg.gppkg.Gppkg', 'Gppkg', ([], {}), '()\n', (1332, 1334), False, 'from mpp.lib.gppkg.gppkg import Gppkg\n')]
|
import argparse
import random
import collections
import ncbitax
from metax import ioutil
def choose_best_random(counter):
if len(counter) > 1:
most_common = counter.most_common()
n_best_hit = most_common[0][1]
hits = [x for x in most_common if x[1] == n_best_hit]
return random.choice(hits)
elif len(counter) == 1:
return counter.popitem()
def mmseqs_report(args):
db = ncbitax.TaxonomyDb.from_args(args, load_nodes=True, load_names=True, load_merged=True)
total_species_counter = collections.Counter()
total_genus_counter = collections.Counter()
taxid_map = {}
last_read_id = None
species_counter = collections.Counter()
genus_counter = collections.Counter()
with ioutil.compressed_open(args.input, 'rt') as in_f:
for line in in_f:
parts = line.rstrip().split('\t')
read_id, taxid, _, rank, tax_name = parts
taxid = int(taxid)
if read_id != last_read_id:
best = choose_best_random(species_counter)
if best:
total_species_counter[best[0]] += 1
# else:
# total_species_counter['unclassified'] += 1
best = choose_best_random(genus_counter)
if best:
total_genus_counter[best[0]] += 1
# else:
# total_genus_counter['unclassified'] += 1
species_counter = collections.Counter()
genus_counter = collections.Counter()
if rank == 'species':
species_counter[taxid] += 1
elif rank == 'genus':
genus_counter[taxid] += 1
taxid_map[taxid] = tax_name
last_read_id = read_id
total_reads = args.total_reads
species_total = sum(x[1] for x in total_species_counter.items())
genus_total = sum(x[1] for x in total_genus_counter.items())
with ioutil.compressed_open(args.output, 'wt') as out_f:
print('\t'.join(['0', 'unclassified', str((total_reads - species_total) / total_reads), 'species']), file=out_f)
for taxid, n_reads in total_species_counter.most_common():
abundance = n_reads / total_reads
print('\t'.join(str(x) for x in [taxid, taxid_map[taxid], abundance, 'species']), file=out_f)
print('\t'.join(['0', 'unclassified', str((total_reads - genus_total) / total_reads), 'genus']), file=out_f)
for taxid, n_reads in total_genus_counter.most_common():
abundance = n_reads / total_reads
print('\t'.join(str(x) for x in [taxid, taxid_map[taxid], abundance, 'genus']), file=out_f)
def add_command(subparsers):
parser = subparsers.add_parser('mmseqs-report')
parser.add_argument('input', help='Input mmseqs taxonomy tsv output')
parser.add_argument('output', help='Output report')
parser.add_argument('--total-reads', required=True, type=int)
ncbitax.add_taxonomy_arguments(parser)
parser.set_defaults(func=mmseqs_report)
|
[
"ncbitax.add_taxonomy_arguments",
"ncbitax.TaxonomyDb.from_args",
"metax.ioutil.compressed_open",
"random.choice",
"collections.Counter"
] |
[((427, 517), 'ncbitax.TaxonomyDb.from_args', 'ncbitax.TaxonomyDb.from_args', (['args'], {'load_nodes': '(True)', 'load_names': '(True)', 'load_merged': '(True)'}), '(args, load_nodes=True, load_names=True,\n load_merged=True)\n', (455, 517), False, 'import ncbitax\n'), ((543, 564), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (562, 564), False, 'import collections\n'), ((591, 612), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (610, 612), False, 'import collections\n'), ((680, 701), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (699, 701), False, 'import collections\n'), ((722, 743), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (741, 743), False, 'import collections\n'), ((2983, 3021), 'ncbitax.add_taxonomy_arguments', 'ncbitax.add_taxonomy_arguments', (['parser'], {}), '(parser)\n', (3013, 3021), False, 'import ncbitax\n'), ((310, 329), 'random.choice', 'random.choice', (['hits'], {}), '(hits)\n', (323, 329), False, 'import random\n'), ((753, 793), 'metax.ioutil.compressed_open', 'ioutil.compressed_open', (['args.input', '"""rt"""'], {}), "(args.input, 'rt')\n", (775, 793), False, 'from metax import ioutil\n'), ((1973, 2014), 'metax.ioutil.compressed_open', 'ioutil.compressed_open', (['args.output', '"""wt"""'], {}), "(args.output, 'wt')\n", (1995, 2014), False, 'from metax import ioutil\n'), ((1487, 1508), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (1506, 1508), False, 'import collections\n'), ((1541, 1562), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (1560, 1562), False, 'import collections\n')]
|
#!/bin/env python3
from sys import argv, exit
from typing import List
from pre_processing import CorpusHandler
def get_words(text: str) -> List[str]:
fs = [
'to_lower',
'remove_non_pt_chars',
'remove_punctuations',
'remove_numbers',
'remove_extra_spaces',
'remove_small_big_words',
'tokenize'
]
for f in fs:
text = getattr(CorpusHandler, f)(text)
return text
if __name__ == '__main__':
if len(argv) < 2:
print('Incorrect usage! Usage: python3 make_vocabulary.py [corpus]')
exit(1)
with open(argv[1]) as corpus_file:
content = corpus_file.read()
words = set(get_words(content))
vocabulary_path = 'vocabulary.txt'
with open(vocabulary_path, 'w') as vocabulary_file:
vocabulary_file.write('\n'.join(words))
print(f'Vocabulary saved to {vocabulary_path}!')
|
[
"sys.exit"
] |
[((570, 577), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (574, 577), False, 'from sys import argv, exit\n')]
|
import datetime
import fileinput
import glob
import gzip
import multiprocessing
import os
import random # for log file names
import re
import shutil
import subprocess
import sys
import time
import urllib as ul # for removing url style encoding from gff text notes
from pathlib import Path
import configargparse
import pandas as pd
import pandasql as ps
import pysam # sequence format specific module fastq/bam/sam...
import gffpandas.gffpandas as gffpd # annotation format specific module gff3
from fuzzysearch import find_near_matches
pimms_mssg = """
===========================================================================================================
Pragmatic Insertional Mutation Mapping system (PIMMS) mapping pipeline v2
===========================================================================================================
o o
o o
// //
// //
|_||_| |_||_| @@@@@ @@@@@@ @@ @@ @@ @@ @@@@@@ @@@@@@
|@||@| |@||@| @@ @@ @@ @@@@ @@@@ @@@@ @@@@ @@ @@ @@
|@||@| |@||@| @@@@@ @@ @@ @@@ @@ @@ @@@ @@ @@@ @@
|@||@| |@||@| @@ @@ @@ @ @@ @@ @ @@ @@@ @@
|@@@@| |@@@@| @@ @@ @@ @@ @@ @@ @@ @@
|@@@@| |@@@@| @@ @@@@@@ @@ @@ @@ @@ @@@@@@@ @@@@@@@@
===========================================================================================================
PIMMS2 """
pimms_mssg2 = """ mode
===========================================================================================================
"""
class Range(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
def create_folder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory. ' + directory)
def make_results_dirs_in_sam_dir(samfile_path, run_label):
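    """Create the <run_label>_out_dashboard and <run_label>_out_info result directories
    next to the sam/bam file, falling back to timestamped names if they already exist.
    Returns (sam_dir, dashboard_dir, info_dir)."""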
samdirname = os.path.dirname(samfile_path)
results_dir_db = os.path.join(samdirname, run_label + '_out_dashboard')
results_dir_info = os.path.join(samdirname, run_label + '_out_info')
# results_dir_kept = os.path.join(samdirname, run_label + '_out_kept')
if not os.path.exists(results_dir_db):
try:
os.makedirs(results_dir_db)
os.makedirs(results_dir_info)
except OSError:
print("Error while creating result dirs in {samdirname}")
else:
results_dir_db = os.path.join(samdirname, run_label + time.strftime("_%d%m%y_%H%M%S") + '_results_dashboard')
results_dir_info = os.path.join(samdirname, run_label + time.strftime("_%d%m%y_%H%M%S") + '_results_info')
try:
os.makedirs(results_dir_db)
os.makedirs(results_dir_info)
except OSError:
print("Error while creating incremented result dirs in {samdirname}")
return samdirname, results_dir_db, results_dir_info
def delete_file_list(file_list):
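    """Remove every file in file_list, reporting (but not raising) any OSError."""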
for file_path in file_list:
try:
os.remove(file_path)
except OSError:
print("Error while deleting file {filePath}")
def extant_file(x):
"""
'Type' for argparse - checks that file exists but does not open.
"""
if not os.path.exists(x):
# Argparse uses the ArgumentTypeError to give a rejection message like:
# error: argument input: x does not exist
raise configargparse.ArgumentTypeError("{0} does not exist".format(x))
return x
def prog_in_path_check(prog_to_check):
if shutil.which(prog_to_check):
print('required mapper is in the path : ' + prog_to_check)
else:
sys.exit('\nERROR: ' + prog_to_check +
' cannot be found in the path. \nSYS.EXIT: Please check your environment and ensure ' + prog_to_check +
' is installed and available before trying again.\n\n')
def concat_fastq_raw(flanking_fastq_list, label, fq_file_suffix, concat_out_dir):
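    """Concatenate the per-input flanking-read fastq files into one gzipped fastq
    (<label>_RX_concat<suffix>.gz) in concat_out_dir, removing the intermediate files
    unless --keep was given. Returns the path of the concatenated file."""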
concat_fastq_result_filename = os.path.join(concat_out_dir, label + '_RX_concat' + fq_file_suffix + '.gz')
print(concat_fastq_result_filename)
print(" ".join(flanking_fastq_list))
with gzip.open(concat_fastq_result_filename, "wt", compresslevel=6) as big_file:
with fileinput.input(files=flanking_fastq_list) as inputs:
for line in inputs:
big_file.write(line)
if not parsed_args[0].keep:
print('Removing intermediate fastq flanking reads files')
# print(flanking_fastq_list)
delete_file_list(flanking_fastq_list)
return concat_fastq_result_filename
############################
# FIND_FLANK FUNCTIONS:
############################
def find_read_files_with_glob(indir, wildcards):
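    """Return the first non-empty glob match of indir + wildcard; exit if no read files are found."""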
for suffix_wc in wildcards:
read_files = glob.glob(indir + suffix_wc)
if len(read_files):
return read_files
sys.exit("SYS EXIT: unable to find read files, check file suffixes match permissible: " + wildcards + '\n')
def merge_logs(log_path):
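    """Merge the per-process log_*txt tables, append a COMBINED totals row,
    write result_summary.txt one level up and return the merged dataframe."""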
log_files = glob.glob(os.path.join(log_path, "log_*txt"))
df_from_each_log = (pd.read_table(f) for f in log_files)
merged_logs_df = pd.concat(df_from_each_log, ignore_index=True)
merged_logs_df = merged_logs_df.sort_values(by=['fq_filename'])
log_sums = merged_logs_df.sum(numeric_only=True)
log_sums['fq_filename'] = 'COMBINED'
merged_logs_df = merged_logs_df.append(log_sums, ignore_index=True)
merged_logs_df.to_csv(os.path.join(log_path, "..", 'result_summary.txt'), sep='\t', index=False)
print(merged_logs_df.to_string(index=False))
return merged_logs_df
def run_minimap2(flanking_fastq_concat_result, sam_output_result, genome_fasta):
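    """Map the concatenated flanking reads to the reference fasta with minimap2
    (map-ont preset for nanopore data, sr otherwise), writing SAM output."""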
stream = os.popen('minimap2 --version')
output = stream.read()
print('calling minimap version: ' + output)
# process = subprocess.Popen(['minimap2', '--version'],
    if parsed_args[0].nano:
        mm_mode = 'map-ont'
    else:
        mm_mode = 'sr'
    print(' '.join(['minimap2', '-x', mm_mode, '-a',
                    '-y',  # -y copies the fastq comment into the sam output
                    '-o', sam_output_result, genome_fasta, flanking_fastq_concat_result,
                    '--secondary=no', '--sam-hit-only']))
process = subprocess.Popen(
['minimap2', '-x', mm_mode,
'-a',
'-y', # -y adds fastq comment to sam
'-o', sam_output_result,
genome_fasta,
flanking_fastq_concat_result,
'--secondary=no', '--sam-hit-only'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
def run_bwa(flanking_fastq_concat_result, sam_output_result, genome_fasta, ncpus):
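    """Map the concatenated flanking reads with bwa mem (building a BWA index beside
    the reference fasta if one is not already present), writing SAM output."""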
bwa_index_dir = Path(genome_fasta).stem + '_index'
if not os.path.exists(os.path.join(bwa_index_dir, Path(genome_fasta).name + '.sa')):
print('Creating BWA index...')
create_folder(bwa_index_dir)
fasta_to_index = os.path.join(bwa_index_dir, Path(genome_fasta).name)
shutil.copyfile(genome_fasta, fasta_to_index)
process = subprocess.Popen(
['bwa', 'index',
fasta_to_index],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
else:
print('Using existing BWA index...')
    print(' '.join(['bwa', 'mem', '-t', str(ncpus), '-C', '-o', sam_output_result,
                    os.path.join(bwa_index_dir, Path(genome_fasta).name), flanking_fastq_concat_result]))
# with open(sam_output_result, 'w') as f:
process = subprocess.Popen(
['bwa', 'mem',
'-t', str(ncpus), "-C",
'-o', sam_output_result,
os.path.join(bwa_index_dir, Path(genome_fasta).name),
flanking_fastq_concat_result],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
def py_sam_to_bam(sam_output_result):
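    """Sort and index the SAM as BAM with pysam, print brief flagstat mapping stats,
    delete the intermediate SAM unless --keep was given and return the BAM path."""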
bam_output_result = re.sub('.sam', '.bam', sam_output_result)
    # possible addition: remove unmapped reads, based on: | samtools view -F 4 -o onlyMapped.bam
# noinspection PyUnresolvedReferences
pysam.sort('-O' 'BAM', "-o", bam_output_result, sam_output_result)
# noinspection PyUnresolvedReferences
pysam.index(bam_output_result)
print('\nMapping stats (flagstat):\n')
# noinspection PyUnresolvedReferences
for fsline in pysam.flagstat(bam_output_result).splitlines()[:5]:
print(fsline)
print('\n\n')
# if parsed_args[0].rmfiles:
if not parsed_args[0].keep:
delete_file_list([sam_output_result])
return bam_output_result
def pimms_fastq(fq_filename, fqout_filename, out_dir_logs, nano):
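    """Scan one fastq (or fasta) file for the transposon end motifs (motif1/motif2 and their
    reverse complements) using fuzzy matching, extract the flanking genomic sequence from each
    qualifying read (reverse complementing where necessary), write reads within the configured
    length limits to fqout_filename and record per-category match counts in a log file under
    out_dir_logs."""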
trans = str.maketrans('ATGCN', 'TACGN') # complement DNA lookup
qry1 = parsed_args[0].motif1[0].strip("'\"")
qry2 = parsed_args[0].motif2[0].strip("'\"")
# print(str(qry1))
# print(str(qry2))
# revcomp using maketrans lookup and a string reverse
qry1rc = qry1.translate(trans)[::-1] # reverse complement transposon motif1 ([::-1] -> reverse)
qry2rc = qry2.translate(trans)[::-1] # reverse complement transposon motif2
# print(str(qry1rc))
# print(str(qry2rc))
# if parsed_args[0].nano: # nano == True
if nano: # nano == True
# parsed_args[0].noreps = True
# print('nano == True\n')
fuzzy_levenshtein = True
l_dist = parsed_args[0].lev[0] # maximum Levenshtein Distance
# min_length = 50
# max_length = 200
min_length = parsed_args[0].min[0]
max_length = parsed_args[0].max[0]
qual_char = parsed_args[0].qual_char
print('Nanopore appropriate settings: Levenshtein distance of ' + str(l_dist)
+ ' + sequence length min = ' + str(min_length) + ', max = ' + str(max_length))
else:
# print('nano == False\n')
# fuzzy_levenshtein = False
subs = parsed_args[0].sub[0]
        l_dist = parsed_args[0].lev[0]  # maximum Levenshtein Distance
fuzzy_levenshtein = bool(l_dist)
insrt = parsed_args[0].insert[0]
dels = parsed_args[0].deletion[0]
min_length = parsed_args[0].min[0]
max_length = parsed_args[0].max[0]
# print('standard settings\n')
print('illumina settings: Levenshtein distance of ' + str(l_dist)
+ ' + sequence length min = ' + str(min_length) + ', max = ' + str(max_length))
count = 0
countq1 = 0
countq2 = 0
countq1q2 = 0
countq1rc = 0
countq2rc = 0
# countq1rcq2rc = 0
hit_but_short_q1_q2 = 0
hit_q1_q2 = 0
countq2rcq1rc = 0
hit_but_short_q2rc_q1rc = 0
hit_q2rc_q1rc = 0
wrongq2q1 = 0
wrongq1rcq2rc = 0
countqqrc = 0
countqmulti = 0
hit_but_short_q1_only = 0
hit_q1_only = 0
hit_but_short_q1rc_only = 0
hit_q1rc_only = 0
hit_but_short_q2_only = 0
hit_q2_only = 0
hit_but_short_q2rc_only = 0
hit_q2rc_only = 0
# is_contam = 0
# reject_reads_list = []
# reject_reads_dict = dict()
    # To resolve/reharmonise: different processing code for nanopore and Illumina input files
if nano:
with pysam.FastxFile(fq_filename, persist=False) as fin, open(fqout_filename, mode='wt') as fout:
print(fq_filename, ' ==>\n\t##\t##\t', fqout_filename, '\n')
for entry in fin:
count += 1
# print(str(count) + '\n')
if not fuzzy_levenshtein:
# print('find_near_matches \n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2 = find_near_matches(qry2, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
else:
# print('find_near_matches lev\n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_l_dist=l_dist)
matchesq2 = find_near_matches(qry2, entry.sequence, max_l_dist=l_dist)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_l_dist=l_dist)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_l_dist=l_dist)
if not bool(matchesq1 + matchesq2 + matchesq1rc + matchesq2rc):
# print(matchesq1 + matchesq2 + matchesq1rc + matchesq1rc)
# reject_reads_dict.update({entry.name: 'nomatch'})
continue
# skip fastq entry if multiple matches to same motif query seq
if len(matchesq1) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq1rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
# skip fastq entry if multiple matches to same motif query direct and reverse complement
if (len(matchesq1) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# or matches to two incompatible motifs
if (len(matchesq1) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# process motif match pairs to extract target sequences
if (len(matchesq1) == 1) and (len(matchesq2) == 1):
countq1q2 += 1
captured_seqstring = str(entry.sequence)[matchesq1[0].end:matchesq2[0].start]
captured_qualstring = str(entry.quality)[matchesq1[0].end:matchesq2[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if matchesq2[0].start <= matchesq1[0].end:
wrongq2q1 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'ooorder'})
continue
if len(captured_seqstring) >= min_length:
hit_q1_q2 += 1
# print('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_q2 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# break
if (len(matchesq1rc) == 1) and (len(matchesq2rc) == 1):
countq2rcq1rc += 1
captured_seqstring = str(entry.sequence)[matchesq2rc[0].end:matchesq1rc[0].start]
captured_qualstring = str(entry.quality)[matchesq2rc[0].end:matchesq1rc[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if matchesq1rc[0].start <= matchesq2rc[0].end:
wrongq1rcq2rc += 1
# reject_reads_dict.update({entry.name: 'ooorder'})
if len(captured_seqstring) >= min_length:
hit_q2rc_q1rc += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_q1rc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# process single motif matches to extract target sequences
if len(matchesq1) == 1:
countq1 += 1
captured_seqstring = str(entry.sequence)[
matchesq1[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq1[0].end:]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q1_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2rc) == 1:
countq2rc += 1
captured_seqstring = str(entry.sequence)[
matchesq2rc[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq2rc[0].end:]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q2rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq1rc) == 1:
countq1rc += 1
captured_seqstring = str(entry.sequence)[
0:matchesq1rc[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq1rc[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q1rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q1rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2) == 1:
countq2 += 1
captured_seqstring = str(entry.sequence)[
0:matchesq2[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq2[0].start]
if len(captured_qualstring) < 5:
captured_qualstring = qual_char * len(captured_seqstring)
if len(captured_seqstring) >= min_length:
hit_q2_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q2_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
else:
with pysam.FastxFile(fq_filename, persist=False) as fin, open(fqout_filename, mode='wt') as fout:
print(fq_filename, ' ==>\n\t\t\t', fqout_filename, '\n')
for entry in fin:
count += 1
if not fuzzy_levenshtein:
# print('find_near_matches \n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2 = find_near_matches(qry2, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_substitutions=subs, max_deletions=dels,
max_insertions=insrt)
else:
# print('find_near_matches lev\n')
matchesq1 = find_near_matches(qry1, entry.sequence, max_l_dist=l_dist)
matchesq2 = find_near_matches(qry2, entry.sequence, max_l_dist=l_dist)
matchesq1rc = find_near_matches(qry1rc, entry.sequence, max_l_dist=l_dist)
matchesq2rc = find_near_matches(qry2rc, entry.sequence, max_l_dist=l_dist)
if not bool(matchesq1 + matchesq2 + matchesq1rc + matchesq2rc):
# print(matchesq1 + matchesq2 + matchesq1rc + matchesq1rc)
# reject_reads_dict.update({entry.name: 'nomatch'})
continue
# skip fastq entry if multiple matches to same motif query seq
if len(matchesq1) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq1rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
if len(matchesq2rc) > 1:
countqmulti += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multi'})
continue
# skip fastq entry if multiple matches to same motif query direct and reverse complement
if (len(matchesq1) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# or matches to two incompatible motifs
if (len(matchesq1) == 1) and (len(matchesq2rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
if (len(matchesq2) == 1) and (len(matchesq1rc) == 1):
countqqrc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'multicomp'})
continue
# process motif match pairs to extract target sequences
if (len(matchesq1) == 1) and (len(matchesq2) == 1):
countq1q2 += 1
captured_seqstring = str(entry.sequence)[matchesq1[0].end:matchesq2[0].start]
captured_qualstring = str(entry.quality)[matchesq1[0].end:matchesq2[0].start]
if matchesq2[0].start <= matchesq1[0].end:
wrongq2q1 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'ooorder'})
continue
if len(captured_seqstring) >= min_length:
hit_q1_q2 += 1
# print('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_q2 += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# break
if (len(matchesq1rc) == 1) and (len(matchesq2rc) == 1):
countq2rcq1rc += 1
captured_seqstring = str(entry.sequence)[matchesq2rc[0].end:matchesq1rc[0].start]
captured_qualstring = str(entry.quality)[matchesq2rc[0].end:matchesq1rc[0].start]
if matchesq1rc[0].start <= matchesq2rc[0].end:
wrongq1rcq2rc += 1
# reject_reads_dict.update({entry.name: 'ooorder'})
if len(captured_seqstring) >= min_length:
hit_q2rc_q1rc += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_q1rc += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# process single motif matches to extract target sequences
if len(matchesq1) == 1:
countq1 += 1
captured_seqstring = str(entry.sequence)[
matchesq1[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq1[0].end:]
if len(captured_seqstring) >= min_length:
hit_q1_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q1_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2rc) == 1:
countq2rc += 1
captured_seqstring = str(entry.sequence)[
matchesq2rc[0].end:] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
matchesq2rc[0].end:]
if len(captured_seqstring) >= min_length:
hit_q2rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[0:max_length] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[0:max_length] + '\n')
continue
else:
hit_but_short_q2rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq1rc) == 1:
countq1rc += 1
captured_seqstring = str(entry.sequence)[
0:matchesq1rc[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq1rc[0].start]
if len(captured_seqstring) >= min_length:
hit_q1rc_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q1rc_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
if len(matchesq2) == 1:
countq2 += 1
captured_seqstring = str(entry.sequence)[
0:matchesq2[0].start] # nothing after colon indicates end of string
captured_qualstring = str(entry.quality)[
0:matchesq2[0].start]
if len(captured_seqstring) >= min_length:
hit_q2_only += 1
# fout.write('@' + str(entry.name) + ' ' + str(entry.comment) + '\n')
fout.write('@' + str(entry.name) + ' ' + 'CO:Z:' + str(
entry.comment) + '\n') # make comment bam compatible
fout.write(captured_seqstring[-max_length:].translate(trans)[::-1] + '\n')
fout.write('+' + '\n')
fout.write(captured_qualstring[-max_length:][::-1] + '\n')
continue
else:
hit_but_short_q2_only += 1
# reject_reads_list.append(entry.name)
# reject_reads_dict.update({entry.name: 'short'})
continue
# print("\n" + fq_filename + " ->>\n" + fqout_filename + "#####################@@@@@@@@@@@@@@@@@@@@\n")
# print('#######################################################~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~log1\n')
# very cryptic logging needs reorganising and fixing to work with multiprocessing
# note added random int to get almost unique log file names need to find fix
log_file = open(
f'{out_dir_logs}/log_{os.getppid()}_{multiprocessing.current_process().pid}_{random.randint(1000, 9999)}.txt', 'w'
)
try:
# print(fq_filename, fqout_filename, 'logfail2\n')
log_file.write(
f'fq_filename\tread count:\tmultiple copies of a motif:\tmismatched motifs:\tboth motifs (fwd|revcomp):\t'
            f'both motifs (fwd|revcomp) >= {min_length}:\tsingle motif >= {min_length}:\ttotal passed\n')
log_file.write(f'{os.path.basename(fq_filename)}\t')
log_file.write(f'{count}\t')
log_file.write(f'{countqmulti}\t')
log_file.write(f'{countqqrc}\t')
log_file.write(
f'{hit_q1_q2 + hit_q2rc_q1rc + hit_but_short_q1_q2 + hit_but_short_q2rc_q1rc}\t')
log_file.write(f'{hit_q1_q2 + hit_q2rc_q1rc}\t')
log_file.write(f'{hit_q1_only + hit_q2_only + hit_q1rc_only + hit_q2rc_only}\t')
log_file.write(f'{hit_q1_only + hit_q2_only + hit_q1rc_only + hit_q2rc_only + hit_q1_q2 + hit_q2rc_q1rc}\n')
except Exception as e:
# Write problems to the error file
log_file.write(f'ERROR: {e} problem with motif matching to {fq_filename}!\n')
finally:
# Close the files!
log_file.close()
# def survey_fastq(resultx_reads_list, resultx_reads_dict, fqout):
def survey_fastq(resultx_reads_list, fqout):
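    """Append the read names found in fqout to resultx_reads_list."""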
with pysam.FastxFile(fqout, persist=False) as fh:
for entry in fh:
resultx_reads_list.append(entry.name)
# resultx_reads_dict[entry.name] = len(str(entry.sequence))
############################
# FIND_FLANK FUNCTIONS end
############################
############################
# SAM_COORDS FUNCTIONS:
############################
def process_gff(gff_file, gff_feat_type, gff_extra, rdir):
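    """Read the GFF3 annotation, keep features of the requested type, write a *_pimms_features
    gff copy to rdir, expand the column 9 attributes into columns (adding feat_length and
    URL-decoding product plus any requested extra fields) and return
    (gff_columns_addback, attr_to_columns)."""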
annotation = gffpd.read_gff3(gff_file)
annotation = annotation.filter_feature_of_type(gff_feat_type)
gff_stem, gff_ext = os.path.splitext(os.path.basename(gff_file))
if gff_feat_type[0] == "pseudogene":
annotation.to_gff3(os.path.join(rdir, gff_stem + '_pimms_features_pseudogene.gff'))
else:
annotation.to_gff3(os.path.join(rdir, gff_stem + '_pimms_features.gff'))
# break 9th gff column key=value pairs down to make additional columns
attr_to_columns = annotation.attributes_to_columns()
if attr_to_columns.empty:
# return empty dataframes if no rows of required type are found print('attr_to_columns is empty!')
return attr_to_columns, attr_to_columns
attr_to_columns = attr_to_columns.assign(
feat_length=(attr_to_columns.end - attr_to_columns.start + 1)).dropna(axis=1,
how='all').drop(columns=['attributes'])
data_top = attr_to_columns.head()
print(data_top)
# remove RFC 3986 % encoding from product (gff3 attribute)
# attr_to_columns = attr_to_columns.assign(product_nopc=attr_to_columns['product'].apply(ul.parse.unquote)).drop(
# columns=['product']).rename(columns={'product_nopc': 'product'})
# attr_to_columns['product'] = attr_to_columns['product'].apply(ul.parse.unquote)
if 'product' not in attr_to_columns:
attr_to_columns['product'] = '-'
else:
attr_to_columns['product'] = attr_to_columns['product'].fillna('').astype(str).apply(ul.parse.unquote) # added fix for None datatype
# fix to skip requested extra gff annotation field if not present in GFF
drop_gff_extra = []
for field in gff_extra:
if field not in attr_to_columns:
print("Warning: Unable to find '" + field + "' in " + str(gff_file) + ' file, continuing...')
drop_gff_extra.append(field)
gff_extra = [item for item in gff_extra if item not in drop_gff_extra]
# Remove URL character encoding from columns (skipping translation if present as this breaks the decoding
for field in gff_extra:
if field == 'translation':
continue
else:
attr_to_columns[field] = attr_to_columns[field].fillna('').astype(str).apply(ul.parse.unquote) # added fix for None datatype
gff_columns_addback = attr_to_columns[['seq_id',
'ID', # additional hopefully unique feature ID
'locus_tag',
'type',
'gene',
'start',
'end',
'feat_length',
'product'] + gff_extra].copy() # add extra fields from gff
# fix to remove na values and allow joining with processed data also processed with fillna to allow group_by usage
# note .copy on previous line
gff_columns_addback.fillna('', inplace=True)
data_top = gff_columns_addback.head()
print(data_top)
data_top2 = attr_to_columns.head()
print(data_top2)
return gff_columns_addback, attr_to_columns
# end process_gff()
def modify_sam_stem(sam_file, min_depth_cutoff, fraction_mismatch):
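    """Build the output file stem from the sam/bam basename plus the minimum depth (_md)
    and mismatch fraction (_mm) settings."""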
sam_stem, sam_ext = os.path.splitext(os.path.basename(sam_file))
sam_stem: str = sam_stem + '_md' + str(min_depth_cutoff) + '_mm' + str(fraction_mismatch or '0')
return sam_stem
def process_sam(sam_file, min_depth_cutoff, fraction_mismatch):
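    """Write <stem>.bed with one entry per mapped read and <stem>_insert_coords.txt with the
    inferred insertion coordinate for each read (reference start + 4 on the + strand,
    reference end - 4 on the - strand), its read group and its fastq comment."""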
sam_stem = modify_sam_stem(sam_file, min_depth_cutoff, fraction_mismatch)
samfile = pysam.AlignmentFile(sam_file) # without , "rb" should auto detect sam or bams
open(sam_stem + ".bed", 'w').close()
f = open(sam_stem + ".bed", "a")
strand = ["+", "-"]
for read in samfile.fetch():
if read.is_unmapped:
continue
read_str = strand[int(read.is_reverse)]
read_bed = [read.reference_name, read.pos, read.reference_end, ".", read.mapping_quality, read_str, '# ' + read.query_name] # read group added
f.write('\t'.join([str(i) for i in read_bed]))
f.write('\n')
f.close()
print('#BED')
samfile.close()
samfile = pysam.AlignmentFile(sam_file)
open(sam_stem + "_insert_coords.txt", 'w').close()
f2 = open(sam_stem + "_insert_coords.txt", "a")
f2.write('\t'.join([str(i) for i in ['ref_name', 'coord', 'strand', 'read_name', 'read_grp', 'read_comment']]))
# f2.write('\t'.join([str(i) for i in ['ref_name', 'coord', 'strand', 'read_name', 'read_grp']]))
f2.write('\n')
strand = ["+", "-"]
for read in samfile.fetch():
if read.is_unmapped:
continue
# print(read.query_name + '.')
# continue
nm_value = read.get_tag('NM')
if fraction_mismatch: # and NM_value > 0:
if (read.query_alignment_length * fraction_mismatch[0]) > nm_value:
continue
read_str = strand[int(read.is_reverse)] # coverts is_reverse boolean into + or - strings
# print(STR)
# continue
if read_str == '+':
read_coords = [read.reference_name, (read.reference_start + 4), read_str, '# ' + read.query_name, ':'.join(read.query_name.split(':', 4)[:4]),
read.get_tag("CO").split(":")[-1]] # add fq comment sample id number
# ':'.join(read.query_name.split(':', 4)[:4])]
f2.write('\t'.join([str(i) for i in read_coords]))
f2.write('\n')
if read_str == '-':
read_coords = [read.reference_name, (read.reference_end - 4), read_str, '# ' + read.query_name, ':'.join(read.query_name.split(':', 4)[:4]),
read.get_tag("CO").split(":")[-1]] # add fq comment sample id number
# ':'.join(read.query_name.split(':', 4)[:4])]
f2.write('\t'.join([str(i) for i in read_coords]))
f2.write('\n')
f2.close()
samfile.close()
print('#COORDS')
# end process_sam()
def seqid_consistancy_check(mygffcolumns, my_sam):
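    """Check that the sequence IDs in the sam/bam header match those in the GFF;
    warn when --gff_force is set, otherwise exit on a mismatch."""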
af = pysam.AlignmentFile(my_sam)
sam_seq_id_list = [name['SN'] for name in af.header['SQ']]
gff_seq_id_list = mygffcolumns.seq_id.unique().tolist()
sam_seq_id_list.sort()
gff_seq_id_list.sort()
if sam_seq_id_list == gff_seq_id_list:
print('GFF & mapping reference sequence IDs match')
elif parsed_args[0].gff_force:
print('\nWARNING: GFF & mapping reference sequence IDs are inconsistent. \n' +
'sequence ID mismatch overridden by --gff_force\ngff:\n' +
str(gff_seq_id_list) + '\nsam/bam:\n' + str(sam_seq_id_list) + '\n')
else:
sys.exit(
'\nERROR: GFF & mapping reference sequence IDs are inconsistent. \n' +
'SYS.EXIT: Please check and update the sequence IDs in your sequence and gff files so they match up before running again.\ngff:\n' +
str(gff_seq_id_list) + '\nsam/bam:\n' + str(sam_seq_id_list) + '\n' +
'NOTE: If the sequence ID mismatch is benign e.g. an extra plasmid/contig, override by using --gff_force with bam_extract/full_process\n')
print(type(sam_seq_id_list))
print(type(gff_seq_id_list))
print(sam_seq_id_list)
print(gff_seq_id_list)
def coordinates_to_features_reps(sam_stem, attr_to_columns, condition_label):
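    """Derive per-replicate (mutant pool) insertion counts per feature by grouping the insertion
    coordinates on the fastq comment or read group, then joining them to the feature coordinates
    with pandasql. Returns an empty dataframe when replicate samples cannot be resolved."""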
coord_reps_df = pd.read_csv(sam_stem + "_insert_coords.txt", sep='\t', dtype={'ref_name': "str",
'coord': "int64",
'read_comment': "str",
# adding miseq support
'read_grp': "str"})
read_grps = sorted(coord_reps_df.read_grp.unique())
read_comments = sorted(coord_reps_df.read_comment.unique())
if (len(read_comments) > 20):
print(
"Warning: Unable to resolve different samples in fastq/sam/bam data (apparently too many?), continuing without replicate insertion counts" +
"\nNote: This may be due to old style Illumina header lines" +
"\n if this is an error the software will need updating to recognise different fastq header formatting conventions\n")
# returning an empty dataframe
return pd.DataFrame()
print(str(len(read_grps)) + ' readgroups found:')
print('\n'.join(read_grps))
print(str(len(read_comments)) + ' sample comments found:')
print(', '.join(read_comments))
if (len(read_grps) < len(read_comments)) & (len(read_comments) >= 3):
coord_counts_reps_df = coord_reps_df.drop('read_grp', 1).rename(columns={"read_comment": 'sample_info'},
inplace=False).groupby(["ref_name",
"coord",
'sample_info']).size().reset_index(
name=condition_label + '_') # adding miseq support
print(str(len(read_comments)) + " sample replicates/mutant pools established")
# print(coord_counts_reps_df.head())
elif (len(read_grps) >= 3) & (len(read_comments) < len(read_grps)):
coord_counts_reps_df = coord_reps_df.drop('read_comment', 1).rename(columns={"read_grp": 'sample_info'},
inplace=False).groupby(["ref_name",
"coord",
"sample_info"]).size().reset_index(
name=condition_label + '_') # adding miseq support
print(str(len(read_grps)) + " sample replicates/mutant pools established")
# print(coord_counts_reps_df.head())
# if max(len(read_grps), len(read_comments)) < 3:
else:
print(
"Warning: Unable to resolve >= 3 samples in fastq/sam/bam data, continuing without replicate insertion counts" +
"\nN.B: If this is an error the software may need updating to recognise novel fastq naming conventions")
# returning an empty dataframe
return pd.DataFrame()
coord_df_pivot = coord_counts_reps_df.copy(deep=False).pivot_table(index=["ref_name", "coord"],
columns=['sample_info'],
values=[condition_label + '_'],
fill_value=0).reset_index()
coord_df_pivot.columns = [''.join(col).strip() for col in coord_df_pivot.columns.values]
sample_grps = sorted(coord_counts_reps_df.sample_info.unique())
old_rep_names = [condition_label + '_' + str(x) for x in sample_grps]
new_rep_names = [condition_label + '_' + "MP" + str(x) for x in range(1, len(sample_grps) + 1)]
coord_df_pivot.rename(columns=dict(zip(old_rep_names, new_rep_names)), inplace=True)
attr_to_columns_short = attr_to_columns[["seq_id", "start", "end"]]
sqlcode = '''
select coord_df_pivot.*
,attr_to_columns_short.*
from attr_to_columns_short
left join coord_df_pivot
on coord_df_pivot.coord between attr_to_columns_short.start and attr_to_columns_short.end
where coord_df_pivot.ref_name like '%' || attr_to_columns_short.seq_id || '%'
'''
    # sqlite string concatenation uses '||' (not '+'); '%' == wildcard - double-check the effect of this
# this line should allow multi contig files
mp_coords_join_gff = ps.sqldf(sqlcode, locals())
# remove first 2 columns ref_name, coord sums the rest according to the feature coordinate groups
mp_reps_feature_counts = mp_coords_join_gff.drop(mp_coords_join_gff.columns[[0, 1]], axis=1).groupby(
['seq_id', 'start', 'end']).agg(["sum"]).reset_index()
mp_reps_feature_counts.columns = mp_reps_feature_counts.columns.get_level_values(0)
return mp_reps_feature_counts
def coordinates_to_features(sam_stem, attr_to_columns, gff_columns_addback, condition_label, min_depth_cutoff,
gff_extra, db_rdir):
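    """Count insertions per coordinate (keeping those >= min_depth_cutoff), write them out as a
    GFF of insertion sites, join the coordinates to the annotated features with pandasql, express
    each insertion position as a percentile of feature length and return a per-feature results
    table including NRM/NIM scores (column names prefixed with condition_label)."""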
coord_df = pd.read_csv(sam_stem + "_insert_coords.txt", sep='\t', dtype={'ref_name': "str", 'coord': "int64"})
coord_counts_df = coord_df.groupby(['ref_name', 'coord']).size().reset_index(name='counts')
# print(coord_counts_df.head())
number_of_insertion_sites = len(coord_counts_df)
number_of_reads_mapped = coord_counts_df['counts'].sum()
min_reads_at_site = coord_counts_df['counts'].min()
max_reads_at_site = coord_counts_df['counts'].max()
median_reads_at_site = round(coord_counts_df['counts'].median(), 2)
mean_insertion_site_depth = round(number_of_reads_mapped / number_of_insertion_sites, 2)
coord_counts_df = coord_counts_df[coord_counts_df['counts'] >= min_depth_cutoff]
# format insertion site info as a GFF
coord_counts_df_pimms2_gff = coord_counts_df.reset_index()
coord_counts_df_pimms2_gff['source'] = 'pimms2'
coord_counts_df_pimms2_gff['feature_type'] = 'misc_feature'
coord_counts_df_pimms2_gff['strand'] = '.'
coord_counts_df_pimms2_gff['phase'] = '.'
coord_counts_df_pimms2_gff['stop'] = coord_counts_df_pimms2_gff['coord']
coord_counts_df_pimms2_gff = coord_counts_df_pimms2_gff.rename(columns={'counts': 'score', 'coord': 'start'})
coord_counts_df_pimms2_gff['info'] = 'note=insertion;'
coord_counts_df_pimms2_gff = coord_counts_df_pimms2_gff[
['ref_name', 'source', 'feature_type', 'start', 'stop', 'score', 'strand', 'phase', 'info']]
print(coord_counts_df_pimms2_gff.head())
coord_counts_df_pimms2_gff.to_csv(os.path.join(db_rdir, condition_label + "_pimms_insert_coordinates" + ".gff"), index=False, sep='\t',
header=False)
# added .loc to fix warning
# SettingWithCopyWarning:
# A value is trying to be set on a copy of a slice from a DataFrame.
# Try using .loc[row_indexer,col_indexer] = value instead
# coord_counts_df = \
coord_counts_df.loc[:, 'between_insertion_gap'] = coord_counts_df.groupby(['ref_name'])['coord'].diff()
# coord_counts_df = coord_counts_df.loc[:, 'between_insertion_gap'] = coord_counts_df['coord'].diff()
# coord_counts_gt1_df = coord_counts_gt1_df({'between_insertion_gap': 0})
min_between_insertion_gap = coord_counts_df['between_insertion_gap'].min()
max_between_insertion_gap = coord_counts_df['between_insertion_gap'].max()
median_between_insertion_gap = coord_counts_df['between_insertion_gap'].median()
mean_between_insertion_gap = round(coord_counts_df['between_insertion_gap'].mean(), 2)
# attr_to_columns.to_csv("attr_to_columns" + ".txt", index=False, sep='\t', header=True)
sqlcode = '''
select coord_counts_df.*
,attr_to_columns.*
from attr_to_columns
left join coord_counts_df
on coord_counts_df.coord between attr_to_columns.start and attr_to_columns.end
where coord_counts_df.ref_name like '%' || attr_to_columns.seq_id || '%'
'''
    # sqlite string concatenation uses '||' (not '+'); '%' == wildcard - double-check the effect of this
# this line should allow multi contig files
coords_join_gff = ps.sqldf(sqlcode, locals())
# debugging save of intermediate data
# coords_join_gff.to_csv("pimms_coords_join_gffstuff" + condition_label + ".txt", index=False, sep='\t', header=False)
# quick dataframe summary
# coords_join_gff.count
# add position as percentile (needs manual confirmation)
coords_join_gff = coords_join_gff.assign(
# python/pandas implementation of PIMMS.pl code to derive insert position as percentile of gene length
# sprintf("%.1f", ((($in-$in_start)+1) / ($in_length/100)));
# sprintf("%.1f", ((($in_stop-$ in) + 1) / ($in_length / 100)));
posn_as_percentile=(((coords_join_gff.coord - coords_join_gff.start) + 1) / (
coords_join_gff.feat_length / 100)).where(
coords_join_gff.strand == '+', ((coords_join_gff.end - coords_join_gff.coord) + 1) / (
coords_join_gff.feat_length / 100))).round({"posn_as_percentile": 1})
print(list(attr_to_columns.columns.values))
# Important fix group by doesn't work -- any rows with nan values get dropped *yikes very bad!!!!*
coords_join_gff.fillna('', inplace=True)
pimms_result_table = coords_join_gff.groupby(
['seq_id', 'ID', # added ID field as unique identifier
'locus_tag', 'type', 'gene', 'start', 'end', 'feat_length', 'product'] + gff_extra).agg(
num_insertions_mapped_per_feat=('counts', 'sum'),
num_insert_sites_per_feat=('counts', 'count'),
first_insert_posn_as_percentile=('posn_as_percentile', 'min'),
last_insert_posn_as_percentile=('posn_as_percentile', 'max')
).reset_index()
# test diagnostic files
#pimms_result_table.to_csv("pimms_coords_join_prt1_" + condition_label + ".txt", index=False, sep='\t', header=True)
pimms_result_table = pimms_result_table.assign(num_insert_sites_per_feat_per_kb=(
(pimms_result_table.num_insert_sites_per_feat / pimms_result_table.feat_length) * 1000),
NRM_score=((pimms_result_table.num_insertions_mapped_per_feat / (
pimms_result_table.feat_length / 1000)) / (
number_of_reads_mapped / 1e6)),
NIM_score=((pimms_result_table.num_insert_sites_per_feat / (
pimms_result_table.feat_length / 1000)) / (
number_of_reads_mapped / 1e6))
).round({'num_insert_sites_per_feat_per_kb': 2, 'NRM_score': 2, 'NIM_score': 2})
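    # NRM_score: reads mapped within the feature per kb of feature length, per million reads mapped overall
    # NIM_score: unique insertion sites within the feature per kb of feature length, per million reads mapped overall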
print(list(pimms_result_table.columns.values))
# test diagnostic files
# pimms_result_table.to_csv("pimms_coords_join_prt2_" + condition_label + ".txt", index=False, sep='\t', header=False)
pimms_result_table = pimms_result_table[['seq_id',
'ID',
'locus_tag',
'type',
'gene',
'start',
'end',
'feat_length',
'product'] + gff_extra +
['num_insertions_mapped_per_feat',
'num_insert_sites_per_feat',
'num_insert_sites_per_feat_per_kb',
'first_insert_posn_as_percentile',
'last_insert_posn_as_percentile',
'NRM_score', # Normalised Reads Mapped
'NIM_score']] # Normalised Insertions Mapped
print(list(pimms_result_table.columns.values))
# pimms_result_table_full gff_columns_addback
navalues = {'num_insertions_mapped_per_feat': int(0),
'num_insert_sites_per_feat': int(0),
'num_insert_sites_per_feat_per_kb': int(0),
'first_insert_posn_as_percentile': int(0),
'last_insert_posn_as_percentile': int(0),
'NRM_score': int(0),
'NIM_score': int(0)}
pimms_result_table_full = pd.merge(gff_columns_addback, pimms_result_table, how='left').fillna(value=navalues)
# test diagnostic files
# gff_columns_addback.to_csv("pimms_coords_join_gff_columns_addback_" + condition_label + ".txt", index=False, sep='\t', header=False)
# pimms_result_table_full.to_csv("pimms_coords_join_prtf1_" + condition_label + ".txt", index=False, sep='\t', header=False)
# if set add prefix to columns
if condition_label:
label_cols = pimms_result_table_full.columns[
pimms_result_table_full.columns.isin(['num_insertions_mapped_per_feat',
'num_insert_sites_per_feat',
'num_insert_sites_per_feat_per_kb',
'first_insert_posn_as_percentile',
'last_insert_posn_as_percentile',
'NRM_score',
'NIM_score'])]
pimms_result_table_full.rename(columns=dict(zip(label_cols, condition_label + '_' + label_cols)),
inplace=True)
return pimms_result_table_full
# end of coordinates_to_features()
############################
# SAM_COORDS FUNCTIONS end
############################
def parse_arguments():
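    """Build the configargparse parser and its sub-command modes
    (find_flank, bam_extract, table_merge, full_process) with their options."""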
ap = configargparse.ArgumentParser( # description='PIMMS2 sam/bam processing',
prog="pimms2",
add_config_file_help=False,
config_file_parser_class=configargparse.DefaultConfigFileParser,
epilog="\n\n*** N.B. This is a development version ***\n \n ",
description='''description here'''
)
ap.add_argument('-v', '--version', action='version', version='%(prog)s 2.1 demo')
modes = ap.add_subparsers(parser_class=configargparse.ArgParser, dest='command')
findflank = modes.add_parser("find_flank", add_config_file_help=False,
help="Mode: find read regions flanking the IS sequence by mapping them to the target genome",
description="Args that start with '--' (eg. --fasta) can also be set in a config file (specified via -c)")
samcoords = modes.add_parser("bam_extract", add_config_file_help=False,
help="Mode: extract insertion site coordinates from sam file",
description="Args that start with '--' (eg. --fasta) can also be set in a config file (specified via -c)")
tablemerge = modes.add_parser("table_merge", add_config_file_help=False,
help='Mode: merge two compatible PIMMS results tables '
'(N.B: this step does a simple table join and does not check the data)').add_mutually_exclusive_group()
fullprocess = modes.add_parser("full_process", add_config_file_help=False,
help="Mode: find_flank + bam_extract",
description="Args that start with '--' (eg. --fasta) can also be set in a config file (specified via -c)")
# FIND_FLANK args
# to fix: nargs='?' deal with mistaken use of nargs=1 which give a single element list
findflank.add_argument("-c", "--config", required=False, is_config_file=True, # dest='config_file',
metavar='pimms2.config',
help="use parameters from config file")
findflank.add_argument("--nano", required=False, action='store_true', default=False,
help="override with settings more suitable for nanopore")
findflank.add_argument("--fasta", required=False, nargs=1, metavar='ref_genome.fasta', type=extant_file,
help="fasta file for reference genome ")
findflank.add_argument("--qual_char", required=False, nargs='?', type=str, default='0', choices=[chr(x + 33) for x in list(range(12, 31))],
help="substitute a quality score ascii character when fasta read files used (nanopore only) (phred +33: ascii +:?) ['0']")
findflank.add_argument("--nomap", required=False, action='store_true', default=False,
help="do not run mapping step")
findflank.add_argument("--mapper", required=False, nargs='?', type=str, default='bwa', choices=['minimap2', 'bwa'],
help="select mapping software from available options")
findflank.add_argument("--single", required=False, action='store_true', default=False,
help="only single direction Illumina data provided")
findflank.add_argument("--keep", required=False, action='store_true', default=False,
help="keep intermediate fastq files etc for diagnostic purposes")
findflank.add_argument("--lev", required=False, nargs=1, type=int, default=[0],
help="use Levenshtein distance (combined insert|del|sub score) [0]")
findflank.add_argument("--sub", required=False, nargs=1, type=int, default=[1],
help="number of permitted base substitutions in motif match [1]")
findflank.add_argument("--insert", required=False, nargs=1, type=int, default=[0],
help="number of permitted base insertions in motif match [0]")
findflank.add_argument("--del", required=False, nargs=1, type=int, default=[0], dest='deletion',
help="number of permitted base insertions in motif match [0]")
findflank.add_argument("--in_dir", required=True, nargs=1, dest='in_dir', type=extant_file,
help="directory containing input fastq files (assumed to match '*q.gz' or '*.fastq')")
findflank.add_argument("--fwdrev", required=False, nargs=1, type=str, default=['_R1_,_R2_'],
help="text substring to uniquely identify illumina fwd/rev paired fastq files ['_R1_,_R2_']")
findflank.add_argument("--out_dir", required=False, nargs=1, metavar='out_dir', default=[''],
action='store',
help="directory to contain result files ['pimms2_`label`_`dmy`_`HMS`']")
findflank.add_argument("--cpus", required=False, nargs=1, type=int, # default=[4],
default=[int(os.cpu_count() / 2)],
help="number of processors to use [(os.cpu_count() / 2)] ")
findflank.add_argument("--max", required=False, nargs=1, type=int, default=[60],
help="clip results to this length [60]")
findflank.add_argument("--min", required=False, nargs=1, type=int, default=[25],
help="minimum read length [25]")
findflank.add_argument("--motif1", required=False, nargs=1, type=str, default=['TCAGAAAACTTTGCAACAGAACC'],
# revcomp: GGTTCTGTTGCAAAGTTTTCTGA
help="IS end reference motif1 [TCAGAAAACTTTGCAACAGAACC](pGh9)")
findflank.add_argument("--motif2", required=False, nargs=1, type=str, default=['GGTTCTGTTGCAAAGTTTAAAAA'],
# revcomp: TTTTTAAACTTTGCAACAGAACC
help="IS end reference motif2 [GGTTCTGTTGCAAAGTTTAAAAA](pGh9)")
findflank.add_argument("--label", required=True, nargs=1, metavar='condition_name', default=[''],
help="identifying text tag to add to results file")
# SAM EXTRACT args
samcoords.add_argument("-c", "--config", required=False, is_config_file=True,
metavar='pimms2.config',
help="use parameters from config file")
samcoords.add_argument("--bam", required=True, nargs=1, metavar='pimms.bam/sam', type=extant_file,
help="bam/sam file of mapped IS flanking sequences ")
samcoords.add_argument("--nano", required=False, action='store_true', default=False,
help="override with settings more suitable for nanopore")
samcoords.add_argument("--label", required=True, nargs=1, metavar='condition_name', default=[''],
help="text tag to add to results file")
samcoords.add_argument("--mismatch", required=False, nargs=1, type=float, metavar='float', default=[None],
choices=[round(x * 0.01, 2) for x in range(0, 21)],
help="fraction of permitted mismatches in mapped read ( 0 <= mismatch < 0.2) [no filter]")
samcoords.add_argument("--min_depth", required=False, nargs=1, type=int, default=[2], metavar='int',
help="minimum read depth at insertion site >= int [2]")
samcoords.add_argument("--noreps", required=False, action='store_true', default=False,
help="do not separate illumina read groups as replicate insertion count columns")
samcoords.add_argument("--gff", required=True, nargs=1, type=extant_file, default='', metavar='genome.gff',
help="GFF3 formatted file to use\n(note fasta sequence present in the file must be deleted before use)")
samcoords.add_argument("--gff_extra", required=False, nargs=1, type=str, default='', metavar="'x,y,z'",
help="comma separated list of extra fields to include from the GFF3 annotation\ne.g. 'ID,translation,note' ")
samcoords.add_argument("--gff_force", required=False, action='store_true', default=False,
help="override GFF/BAM seq id discrepancies e.g. use when the gff has a plasmid not present in the reference sequence or vice-versa")
samcoords.add_argument("--out_fmt", required=False, nargs=1, type=str, default=['xlsx'],
choices=['xlsx', 'tsv', 'csv'],
help="set results table file format tab/comma separated or Excel (tsv|csv|xlsx) [xlsx]")
# TABLE_MERGE args
tablemerge.add_argument("--xlsx", required=False, nargs=2, type=extant_file,
help="2x .xlsx Excel files")
tablemerge.add_argument("--csv", required=False, nargs=2, type=extant_file,
help="2x .csv comma separated text/table files")
tablemerge.add_argument("--tsv", required=False, nargs=2, type=extant_file,
help='2x .tsv tab (\\t) separated text/table files')
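# table_merge expects exactly two result files of the same format, e.g.:
#   pimms2 table_merge --xlsx cond01_countinfo.xlsx cond02_countinfo.xlsx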
# FULL_PROCESS ##########################
fullprocess.add_argument("-c", "--config", required=False, is_config_file=True,
metavar='pimms2_run.config',
help="use parameters from config file")
fullprocess.add_argument("--nano", required=False, action='store_true', default=False,
help="override with settings more suitable for nanopore")
fullprocess.add_argument("--qual_char", required=False, nargs='?', type=str, default='0', choices=[chr(x + 33) for x in list(range(12, 31))],
help="substitute a quality score ascii character when fasta read files used (nanopore only) (phred +33: ascii +:?) ['0']")
fullprocess.add_argument("--fasta", required=False, nargs=1, metavar='ref_genome.fasta', type=extant_file,
help="fasta file for reference genome ")
fullprocess.add_argument("--nomap", required=False, action='store_true', default=False,
help="do not run mapping step")
fullprocess.add_argument("--mapper", required=False, nargs='?', type=str, default='bwa', choices=['minimap2', 'bwa'],
help="select mapping software from available options")
fullprocess.add_argument("--single", required=False, action='store_true', default=False,
help="only single direction Illumina data provided")
fullprocess.add_argument("--keep", required=False, action='store_true', default=False,
help="keep intermediate files for diagnostic purposes")
fullprocess.add_argument("--lev", required=False, nargs=1, type=int, default=[0],
help="use Levenshtein distance (combined insert|del|sub score)")
fullprocess.add_argument("--sub", required=False, nargs=1, type=int, default=[1],
help="number of permitted base substitutions in motif match [1]")
fullprocess.add_argument("--insert", required=False, nargs=1, type=int, default=[0],
help="number of permitted base insertions in motif match [0]")
fullprocess.add_argument("--del", required=False, nargs=1, type=int, default=[0], dest='deletion',
help="number of permitted base insertions in motif match [0]")
fullprocess.add_argument("--in_dir", required=True, nargs=1, dest='in_dir', type=extant_file,
help="directory containing input fastq files (assumed to match '*q.gz' or '*.fastq')")
fullprocess.add_argument("--fwdrev", required=False, nargs=1, type=str, default=['_R1_,_R2_'],
help="text substring to uniquely identify illumina fwd/rev paired fastq files ['_R1_,_R2_']")
fullprocess.add_argument("--out_dir", required=False, nargs=1, metavar='out_dir', default=[''],
action='store',
help="directory to contain result files ['pimms2_`label`_`dmy`_`HMS`']")
fullprocess.add_argument("--cpus", required=False, nargs=1, type=int, # default=int(4),
default=[int(os.cpu_count() / 2)],
help="number of processors to use [(os.cpu_count() / 2)] ")
fullprocess.add_argument("--max", required=False, nargs=1, type=int, default=[60],
help="clip results to this length [60]")
fullprocess.add_argument("--min", required=False, nargs=1, type=int, default=[25],
help="minimum read length [25]")
fullprocess.add_argument("--motif1", required=False, nargs=1, type=str, default=['TCAGAAAACTTTGCAACAGAACC'],
# revcomp: GGTTCTGTTGCAAAGTTTTCTGA
help="IS end reference motif1 [TCAGAAAACTTTGCAACAGAACC](pGh9)")
fullprocess.add_argument("--motif2", required=False, nargs=1, type=str, default=['GGTTCTGTTGCAAAGTTTAAAAA'],
# revcomp: TTTTTAAACTTTGCAACAGAACC
help="IS end reference motif2 [GGTTCTGTTGCAAAGTTTAAAAA](pGh9)")
fullprocess.add_argument("--label", required=True, nargs=1, metavar='condition_name', default=[''],
help="identifying text tag to add to results file")
fullprocess.add_argument("--bam", required=False, nargs=1, metavar='pimms.bam/sam', # type=extant_file,
type=str, default=['bam?'],
help=configargparse.SUPPRESS)
# samcoords.add_argument("--nano", required=False, action='store_true', default=False,
# help="override with settings more suitable for nanopore")
# samcoords.add_argument("--label", required=False, nargs=1, metavar='condition_name', default=[''],
# help="text tag to add to results file")
fullprocess.add_argument("--mismatch", required=False, nargs=1, type=float, metavar='float', default=[None],
choices=[round(x * 0.01, 2) for x in range(0, 21)],
help="fraction of permitted mismatches in mapped read ( 0 <= mismatch < 0.2) [no filter]")
fullprocess.add_argument("--min_depth", required=False, nargs=1, type=int, default=[2], metavar='int',
help="minimum read depth at insertion site >= int [2]")
fullprocess.add_argument("--noreps", required=False, action='store_true', default=False,
help="do not separate illumina read groups as replicate insertion count columns")
fullprocess.add_argument("--gff", required=True, nargs=1, type=extant_file, default='', metavar='genome.gff',
help="GFF3 formatted file to use\n(note fasta sequence present in the file must be deleted before use)")
fullprocess.add_argument("--gff_extra", required=False, nargs=1, type=str, default='', metavar="'x,y,z'",
help="comma separated list of extra fields to include from the GFF3 annotation\ne.g. 'ID,translation,note' ")
fullprocess.add_argument("--gff_force", required=False, action='store_true', default=False,
help="override GFF/BAM seq id discrepancies "
"e.g. use when the gff has a plasmid not present in the reference sequence or vice-versa")
fullprocess.add_argument("--out_fmt", required=False, nargs=1, type=str, default=['xlsx'],
choices=['xlsx', 'tsv', 'csv'],
help="set results table file format tab/comma separated or Excel (tsv|csv|xlsx) [xlsx]")
local_parsed_args = ap.parse_known_args()
print("-----------------")
ap.print_values()
print("-----------------")
# print(ap.format_values())
print(local_parsed_args)
print("-----------------")
#
# exit and print short help message if no mode/arguments supplied
if len(sys.argv) <= 2:
ap.print_usage()
sys.exit(1)
if local_parsed_args[0].command == 'find_flank':
if not local_parsed_args[0].nomap:
prog_in_path_check(local_parsed_args[0].mapper)
# prog_in_path_check('bwa')
if local_parsed_args[0].fasta is None:
ap.error("unless the --nomap flag is used please supply a sequence file e.g: --fasta contigs.fasta")
elif not local_parsed_args[0].label:
ap.error("unless the --nomap flag is used please supply a text label string --label cond_01")
else:
print("reference seq file provided: " + local_parsed_args[0].fasta[0])
# print("##########")
# print(ap.format_values()) # useful for logging where different settings came from
# sys.exit(1)
# print("\n\n\n")
# print(parsed_args[0].command)
# print("----------======")
# print(ap.)
# sys.exit(1)
return local_parsed_args
# elif parsed_args[0].command == 'bam_extract':
def bam_extract_func(parsed_args_be):
print(pimms_mssg + parsed_args_be[0].command + pimms_mssg2)
if parsed_args_be[0].nano:
parsed_args_be[0].noreps = True
# sort out extra requested gff annotation fields
if parsed_args_be[0].gff_extra:
# strip any formatting quotes and turn comma separated string into a list of fields
gff_extra = parsed_args_be[0].gff_extra[0].strip("'\"").split(',')
else:
gff_extra = []
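# e.g. --gff_extra 'ID,translation,note' becomes ['ID', 'translation', 'note'] and those
# GFF attribute fields are carried through into the results table.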
# process the gff file to get required fields
print("extra gff fields: " + str(gff_extra))
gff_file = parsed_args_be[0].gff[0]
gff_feat_type = ['CDS', 'tRNA', 'rRNA']
min_depth_cutoff = parsed_args_be[0].min_depth[0]
fraction_mismatch = parsed_args_be[0].mismatch[0]
sam_file = parsed_args_be[0].bam[0]
condition_label = parsed_args_be[0].label[0]
print("\ncond label " + condition_label + "\n")
# process pimms sam/bam file and produce coordinate / bed files
sam_dir, db_rdir, info_rdir = make_results_dirs_in_sam_dir(sam_file, condition_label)
# process the gff file to get required fields
gff_columns_addback, attr_to_columns = process_gff(gff_file, gff_feat_type, gff_extra, info_rdir)
gff_columns_addback_pseudo, attr_to_columns_pseudo = process_gff(gff_file, ['pseudogene'], [], info_rdir)
seqid_consistancy_check(gff_columns_addback, sam_file)
process_sam(sam_file, min_depth_cutoff, fraction_mismatch)
sam_stem = modify_sam_stem(sam_file, min_depth_cutoff, fraction_mismatch)
# allocate insertions to features and create results merged with GFF
# possibly poor coding to merge with gff here
pimms_result_table_full = coordinates_to_features(sam_stem, attr_to_columns, gff_columns_addback, condition_label,
min_depth_cutoff, gff_extra, db_rdir)
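# For Illumina data each read group is also counted separately so per-replicate insertion
# columns can be reported; --noreps (forced above for nanopore) skips this step.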
# if parsed_args[0].nano:
# print("--noreps forced for nanopore data\n")
if not parsed_args_be[0].noreps:
print("processing read groups as replicates for illumina data\n")
mp_reps_feature_counts = coordinates_to_features_reps(sam_stem, attr_to_columns, condition_label)
if not mp_reps_feature_counts.empty:
merged_with_reps = pimms_result_table_full.merge(mp_reps_feature_counts, on=["seq_id", "start", "end"],
how='outer')
# how='inner')
pimms_result_table_full = merged_with_reps.fillna(0)
else:
print("not processing read groups as replicates\n")
if not gff_columns_addback_pseudo.empty:
tag_pseudogenes = gff_columns_addback_pseudo['locus_tag']
pimms_result_table_full.loc[pimms_result_table_full.locus_tag.isin(tag_pseudogenes), "type"] = \
pimms_result_table_full['type'] + '_pseudo'
# print(parsed_args_be[0].out_fmt[0] + "out_fmt\n")
# write results as text/excel
if parsed_args_be[0].out_fmt[0] == 'tsv':
pimms_result_table_full.to_csv(sam_stem + "_countinfo.tsv", index=False, sep='\t')
elif parsed_args_be[0].out_fmt[0] == 'csv':
pimms_result_table_full.to_csv(sam_stem + "_countinfo.csv", index=False, sep=',')
else:
writer = pd.ExcelWriter(sam_stem + '_countinfo.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
pimms_result_table_full.to_excel(writer, sheet_name='PIMMS2_result', index=False)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
os.rename(sam_stem + "_countinfo." + parsed_args_be[0].out_fmt[0], os.path.join(db_rdir, sam_stem + "_countinfo." + parsed_args_be[0].out_fmt[0]))
os.rename(sam_stem + "_insert_coords.txt", os.path.join(info_rdir, sam_stem + "_insert_coords.txt"))
os.rename(sam_stem + ".bed", os.path.join(info_rdir, sam_stem + ".bed"))
# end bam_extract_func
def table_merge_func(parsed_args_tm):
print(pimms_mssg + parsed_args_tm[0].command + pimms_mssg2)
if parsed_args_tm[0].xlsx:
print("Join: ", parsed_args_tm[0].xlsx[0], "\t", parsed_args_tm[0].xlsx[1], "\n")
# requires the openpyxl dependency to be installed
result_df1 = pd.read_excel(parsed_args_tm[0].xlsx[0], engine="openpyxl")
result_df2 = pd.read_excel(parsed_args_tm[0].xlsx[1], engine="openpyxl")
results_merged = pd.DataFrame.merge(result_df1, result_df2)
writer = pd.ExcelWriter('merged_result.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
results_merged.to_excel(writer, sheet_name='PIMMS2_merged_result', index=False)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
elif parsed_args_tm[0].csv:
print("Join: ", parsed_args_tm[0].csv[0], "\t", parsed_args_tm[0].csv[1], "\n")
result_df1 = pd.read_csv(parsed_args_tm[0].csv[0]).replace('"', '', regex=True)
result_df2 = pd.read_csv(parsed_args_tm[0].csv[1]).replace('"', '', regex=True)
results_merged = pd.DataFrame.merge(result_df1, result_df2)
results_merged.to_csv('merged_result.csv', index=False)
elif parsed_args_tm[0].tsv:
print("Join: ", parsed_args_tm[0].tsv[0], "\t", parsed_args_tm[0].tsv[1], "\n")
result_df1 = pd.read_csv(parsed_args_tm[0].tsv[0], sep="\t")
result_df2 = pd.read_csv(parsed_args_tm[0].tsv[1], sep="\t")
results_merged = pd.DataFrame.merge(result_df1, result_df2)
results_merged.to_csv('merged_result.txt', index=False, sep="\t")
else:
print("\nUnable to merge results tables\n")
parsed_args = parse_arguments() # parse command line arguments
# FIND_FLANK ###
def find_flank_func(parsed_args_ff):
# if parsed_args[0].command == 'find_flank':
print(pimms_mssg + parsed_args_ff[0].command + pimms_mssg2)
# print((vars(parsed_args)))
# sys.exit(1)
# config_file = parsed_args.config_file[0]
# construct config parser
# p2config = configparser.ConfigParser()
mapper = parsed_args_ff[0].mapper
label = parsed_args_ff[0].label[0]
print("\nFF label " + label + "\n")
if parsed_args_ff[0].out_dir[0]:
out_dir_ff = parsed_args_ff[0].out_dir[0]
# print('\ncreating result dir: ' + out_dir + '\n')
else:
out_dir_ff = 'pimms2_' + label + '_' + time.strftime("%d%m%y_%H%M%S")
# print('\ncreating result dir: ' + out_dir + '\n')
# createFolder(out_dir)
if os.path.isdir(out_dir_ff):
print('\nresult dir exists\n')
else:
print('\ncreating result dir: ' + out_dir_ff + '\n')
create_folder(out_dir_ff)
out_dir_logs = os.path.join(out_dir_ff, 'logs')
if os.path.isdir(out_dir_logs): # remove logs from previous runs
shutil.rmtree(out_dir_logs)
create_folder(out_dir_logs)
fwdrev_wc = parsed_args_ff[0].fwdrev[0].strip("'\"").split(',')
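# e.g. the default '_R1_,_R2_' becomes ['_R1_', '_R2_'], the substrings used below to pair
# forward and reverse Illumina read files.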
# exit(0)
# print(pimms_mls)
# dir(parsed_args[0].cpus[0])
# sys.exit(1)
ncpus = int(parsed_args_ff[0].cpus[0])
print('ncpus=' + str(ncpus))
nano = parsed_args_ff[0].nano
# experimental decontaminate transposon/vector sequence
# not currently effective try another implementation when time allows?
# decontam_tranposon = False
# print(parsed_args_ff[0].sub[0])
# fuzzy_levenshtein = bool(parsed_args_ff[0].lev[0])
# l_dist = parsed_args_ff[0].lev[0]  # maximum Levenshtein Distance
# fuzzy_levenshtein = bool(l_dist)
# set up some variables:
if nano: # nano == True
# parsed_args[0].noreps = True
fuzzy_levenshtein = True
l_dist = parsed_args_ff[0].lev[0] # maximum Levenshtein Distance
# min_length = 50
# max_length = 200
min_length = parsed_args_ff[0].min[0] # changed to array
max_length = parsed_args_ff[0].max[0] # changed to array
# print('overriding with Nanopore appropriate settings: Levenshtein distance of ' + str(
# l_dist) + ' + sequence length min = ' + str(min_length) + ', max = ' + str(max_length))
else:
l_dist = parsed_args_ff[0].lev[0] # maximum Levenshtein Distance
print(str(l_dist) + '~~~~~~~~~~~~')
fuzzy_levenshtein = bool(l_dist)
subs = parsed_args_ff[0].sub[0]
insrt = parsed_args_ff[0].insert[0]
dels = parsed_args_ff[0].deletion[0]
min_length = parsed_args_ff[0].min[0] # changed to array
max_length = parsed_args_ff[0].max[0] # changed to array
# set up some names
if nano:
seqtype = '_nano'
else:
seqtype = ''
if fuzzy_levenshtein:
fq_result_suffix = (seqtype + "_pimms2out_trim" + str(max_length) + "_lev" + str(l_dist) + ".fastq")
elif insrt > 0 or dels > 0:
fq_result_suffix = (
seqtype + "_pimms2out_trim" + str(max_length) + "_sub" + str(subs) + "_ins" + str(insrt) + "_del" + str(dels) + ".fastq")
else:
fq_result_suffix = (seqtype + "_pimms2out_trim" + str(max_length) + "_sub" + str(subs) + ".fastq")
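# e.g. with the Illumina defaults (max 60, sub 1, no indels) the suffix is
# '_pimms2out_trim60_sub1.fastq'; the matching SAM suffix is derived from it below.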
sam_result_suffix = re.sub('.fastq', '.sam', fq_result_suffix)
fastq_dir = os.path.join(parsed_args_ff[0].in_dir[0], '')
flanking_fastq_result_list = []
if nano: # nano == True
glob_wc = ["*q.gz", "*.fastq", "*.fasta", "*.fasta.gz"]
glob_read_files = find_read_files_with_glob(fastq_dir, glob_wc)
print(glob_read_files, "...(glob_read_files)...\n")
print("nanopore PIMMS read filtering starting...\n")
print(datetime.datetime.now())
pi = multiprocessing.Pool(ncpus)
# for fq in glob.glob(fastq_dir + "*[aq].gz"):
for fq in glob_read_files:
fq_processed = os.path.join(out_dir_ff, Path(Path(fq).stem).stem + fq_result_suffix)
flanking_fastq_result_list = flanking_fastq_result_list + [fq_processed]
pi.apply_async(pimms_fastq,
# args=(fq, fq_processed, nano)
args=(fq, fq_processed, out_dir_logs, nano)
)
pi.close()
pi.join()
# pi = multiprocessing.Pool(ncpus)
# for fq in glob.glob(fastq_dir + "*.fast[aq]"):
# fq_processed = os.path.join(out_dir_ff, Path(Path(fq).stem).stem + fq_result_suffix)
# flanking_fastq_result_list = flanking_fastq_result_list + [fq_processed]
# pi.apply_async(pimms_fastq,
# args=(fq, fq_processed)
# )
#
# pi.close()
# pi.join()
print("nanopore PIMMS filtering completed...\n")
print(datetime.datetime.now())
else: # nano == False
glob_wc = ["*q.gz", "*.fastq"]
glob_read_files = find_read_files_with_glob(fastq_dir, glob_wc)
print("PIMMS read filtering starting...\n")
print(datetime.datetime.now())
pi = multiprocessing.Pool(ncpus)
# for fq in glob.glob(fastq_dir + "*.fastq"):
for fq in glob_read_files:
if not parsed_args_ff[0].single:
# print('fwd/rev Illumina data')
if not (fwdrev_wc[0] in fq or fwdrev_wc[1] in fq):
print("ERROR(fastq): text substrings " + fwdrev_wc[0] + "/" + fwdrev_wc[
1] + " NOT FOUND in read filenanes (to identify illumina fwd/rev fastq files)")
print("ERROR(fastq): Check the fastq file names and/or update the --fwdrev parameter")
sys.exit(1)
else:
print('.')
# print('single direction Illumina data')
# sys.exit(1)
fq_processed = os.path.join(out_dir_ff, Path(Path(fq).stem).stem + fq_result_suffix)
pi.apply_async(pimms_fastq,
args=(fq, fq_processed, out_dir_logs, nano)
)
pi.close()
pi.join()
# pi = multiprocessing.Pool(ncpus)
# for fq in glob.glob(fastq_dir + "*q.gz"):
# if not (fwdrev_wc[0] in fq or fwdrev_wc[1] in fq):
# print("ERROR(fastq): text substrings " + fwdrev_wc[0] + "/" + fwdrev_wc[
# 1] + " NOT FOUND in read filenanes (to identify illumina fwd/rev fastq files)")
# print("ERROR(fastq): Check the fastq file names and/or update the --fwdrev parameter")
# sys.exit(1)
#
# fq_processed = os.path.join(out_dir_ff, Path(Path(fq).stem).stem + fq_result_suffix)
# pi.apply_async(pimms_fastq,
# args=(fq, fq_processed, out_dir_logs)
# )
#
# pi.close()
# pi.join()
print("PIMMS read filtering completed...\n")
print(datetime.datetime.now())
# match fwdrev match substrings e.g: _R1_/_R2_ --fwdrev parameter
if not parsed_args_ff[0].single:
fqp_results_fwd = sorted(glob.glob(os.path.join(out_dir_ff, "*" + fwdrev_wc[0] + "*" + fq_result_suffix)))
fqp_results_rev = sorted(glob.glob(os.path.join(out_dir_ff, "*" + fwdrev_wc[1] + "*" + fq_result_suffix)))
print(fqp_results_fwd)
print(fqp_results_rev)
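# Pair each forward file with its reverse mate, keep only reverse-read hits whose names were
# not already recovered from the forward reads (set difference below), and write a single
# merged '_RX_' fastq per pair.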
for fwd_fqp_result, rev_fqp_result in zip(fqp_results_fwd, fqp_results_rev):
result1_reads_list = []
result2_reads_list = []
print(fwd_fqp_result)
print(rev_fqp_result)
survey_fastq(result1_reads_list, fwd_fqp_result)
survey_fastq(result2_reads_list, rev_fqp_result)
a = set(result1_reads_list)
b = set(result2_reads_list)
c = b.difference(a)
tempfqname = Path(fwd_fqp_result).name
# fix so only the substring in the file name, not the dir, is updated
mrg_fqp_result_filename = os.path.join(Path(fwd_fqp_result).parent,
re.sub(fwdrev_wc[0], '_RX_', tempfqname,
count=1)) # replace fwd substring '_R1_'
# mrg_fqp_result_filename = re.sub(fwdrev_wc[0], '_RX_', fwd_fqp_result, count=1)
flanking_fastq_result_list = flanking_fastq_result_list + [mrg_fqp_result_filename]
print(mrg_fqp_result_filename)
print(str(len(a)))
print(str(len(b)))
print(str(len(c)))
# pysam bug doesn't parse files gzipped in chunks so gzipping removed here
# with pysam.FastxFile(fwd_fqp_result) as fin, gzip.open(mrg_fqp_result_filename, mode='wt') as fout:
with pysam.FastxFile(fwd_fqp_result, persist=False) as fin, open(mrg_fqp_result_filename,
mode='wt') as fout:
for entry in fin:
fout.write((str(entry) + '\n'))
# with pysam.FastxFile(rev_fqp_result) as fin, gzip.open(mrg_fqp_result_filename, mode='at') as fout:
with pysam.FastxFile(rev_fqp_result, persist=False) as fin, open(mrg_fqp_result_filename,
mode='at') as fout:
for entry in fin:
if entry.name in c:
fout.write((str(entry) + '\n'))
# fout.write(str(entry) + '\n')
# remove intermediate fastq files
# if parsed_args[0].rmfiles:
if not parsed_args_ff[0].keep:
delete_file_list(fqp_results_fwd)
delete_file_list(fqp_results_rev)
print("illumina merge of fwd/reverse data completed...\n")
else:
fqp_results_sing = sorted(glob.glob(os.path.join(out_dir_ff, "*" + fq_result_suffix)))
print(fqp_results_sing)
flanking_fastq_result_list = fqp_results_sing
# if not parsed_args_ff[0].keep:
# delete_file_list(fqp_results_sing)
# tidy up
print(flanking_fastq_result_list)
# concat_result_fastq = concat_fastq(flanking_fastq_result_list, parsed_args[0].label[0], fq_result_suffix, out_dir)
concat_result_fastq = concat_fastq_raw(flanking_fastq_result_list, label, fq_result_suffix, out_dir_ff)
# merge logs from different parallel cpus
merge_logs(out_dir_logs)
# do mapping stuff
bam_name_ff = ''
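# Unless --nomap was given, map the pooled flanking reads with minimap2 (always used for
# nanopore) or bwa, then sort and index the resulting BAM ready for the bam_extract step.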
if parsed_args_ff[0].nomap:
print("Skipping mapping step...\n")
else:
if mapper == 'minimap2' or nano:
sam_output_mm = os.path.splitext(os.path.basename(parsed_args_ff[0].fasta[0]))[0] + '_' + label + re.sub('.sam', '_mm2.sam',
sam_result_suffix)
sam_output_mm = os.path.join(out_dir_ff, sam_output_mm)
run_minimap2(concat_result_fastq, sam_output_mm, parsed_args_ff[0].fasta[0])
bam_name_ff = py_sam_to_bam(sam_output_mm)
elif mapper == 'bwa':
sam_output_bwa = os.path.splitext(os.path.basename(parsed_args_ff[0].fasta[0]))[0] + '_' + label + re.sub('.sam', '_bwa.sam',
sam_result_suffix)
sam_output_bwa = os.path.join(out_dir_ff, sam_output_bwa)
run_bwa(concat_result_fastq, sam_output_bwa, parsed_args_ff[0].fasta[0], ncpus)
bam_name_ff = py_sam_to_bam(sam_output_bwa)
return out_dir_ff, bam_name_ff
# end find_flank_func ###
# FIND_FLANK ###
if parsed_args[0].command == 'find_flank':
out_dir, bam_name = find_flank_func(parsed_args)
# BAM_EXTRACT ###
elif parsed_args[0].command == 'bam_extract':
bam_extract_func(parsed_args)
# TABLE_MERGE ###
elif parsed_args[0].command == 'table_merge':
table_merge_func(parsed_args)
# FULL_PROCESS ###
elif parsed_args[0].command == 'full_process':
out_dir, bam_name = find_flank_func(parsed_args)
parsed_args[0].out_dir[0] = out_dir
parsed_args[0].bam[0] = bam_name
bam_extract_func(parsed_args)
|
[
"pysam.FastxFile",
"os.remove",
"pandas.read_csv",
"os.getppid",
"os.popen",
"gffpandas.gffpandas.read_gff3",
"time.strftime",
"pathlib.Path",
"pandas.DataFrame.merge",
"glob.glob",
"pandas.read_table",
"shutil.rmtree",
"configargparse.ArgumentParser",
"os.path.join",
"pandas.DataFrame",
"pysam.sort",
"random.randint",
"os.path.dirname",
"pandas.merge",
"os.path.exists",
"shutil.copyfile",
"fuzzysearch.find_near_matches",
"re.sub",
"pandas.concat",
"pandas.ExcelWriter",
"datetime.datetime.now",
"subprocess.Popen",
"multiprocessing.current_process",
"os.path.basename",
"pysam.AlignmentFile",
"shutil.which",
"pandas.read_excel",
"pysam.index",
"multiprocessing.Pool",
"sys.exit",
"gzip.open",
"os.makedirs",
"os.path.isdir",
"fileinput.input",
"os.cpu_count",
"pysam.flagstat"
] |
[((2105, 2134), 'os.path.dirname', 'os.path.dirname', (['samfile_path'], {}), '(samfile_path)\n', (2120, 2134), False, 'import os\n'), ((2156, 2210), 'os.path.join', 'os.path.join', (['samdirname', "(run_label + '_out_dashboard')"], {}), "(samdirname, run_label + '_out_dashboard')\n", (2168, 2210), False, 'import os\n'), ((2234, 2283), 'os.path.join', 'os.path.join', (['samdirname', "(run_label + '_out_info')"], {}), "(samdirname, run_label + '_out_info')\n", (2246, 2283), False, 'import os\n'), ((3694, 3721), 'shutil.which', 'shutil.which', (['prog_to_check'], {}), '(prog_to_check)\n', (3706, 3721), False, 'import shutil\n'), ((4160, 4235), 'os.path.join', 'os.path.join', (['concat_out_dir', "(label + '_RX_concat' + fq_file_suffix + '.gz')"], {}), "(concat_out_dir, label + '_RX_concat' + fq_file_suffix + '.gz')\n", (4172, 4235), False, 'import os\n'), ((5040, 5157), 'sys.exit', 'sys.exit', (["(\n 'SYS EXIT: unable to find read files, check file suffixes match permissible: '\n + wildcards + '\\n')"], {}), "(\n 'SYS EXIT: unable to find read files, check file suffixes match permissible: '\n + wildcards + '\\n')\n", (5048, 5157), False, 'import sys\n'), ((5321, 5367), 'pandas.concat', 'pd.concat', (['df_from_each_log'], {'ignore_index': '(True)'}), '(df_from_each_log, ignore_index=True)\n', (5330, 5367), True, 'import pandas as pd\n'), ((5874, 5904), 'os.popen', 'os.popen', (['"""minimap2 --version"""'], {}), "('minimap2 --version')\n", (5882, 5904), False, 'import os\n'), ((6400, 6623), 'subprocess.Popen', 'subprocess.Popen', (["['minimap2', '-x', mm_mode, '-a', '-y', '-o', sam_output_result,\n genome_fasta, flanking_fastq_concat_result, '--secondary=no',\n '--sam-hit-only']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['minimap2', '-x', mm_mode, '-a', '-y', '-o',\n sam_output_result, genome_fasta, flanking_fastq_concat_result,\n '--secondary=no', '--sam-hit-only'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n", (6416, 6623), False, 'import subprocess\n'), ((8225, 8266), 're.sub', 're.sub', (['""".sam"""', '""".bam"""', 'sam_output_result'], {}), "('.sam', '.bam', sam_output_result)\n", (8231, 8266), False, 'import re\n'), ((8406, 8469), 'pysam.sort', 'pysam.sort', (['"""-OBAM"""', '"""-o"""', 'bam_output_result', 'sam_output_result'], {}), "('-OBAM', '-o', bam_output_result, sam_output_result)\n", (8416, 8469), False, 'import pysam\n'), ((8519, 8549), 'pysam.index', 'pysam.index', (['bam_output_result'], {}), '(bam_output_result)\n', (8530, 8549), False, 'import pysam\n'), ((38402, 38427), 'gffpandas.gffpandas.read_gff3', 'gffpd.read_gff3', (['gff_file'], {}), '(gff_file)\n', (38417, 38427), True, 'import gffpandas.gffpandas as gffpd\n'), ((42118, 42147), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['sam_file'], {}), '(sam_file)\n', (42137, 42147), False, 'import pysam\n'), ((42728, 42757), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['sam_file'], {}), '(sam_file)\n', (42747, 42757), False, 'import pysam\n'), ((44615, 44642), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['my_sam'], {}), '(my_sam)\n', (44634, 44642), False, 'import pysam\n'), ((45911, 46056), 'pandas.read_csv', 'pd.read_csv', (["(sam_stem + '_insert_coords.txt')"], {'sep': '"""\t"""', 'dtype': "{'ref_name': 'str', 'coord': 'int64', 'read_comment': 'str', 'read_grp': 'str'}"}), "(sam_stem + '_insert_coords.txt', sep='\\t', dtype={'ref_name':\n 'str', 'coord': 'int64', 'read_comment': 'str', 'read_grp': 'str'})\n", (45922, 46056), True, 'import pandas as pd\n'), ((51059, 51162), 
'pandas.read_csv', 'pd.read_csv', (["(sam_stem + '_insert_coords.txt')"], {'sep': '"""\t"""', 'dtype': "{'ref_name': 'str', 'coord': 'int64'}"}), "(sam_stem + '_insert_coords.txt', sep='\\t', dtype={'ref_name':\n 'str', 'coord': 'int64'})\n", (51070, 51162), True, 'import pandas as pd\n'), ((59765, 60011), 'configargparse.ArgumentParser', 'configargparse.ArgumentParser', ([], {'prog': '"""pimms2"""', 'add_config_file_help': '(False)', 'config_file_parser_class': 'configargparse.DefaultConfigFileParser', 'epilog': '"""\n\n*** N.B. This is a development version ***\n \n """', 'description': '"""description here"""'}), '(prog=\'pimms2\', add_config_file_help=False,\n config_file_parser_class=configargparse.DefaultConfigFileParser, epilog\n ="""\n\n*** N.B. This is a development version ***\n \n """, description=\n \'description here\')\n', (59794, 60011), False, 'import configargparse\n'), ((83251, 83276), 'os.path.isdir', 'os.path.isdir', (['out_dir_ff'], {}), '(out_dir_ff)\n', (83264, 83276), False, 'import os\n'), ((83442, 83474), 'os.path.join', 'os.path.join', (['out_dir_ff', '"""logs"""'], {}), "(out_dir_ff, 'logs')\n", (83454, 83474), False, 'import os\n'), ((83482, 83509), 'os.path.isdir', 'os.path.isdir', (['out_dir_logs'], {}), '(out_dir_logs)\n', (83495, 83509), False, 'import os\n'), ((85841, 85883), 're.sub', 're.sub', (['""".fastq"""', '""".sam"""', 'fq_result_suffix'], {}), "('.fastq', '.sam', fq_result_suffix)\n", (85847, 85883), False, 'import re\n'), ((85901, 85946), 'os.path.join', 'os.path.join', (['parsed_args_ff[0].in_dir[0]', '""""""'], {}), "(parsed_args_ff[0].in_dir[0], '')\n", (85913, 85946), False, 'import os\n'), ((2370, 2400), 'os.path.exists', 'os.path.exists', (['results_dir_db'], {}), '(results_dir_db)\n', (2384, 2400), False, 'import os\n'), ((3405, 3422), 'os.path.exists', 'os.path.exists', (['x'], {}), '(x)\n', (3419, 3422), False, 'import os\n'), ((3808, 4025), 'sys.exit', 'sys.exit', (['(\'\\nERROR: \' + prog_to_check +\n """ cannot be found in the path. \nSYS.EXIT: Please check your environment and ensure """\n + prog_to_check + """ is installed and available before trying again.\n\n"""\n )'], {}), '(\'\\nERROR: \' + prog_to_check +\n """ cannot be found in the path. 
\nSYS.EXIT: Please check your environment and ensure """\n + prog_to_check + """ is installed and available before trying again.\n\n"""\n )\n', (3816, 4025), False, 'import sys\n'), ((4327, 4389), 'gzip.open', 'gzip.open', (['concat_fastq_result_filename', '"""wt"""'], {'compresslevel': '(6)'}), "(concat_fastq_result_filename, 'wt', compresslevel=6)\n", (4336, 4389), False, 'import gzip\n'), ((4949, 4977), 'glob.glob', 'glob.glob', (['(indir + suffix_wc)'], {}), '(indir + suffix_wc)\n', (4958, 4977), False, 'import glob\n'), ((5202, 5236), 'os.path.join', 'os.path.join', (['log_path', '"""log_*txt"""'], {}), "(log_path, 'log_*txt')\n", (5214, 5236), False, 'import os\n'), ((5263, 5279), 'pandas.read_table', 'pd.read_table', (['f'], {}), '(f)\n', (5276, 5279), True, 'import pandas as pd\n'), ((5628, 5678), 'os.path.join', 'os.path.join', (['log_path', '""".."""', '"""result_summary.txt"""'], {}), "(log_path, '..', 'result_summary.txt')\n", (5640, 5678), False, 'import os\n'), ((7224, 7269), 'shutil.copyfile', 'shutil.copyfile', (['genome_fasta', 'fasta_to_index'], {}), '(genome_fasta, fasta_to_index)\n', (7239, 7269), False, 'import shutil\n'), ((7288, 7390), 'subprocess.Popen', 'subprocess.Popen', (["['bwa', 'index', fasta_to_index]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['bwa', 'index', fasta_to_index], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (7304, 7390), False, 'import subprocess\n'), ((37962, 37999), 'pysam.FastxFile', 'pysam.FastxFile', (['fqout'], {'persist': '(False)'}), '(fqout, persist=False)\n', (37977, 37999), False, 'import pysam\n'), ((38536, 38562), 'os.path.basename', 'os.path.basename', (['gff_file'], {}), '(gff_file)\n', (38552, 38562), False, 'import os\n'), ((41811, 41837), 'os.path.basename', 'os.path.basename', (['sam_file'], {}), '(sam_file)\n', (41827, 41837), False, 'import os\n'), ((46993, 47007), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (47005, 47007), True, 'import pandas as pd\n'), ((52580, 52657), 'os.path.join', 'os.path.join', (['db_rdir', "(condition_label + '_pimms_insert_coordinates' + '.gff')"], {}), "(db_rdir, condition_label + '_pimms_insert_coordinates' + '.gff')\n", (52592, 52657), False, 'import os\n'), ((75826, 75837), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (75834, 75837), False, 'import sys\n'), ((80413, 80491), 'os.path.join', 'os.path.join', (['db_rdir', "(sam_stem + '_countinfo.' + parsed_args_be[0].out_fmt[0])"], {}), "(db_rdir, sam_stem + '_countinfo.' 
+ parsed_args_be[0].out_fmt[0])\n", (80425, 80491), False, 'import os\n'), ((80540, 80596), 'os.path.join', 'os.path.join', (['info_rdir', "(sam_stem + '_insert_coords.txt')"], {}), "(info_rdir, sam_stem + '_insert_coords.txt')\n", (80552, 80596), False, 'import os\n'), ((80631, 80673), 'os.path.join', 'os.path.join', (['info_rdir', "(sam_stem + '.bed')"], {}), "(info_rdir, sam_stem + '.bed')\n", (80643, 80673), False, 'import os\n'), ((80987, 81046), 'pandas.read_excel', 'pd.read_excel', (['parsed_args_tm[0].xlsx[0]'], {'engine': '"""openpyxl"""'}), "(parsed_args_tm[0].xlsx[0], engine='openpyxl')\n", (81000, 81046), True, 'import pandas as pd\n'), ((81068, 81127), 'pandas.read_excel', 'pd.read_excel', (['parsed_args_tm[0].xlsx[1]'], {'engine': '"""openpyxl"""'}), "(parsed_args_tm[0].xlsx[1], engine='openpyxl')\n", (81081, 81127), True, 'import pandas as pd\n'), ((81153, 81195), 'pandas.DataFrame.merge', 'pd.DataFrame.merge', (['result_df1', 'result_df2'], {}), '(result_df1, result_df2)\n', (81171, 81195), True, 'import pandas as pd\n'), ((81213, 81270), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['"""merged_result.xlsx"""'], {'engine': '"""xlsxwriter"""'}), "('merged_result.xlsx', engine='xlsxwriter')\n", (81227, 81270), True, 'import pandas as pd\n'), ((83553, 83580), 'shutil.rmtree', 'shutil.rmtree', (['out_dir_logs'], {}), '(out_dir_logs)\n', (83566, 83580), False, 'import shutil\n'), ((86325, 86352), 'multiprocessing.Pool', 'multiprocessing.Pool', (['ncpus'], {}), '(ncpus)\n', (86345, 86352), False, 'import multiprocessing\n'), ((87668, 87695), 'multiprocessing.Pool', 'multiprocessing.Pool', (['ncpus'], {}), '(ncpus)\n', (87688, 87695), False, 'import multiprocessing\n'), ((1888, 1913), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1902, 1913), False, 'import os\n'), ((1927, 1949), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1938, 1949), False, 'import os\n'), ((2427, 2454), 'os.makedirs', 'os.makedirs', (['results_dir_db'], {}), '(results_dir_db)\n', (2438, 2454), False, 'import os\n'), ((2467, 2496), 'os.makedirs', 'os.makedirs', (['results_dir_info'], {}), '(results_dir_info)\n', (2478, 2496), False, 'import os\n'), ((2859, 2886), 'os.makedirs', 'os.makedirs', (['results_dir_db'], {}), '(results_dir_db)\n', (2870, 2886), False, 'import os\n'), ((2899, 2928), 'os.makedirs', 'os.makedirs', (['results_dir_info'], {}), '(results_dir_info)\n', (2910, 2928), False, 'import os\n'), ((3184, 3204), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (3193, 3204), False, 'import os\n'), ((4416, 4458), 'fileinput.input', 'fileinput.input', ([], {'files': 'flanking_fastq_list'}), '(files=flanking_fastq_list)\n', (4431, 4458), False, 'import fileinput\n'), ((6938, 6956), 'pathlib.Path', 'Path', (['genome_fasta'], {}), '(genome_fasta)\n', (6942, 6956), False, 'from pathlib import Path\n'), ((11418, 11461), 'pysam.FastxFile', 'pysam.FastxFile', (['fq_filename'], {'persist': '(False)'}), '(fq_filename, persist=False)\n', (11433, 11461), False, 'import pysam\n'), ((24252, 24295), 'pysam.FastxFile', 'pysam.FastxFile', (['fq_filename'], {'persist': '(False)'}), '(fq_filename, persist=False)\n', (24267, 24295), False, 'import pysam\n'), ((38633, 38696), 'os.path.join', 'os.path.join', (['rdir', "(gff_stem + '_pimms_features_pseudogene.gff')"], {}), "(rdir, gff_stem + '_pimms_features_pseudogene.gff')\n", (38645, 38696), False, 'import os\n'), ((38735, 38787), 'os.path.join', 'os.path.join', (['rdir', "(gff_stem + '_pimms_features.gff')"], 
{}), "(rdir, gff_stem + '_pimms_features.gff')\n", (38747, 38787), False, 'import os\n'), ((49034, 49048), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (49046, 49048), True, 'import pandas as pd\n'), ((58369, 58430), 'pandas.merge', 'pd.merge', (['gff_columns_addback', 'pimms_result_table'], {'how': '"""left"""'}), "(gff_columns_addback, pimms_result_table, how='left')\n", (58377, 58430), True, 'import pandas as pd\n'), ((80033, 80098), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["(sam_stem + '_countinfo.xlsx')"], {'engine': '"""xlsxwriter"""'}), "(sam_stem + '_countinfo.xlsx', engine='xlsxwriter')\n", (80047, 80098), True, 'import pandas as pd\n'), ((81832, 81874), 'pandas.DataFrame.merge', 'pd.DataFrame.merge', (['result_df1', 'result_df2'], {}), '(result_df1, result_df2)\n', (81850, 81874), True, 'import pandas as pd\n'), ((83128, 83158), 'time.strftime', 'time.strftime', (['"""%d%m%y_%H%M%S"""'], {}), "('%d%m%y_%H%M%S')\n", (83141, 83158), False, 'import time\n'), ((86286, 86309), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (86307, 86309), False, 'import datetime\n'), ((87398, 87421), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (87419, 87421), False, 'import datetime\n'), ((87630, 87653), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (87651, 87653), False, 'import datetime\n'), ((89544, 89567), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (89565, 89567), False, 'import datetime\n'), ((93728, 93767), 'os.path.join', 'os.path.join', (['out_dir_ff', 'sam_output_mm'], {}), '(out_dir_ff, sam_output_mm)\n', (93740, 93767), False, 'import os\n'), ((7191, 7209), 'pathlib.Path', 'Path', (['genome_fasta'], {}), '(genome_fasta)\n', (7195, 7209), False, 'from pathlib import Path\n'), ((8653, 8686), 'pysam.flagstat', 'pysam.flagstat', (['bam_output_result'], {}), '(bam_output_result)\n', (8667, 8686), False, 'import pysam\n'), ((36637, 36649), 'os.getppid', 'os.getppid', ([], {}), '()\n', (36647, 36649), False, 'import os\n'), ((36692, 36718), 'random.randint', 'random.randint', (['(1000)', '(9999)'], {}), '(1000, 9999)\n', (36706, 36718), False, 'import random\n'), ((82080, 82127), 'pandas.read_csv', 'pd.read_csv', (['parsed_args_tm[0].tsv[0]'], {'sep': '"""\t"""'}), "(parsed_args_tm[0].tsv[0], sep='\\t')\n", (82091, 82127), True, 'import pandas as pd\n'), ((82149, 82196), 'pandas.read_csv', 'pd.read_csv', (['parsed_args_tm[0].tsv[1]'], {'sep': '"""\t"""'}), "(parsed_args_tm[0].tsv[1], sep='\\t')\n", (82160, 82196), True, 'import pandas as pd\n'), ((82222, 82264), 'pandas.DataFrame.merge', 'pd.DataFrame.merge', (['result_df1', 'result_df2'], {}), '(result_df1, result_df2)\n', (82240, 82264), True, 'import pandas as pd\n'), ((93537, 93582), 're.sub', 're.sub', (['""".sam"""', '"""_mm2.sam"""', 'sam_result_suffix'], {}), "('.sam', '_mm2.sam', sam_result_suffix)\n", (93543, 93582), False, 'import re\n'), ((94246, 94286), 'os.path.join', 'os.path.join', (['out_dir_ff', 'sam_output_bwa'], {}), '(out_dir_ff, sam_output_bwa)\n', (94258, 94286), False, 'import os\n'), ((2663, 2694), 'time.strftime', 'time.strftime', (['"""_%d%m%y_%H%M%S"""'], {}), "('_%d%m%y_%H%M%S')\n", (2676, 2694), False, 'import time\n'), ((2783, 2814), 'time.strftime', 'time.strftime', (['"""_%d%m%y_%H%M%S"""'], {}), "('_%d%m%y_%H%M%S')\n", (2796, 2814), False, 'import time\n'), ((7920, 7938), 'pathlib.Path', 'Path', (['genome_fasta'], {}), '(genome_fasta)\n', (7924, 7938), False, 'from pathlib import Path\n'), ((11811, 11920), 
'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1', 'entry.sequence'], {'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry1, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (11828, 11920), False, 'from fuzzysearch import find_near_matches\n'), ((11999, 12108), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2', 'entry.sequence'], {'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry2, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (12016, 12108), False, 'from fuzzysearch import find_near_matches\n'), ((12189, 12300), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1rc', 'entry.sequence'], {'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry1rc, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (12206, 12300), False, 'from fuzzysearch import find_near_matches\n'), ((12383, 12494), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2rc', 'entry.sequence'], {'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry2rc, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (12400, 12494), False, 'from fuzzysearch import find_near_matches\n'), ((12652, 12710), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry1, entry.sequence, max_l_dist=l_dist)\n', (12669, 12710), False, 'from fuzzysearch import find_near_matches\n'), ((12743, 12801), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry2, entry.sequence, max_l_dist=l_dist)\n', (12760, 12801), False, 'from fuzzysearch import find_near_matches\n'), ((12836, 12896), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1rc', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry1rc, entry.sequence, max_l_dist=l_dist)\n', (12853, 12896), False, 'from fuzzysearch import find_near_matches\n'), ((12931, 12991), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2rc', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry2rc, entry.sequence, max_l_dist=l_dist)\n', (12948, 12991), False, 'from fuzzysearch import find_near_matches\n'), ((24599, 24708), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1', 'entry.sequence'], {'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry1, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (24616, 24708), False, 'from fuzzysearch import find_near_matches\n'), ((24787, 24896), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2', 'entry.sequence'], {'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry2, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (24804, 24896), False, 'from fuzzysearch import find_near_matches\n'), ((24977, 25088), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1rc', 'entry.sequence'], {'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry1rc, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (24994, 25088), False, 'from fuzzysearch import find_near_matches\n'), ((25171, 25282), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2rc', 'entry.sequence'], 
{'max_substitutions': 'subs', 'max_deletions': 'dels', 'max_insertions': 'insrt'}), '(qry2rc, entry.sequence, max_substitutions=subs,\n max_deletions=dels, max_insertions=insrt)\n', (25188, 25282), False, 'from fuzzysearch import find_near_matches\n'), ((25440, 25498), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry1, entry.sequence, max_l_dist=l_dist)\n', (25457, 25498), False, 'from fuzzysearch import find_near_matches\n'), ((25531, 25589), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry2, entry.sequence, max_l_dist=l_dist)\n', (25548, 25589), False, 'from fuzzysearch import find_near_matches\n'), ((25624, 25684), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry1rc', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry1rc, entry.sequence, max_l_dist=l_dist)\n', (25641, 25684), False, 'from fuzzysearch import find_near_matches\n'), ((25719, 25779), 'fuzzysearch.find_near_matches', 'find_near_matches', (['qry2rc', 'entry.sequence'], {'max_l_dist': 'l_dist'}), '(qry2rc, entry.sequence, max_l_dist=l_dist)\n', (25736, 25779), False, 'from fuzzysearch import find_near_matches\n'), ((36652, 36685), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (36683, 36685), False, 'import multiprocessing\n'), ((37080, 37109), 'os.path.basename', 'os.path.basename', (['fq_filename'], {}), '(fq_filename)\n', (37096, 37109), False, 'import os\n'), ((81652, 81689), 'pandas.read_csv', 'pd.read_csv', (['parsed_args_tm[0].csv[0]'], {}), '(parsed_args_tm[0].csv[0])\n', (81663, 81689), True, 'import pandas as pd\n'), ((81740, 81777), 'pandas.read_csv', 'pd.read_csv', (['parsed_args_tm[0].csv[1]'], {}), '(parsed_args_tm[0].csv[1])\n', (81751, 81777), True, 'import pandas as pd\n'), ((88271, 88282), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (88279, 88282), False, 'import sys\n'), ((89733, 89802), 'os.path.join', 'os.path.join', (['out_dir_ff', "('*' + fwdrev_wc[0] + '*' + fq_result_suffix)"], {}), "(out_dir_ff, '*' + fwdrev_wc[0] + '*' + fq_result_suffix)\n", (89745, 89802), False, 'import os\n'), ((89852, 89921), 'os.path.join', 'os.path.join', (['out_dir_ff', "('*' + fwdrev_wc[1] + '*' + fq_result_suffix)"], {}), "(out_dir_ff, '*' + fwdrev_wc[1] + '*' + fq_result_suffix)\n", (89864, 89921), False, 'import os\n'), ((90523, 90543), 'pathlib.Path', 'Path', (['fwd_fqp_result'], {}), '(fwd_fqp_result)\n', (90527, 90543), False, 'from pathlib import Path\n'), ((90760, 90809), 're.sub', 're.sub', (['fwdrev_wc[0]', '"""_RX_"""', 'tempfqname'], {'count': '(1)'}), "(fwdrev_wc[0], '_RX_', tempfqname, count=1)\n", (90766, 90809), False, 'import re\n'), ((91486, 91532), 'pysam.FastxFile', 'pysam.FastxFile', (['fwd_fqp_result'], {'persist': '(False)'}), '(fwd_fqp_result, persist=False)\n', (91501, 91532), False, 'import pysam\n'), ((91906, 91952), 'pysam.FastxFile', 'pysam.FastxFile', (['rev_fqp_result'], {'persist': '(False)'}), '(rev_fqp_result, persist=False)\n', (91921, 91952), False, 'import pysam\n'), ((92660, 92708), 'os.path.join', 'os.path.join', (['out_dir_ff', "('*' + fq_result_suffix)"], {}), "(out_dir_ff, '*' + fq_result_suffix)\n", (92672, 92708), False, 'import os\n'), ((94053, 94098), 're.sub', 're.sub', (['""".sam"""', '"""_bwa.sam"""', 'sam_result_suffix'], {}), "('.sam', '_bwa.sam', sam_result_suffix)\n", (94059, 94098), False, 'import re\n'), ((7027, 7045), 'pathlib.Path', 'Path', (['genome_fasta'], {}), 
'(genome_fasta)\n', (7031, 7045), False, 'from pathlib import Path\n'), ((64736, 64750), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (64748, 64750), False, 'import os\n'), ((71972, 71986), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (71984, 71986), False, 'import os\n'), ((90676, 90696), 'pathlib.Path', 'Path', (['fwd_fqp_result'], {}), '(fwd_fqp_result)\n', (90680, 90696), False, 'from pathlib import Path\n'), ((86500, 86508), 'pathlib.Path', 'Path', (['fq'], {}), '(fq)\n', (86504, 86508), False, 'from pathlib import Path\n'), ((88470, 88478), 'pathlib.Path', 'Path', (['fq'], {}), '(fq)\n', (88474, 88478), False, 'from pathlib import Path\n'), ((93472, 93516), 'os.path.basename', 'os.path.basename', (['parsed_args_ff[0].fasta[0]'], {}), '(parsed_args_ff[0].fasta[0])\n', (93488, 93516), False, 'import os\n'), ((93988, 94032), 'os.path.basename', 'os.path.basename', (['parsed_args_ff[0].fasta[0]'], {}), '(parsed_args_ff[0].fasta[0])\n', (94004, 94032), False, 'import os\n')]
|
import sys
import time
import numpy as np
import pyaudio
from scipy import fftpack
from scipy.io import wavfile
import notes
from player import play
if len(sys.argv) < 2:
print(f'Usage: python app.py <WAV_FILE>')
sys.exit(1)
audio_input_file = sys.argv[1]
fs, audio_input = wavfile.read(audio_input_file)
print(f'Playing input file: {audio_input_file}')
play(audio_input, volume=1, rate=fs, channels=2, format=pyaudio.paInt32)
start = time.time()
audio_input_normalized = np.mean(audio_input, axis=1) / 2 ** 32
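# Average the two channels to mono and scale the 32-bit integer samples down to small floats.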
freqs = fftpack.fftfreq(audio_input_normalized.size) * fs
X = fftpack.fft(audio_input_normalized)
indices = np.flip(np.argsort(np.abs(X)))
freqs_ordered = np.abs(freqs[indices])[:100]
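# freqs_ordered holds the 100 strongest frequency bins (by FFT magnitude), strongest first.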
dist = np.vectorize(lambda f1, f2: abs(f1 - f2))
closest_frequencies_idx = np.fromiter(
(np.argmin(dist(f, notes.frequencies())) for f in freqs_ordered),
int,
)
_, idx = np.unique(closest_frequencies_idx, return_index=True)
closest_frequencies_idx = closest_frequencies_idx[np.sort(idx)]
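# np.unique returns the index of each value's first occurrence; sorting those indices keeps
# the de-duplicated note indices in their original amplitude order.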
closest_frequencies = notes.frequencies()[closest_frequencies_idx]
closest_notes = notes.frequencies_to_notes(closest_frequencies)
elapsed_time = time.time() - start
print(f'Playing detected notes (ordered by overall amplitude): {closest_notes} (took {elapsed_time:.3f}s)')
audio_output = np.concatenate([
notes.generate_sound_from_note(note, fs, 500)
for note in closest_notes
], axis=0).astype(np.float32)
play(audio_output, volume=1, rate=fs)
|
[
"numpy.abs",
"notes.generate_sound_from_note",
"player.play",
"notes.frequencies_to_notes",
"time.time",
"scipy.io.wavfile.read",
"scipy.fftpack.fft",
"numpy.sort",
"numpy.mean",
"notes.frequencies",
"sys.exit",
"scipy.fftpack.fftfreq",
"numpy.unique"
] |
[((286, 316), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_input_file'], {}), '(audio_input_file)\n', (298, 316), False, 'from scipy.io import wavfile\n'), ((366, 438), 'player.play', 'play', (['audio_input'], {'volume': '(1)', 'rate': 'fs', 'channels': '(2)', 'format': 'pyaudio.paInt32'}), '(audio_input, volume=1, rate=fs, channels=2, format=pyaudio.paInt32)\n', (370, 438), False, 'from player import play\n'), ((448, 459), 'time.time', 'time.time', ([], {}), '()\n', (457, 459), False, 'import time\n'), ((587, 622), 'scipy.fftpack.fft', 'fftpack.fft', (['audio_input_normalized'], {}), '(audio_input_normalized)\n', (598, 622), False, 'from scipy import fftpack\n'), ((887, 940), 'numpy.unique', 'np.unique', (['closest_frequencies_idx'], {'return_index': '(True)'}), '(closest_frequencies_idx, return_index=True)\n', (896, 940), True, 'import numpy as np\n'), ((1088, 1135), 'notes.frequencies_to_notes', 'notes.frequencies_to_notes', (['closest_frequencies'], {}), '(closest_frequencies)\n', (1114, 1135), False, 'import notes\n'), ((1424, 1461), 'player.play', 'play', (['audio_output'], {'volume': '(1)', 'rate': 'fs'}), '(audio_output, volume=1, rate=fs)\n', (1428, 1461), False, 'from player import play\n'), ((224, 235), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (232, 235), False, 'import sys\n'), ((486, 514), 'numpy.mean', 'np.mean', (['audio_input'], {'axis': '(1)'}), '(audio_input, axis=1)\n', (493, 514), True, 'import numpy as np\n'), ((533, 577), 'scipy.fftpack.fftfreq', 'fftpack.fftfreq', (['audio_input_normalized.size'], {}), '(audio_input_normalized.size)\n', (548, 577), False, 'from scipy import fftpack\n'), ((680, 702), 'numpy.abs', 'np.abs', (['freqs[indices]'], {}), '(freqs[indices])\n', (686, 702), True, 'import numpy as np\n'), ((991, 1003), 'numpy.sort', 'np.sort', (['idx'], {}), '(idx)\n', (998, 1003), True, 'import numpy as np\n'), ((1027, 1046), 'notes.frequencies', 'notes.frequencies', ([], {}), '()\n', (1044, 1046), False, 'import notes\n'), ((1152, 1163), 'time.time', 'time.time', ([], {}), '()\n', (1161, 1163), False, 'import time\n'), ((652, 661), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (658, 661), True, 'import numpy as np\n'), ((820, 839), 'notes.frequencies', 'notes.frequencies', ([], {}), '()\n', (837, 839), False, 'import notes\n'), ((1318, 1363), 'notes.generate_sound_from_note', 'notes.generate_sound_from_note', (['note', 'fs', '(500)'], {}), '(note, fs, 500)\n', (1348, 1363), False, 'import notes\n')]
|
import json
import os
import pytz
import voluptuous
from dbt.adapters.factory import get_adapter
from dbt.compat import basestring, to_string
import dbt.clients.jinja
import dbt.flags
import dbt.schema
import dbt.tracking
import dbt.utils
import dbt.hooks
from dbt.logger import GLOBAL_LOGGER as logger # noqa
class DatabaseWrapper(object):
"""
Wrapper for runtime database interaction. Should only call adapter
functions.
"""
def __init__(self, model, adapter, profile):
self.model = model
self.adapter = adapter
self.profile = profile
# Fun with metaprogramming
# Most adapter functions take `profile` as the first argument, and
# `model_name` as the last. This automatically injects those arguments.
# In model code, these functions can be called without those two args.
for context_function in self.adapter.context_functions:
setattr(self,
context_function,
self.wrap_with_profile_and_model_name(context_function))
for raw_function in self.adapter.raw_functions:
setattr(self,
raw_function,
getattr(self.adapter, raw_function))
def wrap_with_profile_and_model_name(self, fn):
def wrapped(*args, **kwargs):
args = (self.profile,) + args
kwargs['model_name'] = self.model.get('name')
return getattr(self.adapter, fn)(*args, **kwargs)
return wrapped
def type(self):
return self.adapter.type()
def commit(self):
return self.adapter.commit_if_has_connection(
self.profile, self.model.get('name'))
def _add_macros(context, model, flat_graph):
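# Expose each parsed macro under its package namespace, and additionally by bare name for the
# model's own package and the global dbt project (local macros override global ones).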
macros_to_add = {'global': [], 'local': []}
for unique_id, macro in flat_graph.get('macros', {}).items():
package_name = macro.get('package_name')
macro_map = {
macro.get('name'): macro.get('generator')(context)
}
if context.get(package_name) is None:
context[package_name] = {}
context.get(package_name, {}) \
.update(macro_map)
if package_name == model.get('package_name'):
macros_to_add['local'].append(macro_map)
elif package_name == dbt.include.GLOBAL_PROJECT_NAME:
macros_to_add['global'].append(macro_map)
# Load global macros before local macros -- local takes precedence
unprefixed_macros = macros_to_add['global'] + macros_to_add['local']
for macro_map in unprefixed_macros:
context.update(macro_map)
return context
def _add_tracking(context):
if dbt.tracking.active_user is not None:
context = dbt.utils.merge(context, {
"run_started_at": dbt.tracking.active_user.run_started_at,
"invocation_id": dbt.tracking.active_user.invocation_id,
})
else:
context = dbt.utils.merge(context, {
"run_started_at": None,
"invocation_id": None
})
return context
def _add_validation(context):
validation_utils = dbt.utils.AttrDict({
'any': voluptuous.Any,
'all': voluptuous.All,
})
return dbt.utils.merge(
context,
{'validation': validation_utils})
def _env_var(var, default=None):
if var in os.environ:
return os.environ[var]
elif default is not None:
return default
else:
msg = "Env var required but not provided: '{}'".format(var)
dbt.clients.jinja.undefined_error(msg)
def _store_result(sql_results):
def call(name, status, data=[]):
sql_results[name] = dbt.utils.AttrDict({
'status': status,
'data': data
})
return ''
return call
def _load_result(sql_results):
def call(name):
return sql_results.get(name)
return call
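# Expose store_result/load_result context functions backed by a shared sql_results dict.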
def _add_sql_handlers(context):
sql_results = {}
return dbt.utils.merge(context, {
'_sql_results': sql_results,
'store_result': _store_result(sql_results),
'load_result': _load_result(sql_results),
})
def log(msg, info=False):
if info:
logger.info(msg)
else:
logger.debug(msg)
return ''
class Var(object):
UndefinedVarError = "Required var '{}' not found in config:\nVars "\
"supplied to {} = {}"
NoneVarError = "Supplied var '{}' is undefined in config:\nVars supplied "\
"to {} = {}"
def __init__(self, model, context):
self.model = model
self.context = context
if isinstance(model, dict) and model.get('unique_id'):
self.local_vars = model.get('config', {}).get('vars')
self.model_name = model.get('name')
else:
# still used for wrapping
self.model_name = model.nice_name
self.local_vars = model.config.get('vars', {})
def pretty_dict(self, data):
return json.dumps(data, sort_keys=True, indent=4)
def assert_var_defined(self, var_name, default):
if var_name not in self.local_vars and default is None:
pretty_vars = self.pretty_dict(self.local_vars)
dbt.exceptions.raise_compiler_error(
self.UndefinedVarError.format(
var_name, self.model_name, pretty_vars
),
self.model
)
def assert_var_not_none(self, var_name):
raw = self.local_vars[var_name]
if raw is None:
pretty_vars = self.pretty_dict(self.local_vars)
model_name = dbt.utils.get_model_name_or_none(self.model)
dbt.exceptions.raise_compiler_error(
self.NoneVarError.format(
var_name, model_name, pretty_vars
),
self.model
)
def __call__(self, var_name, default=None):
self.assert_var_defined(var_name, default)
if var_name not in self.local_vars:
return default
self.assert_var_not_none(var_name)
raw = self.local_vars[var_name]
# if bool/int/float/etc are passed in, don't compile anything
if not isinstance(raw, basestring):
return raw
return dbt.clients.jinja.get_rendered(raw, self.context)
def write(node, target_path, subdirectory):
def fn(payload):
node['build_path'] = dbt.writer.write_node(
node, target_path, subdirectory, payload)
return ''
return fn
def render(context, node):
def fn(string):
return dbt.clients.jinja.get_rendered(string, context, node)
return fn
def fromjson(string, default=None):
try:
return json.loads(string)
except ValueError as e:
return default
def tojson(value, default=None):
try:
return json.dumps(value)
except ValueError as e:
return default
def _return(value):
raise dbt.exceptions.MacroReturn(value)
def generate(model, project, flat_graph, provider=None):
"""
Not meant to be called directly. Call with either:
dbt.context.parser.generate
or
dbt.context.runtime.generate
"""
if provider is None:
raise dbt.exceptions.InternalException(
"Invalid provider given to context: {}".format(provider))
target_name = project.get('target')
profile = project.get('outputs').get(target_name)
target = profile.copy()
target.pop('pass', None)
target['name'] = target_name
adapter = get_adapter(profile)
context = {'env': target}
schema = profile.get('schema', 'public')
pre_hooks = model.get('config', {}).get('pre-hook')
post_hooks = model.get('config', {}).get('post-hook')
db_wrapper = DatabaseWrapper(model, adapter, profile)
context = dbt.utils.merge(context, {
"adapter": db_wrapper,
"column": dbt.schema.Column,
"config": provider.Config(model),
"env_var": _env_var,
"exceptions": dbt.exceptions,
"execute": provider.execute,
"flags": dbt.flags,
"graph": flat_graph,
"log": log,
"model": model,
"modules": {
"pytz": pytz,
},
"post_hooks": post_hooks,
"pre_hooks": pre_hooks,
"ref": provider.ref(model, project, profile, flat_graph),
"return": _return,
"schema": model.get('schema', schema),
"sql": model.get('injected_sql'),
"sql_now": adapter.date_function(),
"fromjson": fromjson,
"tojson": tojson,
"target": target,
"this": dbt.utils.Relation(profile, adapter, model, use_temp=True)
})
context = _add_tracking(context)
context = _add_validation(context)
context = _add_sql_handlers(context)
# we make a copy of the context for each of these ^^
context = _add_macros(context, model, flat_graph)
context["write"] = write(model, project.get('target-path'), 'run')
context["render"] = render(context, model)
context["var"] = Var(model, context=context)
context['context'] = context
return context
|
[
"dbt.logger.GLOBAL_LOGGER.info",
"json.loads",
"json.dumps",
"dbt.logger.GLOBAL_LOGGER.debug",
"dbt.adapters.factory.get_adapter"
] |
[((7536, 7556), 'dbt.adapters.factory.get_adapter', 'get_adapter', (['profile'], {}), '(profile)\n', (7547, 7556), False, 'from dbt.adapters.factory import get_adapter\n'), ((4175, 4191), 'dbt.logger.GLOBAL_LOGGER.info', 'logger.info', (['msg'], {}), '(msg)\n', (4186, 4191), True, 'from dbt.logger import GLOBAL_LOGGER as logger\n'), ((4210, 4227), 'dbt.logger.GLOBAL_LOGGER.debug', 'logger.debug', (['msg'], {}), '(msg)\n', (4222, 4227), True, 'from dbt.logger import GLOBAL_LOGGER as logger\n'), ((4977, 5019), 'json.dumps', 'json.dumps', (['data'], {'sort_keys': '(True)', 'indent': '(4)'}), '(data, sort_keys=True, indent=4)\n', (4987, 5019), False, 'import json\n'), ((6720, 6738), 'json.loads', 'json.loads', (['string'], {}), '(string)\n', (6730, 6738), False, 'import json\n'), ((6849, 6866), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (6859, 6866), False, 'import json\n')]
|
import datetime
import os
import subprocess
import warnings
import logging
import inspect
from collections import defaultdict
from time import sleep, time
from threading import Timer
import dateutil
import six
import sys
from dateutil.tz import tzlocal
from simple_monitor_alert.exceptions import InvalidScriptLineError, InvalidScriptLineLogging
from simple_monitor_alert.lines import Observable, RawLine, RawItemLine, get_observables_from_lines, RawHeaderLine
logger = logging.getLogger('sma')
TIMEOUT = 5
def get_verbose_condition(observable):
value = observable.get_line_value('value')
expected = observable.get_matcher()
if hasattr(expected, 'pattern'):
expected = expected.pattern
elif expected:
expected = expected.parse()
if isinstance(expected, six.string_types) or isinstance(expected, int):
expected = '== {}'.format(expected)
return '{} {}'.format(value, expected)
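# Build a human-readable trigger message for an observable evaluation and either log it or return it.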
def log_evaluate(observable, result=None, use_logger=True):
from simple_monitor_alert.utils.system import get_hostname
result = result or observable.evaluate()
level = 'success' if result else observable.get_line_value('level') or 'warning'
msg = '{} - - Trigger: [{}] ({}) {}. '.format(get_hostname(), level,
getattr(getattr(observable, 'monitor', None), 'name', '?'),
observable.get_verbose_name_group())
msg += ('Result: {}' if result else 'Assertion {} failed').format(get_verbose_condition(observable))
if observable.param_used:
msg += '. Param used: {}'.format(observable.param_used)
extra_info = observable.get_line_value('extra_info')
if extra_info:
msg += '. Extra info: {}'.format(extra_info)
if use_logger:
getattr(logger, 'info' if result else 'warning')(msg)
else:
return msg
class Monitor(object):
lines = None
headers = None
items = None
timeout = None
def __init__(self, script_path, sma=None):
self.script_path = script_path
self.sma = sma
self.name = os.path.splitext(os.path.split(script_path)[1])[0]
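    # Run the monitor script, reading stdout incrementally and adjusting the kill timer when an X-Timeout header appears.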
def _execute_process(self, env):
lines = []
popen = subprocess.Popen([self.script_path], stdout=subprocess.PIPE, env=env)
l = self.get_timer(popen)
started_at = time()
# sleep(.1)
# Realtime Read
blocksize = 1
line = b''
missing_data = True
while popen.poll() is None or missing_data:
if self.timeout:
# No need to wait for X-Timeout header
blocksize = -1
if popen.poll() is not None and missing_data:
                # Force one last pass through the loop when this is the final cycle
blocksize = -1
missing_data = False
line += popen.stdout.read(blocksize)
if b'\n' not in line:
continue
processed_lines = line.split(b'\n')
for line in processed_lines:
lines.append(line + b'\n')
timeout = self.get_headers(self.parse_lines([line])).get('X-Timeout')
timeout = int(timeout) if timeout is not None else None
if timeout and self.timeout is None:
l.cancel()
l = self.get_timer(popen, timeout - (time() - started_at))
self.timeout = timeout
l.cancel()
return lines
def get_timer(self, popen, timeout=TIMEOUT):
def terminate_popen():
popen.terminate()
popen.kill()
timeout = timeout if timeout > 0 else 0
l = Timer(timeout, terminate_popen)
l.start()
return l
def execute(self, parameters=None):
env = self.get_env(parameters)
lines = self.parse_lines(self._execute_process(env))
self.lines = list(lines)
self.items = self.get_observables(self.lines, parameters)
self.headers = self.get_headers(self.lines)
# self.evaluate_items()
return self.items.values()
def get_env(self, parameters):
env = os.environ
if parameters:
env = env.copy()
env.update(parameters)
return env
def parse_lines(self, lines, on_error=InvalidScriptLineLogging):
for i, line in enumerate(lines):
try:
yield RawLine.parse(line, self)
except InvalidScriptLineError:
if inspect.isclass(on_error) and issubclass(on_error, Warning):
warnings.warn_explicit(on_error(line, self.script_path), on_error, self.script_path, i + 1)
elif inspect.isclass(on_error) and issubclass(on_error, Exception):
raise on_error(line, self.script_path)
elif on_error is None:
pass
else:
on_error(line, self.script_path)
def save_headers(self):
self.sma.monitors_info.set_headers(self, self.headers)
self.sma.monitors_info.write()
def get_header(self, header_key):
return (self.sma.monitors_info.get_monitor(self, create=False) or {}).get('headers', {}).get(header_key)
def save_last_execution(self):
self.sma.monitors_info.set_last_execution(self)
self.sma.monitors_info.write()
def last_execution(self):
data = self.sma.monitors_info.get_monitor(self, create=False) or {}
last_execution = data.get('last_execution', None)
if last_execution:
return dateutil.parser.parse(last_execution).replace(tzinfo=tzlocal())
def shoud_be_executed(self):
last_execution = self.last_execution()
run_every = self.get_header('X-Run-Every-Seconds')
if not last_execution or not run_every:
return True
dt = datetime.datetime.now(dateutil.tz.tzlocal())
return dt - last_execution >= datetime.timedelta(seconds=run_every)
@staticmethod
def get_observables(lines, params=None):
return get_observables_from_lines(lines, params)
@staticmethod
def get_headers(lines):
headers = {}
for line in filter(lambda x: isinstance(x, RawHeaderLine), lines):
headers[line.key] = line.value
return headers
class Monitors(object):
monitors = None
_monitors_paths = None
def __init__(self, monitors_dir=None, config=None, sma=None):
# TODO: remove config parameter: get from sma
config = config or sma.config if sma else None
self.monitors_dir, self.config = monitors_dir, config
self.sma = sma
def get_monitors(self, monitors_dir=None):
if self.monitors:
return self.monitors
monitors_dir = monitors_dir or self.monitors_dir
self.monitors = [self.get_monitor(x) for x in self._get_monitors_paths(monitors_dir)]
return self.monitors
def _get_monitors_paths(self, monitors_dir):
if self._monitors_paths is None:
self._monitors_paths = [os.path.join(monitors_dir, file) for file in os.listdir(monitors_dir)]
return self._monitors_paths
def get_monitors_names(self, monitors_dir=None):
monitors_dir = monitors_dir or self.monitors_dir
return map(lambda x: os.path.splitext(x.split('/')[-1])[0], self._get_monitors_paths(monitors_dir))
def is_monitor_enabled(self, name):
return name in self.get_monitors_names()
def get_monitor(self, script_path):
return Monitor(script_path, self.sma)
def get_monitor_params(self, monitor):
observables = self.config.get_monitor_observables(monitor.name)
if isinstance(observables, dict):
observables = observables.values()
return dict(filter(lambda x: x[1] is not None, [(observable.get_verbose_name_group(), observable.get_param())
for observable in observables]))
@staticmethod
def get_parameters_cycles(parameters):
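        # Group values by the bare parameter name (text before '(') and build one parameter dict per cycle, reusing shorter value lists round-robin.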
if not parameters:
return [{}]
names_parameters = defaultdict(list)
for parameter, value in parameters.items():
parameter = parameter.split('(')[0]
names_parameters[parameter].append(value)
cycles_num = len(sorted(names_parameters.items(), key=lambda x: len(x[1]), reverse=True)[0][1])
cycles = []
for i in range(cycles_num):
cycle = {}
cycles.append(cycle)
for key, values in names_parameters.items():
cycle[key] = values[i % len(values)]
return cycles
def execute(self, monitor):
parameters = self.get_monitor_params(monitor)
observables = []
try:
for params in self.get_parameters_cycles(parameters):
observables.extend(monitor.execute(params))
monitor.save_headers()
monitor.save_last_execution()
except PermissionError:
warnings.warn_explicit('No permissions for monitor. Check execution perms and read perms.',
UserWarning, monitor.script_path, 1)
return []
new_observables = []
for observable in observables:
if observable not in new_observables:
new_observables.append(observable)
return new_observables
def execute_all(self, use_config=True):
for monitor in self.get_monitors():
if not monitor.shoud_be_executed():
continue
observables = self.execute(monitor)
if use_config:
self.update_observables(monitor, observables)
for observable in observables:
observable.set_monitor(monitor)
yield observable
def update_observables(self, monitor, observables):
for observable in observables:
config_observable = self.config.get_observable(monitor.name, observable.name, observable.group)
observable.update_usign_observable(config_observable)
|
[
"subprocess.Popen",
"threading.Timer",
"dateutil.parser.parse",
"simple_monitor_alert.utils.system.get_hostname",
"simple_monitor_alert.lines.RawLine.parse",
"inspect.isclass",
"simple_monitor_alert.lines.get_observables_from_lines",
"time.time",
"collections.defaultdict",
"dateutil.tz.tzlocal",
"warnings.warn_explicit",
"datetime.timedelta",
"os.path.split",
"os.path.join",
"os.listdir",
"logging.getLogger"
] |
[((474, 498), 'logging.getLogger', 'logging.getLogger', (['"""sma"""'], {}), "('sma')\n", (491, 498), False, 'import logging\n'), ((1236, 1250), 'simple_monitor_alert.utils.system.get_hostname', 'get_hostname', ([], {}), '()\n', (1248, 1250), False, 'from simple_monitor_alert.utils.system import get_hostname\n'), ((2238, 2307), 'subprocess.Popen', 'subprocess.Popen', (['[self.script_path]'], {'stdout': 'subprocess.PIPE', 'env': 'env'}), '([self.script_path], stdout=subprocess.PIPE, env=env)\n', (2254, 2307), False, 'import subprocess\n'), ((2364, 2370), 'time.time', 'time', ([], {}), '()\n', (2368, 2370), False, 'from time import sleep, time\n'), ((3670, 3701), 'threading.Timer', 'Timer', (['timeout', 'terminate_popen'], {}), '(timeout, terminate_popen)\n', (3675, 3701), False, 'from threading import Timer\n'), ((6070, 6111), 'simple_monitor_alert.lines.get_observables_from_lines', 'get_observables_from_lines', (['lines', 'params'], {}), '(lines, params)\n', (6096, 6111), False, 'from simple_monitor_alert.lines import Observable, RawLine, RawItemLine, get_observables_from_lines, RawHeaderLine\n'), ((8124, 8141), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8135, 8141), False, 'from collections import defaultdict\n'), ((5892, 5913), 'dateutil.tz.tzlocal', 'dateutil.tz.tzlocal', ([], {}), '()\n', (5911, 5913), False, 'import dateutil\n'), ((5953, 5990), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'run_every'}), '(seconds=run_every)\n', (5971, 5990), False, 'import datetime\n'), ((7069, 7101), 'os.path.join', 'os.path.join', (['monitors_dir', 'file'], {}), '(monitors_dir, file)\n', (7081, 7101), False, 'import os\n'), ((9016, 9153), 'warnings.warn_explicit', 'warnings.warn_explicit', (['"""No permissions for monitor. Check execution perms and read perms."""', 'UserWarning', 'monitor.script_path', '(1)'], {}), "(\n 'No permissions for monitor. Check execution perms and read perms.',\n UserWarning, monitor.script_path, 1)\n", (9038, 9153), False, 'import warnings\n'), ((2131, 2157), 'os.path.split', 'os.path.split', (['script_path'], {}), '(script_path)\n', (2144, 2157), False, 'import os\n'), ((4413, 4438), 'simple_monitor_alert.lines.RawLine.parse', 'RawLine.parse', (['line', 'self'], {}), '(line, self)\n', (4426, 4438), False, 'from simple_monitor_alert.lines import Observable, RawLine, RawItemLine, get_observables_from_lines, RawHeaderLine\n'), ((5581, 5618), 'dateutil.parser.parse', 'dateutil.parser.parse', (['last_execution'], {}), '(last_execution)\n', (5602, 5618), False, 'import dateutil\n'), ((5634, 5643), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (5641, 5643), False, 'from dateutil.tz import tzlocal\n'), ((7114, 7138), 'os.listdir', 'os.listdir', (['monitors_dir'], {}), '(monitors_dir)\n', (7124, 7138), False, 'import os\n'), ((4501, 4526), 'inspect.isclass', 'inspect.isclass', (['on_error'], {}), '(on_error)\n', (4516, 4526), False, 'import inspect\n'), ((4695, 4720), 'inspect.isclass', 'inspect.isclass', (['on_error'], {}), '(on_error)\n', (4710, 4720), False, 'import inspect\n'), ((3373, 3379), 'time.time', 'time', ([], {}), '()\n', (3377, 3379), False, 'from time import sleep, time\n')]
|
from copy import deepcopy
from math import inf
# This class represents a state.
class State(object):
# This is a constructor. It initializes object variables.
def __init__(self, table, turn = True):
self.table = table
self.next_moves = None
self.game_over = False
self.turn = turn
self.evaluation = 0
# This overloads the greater than operator for comparing State objects.
def __gt__(self, other):
return self.evaluation > other.get_evaluation()
# This overloads the greater than or equal to operator for comparing State objects.
def __ge__(self, other):
return self.evaluation >= other.get_evaluation()
# This overloads the less than operator for comparing State objects.
def __lt__(self, other):
return self.evaluation < other.get_evaluation()
# This overloads the less than or equal to operator for comparing State objects.
def __le__(self, other):
return self.evaluation <= other.get_evaluation()
# This overloads the equal operator for comparing State objects.
def __eq__(self, other):
return self.evaluation == other.get_evaluation()
# This overloads the hash function for State objects.
def __hash__(self):
return id(self)
# This returns the game over flag.
def get_game_end(self):
return self.game_over
# This updates the turn boolean flag.
def set_turn(self, value):
self.turn = value
# This returns the turn boolean flag.
def get_turn(self):
return self.turn
# This updates the state evaluation to the given parameter.
def set_evaluation(self, value):
self.evaluation = value
# This returns the state evaluation.
def get_evaluation(self):
return self.evaluation
# This returns the state's next moves.
def get_next_moves(self):
if self.next_moves is None:
self.generate_next_moves()
return self.next_moves
# This returns the table.
def get_table(self):
return self.table
# This counts the pieces of each player on the board.
def count_pieces(self):
# Initialize variables.
p1_counter = 0
p2_counter = 0
# Iterate through the board and count the pieces.
for i in range(len(self.table)):
for j in range(len(self.table[i])):
if self.table[i][j] == 'x':
p1_counter += 1
if self.table[i][j] == 'X':
p1_counter += 1
if self.table[i][j] == 'o':
p2_counter += 1
if self.table[i][j] == 'O':
p2_counter += 1
# Return the counters.
return p1_counter, p2_counter
# This function finds the move that was done.
def find_move_played(self, previous):
# Initialize variable.
move = []
# Iterate through the board and append the move done to the list.
for i in range(len(self.table)):
for j in range(len(self.table[i])):
if self.table[i][j] != previous[i][j]:
move.append((i, j))
# Return move list.
return move
# This function finds capturing moves.
def find_capturing_moves(self):
# Initialize variable.
moves = []
# Iterate through the board and find moves that can capture opponent's pieces.
for i in range(len(self.table)):
for j in range(len(self.table[i])):
if self.turn and (self.table[i][j] == 'x' or self.table[i][j] == 'X'):
move = self.find_valid_moves_for_piece((i, j))
for temp in move:
if i - temp[0] == 2 or i - temp[0] == -2:
moves.append((i, j))
break
if not self.turn and (self.table[i][j] == 'o' or self.table[i][j] == 'O'):
move = self.find_valid_moves_for_piece((i, j))
for temp in move:
if i - temp[0] == 2 or i - temp[0] == -2:
moves.append((i, j))
break
# Return move list.
return moves
# This function evaluates the state. Essentially, this is the utility function.
    # It implements the Control the Center Strategy, where the AI favors central squares.
# Reference used, https://hobbylark.com/board-games/Checkers-Strategy-Tactics-How-To-Win.
def evaluate_state(self):
# Initialize variables.
p1_score = 0
p2_score = 0
p1_counter = 0
p2_counter = 0
# Iterate through the board and update the scores.
for i in range(len(self.table)):
for j in range(len(self.table[i])):
# Increment score if pawn.
if self.table[i][j] == 'x':
# Increment piece counter.
p1_counter += 1
# Increment scores. Favor center positions more.
if 2 < i < 5 and 1 < j < 6:
p1_score += 50
elif i < 4:
p1_score += 45
else:
p1_score += 40
# Increment score if king.
if self.table[i][j] == 'X':
# Increment piece counter.
p1_counter += 1
# Increment score.
p1_score += 60
# Increment score if pawn.
if self.table[i][j] == 'o':
# Increment piece counter.
p2_counter += 1
# Increment scores. Favor center positions more.
if 2 < i < 5 and 1 < j < 6:
p2_score += 50
elif i > 3:
p2_score += 45
else:
p2_score += 40
# Increment score if king.
if self.table[i][j] == 'O':
# Increment piece counter.
p2_counter += 1
# Increment score.
p2_score += 60
# Get the difference of the two scores.
self.evaluation = p2_score - p1_score
# If counter has no more pieces, game is over.
if p1_counter == 0:
self.evaluation = inf
self.game_over = True
# If counter has no more pieces, game is over.
if p2_counter == 0:
self.evaluation = -inf
self.game_over = True
# Return heuristic value.
return self.evaluation
# This function generates possible moves.
def generate_next_moves(self):
# initialize variables.
self.next_moves = []
captures = []
all_moves = []
# Iterate through the board and look for valid moves.
for i in range(len(self.table)):
for j in range(len(self.table[i])):
if self.turn:
# If valid piece, look for valid moves.
if self.table[i][j] == "x" or self.table[i][j] == "X":
valid_moves = self.find_valid_moves_for_piece((i, j))
for move in valid_moves:
# Append capturing move to list.
if move[0] - i == 2 or move[0] - i == -2:
new_table = self.generate_new_state((i, j), move)
position = State(new_table, not self.turn)
captures.append(position)
# Append non-capturing move to list.
else:
new_table = self.generate_new_state((i, j), move)
position = State(new_table, not self.turn)
all_moves.append(position)
else:
# If valid piece, look for valid moves.
if self.table[i][j] == "o" or self.table[i][j] == "O":
valid_moves = self.find_valid_moves_for_piece((i, j))
for move in valid_moves:
# Append capturing move to list.
if move[0] - i == 2 or move[0] - i == -2:
new_table = self.generate_new_state((i, j), move)
position = State(new_table, not self.turn)
captures.append(position)
else:
# Append non-capturing move to list.
new_table = self.generate_new_state((i, j), move)
position = State(new_table, not self.turn)
all_moves.append(position)
# Set next moves to captures if it's not empty. Otherwise, set next moves to non-capturing moves.
if len(captures) > 0:
self.next_moves = captures
else:
self.next_moves = captures + all_moves
# This function generates a new state with the piece to move.
def generate_new_state(self, piece, move):
# Initialize variables.
table_copy = deepcopy(self.table)
piece_type = table_copy[piece[0]][piece[1]]
# This moves the piece. It promotes the piece to king.
if piece_type == "x" or piece_type == "X":
if move[0] == 0:
table_copy[piece[0]][piece[1]] = "X"
if piece[0] - move[0] == 2 or piece[0] - move[0] == -2:
row = piece[0] + (move[0] - piece[0]) // 2
column = piece[1] + (move[1] - piece[1]) // 2
table_copy[row][column] = "-"
# This moves the piece. It promotes the piece to king.
if piece_type == "o" or piece_type == "O":
if move[0] == 7:
table_copy[piece[0]][piece[1]] = "O"
if piece[0] - move[0] == 2 or piece[0] - move[0] == -2:
row = piece[0] + (move[0] - piece[0]) // 2
column = piece[1] + (move[1] - piece[1]) // 2
table_copy[row][column] = "-"
table_copy[piece[0]][piece[1]], table_copy[move[0]][move[1]] = table_copy[move[0]][move[1]], \
table_copy[piece[0]][piece[1]]
# Return state.
return table_copy
# This function performs the move.
def play_move(self, piece, move):
# Initialize variables.
table = self.generate_new_state(piece, move)
position = None
# Iterate through valid states and find same table within the states.
for state in self.get_next_moves():
if table == state.get_table():
position = state
break
# Return the move.
return position
# This function looks for valid moves.
def find_valid_moves_for_piece(self, coordinates):
# Initialize variables.
captures = []
valid_moves = []
piece = self.table[coordinates[0]][coordinates[1]]
        # Moves toward row 7 (row index + 1): available to every piece except plain 'x' pawns.
if piece != "x":
if 0 <= coordinates[0] < 7:
# Search left side of the piece.
if (coordinates[1] - 1) >= 0:
# Search for non-capturing moves.
if self.table[coordinates[0] + 1][coordinates[1] - 1] == '-':
valid_moves.append((coordinates[0] + 1, coordinates[1] - 1))
# Search for capture moves.
elif coordinates[0] + 2 < 8 and coordinates[1] - 2 >= 0:
if self.table[coordinates[0] + 2][coordinates[1] - 2] == '-':
if piece.lower() != self.table[coordinates[0] + 1][coordinates[1] - 1].lower():
captures.append((coordinates[0] + 2, coordinates[1] - 2))
# Search right side of the piece.
if (coordinates[1] + 1) < 8:
# Search for non-capturing moves.
if self.table[coordinates[0] + 1][coordinates[1] + 1] == '-':
valid_moves.append((coordinates[0] + 1, coordinates[1] + 1))
# Search for capture moves.
elif coordinates[0] + 2 < 8 and coordinates[1] + 2 < 8:
if self.table[coordinates[0] + 2][coordinates[1] + 2] == '-':
if piece.lower() != self.table[coordinates[0] + 1][coordinates[1] + 1].lower():
captures.append((coordinates[0] + 2, coordinates[1] + 2))
        # Moves toward row 0 (row index - 1): available to every piece except plain 'o' pawns.
if piece != "o":
if 0 < coordinates[0] < 8:
# Search left side of the piece.
if (coordinates[1] - 1) >= 0:
# Search for non-capturing moves.
if self.table[coordinates[0] - 1][coordinates[1] - 1] == '-':
valid_moves.append((coordinates[0] - 1, coordinates[1] - 1))
# Search for capture moves.
elif coordinates[0] - 2 >= 0 and coordinates[1] - 2 >= 0:
if self.table[coordinates[0] - 2][coordinates[1] - 2] == '-':
if piece.lower() != self.table[coordinates[0] - 1][coordinates[1] - 1].lower():
captures.append((coordinates[0] - 2, coordinates[1] - 2))
# Search right side of the piece.
if (coordinates[1] + 1) < 8:
# Search for non-capturing moves.
if self.table[coordinates[0] - 1][coordinates[1] + 1] == '-':
valid_moves.append((coordinates[0] - 1, coordinates[1] + 1))
# Search for capture moves.
elif coordinates[0] - 2 >= 0 and coordinates[1] + 2 < 8:
if self.table[coordinates[0] - 2][coordinates[1] + 2] == '-':
if piece.lower() != self.table[coordinates[0] - 1][coordinates[1] + 1].lower():
captures.append((coordinates[0] - 2, coordinates[1] + 2))
# Capturing moves list is not empty, return the list.
if len(captures) != 0:
return captures
# Otherwise, return all moves.
return captures + valid_moves
|
[
"copy.deepcopy"
] |
[((9535, 9555), 'copy.deepcopy', 'deepcopy', (['self.table'], {}), '(self.table)\n', (9543, 9555), False, 'from copy import deepcopy\n')]
|
import json
import logging
import operator
import threading
from datetime import datetime
from decimal import Decimal
from operator import eq
from model.model.common.data_page import DataPage
from sqlalchemy import Table, MetaData
from sqlalchemy import update, and_, or_, delete, desc, asc, \
text, JSON, inspect, func
from sqlalchemy.dialects.mysql import insert
from sqlalchemy.exc import NoSuchTableError, IntegrityError
from sqlalchemy.future import select
from watchmen_boot.storage.mysql.mysql_utils import parse_obj
from storage.storage.exception.exception import OptimisticLockError, InsertConflictError
from watchmen_boot.cache.cache_manage import cacheman, STMT, COLUMNS_BY_TABLE_NAME
from watchmen_boot.guid.snowflake import get_int_surrogate_key
from pipeline.common.utils.data_utils import build_data_pages, capital_to_lower, build_collection_name
from pipeline.common.utils.data_utils import convert_to_dict
from pipeline.database.topic.topic_storage_interface import TopicStorageInterface
log = logging.getLogger("app." + __name__)
# @singleton
class MysqlTopicStorage(TopicStorageInterface):
def __init__(self, client, storage_template):
self.engine = client
self.storage_template = storage_template
self.insp = inspect(client)
self.metadata = MetaData()
self.lock = threading.RLock()
log.info("mysql template initialized")
def get_topic_table_by_name(self, table_name):
self.lock.acquire()
try:
table = Table(table_name, self.metadata, extend_existing=False, autoload=True, autoload_with=self.engine)
return table
finally:
self.lock.release()
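    # Translate a nested where dict ('and'/'or' branches plus comparison operators) into SQLAlchemy expressions; JSON columns handle in/not-in via JSON_CONTAINS.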
def build_mysql_where_expression(self, table, where):
for key, value in where.items():
if key == "and" or key == "or":
result_filters = self.get_result_filters(table, value)
if key == "and":
return and_(*result_filters)
if key == "or":
return or_(*result_filters)
else:
if isinstance(value, dict):
for k, v in value.items():
if k == "=":
return table.c[key.lower()] == v
if k == "!=":
return operator.ne(table.c[key.lower()], v)
if k == "like":
if v != "" or v != '' or v is not None:
return table.c[key.lower()].like("%" + v + "%")
if k == "in":
if isinstance(table.c[key.lower()].type, JSON):
stmt = ""
if isinstance(v, list):
# value_ = ",".join(v)
for item in v:
if stmt == "":
stmt = "JSON_CONTAINS(" + key.lower() + ", '[\"" + item + "\"]', '$') = 1"
else:
stmt = stmt + " or JSON_CONTAINS(" + key.lower() + ", '[\"" + item + "\"]', '$') = 1 "
else:
value_ = v
stmt = "JSON_CONTAINS(" + key.lower() + ", '[\"" + value_ + "\"]', '$') = 1"
return text(stmt)
else:
if isinstance(v, list):
return table.c[key.lower()].in_(v)
elif isinstance(v, str):
v_list = v.split(",")
return table.c[key.lower()].in_(v_list)
else:
raise TypeError(
"operator in, the value \"{0}\" is not list or str".format(v))
if k == "not-in":
if isinstance(table.c[key.lower()].type, JSON):
if isinstance(v, list):
value_ = ",".join(v)
else:
value_ = v
stmt = "JSON_CONTAINS(" + key.lower() + ", '[\"" + value_ + "\"]', '$') = 0"
return text(stmt)
else:
if isinstance(v, list):
return table.c[key.lower()].notin_(v)
elif isinstance(v, str):
v_list = ",".join(v)
return table.c[key.lower()].notin_(v_list)
else:
raise TypeError(
"operator not_in, the value \"{0}\" is not list or str".format(v))
if k == ">":
return table.c[key.lower()] > v
if k == ">=":
return table.c[key.lower()] >= v
if k == "<":
return table.c[key.lower()] < v
if k == "<=":
return table.c[key.lower()] <= v
if k == "between":
if (isinstance(v, tuple)) and len(v) == 2:
return table.c[key.lower()].between(v[0],
v[1])
else:
return table.c[key.lower()] == value
def get_result_filters(self, table, value):
if isinstance(value, list):
result_filters = []
for express in value:
result = self.build_mysql_where_expression(table, express)
result_filters.append(result)
return result_filters
else:
return []
# @staticmethod
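    # Build the column/value mapping for an insert (new id_, version_ = 0, column defaults) or an update (version_ + 1, _sum/_count folded into SQL increments).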
def build_mysql_updates_expression(self, table, updates, stmt_type: str) -> dict:
if stmt_type == "insert":
new_updates = {}
for key in table.c.keys():
if key == "id_":
new_updates[key] = get_int_surrogate_key()
elif key == "version_":
new_updates[key] = 0
else:
if isinstance(table.c[key].type, JSON):
if updates.get(key) is not None:
new_updates[key] = updates.get(key)
else:
new_updates[key] = None
else:
if updates.get(key) is not None:
value_ = updates.get(key)
if isinstance(value_, dict):
for k, v in value_.items():
if k == "_sum":
new_updates[key.lower()] = v
elif k == "_count":
new_updates[key.lower()] = v
elif k == "_avg":
pass # todo
else:
new_updates[key] = value_
else:
default_value = self.get_table_column_default_value(table.name, key)
if default_value is not None:
value_ = default_value.strip("'").strip(" ")
if value_.isdigit():
new_updates[key] = Decimal(value_)
else:
new_updates[key] = value_
else:
new_updates[key] = None
return new_updates
elif stmt_type == "update":
new_updates = {}
for key in table.c.keys():
if key == "version_":
new_updates[key] = updates.get(key) + 1
else:
if isinstance(table.c[key].type, JSON):
if updates.get(key) is not None:
new_updates[key] = updates.get(key)
else:
if updates.get(key) is not None:
value_ = updates.get(key)
if isinstance(value_, dict):
for k, v in value_.items():
if k == "_sum":
new_updates[key.lower()] = text(f'{key.lower()} + {v}')
elif k == "_count":
new_updates[key.lower()] = text(f'{key.lower()} + {v}')
elif k == "_avg":
pass # todo
else:
new_updates[key] = value_
return new_updates
@staticmethod
def build_mysql_order(table, order_: list):
result = []
if order_ is None:
return result
else:
for item in order_:
if isinstance(item, tuple):
if item[1] == "desc":
new_ = desc(table.c[item[0].lower()])
result.append(new_)
if item[1] == "asc":
new_ = asc(table.c[item[0].lower()])
result.append(new_)
return result
def clear_metadata(self):
self.metadata.clear()
'''
topic data interface
'''
def drop_(self, topic_name):
return self.drop_topic_data_table(topic_name)
def drop_topic_data_table(self, topic_name):
table_name = 'topic_' + topic_name
try:
table = self.get_topic_table_by_name(table_name)
table.drop(self.engine)
except NoSuchTableError:
log.warning("drop table \"{0}\" not existed".format(table_name))
def topic_data_delete_(self, where, topic_name):
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
if where is None:
stmt = delete(table)
else:
stmt = delete(table).where(self.build_mysql_where_expression(table, where))
with self.engine.connect() as conn:
with conn.begin():
conn.execute(stmt)
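    # Cache insert/update/select statements per table in cacheman to avoid rebuilding them on every call.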
@staticmethod
def build_stmt(stmt_type, table_name, table):
key = stmt_type + "-" + table_name
result = cacheman[STMT].get(key)
if result is not None:
return result
else:
if stmt_type == "insert":
stmt = insert(table)
cacheman[STMT].set(key, stmt)
return stmt
elif stmt_type == "update":
stmt = update(table)
cacheman[STMT].set(key, stmt)
return stmt
elif stmt_type == "select":
stmt = select(table)
cacheman[STMT].set(key, stmt)
return stmt
def topic_data_insert_one(self, one, topic_name):
table_name = f"topic_{topic_name}"
table = self.get_topic_table_by_name(table_name)
stmt = self.build_stmt("insert", table_name, table)
one_dict: dict = capital_to_lower(convert_to_dict(one))
value = self.build_mysql_updates_expression(table, one_dict, "insert")
with self.engine.connect() as conn:
with conn.begin():
try:
result = conn.execute(stmt, value)
except IntegrityError as e:
raise InsertConflictError("InsertConflict")
return result.rowcount
def topic_data_insert_(self, data, topic_name):
table_name = f"topic_{topic_name}"
table = self.get_topic_table_by_name(table_name)
values = []
for instance in data:
instance_dict: dict = convert_to_dict(instance)
instance_dict['id_'] = get_int_surrogate_key()
value = {}
for key in table.c.keys():
value[key] = instance_dict.get(key)
values.append(value)
stmt = self.build_stmt("insert", table_name, table)
with self.engine.connect() as conn:
with conn.begin():
conn.execute(stmt, values)
def topic_data_update_one(self, id_: int, one: any, topic_name: str):
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
stmt = self.build_stmt("update", table_name, table)
stmt = stmt.where(eq(table.c['id_'], id_))
one_dict = convert_to_dict(one)
values = self.build_mysql_updates_expression(table, capital_to_lower(one_dict), "update")
stmt = stmt.values(values)
with self.engine.begin() as conn:
conn.execute(stmt)
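    # Optimistic-lock update: match id_ and version_, bump version_, and raise OptimisticLockError if no row matched.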
def topic_data_update_one_with_version(self, id_: int, version_: int, one: any, topic_name: str):
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
stmt = self.build_stmt("update", table_name, table)
stmt = stmt.where(and_(eq(table.c['id_'], id_), eq(table.c['version_'], version_)))
one_dict = convert_to_dict(one)
one_dict['version_'] = version_
values = self.build_mysql_updates_expression(table, capital_to_lower(one_dict), "update")
stmt = stmt.values(values)
with self.engine.begin() as conn:
result = conn.execute(stmt)
if result.rowcount == 0:
raise OptimisticLockError("Optimistic lock error")
def topic_data_update_(self, query_dict, instance, topic_name):
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
stmt = self.build_stmt("update", table_name, table)
stmt = (stmt.
where(self.build_mysql_where_expression(table, query_dict)))
instance_dict: dict = convert_to_dict(instance)
values = {}
for key, value in instance_dict.items():
if key != 'id_':
if key.lower() in table.c.keys():
values[key.lower()] = value
stmt = stmt.values(values)
with self.engine.begin() as conn:
# with conn.begin():
conn.execute(stmt)
def topic_data_find_by_id(self, id_: int, topic_name: str) -> any:
return self.topic_data_find_one({"id_": id_}, topic_name)
def topic_data_find_one(self, where, topic_name) -> any:
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
stmt = self.build_stmt("select", table_name, table)
stmt = stmt.where(self.build_mysql_where_expression(table, where))
with self.engine.connect() as conn:
cursor = conn.execute(stmt).cursor
columns = [col[0] for col in cursor.description]
row = cursor.fetchone()
if row is None:
return None
else:
result = {}
for index, name in enumerate(columns):
if isinstance(table.c[name.lower()].type, JSON):
if row[index] is not None:
result[name] = json.loads(row[index])
else:
result[name] = None
else:
result[name] = row[index]
return self._convert_dict_key(result, topic_name)
def topic_data_find_(self, where, topic_name):
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
stmt = self.build_stmt("select", table_name, table)
stmt = stmt.where(self.build_mysql_where_expression(table, where))
with self.engine.connect() as conn:
cursor = conn.execute(stmt).cursor
columns = [col[0] for col in cursor.description]
res = cursor.fetchall()
if res is None:
return None
else:
results = []
for row in res:
result = {}
for index, name in enumerate(columns):
if isinstance(table.c[name.lower()].type, JSON):
if row[index] is not None:
result[name] = json.loads(row[index])
else:
result[name] = None
else:
result[name] = row[index]
results.append(result)
return self._convert_list_elements_key(results, topic_name)
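    # Aggregate a single column (sum/count/avg) over rows matching the where clause and return the scalar result.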
def topic_data_find_with_aggregate(self, where, topic_name, aggregate):
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
for key, value in aggregate.items():
if value == "sum":
stmt = select(text(f'sum({key.lower()})'))
elif value == "count":
stmt = select(func.count())
elif value == "avg":
stmt = select(text(f'avg({key.lower()})'))
stmt = stmt.select_from(table)
stmt = stmt.where(self.build_mysql_where_expression(table, where))
with self.engine.connect() as conn:
cursor = conn.execute(stmt).cursor
res = cursor.fetchone()
if res is None:
return None
else:
return res[0]
def topic_data_list_all(self, topic_name) -> list:
table_name = 'topic_' + topic_name
table = self.get_topic_table_by_name(table_name)
# stmt = select(table)
stmt = self.build_stmt("select", table_name, table)
with self.engine.connect() as conn:
cursor = conn.execute(stmt).cursor
columns = [col[0] for col in cursor.description]
res = cursor.fetchall()
if res is None:
return None
else:
results = []
for row in res:
result = {}
for index, name in enumerate(columns):
if isinstance(table.c[name.lower()].type, JSON):
if row[index] is not None:
result[name] = json.loads(row[index])
else:
result[name] = None
else:
result[name] = row[index]
if self.storage_template.check_topic_type(topic_name) == "raw":
results.append(result['data_'])
else:
results.append(result)
if self.storage_template.check_topic_type(topic_name) == "raw":
return results
else:
return self._convert_list_elements_key(results, topic_name)
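    # Paged query: apply filters, ordering and offset/limit, decode JSON columns (raw topics unwrap data_), and return a DataPage.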
def topic_data_page_(self, where, sort, pageable, model, name) -> DataPage:
table_name = build_collection_name(name)
count = self.count_topic_data_table(table_name)
table = self.get_topic_table_by_name(table_name)
stmt = self.build_stmt("select", table_name, table)
stmt = stmt.where(self.build_mysql_where_expression(table, where))
orders = self.build_mysql_order(table, sort)
for order in orders:
stmt = stmt.order_by(order)
offset = pageable.pageSize * (pageable.pageNumber - 1)
stmt = stmt.offset(offset).limit(pageable.pageSize)
results = []
with self.engine.connect() as conn:
cursor = conn.execute(stmt).cursor
columns = [col[0] for col in cursor.description]
res = cursor.fetchall()
if self.storage_template.check_topic_type(name) == "raw":
for row in res:
result = {}
for index, name in enumerate(columns):
if name == "data_":
result.update(json.loads(row[index]))
results.append(result)
else:
for row in res:
result = {}
for index, name in enumerate(columns):
if isinstance(table.c[name.lower()].type, JSON):
if row[index] is not None:
result[name] = json.loads(row[index])
else:
result[name] = None
else:
result[name] = row[index]
if model is not None:
results.append(parse_obj(model, result, table))
else:
results.append(result)
return build_data_pages(pageable, results, count)
'''
internal method
'''
def get_table_column_default_value(self, table_name, column_name):
columns = self._get_table_columns(table_name)
for column in columns:
if column["name"] == column_name:
return column["default"]
def _get_table_columns(self, table_name):
cached_columns = cacheman[COLUMNS_BY_TABLE_NAME].get(table_name)
if cached_columns is not None:
return cached_columns
columns = self.insp.get_columns(table_name)
if columns is not None:
cacheman[COLUMNS_BY_TABLE_NAME].set(table_name, columns)
return columns
def _convert_list_elements_key(self, list_info, topic_name):
if list_info is None:
return None
        new_list = []
        factors = self.storage_template.get_topic_factors(topic_name)
        for item in list_info:
            # Build a fresh dict per row; reusing a single dict would leave every
            # appended entry pointing at the same (last) row's values.
            new_dict = {}
for factor in factors:
new_dict[factor['name']] = item[factor['name'].lower()]
new_dict['id_'] = item['id_']
if 'tenant_id_' in item:
new_dict['tenant_id_'] = item.get("tenant_id_", 1)
if "insert_time_" in item:
new_dict['insert_time_'] = item.get("insert_time_", datetime.now().replace(tzinfo=None))
if "update_time_" in item:
new_dict['update_time_'] = item.get("update_time_", datetime.now().replace(tzinfo=None))
if "version_" in item:
new_dict['version_'] = item.get("version_", 0)
if "aggregate_assist_" in item:
new_dict['aggregate_assist_'] = item.get("aggregate_assist_")
new_list.append(new_dict)
return new_list
def _convert_dict_key(self, dict_info, topic_name):
if dict_info is None:
return None
new_dict = {}
factors = self.storage_template.get_topic_factors(topic_name)
for factor in factors:
new_dict[factor['name']] = dict_info[factor['name'].lower()]
new_dict['id_'] = dict_info['id_']
if 'tenant_id_' in dict_info:
new_dict['tenant_id_'] = dict_info.get("tenant_id_", 1)
if "insert_time_" in dict_info:
new_dict['insert_time_'] = dict_info.get("insert_time_", datetime.now().replace(tzinfo=None))
if "update_time_" in dict_info:
new_dict['update_time_'] = dict_info.get("update_time_", datetime.now().replace(tzinfo=None))
if "version_" in dict_info:
new_dict['version_'] = dict_info.get("version_", None)
if "aggregate_assist_" in dict_info:
new_dict['aggregate_assist_'] = dict_info.get("aggregate_assist_")
return new_dict
def count_topic_data_table(self, table_name):
stmt = 'SELECT count(%s) AS count FROM %s' % ('id_', table_name)
with self.engine.connect() as conn:
cursor = conn.execute(text(stmt)).cursor
columns = [col[0] for col in cursor.description]
result = cursor.fetchone()
return result[0]
|
[
"sqlalchemy.Table",
"sqlalchemy.update",
"sqlalchemy.or_",
"storage.storage.exception.exception.InsertConflictError",
"sqlalchemy.future.select",
"json.loads",
"operator.eq",
"sqlalchemy.dialects.mysql.insert",
"watchmen_boot.guid.snowflake.get_int_surrogate_key",
"datetime.datetime.now",
"sqlalchemy.delete",
"sqlalchemy.inspect",
"pipeline.common.utils.data_utils.convert_to_dict",
"sqlalchemy.and_",
"threading.RLock",
"pipeline.common.utils.data_utils.build_data_pages",
"watchmen_boot.storage.mysql.mysql_utils.parse_obj",
"sqlalchemy.text",
"storage.storage.exception.exception.OptimisticLockError",
"pipeline.common.utils.data_utils.build_collection_name",
"sqlalchemy.MetaData",
"decimal.Decimal",
"pipeline.common.utils.data_utils.capital_to_lower",
"sqlalchemy.func.count",
"logging.getLogger"
] |
[((1018, 1054), 'logging.getLogger', 'logging.getLogger', (["('app.' + __name__)"], {}), "('app.' + __name__)\n", (1035, 1054), False, 'import logging\n'), ((1267, 1282), 'sqlalchemy.inspect', 'inspect', (['client'], {}), '(client)\n', (1274, 1282), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((1307, 1317), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (1315, 1317), False, 'from sqlalchemy import Table, MetaData\n'), ((1338, 1355), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1353, 1355), False, 'import threading\n'), ((13063, 13083), 'pipeline.common.utils.data_utils.convert_to_dict', 'convert_to_dict', (['one'], {}), '(one)\n', (13078, 13083), False, 'from pipeline.common.utils.data_utils import convert_to_dict\n'), ((13664, 13684), 'pipeline.common.utils.data_utils.convert_to_dict', 'convert_to_dict', (['one'], {}), '(one)\n', (13679, 13684), False, 'from pipeline.common.utils.data_utils import convert_to_dict\n'), ((14394, 14419), 'pipeline.common.utils.data_utils.convert_to_dict', 'convert_to_dict', (['instance'], {}), '(instance)\n', (14409, 14419), False, 'from pipeline.common.utils.data_utils import convert_to_dict\n'), ((19388, 19415), 'pipeline.common.utils.data_utils.build_collection_name', 'build_collection_name', (['name'], {}), '(name)\n', (19409, 19415), False, 'from pipeline.common.utils.data_utils import build_data_pages, capital_to_lower, build_collection_name\n'), ((21087, 21129), 'pipeline.common.utils.data_utils.build_data_pages', 'build_data_pages', (['pageable', 'results', 'count'], {}), '(pageable, results, count)\n', (21103, 21129), False, 'from pipeline.common.utils.data_utils import build_data_pages, capital_to_lower, build_collection_name\n'), ((1516, 1617), 'sqlalchemy.Table', 'Table', (['table_name', 'self.metadata'], {'extend_existing': '(False)', 'autoload': '(True)', 'autoload_with': 'self.engine'}), '(table_name, self.metadata, extend_existing=False, autoload=True,\n autoload_with=self.engine)\n', (1521, 1617), False, 'from sqlalchemy import Table, MetaData\n'), ((10561, 10574), 'sqlalchemy.delete', 'delete', (['table'], {}), '(table)\n', (10567, 10574), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((11719, 11739), 'pipeline.common.utils.data_utils.convert_to_dict', 'convert_to_dict', (['one'], {}), '(one)\n', (11734, 11739), False, 'from pipeline.common.utils.data_utils import convert_to_dict\n'), ((12347, 12372), 'pipeline.common.utils.data_utils.convert_to_dict', 'convert_to_dict', (['instance'], {}), '(instance)\n', (12362, 12372), False, 'from pipeline.common.utils.data_utils import convert_to_dict\n'), ((12408, 12431), 'watchmen_boot.guid.snowflake.get_int_surrogate_key', 'get_int_surrogate_key', ([], {}), '()\n', (12429, 12431), False, 'from watchmen_boot.guid.snowflake import get_int_surrogate_key\n'), ((13019, 13042), 'operator.eq', 'eq', (["table.c['id_']", 'id_'], {}), "(table.c['id_'], id_)\n", (13021, 13042), False, 'from operator import eq\n'), ((13144, 13170), 'pipeline.common.utils.data_utils.capital_to_lower', 'capital_to_lower', (['one_dict'], {}), '(one_dict)\n', (13160, 13170), False, 'from pipeline.common.utils.data_utils import build_data_pages, capital_to_lower, build_collection_name\n'), ((13785, 13811), 'pipeline.common.utils.data_utils.capital_to_lower', 'capital_to_lower', (['one_dict'], {}), '(one_dict)\n', (13801, 13811), False, 'from pipeline.common.utils.data_utils import build_data_pages, capital_to_lower, build_collection_name\n'), ((13991, 14035), 'storage.storage.exception.exception.OptimisticLockError', 'OptimisticLockError', (['"""Optimistic lock error"""'], {}), "('Optimistic lock error')\n", (14010, 14035), False, 'from storage.storage.exception.exception import OptimisticLockError, InsertConflictError\n'), ((11072, 11085), 'sqlalchemy.dialects.mysql.insert', 'insert', (['table'], {}), '(table)\n', (11078, 11085), False, 'from sqlalchemy.dialects.mysql import insert\n'), ((13584, 13607), 'operator.eq', 'eq', (["table.c['id_']", 'id_'], {}), "(table.c['id_'], id_)\n", (13586, 13607), False, 'from operator import eq\n'), ((13609, 13642), 'operator.eq', 'eq', (["table.c['version_']", 'version_'], {}), "(table.c['version_'], version_)\n", (13611, 13642), False, 'from operator import eq\n'), ((1963, 1984), 'sqlalchemy.and_', 'and_', (['*result_filters'], {}), '(*result_filters)\n', (1967, 1984), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((2044, 2064), 'sqlalchemy.or_', 'or_', (['*result_filters'], {}), '(*result_filters)\n', (2047, 2064), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((6423, 6446), 'watchmen_boot.guid.snowflake.get_int_surrogate_key', 'get_int_surrogate_key', ([], {}), '()\n', (6444, 6446), False, 'from watchmen_boot.guid.snowflake import get_int_surrogate_key\n'), ((10608, 10621), 'sqlalchemy.delete', 'delete', (['table'], {}), '(table)\n', (10614, 10621), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((11223, 11236), 'sqlalchemy.update', 'update', (['table'], {}), '(table)\n', (11229, 11236), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((24121, 24131), 'sqlalchemy.text', 'text', (['stmt'], {}), '(stmt)\n', (24125, 24131), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((11374, 11387), 'sqlalchemy.future.select', 'select', (['table'], {}), '(table)\n', (11380, 11387), False, 'from sqlalchemy.future import select\n'), ((12041, 12078), 'storage.storage.exception.exception.InsertConflictError', 'InsertConflictError', (['"""InsertConflict"""'], {}), "('InsertConflict')\n", (12060, 12078), False, 'from storage.storage.exception.exception import OptimisticLockError, InsertConflictError\n'), ((15668, 15690), 'json.loads', 'json.loads', (['row[index]'], {}), '(row[index])\n', (15678, 15690), False, 'import json\n'), ((17392, 17404), 'sqlalchemy.func.count', 'func.count', ([], {}), '()\n', (17402, 17404), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((20974, 21005), 'watchmen_boot.storage.mysql.mysql_utils.parse_obj', 'parse_obj', (['model', 'result', 'table'], {}), '(model, result, table)\n', (20983, 21005), False, 'from watchmen_boot.storage.mysql.mysql_utils import parse_obj\n'), ((23485, 23499), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (23497, 23499), False, 'from datetime import datetime\n'), ((23631, 23645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (23643, 23645), False, 'from datetime import datetime\n'), ((16727, 16749), 'json.loads', 'json.loads', (['row[index]'], {}), '(row[index])\n', (16737, 16749), False, 'import json\n'), ((20373, 20395), 'json.loads', 'json.loads', (['row[index]'], {}), '(row[index])\n', (20383, 20395), False, 'import json\n'), ((20724, 20746), 'json.loads', 'json.loads', (['row[index]'], {}), '(row[index])\n', (20734, 20746), False, 'import json\n'), ((3480, 3490), 'sqlalchemy.text', 'text', (['stmt'], {}), '(stmt)\n', (3484, 3490), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((4501, 4511), 'sqlalchemy.text', 'text', (['stmt'], {}), '(stmt)\n', (4505, 4511), False, 'from sqlalchemy import update, and_, or_, delete, desc, asc, text, JSON, inspect, func\n'), ((18663, 18685), 'json.loads', 'json.loads', (['row[index]'], {}), '(row[index])\n', (18673, 18685), False, 'import json\n'), ((22433, 22447), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22445, 22447), False, 'from datetime import datetime\n'), ((22585, 22599), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22597, 22599), False, 'from datetime import datetime\n'), ((7882, 7897), 'decimal.Decimal', 'Decimal', (['value_'], {}), '(value_)\n', (7889, 7897), False, 'from decimal import Decimal\n')]
|
# -*- coding: utf-8 -*-
from django.contrib.gis import admin
# Register your models here.
from siting.models import County, SitingModel, ModelLayer
# regular geodjango admin
#admin.site.register(County, admin.GeoModelAdmin)
# open street map admin
admin.site.register(County, admin.OSMGeoAdmin)
admin.site.register(SitingModel)
admin.site.register(ModelLayer)
|
[
"django.contrib.gis.admin.site.register"
] |
[((252, 298), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['County', 'admin.OSMGeoAdmin'], {}), '(County, admin.OSMGeoAdmin)\n', (271, 298), False, 'from django.contrib.gis import admin\n'), ((300, 332), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['SitingModel'], {}), '(SitingModel)\n', (319, 332), False, 'from django.contrib.gis import admin\n'), ((334, 365), 'django.contrib.gis.admin.site.register', 'admin.site.register', (['ModelLayer'], {}), '(ModelLayer)\n', (353, 365), False, 'from django.contrib.gis import admin\n')]
|
from django.test import TestCase
from polymorphic.tests.models import Bottom, Middle, Top
class RegressionTests(TestCase):
def test_for_query_result_incomplete_with_inheritance(self):
""" https://github.com/bconstantin/django_polymorphic/issues/15 """
top = Top()
top.save()
middle = Middle()
middle.save()
bottom = Bottom()
bottom.save()
expected_queryset = [top, middle, bottom]
self.assertQuerysetEqual(Top.objects.order_by('pk'), [repr(r) for r in expected_queryset])
expected_queryset = [middle, bottom]
self.assertQuerysetEqual(Middle.objects.order_by('pk'), [repr(r) for r in expected_queryset])
expected_queryset = [bottom]
self.assertQuerysetEqual(Bottom.objects.order_by('pk'), [repr(r) for r in expected_queryset])
|
[
"polymorphic.tests.models.Top",
"polymorphic.tests.models.Middle.objects.order_by",
"polymorphic.tests.models.Top.objects.order_by",
"polymorphic.tests.models.Bottom.objects.order_by",
"polymorphic.tests.models.Bottom",
"polymorphic.tests.models.Middle"
] |
[((282, 287), 'polymorphic.tests.models.Top', 'Top', ([], {}), '()\n', (285, 287), False, 'from polymorphic.tests.models import Bottom, Middle, Top\n'), ((324, 332), 'polymorphic.tests.models.Middle', 'Middle', ([], {}), '()\n', (330, 332), False, 'from polymorphic.tests.models import Bottom, Middle, Top\n'), ((372, 380), 'polymorphic.tests.models.Bottom', 'Bottom', ([], {}), '()\n', (378, 380), False, 'from polymorphic.tests.models import Bottom, Middle, Top\n'), ((487, 513), 'polymorphic.tests.models.Top.objects.order_by', 'Top.objects.order_by', (['"""pk"""'], {}), "('pk')\n", (507, 513), False, 'from polymorphic.tests.models import Bottom, Middle, Top\n'), ((632, 661), 'polymorphic.tests.models.Middle.objects.order_by', 'Middle.objects.order_by', (['"""pk"""'], {}), "('pk')\n", (655, 661), False, 'from polymorphic.tests.models import Bottom, Middle, Top\n'), ((772, 801), 'polymorphic.tests.models.Bottom.objects.order_by', 'Bottom.objects.order_by', (['"""pk"""'], {}), "('pk')\n", (795, 801), False, 'from polymorphic.tests.models import Bottom, Middle, Top\n')]
|
#include "TStyle.h"
from ROOT import TPad, TStyle, kWhite, kTRUE, gPad
# tdrGrid: Turns the grid lines on (true) or off (false)
def tdrGrid(tdrStyle, gridOn):
tdrStyle.SetPadGridX(gridOn);
tdrStyle.SetPadGridY(gridOn);
# fixOverlay: Redraws the axis
def fixOverlay():
gPad.RedrawAxis();
def setTDRStyle():
tdrStyle = TStyle("tdrStyle","Style for P-TDR");
# For the canvas:
tdrStyle.SetCanvasBorderMode(0);
tdrStyle.SetCanvasColor(kWhite);
tdrStyle.SetCanvasDefH(600); #Height of canvas
tdrStyle.SetCanvasDefW(600); #Width of canvas
tdrStyle.SetCanvasDefX(0); #POsition on screen
tdrStyle.SetCanvasDefY(0);
# For the Pad:
tdrStyle.SetPadBorderMode(0);
# tdrStyle.SetPadBorderSize(Width_t size = 1);
tdrStyle.SetPadColor(kWhite);
tdrStyle.SetPadGridX(False);
tdrStyle.SetPadGridY(False);
tdrStyle.SetGridColor(0);
tdrStyle.SetGridStyle(3);
tdrStyle.SetGridWidth(1);
# For the frame:
tdrStyle.SetFrameBorderMode(0);
tdrStyle.SetFrameBorderSize(1);
tdrStyle.SetFrameFillColor(0);
tdrStyle.SetFrameFillStyle(0);
tdrStyle.SetFrameLineColor(1);
tdrStyle.SetFrameLineStyle(1);
tdrStyle.SetFrameLineWidth(1);
# For the histo:
# tdrStyle.SetHistFillColor(1);
# tdrStyle.SetHistFillStyle(0);
tdrStyle.SetHistLineColor(1);
tdrStyle.SetHistLineStyle(0);
tdrStyle.SetHistLineWidth(1);
# tdrStyle.SetLegoInnerR(Float_t rad = 0.5);
# tdrStyle.SetNumberContours(Int_t number = 20);
tdrStyle.SetEndErrorSize(2);
# tdrStyle.SetErrorMarker(20);
tdrStyle.SetErrorX(0.);
tdrStyle.SetMarkerStyle(20);
#For the fit/function:
tdrStyle.SetOptFit(1);
tdrStyle.SetFitFormat("5.4g");
tdrStyle.SetFuncColor(2);
tdrStyle.SetFuncStyle(1);
tdrStyle.SetFuncWidth(1);
#For the date:
tdrStyle.SetOptDate(0);
# tdrStyle.SetDateX(Float_t x = 0.01);
# tdrStyle.SetDateY(Float_t y = 0.01);
# For the statistics box:
tdrStyle.SetOptFile(0);
tdrStyle.SetOptStat(0); # To display the mean and RMS: SetOptStat("mr");
tdrStyle.SetStatColor(kWhite);
tdrStyle.SetStatFont(42);
tdrStyle.SetStatFontSize(0.025);
tdrStyle.SetStatTextColor(1);
tdrStyle.SetStatFormat("6.4g");
tdrStyle.SetStatBorderSize(1);
tdrStyle.SetStatH(0.1);
tdrStyle.SetStatW(0.15);
# tdrStyle.SetStatStyle(Style_t style = 1001);
# tdrStyle.SetStatX(Float_t x = 0);
# tdrStyle.SetStatY(Float_t y = 0);
# Margins:
tdrStyle.SetPadTopMargin(0.05);
tdrStyle.SetPadBottomMargin(0.13);
tdrStyle.SetPadLeftMargin(0.16);
tdrStyle.SetPadRightMargin(0.02);
# For the Global title:
tdrStyle.SetOptTitle(0);
tdrStyle.SetTitleFont(42);
tdrStyle.SetTitleColor(1);
tdrStyle.SetTitleTextColor(1);
tdrStyle.SetTitleFillColor(10);
tdrStyle.SetTitleFontSize(0.05);
# tdrStyle.SetTitleH(0); # Set the height of the title box
# tdrStyle.SetTitleW(0); # Set the width of the title box
# tdrStyle.SetTitleX(0); # Set the position of the title box
# tdrStyle.SetTitleY(0.985); # Set the position of the title box
# tdrStyle.SetTitleStyle(Style_t style = 1001);
# tdrStyle.SetTitleBorderSize(2);
# For the axis titles:
tdrStyle.SetTitleColor(1, "XYZ");
tdrStyle.SetTitleFont(42, "XYZ");
tdrStyle.SetTitleSize(0.06, "XYZ");
# tdrStyle.SetTitleXSize(Float_t size = 0.02); # Another way to set the size?
# tdrStyle.SetTitleYSize(Float_t size = 0.02);
tdrStyle.SetTitleXOffset(0.9);
tdrStyle.SetTitleYOffset(1.25);
# tdrStyle.SetTitleOffset(1.1, "Y"); # Another way to set the Offset
# For the axis labels:
tdrStyle.SetLabelColor(1, "XYZ");
tdrStyle.SetLabelFont(42, "XYZ");
tdrStyle.SetLabelOffset(0.007, "XYZ");
tdrStyle.SetLabelSize(0.05, "XYZ");
# For the axis:
tdrStyle.SetAxisColor(1, "XYZ");
tdrStyle.SetStripDecimals(kTRUE);
tdrStyle.SetTickLength(0.03, "XYZ");
tdrStyle.SetNdivisions(510, "XYZ");
tdrStyle.SetPadTickX(1); # To get tick marks on the opposite side of the frame
tdrStyle.SetPadTickY(1);
# Change for log plots:
tdrStyle.SetOptLogx(0);
tdrStyle.SetOptLogy(0);
tdrStyle.SetOptLogz(0);
# Postscript options:
tdrStyle.SetPaperSize(20.,20.);
# tdrStyle.SetLineScalePS(Float_t scale = 3);
# tdrStyle.SetLineStyleString(Int_t i, const char* text);
# tdrStyle.SetHeaderPS(const char* header);
# tdrStyle.SetTitlePS(const char* pstitle);
# tdrStyle.SetBarOffset(Float_t baroff = 0.5);
# tdrStyle.SetBarWidth(Float_t barwidth = 0.5);
# tdrStyle.SetPaintTextFormat(const char* format = "g");
# tdrStyle.SetPalette(Int_t ncolors = 0, Int_t* colors = 0);
# tdrStyle.SetTimeOffset(Double_t toffset);
# tdrStyle.SetHistMinimumZero(kTRUE);
tdrStyle.cd();
return tdrStyle
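# Hedged usage sketch (the canvas/histogram below are illustrative and not part of this file):
#   style = setTDRStyle()            # builds, activates (style.cd()) and returns the TStyle
#   from ROOT import TCanvas, TH1F
#   c = TCanvas("c", "c", 600, 600)
#   h = TH1F("h", ";x;entries", 50, 0.0, 1.0)
#   h.Draw()                         # drawn with the TDR margins, fonts and tick settings above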
|
[
"ROOT.TStyle",
"ROOT.gPad.RedrawAxis"
] |
[((285, 302), 'ROOT.gPad.RedrawAxis', 'gPad.RedrawAxis', ([], {}), '()\n', (300, 302), False, 'from ROOT import TPad, TStyle, kWhite, kTRUE, gPad\n'), ((339, 376), 'ROOT.TStyle', 'TStyle', (['"""tdrStyle"""', '"""Style for P-TDR"""'], {}), "('tdrStyle', 'Style for P-TDR')\n", (345, 376), False, 'from ROOT import TPad, TStyle, kWhite, kTRUE, gPad\n')]
|
from msfreportmanager import MsfReportManager
from pymetasploit3.msfrpc import MsfRpcClient
class ExploitManager(object):
    def __init__(self, client_console, output_filename, detected_services):
        self.msfreporter = MsfReportManager(output_filename + "_exploits")
        self.client_console = client_console
self.services = detected_services
self.host_ip = self._get_host_ip(self.client_console)
def run_exploits(self):
try:
exploit_modules = self._get_nonbruteforce_exploit_modules(self.client_console.get_client()) #TODO change into exploit modules
final_module_list = self._filter_modules_by_services(exploit_modules, "exploit", self.client_console)
self._try_modules(final_module_list)
sessions = self.client_console.get_command_response("sessions")
print(sessions)
self.msfreporter.add_to_report("Sessions created", sessions)
jobs = self.client_console.get_command_response("jobs")
print(jobs)
self.msfreporter.add_to_report("Jobs started", jobs)
loot = self.client_console.get_command_response("loot")
print (loot)
self.msfreporter.add_to_report("Loots", loot)
vulns = self.client_console.get_command_response("vulns")
print(vulns)
self.msfreporter.add_to_report("Vulnerabilities", vulns)
self.msfreporter.output_json_report()
except Exception as identifier:
print(identifier)
def _try_modules(self, module_list):
if (len(module_list)>0):
for module in module_list:
print(self.client_console.get_module_options(module)) #Use module
self._set_module_payload(module, self.client_console) #Set payload
self._set_lhost(self.client_console) #Set lhost
module_usable_without_user = self._check_module_usability(module, self.client_console) #doublecheck
if (module_usable_without_user):
print("Currently trying to run {0} module. Please wait untill it has finished with execution".format(module))
response = self.client_console.get_command_response("exploit")
sessions = self.client_console.get_command_response("sessions")
print(sessions)
sessions = self.client_console.get_command_response("sessions -K") #Removes any started sessions
self.msfreporter.add_to_report(module, response)
def _check_module_usability(self, module, client_console):
module_options = client_console.get_module_options(module)
if (module_options != ''):
module_option_rows = module_options.splitlines()
module_usable = self._all_required_settings_configured(module_option_rows)
return module_usable
def _all_required_settings_configured(self, module_options):
for row in module_options[5:]: #TODO from 5th position to index of ''x2 or Exploit target
if (row != ''):
words = row.split()
if (len(words) > 3 and words[1] == "yes" and words[2]!= "yes"):
#If the row is split by spaces/tabs and current setting is empty yes will be on index 1 instead of 2
#That means that a required setting is not set up and module can't be used
return False
return True
def _get_nonbruteforce_exploit_modules(self, client):
"""
Returns a dictionary of non-bruteforce modules of desired type and their descriptions
"""
print("Selection of potential exploit modules, this could take a while..")
exploit_modules = client.modules.exploits
final_modules = {}
for module in exploit_modules:
try:
module_info = self._get_module_description(module, client)
if ("bruteforc" not in module_info and "brute forc" not in module_info): #Filter out some bruteforce modules
final_modules[module] = module_info
except Exception as identifier:
print(identifier)
print("The initial list of all availabe non-bruteforce exploit modules compiled.")
return final_modules
def _filter_modules_by_services(self, modules, desired_module_type, client_console):
filtered_scanner_modules = self._filter_modules_by_service_name(modules)
return self._filter_modules_by_service_info(filtered_scanner_modules, desired_module_type, client_console)
def _filter_modules_by_service_name(self, modules):
print("Filtering possible exploits by detected service names")
final_modules = []
for module_key in modules.keys():
for service_key in self.services.keys():
if (service_key in module_key and service_key in modules[module_key]): #if service name is in module
final_modules.append(module_key)
return final_modules
def _filter_modules_by_service_info(self, list_of_possible_modules, desired_module_type, client_console):
print("Filtering possible exploits by detected service info")
module_list = set()
for service_key in self.services.keys():
if(self.services[service_key] != ''):
search_response = client_console.search_msf_modules(self.services[service_key])
response_modules = self._parse_search_response(search_response, desired_module_type)
module_list.update(response_modules)
        possible_modules = set(list_of_possible_modules)
        print("Creating a list of exploits that are most likely to fit the detected services")
        result_list = possible_modules.intersection(module_list)
return result_list
def _parse_search_response(self, response, module_type):
module_names = []
if (response != ''):
rows = response.splitlines()
for row in rows[6:(len(rows)-2)]:
columns = row.split()
if (len(columns)>2 and columns[1] != '' and columns[1] not in module_names and module_type in columns[1]):
module_name = columns[1]
position = len(module_type)+1 # crop the auxiliary, exploit or other module group names
module_names.append(module_name[position:])
if (len(module_names)>15):
return module_names[:10] #TODO if there are more than 10 outputs remove the rest.
else:
return module_names
def _get_module_description(self, module, client):
temporary_module = client.modules.use("exploit", module)
module_info = temporary_module.description
return module_info
def _set_module_payload(self, module, client):
module_object = client.get_client().modules.use("exploit", module)
possible_payloads = module_object.targetpayloads()
selected_payload = self._select_payload(possible_payloads)
if selected_payload is not None:
command = "set payload "+ selected_payload
print(client.get_command_response("show options"))
response = client.get_command_response(command)
print (response)
def _select_payload(self, payload_list):
if (len(payload_list)>0):
#TODO select meterpreter reverse tcp if available
for payload in payload_list:
if "/meterpreter/reverse_tcp" in payload:
return payload
for payload in payload_list:
if "reverse_tcp" in payload:
return payload
return payload_list[0] #TODO maybe use smarter payload selection, based on target host os from services or hosts report
else:
return None
def _set_lhost(self, client):
command = "set LHOST "+self.host_ip
response = client.get_command_response(command)
print (response)
def _get_host_ip(self, client):
command = "hostname -I | awk '{print $1}'"
response = client.get_command_response(command)
if (response != ''):
lines = response.splitlines()
if lines[2] is not None:
pass
return lines[2]
return "127.0.0.1"
|
[
"msfreportmanager.MsfReportManager"
] |
[((226, 273), 'msfreportmanager.MsfReportManager', 'MsfReportManager', (["(output_filename + '_exploits')"], {}), "(output_filename + '_exploits')\n", (242, 273), False, 'from msfreportmanager import MsfReportManager\n')]
|
import torch
import numpy as np
def mpjpe(predicted, target):
"""
Mean per-joint position error (i.e. mean Euclidean distance),
often referred to as "Protocol #1" in many papers.
"""
assert predicted.shape == target.shape
return torch.mean(torch.norm(predicted - target, dim=len(target.shape)-1))
def weighted_mpjpe(predicted, target, w):
"""
Weighted mean per-joint position error (i.e. mean Euclidean distance)
"""
assert predicted.shape == target.shape
assert w.shape[0] == predicted.shape[0]
return torch.mean(w * torch.norm(predicted - target, dim=len(target.shape)-1))
def p_mpjpe_torch(predicted, target, with_sRt=False,full_torch=False,with_aligned=False):
"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""
assert predicted.shape == target.shape
muX = torch.mean(target, dim=1, keepdim=True)
muY = torch.mean(predicted, dim=1, keepdim=True)
X0 = target - muX
Y0 = predicted - muY
X0[X0**2<1e-6]=1e-3
normX = torch.sqrt(torch.sum(X0**2, dim=(1, 2), keepdim=True))
normY = torch.sqrt(torch.sum(Y0**2, dim=(1, 2), keepdim=True))
normX[normX<1e-3]=1e-3
X0 /= normX
Y0 /= normY
H = torch.matmul(X0.transpose(1,2), Y0)
if full_torch:
U, s, V = batch_svd(H)
else:
U, s, Vt = np.linalg.svd(H.cpu().numpy())
V = torch.from_numpy(Vt.transpose(0, 2, 1)).cuda()
U = torch.from_numpy(U).cuda()
s = torch.from_numpy(s).cuda()
R = torch.matmul(V, U.transpose(2, 1))
# Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR = torch.sign(torch.unsqueeze(torch.det(R[0]), 0))
V[:, :, -1] *= sign_detR.unsqueeze(0)
s[:, -1] *= sign_detR.flatten()
R = torch.matmul(V, U.transpose(2, 1)) # Rotation
tr = torch.unsqueeze(torch.sum(s, dim=1, keepdim=True), 2)
a = tr * normX / normY # Scale
t = muX - a*torch.matmul(muY, R) # Translation
if (a!=a).sum()>0:
print('NaN Error!!')
print('UsV:',U,s,V)
print('aRt:',a,R,t)
a[a!=a]=1.
R[R!=R]=0.
t[t!=t]=0.
# Perform rigid transformation on the input
predicted_aligned = a*torch.matmul(predicted, R) + t
if with_sRt:
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean(),(a,R,t)#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))
if with_aligned:
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean(),predicted_aligned
# Return MPJPE
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean()#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))#,(a,R,t),predicted_aligned
def batch_svd(H):
num = H.shape[0]
U_batch, s_batch, V_batch = [],[],[]
for i in range(num):
U, s, V = H[i].svd(some=False)
U_batch.append(U.unsqueeze(0))
s_batch.append(s.unsqueeze(0))
V_batch.append(V.unsqueeze(0))
return torch.cat(U_batch,0),torch.cat(s_batch,0),torch.cat(V_batch,0)
def p_mpjpe(predicted, target, with_sRt=False,full_torch=False,with_aligned=False,each_separate=False):
"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""
assert predicted.shape == target.shape
muX = np.mean(target, axis=1, keepdims=True)
muY = np.mean(predicted, axis=1, keepdims=True)
X0 = target - muX
Y0 = predicted - muY
normX = np.sqrt(np.sum(X0**2, axis=(1, 2), keepdims=True))
normY = np.sqrt(np.sum(Y0**2, axis=(1, 2), keepdims=True))
X0 /= (normX+1e-6)
Y0 /= (normY+1e-6)
H = np.matmul(X0.transpose(0, 2, 1), Y0).astype(np.float16).astype(np.float64)
U, s, Vt = np.linalg.svd(H)
V = Vt.transpose(0, 2, 1)
R = np.matmul(V, U.transpose(0, 2, 1))
# Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
V[:, :, -1] *= sign_detR
s[:, -1] *= sign_detR.flatten()
R = np.matmul(V, U.transpose(0, 2, 1)) # Rotation
tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
a = tr * normX / normY # Scale
t = muX - a*np.matmul(muY, R) # Translation
# Perform rigid transformation on the input
predicted_aligned = a*np.matmul(predicted, R) + t
if each_separate:
return np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1)
error = np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1))
if with_sRt and not with_aligned:
return error, (a,R,t)
if with_aligned:
return error,(a,R,t),predicted_aligned
# Return MPJPE
return error
def n_mpjpe(predicted, target):
"""
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
"""
assert predicted.shape == target.shape
norm_predicted = torch.mean(torch.sum(predicted**2, dim=3, keepdim=True), dim=2, keepdim=True)
norm_target = torch.mean(torch.sum(target*predicted, dim=3, keepdim=True), dim=2, keepdim=True)
scale = norm_target / norm_predicted
return mpjpe(scale * predicted, target)
def mean_velocity_error(predicted, target):
"""
Mean per-joint velocity error (i.e. mean Euclidean distance of the 1st derivative)
"""
assert predicted.shape == target.shape
velocity_predicted = np.diff(predicted, axis=0)
velocity_target = np.diff(target, axis=0)
return np.mean(np.linalg.norm(velocity_predicted - velocity_target, axis=len(target.shape)-1))
def test():
r1 = np.random.rand(3,14,3)
r2 = np.random.rand(3,14,3)
pmpjpe = p_mpjpe(r1, r2,with_sRt=False)
pmpjpe_torch = p_mpjpe_torch(torch.from_numpy(r1), torch.from_numpy(r2),with_sRt=False,full_torch=True)
print('pmpjpe: {}; {:.6f}; {:.6f}; {:.6f}'.format(np.abs(pmpjpe-pmpjpe_torch.numpy())<0.01,pmpjpe,pmpjpe_torch.numpy(), pmpjpe-pmpjpe_torch.numpy()))
if __name__ == '__main__':
test()
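# A small, optional sanity check for the aligned metric above (not called automatically;
# the joint count of 17 is only illustrative):
def alignment_sanity_check():
    rng = np.random.RandomState(0)
    target = rng.rand(3, 17, 3)
    pred = 2.0 * target + 0.5                # global scale + translation of the target
    # p_mpjpe removes scale/rotation/translation, so the error collapses to ~0
    print('p_mpjpe on a similarity-transformed copy:', p_mpjpe(pred, target))
    # plain mpjpe on identical tensors is exactly zero
    print('mpjpe on identical tensors:', mpjpe(torch.from_numpy(target), torch.from_numpy(target)).item())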
|
[
"torch.mean",
"numpy.sum",
"torch.cat",
"torch.det",
"numpy.linalg.svd",
"numpy.mean",
"numpy.diff",
"numpy.matmul",
"numpy.random.rand",
"numpy.linalg.det",
"torch.matmul",
"torch.sum",
"torch.from_numpy"
] |
[((921, 960), 'torch.mean', 'torch.mean', (['target'], {'dim': '(1)', 'keepdim': '(True)'}), '(target, dim=1, keepdim=True)\n', (931, 960), False, 'import torch\n'), ((971, 1013), 'torch.mean', 'torch.mean', (['predicted'], {'dim': '(1)', 'keepdim': '(True)'}), '(predicted, dim=1, keepdim=True)\n', (981, 1013), False, 'import torch\n'), ((3435, 3473), 'numpy.mean', 'np.mean', (['target'], {'axis': '(1)', 'keepdims': '(True)'}), '(target, axis=1, keepdims=True)\n', (3442, 3473), True, 'import numpy as np\n'), ((3484, 3525), 'numpy.mean', 'np.mean', (['predicted'], {'axis': '(1)', 'keepdims': '(True)'}), '(predicted, axis=1, keepdims=True)\n', (3491, 3525), True, 'import numpy as np\n'), ((3848, 3864), 'numpy.linalg.svd', 'np.linalg.svd', (['H'], {}), '(H)\n', (3861, 3864), True, 'import numpy as np\n'), ((5575, 5601), 'numpy.diff', 'np.diff', (['predicted'], {'axis': '(0)'}), '(predicted, axis=0)\n', (5582, 5601), True, 'import numpy as np\n'), ((5624, 5647), 'numpy.diff', 'np.diff', (['target'], {'axis': '(0)'}), '(target, axis=0)\n', (5631, 5647), True, 'import numpy as np\n'), ((5770, 5794), 'numpy.random.rand', 'np.random.rand', (['(3)', '(14)', '(3)'], {}), '(3, 14, 3)\n', (5784, 5794), True, 'import numpy as np\n'), ((5802, 5826), 'numpy.random.rand', 'np.random.rand', (['(3)', '(14)', '(3)'], {}), '(3, 14, 3)\n', (5816, 5826), True, 'import numpy as np\n'), ((1110, 1154), 'torch.sum', 'torch.sum', (['(X0 ** 2)'], {'dim': '(1, 2)', 'keepdim': '(True)'}), '(X0 ** 2, dim=(1, 2), keepdim=True)\n', (1119, 1154), False, 'import torch\n'), ((1177, 1221), 'torch.sum', 'torch.sum', (['(Y0 ** 2)'], {'dim': '(1, 2)', 'keepdim': '(True)'}), '(Y0 ** 2, dim=(1, 2), keepdim=True)\n', (1186, 1221), False, 'import torch\n'), ((1919, 1952), 'torch.sum', 'torch.sum', (['s'], {'dim': '(1)', 'keepdim': '(True)'}), '(s, dim=1, keepdim=True)\n', (1928, 1952), False, 'import torch\n'), ((3062, 3083), 'torch.cat', 'torch.cat', (['U_batch', '(0)'], {}), '(U_batch, 0)\n', (3071, 3083), False, 'import torch\n'), ((3083, 3104), 'torch.cat', 'torch.cat', (['s_batch', '(0)'], {}), '(s_batch, 0)\n', (3092, 3104), False, 'import torch\n'), ((3104, 3125), 'torch.cat', 'torch.cat', (['V_batch', '(0)'], {}), '(V_batch, 0)\n', (3113, 3125), False, 'import torch\n'), ((3595, 3638), 'numpy.sum', 'np.sum', (['(X0 ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(X0 ** 2, axis=(1, 2), keepdims=True)\n', (3601, 3638), True, 'import numpy as np\n'), ((3658, 3701), 'numpy.sum', 'np.sum', (['(Y0 ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(Y0 ** 2, axis=(1, 2), keepdims=True)\n', (3664, 3701), True, 'import numpy as np\n'), ((4227, 4259), 'numpy.sum', 'np.sum', (['s'], {'axis': '(1)', 'keepdims': '(True)'}), '(s, axis=1, keepdims=True)\n', (4233, 4259), True, 'import numpy as np\n'), ((5106, 5152), 'torch.sum', 'torch.sum', (['(predicted ** 2)'], {'dim': '(3)', 'keepdim': '(True)'}), '(predicted ** 2, dim=3, keepdim=True)\n', (5115, 5152), False, 'import torch\n'), ((5202, 5252), 'torch.sum', 'torch.sum', (['(target * predicted)'], {'dim': '(3)', 'keepdim': '(True)'}), '(target * predicted, dim=3, keepdim=True)\n', (5211, 5252), False, 'import torch\n'), ((5902, 5922), 'torch.from_numpy', 'torch.from_numpy', (['r1'], {}), '(r1)\n', (5918, 5922), False, 'import torch\n'), ((5924, 5944), 'torch.from_numpy', 'torch.from_numpy', (['r2'], {}), '(r2)\n', (5940, 5944), False, 'import torch\n'), ((1740, 1755), 'torch.det', 'torch.det', (['R[0]'], {}), '(R[0])\n', (1749, 1755), False, 'import torch\n'), ((2009, 2029), 
'torch.matmul', 'torch.matmul', (['muY', 'R'], {}), '(muY, R)\n', (2021, 2029), False, 'import torch\n'), ((2272, 2298), 'torch.matmul', 'torch.matmul', (['predicted', 'R'], {}), '(predicted, R)\n', (2284, 2298), False, 'import torch\n'), ((4056, 4072), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (4069, 4072), True, 'import numpy as np\n'), ((4321, 4338), 'numpy.matmul', 'np.matmul', (['muY', 'R'], {}), '(muY, R)\n', (4330, 4338), True, 'import numpy as np\n'), ((4428, 4451), 'numpy.matmul', 'np.matmul', (['predicted', 'R'], {}), '(predicted, R)\n', (4437, 4451), True, 'import numpy as np\n'), ((1508, 1527), 'torch.from_numpy', 'torch.from_numpy', (['U'], {}), '(U)\n', (1524, 1527), False, 'import torch\n'), ((1547, 1566), 'torch.from_numpy', 'torch.from_numpy', (['s'], {}), '(s)\n', (1563, 1566), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-06 00:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0029_remove_exporttask_celery_uid'),
]
operations = [
migrations.RemoveField(
model_name='exporttaskresult',
name='task',
),
migrations.AddField(
model_name='exporttask',
name='filesize_bytes',
field=models.IntegerField(null=True),
),
migrations.DeleteModel(
name='ExportTaskResult',
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.IntegerField",
"django.db.migrations.DeleteModel"
] |
[((305, 371), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""exporttaskresult"""', 'name': '"""task"""'}), "(model_name='exporttaskresult', name='task')\n", (327, 371), False, 'from django.db import migrations, models\n'), ((578, 625), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""ExportTaskResult"""'}), "(name='ExportTaskResult')\n", (600, 625), False, 'from django.db import migrations, models\n'), ((527, 557), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (546, 557), False, 'from django.db import migrations, models\n')]
|
import pickle as pk
import numpy as np
import matplotlib.pyplot as pl
# plot data files
unsmoothed = ['T2_L2.pickle', 'T2_L4.pickle', 'T2_L8.pickle', 'T2_L16.pickle',
'T2_L32.pickle', 'T2_L64.pickle', 'T2_L128.pickle', 'T2_L256.pickle',
'T2_L512.pickle', 'T2_L1024.pickle']
smoothed = ['T2_L2s.pickle', 'T2_L4s.pickle', 'T2_L8s.pickle', 'T2_L16s.pickle',
'T2_L32s.pickle', 'T2_L64s.pickle', 'T2_L128s.pickle', 'T2_L256s.pickle',
'T2_L512s.pickle', 'T2_L1024s.pickle']
size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
pl.figure()
for i in range(len(unsmoothed)):
# read data
f_file = open(unsmoothed[i], 'rb')
y = pk.load(f_file)
f_file.close()
x = range(len(y))
pl.loglog(x, y, label = "L = %d" % (size[i]))
pl.legend(loc = "upper left")
pl.grid(True)
pl.xlabel(r"Time $t$")
pl.ylabel(r"Height of the pile $h$")
pl.xlim(1, 1e6)
#pl.ylim(1, )
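# Note: the loop above only builds the log-log figure; nothing is explicitly rendered or saved.
# A minimal follow-up (the output filename is illustrative):
#   pl.savefig("T2_heights.png", dpi=150)
#   pl.show()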
|
[
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"pickle.load",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((587, 598), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (596, 598), True, 'import matplotlib.pyplot as pl\n'), ((705, 720), 'pickle.load', 'pk.load', (['f_file'], {}), '(f_file)\n', (712, 720), True, 'import pickle as pk\n'), ((781, 822), 'matplotlib.pyplot.loglog', 'pl.loglog', (['x', 'y'], {'label': "('L = %d' % size[i])"}), "(x, y, label='L = %d' % size[i])\n", (790, 822), True, 'import matplotlib.pyplot as pl\n'), ((832, 859), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (841, 859), True, 'import matplotlib.pyplot as pl\n'), ((867, 880), 'matplotlib.pyplot.grid', 'pl.grid', (['(True)'], {}), '(True)\n', (874, 880), True, 'import matplotlib.pyplot as pl\n'), ((886, 907), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Time $t$"""'], {}), "('Time $t$')\n", (895, 907), True, 'import matplotlib.pyplot as pl\n'), ((914, 949), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Height of the pile $h$"""'], {}), "('Height of the pile $h$')\n", (923, 949), True, 'import matplotlib.pyplot as pl\n'), ((956, 977), 'matplotlib.pyplot.xlim', 'pl.xlim', (['(1)', '(1000000.0)'], {}), '(1, 1000000.0)\n', (963, 977), True, 'import matplotlib.pyplot as pl\n')]
|
import json
from flask_login import login_required
from ui import app
from flask import Response, request
from utils.json_encoder import JSONEncoder
from service.seed_service import get_keywords, update_keyword, delete_keyword
__author__ = 'tomas'
@app.route("/api/workspace/<workspace_id>/seed", methods=['GET'])
@login_required
def get_keyword_api(workspace_id):
in_doc = get_keywords(workspace_id)
out_doc = JSONEncoder().encode(in_doc)
return Response(out_doc, mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/seed", methods=['POST'])
@login_required
def update_keyword_api(workspace_id):
scored_word = request.json
id = update_keyword(workspace_id, scored_word['word'], scored_word['score'])
return Response("{}", mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/seed/<hash>", methods=['DELETE'])
@login_required
def delete_keyword_api(workspace_id, hash):
delete_keyword(workspace_id, hash)
return Response("{}", mimetype="application/json")
|
[
"service.seed_service.update_keyword",
"service.seed_service.get_keywords",
"ui.app.route",
"service.seed_service.delete_keyword",
"flask.Response",
"utils.json_encoder.JSONEncoder"
] |
[((252, 316), 'ui.app.route', 'app.route', (['"""/api/workspace/<workspace_id>/seed"""'], {'methods': "['GET']"}), "('/api/workspace/<workspace_id>/seed', methods=['GET'])\n", (261, 316), False, 'from ui import app\n'), ((512, 577), 'ui.app.route', 'app.route', (['"""/api/workspace/<workspace_id>/seed"""'], {'methods': "['POST']"}), "('/api/workspace/<workspace_id>/seed', methods=['POST'])\n", (521, 577), False, 'from ui import app\n'), ((802, 876), 'ui.app.route', 'app.route', (['"""/api/workspace/<workspace_id>/seed/<hash>"""'], {'methods': "['DELETE']"}), "('/api/workspace/<workspace_id>/seed/<hash>', methods=['DELETE'])\n", (811, 876), False, 'from ui import app\n'), ((381, 407), 'service.seed_service.get_keywords', 'get_keywords', (['workspace_id'], {}), '(workspace_id)\n', (393, 407), False, 'from service.seed_service import get_keywords, update_keyword, delete_keyword\n'), ((462, 508), 'flask.Response', 'Response', (['out_doc'], {'mimetype': '"""application/json"""'}), "(out_doc, mimetype='application/json')\n", (470, 508), False, 'from flask import Response, request\n'), ((672, 743), 'service.seed_service.update_keyword', 'update_keyword', (['workspace_id', "scored_word['word']", "scored_word['score']"], {}), "(workspace_id, scored_word['word'], scored_word['score'])\n", (686, 743), False, 'from service.seed_service import get_keywords, update_keyword, delete_keyword\n'), ((755, 798), 'flask.Response', 'Response', (['"""{}"""'], {'mimetype': '"""application/json"""'}), "('{}', mimetype='application/json')\n", (763, 798), False, 'from flask import Response, request\n'), ((941, 975), 'service.seed_service.delete_keyword', 'delete_keyword', (['workspace_id', 'hash'], {}), '(workspace_id, hash)\n', (955, 975), False, 'from service.seed_service import get_keywords, update_keyword, delete_keyword\n'), ((987, 1030), 'flask.Response', 'Response', (['"""{}"""'], {'mimetype': '"""application/json"""'}), "('{}', mimetype='application/json')\n", (995, 1030), False, 'from flask import Response, request\n'), ((422, 435), 'utils.json_encoder.JSONEncoder', 'JSONEncoder', ([], {}), '()\n', (433, 435), False, 'from utils.json_encoder import JSONEncoder\n')]
|
import discord
import logging
import asyncio
import re
from typing import List, Union, Tuple, Pattern
from discord.ext.commands.converter import Converter, IDConverter, RoleConverter
from discord.ext.commands.errors import BadArgument
from redbot.core.i18n import Translator
from redbot.core import commands
from redbot.core.utils.predicates import ReactionPredicate
from redbot.core.utils.menus import start_adding_reactions
log = logging.getLogger("red.ReTrigger")
_ = Translator("ReTrigger", __file__)
class TriggerExists(Converter):
async def convert(self, ctx, argument):
bot = ctx.bot
guild = ctx.guild
config = bot.get_cog("ReTrigger").config
trigger_list = await config.guild(guild).trigger_list()
result = None
if argument in trigger_list:
result = await Trigger.from_json(trigger_list[argument])
else:
result = argument
return result
class ValidRegex(Converter):
"""
This will check to see if the provided regex pattern is valid
Guidance code on how to do this from:
https://github.com/Rapptz/discord.py/blob/rewrite/discord/ext/commands/converter.py#L85
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L24
"""
async def convert(self, ctx: commands.Context, argument: str):
try:
re.compile(argument)
result = argument
except Exception as e:
log.error("Retrigger conversion error")
err_msg = _("`{arg}` is not a valid regex pattern. {e}").format(arg=argument, e=e)
raise BadArgument(err_msg)
return result
class MultiResponse(Converter):
"""
This will parse my defined multi response pattern and provide usable formats
to be used in multiple reponses
"""
async def convert(self, ctx: commands.Context, argument: str) -> Union[List[str], List[int]]:
result = []
match = re.split(r"(;)", argument)
valid_reactions = [
"dm",
"remove_role",
"add_role",
"ban",
"kick",
"text",
"filter",
"delete",
"react",
"command",
"mock",
]
log.debug(match)
my_perms = ctx.channel.permissions_for(ctx.me)
if match[0] not in valid_reactions:
raise BadArgument(
_("`{response}` is not a valid reaction type.").format(response=match[0])
)
for m in match:
if m == ";":
continue
else:
result.append(m)
if result[0] == "filter":
result[0] = "delete"
if len(result) < 2 and result[0] not in ["delete", "ban", "kick"]:
raise BadArgument(_("The provided multi response pattern is not valid."))
if result[0] in ["add_role", "remove_role"] and not my_perms.manage_roles:
raise BadArgument(_('I require "Manage Roles" permission to use that.'))
if result[0] == "filter" and not my_perms.manage_messages:
raise BadArgument(_('I require "Manage Messages" permission to use that.'))
if result[0] == "ban" and not my_perms.ban_members:
raise BadArgument(_('I require "Ban Members" permission to use that.'))
if result[0] == "kick" and not my_perms.kick_members:
raise BadArgument(_('I require "Kick Members" permission to use that.'))
if result[0] == "react" and not my_perms.add_reactions:
raise BadArgument(_('I require "Add Reactions" permission to use that.'))
if result[0] == "mock":
msg = await ctx.send(
_(
"Mock commands can allow any user to run a command "
"as if you did, are you sure you want to add this?"
)
)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=15)
except asyncio.TimeoutError:
raise BadArgument(_("Not creating trigger."))
if not pred.result:
raise BadArgument(_("Not creating trigger."))
if result[0] in ["add_role", "remove_role"]:
good_roles = []
for r in result[1:]:
try:
role = await RoleConverter().convert(ctx, r)
if role < ctx.guild.me.top_role and role < ctx.author.top_role:
good_roles.append(role.id)
except BadArgument:
log.error("Role `{}` not found.".format(r))
result = [result[0]]
for r_id in good_roles:
result.append(r_id)
if result[0] == "react":
good_emojis = []
for r in result[1:]:
try:
emoji = await ValidEmoji().convert(ctx, r)
good_emojis.append(emoji)
except BadArgument:
log.error("Emoji `{}` not found.".format(r))
log.debug(good_emojis)
result = [result[0]] + good_emojis
return result
class ValidEmoji(IDConverter):
"""
This is from discord.py rewrite, first we'll match the actual emoji
then we'll match the emoji name if we can
if all else fails we may suspect that it's a unicode emoji and check that later
All lookups are done for the local guild first, if available. If that lookup
fails, then it checks the client's global cache.
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by extracting ID from the emoji.
3. Lookup by name
https://github.com/Rapptz/discord.py/blob/rewrite/discord/ext/commands/converter.py
"""
async def convert(self, ctx: commands.Context, argument: str):
match = self._get_id_match(argument) or re.match(
r"<a?:[a-zA-Z0-9\_]+:([0-9]+)>$|(:[a-zA-z0-9\_]+:$)", argument
)
result = None
bot = ctx.bot
guild = ctx.guild
if match is None:
# Try to get the emoji by name. Try local guild first.
if guild:
result = discord.utils.get(guild.emojis, name=argument)
if result is None:
result = discord.utils.get(bot.emojis, name=argument)
elif match.group(1):
emoji_id = int(match.group(1))
# Try to look up emoji by id.
if guild:
result = discord.utils.get(guild.emojis, id=emoji_id)
if result is None:
result = discord.utils.get(bot.emojis, id=emoji_id)
else:
emoji_name = str(match.group(2)).replace(":", "")
if guild:
result = discord.utils.get(guild.emojis, name=emoji_name)
if result is None:
result = discord.utils.get(bot.emojis, name=emoji_name)
if type(result) is discord.Emoji:
result = str(result)[1:-1]
if result is None:
try:
await ctx.message.add_reaction(argument)
result = argument
except Exception:
raise BadArgument(_("`{}` is not an emoji I can use.").format(argument))
return result
class ChannelUserRole(IDConverter):
"""
This will check to see if the provided argument is a channel, user, or role
Guidance code on how to do this from:
https://github.com/Rapptz/discord.py/blob/rewrite/discord/ext/commands/converter.py#L85
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L24
"""
async def convert(self, ctx: commands.Context, argument: str):
guild = ctx.guild
result = None
id_match = self._get_id_match(argument)
channel_match = re.match(r"<#([0-9]+)>$", argument)
member_match = re.match(r"<@!?([0-9]+)>$", argument)
role_match = re.match(r"<@&([0-9]+)>$", argument)
for converter in ["channel", "role", "member"]:
if converter == "channel":
match = id_match or channel_match
if match:
channel_id = match.group(1)
result = guild.get_channel(int(channel_id))
else:
result = discord.utils.get(guild.text_channels, name=argument)
if converter == "member":
match = id_match or member_match
if match:
member_id = match.group(1)
result = guild.get_member(int(member_id))
else:
result = guild.get_member_named(argument)
if converter == "role":
match = id_match or role_match
if match:
role_id = match.group(1)
result = guild.get_role(int(role_id))
else:
result = discord.utils.get(guild._roles.values(), name=argument)
if result:
break
if not result:
msg = _("{arg} is not a valid channel, user or role.").format(arg=argument)
raise BadArgument(msg)
return result
class Trigger:
"""
Trigger class to handle trigger objects
"""
name: str
regex: Pattern
response_type: list
author: int
count: int
image: Union[List[Union[int, str]], str, None]
text: Union[List[Union[int, str]], str, None]
whitelist: list
blacklist: list
cooldown: dict
multi_payload: Union[List[MultiResponse], Tuple[MultiResponse, ...]]
created: int
def __init__(
self,
name: str,
regex: str,
response_type: list,
author: int,
count: int,
image: Union[List[Union[int, str]], str, None],
text: Union[List[Union[int, str]], str, None],
whitelist: list,
blacklist: list,
cooldown: dict,
multi_payload: Union[List[MultiResponse], Tuple[MultiResponse, ...]],
created_at: int,
):
self.name = name
self.regex = re.compile(regex)
self.response_type = response_type
self.author = author
self.count = count
self.image = image
self.text = text
self.whitelist = whitelist
self.blacklist = blacklist
self.cooldown = cooldown
self.multi_payload = multi_payload
self.created_at = created_at
async def to_json(self) -> dict:
return {
"name": self.name,
"regex": self.regex.pattern,
"response_type": self.response_type,
"author": self.author,
"count": self.count,
"image": self.image,
"text": self.text,
"whitelist": self.whitelist,
"blacklist": self.blacklist,
"cooldown": self.cooldown,
"multi_payload": self.multi_payload,
"created_at": self.created_at
}
@classmethod
async def from_json(cls, data: dict):
cooldown: dict = {}
multi_payload: List[MultiResponse] = []
created_at: int = 0
if "cooldown" in data:
cooldown = data["cooldown"]
if type(data["response_type"]) is str:
response_type = [data["response_type"]]
else:
response_type = data["response_type"]
if "multi_payload" in data:
multi_payload = data["multi_payload"]
if "created_at" in data:
created_at = data["created_at"]
return cls(
data["name"],
data["regex"],
response_type,
data["author"],
data["count"],
data["image"],
data["text"],
data["whitelist"],
data["blacklist"],
cooldown,
multi_payload,
created_at,
)
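# Hedged round-trip sketch (field values are invented; must run inside an async context,
# e.g. a cog method):
#   data = {"name": "hi", "regex": r"\bhi\b", "response_type": ["text"], "author": 1,
#           "count": 0, "image": None, "text": "Hello!", "whitelist": [], "blacklist": [],
#           "cooldown": {}, "multi_payload": [], "created_at": 0}
#   trigger = await Trigger.from_json(data)
#   assert (await trigger.to_json())["regex"] == r"\bhi\b"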
|
[
"discord.utils.get",
"re.split",
"redbot.core.i18n.Translator",
"redbot.core.utils.menus.start_adding_reactions",
"redbot.core.utils.predicates.ReactionPredicate.yes_or_no",
"re.match",
"discord.ext.commands.converter.RoleConverter",
"discord.ext.commands.errors.BadArgument",
"logging.getLogger",
"re.compile"
] |
[((434, 468), 'logging.getLogger', 'logging.getLogger', (['"""red.ReTrigger"""'], {}), "('red.ReTrigger')\n", (451, 468), False, 'import logging\n'), ((473, 506), 'redbot.core.i18n.Translator', 'Translator', (['"""ReTrigger"""', '__file__'], {}), "('ReTrigger', __file__)\n", (483, 506), False, 'from redbot.core.i18n import Translator\n'), ((1967, 1992), 're.split', 're.split', (['"""(;)"""', 'argument'], {}), "('(;)', argument)\n", (1975, 1992), False, 'import re\n'), ((7986, 8020), 're.match', 're.match', (['"""<#([0-9]+)>$"""', 'argument'], {}), "('<#([0-9]+)>$', argument)\n", (7994, 8020), False, 'import re\n'), ((8045, 8081), 're.match', 're.match', (['"""<@!?([0-9]+)>$"""', 'argument'], {}), "('<@!?([0-9]+)>$', argument)\n", (8053, 8081), False, 'import re\n'), ((8104, 8139), 're.match', 're.match', (['"""<@&([0-9]+)>$"""', 'argument'], {}), "('<@&([0-9]+)>$', argument)\n", (8112, 8139), False, 'import re\n'), ((10269, 10286), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (10279, 10286), False, 'import re\n'), ((1375, 1395), 're.compile', 're.compile', (['argument'], {}), '(argument)\n', (1385, 1395), False, 'import re\n'), ((3918, 3981), 'redbot.core.utils.menus.start_adding_reactions', 'start_adding_reactions', (['msg', 'ReactionPredicate.YES_OR_NO_EMOJIS'], {}), '(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n', (3940, 3981), False, 'from redbot.core.utils.menus import start_adding_reactions\n'), ((4001, 4045), 'redbot.core.utils.predicates.ReactionPredicate.yes_or_no', 'ReactionPredicate.yes_or_no', (['msg', 'ctx.author'], {}), '(msg, ctx.author)\n', (4028, 4045), False, 'from redbot.core.utils.predicates import ReactionPredicate\n'), ((6038, 6111), 're.match', 're.match', (['"""<a?:[a-zA-Z0-9\\\\_]+:([0-9]+)>$|(:[a-zA-z0-9\\\\_]+:$)"""', 'argument'], {}), "('<a?:[a-zA-Z0-9\\\\_]+:([0-9]+)>$|(:[a-zA-z0-9\\\\_]+:$)', argument)\n", (6046, 6111), False, 'import re\n'), ((9328, 9344), 'discord.ext.commands.errors.BadArgument', 'BadArgument', (['msg'], {}), '(msg)\n', (9339, 9344), False, 'from discord.ext.commands.errors import BadArgument\n'), ((1622, 1642), 'discord.ext.commands.errors.BadArgument', 'BadArgument', (['err_msg'], {}), '(err_msg)\n', (1633, 1642), False, 'from discord.ext.commands.errors import BadArgument\n'), ((6343, 6389), 'discord.utils.get', 'discord.utils.get', (['guild.emojis'], {'name': 'argument'}), '(guild.emojis, name=argument)\n', (6360, 6389), False, 'import discord\n'), ((6447, 6491), 'discord.utils.get', 'discord.utils.get', (['bot.emojis'], {'name': 'argument'}), '(bot.emojis, name=argument)\n', (6464, 6491), False, 'import discord\n'), ((6654, 6698), 'discord.utils.get', 'discord.utils.get', (['guild.emojis'], {'id': 'emoji_id'}), '(guild.emojis, id=emoji_id)\n', (6671, 6698), False, 'import discord\n'), ((6756, 6798), 'discord.utils.get', 'discord.utils.get', (['bot.emojis'], {'id': 'emoji_id'}), '(bot.emojis, id=emoji_id)\n', (6773, 6798), False, 'import discord\n'), ((6923, 6971), 'discord.utils.get', 'discord.utils.get', (['guild.emojis'], {'name': 'emoji_name'}), '(guild.emojis, name=emoji_name)\n', (6940, 6971), False, 'import discord\n'), ((7029, 7075), 'discord.utils.get', 'discord.utils.get', (['bot.emojis'], {'name': 'emoji_name'}), '(bot.emojis, name=emoji_name)\n', (7046, 7075), False, 'import discord\n'), ((8475, 8528), 'discord.utils.get', 'discord.utils.get', (['guild.text_channels'], {'name': 'argument'}), '(guild.text_channels, name=argument)\n', (8492, 8528), False, 'import discord\n'), ((4507, 4522), 
'discord.ext.commands.converter.RoleConverter', 'RoleConverter', ([], {}), '()\n', (4520, 4522), False, 'from discord.ext.commands.converter import Converter, IDConverter, RoleConverter\n')]
|
from __future__ import annotations
from cleo.events.event import Event
def test_is_propagation_stopped():
e = Event()
assert not e.is_propagation_stopped()
def test_stop_propagation_and_is_propagation_stopped():
e = Event()
e.stop_propagation()
assert e.is_propagation_stopped()
|
[
"cleo.events.event.Event"
] |
[((117, 124), 'cleo.events.event.Event', 'Event', ([], {}), '()\n', (122, 124), False, 'from cleo.events.event import Event\n'), ((234, 241), 'cleo.events.event.Event', 'Event', ([], {}), '()\n', (239, 241), False, 'from cleo.events.event import Event\n')]
|
from djangobench.utils import run_benchmark
def benchmark():
global Book
list(Book.objects.iterator())
def setup():
global Book
from query_all.models import Book
for i in range(0, 3000):
Book(pk=i, title='foobar_%s' % i).save()
run_benchmark(
benchmark,
setup=setup,
meta={
'description': 'A simple Model.objects.iterator() call for large number of objects.',
}
)
|
[
"djangobench.utils.run_benchmark",
"query_all.models.Book",
"query_all.models.Book.objects.iterator"
] |
[((260, 394), 'djangobench.utils.run_benchmark', 'run_benchmark', (['benchmark'], {'setup': 'setup', 'meta': "{'description':\n 'A simple Model.objects.iterator() call for large number of objects.'}"}), "(benchmark, setup=setup, meta={'description':\n 'A simple Model.objects.iterator() call for large number of objects.'})\n", (273, 394), False, 'from djangobench.utils import run_benchmark\n'), ((88, 111), 'query_all.models.Book.objects.iterator', 'Book.objects.iterator', ([], {}), '()\n', (109, 111), False, 'from query_all.models import Book\n'), ((218, 251), 'query_all.models.Book', 'Book', ([], {'pk': 'i', 'title': "('foobar_%s' % i)"}), "(pk=i, title='foobar_%s' % i)\n", (222, 251), False, 'from query_all.models import Book\n')]
|
import numpy as np
import pandas as pd
import random
#from data import Data
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
###-----construct a workable data set-----###
def import_x_data(p):
train_rs = []
#train_label = []
path = p
with open(path, 'r+') as f:
for l in f:
if l.strip() == "":
continue
vec = [0 for _ in range(219)]
tokens = l.split(' ')
#labels = tokens[0]
#train_label.append(labels)
for pair in tokens[1:]:
t = pair.split(':')
idx = int(t[0])
value = int(t[1])
vec[idx-1]=value
train_rs.append(vec)
x = np.asarray(train_rs)
#y = np.asarray(train_label)
return x
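# Input format assumed by the parser above: one libSVM-style line per example, e.g.
#   "+1 3:1 17:2 210:1"
# i.e. a label token followed by sparse index:value pairs (indices are 1-based and are
# mapped into the 219 feature slots; the label itself is read by import_y_data below).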
x_train0 = import_x_data("./training00.data")
x_train1 = import_x_data("./training01.data")
x_train2 = import_x_data("./training02.data")
x_train3 = import_x_data("./training03.data")
x_train4 = import_x_data("./training04.data")
print(x_train0)
#-----create train and testing set X-----#
train_set1 = np.append(x_train0, x_train1, axis = 0)
train_set2 = np.append(train_set1, x_train2, axis = 0)
train_set0 = np.append(train_set2, x_train3, axis = 0)
train_set3 = np.append(train_set1, x_train3, axis = 0)
train_set4 = np.append(x_train0, x_train2, axis = 0)
train_set5 = np.append(train_set4, x_train3, axis = 0)
train_set6 = np.append(x_train1, x_train2, axis = 0)
train_set7 = np.append(train_set6, x_train3, axis = 0)
#print(train_set1)
xtrain_k = np.append(train_set0, x_train4, axis = 0)
xfold4 = np.append(train_set2, x_train3, axis = 0)
xfold3 = np.append(train_set2, x_train4, axis = 0)
xfold2 = np.append(train_set3, x_train4, axis = 0)
xfold1 = np.append(train_set5, x_train4, axis = 0)
xfold0 = np.append(train_set7, x_train4, axis = 0)
print(xfold4.shape)
#-----extract labels from the data-----#
def import_y_data(p):
train_label = []
path = p
with open(path, 'r+') as f:
for l in f:
if l.strip() == "":
continue
vec = [0 for _ in range(17)]
tokens = l.split(' ')
labels = int(tokens[0])
train_label.append(labels)
y = np.asarray(train_label)
return y
y_train0 = import_y_data("./training00.data")
y_train1 = import_y_data("./training01.data")
y_train2 = import_y_data("./training02.data")
y_train3 = import_y_data("./training03.data")
y_train4 = import_y_data("./training04.data")
#-----create training and testing set Y-----#
train1 = np.append(y_train0, y_train1, axis = 0)
train2 = np.append(train1, y_train2, axis = 0)
train0 = np.append(train2, y_train3, axis = 0)
train3 = np.append(train1, y_train3, axis = 0)
train4 = np.append(y_train0, y_train2, axis = 0)
train5 = np.append(train4, y_train3, axis = 0)
train6 = np.append(y_train1, y_train2, axis = 0)
train7 = np.append(train6, y_train3, axis = 0)
yfold4 = np.append(train2, y_train3, axis = 0)
yfold3 = np.append(train2, y_train4, axis = 0)
yfold2 = np.append(train3, y_train4, axis = 0)
yfold1 = np.append(train5, y_train4, axis = 0)
yfold0 = np.append(train7, y_train4, axis = 0)
ytrain_k = np.append(train0, y_train4, axis = 0)
#print(ytrain_k.shape)
print(yfold4.shape)
###----SUPPORT VECTOR MACHINE-----###
def fitt(X,Y,gamma,c):
epoch = 20
w = np.ones(219)
b = 1
for t in range(1, epoch):
#I need to shuffle data
        gt = gamma/(1+epoch)  # note: epoch is constant (20), so this rate does not actually decay over passes
        for i, x in enumerate(X):
            if Y[i]*(np.dot(X[i], w)+b) <= 1:  # hinge condition: y_i * (w.x_i + b) <= 1
w = ((1-gt)*w)+(gt*(c*(Y[i]*X[i])))*100000
#print(w[0:3])
else:
w = (1-gt)*w
#print(w[0:5])
return w
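# For reference, the per-example sub-gradient SVM update this loop approximates is
#   if y_i * (w . x_i + b) <= 1:   w <- (1 - gamma_t) * w + gamma_t * C * y_i * x_i
#   else:                          w <- (1 - gamma_t) * w
# with gamma_t a decaying rate; the extra *100000 factor above is an ad-hoc rescaling
# of that update.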
def predict(X,Y,w):
tp = 0
fp = 0
fn = 0
b = 1
#print(len(X))
for i, x in enumerate(X):
classification = np.sign(np.dot(X[i],w)+b)
if classification > 0 and Y[i] > 0:
tp += 1
elif classification > 0 and Y[i] < 0:
fp += 1
if classification < 0 and Y[i] > 0:
fn += 1
else:
pass
return tp,fp,fn
#-----test for F1 statistic-----#
def F1(TP,FP,FN):
if TP+FP == 0:
p = 1
else:
p = TP/(TP+FP)
if TP+FN == 0:
r = 1
else:
r = TP/(TP+FN)
f1 = 2*(p*r)/(p+r)
return f1
def p(TP,FP):
if TP+FP ==0:
p = 1
else:
p = TP/(TP+FP)
return p
def r(TP,FN):
if TP+FN == 0:
r = 1
else:
r = TP/(TP+FN)
return r
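# Worked example of the precision/recall/F1 arithmetic above (illustrative counts only):
#   TP=30, FP=10, FN=20  ->  P = 30/40 = 0.75, R = 30/50 = 0.60,
#   F1 = 2*P*R/(P+R) = 0.9/1.35 ~ 0.667, so F1(30, 10, 20) returns ~0.667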
#-----average f1 score with: gamma=0.1, c=0.01-----#
t4 = fitt(xfold4,yfold4,0.1,0.01)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
print(t4)
print(p_tst4)
print(p_tra4)
print(tf4)
print(trf4)
t3 = fitt(xfold3,yfold3,0.1,0.01)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
print(t3)
print(p_tst3)
print(p_tra3)
print(tf3)
print(trf3)
t2 = fitt(xfold2,yfold2,0.1,0.01)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
print(t2)
print(p_tst2)
print(p_tra2)
print(tf2)
print(trf2)
t1 = fitt(xfold1,yfold1,0.1,0.01)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
print(t1)
print(p_tst1)
print(p_tra1)
print(tf1)
print(trf1)
t0 = fitt(xfold0,yfold0,0.1,0.01)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print(t0)
print(p_tst0)
print(p_tra0)
print(tf0)
print(trf0)
print('TESTavgf1(gamma=0.1,c=0.01):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1#
print('TRAINavgf1(gamma=0.1,c=0.01):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1#
### ###
#-----average f1 score with: gamma=0.01, c=1-----#
t4 = fitt(xfold4,yfold4,0.01,1)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
t3 = fitt(xfold3,yfold3,0.01,1)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
t2 = fitt(xfold2,yfold2,0.01,1)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
t1 = fitt(xfold1,yfold1,0.01,1)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
t0 = fitt(xfold0,yfold0,0.01,1)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print('TESTavgf1(gamma=0.01,c=1):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1
print('TRAINavgf1(gamma=0.01,c=1):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1
### ###
#-----average f1 score with: gamma=0.1 c=0.00001-----#
t4 = fitt(xfold4,yfold4,0.1,0.00001)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
print(p_tst4)
t3 = fitt(xfold3,yfold3,0.1,0.00001)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
print(p_tst3)
t2 = fitt(xfold2,yfold2,0.1,0.00001)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
print(p_tst2)
t1 = fitt(xfold1,yfold1,0.1,0.00001)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
print(p_tst1)
t0 = fitt(xfold0,yfold0,0.1,0.00001)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print(p_tst0)
print('TESTavgf1(gamma=0.1,c=0.00001):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1
print('TRAINavgf1(gamma=0.1,c=0.00001):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1
### ###
#-----average f1 score with: gamma=0.001 c=10-----#
t4 = fitt(xfold4,yfold4,0.001,10)
p_tst4 = predict(x_train4,y_train4,t4)
p_tra4 = predict(xfold4,yfold4,t4)
tp4 = p(p_tst4[0],p_tst4[1])
tr4 = r(p_tst4[0],p_tst4[2])
trp4 = p(p_tra4[0],p_tra4[1])
trr4 = r(p_tra4[0],p_tra4[2])
tf4 = F1(p_tst4[0],p_tst4[1],p_tst4[2])
trf4 = F1(p_tra4[0],p_tra4[1],p_tra4[2])
print(p_tst4)
t3 = fitt(xfold3,yfold3,0.001,10)
p_tst3 = predict(x_train3,y_train3,t3)
p_tra3 = predict(xfold3,yfold3,t3)
tp3 = p(p_tst3[0],p_tst3[1])
tr3 = r(p_tst3[0],p_tst3[2])
trp3 = p(p_tra3[0],p_tra3[1])
trr3 = r(p_tra3[0],p_tra3[2])
tf3 = F1(p_tst3[0],p_tst3[1],p_tst3[2])
trf3 = F1(p_tra3[0],p_tra3[1],p_tra3[2])
print(p_tst3)
t2 = fitt(xfold2,yfold2,0.001,10)
p_tst2 = predict(x_train2,y_train2,t2)
p_tra2 = predict(xfold2,yfold2,t2)
tp2 = p(p_tst2[0],p_tst2[1])
tr2 = r(p_tst2[0],p_tst2[2])
trp2 = p(p_tra2[0],p_tra2[1])
trr2 = r(p_tra2[0],p_tra2[2])
tf2 = F1(p_tst2[0],p_tst2[1],p_tst2[2])
trf2 = F1(p_tra2[0],p_tra2[1],p_tra2[2])
print(p_tst2)
t1 = fitt(xfold1,yfold1,0.001,10)
p_tst1 = predict(x_train1,y_train1,t1)
p_tra1 = predict(xfold1,yfold1,t1)
tp1 = p(p_tst1[0],p_tst1[1])
tr1 = r(p_tst1[0],p_tst1[2])
trp1 = p(p_tra1[0],p_tra1[1])
trr1 = r(p_tra1[0],p_tra1[2])
tf1 = F1(p_tst1[0],p_tst1[1],p_tst1[2])
trf1 = F1(p_tra1[0],p_tra1[1],p_tra1[2])
print(p_tst1)
t0 = fitt(xfold0,yfold0,0.001,10)
p_tst0 = predict(x_train0,y_train0,t0)
p_tra0 = predict(xfold0,yfold0,t0)
tp0 = p(p_tst0[0],p_tst0[1])
tr0 = r(p_tst0[0],p_tst0[2])
trp0 = p(p_tra0[0],p_tra0[1])
trr0 = r(p_tra0[0],p_tra0[2])
tf0 = F1(p_tst0[0],p_tst0[1],p_tst0[2])
trf0 = F1(p_tra0[0],p_tra0[1],p_tra0[2])
print(p_tst0)
print('TESTavgp(gamma=0.001,c=10):',(tp4+tp3+tp2+tp1+tp0)/5)
print('TESTavgr(gamma=0.001,c=10):',(tr4+tr3+tr2+tr1+tr0)/5)
print('TRAINavgp(gamma=0.001,c=10):',(trp4+trp3+trp2+trp1+trp0)/5)
print('TRAINavgr(gamma=0.001,c=10):',(trr4+trr3+trr2+trr1+trr0)/5)
print('TESTavgf1(gamma=0.001,c=10):',(tf4+tf3+tf2+tf1+tf0)/5) #avg TEST F1
print('TRAINavgf1(gamma=0.001,c=10):',(trf4+trf3+trf2+trf1+trf0)/5) #avg TRAIN F1
print('### After many iterations my F1 score converged to roughly 36%. Contrasting my model with sklearn showed a big difference: sklearn produced an accuracy of 82%, which I believe indicates a bug in my code that I have yet to find. My optimal hyperparameters turned out to be gamma=0.001, c=10, giving a 39% average F1 on the training set.')
#-----report accuracy using sklearn-----#
print('please wait...thinking..')
model_svm = svm.SVC(C=1, gamma=0.01)
acc = cross_val_score(model_svm, xtrain_k, ytrain_k, cv=5)
print('sklearn acc',np.mean(acc))
###-----LOGISTIC REGRESSION-----###
def logit(X,Y,gamma,sigma,intercept=False):
w = np.ones(219)
b = 1
epoch = 20
for t in range(1, epoch):
for i, x in enumerate(X):
z = np.dot(X[i], w)
s = 1/(1+np.exp(-z))
            gradient = np.dot(X[i], (s-Y[i]))+2*w/sigma  # cross-entropy gradient x*(s-y) plus L2 term 2w/sigma
            #print(gradient)
            w = w - gamma*gradient  # gradient-descent step on w
return w
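# The gradient used above corresponds to the L2-regularised cross-entropy objective
#   J(w) = -[y*log(s) + (1-y)*log(1-s)] + (w.w)/sigma,  with s = 1/(1+exp(-w.x)),
# whose per-example gradient is x*(s - y) + 2*w/sigma; note this assumes labels y in {0,1},
# while the sign-based estimate() below treats labels as +1/-1.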
def estimate(X,Y,w):
tp = 0
fp = 0
fn = 0
#print(len(X))
for i, x in enumerate(X):
classification = np.sign(np.dot(X[i],w))
if classification > 0 and Y[i] > 0:
tp += 1
elif classification > 0 and Y[i] < 0:
fp += 1
if classification < 0 and Y[i] > 0:
fn += 1
else:
pass
return tp,fp,fn
#-----average f1 score with gamma=0.1, sigma=1-----#
l4 = logit(xfold4,yfold4,0.1,1)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
print(l4)
print(tst_l4)
print(tra_l4)
print(tlf4)
print(trlf4)
l3 = logit(xfold3,yfold3,0.1,1)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
print(l3)
print(tst_l3)
print(tra_l3)
print(tlf3)
print(trlf3)
l2 = logit(xfold2,yfold2,0.1,1)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
print(l2)
print(tst_l2)
print(tra_l2)
print(tlf2)
print(trlf2)
l1 = logit(xfold1,yfold1,0.1,1)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
print(l1)
print(tst_l1)
print(tra_l1)
print(tlf1)
print(trlf1)
l0 = logit(xfold0,yfold0,0.1,1)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print(l0)
print(tst_l0)
print(tra_l0)
print(tlf0)
print(trlf0)
print('TESTavgf1(gamma=0.1,sigma=1):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=0.1,sigma=1):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----average f1 score with gamma=0.01, sigma=0.1-----#
l4 = logit(xfold4,yfold4,0.01,0.1)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
l3 = logit(xfold3,yfold3,0.01,0.1)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
l2 = logit(xfold2,yfold2,0.01,0.1)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
l1 = logit(xfold1,yfold1,0.01,0.1)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
l0 = logit(xfold0,yfold0,0.01,0.1)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print('TESTavgf1(gamma=0.01,c=0.1):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=0.01,c=0.1):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----average f1 score with gamma=0.001, sigma=0.01-----#
l4 = logit(xfold4,yfold4,0.001,0.01)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
l3 = logit(xfold3,yfold3,0.001,0.01)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
l2 = logit(xfold2,yfold2,0.001,0.01)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
l1 = logit(xfold1,yfold1,0.001,0.01)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
l0 = logit(xfold0,yfold0,0.001,0.01)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print('TESTavgf1(gamma=0.001,c=0.01):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=0.001,c=0.01):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----average f1 score with gamma=1, sigma=1-----#
l4 = logit(xfold4,yfold4,1,1)
tst_l4 = estimate(xfold4,yfold4,l4)
tra_l4 = estimate(x_train4,y_train4,l4)
tp4 = p(tst_l4[0],tst_l4[1])
tr4 = r(tst_l4[0],tst_l4[2])
trp4 = p(tra_l4[0],tra_l4[1])
trr4 = p(tra_l4[0],tra_l4[2])
tlf4 = F1(tst_l4[0],tst_l4[1],tst_l4[2])
trlf4 = F1(tra_l4[0],tra_l4[1],tra_l4[2])
l3 = logit(xfold3,yfold3,1,1)
tst_l3 = estimate(xfold3,yfold3,l3)
tra_l3 = estimate(x_train3,y_train3,l3)
tp3 = p(tst_l3[0],tst_l3[1])
tr3 = r(tst_l3[0],tst_l3[2])
trp3 = p(tra_l3[0],tra_l3[1])
trr3 = p(tra_l3[0],tra_l3[2])
tlf3 = F1(tst_l3[0],tst_l3[1],tst_l3[2])
trlf3 = F1(tra_l3[0],tra_l3[1],tra_l3[2])
l2 = logit(xfold2,yfold2,1,1)
tst_l2 = estimate(xfold2,yfold2,l2)
tra_l2 = estimate(x_train2,y_train2,l2)
tp2 = p(tst_l2[0],tst_l2[1])
tr2 = r(tst_l2[0],tst_l2[2])
trp2 = p(tra_l2[0],tra_l2[1])
trr2 = p(tra_l2[0],tra_l2[2])
tlf2 = F1(tst_l2[0],tst_l2[1],tst_l2[2])
trlf2 = F1(tra_l2[0],tra_l2[1],tra_l2[2])
l1 = logit(xfold1,yfold1,1,1)
tst_l1 = estimate(xfold1,yfold1,l1)
tra_l1 = estimate(x_train1,y_train1,l1)
tp1 = p(tst_l1[0],tst_l1[1])
tr1 = r(tst_l1[0],tst_l1[2])
trp1 = p(tra_l1[0],tra_l1[1])
trr1 = p(tra_l1[0],tra_l1[2])
tlf1 = F1(tst_l1[0],tst_l1[1],tst_l1[2])
trlf1 = F1(tra_l1[0],tra_l1[1],tra_l1[2])
l0 = logit(xfold0,yfold0,1,1)
tst_l0 = estimate(xfold0,yfold0,l0)
tra_l0 = estimate(x_train0,y_train0,l0)
tp0 = p(tst_l0[0],tst_l0[1])
tr0 = r(tst_l0[0],tst_l0[2])
trp0 = p(tra_l0[0],tra_l0[1])
trr0 = p(tra_l0[0],tra_l0[2])
tlf0 = F1(tst_l0[0],tst_l0[1],tst_l0[2])
trlf0 = F1(tra_l0[0],tra_l0[1],tra_l0[2])
print('TESTavgp(gamma=1,c=1):',(tp4+tp3+tp2+tp1+tp0)/5)
print('TESTavgr(gamma=1,c=1):',(tr4+tr3+tr2+tr1+tr0)/5)
print('TRAINavgp(gamma=1,c=1):',(trp4+trp3+trp2+trp1+trp0)/5)
print('TRAINavgr(gamma=1,c=1):',(trr4+trr3+trr2+trr1+trr0)/5)
print('TESTavgf1(gamma=1,c=1):',(tlf4+tlf3+tlf2+tlf1+tlf0)/5)
print('TRAINavgf1(gamma=1,c=1):',(trlf4+trlf3+trlf2+trlf1+trlf0)/5)
#-----sklearn logistic regression accuracy-----#
model_logit = LogisticRegression(penalty='l2',C=0.1,solver='lbfgs')
accl = cross_val_score(model_logit, xtrain_k, ytrain_k, cv=5)
print('sklearn accuracy:',np.mean(accl))
###-----Naive Bayes-----###
#-----combining data-----#
y_fold4 = yfold4.reshape(16000,1)
y_fold3 = yfold3.reshape(16000,1)
y_fold2 = yfold2.reshape(16000,1)
y_fold1 = yfold1.reshape(16000,1)
y_fold0 = yfold0.reshape(16000,1)
print(y_fold4.shape)
print(xfold4.shape)
fold4 = np.hstack((y_fold4,xfold4))
fold3 = np.hstack((y_fold3,xfold3))
fold2 = np.hstack((y_fold2,xfold2))
fold1 = np.hstack((y_fold1,xfold1))
fold0 = np.hstack((y_fold0,xfold0))
print(fold4)
print(fold4.shape)
#-----object oriented programming-----#
'''
t0 = Data(fpath='training00.data')
t1 = Data(fpath='training01.data')
t2 = Data(fpath='training02.data')
t3 = Data(fpath='training03.data')
t4 = Data(fpath='training04.data')
t = Data(fpath='test.liblinear')
tr = Data(fpath='train.liblinear')
t0._load_data(fpath="training00.data")
t0._set_attributes_info(index_column_dict,data)
t0.get_row_subset(attribute_name,attribute_value)
'''
def tran_x(X):
return X.T
def p_label(X):
pos = 0
neg = 0
    for i in X[:, 0]:  # the class labels sit in the first column of the stacked fold
if i == 1:
pos += 1
else:
neg += 1
print("Positive: %d" % pos)
print("Negative: %d" % neg)
ppos = pos/len(X)
pneg = neg/len(X)
return ppos, pneg
print(p_label(fold4))
def frequency(X,Y):
pos = []
neg = []
m = 218
for i in range(219):
if Y[i] == 1:
if X[i][m] == 1:
pos[0][2*m+0]
else:
print(neg[1][2*m+1])
return pos, neg
'''
print(pos)
elif Y[i] == -1:
#print('####')
neg = np.count_nonzero(X[i], axis=0)
else:
pass
return pos,neg
print(frequency(xfold4,yfold4))
#frequency(xfold4,yfold4)
'''
model_bayes = GaussianNB(var_smoothing=1.5)
accb = cross_val_score(model_bayes, xtrain_k, ytrain_k, cv =5)
print('sklearn accuracy:', np.mean(accb))
###-----SVM Over Trees-----###
print('please wait....this will take a bit..')
model_rf = RandomForestClassifier(n_estimators=200, max_depth=10)
model_svm = svm.SVC(C=1, gamma=0.01, probability=True)
ensemble_svmot = VotingClassifier(estimators=[('dt',model_rf), ('svm',model_svm)],voting='soft')
accuracy = cross_val_score(ensemble_svmot, xtrain_k, ytrain_k, cv=5)
print('sklearn accuracy',np.mean(accuracy))
|
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.naive_bayes.GaussianNB",
"sklearn.model_selection.cross_val_score",
"numpy.asarray",
"numpy.ones",
"numpy.hstack",
"numpy.append",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.VotingClassifier",
"numpy.mean",
"numpy.exp",
"sklearn.svm.SVC",
"numpy.dot"
] |
[((1381, 1418), 'numpy.append', 'np.append', (['x_train0', 'x_train1'], {'axis': '(0)'}), '(x_train0, x_train1, axis=0)\n', (1390, 1418), True, 'import numpy as np\n'), ((1434, 1473), 'numpy.append', 'np.append', (['train_set1', 'x_train2'], {'axis': '(0)'}), '(train_set1, x_train2, axis=0)\n', (1443, 1473), True, 'import numpy as np\n'), ((1489, 1528), 'numpy.append', 'np.append', (['train_set2', 'x_train3'], {'axis': '(0)'}), '(train_set2, x_train3, axis=0)\n', (1498, 1528), True, 'import numpy as np\n'), ((1544, 1583), 'numpy.append', 'np.append', (['train_set1', 'x_train3'], {'axis': '(0)'}), '(train_set1, x_train3, axis=0)\n', (1553, 1583), True, 'import numpy as np\n'), ((1599, 1636), 'numpy.append', 'np.append', (['x_train0', 'x_train2'], {'axis': '(0)'}), '(x_train0, x_train2, axis=0)\n', (1608, 1636), True, 'import numpy as np\n'), ((1652, 1691), 'numpy.append', 'np.append', (['train_set4', 'x_train3'], {'axis': '(0)'}), '(train_set4, x_train3, axis=0)\n', (1661, 1691), True, 'import numpy as np\n'), ((1707, 1744), 'numpy.append', 'np.append', (['x_train1', 'x_train2'], {'axis': '(0)'}), '(x_train1, x_train2, axis=0)\n', (1716, 1744), True, 'import numpy as np\n'), ((1760, 1799), 'numpy.append', 'np.append', (['train_set6', 'x_train3'], {'axis': '(0)'}), '(train_set6, x_train3, axis=0)\n', (1769, 1799), True, 'import numpy as np\n'), ((1833, 1872), 'numpy.append', 'np.append', (['train_set0', 'x_train4'], {'axis': '(0)'}), '(train_set0, x_train4, axis=0)\n', (1842, 1872), True, 'import numpy as np\n'), ((1884, 1923), 'numpy.append', 'np.append', (['train_set2', 'x_train3'], {'axis': '(0)'}), '(train_set2, x_train3, axis=0)\n', (1893, 1923), True, 'import numpy as np\n'), ((1935, 1974), 'numpy.append', 'np.append', (['train_set2', 'x_train4'], {'axis': '(0)'}), '(train_set2, x_train4, axis=0)\n', (1944, 1974), True, 'import numpy as np\n'), ((1986, 2025), 'numpy.append', 'np.append', (['train_set3', 'x_train4'], {'axis': '(0)'}), '(train_set3, x_train4, axis=0)\n', (1995, 2025), True, 'import numpy as np\n'), ((2037, 2076), 'numpy.append', 'np.append', (['train_set5', 'x_train4'], {'axis': '(0)'}), '(train_set5, x_train4, axis=0)\n', (2046, 2076), True, 'import numpy as np\n'), ((2088, 2127), 'numpy.append', 'np.append', (['train_set7', 'x_train4'], {'axis': '(0)'}), '(train_set7, x_train4, axis=0)\n', (2097, 2127), True, 'import numpy as np\n'), ((2843, 2880), 'numpy.append', 'np.append', (['y_train0', 'y_train1'], {'axis': '(0)'}), '(y_train0, y_train1, axis=0)\n', (2852, 2880), True, 'import numpy as np\n'), ((2892, 2927), 'numpy.append', 'np.append', (['train1', 'y_train2'], {'axis': '(0)'}), '(train1, y_train2, axis=0)\n', (2901, 2927), True, 'import numpy as np\n'), ((2939, 2974), 'numpy.append', 'np.append', (['train2', 'y_train3'], {'axis': '(0)'}), '(train2, y_train3, axis=0)\n', (2948, 2974), True, 'import numpy as np\n'), ((2986, 3021), 'numpy.append', 'np.append', (['train1', 'y_train3'], {'axis': '(0)'}), '(train1, y_train3, axis=0)\n', (2995, 3021), True, 'import numpy as np\n'), ((3033, 3070), 'numpy.append', 'np.append', (['y_train0', 'y_train2'], {'axis': '(0)'}), '(y_train0, y_train2, axis=0)\n', (3042, 3070), True, 'import numpy as np\n'), ((3082, 3117), 'numpy.append', 'np.append', (['train4', 'y_train3'], {'axis': '(0)'}), '(train4, y_train3, axis=0)\n', (3091, 3117), True, 'import numpy as np\n'), ((3129, 3166), 'numpy.append', 'np.append', (['y_train1', 'y_train2'], {'axis': '(0)'}), '(y_train1, y_train2, axis=0)\n', (3138, 3166), True, 'import numpy as 
np\n'), ((3178, 3213), 'numpy.append', 'np.append', (['train6', 'y_train3'], {'axis': '(0)'}), '(train6, y_train3, axis=0)\n', (3187, 3213), True, 'import numpy as np\n'), ((3226, 3261), 'numpy.append', 'np.append', (['train2', 'y_train3'], {'axis': '(0)'}), '(train2, y_train3, axis=0)\n', (3235, 3261), True, 'import numpy as np\n'), ((3273, 3308), 'numpy.append', 'np.append', (['train2', 'y_train4'], {'axis': '(0)'}), '(train2, y_train4, axis=0)\n', (3282, 3308), True, 'import numpy as np\n'), ((3320, 3355), 'numpy.append', 'np.append', (['train3', 'y_train4'], {'axis': '(0)'}), '(train3, y_train4, axis=0)\n', (3329, 3355), True, 'import numpy as np\n'), ((3367, 3402), 'numpy.append', 'np.append', (['train5', 'y_train4'], {'axis': '(0)'}), '(train5, y_train4, axis=0)\n', (3376, 3402), True, 'import numpy as np\n'), ((3414, 3449), 'numpy.append', 'np.append', (['train7', 'y_train4'], {'axis': '(0)'}), '(train7, y_train4, axis=0)\n', (3423, 3449), True, 'import numpy as np\n'), ((3463, 3498), 'numpy.append', 'np.append', (['train0', 'y_train4'], {'axis': '(0)'}), '(train0, y_train4, axis=0)\n', (3472, 3498), True, 'import numpy as np\n'), ((11272, 11296), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(1)', 'gamma': '(0.01)'}), '(C=1, gamma=0.01)\n', (11279, 11296), False, 'from sklearn import svm\n'), ((11303, 11355), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model_svm', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(model_svm, xtrain_k, ytrain_k, cv=5)\n', (11318, 11355), False, 'from sklearn.model_selection import cross_val_score\n'), ((18075, 18130), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'C': '(0.1)', 'solver': '"""lbfgs"""'}), "(penalty='l2', C=0.1, solver='lbfgs')\n", (18093, 18130), False, 'from sklearn.linear_model import LogisticRegression\n'), ((18136, 18190), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model_logit', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(model_logit, xtrain_k, ytrain_k, cv=5)\n', (18151, 18190), False, 'from sklearn.model_selection import cross_val_score\n'), ((18510, 18538), 'numpy.hstack', 'np.hstack', (['(y_fold4, xfold4)'], {}), '((y_fold4, xfold4))\n', (18519, 18538), True, 'import numpy as np\n'), ((18546, 18574), 'numpy.hstack', 'np.hstack', (['(y_fold3, xfold3)'], {}), '((y_fold3, xfold3))\n', (18555, 18574), True, 'import numpy as np\n'), ((18582, 18610), 'numpy.hstack', 'np.hstack', (['(y_fold2, xfold2)'], {}), '((y_fold2, xfold2))\n', (18591, 18610), True, 'import numpy as np\n'), ((18618, 18646), 'numpy.hstack', 'np.hstack', (['(y_fold1, xfold1)'], {}), '((y_fold1, xfold1))\n', (18627, 18646), True, 'import numpy as np\n'), ((18654, 18682), 'numpy.hstack', 'np.hstack', (['(y_fold0, xfold0)'], {}), '((y_fold0, xfold0))\n', (18663, 18682), True, 'import numpy as np\n'), ((20011, 20040), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {'var_smoothing': '(1.5)'}), '(var_smoothing=1.5)\n', (20021, 20040), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((20048, 20102), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model_bayes', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(model_bayes, xtrain_k, ytrain_k, cv=5)\n', (20063, 20102), False, 'from sklearn.model_selection import cross_val_score\n'), ((20239, 20293), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(200)', 'max_depth': '(10)'}), '(n_estimators=200, max_depth=10)\n', (20261, 20293), False, 'from sklearn.ensemble import 
RandomForestClassifier\n'), ((20306, 20348), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(1)', 'gamma': '(0.01)', 'probability': '(True)'}), '(C=1, gamma=0.01, probability=True)\n', (20313, 20348), False, 'from sklearn import svm\n'), ((20366, 20453), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': "[('dt', model_rf), ('svm', model_svm)]", 'voting': '"""soft"""'}), "(estimators=[('dt', model_rf), ('svm', model_svm)], voting=\n 'soft')\n", (20382, 20453), False, 'from sklearn.ensemble import VotingClassifier\n'), ((20457, 20514), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['ensemble_svmot', 'xtrain_k', 'ytrain_k'], {'cv': '(5)'}), '(ensemble_svmot, xtrain_k, ytrain_k, cv=5)\n', (20472, 20514), False, 'from sklearn.model_selection import cross_val_score\n'), ((1008, 1028), 'numpy.asarray', 'np.asarray', (['train_rs'], {}), '(train_rs)\n', (1018, 1028), True, 'import numpy as np\n'), ((2517, 2540), 'numpy.asarray', 'np.asarray', (['train_label'], {}), '(train_label)\n', (2527, 2540), True, 'import numpy as np\n'), ((3646, 3658), 'numpy.ones', 'np.ones', (['(219)'], {}), '(219)\n', (3653, 3658), True, 'import numpy as np\n'), ((11376, 11388), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (11383, 11388), True, 'import numpy as np\n'), ((11484, 11496), 'numpy.ones', 'np.ones', (['(219)'], {}), '(219)\n', (11491, 11496), True, 'import numpy as np\n'), ((18217, 18230), 'numpy.mean', 'np.mean', (['accl'], {}), '(accl)\n', (18224, 18230), True, 'import numpy as np\n'), ((20131, 20144), 'numpy.mean', 'np.mean', (['accb'], {}), '(accb)\n', (20138, 20144), True, 'import numpy as np\n'), ((20540, 20557), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (20547, 20557), True, 'import numpy as np\n'), ((11602, 11617), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (11608, 11617), True, 'import numpy as np\n'), ((11930, 11945), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (11936, 11945), True, 'import numpy as np\n'), ((4167, 4182), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (4173, 4182), True, 'import numpy as np\n'), ((11674, 11696), 'numpy.dot', 'np.dot', (['X[i]', '(s - Y[i])'], {}), '(X[i], s - Y[i])\n', (11680, 11696), True, 'import numpy as np\n'), ((11639, 11649), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (11645, 11649), True, 'import numpy as np\n'), ((3815, 3830), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (3821, 3830), True, 'import numpy as np\n')]
|
from PIL import ImageGrab
class PILScreenshot:
def __init__(self):
pass
def screenshot(self, bbox: list, file_path: str):
"""
left, top, right, bottom = bbox
"""
im = ImageGrab.grab(bbox=bbox)
im.save(file_path)
if __name__ == '__main__':
cap = PILScreenshot()
cap.screenshot([0, 0, 2550, 1440], './images/windows-screenshot.png')
|
[
"PIL.ImageGrab.grab"
] |
[((219, 244), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {'bbox': 'bbox'}), '(bbox=bbox)\n', (233, 244), False, 'from PIL import ImageGrab\n')]
|
from flask import render_template
from . import bapp
# Register a global 404 error handler with the application; individual routes can then omit their own handling
@bapp.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
# Register a global 403 error handler with the application; individual routes can then omit their own handling
@bapp.app_errorhandler(403)
def forbidden(e):
return render_template('403.html'), 403
# Register a global 500 error handler with the application; individual routes can then omit their own handling
@bapp.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
|
[
"flask.render_template"
] |
[((151, 178), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (166, 178), False, 'from flask import render_template\n'), ((281, 308), 'flask.render_template', 'render_template', (['"""403.html"""'], {}), "('403.html')\n", (296, 308), False, 'from flask import render_template\n'), ((418, 445), 'flask.render_template', 'render_template', (['"""500.html"""'], {}), "('500.html')\n", (433, 445), False, 'from flask import render_template\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# create a file with transit opportunities per day (opd) at munis
# filter file with average tpd per stop and stop location, using muni border multipolygons in geojson files
# sum tpds at stops in muni to calculate opd for muni
#
# input:
# gtfsdate = '20180425'
# sserviceweekstartdate = '20180425'
# pathin = 'C:\\transitanalyst\\processed\\'
# pathout = 'C:\\transitanalyst\\processed\\'
# txt file with average tpd per stop - 'stopswtpdand10xforrail'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt'
# stopsinmuni_post_edit = 'stopsinmuni_post_edit'+'_'+servicedate+'.txt'
# 'muni_names.txt' - map names from muni_id to english to hebrew
# output:
# txt file with average opd per muni - 'muni_opd'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt'
#
#
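# Worked example (made-up numbers) of the opd accumulation performed below:
# a muni overlapping two stops with averagetpdatstop 12.0 and 30.0 and
# part_in_muni weights 1.0 and 0.5 (presumably the fraction of the stop
# attributed to the muni) gets
#   opdinmuni = 12.0*1.0 + 30.0*0.5 = 27.0
#   stopinmunicount = 1.0 + 0.5 = 1.5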
print('----------------- create a file with transit opportunities per day (opd) at munis --------------------------')
print('sum tpds at stops in muni to calculate opd for muni')
print('generate muni_opd_[serviceweekstartdate]_[gtfsdate].txt')
from datetime import date
from datetime import timedelta
import time
import copy
import os
import json
import csv
from shapely.geometry import shape, Point, Polygon, MultiPolygon
import gtfs_config as gtfscfg
from pathlib import Path
cwd = Path.cwd()
def main(gtfsdate, processedpath, serviceweekstartdate):
# input:
sserviceweekstartdate = serviceweekstartdate
pathin = cwd.parent / processedpath
pathout = cwd.parent / processedpath
    stopsfilein = 'stopswtpdand10xforrail'+'_'+sserviceweekstartdate+'_'+gtfsdate+'.txt' # txt file with average tpd per stop and stop location
servicedate = sserviceweekstartdate
stopsinmuni_post_edit = 'stopsinmuni_post_edit'+'_'+servicedate+'.txt'
muninamesfilein = 'muni_names.txt'
processedpathin = pathout
parent_path = cwd.parent / processedpath
# output:
munifileout = stopsfilein.replace('stopswtpdand10xforrail', 'muni_opd') # txt file with average opd per muni
print('stopsfilein, munifileout : ', stopsfilein, munifileout)
gtfspathin = pathin
gtfspathout = pathout
#
# load files
#
#
# scan stopfile to create munistops_dict and compute maxaveragetpdatstop and totaltripsatallstops
#
# 1st sline is 'stop_id,stop_lat,stop_lon,averagetpdatstop\n'
#
maxaveragetpdatstop = 0.0
totaltripsatallstops = 0.0
munistops_dict = {}
slinelist=[]
print(gtfspathin / stopsfilein)
filein = open(gtfspathin / stopsfilein, 'r', encoding="utf8")
sline = filein.readline()
keylinelen = len(sline)
slinelist=sline[:-1].split(",")
print(slinelist)
keylist = slinelist
stop_id_i = keylist.index('stop_id')
stop_lat_i = keylist.index('stop_lat')
stop_lon_i = keylist.index('stop_lon')
averagetpdatstop_i = keylist.index('averagetpdatstop')
print(slinelist[stop_id_i], slinelist[stop_lat_i], slinelist[stop_lon_i], slinelist[averagetpdatstop_i])
maxfilelinecount = gtfscfg.MAX_STOPS_COUNT
count = 0
sline = filein.readline()
fileinlines = (os.path.getsize(gtfspathin / stopsfilein)-keylinelen)/len(sline)
# scan stopsfilein
while ((count < maxfilelinecount) and (sline != '')):
slinelist=sline[:-1].split(",")
#print (slinelist)
stop_id = slinelist[stop_id_i]
stop_lat = slinelist[stop_lat_i]
stop_lon = slinelist[stop_lon_i]
averagetpdatstop = float(slinelist[averagetpdatstop_i])
maxaveragetpdatstop = max(maxaveragetpdatstop, averagetpdatstop)
totaltripsatallstops += averagetpdatstop
munistops_dict[stop_id] = [stop_lat, stop_lon, averagetpdatstop]
count += 1
#print count, fileinlines, averagetpdatstop, maxaveragetpdatstop, totaltripsatallstops
sline = filein.readline()
print('count, fileinlines, averagetpdatstop, maxaveragetpdatstop, totaltripsatallstops')
print(count, fileinlines, averagetpdatstop, maxaveragetpdatstop, totaltripsatallstops)
print('------------------')
print('stops lines scanned ', count)
filein.close()
#
# >>> load txt file of stopsinmuni post edit
#
print('>>> load txt file of stopsinmuni post edit')
txtfilein = stopsinmuni_post_edit
stopsinmuni = {}
with open(processedpathin / txtfilein, newline='', encoding="utf8") as f:
reader = csv.reader(f)
header = next(reader) # ['muni_id', 'stop_id', 'part_in_muni']
print(header)
for row in reader:
#print row
muni_id = row[0]
stop_id = row[1]
part_in_muni = row[2]
# add to dict
if muni_id in stopsinmuni :
stopsinmuni[muni_id][stop_id] = part_in_muni
else :
stopsinmuni[muni_id] = {}
stopsinmuni[muni_id][stop_id] = part_in_muni
print(stopsinmuni[muni_id]) # last one
print('stopsinmuni loaded. muni count ', len(stopsinmuni))
# >>> load muninames file
muniid2engdict = {}
with open(parent_path / muninamesfilein, newline='', encoding="utf8") as muninames_f:
readermuninames = csv.reader(muninames_f)
headermuninames = next(readermuninames) # muni_id,muni_name_h,muni_name_e
print(headermuninames)
for row in readermuninames:
#print row
muni_id = row[0]
muni_name_h = row[1]
muni_name_e = row[2]
muniid2engdict[muni_id] = muni_name_e
print(muniid2engdict[muni_id]) # print last one
print('muniid2engdict loaded. muninames count ', len(muniid2engdict))
#
# process loaded files
#
#
# for each muni
# filter stops w tpd using stops in muni list
# sum the tpd from all stops in muni to get opd for muni
# output muni opd to txt file
#
fileout = open(pathout / munifileout, 'w', encoding="utf8") # open file to save results
postsline = 'municode,muni_name,opdinmuni,stopinmunicount\n'
fileout.write(postsline)
# for each muni
for muni_id, stopsindict in stopsinmuni.items():
# use stops in muni dict as filter
muni_name = muniid2engdict[muni_id]
print(muni_name)
# filter stops w tpd using stops in muni list
muni_stops_dict = {}
stopinmunicount = 0.0
opdinmuni = 0.0
for stop_id, [stop_lat, stop_lon, averagetpdatstop] in munistops_dict.items() :
if stop_id in stopsindict :
part_in_muni = float(stopsindict[stop_id])
stopinmunicount += part_in_muni
opdinmuni += averagetpdatstop*part_in_muni # sum tpd per stop in muni to get opd
print('stopinmunicount, opdinmuni: ', stopinmunicount, round(opdinmuni))
#print muni_tpdperline_dict
# output muni opportunities per day (opd) to txt file
postsline = muni_id+','+muni_name+','+str(round(opdinmuni))+','+str(round(stopinmunicount))+'\n'
fileout.write(postsline)
fileout.close()
print('closed file: ', munifileout)
|
[
"os.path.getsize",
"pathlib.Path.cwd",
"csv.reader"
] |
[((1300, 1310), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1308, 1310), False, 'from pathlib import Path\n'), ((4139, 4152), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4149, 4152), False, 'import csv\n'), ((4776, 4799), 'csv.reader', 'csv.reader', (['muninames_f'], {}), '(muninames_f)\n', (4786, 4799), False, 'import csv\n'), ((2970, 3011), 'os.path.getsize', 'os.path.getsize', (['(gtfspathin / stopsfilein)'], {}), '(gtfspathin / stopsfilein)\n', (2985, 3011), False, 'import os\n')]
|
from django.conf import settings
from influxdb import InfluxDBClient
from .base import * # noqa
class Storage(object):
@staticmethod
def store(data):
influx_client = Storage.get_client()
influx_client.write_points(data)
return data
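    # Illustrative call to store() (values are made up); write_points() expects
    # points in the InfluxDB JSON format, e.g.:
    #   Storage.store([{
    #       "measurement": "cpu_load",
    #       "tags": {"host": "server01"},
    #       "time": "2018-01-01T00:00:00Z",
    #       "fields": {"value": 0.64},
    #   }])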
@staticmethod
def get_client():
return InfluxDBClient(
settings.INFLUXDB_HOST,
settings.INFLUXDB_PORT,
settings.INFLUXDB_USER,
settings.INFLUXDB_PASS,
settings.INFLUXDB_DATABASE
)
|
[
"influxdb.InfluxDBClient"
] |
[((325, 468), 'influxdb.InfluxDBClient', 'InfluxDBClient', (['settings.INFLUXDB_HOST', 'settings.INFLUXDB_PORT', 'settings.INFLUXDB_USER', 'settings.INFLUXDB_PASS', 'settings.INFLUXDB_DATABASE'], {}), '(settings.INFLUXDB_HOST, settings.INFLUXDB_PORT, settings.\n INFLUXDB_USER, settings.INFLUXDB_PASS, settings.INFLUXDB_DATABASE)\n', (339, 468), False, 'from influxdb import InfluxDBClient\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
__config__ = pulumi.Config('azure')
auxiliary_tenant_ids = __config__.get('auxiliaryTenantIds')
client_certificate_password = __config__.get('clientCertificatePassword') or (utilities.get_env('AZURE_CLIENT_CERTIFICATE_PASSWORD', 'ARM_CLIENT_CERTIFICATE_PASSWORD') or '')
"""
The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client
Certificate
"""
client_certificate_path = __config__.get('clientCertificatePath') or (utilities.get_env('AZURE_CLIENT_CERTIFICATE_PATH', 'ARM_CLIENT_CERTIFICATE_PATH') or '')
"""
The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service
Principal using a Client Certificate.
"""
client_id = __config__.get('clientId') or (utilities.get_env('AZURE_CLIENT_ID', 'ARM_CLIENT_ID') or '')
"""
The Client ID which should be used.
"""
client_secret = __config__.get('clientSecret') or (utilities.get_env('AZURE_CLIENT_SECRET', 'ARM_CLIENT_SECRET') or '')
"""
The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.
"""
disable_correlation_request_id = __config__.get('disableCorrelationRequestId')
"""
This will disable the x-ms-correlation-request-id header.
"""
disable_terraform_partner_id = __config__.get('disableTerraformPartnerId') or (utilities.get_env_bool('ARM_DISABLE_TERRAFORM_PARTNER_ID') or True)
"""
This will disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.
"""
environment = __config__.get('environment') or (utilities.get_env('AZURE_ENVIRONMENT', 'ARM_ENVIRONMENT') or 'public')
"""
The Cloud Environment which should be used. Possible values are public, usgovernment, german, and china. Defaults to
public.
"""
features = __config__.get('features')
msi_endpoint = __config__.get('msiEndpoint') or (utilities.get_env('ARM_MSI_ENDPOINT') or '')
"""
The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected
automatically.
"""
partner_id = __config__.get('partnerId') or (utilities.get_env('ARM_PARTNER_ID') or '')
"""
A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
"""
skip_credentials_validation = __config__.get('skipCredentialsValidation') or (utilities.get_env_bool('ARM_SKIP_CREDENTIALS_VALIDATION') or False)
"""
This will cause the AzureRM Provider to skip verifying the credentials being used are valid.
"""
skip_provider_registration = __config__.get('skipProviderRegistration') or (utilities.get_env_bool('ARM_SKIP_PROVIDER_REGISTRATION') or False)
"""
Should the AzureRM Provider skip registering all of the Resource Providers that it supports, if they're not already
registered?
"""
subscription_id = __config__.get('subscriptionId') or (utilities.get_env('ARM_SUBSCRIPTION_ID') or '')
"""
The Subscription ID which should be used.
"""
tenant_id = __config__.get('tenantId') or (utilities.get_env('AZURE_TENANT_ID', 'ARM_TENANT_ID') or '')
"""
The Tenant ID which should be used.
"""
use_msi = __config__.get('useMsi') or (utilities.get_env_bool('ARM_USE_MSI') or False)
"""
Allow Managed Service Identity to be used for Authentication.
"""
location = __config__.get('location') or utilities.get_env('ARM_LOCATION')
|
[
"pulumi.Config"
] |
[((321, 343), 'pulumi.Config', 'pulumi.Config', (['"""azure"""'], {}), "('azure')\n", (334, 343), False, 'import pulumi\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Graphical interface to prepare scan commands."""
from nicos.clients.gui.utils import DlgPresets, loadUi
from nicos.guisupport.qt import QButtonGroup, QDialog, QIntValidator, \
pyqtSignal
from nicos.guisupport.utils import DoubleValidator
def toint(text):
text = str(text)
if not text:
return 0
return int(text)
def tofloat(text):
text = str(text)
if not text:
return 0.0
return float(text)
def fmt_time(seconds):
if seconds < 60:
return '%d sec' % seconds
elif seconds < 3600:
return '%d min' % (seconds // 60)
else:
return '%d h %d min' % (seconds // 3600, (seconds % 3600) // 60)
class ScanTool(QDialog):
addCode = pyqtSignal(str)
def __init__(self, parent, client, **settings):
QDialog.__init__(self, parent)
loadUi(self, 'tools/scan.ui')
self.scanButtonGroup = QButtonGroup()
self.scanButtonGroup.addButton(self.scanSingle)
self.scanButtonGroup.addButton(self.scanCentered)
self.qscanButtonGroup = QButtonGroup()
self.qscanButtonGroup.addButton(self.qscanSingle)
self.qscanButtonGroup.addButton(self.qscanCentered)
self.qscanButtonGroup.addButton(self.qscanRandom)
self.qscanButtonGroup.addButton(self.qscanLong)
self.qscanButtonGroup.addButton(self.qscanTrans)
self.presetButtonGroup = QButtonGroup()
self.presetButtonGroup.addButton(self.presetTime)
self.presetButtonGroup.addButton(self.presetMonitor)
self.scanButtonGroup.buttonClicked.connect(self.updateCommand)
self.qscanButtonGroup.buttonClicked.connect(self.updateCommand)
self.presetButtonGroup.buttonClicked.connect(self.updateCommand)
self.stepsInput.valueChanged.connect(self.updateCommand)
self.timeInput.valueChanged.connect(self.updateCommand)
self.monitorInput.valueChanged.connect(self.updateCommand)
        self.deviceList.itemSelectionChanged.connect(self.updateCommand)
self.scanPreset.textChanged.connect(self.updateCommand)
self.scanNumsteps.textChanged.connect(self.updateCommand)
self.scanStep.textChanged.connect(self.updateCommand)
self.scanStart.textChanged.connect(self.updateCommand)
self.deviceName.textChanged.connect(self.updateCommand)
self.scanRange.textChanged.connect(self.updateCommand)
self.hInput.textChanged.connect(self.updateCommand)
self.kInput.textChanged.connect(self.updateCommand)
self.lInput.textChanged.connect(self.updateCommand)
self.EInput.textChanged.connect(self.updateCommand)
self.deltahInput.textChanged.connect(self.updateCommand)
self.deltakInput.textChanged.connect(self.updateCommand)
self.deltalInput.textChanged.connect(self.updateCommand)
self.deltaEInput.textChanged.connect(self.updateCommand)
self.deltaqInput.textChanged.connect(self.updateCommand)
self.generateBtn.clicked.connect(self.createCommand)
self.clearAllBtn.clicked.connect(self.clearAll)
self.quitBtn.clicked.connect(self.close)
self.scanCalc.clicked.connect(self.calc_scan)
self.qscanCalc.clicked.connect(self.calc_qscan)
self.qscanSingle.clicked.connect(self.set_qlabels)
self.qscanCentered.clicked.connect(self.set_qlabels)
self.qscanLong.clicked.connect(self.set_qlabels)
self.qscanTrans.clicked.connect(self.set_qlabels)
self.qscanRandom.clicked.connect(self.set_qlabels)
self._devices = sorted(parent.client.eval(
'[(dev.name, dev.unit) '
'for (name, dev) in session.devices.items() '
'if name in session.explicit_devices and hasattr(dev, "maw")]',
[]))
self.tabWidget.setTabEnabled(0, self._devices != [])
for name, unit in self._devices:
self.deviceList.addItem("%s [%s]" % (name, unit))
dval = DoubleValidator(self)
ival = QIntValidator(self)
# qscan tab
self.hInput.setValidator(dval)
self.kInput.setValidator(dval)
self.lInput.setValidator(dval)
self.EInput.setValidator(dval)
self.deltahInput.setValidator(dval)
self.deltakInput.setValidator(dval)
self.deltalInput.setValidator(dval)
self.deltaEInput.setValidator(dval)
self.deltaqInput.setValidator(dval)
# disabled for now
self.qscanRandom.setVisible(False)
self.qscanTrans.setVisible(False)
self.qscanLong.setVisible(False)
# scan/cscan tab
self.scanStart.setValidator(dval)
self.scanStep.setValidator(dval)
self.scanNumsteps.setValidator(ival)
self.scanPreset.setValidator(dval)
self.scanMovetime.setValidator(dval)
self.presets = DlgPresets('scaninput', [
# qscan tab
(self.qscanSingle, 1), (self.qscanCentered, 0),
(self.qscanLong, 0), (self.qscanTrans, 0),
(self.qscanRandom, 0), (self.monitorInput, 10000),
(self.timeInput, 120), (self.presetTime, 1),
(self.presetMonitor, 0),
(self.hInput, '0.0'), (self.kInput, '0.0'),
(self.lInput, '0.0'), (self.EInput, '0.0'),
(self.deltahInput, '0.0'), (self.deltakInput, '0.0'),
(self.deltalInput, '0.0'), (self.deltaEInput, '0.0'),
(self.deltaqInput, '0.0'), (self.stepsInput, 10),
# scan tab
(self.scanSingle, 1), (self.scanCentered, 0),
(self.scanStart, '0.0'), (self.scanStep, '0.0'),
(self.scanNumsteps, '0'), (self.scanPreset, '0.0'),
(self.deviceList, 'om [deg]'), (self.deviceName, ''),
(self.scanMovetime, '0'),
# the tab itself
(self.tabWidget, 0),
])
self.presets.load()
self.set_qlabels()
def set_qlabels(self, *args):
if self.qscanCentered.isChecked() or self.qscanSingle.isChecked():
self.label_dh.setText('<b>∆h</b>')
self.label_dk.setText('<b>∆k</b>')
self.label_dl.setText('<b>∆l</b>')
self.deltahInput.setEnabled(True)
self.deltakInput.setEnabled(True)
self.deltalInput.setEnabled(True)
self.deltaqInput.setEnabled(False)
elif self.qscanLong.isChecked() or self.qscanTrans.isChecked():
self.label_dh.setText('')
self.label_dk.setText('')
self.label_dl.setText('')
self.deltahInput.setEnabled(False)
self.deltakInput.setEnabled(False)
self.deltalInput.setEnabled(False)
self.deltaqInput.setEnabled(True)
elif self.qscanRandom.isChecked():
self.label_dh.setText('<b>u</b>')
self.label_dk.setText('<b>v</b>')
self.label_dl.setText('<b>w</b>')
self.deltahInput.setEnabled(True)
self.deltakInput.setEnabled(True)
self.deltalInput.setEnabled(True)
self.deltaqInput.setEnabled(True)
def close(self, *args):
"""Close the window and save the settings."""
self.presets.save()
return True
def closeEvent(self, event):
self.presets.save()
self.deleteLater()
self.accept()
def clearAll(self):
# Clear scan
self.scanStart.clear()
self.scanStep.clear()
self.scanNumsteps.clear()
self.scanPreset.clear()
self.scanRange.clear()
self.scanEstimation.clear()
self.scanMovetime.clear()
# Clear qscan
self.hInput.clear()
self.deltahInput.clear()
self.kInput.clear()
self.deltakInput.clear()
self.lInput.clear()
self.deltalInput.clear()
self.EInput.clear()
self.deltaEInput.clear()
self.deltaqInput.clear()
def calc_scan(self):
stepsize = tofloat(self.scanStep.text())
numstep = toint(self.scanNumsteps.text())
startpos = tofloat(self.scanStart.text())
movetime = tofloat(self.scanMovetime.text())
preset = tofloat(self.scanPreset.text())
if self.scanSingle.isChecked():
            endpos = startpos + stepsize * (numstep - 1)
self.scanRange.setText('- %.2f' % endpos)
seconds = (movetime + preset) * numstep
else:
lowerend = startpos - stepsize * numstep
upperend = startpos + stepsize * numstep
self.scanRange.setText('%.2f - %.2f' % (lowerend, upperend))
seconds = (movetime + preset) * (2 * numstep + 1)
self.scanEstimation.setText(fmt_time(seconds))
return seconds
def calc_qscan(self):
numstep = toint(self.stepsInput.text())
if self.qscanCentered.isChecked() or self.qscanLong.isChecked() or \
self.qscanTrans.isChecked():
numstep = 2 * numstep + 1
if self.presetTime.isChecked():
preset = tofloat(self.timeInput.text())
seconds = numstep * preset
self.qscanEstimation.setText(fmt_time(seconds))
return seconds
else:
self.qscanEstimation.setText('no estimation possible')
return 0
def updateCommand(self, *args):
self.cmdResult.setText('<b>%s</b>' % self._getCommand())
def _getCommand(self):
tab = self.tabWidget.currentIndex()
def timeest(secs):
if secs == 0:
return ''
return '#- %d sec (%s)\n' % (secs, fmt_time(secs))
# Qscan
if tab == 1:
params = [
('h', self.hInput, tofloat),
('k', self.kInput, tofloat),
('l', self.lInput, tofloat),
('E', self.EInput, tofloat),
('n', self.stepsInput, toint),
('dh', self.deltahInput, tofloat),
('dk', self.deltakInput, tofloat),
('dl', self.deltalInput, tofloat),
('dE', self.deltaEInput, tofloat),
('dq', self.deltaqInput, tofloat),
('t', self.timeInput, tofloat),
('m', self.monitorInput, toint),
]
d = {name: func(ctl.text()) for (name, ctl, func) in params}
if self.qscanSingle.isChecked():
cmdname = 'qscan'
            elif self.qscanCentered.isChecked():
cmdname = 'qcscan'
else:
return # for now
scan = cmdname + '([%(h)s, %(k)s, %(l)s, %(E)s], ' \
'[%(dh)s, %(dk)s, %(dl)s, %(dE)s], %(n)s' % d
if self.presetTime.isChecked():
scan += ', t=%s)' % d['t']
else:
scan += ', m1=%s)' % d['m']
cmd = timeest(self.calc_qscan())
cmd += scan
# scan
else:
devname = self.deviceName.text()
if not devname:
devname = self._devices[self.deviceList.currentRow()][0]
if self.scanCentered.isChecked():
cmdname = 'cscan'
else:
cmdname = 'scan'
params = [devname]
for (_pn, ctl, fn) in (('start', self.scanStart, tofloat),
('step', self.scanStep, tofloat),
('numsteps', self.scanNumsteps, toint),
('preset', self.scanPreset, tofloat)):
val = fn(ctl.text())
params.append(str(val))
cmd = timeest(self.calc_scan())
cmd += '%s(%s)' % (cmdname, ', '.join(params))
return cmd + '\n'
def createCommand(self):
self.addCode.emit(self._getCommand())
|
[
"nicos.guisupport.qt.pyqtSignal",
"nicos.guisupport.qt.QButtonGroup",
"nicos.clients.gui.utils.loadUi",
"nicos.guisupport.utils.DoubleValidator",
"nicos.guisupport.qt.QIntValidator",
"nicos.guisupport.qt.QDialog.__init__",
"nicos.clients.gui.utils.DlgPresets"
] |
[((1770, 1785), 'nicos.guisupport.qt.pyqtSignal', 'pyqtSignal', (['str'], {}), '(str)\n', (1780, 1785), False, 'from nicos.guisupport.qt import QButtonGroup, QDialog, QIntValidator, pyqtSignal\n'), ((1847, 1877), 'nicos.guisupport.qt.QDialog.__init__', 'QDialog.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1863, 1877), False, 'from nicos.guisupport.qt import QButtonGroup, QDialog, QIntValidator, pyqtSignal\n'), ((1886, 1915), 'nicos.clients.gui.utils.loadUi', 'loadUi', (['self', '"""tools/scan.ui"""'], {}), "(self, 'tools/scan.ui')\n", (1892, 1915), False, 'from nicos.clients.gui.utils import DlgPresets, loadUi\n'), ((1948, 1962), 'nicos.guisupport.qt.QButtonGroup', 'QButtonGroup', ([], {}), '()\n', (1960, 1962), False, 'from nicos.guisupport.qt import QButtonGroup, QDialog, QIntValidator, pyqtSignal\n'), ((2109, 2123), 'nicos.guisupport.qt.QButtonGroup', 'QButtonGroup', ([], {}), '()\n', (2121, 2123), False, 'from nicos.guisupport.qt import QButtonGroup, QDialog, QIntValidator, pyqtSignal\n'), ((2446, 2460), 'nicos.guisupport.qt.QButtonGroup', 'QButtonGroup', ([], {}), '()\n', (2458, 2460), False, 'from nicos.guisupport.qt import QButtonGroup, QDialog, QIntValidator, pyqtSignal\n'), ((5010, 5031), 'nicos.guisupport.utils.DoubleValidator', 'DoubleValidator', (['self'], {}), '(self)\n', (5025, 5031), False, 'from nicos.guisupport.utils import DoubleValidator\n'), ((5047, 5066), 'nicos.guisupport.qt.QIntValidator', 'QIntValidator', (['self'], {}), '(self)\n', (5060, 5066), False, 'from nicos.guisupport.qt import QButtonGroup, QDialog, QIntValidator, pyqtSignal\n'), ((5884, 6663), 'nicos.clients.gui.utils.DlgPresets', 'DlgPresets', (['"""scaninput"""', "[(self.qscanSingle, 1), (self.qscanCentered, 0), (self.qscanLong, 0), (self\n .qscanTrans, 0), (self.qscanRandom, 0), (self.monitorInput, 10000), (\n self.timeInput, 120), (self.presetTime, 1), (self.presetMonitor, 0), (\n self.hInput, '0.0'), (self.kInput, '0.0'), (self.lInput, '0.0'), (self.\n EInput, '0.0'), (self.deltahInput, '0.0'), (self.deltakInput, '0.0'), (\n self.deltalInput, '0.0'), (self.deltaEInput, '0.0'), (self.deltaqInput,\n '0.0'), (self.stepsInput, 10), (self.scanSingle, 1), (self.scanCentered,\n 0), (self.scanStart, '0.0'), (self.scanStep, '0.0'), (self.scanNumsteps,\n '0'), (self.scanPreset, '0.0'), (self.deviceList, 'om [deg]'), (self.\n deviceName, ''), (self.scanMovetime, '0'), (self.tabWidget, 0)]"], {}), "('scaninput', [(self.qscanSingle, 1), (self.qscanCentered, 0), (\n self.qscanLong, 0), (self.qscanTrans, 0), (self.qscanRandom, 0), (self.\n monitorInput, 10000), (self.timeInput, 120), (self.presetTime, 1), (\n self.presetMonitor, 0), (self.hInput, '0.0'), (self.kInput, '0.0'), (\n self.lInput, '0.0'), (self.EInput, '0.0'), (self.deltahInput, '0.0'), (\n self.deltakInput, '0.0'), (self.deltalInput, '0.0'), (self.deltaEInput,\n '0.0'), (self.deltaqInput, '0.0'), (self.stepsInput, 10), (self.\n scanSingle, 1), (self.scanCentered, 0), (self.scanStart, '0.0'), (self.\n scanStep, '0.0'), (self.scanNumsteps, '0'), (self.scanPreset, '0.0'), (\n self.deviceList, 'om [deg]'), (self.deviceName, ''), (self.scanMovetime,\n '0'), (self.tabWidget, 0)])\n", (5894, 6663), False, 'from nicos.clients.gui.utils import DlgPresets, loadUi\n')]
|
from django.conf.urls import url
from . import views
app_name = "home"
urlpatterns = [
url("^$",views.homeView,name = "home"),
url("^team/$",views.teamView, name = "team"),
url("^stats/$",views.statsView,name = "stats"),
]
|
[
"django.conf.urls.url"
] |
[((93, 131), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.homeView'], {'name': '"""home"""'}), "('^$', views.homeView, name='home')\n", (96, 131), False, 'from django.conf.urls import url\n'), ((137, 180), 'django.conf.urls.url', 'url', (['"""^team/$"""', 'views.teamView'], {'name': '"""team"""'}), "('^team/$', views.teamView, name='team')\n", (140, 180), False, 'from django.conf.urls import url\n'), ((187, 233), 'django.conf.urls.url', 'url', (['"""^stats/$"""', 'views.statsView'], {'name': '"""stats"""'}), "('^stats/$', views.statsView, name='stats')\n", (190, 233), False, 'from django.conf.urls import url\n')]
|
import re
person = "xx{{\"asdasd\"+\"lala\"}} }} {1+1}xxx"
regex = r"{{(.*?)}}"
matches = re.finditer(regex, person, re.MULTILINE)
for matchNum, match in enumerate(matches):
eval_result = eval(match.group(1))
person = person.replace(str(match.group()),str(eval_result))
print(person)
|
[
"re.finditer"
] |
[((92, 132), 're.finditer', 're.finditer', (['regex', 'person', 're.MULTILINE'], {}), '(regex, person, re.MULTILINE)\n', (103, 132), False, 'import re\n')]
|
import os.path
import os
import shutil
from dbt.task.base_task import BaseTask
class CleanTask(BaseTask):
def __is_project_path(self, path):
proj_path = os.path.abspath('.')
return not os.path.commonprefix(
[proj_path, os.path.abspath(path)]
) == proj_path
def __is_protected_path(self, path):
abs_path = os.path.abspath(path)
protected_paths = self.project['source-paths'] + \
self.project['test-paths'] + ['.']
        protected_abs_paths = [os.path.abspath(p) for p in protected_paths]
return abs_path in set(protected_abs_paths) or \
self.__is_project_path(abs_path)
def run(self):
for path in self.project['clean-targets']:
if not self.__is_protected_path(path):
shutil.rmtree(path, True)
|
[
"shutil.rmtree",
"os.path.abspath"
] |
[((169, 189), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (184, 189), False, 'import os\n'), ((362, 383), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (377, 383), False, 'import os\n'), ((804, 829), 'shutil.rmtree', 'shutil.rmtree', (['path', '(True)'], {}), '(path, True)\n', (817, 829), False, 'import shutil\n'), ((255, 276), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (270, 276), False, 'import os\n')]
|
#! /usr/bin/env python3
import sys
if len(sys.argv) < 3:
print("usage: add_word.py <filename> <suffix>")
sys.exit(0)
filename = sys.argv[1]
suffix = sys.argv[2]
for line in open(filename):
prefix = line.strip()
result = prefix + " with " + suffix
print(result)
|
[
"sys.exit"
] |
[((115, 126), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (123, 126), False, 'import sys\n')]
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-Agents SavedModel API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from tf_agents.policies import tf_policy
from tf_agents.utils import common
class PolicySaver(object):
"""A `PolicySaver` allows you to save a `tf_policy.Policy` to `SavedModel`.
The `save()` method exports a saved model to the requested export location.
The SavedModel that is exported can be loaded via
`tf.compat.v2.saved_model.load` (or `tf.saved_model.load` in TF2). It
will have available signatures (concrete functions): `action` and
`get_initial_state`.
Usage:
```python
my_policy = agent.collect_policy
saver = PolicySaver(policy, batch_size=1)
for i in range(...):
agent.train(...)
if i % 100 == 0:
saver.save('policy_%d' % global_step)
```
To load and use the saved policy:
```python
policy_step_spec = ...
flat_spec = tf.nest.flatten(time_step_spec)
saved_policy = tf.compat.v2.saved_model.load('policy_0')
get_initial_state = saved_policy.signatures['get_initial_state']
action = saved_policy.signatures['action']
policy_state_dict = get_initial_state(batch_size)
while True:
flat_time_step = tf.nest.flatten(time_step)
time_step_dict = dict(
(spec.name, value) for spec, value in zip(flat_spec, flat_time_step))
policy_step_dict = action(time_step_dict, policy_state_dict)
policy_step = tf.nest.map_structure(
lambda spec: policy_step_dict[spec.name], policy_step_spec)
policy_state_dict = dict(
(k, policy_step_dict[k]) for k in policy_state_dict)
# Calculate the next time_step via interaction with the environment using
# policy_step.action
...
```
"""
def __init__(self, policy, batch_size=None, seed=None):
"""Initialize PolicySaver for TF policy `policy`.
Args:
policy: A TF Policy.
batch_size: The number of batch entries the policy will process at a time.
This must be either `None` (unknown batch size) or a python integer.
seed: Random seed for the `policy.action` call, if any (this should
usually be `None`, except for testing).
Raises:
TypeError: If `policy` is not an instance of TFPolicy.
ValueError: If any of the following `policy` specs are missing names, or
the names collide: `policy.time_step_spec`, `policy.action_spec`,
`policy.policy_state_spec`, `policy.info_spec`.
ValueError: If `batch_size` is not either `None` or a python integer > 0.
NotImplementedError: If created from TF1 with eager mode disabled.
"""
if not tf.executing_eagerly():
# TODO(b/129079730): Add support for TF1 using SavedModelBuilder.
raise NotImplementedError(
'Cannot create a PolicySaver in TF1 without eager mode enabled.')
if not isinstance(policy, tf_policy.Base):
raise TypeError('policy is not a TFPolicy. Saw: %s' % type(policy))
if (batch_size is not None and
(not isinstance(batch_size, int) or batch_size < 1)):
raise ValueError('Expected batch_size == None or python int > 0, saw: %s'
% (batch_size,))
def true_if_missing_or_collision(spec, spec_names):
if not spec.name or spec.name in spec_names:
return True
spec_names.add(spec.name)
return False
def check_spec(spec):
spec_names = set()
checked = [
true_if_missing_or_collision(s, spec_names)
for s in tf.nest.flatten(spec)]
if any(checked):
raise ValueError(
'Specs contain either a missing name or a name collision.\n '
'Spec names: %s\n'
% (tf.nest.map_structure(lambda s: s.name or '<MISSING>', spec),))
check_spec({'time_step_spec': policy.time_step_spec,
'policy_state_spec': policy.policy_state_spec})
check_spec(policy.policy_step_spec)
if batch_size is None:
get_initial_state_fn = policy.get_initial_state
get_initial_state_input_specs = (
tf.TensorSpec(dtype=tf.int32, shape=(), name='batch_size'),)
else:
get_initial_state_fn = functools.partial(
policy.get_initial_state, batch_size=batch_size)
get_initial_state_input_specs = ()
signatures = {
'action': _function_with_signature(
functools.partial(policy.action, seed=seed),
input_specs=(policy.time_step_spec, policy.policy_state_spec),
output_spec=policy.policy_step_spec,
include_batch_dimension=True,
batch_size=batch_size),
'get_initial_state': _function_with_signature(
get_initial_state_fn,
input_specs=get_initial_state_input_specs,
output_spec=policy.policy_state_spec,
include_batch_dimension=False),
}
self._policy = policy
self._signatures = signatures
def save(self, export_dir):
"""Save the policy to the given `export_dir`."""
return tf.saved_model.save(
self._policy, export_dir, signatures=self._signatures)
def _function_with_signature(function,
input_specs,
output_spec,
include_batch_dimension,
batch_size=None):
"""Create a tf.function with a given signature for export.
Args:
function: A callable that can be wrapped in tf.function.
input_specs: A tuple nested specs declaring ordered arguments to function.
output_spec: The nested spec describing the output of the function.
include_batch_dimension: Python bool, whether to prepend a batch dimension
to inputs and outputs.
batch_size: Known batch size, or `None` for unknown. Ignored if
`include_batch_dimension == False`.
Returns:
A `tf.function` with the given input spec that returns a `dict` mapping
output spec keys to corresponding output values.
"""
def _with_batch(spec):
if include_batch_dimension:
return tf.TensorSpec(
shape=tf.TensorShape([batch_size]).concatenate(spec.shape),
name=spec.name,
dtype=spec.dtype)
else:
return spec
flat_input_spec = [
_with_batch(spec) for spec in tf.nest.flatten(input_specs)]
def as_dict(outputs, output_spec):
tf.nest.assert_same_structure(outputs, output_spec)
flat_outputs = tf.nest.flatten(outputs)
flat_names = [s.name for s in tf.nest.flatten(output_spec)]
return dict(zip(flat_names, flat_outputs))
@common.function(input_signature=flat_input_spec)
def function_with_signature(*input_list):
inputs_ = tf.nest.pack_sequence_as(input_specs, input_list)
outputs_ = function(*inputs_)
dict_outputs_ = as_dict(outputs_, output_spec)
return dict_outputs_
return function_with_signature
|
[
"functools.partial",
"tensorflow.nest.assert_same_structure",
"tensorflow.TensorShape",
"tf_agents.utils.common.function",
"tensorflow.nest.flatten",
"tensorflow.saved_model.save",
"tensorflow.executing_eagerly",
"tensorflow.nest.map_structure",
"tensorflow.TensorSpec",
"tensorflow.nest.pack_sequence_as"
] |
[((7165, 7213), 'tf_agents.utils.common.function', 'common.function', ([], {'input_signature': 'flat_input_spec'}), '(input_signature=flat_input_spec)\n', (7180, 7213), False, 'from tf_agents.utils import common\n'), ((5628, 5702), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['self._policy', 'export_dir'], {'signatures': 'self._signatures'}), '(self._policy, export_dir, signatures=self._signatures)\n', (5647, 5702), True, 'import tensorflow as tf\n'), ((6954, 7005), 'tensorflow.nest.assert_same_structure', 'tf.nest.assert_same_structure', (['outputs', 'output_spec'], {}), '(outputs, output_spec)\n', (6983, 7005), True, 'import tensorflow as tf\n'), ((7025, 7049), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['outputs'], {}), '(outputs)\n', (7040, 7049), True, 'import tensorflow as tf\n'), ((7272, 7321), 'tensorflow.nest.pack_sequence_as', 'tf.nest.pack_sequence_as', (['input_specs', 'input_list'], {}), '(input_specs, input_list)\n', (7296, 7321), True, 'import tensorflow as tf\n'), ((3269, 3291), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (3289, 3291), True, 'import tensorflow as tf\n'), ((4786, 4852), 'functools.partial', 'functools.partial', (['policy.get_initial_state'], {'batch_size': 'batch_size'}), '(policy.get_initial_state, batch_size=batch_size)\n', (4803, 4852), False, 'import functools\n'), ((6882, 6910), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['input_specs'], {}), '(input_specs)\n', (6897, 6910), True, 'import tensorflow as tf\n'), ((4686, 4744), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'dtype': 'tf.int32', 'shape': '()', 'name': '"""batch_size"""'}), "(dtype=tf.int32, shape=(), name='batch_size')\n", (4699, 4744), True, 'import tensorflow as tf\n'), ((4981, 5024), 'functools.partial', 'functools.partial', (['policy.action'], {'seed': 'seed'}), '(policy.action, seed=seed)\n', (4998, 5024), False, 'import functools\n'), ((7084, 7112), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['output_spec'], {}), '(output_spec)\n', (7099, 7112), True, 'import tensorflow as tf\n'), ((4135, 4156), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['spec'], {}), '(spec)\n', (4150, 4156), True, 'import tensorflow as tf\n'), ((4328, 4388), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (["(lambda s: s.name or '<MISSING>')", 'spec'], {}), "(lambda s: s.name or '<MISSING>', spec)\n", (4349, 4388), True, 'import tensorflow as tf\n'), ((6687, 6715), 'tensorflow.TensorShape', 'tf.TensorShape', (['[batch_size]'], {}), '([batch_size])\n', (6701, 6715), True, 'import tensorflow as tf\n')]
|
"""SensorStatusView-class."""
import gtk
import cairo
import math
import glib
import utils
class StatusView(gtk.DrawingArea):
"""A view for sensor for displaying status information during recording."""
def __init__(self, controller):
"""Constructor."""
gtk.DrawingArea.__init__(self)
self.controller = controller
self.refresh_interval = 50 # ms
self.set_size_request(200, 120)
self.connect("expose_event", self.on_expose)
self.latest_gui_update = 0
self.draw_que = {}
# indicator tresholds
self.green_thresh = 0.8
self.yellow_thresh = 0.5
# initiate trackstatus loop
glib.idle_add(self.redraw)
def __del__(self):
"""Destructor."""
pass
def add_draw_que(self, itemid, draw_parameters):
"""Add elements to be drawn on the trackstatus canvas."""
self.draw_que[itemid] = draw_parameters
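    # Illustrative draw_parameters dicts (made-up values) matching the keys
    # consumed by draw() below; coordinates are in the 0..1 unit square:
    #   {"type": "circle", "x": 0.5, "y": 0.5, "radius": 0.02,
    #    "r": 1, "g": 0, "b": 0, "o": 1}
    #   {"type": "text", "txt": "recording...", "r": 0, "g": 1, "b": 0, "o": 1}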
def add_model(self, model):
"""Add a model to the view."""
model.on("play_image", self.on_play_image)
model.on("play_movie", self.on_play_movie)
model.on("draw_que_updated", self.clear_draw_que)
model.on("add_draw_que", self.add_draw_que)
def draw(self, ctx):
"""Draw the canvas."""
# wallpaper
ctx.set_source_rgb(0., 0., 0.)
ctx.rectangle(0, 0, 1, 1) # (0, 0, 1, .9)
ctx.fill()
# draw all the active aois to observer window
# draw the information from controller to the trackstatus-window
ctx.set_line_width(0.005)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
txtstart = 0.05
for i in sorted(self.draw_que):
item = self.draw_que[i]
if "r" and "g" and "b" and "o" in item:
ctx.set_source_rgba(item["r"], item["g"], item["b"], item["o"])
itype = item["type"]
if itype == "rect":
# get all the extra information to be presented
aoi = utils.aoi_from_experiment_to_cairo(item["aoi"])
# draw the rectangle
ctx.rectangle(aoi[0], aoi[1], aoi[2], aoi[3])
ctx.fill()
elif itype == "aoi":
# check if aoi is circular or rect
if len(item["aoi"]) == 3:
# circle
ctx.arc(item["aoi"][0], item["aoi"][1], item["aoi"][2],
0, 2 * math.pi)
ctx.stroke()
else:
# rectangular
# get all the extra information to be presented
aoi = utils.aoi_from_experiment_to_cairo(item["aoi"])
# draw the rectangle
ctx.rectangle(aoi[0], aoi[1], aoi[2], aoi[3])
ctx.stroke()
elif itype == "circle":
ctx.arc(item["x"], item["y"], item["radius"], 0, 2 * math.pi)
ctx.fill()
elif itype == "text":
txt = item["txt"]
ctx.set_source_rgb(0.0, 1.0, 0.0)
ctx.set_font_size(0.05)
ctx.move_to(0.01, txtstart)
ctx.show_text(txt)
txtstart += 0.05
glib.timeout_add(self.refresh_interval, self.redraw)
def on_expose(self, widget, event):
"""Callback for expose_event."""
context = widget.window.cairo_create()
context.rectangle(event.area.x, event.area.y, event.area.width,
event.area.height)
context.clip()
rect = widget.get_allocation()
context.scale(rect.width, rect.height)
self.draw(context)
return False
def on_play_image(self, stmnum, aoi):
"""Callback for play_image signal."""
self.draw_que["maoi"+str(stmnum)] = {"type": "aoi", "r": 0, "g": 1,
"b": 0, "o": 1, "aoi": aoi}
def on_play_movie(self, stmnum, aoi):
"""Callback for play_movie signal."""
self.draw_que["iaoi"+str(stmnum)] = {"type": "aoi", "r": 0, "g": 1,
"b": 0, "o": 1, "aoi": aoi}
def clear_draw_que(self):
"""Clear all draw-elements."""
self.draw_que = {}
def redraw(self):
"""Callback for the idle_add drawing-loop."""
if self.window:
alloc = self.get_allocation()
rect = gtk.gdk.Rectangle(0, 0, alloc.width, alloc.height)
self.window.invalidate_rect(rect, True)
self.window.process_updates(True)
def remove_draw_que(self, key):
"""
Remove element from the trackstatus canvas.
Parameter is an id of the
element. Reserved word: "all" clears everything from the queue.
"""
if key in self.draw_que:
self.draw_que.pop(key)
def remove_model(self, model):
"""Add a model to the view."""
model.remove_listener("play_image", self.on_play_image)
model.remove_listener("play_movie", self.on_play_movie)
model.remove_listener("draw_que_updated", self.clear_draw_que)
model.remove_listener("add_draw_que", self.add_draw_que)
def stop(self):
"""Some other views might want to stop loops."""
return False
|
[
"glib.idle_add",
"gtk.DrawingArea.__init__",
"gtk.gdk.Rectangle",
"glib.timeout_add",
"utils.aoi_from_experiment_to_cairo"
] |
[((281, 311), 'gtk.DrawingArea.__init__', 'gtk.DrawingArea.__init__', (['self'], {}), '(self)\n', (305, 311), False, 'import gtk\n'), ((686, 712), 'glib.idle_add', 'glib.idle_add', (['self.redraw'], {}), '(self.redraw)\n', (699, 712), False, 'import glib\n'), ((3257, 3309), 'glib.timeout_add', 'glib.timeout_add', (['self.refresh_interval', 'self.redraw'], {}), '(self.refresh_interval, self.redraw)\n', (3273, 3309), False, 'import glib\n'), ((4450, 4500), 'gtk.gdk.Rectangle', 'gtk.gdk.Rectangle', (['(0)', '(0)', 'alloc.width', 'alloc.height'], {}), '(0, 0, alloc.width, alloc.height)\n', (4467, 4500), False, 'import gtk\n'), ((2011, 2058), 'utils.aoi_from_experiment_to_cairo', 'utils.aoi_from_experiment_to_cairo', (["item['aoi']"], {}), "(item['aoi'])\n", (2045, 2058), False, 'import utils\n'), ((2646, 2693), 'utils.aoi_from_experiment_to_cairo', 'utils.aoi_from_experiment_to_cairo', (["item['aoi']"], {}), "(item['aoi'])\n", (2680, 2693), False, 'import utils\n')]
|
import string
import random
def count_words(text):
return len(text.split())
def count_characters(text):
return len(text)
def random_string_generator(size=32, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for x in range(size))
|
[
"random.choice"
] |
[((236, 256), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (249, 256), False, 'import random\n')]
|
# Write a program that reads a person's year of birth based on their age and says:
# whether they still have to enlist for military service,
# whether it is time to enlist,
# or whether the enlistment period has already passed.
# The program must tell how much time is left until, or has passed since, enlistment.
from datetime import date
idade = int(input('Qual é a sua idade? '))
b = date.today()
c = b.year
nascimento = c - idade
if 18 > idade > 16:
print('O seu ano de nascimento é {} e você não está na hora de se alistar!'.format(nascimento))
print('Falta 1 ano para o seu alistamento.')
elif idade < 18:
print('O seu ano de nascimento é {} e você não está na hora de se alistar!'.format(nascimento))
print('Faltam {} anos para o seu alistamento.'.format(18 - idade))
print('Seu alistamento deve ocorrer em {} anos.'.format(nascimento + 18))
elif 20 > idade > 18:
print('Seu ano de nascimento é {} e você já se alistou!'.format(nascimento))
print('Você se alistou à exatamente 1 ano atrás, que foi em {}.'.format(c - 1))
elif idade > 18:
print('Seu ano de nascimento é {} e você já se alistou!'.format(nascimento))
print('Você se alistou à {} anos atrás, que foi em {}.'.format(idade - 18, nascimento + 18))
else:
print('Seu ano de nascimento é exatamente {}.'.format(nascimento))
print('Você deve se alistar IMEDIATAMENTE!')
|
[
"datetime.date.today"
] |
[((358, 370), 'datetime.date.today', 'date.today', ([], {}), '()\n', (368, 370), False, 'from datetime import date\n')]
|
from flask import request, jsonify, g, current_app
from app.api import bp
from app.api.auth import token_auth
from app.api.errors import bad_request, error_response
from app.extensions import db
from app.models import Cradle, DDL
from datetime import datetime
from app.utils.decorator import permission_required, Permission
def validation_check_of_create_ddl(data):
message = {}
if 'body' not in data or not data.get('body').strip():
message['body'] = 'Please provide valid body.'
if 'deadline' not in data or not data.get('deadline').strip():
message['deadline'] = 'Please provide valid deadline.'
    elif datetime.strptime(data.get('deadline'), "%Y-%m-%d %H:%M") <= datetime.now():  # TODO: convert deadline to a datetime
message['deadline'] = 'Deadline should not be earlier than current time.'
if 'cradle' not in data or not data.get('cradle').strip():
message['cradle'] = 'Please provide valid cradle.'
return message
@bp.route('/ddls/', methods=['POST'])
@token_auth.login_required
@permission_required(Permission.SPONSOR)
def create_ddl():
'''
    Add a DDL to a cradle (incubator).
:return:
'''
data = request.get_json()
if not data:
return bad_request('You must micropub JSON data.')
error_message = validation_check_of_create_ddl(data)
if error_message:
return bad_request(error_message)
data['deadline'] = datetime.strptime(data.get('deadline'), "%Y-%m-%d %H:%M")
cradle = Cradle.query.get_or_404(int(data.get('cradle')))
if g.current_user != cradle.sponsor:
return error_response(403)
ddl = DDL()
ddl.from_dict(data)
db.session.add(ddl)
cradle.add_ddl(ddl)
db.session.commit()
data = ddl.to_dict()
return jsonify(data)
@bp.route('/ddls/<int:id>', methods=['DELETE'])
@token_auth.login_required
@permission_required(Permission.SPONSOR)
def delete_ddl(id):
'''
    Delete a DDL.
:param id: DDL ID
:return:
'''
ddl = DDL.query.get_or_404(id)
if g.current_user != ddl.cradle.sponsor:
return error_response(403)
db.session.delete(ddl)
db.session.commit()
data = ddl.to_dict() # TODO
return jsonify(data)
@bp.route('/ddls/', methods=['PUT'])
@token_auth.login_required
@permission_required(Permission.SPONSOR)
def update_ddl(id):
'''
    Update a DDL.
:param id: DDL ID
:return:
'''
data = request.get_json()
if not data:
return bad_request('You must micropub JSON data.')
error_message = validation_check_of_create_ddl(data)
if error_message:
return bad_request(error_message)
ddl = DDL.query.get_or_404(id)
if g.current_user != ddl.cradle.sponsor:
return error_response(403)
ddl.from_dict(data)
db.session.commit()
data = ddl.to_dict()
return jsonify(data)
@bp.route('/ddls/<int:id>', methods=['GET'])
@token_auth.login_required
def get_ddl(id):
'''
    Get a DDL.
:param id: DDL ID
:return:
'''
data = request.get_json()
if not data:
return bad_request('You must micropub JSON data.')
error_message = validation_check_of_create_ddl(data)
if error_message:
return bad_request(error_message)
ddl = DDL.query.get_or_404(id)
if g.current_user != ddl.cradle.sponsor:
return error_response(403)
ddl.from_dict(data)
db.session.commit()
data = ddl.to_dict()
return jsonify(data)
|
[
"app.api.errors.bad_request",
"app.utils.decorator.permission_required",
"app.extensions.db.session.add",
"datetime.datetime.now",
"flask.jsonify",
"app.models.DDL",
"app.api.bp.route",
"app.api.errors.error_response",
"app.models.DDL.query.get_or_404",
"app.extensions.db.session.delete",
"flask.request.get_json",
"app.extensions.db.session.commit"
] |
[((961, 997), 'app.api.bp.route', 'bp.route', (['"""/ddls/"""'], {'methods': "['POST']"}), "('/ddls/', methods=['POST'])\n", (969, 997), False, 'from app.api import bp\n'), ((1026, 1065), 'app.utils.decorator.permission_required', 'permission_required', (['Permission.SPONSOR'], {}), '(Permission.SPONSOR)\n', (1045, 1065), False, 'from app.utils.decorator import permission_required, Permission\n'), ((1743, 1789), 'app.api.bp.route', 'bp.route', (['"""/ddls/<int:id>"""'], {'methods': "['DELETE']"}), "('/ddls/<int:id>', methods=['DELETE'])\n", (1751, 1789), False, 'from app.api import bp\n'), ((1818, 1857), 'app.utils.decorator.permission_required', 'permission_required', (['Permission.SPONSOR'], {}), '(Permission.SPONSOR)\n', (1837, 1857), False, 'from app.utils.decorator import permission_required, Permission\n'), ((2167, 2202), 'app.api.bp.route', 'bp.route', (['"""/ddls/"""'], {'methods': "['PUT']"}), "('/ddls/', methods=['PUT'])\n", (2175, 2202), False, 'from app.api import bp\n'), ((2231, 2270), 'app.utils.decorator.permission_required', 'permission_required', (['Permission.SPONSOR'], {}), '(Permission.SPONSOR)\n', (2250, 2270), False, 'from app.utils.decorator import permission_required, Permission\n'), ((2809, 2852), 'app.api.bp.route', 'bp.route', (['"""/ddls/<int:id>"""'], {'methods': "['GET']"}), "('/ddls/<int:id>', methods=['GET'])\n", (2817, 2852), False, 'from app.api import bp\n'), ((1139, 1157), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1155, 1157), False, 'from flask import request, jsonify, g, current_app\n'), ((1587, 1592), 'app.models.DDL', 'DDL', ([], {}), '()\n', (1590, 1592), False, 'from app.models import Cradle, DDL\n'), ((1621, 1640), 'app.extensions.db.session.add', 'db.session.add', (['ddl'], {}), '(ddl)\n', (1635, 1640), False, 'from app.extensions import db\n'), ((1669, 1688), 'app.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1686, 1688), False, 'from app.extensions import db\n'), ((1726, 1739), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (1733, 1739), False, 'from flask import request, jsonify, g, current_app\n'), ((1950, 1974), 'app.models.DDL.query.get_or_404', 'DDL.query.get_or_404', (['id'], {}), '(id)\n', (1970, 1974), False, 'from app.models import Cradle, DDL\n'), ((2059, 2081), 'app.extensions.db.session.delete', 'db.session.delete', (['ddl'], {}), '(ddl)\n', (2076, 2081), False, 'from app.extensions import db\n'), ((2086, 2105), 'app.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2103, 2105), False, 'from app.extensions import db\n'), ((2150, 2163), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (2157, 2163), False, 'from flask import request, jsonify, g, current_app\n'), ((2373, 2391), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2389, 2391), False, 'from flask import request, jsonify, g, current_app\n'), ((2601, 2625), 'app.models.DDL.query.get_or_404', 'DDL.query.get_or_404', (['id'], {}), '(id)\n', (2621, 2625), False, 'from app.models import Cradle, DDL\n'), ((2735, 2754), 'app.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2752, 2754), False, 'from app.extensions import db\n'), ((2792, 2805), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (2799, 2805), False, 'from flask import request, jsonify, g, current_app\n'), ((2979, 2997), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2995, 2997), False, 'from flask import request, jsonify, g, current_app\n'), ((3207, 3231), 
'app.models.DDL.query.get_or_404', 'DDL.query.get_or_404', (['id'], {}), '(id)\n', (3227, 3231), False, 'from app.models import Cradle, DDL\n'), ((3341, 3360), 'app.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3358, 3360), False, 'from app.extensions import db\n'), ((3398, 3411), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (3405, 3411), False, 'from flask import request, jsonify, g, current_app\n'), ((1190, 1233), 'app.api.errors.bad_request', 'bad_request', (['"""You must micropub JSON data."""'], {}), "('You must micropub JSON data.')\n", (1201, 1233), False, 'from app.api.errors import bad_request, error_response\n'), ((1329, 1355), 'app.api.errors.bad_request', 'bad_request', (['error_message'], {}), '(error_message)\n', (1340, 1355), False, 'from app.api.errors import bad_request, error_response\n'), ((1556, 1575), 'app.api.errors.error_response', 'error_response', (['(403)'], {}), '(403)\n', (1570, 1575), False, 'from app.api.errors import bad_request, error_response\n'), ((2035, 2054), 'app.api.errors.error_response', 'error_response', (['(403)'], {}), '(403)\n', (2049, 2054), False, 'from app.api.errors import bad_request, error_response\n'), ((2424, 2467), 'app.api.errors.bad_request', 'bad_request', (['"""You must micropub JSON data."""'], {}), "('You must micropub JSON data.')\n", (2435, 2467), False, 'from app.api.errors import bad_request, error_response\n'), ((2563, 2589), 'app.api.errors.bad_request', 'bad_request', (['error_message'], {}), '(error_message)\n', (2574, 2589), False, 'from app.api.errors import bad_request, error_response\n'), ((2686, 2705), 'app.api.errors.error_response', 'error_response', (['(403)'], {}), '(403)\n', (2700, 2705), False, 'from app.api.errors import bad_request, error_response\n'), ((3030, 3073), 'app.api.errors.bad_request', 'bad_request', (['"""You must micropub JSON data."""'], {}), "('You must micropub JSON data.')\n", (3041, 3073), False, 'from app.api.errors import bad_request, error_response\n'), ((3169, 3195), 'app.api.errors.bad_request', 'bad_request', (['error_message'], {}), '(error_message)\n', (3180, 3195), False, 'from app.api.errors import bad_request, error_response\n'), ((3292, 3311), 'app.api.errors.error_response', 'error_response', (['(403)'], {}), '(403)\n', (3306, 3311), False, 'from app.api.errors import bad_request, error_response\n'), ((698, 712), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (710, 712), False, 'from datetime import datetime\n')]
|
from datetime import date
from django.test import TestCase
from django.db import IntegrityError
from billings.models import Bill
from wallets.tests.factories import CreditCardFactory, WalletFactory
from users.tests.factories import UserFactory
from .factories import BillFactory
class TestBillModel(TestCase):
"""Test cases for Bill model."""
def setUp(self):
self.user = UserFactory()
self.wallet = WalletFactory(user=self.user)
self.credit_card = CreditCardFactory(
wallet=self.wallet,
number='4729333912967716'
)
self.bill = BillFactory(credit_card=self.credit_card)
def test_create(self):
self.assertIsInstance(self.bill, Bill)
def test_str(self):
expected_result = 'Credit card: 1 expires_at: 2018-12-31 value: 100.00'
self.assertEqual(str(self.bill), expected_result)
def test_unique_monthly_bill(self):
bill_date = date(2018, 10, 10)
BillFactory(
credit_card=self.credit_card,
expires_at=bill_date
)
with self.assertRaises(IntegrityError):
BillFactory(
credit_card=self.credit_card,
expires_at=bill_date
)
|
[
"wallets.tests.factories.WalletFactory",
"datetime.date",
"wallets.tests.factories.CreditCardFactory",
"users.tests.factories.UserFactory"
] |
[((394, 407), 'users.tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (405, 407), False, 'from users.tests.factories import UserFactory\n'), ((430, 459), 'wallets.tests.factories.WalletFactory', 'WalletFactory', ([], {'user': 'self.user'}), '(user=self.user)\n', (443, 459), False, 'from wallets.tests.factories import CreditCardFactory, WalletFactory\n'), ((487, 551), 'wallets.tests.factories.CreditCardFactory', 'CreditCardFactory', ([], {'wallet': 'self.wallet', 'number': '"""4729333912967716"""'}), "(wallet=self.wallet, number='4729333912967716')\n", (504, 551), False, 'from wallets.tests.factories import CreditCardFactory, WalletFactory\n'), ((947, 965), 'datetime.date', 'date', (['(2018)', '(10)', '(10)'], {}), '(2018, 10, 10)\n', (951, 965), False, 'from datetime import date\n')]
|
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import re
import os
import time
import datetime
import gc
import sys
import shutil
from input_helpers import InputHelper
from siamese_network_semantic import SiameseLSTMw2v
from tensorflow.contrib import learn
import gzip
from random import random
# Parameters
# ==================================================
tf.flags.DEFINE_string("training_filepath", "data/train_snli.txt", "training file path (default: None)")
tf.flags.DEFINE_string("output_dirpath", None, "output directory path (default: None)")
tf.flags.DEFINE_float("y_scale", 5.0, "scale of y in training file (default: 5.0)")
tf.flags.DEFINE_integer("y_position", 0, "position of y in training file (default: 0)")
tf.flags.DEFINE_integer("x1_position", 0, "position of x1 in training file (default: 1)")
tf.flags.DEFINE_integer("x2_position", 0, "position of x2 in training file (default: 2)")
tf.flags.DEFINE_boolean("header", False, "if training file has a header (default: False)")
# Embedding parameters
tf.flags.DEFINE_string("word2vec_model", "wiki.simple.vec", "word2vec pre-trained embeddings file (default: None)")
tf.flags.DEFINE_string("word2vec_format", "text", "word2vec pre-trained embeddings file format (bin/text/textgz)(default: None)")
tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 300)")
# RNN stack parameters
tf.flags.DEFINE_boolean("tied", True, "Different side weights are tied / untied (default: True)")
tf.flags.DEFINE_float("side1_dropout", 1.0, "Dropout keep probability (default: 1.0)")
tf.flags.DEFINE_float("side2_dropout", 1.0, "Dropout keep probability (default: 1.0)")
tf.flags.DEFINE_list("side1_nodes", [50, 50, 50], "Number of nodes in layers for Side_1 (default:50,50,50)")
tf.flags.DEFINE_list("side2_nodes", [50, 50, 50], "Number of nodes in layers for Side_2 (default:50,50,50)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 300, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("max_iterations", 500000, "Maximum number of iterations")
tf.flags.DEFINE_integer("evaluate_every", 1000, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 1000, "Save model after this many steps (default: 100)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)
print("EXECUTION PARAMETERS:")
for attr, flag in sorted(FLAGS.__flags.items()):
print("{} = {}".format(attr.upper(), flag.value))
if FLAGS.training_filepath==None:
print("Input File path is empty. use --training_filepath argument.")
exit()
max_document_length=15
inpH = InputHelper()
train_set, dev_set, vocab_processor, sum_no_of_batches = inpH.getDataSets(
FLAGS.training_filepath, FLAGS.y_position, FLAGS.x1_position, FLAGS.x2_position, FLAGS.header, max_document_length, 10, FLAGS.batch_size)
trainableEmbeddings=False
if FLAGS.word2vec_model==None:
trainableEmbeddings=True
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
"You are using word embedding based semantic similarity but "
"word2vec model path is empty. It is Recommended to use --word2vec_model argument. "
"Otherwise now the code is automatically trying to learn embedding values (may not help in accuracy)"
"\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
else:
inpH.loadW2V(FLAGS.word2vec_model, FLAGS.word2vec_format)
# Training
# ==================================================
with tf.Graph().as_default():
sess_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=sess_conf)
with sess.as_default():
siameseModel = SiameseLSTMw2v(
sequence_length=max_document_length,
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
batch_size=FLAGS.batch_size,
trainableEmbeddings=trainableEmbeddings,
tied=FLAGS.tied,
side1_nodes=FLAGS.side1_nodes,
side2_nodes=FLAGS.side2_nodes,
)
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(siameseModel.loss)
train_op_set = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", str(int(time.time())))) \
if FLAGS.output_dirpath is None else \
os.path.abspath(FLAGS.output_dirpath)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
print("Writing to %s." % out_dir)
# Summaries for loss pcc rho mse
loss_summary = tf.summary.scalar("loss", siameseModel.loss)
pcc_summary = tf.summary.scalar("pcc", siameseModel.pcc)
rho_summary = tf.summary.scalar("rho", siameseModel.rho)
mse_summary = tf.summary.scalar("mse", siameseModel.mse)
# Train Summaries
train_summary_op = tf.summary.merge([loss_summary, pcc_summary, rho_summary, mse_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, pcc_summary, rho_summary, mse_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
# Write vocabulary
vocab_processor.save(os.path.join(checkpoint_dir, "vocab"))
# Initialize all variables
sess.run(tf.global_variables_initializer())
graph_def = tf.get_default_graph().as_graph_def()
graphpb_txt = str(graph_def)
with open(os.path.join(checkpoint_dir, "graphpb.txt"), 'w') as f:
f.write(graphpb_txt)
if FLAGS.word2vec_model :
# initial matrix with random uniform
initW = np.random.uniform(-0.25, 0.25, (len(vocab_processor.vocabulary_), FLAGS.embedding_dim))
#initW = np.zeros(shape=(len(vocab_processor.vocabulary_), FLAGS.embedding_dim))
# load any vectors from the word2vec
for w in vocab_processor.vocabulary_._mapping:
arr=[]
s = re.sub('[^0-9a-zA-Z]+', '', w)
if w in inpH.pre_emb:
arr=inpH.pre_emb[w]
elif w.lower() in inpH.pre_emb:
arr=inpH.pre_emb[w.lower()]
elif s in inpH.pre_emb:
arr=inpH.pre_emb[s]
elif s.isdigit():
arr=inpH.pre_emb["zero"]
if len(arr)>0:
idx = vocab_processor.vocabulary_.get(w)
initW[idx]=np.asarray(arr).astype(np.float32)
inpH.deletePreEmb()
gc.collect()
sess.run(siameseModel.W.assign(initW))
def train_step(x1_batch, x2_batch, y_batch, i):
random_value = random()
feed_dict = {
siameseModel.input_x1: x1_batch if random_value > 0.5 else x2_batch,
siameseModel.input_x2: x2_batch if random_value > 0.5 else x1_batch,
siameseModel.input_y_norm: map(lambda x: x / FLAGS.y_scale, y_batch),
siameseModel.side1_dropout: FLAGS.side1_dropout,
siameseModel.side2_dropout: FLAGS.side2_dropout,
}
_, step, loss, pcc, rho, mse, dist, summaries = sess.run([train_op_set, global_step, siameseModel.loss, siameseModel.pcc, siameseModel.rho, siameseModel.mse, siameseModel.distance, train_summary_op], feed_dict)
time_str = datetime.datetime.now().isoformat()
if i % 100 == 0:
print("TRAIN {}: step {}, loss {}, pcc: {}, rho: {}, mse: {}".format(time_str, step, loss, pcc, rho, mse * FLAGS.y_scale))
train_summary_writer.add_summary(summaries, step)
def dev_step(x1_batch, x2_batch, y_batch, i):
random_value = random()
feed_dict = {
siameseModel.input_x1: x1_batch if random_value > 0.5 else x2_batch,
siameseModel.input_x2: x2_batch if random_value > 0.5 else x1_batch,
siameseModel.input_y_norm: map(lambda x: x / FLAGS.y_scale, y_batch),
siameseModel.side1_dropout: 1.0,
siameseModel.side2_dropout: 1.0,
}
step, loss, pcc, rho, mse, summaries = sess.run([global_step, siameseModel.loss, siameseModel.pcc, siameseModel.rho, siameseModel.mse, dev_summary_op], feed_dict)
time_str = datetime.datetime.now().isoformat()
if i % 100 == 0:
print("DEV {}: step {}, loss {}, pcc {}, rho {}, mse: {}".format(time_str, step, loss, pcc, rho, mse * FLAGS.y_scale))
dev_summary_writer.add_summary(summaries, step)
return mse * FLAGS.y_scale
# Generate batches
batches = inpH.batch_iter(list(zip(train_set[0], train_set[1], train_set[2])), FLAGS.batch_size, FLAGS.num_epochs)
max_validation_mse=1e256
n_iterations = sum_no_of_batches * FLAGS.num_epochs
n_iterations = n_iterations if n_iterations < FLAGS.max_iterations else FLAGS.max_iterations
print('Total number of iterations %s.' % n_iterations)
for nn in xrange(n_iterations):
batch = batches.next()
if len(batch)<1:
continue
x1_batch, x2_batch, y_batch = zip(*batch)
if len(y_batch)<1:
continue
train_step(x1_batch, x2_batch, y_batch, nn)
step = tf.train.global_step(sess, global_step)
current_evaluation_total_mse = 0.0
if step % FLAGS.evaluate_every == 0:
dev_batches = inpH.batch_iter(list(zip(dev_set[0], dev_set[1], dev_set[2])), FLAGS.batch_size, 1)
i = 0
for db in dev_batches:
if len(db)<1:
continue
x1_dev_b, x2_dev_b, y_dev_b = zip(*db)
if len(y_dev_b)<1:
continue
current_evaluation_total_mse = current_evaluation_total_mse + dev_step(x1_dev_b, x2_dev_b, y_dev_b, i)
i = i + 1
if current_evaluation_total_mse <= max_validation_mse:
max_validation_mse = current_evaluation_total_mse
saver.save(sess, checkpoint_prefix, global_step=step)
tf.train.write_graph(sess.graph.as_graph_def(), checkpoint_prefix, "graph"+str(nn)+".pb", as_text=False)
print("Saved model {} with total_mse={} checkpoint to {}.".format(nn, max_validation_mse, checkpoint_prefix))
|
[
"tensorflow.nn.zero_fraction",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"tensorflow.Variable",
"gc.collect",
"shutil.rmtree",
"tensorflow.summary.merge",
"tensorflow.get_default_graph",
"os.path.join",
"os.path.abspath",
"os.path.exists",
"input_helpers.InputHelper",
"tensorflow.summary.FileWriter",
"re.sub",
"tensorflow.flags.DEFINE_list",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.train.global_step",
"datetime.datetime.now",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.Session",
"random.random",
"tensorflow.Graph",
"tensorflow.flags.DEFINE_integer",
"tensorflow.flags.DEFINE_string",
"os.makedirs",
"tensorflow.flags.DEFINE_float",
"time.time",
"tensorflow.train.AdamOptimizer"
] |
[((381, 489), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""training_filepath"""', '"""data/train_snli.txt"""', '"""training file path (default: None)"""'], {}), "('training_filepath', 'data/train_snli.txt',\n 'training file path (default: None)')\n", (403, 489), True, 'import tensorflow as tf\n'), ((486, 577), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""output_dirpath"""', 'None', '"""output directory path (default: None)"""'], {}), "('output_dirpath', None,\n 'output directory path (default: None)')\n", (508, 577), True, 'import tensorflow as tf\n'), ((574, 661), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""y_scale"""', '(5.0)', '"""scale of y in training file (default: 5.0)"""'], {}), "('y_scale', 5.0,\n 'scale of y in training file (default: 5.0)')\n", (595, 661), True, 'import tensorflow as tf\n'), ((658, 749), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""y_position"""', '(0)', '"""position of y in training file (default: 0)"""'], {}), "('y_position', 0,\n 'position of y in training file (default: 0)')\n", (681, 749), True, 'import tensorflow as tf\n'), ((746, 839), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""x1_position"""', '(0)', '"""position of x1 in training file (default: 1)"""'], {}), "('x1_position', 0,\n 'position of x1 in training file (default: 1)')\n", (769, 839), True, 'import tensorflow as tf\n'), ((836, 929), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""x2_position"""', '(0)', '"""position of x2 in training file (default: 2)"""'], {}), "('x2_position', 0,\n 'position of x2 in training file (default: 2)')\n", (859, 929), True, 'import tensorflow as tf\n'), ((926, 1020), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""header"""', '(False)', '"""if training file has a header (default: False)"""'], {}), "('header', False,\n 'if training file has a header (default: False)')\n", (949, 1020), True, 'import tensorflow as tf\n'), ((1041, 1160), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""word2vec_model"""', '"""wiki.simple.vec"""', '"""word2vec pre-trained embeddings file (default: None)"""'], {}), "('word2vec_model', 'wiki.simple.vec',\n 'word2vec pre-trained embeddings file (default: None)')\n", (1063, 1160), True, 'import tensorflow as tf\n'), ((1157, 1295), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""word2vec_format"""', '"""text"""', '"""word2vec pre-trained embeddings file format (bin/text/textgz)(default: None)"""'], {}), "('word2vec_format', 'text',\n 'word2vec pre-trained embeddings file format (bin/text/textgz)(default: None)'\n )\n", (1179, 1295), True, 'import tensorflow as tf\n'), ((1287, 1392), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""embedding_dim"""', '(300)', '"""Dimensionality of character embedding (default: 300)"""'], {}), "('embedding_dim', 300,\n 'Dimensionality of character embedding (default: 300)')\n", (1310, 1392), True, 'import tensorflow as tf\n'), ((1413, 1514), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""tied"""', '(True)', '"""Different side weights are tied / untied (default: True)"""'], {}), "('tied', True,\n 'Different side weights are tied / untied (default: True)')\n", (1436, 1514), True, 'import tensorflow as tf\n'), ((1511, 1601), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""side1_dropout"""', '(1.0)', '"""Dropout keep probability (default: 1.0)"""'], {}), "('side1_dropout', 1.0,\n 
'Dropout keep probability (default: 1.0)')\n", (1532, 1601), True, 'import tensorflow as tf\n'), ((1598, 1688), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""side2_dropout"""', '(1.0)', '"""Dropout keep probability (default: 1.0)"""'], {}), "('side2_dropout', 1.0,\n 'Dropout keep probability (default: 1.0)')\n", (1619, 1688), True, 'import tensorflow as tf\n'), ((1685, 1797), 'tensorflow.flags.DEFINE_list', 'tf.flags.DEFINE_list', (['"""side1_nodes"""', '[50, 50, 50]', '"""Number of nodes in layers for Side_1 (default:50,50,50)"""'], {}), "('side1_nodes', [50, 50, 50],\n 'Number of nodes in layers for Side_1 (default:50,50,50)')\n", (1705, 1797), True, 'import tensorflow as tf\n'), ((1794, 1906), 'tensorflow.flags.DEFINE_list', 'tf.flags.DEFINE_list', (['"""side2_nodes"""', '[50, 50, 50]', '"""Number of nodes in layers for Side_2 (default:50,50,50)"""'], {}), "('side2_nodes', [50, 50, 50],\n 'Number of nodes in layers for Side_2 (default:50,50,50)')\n", (1814, 1906), True, 'import tensorflow as tf\n'), ((1926, 1995), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch Size (default: 64)"""'], {}), "('batch_size', 64, 'Batch Size (default: 64)')\n", (1949, 1995), True, 'import tensorflow as tf\n'), ((1996, 2086), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_epochs"""', '(300)', '"""Number of training epochs (default: 200)"""'], {}), "('num_epochs', 300,\n 'Number of training epochs (default: 200)')\n", (2019, 2086), True, 'import tensorflow as tf\n'), ((2083, 2168), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""max_iterations"""', '(500000)', '"""Maximum number of iterations"""'], {}), "('max_iterations', 500000,\n 'Maximum number of iterations')\n", (2106, 2168), True, 'import tensorflow as tf\n'), ((2165, 2282), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""evaluate_every"""', '(1000)', '"""Evaluate model on dev set after this many steps (default: 100)"""'], {}), "('evaluate_every', 1000,\n 'Evaluate model on dev set after this many steps (default: 100)')\n", (2188, 2282), True, 'import tensorflow as tf\n'), ((2279, 2383), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""checkpoint_every"""', '(1000)', '"""Save model after this many steps (default: 100)"""'], {}), "('checkpoint_every', 1000,\n 'Save model after this many steps (default: 100)')\n", (2302, 2383), True, 'import tensorflow as tf\n'), ((2399, 2494), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (2422, 2494), True, 'import tensorflow as tf\n'), ((2491, 2584), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (2514, 2584), True, 'import tensorflow as tf\n'), ((2900, 2913), 'input_helpers.InputHelper', 'InputHelper', ([], {}), '()\n', (2911, 2913), False, 'from input_helpers import InputHelper\n'), ((3766, 3882), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'FLAGS.allow_soft_placement', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n', (3780, 3882), True, 'import tensorflow as 
tf\n'), ((3897, 3925), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_conf'}), '(config=sess_conf)\n', (3907, 3925), True, 'import tensorflow as tf\n'), ((5017, 5049), 'tensorflow.summary.merge', 'tf.summary.merge', (['grad_summaries'], {}), '(grad_summaries)\n', (5033, 5049), True, 'import tensorflow as tf\n'), ((5294, 5317), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (5308, 5317), False, 'import os\n'), ((5435, 5479), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'siameseModel.loss'], {}), "('loss', siameseModel.loss)\n", (5452, 5479), True, 'import tensorflow as tf\n'), ((5496, 5538), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""pcc"""', 'siameseModel.pcc'], {}), "('pcc', siameseModel.pcc)\n", (5513, 5538), True, 'import tensorflow as tf\n'), ((5555, 5597), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rho"""', 'siameseModel.rho'], {}), "('rho', siameseModel.rho)\n", (5572, 5597), True, 'import tensorflow as tf\n'), ((5614, 5656), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mse"""', 'siameseModel.mse'], {}), "('mse', siameseModel.mse)\n", (5631, 5656), True, 'import tensorflow as tf\n'), ((5699, 5797), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, pcc_summary, rho_summary, mse_summary, grad_summaries_merged]'], {}), '([loss_summary, pcc_summary, rho_summary, mse_summary,\n grad_summaries_merged])\n', (5715, 5797), True, 'import tensorflow as tf\n'), ((5816, 5859), 'os.path.join', 'os.path.join', (['out_dir', '"""summaries"""', '"""train"""'], {}), "(out_dir, 'summaries', 'train')\n", (5828, 5859), False, 'import os\n'), ((5885, 5937), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['train_summary_dir', 'sess.graph'], {}), '(train_summary_dir, sess.graph)\n', (5906, 5937), True, 'import tensorflow as tf\n'), ((5976, 6047), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, pcc_summary, rho_summary, mse_summary]'], {}), '([loss_summary, pcc_summary, rho_summary, mse_summary])\n', (5992, 6047), True, 'import tensorflow as tf\n'), ((6068, 6109), 'os.path.join', 'os.path.join', (['out_dir', '"""summaries"""', '"""dev"""'], {}), "(out_dir, 'summaries', 'dev')\n", (6080, 6109), False, 'import os\n'), ((6133, 6183), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['dev_summary_dir', 'sess.graph'], {}), '(dev_summary_dir, sess.graph)\n', (6154, 6183), True, 'import tensorflow as tf\n'), ((6379, 6416), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model"""'], {}), "(checkpoint_dir, 'model')\n", (6391, 6416), False, 'import os\n'), ((4332, 4383), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (4343, 4383), True, 'import tensorflow as tf\n'), ((4400, 4429), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (4422, 4429), True, 'import tensorflow as tf\n'), ((5251, 5288), 'os.path.abspath', 'os.path.abspath', (['FLAGS.output_dirpath'], {}), '(FLAGS.output_dirpath)\n', (5266, 5288), False, 'import os\n'), ((5323, 5345), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (5336, 5345), False, 'import shutil\n'), ((6319, 6355), 'os.path.join', 'os.path.join', (['out_dir', '"""checkpoints"""'], {}), "(out_dir, 'checkpoints')\n", (6331, 6355), False, 'import os\n'), ((6426, 6456), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6440, 6456), False, 'import 
os\n'), ((6462, 6489), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6473, 6489), False, 'import os\n'), ((6515, 6536), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6534, 6536), True, 'import tensorflow as tf\n'), ((6600, 6637), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""vocab"""'], {}), "(checkpoint_dir, 'vocab')\n", (6612, 6637), False, 'import os\n'), ((6680, 6713), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6711, 6713), True, 'import tensorflow as tf\n'), ((7690, 7702), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7700, 7702), False, 'import gc\n'), ((7816, 7824), 'random.random', 'random', ([], {}), '()\n', (7822, 7824), False, 'from random import random\n'), ((8724, 8732), 'random.random', 'random', ([], {}), '()\n', (8730, 8732), False, 'from random import random\n'), ((10127, 10166), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'global_step'], {}), '(sess, global_step)\n', (10147, 10166), True, 'import tensorflow as tf\n'), ((3726, 3736), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3734, 3736), True, 'import tensorflow as tf\n'), ((6729, 6751), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (6749, 6751), True, 'import tensorflow as tf\n'), ((6810, 6853), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""graphpb.txt"""'], {}), "(checkpoint_dir, 'graphpb.txt')\n", (6822, 6853), False, 'import os\n'), ((7262, 7292), 're.sub', 're.sub', (['"""[^0-9a-zA-Z]+"""', '""""""', 'w'], {}), "('[^0-9a-zA-Z]+', '', w)\n", (7268, 7292), False, 'import re\n'), ((4874, 4896), 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['g'], {}), '(g)\n', (4893, 4896), True, 'import tensorflow as tf\n'), ((8416, 8439), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8437, 8439), False, 'import datetime\n'), ((9248, 9271), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9269, 9271), False, 'import datetime\n'), ((5170, 5181), 'time.time', 'time.time', ([], {}), '()\n', (5179, 5181), False, 'import time\n'), ((7627, 7642), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (7637, 7642), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 10:28:35 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
"""
import pickle
from database import postSQL2gpd,gpd2postSQL
import pandas as pd
xian_epsg=32649 #Xi'an WGS84 / UTM zone 49N
wgs84_epsg=4326
poi_classificationName={
0:"delicacy",
1:"hotel",
2:"shopping",
3:"lifeService",
4:"beauty",
5:"spot",
6:"entertainment",
7:"sports",
8:"education",
9:"media",
10:"medicalTreatment",
11:"carService",
12:"trafficFacilities",
13:"finance",
14:"realEstate",
15:"corporation",
16:"government",
17:"entrance",
18:"naturalFeatures",
}
poi_classificationName_reverse={v:k for k,v in poi_classificationName.items()}
def street_poi_structure(poi,position,distance=300):
from tqdm import tqdm
import pickle,math
import pandas as pd
import numpy as np
import geopandas as gpd
# tqdm.pandas()
poi_num=len(poi_classificationName.keys())
feature_vector=np.zeros(poi_num)
poi_=poi.copy(deep=True)
pos_poi_dict={}
pos_poi_idxes_df=pd.DataFrame(columns=['geometry','frank_e','num'])
pos_poi_feature_vector_df=pd.DataFrame(columns=['geometry']+list(range(poi_num)))
# print(pos_poi_feature_vector)
    for idx,row in tqdm(position.iterrows(),total=position.shape[0]):
        feature_vector=np.zeros(poi_num) #reset the POI histogram for each sampling position
        poi_['within']=poi_.geometry.apply(lambda pt: pt.within(row.geometry.buffer(distance)))
# print(poi_)
poi_selection_df=poi_[poi_['within']==True]
counts=poi_selection_df.level_0.value_counts().to_dict()
num=len(poi_selection_df)
counts_percent={k:v/num for k,v in counts.items()}
# print(counts_percent)
ve=0.0
for v in counts_percent.values():
if v!=0.:
ve-=v*math.log(v)
max_entropy=math.log(num)
frank_e=ve/max_entropy*100
# print(max_entropy,frank_e)
        for k,v in counts.items(): #compute the occurrence frequency/histogram of the feature clusters
poi_name=k.split("_")[-1]
poi_idx=poi_classificationName_reverse[poi_name]
# print(poi_idx,v)
feature_vector[poi_idx]=v
# print(feature_vector)
pos_poi_dict.update({idx:{'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx ,'counts':counts,'counts_percent':counts_percent,'feature_vector':feature_vector,'num':num,'frank_e':frank_e,'geometry':row.geometry}})
pos_poi_idxes_df=pos_poi_idxes_df.append({'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx,'geometry':row.geometry,'frank_e':frank_e,'num':num},ignore_index=True)
feature_vector_dict={i:feature_vector[i] for i in range(len(feature_vector))}
feature_vector_dict.update({'geometry':row.geometry,'fn_stem':row.fn_stem, 'fn_key':row.fn_key, 'fn_idx':row.fn_idx,})
pos_poi_feature_vector_df=pos_poi_feature_vector_df.append(feature_vector_dict,ignore_index=True)
# if idx==3:break
pos_poi_idxes_gdf=gpd.GeoDataFrame(pos_poi_idxes_df,geometry=pos_poi_idxes_df.geometry,crs=position.crs)
pos_poi_idxes_gdf['num_diff']=pos_poi_idxes_gdf.num.diff()
pos_poi_feature_vector_gdf=gpd.GeoDataFrame(pos_poi_feature_vector_df,geometry=pos_poi_feature_vector_df.geometry,crs=position.crs)
with open('./processed data/pos_poi_dict.pkl','wb') as f:
pickle.dump(pos_poi_dict,f)
return pos_poi_idxes_gdf,pos_poi_feature_vector_gdf
def poi_feature_clustering(feature_vector,fields,n_clusters=7,feature_analysis=True):
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import numpy as np
from sklearn import cluster
from shapely.geometry import Point
import geopandas as gpd
import pyproj
from yellowbrick.cluster import KElbowVisualizer
from yellowbrick.features import Manifold
from sklearn.feature_selection import chi2, SelectKBest, f_classif
from sklearn import preprocessing
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
pts_geometry=feature_vector[['geometry']]
pts_geometry[['x','y']]=pts_geometry.geometry.apply(lambda row:pd.Series([row.x,row.y]))
# print(pts_geometry)
pts_coordis=pts_geometry[['x','y']].to_numpy()
# print(pts_coordis)
nbrs=NearestNeighbors(n_neighbors=9, algorithm='ball_tree').fit(pts_coordis)
connectivity=nbrs.kneighbors_graph(pts_coordis)
# print(connectivity.toarray())
X_=feature_vector[fields].to_numpy()
X=normalize(X_,axis=0, norm='max')
clustering=cluster.AgglomerativeClustering(connectivity=connectivity,n_clusters=n_clusters).fit(X)
feature_vector['clustering']=clustering.labels_
#_________________________________________________________________________
if feature_analysis==True:
y=clustering.labels_
selector=SelectKBest(score_func=f_classif, k=len(fields)) #score_func=chi2
selector.fit(X,y)
dfscores = pd.DataFrame(selector.scores_)
dfpvalues=pd.DataFrame(selector.pvalues_)
dfcolumns = pd.DataFrame(fields)
featureScores = pd.concat([dfcolumns,dfscores,dfpvalues],axis=1)
featureScores.columns = ['Factor','Score','p_value'] #naming the dataframe columns
featureScores['Factor']=featureScores['Factor'].apply(lambda row:int(row))
featureScores['poi_name']=featureScores['Factor'].map(poi_classificationName)
featureScores=featureScores.sort_values(by=['Score'])
# print(type(featureScores['Factor'][0]))
print(featureScores)
# featureScores.to_excel('./graph/tl_poi_features scores.xlsx')
featureScores_=featureScores.set_index('Factor')
featureScores_.nlargest(len(fields),'Score').Score.plot(kind='barh',figsize=(30,20),fontsize=38)
featureScores_.Score.plot(kind='barh')
plt.show()
clustering_=cluster.AgglomerativeClustering(connectivity=connectivity,) #n_clusters=n_clusters
visualizer = KElbowVisualizer(clustering_, timings=False,size=(500, 500), k=(4,12)) #k=(4,12) metric='calinski_harabasz'
visualizer.fit(X) # Fit the data to the visualizer
# visualizer.show(outpath="./graph/tl_poi_clustering_KEIbow_.png") # Finalize and render the figure
return feature_vector
if __name__=="__main__":
# poi_gdf=postSQL2gpd(table_name='poi',geom_col='geometry',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# poi_gdf=poi_gdf.to_crs(xian_epsg)
# tl_idxes_clustering_12_gdf=postSQL2gpd(table_name='tl_idxes_clustering_12',geom_col='geometry',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# pos_poi_idxes_gdf,pos_poi_feature_vector_gdf=street_poi_structure(poi=poi_gdf,position=tl_idxes_clustering_12_gdf)
# gpd2postSQL(pos_poi_idxes_gdf,table_name='pos_poi_idxes',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# gpd2postSQL(pos_poi_feature_vector_gdf,table_name='pos_poi_feature_vector',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
# with open('./processed data/pos_poi_dict.pkl','rb') as f:
# pos_poi_dict=pickle.load(f)
pos_poi_feature_vector_gdf=postSQL2gpd(table_name='pos_poi_feature_vector',geom_col='geometry',myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
fields=[ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10','11', '12', '13', '14', '15', '16', '17', '18']
n_clusters=12 #12
feature_vector=poi_feature_clustering(pos_poi_feature_vector_gdf,fields,n_clusters=n_clusters,feature_analysis=True)
# gpd2postSQL(feature_vector,table_name='pos_poi_feature_vector_{}'.format(n_clusters),myusername='postgres',mypassword='<PASSWORD>',mydatabase='streetscape_GSV')
|
[
"pandas.DataFrame",
"pickle.dump",
"matplotlib.pyplot.show",
"database.postSQL2gpd",
"numpy.zeros",
"geopandas.GeoDataFrame",
"sklearn.neighbors.NearestNeighbors",
"sklearn.preprocessing.normalize",
"pandas.Series",
"sklearn.cluster.AgglomerativeClustering",
"math.log",
"pandas.concat",
"yellowbrick.cluster.KElbowVisualizer"
] |
[((1137, 1154), 'numpy.zeros', 'np.zeros', (['poi_num'], {}), '(poi_num)\n', (1145, 1154), True, 'import numpy as np\n'), ((1230, 1282), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['geometry', 'frank_e', 'num']"}), "(columns=['geometry', 'frank_e', 'num'])\n", (1242, 1282), True, 'import pandas as pd\n'), ((3142, 3235), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['pos_poi_idxes_df'], {'geometry': 'pos_poi_idxes_df.geometry', 'crs': 'position.crs'}), '(pos_poi_idxes_df, geometry=pos_poi_idxes_df.geometry, crs=\n position.crs)\n', (3158, 3235), True, 'import geopandas as gpd\n'), ((3326, 3437), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['pos_poi_feature_vector_df'], {'geometry': 'pos_poi_feature_vector_df.geometry', 'crs': 'position.crs'}), '(pos_poi_feature_vector_df, geometry=\n pos_poi_feature_vector_df.geometry, crs=position.crs)\n', (3342, 3437), True, 'import geopandas as gpd\n'), ((4675, 4708), 'sklearn.preprocessing.normalize', 'normalize', (['X_'], {'axis': '(0)', 'norm': '"""max"""'}), "(X_, axis=0, norm='max')\n", (4684, 4708), False, 'from sklearn.preprocessing import normalize\n'), ((7480, 7636), 'database.postSQL2gpd', 'postSQL2gpd', ([], {'table_name': '"""pos_poi_feature_vector"""', 'geom_col': '"""geometry"""', 'myusername': '"""postgres"""', 'mypassword': '"""<PASSWORD>"""', 'mydatabase': '"""streetscape_GSV"""'}), "(table_name='pos_poi_feature_vector', geom_col='geometry',\n myusername='postgres', mypassword='<PASSWORD>', mydatabase=\n 'streetscape_GSV')\n", (7491, 7636), False, 'from database import postSQL2gpd, gpd2postSQL\n'), ((1975, 1988), 'math.log', 'math.log', (['num'], {}), '(num)\n', (1983, 1988), False, 'import pickle, math\n'), ((3504, 3532), 'pickle.dump', 'pickle.dump', (['pos_poi_dict', 'f'], {}), '(pos_poi_dict, f)\n', (3515, 3532), False, 'import pickle, math\n'), ((5153, 5183), 'pandas.DataFrame', 'pd.DataFrame', (['selector.scores_'], {}), '(selector.scores_)\n', (5165, 5183), True, 'import pandas as pd\n'), ((5202, 5233), 'pandas.DataFrame', 'pd.DataFrame', (['selector.pvalues_'], {}), '(selector.pvalues_)\n', (5214, 5233), True, 'import pandas as pd\n'), ((5254, 5274), 'pandas.DataFrame', 'pd.DataFrame', (['fields'], {}), '(fields)\n', (5266, 5274), True, 'import pandas as pd\n'), ((5301, 5352), 'pandas.concat', 'pd.concat', (['[dfcolumns, dfscores, dfpvalues]'], {'axis': '(1)'}), '([dfcolumns, dfscores, dfpvalues], axis=1)\n', (5310, 5352), True, 'import pandas as pd\n'), ((6055, 6065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6063, 6065), True, 'import matplotlib.pyplot as plt\n'), ((6099, 6157), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'connectivity': 'connectivity'}), '(connectivity=connectivity)\n', (6130, 6157), False, 'from sklearn import cluster\n'), ((6203, 6275), 'yellowbrick.cluster.KElbowVisualizer', 'KElbowVisualizer', (['clustering_'], {'timings': '(False)', 'size': '(500, 500)', 'k': '(4, 12)'}), '(clustering_, timings=False, size=(500, 500), k=(4, 12))\n', (6219, 6275), False, 'from yellowbrick.cluster import KElbowVisualizer\n'), ((4322, 4347), 'pandas.Series', 'pd.Series', (['[row.x, row.y]'], {}), '([row.x, row.y])\n', (4331, 4347), True, 'import pandas as pd\n'), ((4464, 4518), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(9)', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=9, algorithm='ball_tree')\n", (4480, 4518), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((4728, 4814), 
'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'connectivity': 'connectivity', 'n_clusters': 'n_clusters'}), '(connectivity=connectivity, n_clusters=\n n_clusters)\n', (4759, 4814), False, 'from sklearn import cluster\n'), ((1943, 1954), 'math.log', 'math.log', (['v'], {}), '(v)\n', (1951, 1954), False, 'import pickle, math\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['LinkedServiceArgs', 'LinkedService']
@pulumi.input_type
class LinkedServiceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
linked_service_name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a LinkedService resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if linked_service_name is not None:
warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")
if linked_service_name is not None:
pulumi.set(__self__, "linked_service_name", linked_service_name)
if read_access_id is not None:
pulumi.set(__self__, "read_access_id", read_access_id)
if resource_id is not None:
warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workspace_id is not None:
pulumi.set(__self__, "workspace_id", workspace_id)
if workspace_name is not None:
warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")
if workspace_name is not None:
pulumi.set(__self__, "workspace_name", workspace_name)
if write_access_id is not None:
pulumi.set(__self__, "write_access_id", write_access_id)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="linkedServiceName")
def linked_service_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "linked_service_name")
@linked_service_name.setter
def linked_service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_service_name", value)
@property
@pulumi.getter(name="readAccessId")
def read_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "read_access_id")
@read_access_id.setter
def read_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "read_access_id", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_id")
@workspace_id.setter
def workspace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_id", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="writeAccessId")
def write_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
return pulumi.get(self, "write_access_id")
@write_access_id.setter
def write_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "write_access_id", value)
@pulumi.input_type
class _LinkedServiceState:
def __init__(__self__, *,
linked_service_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering LinkedService resources.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
if linked_service_name is not None:
warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")
if linked_service_name is not None:
pulumi.set(__self__, "linked_service_name", linked_service_name)
if name is not None:
pulumi.set(__self__, "name", name)
if read_access_id is not None:
pulumi.set(__self__, "read_access_id", read_access_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_id is not None:
warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workspace_id is not None:
pulumi.set(__self__, "workspace_id", workspace_id)
if workspace_name is not None:
warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")
if workspace_name is not None:
pulumi.set(__self__, "workspace_name", workspace_name)
if write_access_id is not None:
pulumi.set(__self__, "write_access_id", write_access_id)
@property
@pulumi.getter(name="linkedServiceName")
def linked_service_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "linked_service_name")
@linked_service_name.setter
def linked_service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_service_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="readAccessId")
def read_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "read_access_id")
@read_access_id.setter
def read_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "read_access_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_id")
@workspace_id.setter
def workspace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_id", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="writeAccessId")
def write_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
return pulumi.get(self, "write_access_id")
@write_access_id.setter
def write_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "write_access_id", value)
class LinkedService(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
linked_service_name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Log Analytics Linked Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic",
tags={
"environment": "development",
})
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_linked_service = azure.loganalytics.LinkedService("exampleLinkedService",
resource_group_name=example_resource_group.name,
workspace_id=example_analytics_workspace.id,
read_access_id=example_account.id)
```
## Import
Log Analytics Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/linkedService:LinkedService example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/Automation
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LinkedServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Log Analytics Linked Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic",
tags={
"environment": "development",
})
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_linked_service = azure.loganalytics.LinkedService("exampleLinkedService",
resource_group_name=example_resource_group.name,
workspace_id=example_analytics_workspace.id,
read_access_id=example_account.id)
```
## Import
Log Analytics Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/linkedService:LinkedService example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/Automation
```
:param str resource_name: The name of the resource.
:param LinkedServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LinkedServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
linked_service_name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LinkedServiceArgs.__new__(LinkedServiceArgs)
if linked_service_name is not None and not opts.urn:
warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")
__props__.__dict__["linked_service_name"] = linked_service_name
__props__.__dict__["read_access_id"] = read_access_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_id is not None and not opts.urn:
warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")
__props__.__dict__["resource_id"] = resource_id
__props__.__dict__["tags"] = tags
__props__.__dict__["workspace_id"] = workspace_id
if workspace_name is not None and not opts.urn:
warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["write_access_id"] = write_access_id
__props__.__dict__["name"] = None
super(LinkedService, __self__).__init__(
'azure:loganalytics/linkedService:LinkedService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
linked_service_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None) -> 'LinkedService':
"""
Get an existing LinkedService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LinkedServiceState.__new__(_LinkedServiceState)
__props__.__dict__["linked_service_name"] = linked_service_name
__props__.__dict__["name"] = name
__props__.__dict__["read_access_id"] = read_access_id
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_id"] = resource_id
__props__.__dict__["tags"] = tags
__props__.__dict__["workspace_id"] = workspace_id
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["write_access_id"] = write_access_id
return LinkedService(resource_name, opts=opts, __props__=__props__)
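    # Usage sketch (added; the subscription/resource-group values are placeholders taken from
    # the Import example in the class docstring above):
    #   existing = LinkedService.get(
    #       "example",
    #       id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/Automation")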
@property
@pulumi.getter(name="linkedServiceName")
def linked_service_name(self) -> pulumi.Output[str]:
"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "linked_service_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="readAccessId")
def read_access_id(self) -> pulumi.Output[str]:
"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "read_access_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[str]:
"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> pulumi.Output[str]:
"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_id")
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Output[str]:
"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_name")
@property
@pulumi.getter(name="writeAccessId")
def write_access_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
return pulumi.get(self, "write_access_id")
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.ResourceOptions",
"pulumi.set",
"pulumi.log.warn",
"warnings.warn"
] |
[((4577, 4616), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (4590, 4616), False, 'import pulumi\n'), ((5065, 5104), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""linkedServiceName"""'}), "(name='linkedServiceName')\n", (5078, 5104), False, 'import pulumi\n'), ((5677, 5711), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""readAccessId"""'}), "(name='readAccessId')\n", (5690, 5711), False, 'import pulumi\n'), ((6156, 6188), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceId"""'}), "(name='resourceId')\n", (6169, 6188), False, 'import pulumi\n'), ((6981, 7014), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""workspaceId"""'}), "(name='workspaceId')\n", (6994, 7014), False, 'import pulumi\n'), ((7462, 7497), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""workspaceName"""'}), "(name='workspaceName')\n", (7475, 7497), False, 'import pulumi\n'), ((7957, 7992), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""writeAccessId"""'}), "(name='writeAccessId')\n", (7970, 7992), False, 'import pulumi\n'), ((13050, 13089), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""linkedServiceName"""'}), "(name='linkedServiceName')\n", (13063, 13089), False, 'import pulumi\n'), ((14110, 14144), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""readAccessId"""'}), "(name='readAccessId')\n", (14123, 14144), False, 'import pulumi\n'), ((14589, 14628), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (14602, 14628), False, 'import pulumi\n'), ((15097, 15129), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceId"""'}), "(name='resourceId')\n", (15110, 15129), False, 'import pulumi\n'), ((15922, 15955), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""workspaceId"""'}), "(name='workspaceId')\n", (15935, 15955), False, 'import pulumi\n'), ((16403, 16438), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""workspaceName"""'}), "(name='workspaceName')\n", (16416, 16438), False, 'import pulumi\n'), ((16898, 16933), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""writeAccessId"""'}), "(name='writeAccessId')\n", (16911, 16933), False, 'import pulumi\n'), ((30780, 30819), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""linkedServiceName"""'}), "(name='linkedServiceName')\n", (30793, 30819), False, 'import pulumi\n'), ((31549, 31583), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""readAccessId"""'}), "(name='readAccessId')\n", (31562, 31583), False, 'import pulumi\n'), ((31875, 31914), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (31888, 31914), False, 'import pulumi\n'), ((32215, 32247), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceId"""'}), "(name='resourceId')\n", (32228, 32247), False, 'import pulumi\n'), ((32741, 32774), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""workspaceId"""'}), "(name='workspaceId')\n", (32754, 32774), False, 'import pulumi\n'), ((33075, 33110), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""workspaceName"""'}), "(name='workspaceName')\n", (33088, 33110), False, 'import pulumi\n'), ((33417, 33452), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""writeAccessId"""'}), "(name='writeAccessId')\n", (33430, 33452), False, 'import pulumi\n'), ((2664, 2728), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_group_name"""', 'resource_group_name'], {}), "(__self__, 'resource_group_name', resource_group_name)\n", (2674, 2728), 
False, 'import pulumi\n'), ((4856, 4895), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (4866, 4895), False, 'import pulumi\n'), ((4998, 5044), 'pulumi.set', 'pulumi.set', (['self', '"""resource_group_name"""', 'value'], {}), "(self, 'resource_group_name', value)\n", (5008, 5044), False, 'import pulumi\n'), ((5458, 5497), 'pulumi.get', 'pulumi.get', (['self', '"""linked_service_name"""'], {}), "(self, 'linked_service_name')\n", (5468, 5497), False, 'import pulumi\n'), ((5610, 5656), 'pulumi.set', 'pulumi.set', (['self', '"""linked_service_name"""', 'value'], {}), "(self, 'linked_service_name', value)\n", (5620, 5656), False, 'import pulumi\n'), ((5957, 5991), 'pulumi.get', 'pulumi.get', (['self', '"""read_access_id"""'], {}), "(self, 'read_access_id')\n", (5967, 5991), False, 'import pulumi\n'), ((6094, 6135), 'pulumi.set', 'pulumi.set', (['self', '"""read_access_id"""', 'value'], {}), "(self, 'read_access_id', value)\n", (6104, 6135), False, 'import pulumi\n'), ((6422, 6453), 'pulumi.get', 'pulumi.get', (['self', '"""resource_id"""'], {}), "(self, 'resource_id')\n", (6432, 6453), False, 'import pulumi\n'), ((6550, 6588), 'pulumi.set', 'pulumi.set', (['self', '"""resource_id"""', 'value'], {}), "(self, 'resource_id', value)\n", (6560, 6588), False, 'import pulumi\n'), ((6794, 6818), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (6804, 6818), False, 'import pulumi\n'), ((6929, 6960), 'pulumi.set', 'pulumi.set', (['self', '"""tags"""', 'value'], {}), "(self, 'tags', value)\n", (6939, 6960), False, 'import pulumi\n'), ((7271, 7303), 'pulumi.get', 'pulumi.get', (['self', '"""workspace_id"""'], {}), "(self, 'workspace_id')\n", (7281, 7303), False, 'import pulumi\n'), ((7402, 7441), 'pulumi.set', 'pulumi.set', (['self', '"""workspace_id"""', 'value'], {}), "(self, 'workspace_id', value)\n", (7412, 7441), False, 'import pulumi\n'), ((7758, 7792), 'pulumi.get', 'pulumi.get', (['self', '"""workspace_name"""'], {}), "(self, 'workspace_name')\n", (7768, 7792), False, 'import pulumi\n'), ((7895, 7936), 'pulumi.set', 'pulumi.set', (['self', '"""workspace_name"""', 'value'], {}), "(self, 'workspace_name', value)\n", (7905, 7936), False, 'import pulumi\n'), ((8241, 8276), 'pulumi.get', 'pulumi.get', (['self', '"""write_access_id"""'], {}), "(self, 'write_access_id')\n", (8251, 8276), False, 'import pulumi\n'), ((8381, 8423), 'pulumi.set', 'pulumi.set', (['self', '"""write_access_id"""', 'value'], {}), "(self, 'write_access_id', value)\n", (8391, 8423), False, 'import pulumi\n'), ((13443, 13482), 'pulumi.get', 'pulumi.get', (['self', '"""linked_service_name"""'], {}), "(self, 'linked_service_name')\n", (13453, 13482), False, 'import pulumi\n'), ((13595, 13641), 'pulumi.set', 'pulumi.set', (['self', '"""linked_service_name"""', 'value'], {}), "(self, 'linked_service_name', value)\n", (13605, 13641), False, 'import pulumi\n'), ((13951, 13975), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (13961, 13975), False, 'import pulumi\n'), ((14058, 14089), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (14068, 14089), False, 'import pulumi\n'), ((14390, 14424), 'pulumi.get', 'pulumi.get', (['self', '"""read_access_id"""'], {}), "(self, 'read_access_id')\n", (14400, 14424), False, 'import pulumi\n'), ((14527, 14568), 'pulumi.set', 'pulumi.set', (['self', '"""read_access_id"""', 'value'], {}), "(self, 'read_access_id', value)\n", (14537, 14568), 
False, 'import pulumi\n'), ((14878, 14917), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (14888, 14917), False, 'import pulumi\n'), ((15030, 15076), 'pulumi.set', 'pulumi.set', (['self', '"""resource_group_name"""', 'value'], {}), "(self, 'resource_group_name', value)\n", (15040, 15076), False, 'import pulumi\n'), ((15363, 15394), 'pulumi.get', 'pulumi.get', (['self', '"""resource_id"""'], {}), "(self, 'resource_id')\n", (15373, 15394), False, 'import pulumi\n'), ((15491, 15529), 'pulumi.set', 'pulumi.set', (['self', '"""resource_id"""', 'value'], {}), "(self, 'resource_id', value)\n", (15501, 15529), False, 'import pulumi\n'), ((15735, 15759), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (15745, 15759), False, 'import pulumi\n'), ((15870, 15901), 'pulumi.set', 'pulumi.set', (['self', '"""tags"""', 'value'], {}), "(self, 'tags', value)\n", (15880, 15901), False, 'import pulumi\n'), ((16212, 16244), 'pulumi.get', 'pulumi.get', (['self', '"""workspace_id"""'], {}), "(self, 'workspace_id')\n", (16222, 16244), False, 'import pulumi\n'), ((16343, 16382), 'pulumi.set', 'pulumi.set', (['self', '"""workspace_id"""', 'value'], {}), "(self, 'workspace_id', value)\n", (16353, 16382), False, 'import pulumi\n'), ((16699, 16733), 'pulumi.get', 'pulumi.get', (['self', '"""workspace_name"""'], {}), "(self, 'workspace_name')\n", (16709, 16733), False, 'import pulumi\n'), ((16836, 16877), 'pulumi.set', 'pulumi.set', (['self', '"""workspace_name"""', 'value'], {}), "(self, 'workspace_name', value)\n", (16846, 16877), False, 'import pulumi\n'), ((17182, 17217), 'pulumi.get', 'pulumi.get', (['self', '"""write_access_id"""'], {}), "(self, 'write_access_id')\n", (17192, 17217), False, 'import pulumi\n'), ((17322, 17364), 'pulumi.set', 'pulumi.set', (['self', '"""write_access_id"""', 'value'], {}), "(self, 'write_access_id', value)\n", (17332, 17364), False, 'import pulumi\n'), ((31164, 31203), 'pulumi.get', 'pulumi.get', (['self', '"""linked_service_name"""'], {}), "(self, 'linked_service_name')\n", (31174, 31203), False, 'import pulumi\n'), ((31504, 31528), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (31514, 31528), False, 'import pulumi\n'), ((31820, 31854), 'pulumi.get', 'pulumi.get', (['self', '"""read_access_id"""'], {}), "(self, 'read_access_id')\n", (31830, 31854), False, 'import pulumi\n'), ((32155, 32194), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (32165, 32194), False, 'import pulumi\n'), ((32472, 32503), 'pulumi.get', 'pulumi.get', (['self', '"""resource_id"""'], {}), "(self, 'resource_id')\n", (32482, 32503), False, 'import pulumi\n'), ((32696, 32720), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (32706, 32720), False, 'import pulumi\n'), ((33022, 33054), 'pulumi.get', 'pulumi.get', (['self', '"""workspace_id"""'], {}), "(self, 'workspace_id')\n", (33032, 33054), False, 'import pulumi\n'), ((33362, 33396), 'pulumi.get', 'pulumi.get', (['self', '"""workspace_name"""'], {}), "(self, 'workspace_name')\n", (33372, 33396), False, 'import pulumi\n'), ((33702, 33737), 'pulumi.get', 'pulumi.get', (['self', '"""write_access_id"""'], {}), "(self, 'write_access_id')\n", (33712, 33737), False, 'import pulumi\n'), ((2785, 2918), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), 
"(\n 'This field has been deprecated and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (2798, 2918), False, 'import warnings\n'), ((2925, 3075), 'pulumi.log.warn', 'pulumi.log.warn', (['"""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider"""'], {}), "(\n 'linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider'\n )\n", (2940, 3075), False, 'import pulumi\n'), ((3126, 3190), 'pulumi.set', 'pulumi.set', (['__self__', '"""linked_service_name"""', 'linked_service_name'], {}), "(__self__, 'linked_service_name', linked_service_name)\n", (3136, 3190), False, 'import pulumi\n'), ((3242, 3296), 'pulumi.set', 'pulumi.set', (['__self__', '"""read_access_id"""', 'read_access_id'], {}), "(__self__, 'read_access_id', read_access_id)\n", (3252, 3296), False, 'import pulumi\n'), ((3345, 3508), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (3358, 3508), False, 'import warnings\n'), ((3515, 3687), 'pulumi.log.warn', 'pulumi.log.warn', (['"""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider"""'], {}), "(\n 'resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider'\n )\n", (3530, 3687), False, 'import pulumi\n'), ((3730, 3778), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_id"""', 'resource_id'], {}), "(__self__, 'resource_id', resource_id)\n", (3740, 3778), False, 'import pulumi\n'), ((3820, 3854), 'pulumi.set', 'pulumi.set', (['__self__', '"""tags"""', 'tags'], {}), "(__self__, 'tags', tags)\n", (3830, 3854), False, 'import pulumi\n'), ((3904, 3954), 'pulumi.set', 'pulumi.set', (['__self__', '"""workspace_id"""', 'workspace_id'], {}), "(__self__, 'workspace_id', workspace_id)\n", (3914, 3954), False, 'import pulumi\n'), ((4006, 4167), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (4019, 4167), False, 'import warnings\n'), ((4174, 4347), 'pulumi.log.warn', 'pulumi.log.warn', (['"""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider"""'], {}), "(\n 'workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider'\n )\n", (4189, 4347), False, 'import pulumi\n'), ((4393, 4447), 'pulumi.set', 'pulumi.set', (['__self__', '"""workspace_name"""', 'workspace_name'], {}), "(__self__, 'workspace_name', workspace_name)\n", (4403, 4447), False, 'import pulumi\n'), ((4500, 4556), 'pulumi.set', 'pulumi.set', (['__self__', '"""write_access_id"""', 'write_access_id'], {}), "(__self__, 'write_access_id', write_access_id)\n", (4510, 4556), False, 'import pulumi\n'), ((11061, 11194), 'warnings.warn', 'warnings.warn', 
(['"""This field has been deprecated and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (11074, 11194), False, 'import warnings\n'), ((11201, 11351), 'pulumi.log.warn', 'pulumi.log.warn', (['"""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider"""'], {}), "(\n 'linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider'\n )\n", (11216, 11351), False, 'import pulumi\n'), ((11402, 11466), 'pulumi.set', 'pulumi.set', (['__self__', '"""linked_service_name"""', 'linked_service_name'], {}), "(__self__, 'linked_service_name', linked_service_name)\n", (11412, 11466), False, 'import pulumi\n'), ((11508, 11542), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (11518, 11542), False, 'import pulumi\n'), ((11594, 11648), 'pulumi.set', 'pulumi.set', (['__self__', '"""read_access_id"""', 'read_access_id'], {}), "(__self__, 'read_access_id', read_access_id)\n", (11604, 11648), False, 'import pulumi\n'), ((11705, 11769), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_group_name"""', 'resource_group_name'], {}), "(__self__, 'resource_group_name', resource_group_name)\n", (11715, 11769), False, 'import pulumi\n'), ((11818, 11981), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (11831, 11981), False, 'import warnings\n'), ((11988, 12160), 'pulumi.log.warn', 'pulumi.log.warn', (['"""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider"""'], {}), "(\n 'resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider'\n )\n", (12003, 12160), False, 'import pulumi\n'), ((12203, 12251), 'pulumi.set', 'pulumi.set', (['__self__', '"""resource_id"""', 'resource_id'], {}), "(__self__, 'resource_id', resource_id)\n", (12213, 12251), False, 'import pulumi\n'), ((12293, 12327), 'pulumi.set', 'pulumi.set', (['__self__', '"""tags"""', 'tags'], {}), "(__self__, 'tags', tags)\n", (12303, 12327), False, 'import pulumi\n'), ((12377, 12427), 'pulumi.set', 'pulumi.set', (['__self__', '"""workspace_id"""', 'workspace_id'], {}), "(__self__, 'workspace_id', workspace_id)\n", (12387, 12427), False, 'import pulumi\n'), ((12479, 12640), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (12492, 12640), False, 'import warnings\n'), ((12647, 12820), 'pulumi.log.warn', 'pulumi.log.warn', (['"""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider"""'], {}), "(\n 'workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be 
removed in a future version of the provider'\n )\n", (12662, 12820), False, 'import pulumi\n'), ((12866, 12920), 'pulumi.set', 'pulumi.set', (['__self__', '"""workspace_name"""', 'workspace_name'], {}), "(__self__, 'workspace_name', workspace_name)\n", (12876, 12920), False, 'import pulumi\n'), ((12973, 13029), 'pulumi.set', 'pulumi.set', (['__self__', '"""write_access_id"""', 'write_access_id'], {}), "(__self__, 'write_access_id', write_access_id)\n", (12983, 13029), False, 'import pulumi\n'), ((24466, 24490), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (24488, 24490), False, 'import pulumi\n'), ((30052, 30081), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (30074, 30081), False, 'import pulumi\n'), ((25075, 25208), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (25088, 25208), False, 'import warnings\n'), ((25219, 25369), 'pulumi.log.warn', 'pulumi.log.warn', (['"""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider"""'], {}), "(\n 'linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider'\n )\n", (25234, 25369), False, 'import pulumi\n'), ((25799, 25962), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (25812, 25962), False, 'import warnings\n'), ((25973, 26145), 'pulumi.log.warn', 'pulumi.log.warn', (['"""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider"""'], {}), "(\n 'resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider'\n )\n", (25988, 26145), False, 'import pulumi\n'), ((26384, 26545), 'warnings.warn', 'warnings.warn', (['"""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider"""', 'DeprecationWarning'], {}), "(\n 'This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider'\n , DeprecationWarning)\n", (26397, 26545), False, 'import warnings\n'), ((26556, 26729), 'pulumi.log.warn', 'pulumi.log.warn', (['"""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider"""'], {}), "(\n 'workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider'\n )\n", (26571, 26729), False, 'import pulumi\n')]
|
"""
Derangements
AUTHORS:
- <NAME> (2010-05): Initial version
- <NAME> (2013-03-30): Put derangements into category framework
"""
# ****************************************************************************
# Copyright (C) 2010 <NAME> <<EMAIL>>,
# 2013 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
# ****************************************************************************
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.misc.all import prod
from sage.misc.prandom import random, randrange
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.all import ZZ, QQ
from sage.rings.integer import Integer
from sage.combinat.combinat import CombinatorialElement
from sage.combinat.permutation import Permutation, Permutations
class Derangement(CombinatorialElement):
r"""
A derangement.
A derangement on a set `S` is a permutation `\sigma` such that `\sigma(x)
\neq x` for all `x \in S`, i.e. `\sigma` is a permutation of `S` with no
fixed points.
EXAMPLES::
sage: D = Derangements(4)
sage: elt = D([4,3,2,1])
sage: TestSuite(elt).run()
"""
def to_permutation(self):
"""
Return the permutation corresponding to ``self``.
EXAMPLES::
sage: D = Derangements(4)
sage: p = D([4,3,2,1]).to_permutation(); p
[4, 3, 2, 1]
sage: type(p)
<class 'sage.combinat.permutation.StandardPermutations_all_with_category.element_class'>
sage: D = Derangements([1, 3, 3, 4])
sage: D[0].to_permutation()
Traceback (most recent call last):
...
ValueError: Can only convert to a permutation for derangements of [1, 2, ..., n]
"""
if self.parent()._set != tuple(range(1, len(self) + 1)):
raise ValueError("Can only convert to a permutation for derangements of [1, 2, ..., n]")
return Permutation(list(self))
class Derangements(UniqueRepresentation, Parent):
r"""
The class of all derangements of a set or multiset.
A derangement on a set `S` is a permutation `\sigma` such that `\sigma(x)
\neq x` for all `x \in S`, i.e. `\sigma` is a permutation of `S` with no
fixed points.
For an integer, or a list or string with all elements
distinct, the derangements are obtained by a standard result described
in [BV2004]_. For a list or string with repeated elements, the derangements
are formed by computing all permutations of the input and discarding all
non-derangements.
INPUT:
- ``x`` -- Can be an integer which corresponds to derangements of
`\{1, 2, 3, \ldots, x\}`, a list, or a string
REFERENCES:
- [BV2004]_
- :wikipedia:`Derangement`
EXAMPLES::
sage: D1 = Derangements([2,3,4,5])
sage: D1.list()
[[3, 4, 5, 2],
[5, 4, 2, 3],
[3, 5, 2, 4],
[4, 5, 3, 2],
[4, 2, 5, 3],
[5, 2, 3, 4],
[5, 4, 3, 2],
[4, 5, 2, 3],
[3, 2, 5, 4]]
sage: D1.cardinality()
9
sage: D1.random_element() # random
[4, 2, 5, 3]
sage: D2 = Derangements([1,2,3,1,2,3])
sage: D2.cardinality()
10
sage: D2.list()
[[2, 1, 1, 3, 3, 2],
[2, 1, 2, 3, 3, 1],
[2, 3, 1, 2, 3, 1],
[2, 3, 1, 3, 1, 2],
[2, 3, 2, 3, 1, 1],
[3, 1, 1, 2, 3, 2],
[3, 1, 2, 2, 3, 1],
[3, 1, 2, 3, 1, 2],
[3, 3, 1, 2, 1, 2],
[3, 3, 2, 2, 1, 1]]
sage: D2.random_element() # random
[2, 3, 1, 3, 1, 2]
"""
@staticmethod
def __classcall_private__(cls, x):
"""
Normalize ``x`` to ensure a unique representation.
EXAMPLES::
sage: D = Derangements(4)
sage: D2 = Derangements([1, 2, 3, 4])
sage: D3 = Derangements((1, 2, 3, 4))
sage: D is D2
True
sage: D is D3
True
"""
if x in ZZ:
x = list(range(1, x + 1))
return super(Derangements, cls).__classcall__(cls, tuple(x))
def __init__(self, x):
"""
Initialize ``self``.
EXAMPLES::
sage: D = Derangements(4)
sage: TestSuite(D).run()
sage: D = Derangements('abcd')
sage: TestSuite(D).run()
sage: D = Derangements([2, 2, 1, 1])
sage: TestSuite(D).run()
"""
Parent.__init__(self, category=FiniteEnumeratedSets())
self._set = x
self.__multi = len(set(x)) < len(x)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: Derangements(4)
Derangements of the set [1, 2, 3, 4]
sage: Derangements('abcd')
Derangements of the set ['a', 'b', 'c', 'd']
sage: Derangements([2,2,1,1])
Derangements of the multiset [2, 2, 1, 1]
"""
if self.__multi:
return "Derangements of the multiset %s" % list(self._set)
return "Derangements of the set %s" % list(self._set)
def _element_constructor_(self, der):
"""
Construct an element of ``self`` from ``der``.
EXAMPLES::
sage: D = Derangements(4)
sage: elt = D([3,1,4,2]); elt
[3, 1, 4, 2]
sage: elt.parent() is D
True
"""
if isinstance(der, Derangement):
if der.parent() is self:
return der
raise ValueError("Cannot convert %s to an element of %s" % (der, self))
return self.element_class(self, der)
Element = Derangement
def __iter__(self):
"""
Iterate through ``self``.
EXAMPLES::
sage: D = Derangements(4)
sage: D.list() # indirect doctest
[[2, 3, 4, 1],
[4, 3, 1, 2],
[2, 4, 1, 3],
[3, 4, 2, 1],
[3, 1, 4, 2],
[4, 1, 2, 3],
[4, 3, 2, 1],
[3, 4, 1, 2],
[2, 1, 4, 3]]
sage: D = Derangements([1,44,918,67])
sage: D.list()
[[44, 918, 67, 1],
[67, 918, 1, 44],
[44, 67, 1, 918],
[918, 67, 44, 1],
[918, 1, 67, 44],
[67, 1, 44, 918],
[67, 918, 44, 1],
[918, 67, 1, 44],
[44, 1, 67, 918]]
sage: D = Derangements(['A','AT','CAT','CATS'])
sage: D.list()
[['AT', 'CAT', 'CATS', 'A'],
['CATS', 'CAT', 'A', 'AT'],
['AT', 'CATS', 'A', 'CAT'],
['CAT', 'CATS', 'AT', 'A'],
['CAT', 'A', 'CATS', 'AT'],
['CATS', 'A', 'AT', 'CAT'],
['CATS', 'CAT', 'AT', 'A'],
['CAT', 'CATS', 'A', 'AT'],
['AT', 'A', 'CATS', 'CAT']]
sage: D = Derangements('CART')
sage: D.list()
[['A', 'R', 'T', 'C'],
['T', 'R', 'C', 'A'],
['A', 'T', 'C', 'R'],
['R', 'T', 'A', 'C'],
['R', 'C', 'T', 'A'],
['T', 'C', 'A', 'R'],
['T', 'R', 'A', 'C'],
['R', 'T', 'C', 'A'],
['A', 'C', 'T', 'R']]
sage: D = Derangements([1,1,2,2,3,3])
sage: D.list()
[[2, 2, 3, 3, 1, 1],
[2, 3, 1, 3, 1, 2],
[2, 3, 1, 3, 2, 1],
[2, 3, 3, 1, 1, 2],
[2, 3, 3, 1, 2, 1],
[3, 2, 1, 3, 1, 2],
[3, 2, 1, 3, 2, 1],
[3, 2, 3, 1, 1, 2],
[3, 2, 3, 1, 2, 1],
[3, 3, 1, 1, 2, 2]]
sage: D = Derangements('SATTAS')
sage: D.list()
[['A', 'S', 'S', 'A', 'T', 'T'],
['A', 'S', 'A', 'S', 'T', 'T'],
['A', 'T', 'S', 'S', 'T', 'A'],
['A', 'T', 'S', 'A', 'S', 'T'],
['A', 'T', 'A', 'S', 'S', 'T'],
['T', 'S', 'S', 'A', 'T', 'A'],
['T', 'S', 'A', 'S', 'T', 'A'],
['T', 'S', 'A', 'A', 'S', 'T'],
['T', 'T', 'S', 'A', 'S', 'A'],
['T', 'T', 'A', 'S', 'S', 'A']]
sage: D = Derangements([1,1,2,2,2])
sage: D.list()
[]
"""
if self.__multi:
for p in Permutations(self._set):
if not self._fixed_point(p):
yield self.element_class(self, list(p))
else:
for d in self._iter_der(len(self._set)):
yield self.element_class(self, [self._set[i - 1] for i in d])
def _iter_der(self, n):
r"""
Iterate through all derangements of the list `[1, 2, 3, \ldots, n]`
using the method given in [BV2004]_.
EXAMPLES::
sage: D = Derangements(4)
sage: list(D._iter_der(4))
[[2, 3, 4, 1],
[4, 3, 1, 2],
[2, 4, 1, 3],
[3, 4, 2, 1],
[3, 1, 4, 2],
[4, 1, 2, 3],
[4, 3, 2, 1],
[3, 4, 1, 2],
[2, 1, 4, 3]]
"""
if n <= 1:
return
elif n == 2:
yield [2, 1]
elif n == 3:
yield [2, 3, 1]
yield [3, 1, 2]
elif n >= 4:
for d in self._iter_der(n - 1):
for i in range(1, n):
s = d[:]
ii = d.index(i)
s[ii] = n
yield s + [i]
for d in self._iter_der(n - 2):
for i in range(1, n):
s = d[:]
s = [x >= i and x + 1 or x for x in s]
s.insert(i - 1, n)
yield s + [i]
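    # Added note (a sketch, not part of the original source): the two loops above realize the
    # recursion D_n = (n-1) * (D_{n-1} + D_{n-2}) from [BV2004]_.  The first loop takes a
    # derangement of [n-1], overwrites its entry equal to i with n and appends i (the case in
    # which n lies in a cycle of length at least 3); the second loop takes a derangement of
    # [n-2], relabels it to skip the value i, places n at position i and appends i (the case
    # in which n and i form a 2-cycle).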
def _fixed_point(self, a):
"""
        Return ``True`` if ``a`` has a fixed point, i.e. if it agrees with ``self._set`` in some position.
EXAMPLES::
sage: D = Derangements(5)
sage: D._fixed_point([3,1,2,5,4])
False
sage: D._fixed_point([5,4,3,2,1])
True
"""
return any(x == y for (x, y) in zip(a, self._set))
def _count_der(self, n):
"""
Count the number of derangements of `n` using the recursion
`D_2 = 1, D_3 = 2, D_n = (n-1) (D_{n-1} + D_{n-2})`.
EXAMPLES::
sage: D = Derangements(5)
sage: D._count_der(2)
1
sage: D._count_der(3)
2
sage: D._count_der(5)
44
"""
if n <= 1:
return Integer(0)
if n == 2:
return Integer(1)
if n == 3:
return Integer(2)
# n >= 4
last = Integer(2)
second_last = Integer(1)
for i in range(4, n + 1):
current = (i - 1) * (last + second_last)
second_last = last
last = current
return last
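    # Worked example (added illustration): the recursion gives
    # D_4 = 3 * (D_3 + D_2) = 3 * (2 + 1) = 9 and D_5 = 4 * (D_4 + D_3) = 4 * (9 + 2) = 44,
    # in agreement with the doctests above and in cardinality() below.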
def cardinality(self):
r"""
Counts the number of derangements of a positive integer, a
list, or a string. The list or string may contain repeated
elements. If an integer `n` is given, the value returned
is the number of derangements of `[1, 2, 3, \ldots, n]`.
For an integer, or a list or string with all elements
distinct, the value is obtained by the standard result
`D_2 = 1, D_3 = 2, D_n = (n-1) (D_{n-1} + D_{n-2})`.
        For a list or string with repeated elements, the number of
        derangements is computed by MacMahon's theorem. If the numbers
        of repeated elements are `a_1, a_2, \ldots, a_k` then the number
        of derangements is given by the coefficient of `x_1^{a_1} x_2^{a_2}
        \cdots x_k^{a_k}` in the expansion of `\prod_{i=1}^k (S - x_i)^{a_i}`
        where `S = x_1 + x_2 + \cdots + x_k`.
EXAMPLES::
sage: D = Derangements(5)
sage: D.cardinality()
44
sage: D = Derangements([1,44,918,67,254])
sage: D.cardinality()
44
sage: D = Derangements(['A','AT','CAT','CATS','CARTS'])
sage: D.cardinality()
44
sage: D = Derangements('UNCOPYRIGHTABLE')
sage: D.cardinality()
481066515734
sage: D = Derangements([1,1,2,2,3,3])
sage: D.cardinality()
10
sage: D = Derangements('SATTAS')
sage: D.cardinality()
10
sage: D = Derangements([1,1,2,2,2])
sage: D.cardinality()
0
"""
if self.__multi:
sL = set(self._set)
A = [self._set.count(i) for i in sL]
R = PolynomialRing(QQ, 'x', len(A))
S = sum(i for i in R.gens())
e = prod((S - x)**y for (x, y) in zip(R.gens(), A))
return Integer(e.coefficient(dict([(x, y) for (x, y) in zip(R.gens(), A)])))
return self._count_der(len(self._set))
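    # Worked example (added illustration): for the multiset [1, 1, 2, 2, 3, 3] we have k = 3
    # and a_1 = a_2 = a_3 = 2, so the count is the coefficient of x1^2 * x2^2 * x3^2 in
    # ((S - x1) * (S - x2) * (S - x3))^2 with S = x1 + x2 + x3, which equals 10, the value
    # shown in the doctest above.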
def _rand_der(self):
r"""
Produces a random derangement of `[1, 2, \ldots, n]`.
This is an
        implementation of the algorithm described by Martinez et al. in
[MPP2008]_.
EXAMPLES::
sage: D = Derangements(4)
sage: d = D._rand_der()
sage: d in D
True
"""
n = len(self._set)
A = list(range(1, n + 1))
mark = [x < 0 for x in A]
i, u = n, n
while u >= 2:
if not(mark[i - 1]):
while True:
j = randrange(1, i)
if not(mark[j - 1]):
A[i - 1], A[j - 1] = A[j - 1], A[i - 1]
break
p = random()
                if p < (u - 1) * self._count_der(u - 2) / self._count_der(u):
mark[j - 1] = True
u -= 1
u -= 1
i -= 1
return A
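    # Added note (a sketch of the idea behind [MPP2008]_, not part of the original source):
    # after swapping positions i and j, the entry now at j is marked -- treated as locked into
    # a 2-cycle -- with probability (u - 1) * D_{u-2} / D_u, the proportion of derangements of
    # the u still-unmarked elements in which a given element lies in a 2-cycle; u tracks how
    # many elements remain to be placed.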
def random_element(self):
r"""
        Produces a random derangement of a positive integer, a list, or
        a string. The list or string may contain repeated elements.
        If an integer `n` is given, then a random
        derangement of `[1, 2, 3, \ldots, n]` is returned.
For an integer, or a list or string with all elements
distinct, the value is obtained by an algorithm described in
[MPP2008]_. For a list or string with repeated elements the
derangement is formed by choosing an element at random from the list of
all possible derangements.
OUTPUT:
A single list or string containing a derangement, or an
empty list if there are no derangements.
EXAMPLES::
sage: D = Derangements(4)
sage: D.random_element() # random
[2, 3, 4, 1]
sage: D = Derangements(['A','AT','CAT','CATS','CARTS','CARETS'])
sage: D.random_element() # random
['AT', 'CARTS', 'A', 'CAT', 'CARETS', 'CATS']
sage: D = Derangements('UNCOPYRIGHTABLE')
sage: D.random_element() # random
['C', 'U', 'I', 'H', 'O', 'G', 'N', 'B', 'E', 'L', 'A', 'R', 'P', 'Y', 'T']
sage: D = Derangements([1,1,1,1,2,2,2,2,3,3,3,3])
sage: D.random_element() # random
[3, 2, 2, 3, 1, 3, 1, 3, 2, 1, 1, 2]
sage: D = Derangements('ESSENCES')
sage: D.random_element() # random
['N', 'E', 'E', 'C', 'S', 'S', 'S', 'E']
sage: D = Derangements([1,1,2,2,2])
sage: D.random_element()
[]
TESTS:
        Check that the index error discovered in :trac:`29974` is fixed::
sage: D = Derangements([1,1,2,2])
sage: _ = [D.random_element() for _ in range(20)]
"""
if self.__multi:
L = list(self)
if len(L) == 0:
return self.element_class(self, [])
i = randrange(len(L))
return L[i]
temp = self._rand_der()
return self.element_class(self, [self._set[ii - 1] for ii in temp])
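# --- Illustrative sketch, not part of the original Sage module ---
# A standard-library-only cross-check of the two counting rules described in
# ``cardinality``: the recurrence D_n = (n - 1) * (D_{n-1} + D_{n-2}) for
# distinct elements, and brute-force enumeration for a small multiset.
if __name__ == '__main__':
    from itertools import permutations
    def _brute_force_derangements(elements):
        # Number of distinct rearrangements differing from ``elements`` in every position.
        return len({p for p in permutations(elements)
                    if all(a != b for a, b in zip(p, elements))})
    def _derangement_number(n):
        # D_0 = 1, D_1 = 0, D_n = (n - 1) * (D_{n-1} + D_{n-2})
        if n == 0:
            return 1
        d_prev2, d_prev = 1, 0
        for k in range(2, n + 1):
            d_prev2, d_prev = d_prev, (k - 1) * (d_prev + d_prev2)
        return d_prev
    assert _derangement_number(4) == _brute_force_derangements((1, 2, 3, 4)) == 9
    # The only derangement of the multiset [1, 1, 2, 2] is [2, 2, 1, 1].
    assert _brute_force_derangements((1, 1, 2, 2)) == 1
    print("derangement counting checks passed")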
|
[
"sage.combinat.permutation.Permutations",
"sage.categories.finite_enumerated_sets.FiniteEnumeratedSets",
"sage.misc.prandom.randrange",
"sage.misc.prandom.random",
"sage.rings.integer.Integer"
] |
[((11378, 11388), 'sage.rings.integer.Integer', 'Integer', (['(2)'], {}), '(2)\n', (11385, 11388), False, 'from sage.rings.integer import Integer\n'), ((11411, 11421), 'sage.rings.integer.Integer', 'Integer', (['(1)'], {}), '(1)\n', (11418, 11421), False, 'from sage.rings.integer import Integer\n'), ((9009, 9032), 'sage.combinat.permutation.Permutations', 'Permutations', (['self._set'], {}), '(self._set)\n', (9021, 9032), False, 'from sage.combinat.permutation import Permutation, Permutations\n'), ((11237, 11247), 'sage.rings.integer.Integer', 'Integer', (['(0)'], {}), '(0)\n', (11244, 11247), False, 'from sage.rings.integer import Integer\n'), ((11286, 11296), 'sage.rings.integer.Integer', 'Integer', (['(1)'], {}), '(1)\n', (11293, 11296), False, 'from sage.rings.integer import Integer\n'), ((11335, 11345), 'sage.rings.integer.Integer', 'Integer', (['(2)'], {}), '(2)\n', (11342, 11345), False, 'from sage.rings.integer import Integer\n'), ((5111, 5133), 'sage.categories.finite_enumerated_sets.FiniteEnumeratedSets', 'FiniteEnumeratedSets', ([], {}), '()\n', (5131, 5133), False, 'from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets\n'), ((14368, 14376), 'sage.misc.prandom.random', 'random', ([], {}), '()\n', (14374, 14376), False, 'from sage.misc.prandom import random, randrange\n'), ((14197, 14212), 'sage.misc.prandom.randrange', 'randrange', (['(1)', 'i'], {}), '(1, i)\n', (14206, 14212), False, 'from sage.misc.prandom import random, randrange\n')]
|
from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, \
NumberInput
from .models import Ticket
class TicketManageForm(ModelForm):
class Meta:
model = Ticket
fields = ['status',
'priority',
'tecnico_pre_diagnostico',
'tecnico_de_campo',
'is_customer',
'customer_code',
'order',
'losses',
'need_paper',
'resolution_report', ]
widgets = {
'status': Select(attrs={'class': 'form-control', 'placeholder': 'Status'}),
'priority': Select(attrs={'class': 'form-control', 'placeholder': 'Prioridade'}),
'tecnico_pre_diagnostico': Select(attrs={'class': 'form-control', 'placeholder': 'Pré diagnostico'}),
'tecnico_de_campo': Select(attrs={'class': 'form-control', 'placeholder': 'Tecnico de campo'}),
'is_customer': CheckboxInput(attrs={'class': 'form-control'}),
'customer_code': TextInput(attrs={'class': 'form-control'}),
'order': TextInput(attrs={'class': 'form-control'}),
'losses': NumberInput(attrs={'class': 'form-control'}),
'need_paper': CheckboxInput(attrs={'class': 'form-control'}),
'resolution_report': Textarea(attrs={'class': 'form-control'})
}
class TicketForm(ModelForm):
class Meta:
model = Ticket
fields = ['queue',
'problems',
'submitter_name',
'submitter_company',
'submitter_phone',
'submitter_email',
'status',
'priority',
'tecnico_pre_diagnostico',
'tecnico_de_campo',
'is_customer',
'customer_code',
'order',
'need_paper',
'resolution_report', ]
widgets = {
'queue': Select(attrs={'class': 'form-control', 'placeholder': 'Queue'}),
'problems': CheckboxSelectMultiple(attrs={'class': 'form-control'}),
'submitter_name': TextInput(attrs={'class': 'form-control'}),
'submitter_company': TextInput(attrs={'class': 'form-control'}),
'submitter_phone': TextInput(attrs={'class': 'form-control'}),
'submitter_email': EmailInput(attrs={'class': 'form-control'}),
'status': Select(attrs={'class': 'form-control', 'placeholder': 'Status'}),
'priority': Select(attrs={'class': 'form-control', 'placeholder': 'Prioridade'}),
'tecnico_pre_diagnostico': Select(attrs={'class': 'form-control', 'placeholder': 'Pré diagnostico'}),
'tecnico_de_campo': Select(attrs={'class': 'form-control', 'placeholder': 'Tecnico de campo'}),
'is_customer': CheckboxInput(attrs={'class': 'form-control'}),
'customer_code': TextInput(attrs={'class': 'form-control'}),
'order': TextInput(attrs={'class': 'form-control'}),
'need_paper': CheckboxInput(attrs={'class': 'form-control'}),
'resolution_report': Textarea(attrs={'class': 'form-control'})
}
|
[
"django.forms.CheckboxSelectMultiple",
"django.forms.Select",
"django.forms.TextInput",
"django.forms.NumberInput",
"django.forms.EmailInput",
"django.forms.CheckboxInput",
"django.forms.Textarea"
] |
[((618, 682), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Status'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Status'})\n", (624, 682), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((708, 776), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Prioridade'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Prioridade'})\n", (714, 776), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((817, 890), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Pré diagnostico'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Pré diagnostico'})\n", (823, 890), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((924, 998), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Tecnico de campo'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Tecnico de campo'})\n", (930, 998), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((1027, 1073), 'django.forms.CheckboxInput', 'CheckboxInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1040, 1073), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((1104, 1146), 'django.forms.TextInput', 'TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1113, 1146), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((1169, 1211), 'django.forms.TextInput', 'TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1178, 1211), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((1235, 1279), 'django.forms.NumberInput', 'NumberInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1246, 1279), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((1307, 1353), 'django.forms.CheckboxInput', 'CheckboxInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1320, 1353), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((1388, 1429), 'django.forms.Textarea', 'Textarea', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1396, 1429), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2067, 2130), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Queue'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Queue'})\n", (2073, 2130), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2156, 2211), 'django.forms.CheckboxSelectMultiple', 
'CheckboxSelectMultiple', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2178, 2211), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2243, 2285), 'django.forms.TextInput', 'TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2252, 2285), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2320, 2362), 'django.forms.TextInput', 'TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2329, 2362), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2395, 2437), 'django.forms.TextInput', 'TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2404, 2437), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2470, 2513), 'django.forms.EmailInput', 'EmailInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2480, 2513), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2537, 2601), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Status'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Status'})\n", (2543, 2601), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2627, 2695), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Prioridade'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Prioridade'})\n", (2633, 2695), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2736, 2809), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Pré diagnostico'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Pré diagnostico'})\n", (2742, 2809), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2843, 2917), 'django.forms.Select', 'Select', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Tecnico de campo'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Tecnico de campo'})\n", (2849, 2917), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((2946, 2992), 'django.forms.CheckboxInput', 'CheckboxInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2959, 2992), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((3023, 3065), 'django.forms.TextInput', 'TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (3032, 3065), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((3088, 3130), 'django.forms.TextInput', 'TextInput', ([], {'attrs': "{'class': 'form-control'}"}), 
"(attrs={'class': 'form-control'})\n", (3097, 3130), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((3158, 3204), 'django.forms.CheckboxInput', 'CheckboxInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (3171, 3204), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n'), ((3239, 3280), 'django.forms.Textarea', 'Textarea', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (3247, 3280), False, 'from django.forms import ModelForm, Select, CheckboxInput, Textarea, TextInput, EmailInput, CheckboxSelectMultiple, NumberInput\n')]
|
from __future__ import absolute_import, division, print_function
from six.moves import range
import struct
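# Each entry of ``header_struct`` below is ``(field_name, byte_length, format)``,
# where ``'s'`` keeps the raw bytes, ``'!f'``/``'!i'`` are big-endian float/int
# codes for ``struct.unpack``, longer format strings unpack several values at
# once, and an entry whose name is ``None`` is padding that ``readHeader`` skips.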
header_struct = [
('Device',10,'s'),
('Version',10,'s'),
('Crystal',20,'s'),
('CrystalSystem',12,'s'),
(None,24),
('SpaceGroup',12,'s'),
('mosaic1',4,'!f'),
('memo',80,'s'),
('reserve1',84,'s'),
('date',12,'s'),
('operatorname',20,'s'),
('target',4,'s'),
('wavelength',4,'!f'),
('monotype',20,'s'),
('mono2theta',4,'!f'),
('collimator',20,'s'),
('filter',4,'s'),
('distance',4,'!f'),
('Kv',4,'!f'),
('mA',4,'!f'),
('focus',12,'s'),
('Xmemo',80,'s'),
('cyl',4,'!i'),
(None,60),
('Spindle',4,'s'), # Crystal mount axis closest to spindle axis
('Xray_axis',4,'s'), # Crystal mount axis closest to beam axis
('phidatum',4,'!f'),
('phistart',4,'!f'),
('phiend',4,'!f'),
('noscillations',4,'!i'),
('minutes',4,'!f'), # Exposure time in minutes?
('beampixels_x',4,'!f'),
('beampixels_y',4,'!f'), # Direct beam position in pixels
('omega',4,'!f'),
('chi',4,'!f'),
('twotheta',4,'!f'),
('Mu',4,'!f'), # Spindle inclination angle?
('ScanTemplate',204,'s'), # This space is now used for storing the scan
# templates information
('nFast',4,'!i'),
('nSlow',4,'!i'), # Number of fast, slow pixels
('sizeFast',4,'!f'),
('sizeSlow',4,'!f'), # Size of fast, slow direction in mm
('record_length',4,'!i'), # Record length in bytes
('number_records',4,'!i'), # number of records
('Read_start',4,'!i'), # For partial reads, 1st read line
('IP_num',4,'!i'), # Which imaging plate 1, 2 ?
('Ratio',4,'!f'), # Output ratio for high value pixels
('Fading_start',4,'!f'), # Fading time to start of read
('Fading_end',4,'!f'), # Fading time to end of read
('computer',10,'s'), # Type of computer "IRIS", "VAX", "SUN", etc
('plate_type',10,'s'), # Type of IP
('Dr',4,'!i'),
('Dx',4,'!i'),
('Dz',4,'!i'), # IP scanning codes??
('PixShiftOdd',4,'!f'), # Pixel shift to odd lines
('IntRatioOdd',4,'!f'), # Intensity ratio to odd lines
('MagicNum',4,'!i'), # Magic number to indicate next values are legit
('NumGonAxes',4,'!i'), # Number of goniometer axes
('a5x3fGonVecs',60,'!fffffffffffffff'),# Goniometer axis vectors
('a5fGonStart',20,'!fffff'),# Start angles for each of 5 axes
('a5fGonEnd',20,'!fffff'), # End angles for each of 5 axes
('a5fGonOffset',20,'!fffff'),# Offset values for each of 5 axes
('ScanAxisNum',4,'!i'), # Which axis is the scan axis?
('AxesNames',40,'s'), # Names of the axes (space or comma separated?)'''
]
class Raxis(object):
def __init__(self,file):
self.file = file
def readHeader(self,verbose=0):
self.F = open(self.file,'rb')
self.head={}
seek = 0
for item in header_struct:
            if item[0] is None:
self.F.read(item[1])
elif item[2]=='s':
self.head[item[0]]=self.F.read(item[1])[0:item[1]]
if verbose:print(item[0],self.head[item[0]])
elif len(item[2])>2:
rawdata = self.F.read(item[1])
assert len(rawdata)==struct.calcsize(item[2])
self.head[item[0]] = struct.unpack(item[2],rawdata)
if verbose:print(item[0],self.head[item[0]])
else:
rawdata = self.F.read(item[1])
assert len(rawdata)==struct.calcsize(item[2])
self.head[item[0]] = struct.unpack(item[2],rawdata)[0]
if verbose:print(item[0],self.head[item[0]])
seek+=item[1]
def data(self):
self.F.seek(self.head['record_length'])
Dim0 = self.head['nFast'] #number of fast pixels
ToRead = self.head['record_length']
ReadLines = self.head['number_records']
# Each line might be padded, so figure this out
        BytesPerLine = Dim0 * 2
        Pad = ToRead - BytesPerLine
        if Pad <= 0:
#For a normal image, there should be no padding per line
#No padding, use single fast read
ToRead = ToRead * ReadLines
self.CharTemp = self.F.read(ToRead)
else:
ToRead = ToRead * ReadLines
temporary = self.F.read(ToRead)
from iotbx.detectors import unpad_raxis
self.CharTemp = unpad_raxis(temporary,self.head['record_length'],Pad)
def dump(self):
        # Print every 16-bit big-endian pixel; values above 32767 are offset and
        # scaled by the header's 'Ratio' factor (high-value pixel encoding).
        for x in range(0, len(self.CharTemp), 2):
            unsigned_int = struct.unpack("!H", self.CharTemp[x:x + 2])[0]
            if unsigned_int <= 32767:
                print(float(unsigned_int))
            else:
                print((float(unsigned_int) + 32768.0) * self.head['Ratio'])
if __name__ == '__main__':
R = Raxis('H-x071_0001.osc')
R.readHeader()
R.data()
R.dump()
|
[
"iotbx.detectors.unpad_raxis",
"struct.unpack",
"struct.calcsize"
] |
[((4265, 4320), 'iotbx.detectors.unpad_raxis', 'unpad_raxis', (['temporary', "self.head['record_length']", 'Pad'], {}), "(temporary, self.head['record_length'], Pad)\n", (4276, 4320), False, 'from iotbx.detectors import unpad_raxis\n'), ((4410, 4453), 'struct.unpack', 'struct.unpack', (['"""!H"""', 'self.CharTemp[x:x + 2]'], {}), "('!H', self.CharTemp[x:x + 2])\n", (4423, 4453), False, 'import struct\n'), ((3259, 3290), 'struct.unpack', 'struct.unpack', (['item[2]', 'rawdata'], {}), '(item[2], rawdata)\n', (3272, 3290), False, 'import struct\n'), ((3205, 3229), 'struct.calcsize', 'struct.calcsize', (['item[2]'], {}), '(item[2])\n', (3220, 3229), False, 'import struct\n'), ((3423, 3447), 'struct.calcsize', 'struct.calcsize', (['item[2]'], {}), '(item[2])\n', (3438, 3447), False, 'import struct\n'), ((3477, 3508), 'struct.unpack', 'struct.unpack', (['item[2]', 'rawdata'], {}), '(item[2], rawdata)\n', (3490, 3508), False, 'import struct\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django import template
from django.conf import settings
from django.contrib.sites.models import get_current_site
from opps.articles.models import Album
from opps.containers.models import Container, ContainerBox
from opps.channels.models import Channel
class View(object):
context_object_name = "context"
paginate_by = settings.OPPS_PAGINATE_BY
limit = settings.OPPS_VIEWS_LIMIT
page_kwarg = 'page'
def __init__(self):
self.slug = None
self.channel = None
self.long_slug = None
self.channel_long_slug = []
self.article = None
self.excluded_ids = set()
self.child_class = u'container'
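    # ``get_paginate_by`` below looks for a per-model override in the Django
    # settings (a name of the form ``OPPS_<APP_LABEL>_<MODELNAME>_PAGINATE_BY``)
    # and falls back to ``OPPS_PAGINATE_BY`` when no such setting is defined.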
def get_paginate_by(self, queryset):
queryset = self.get_queryset()
        setting_name = 'OPPS_{}_{}_PAGINATE_BY'.format(
            queryset.model._meta.app_label,
            queryset.model.__name__).upper()
paginate_by = getattr(settings, setting_name, self.paginate_by)
return paginate_by
def get_context_data(self, **kwargs):
if not self.long_slug:
context = []
return context
context = super(View, self).get_context_data(**kwargs)
# channel is needed everywhere
self.channel = self.channel or Channel.objects.get_homepage(
site=get_current_site(self.request)
)
if hasattr(self, 'articleboxes'):
context['articleboxes'] = self.articleboxes
else:
context['articleboxes'] = ContainerBox.objects.filter(
channel__long_slug=self.long_slug)
self.excluded_ids = []
for box in context['articleboxes']:
self.excluded_ids += [a.pk for a in box.ordered_containers()]
obj_filter = {}
obj_filter['site_domain'] = self.site.domain
obj_filter['date_available__lte'] = timezone.now()
obj_filter['published'] = True
filters = obj_filter
filters['channel_long_slug__in'] = self.channel_long_slug
is_paginated = self.page_kwarg in self.request.GET
if self.channel and self.channel.is_root_node() and not is_paginated:
filters['show_on_root_channel'] = True
article = Container.objects.filter(**filters)
context['posts'] = article.filter(
child_class='Post'
).exclude(pk__in=self.excluded_ids)[:self.limit]
context['albums'] = Album.objects.filter(
**filters
).exclude(pk__in=self.excluded_ids)[:self.limit]
context['channel'] = {}
context['channel']['long_slug'] = self.long_slug
if self.channel:
context['channel'] = self.channel
context['breadcrumb'] = self.get_breadcrumb()
if self.slug:
try:
context['next'] = self.get_object()\
.get_next_by_date_insert(**obj_filter)
except self.get_object().DoesNotExist:
pass
try:
context['prev'] = self.get_object()\
.get_previous_by_date_insert(**obj_filter)
except self.get_object().DoesNotExist:
pass
context['articleboxes'] = context['articleboxes'].filter(
containers__slug=self.slug)
if self.get_object().child_class == 'Mirror':
context['context'] = self.get_object().container
if self.request.META.get('HTTP_X_PJAX', False) or\
self.request.is_ajax():
context['extends_parent'] = 'base_ajax.html'
return context
def get_template_folder(self):
domain_folder = "containers"
if self.site.id > 1:
domain_folder = "{}/containers".format(self.site.domain)
return domain_folder
def get_long_slug(self):
self.long_slug = self.kwargs.get('channel__long_slug', None)
try:
if not self.long_slug:
self.long_slug = Channel.objects.get_homepage(
site=self.site).long_slug
except AttributeError:
pass
return self.long_slug
def set_channel_rules(self):
self.fallback = getattr(settings, 'OPPS_MULTISITE_FALLBACK', False)
filters = dict(
site__domain=self.site.domain,
long_slug=self.long_slug,
date_available__lte=timezone.now(),
published=True
)
try:
self.channel = Channel.objects.get(**filters)
except Channel.DoesNotExist:
if not self.fallback or self.site == self.site_master:
raise Http404('Channel not found and fallback disabled')
filters['site__domain'] = self.site_master.domain
self.channel = get_object_or_404(Channel, **filters)
self.long_slug = self.channel.long_slug
self.channel_long_slug = [self.long_slug]
self.channel_descendants = self.channel.get_descendants(
include_self=False)
for children in self.channel_descendants:
self.channel_long_slug.append(children.long_slug)
def get_breadcrumb(self):
if self.channel.is_root_node():
return []
return self.channel.get_ancestors(include_self=True)
def check_template(self, _template):
try:
template.loader.get_template(_template)
return True
except template.TemplateDoesNotExist:
return False
|
[
"opps.containers.models.ContainerBox.objects.filter",
"django.utils.timezone.now",
"opps.channels.models.Channel.objects.get",
"django.contrib.sites.models.get_current_site",
"opps.containers.models.Container.objects.filter",
"django.shortcuts.get_object_or_404",
"opps.articles.models.Album.objects.filter",
"opps.channels.models.Channel.objects.get_homepage",
"django.http.Http404",
"django.template.loader.get_template"
] |
[((2168, 2182), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2180, 2182), False, 'from django.utils import timezone\n'), ((2526, 2561), 'opps.containers.models.Container.objects.filter', 'Container.objects.filter', ([], {}), '(**filters)\n', (2550, 2561), False, 'from opps.containers.models import Container, ContainerBox\n'), ((1805, 1867), 'opps.containers.models.ContainerBox.objects.filter', 'ContainerBox.objects.filter', ([], {'channel__long_slug': 'self.long_slug'}), '(channel__long_slug=self.long_slug)\n', (1832, 1867), False, 'from opps.containers.models import Container, ContainerBox\n'), ((4759, 4789), 'opps.channels.models.Channel.objects.get', 'Channel.objects.get', ([], {}), '(**filters)\n', (4778, 4789), False, 'from opps.channels.models import Channel\n'), ((5625, 5664), 'django.template.loader.get_template', 'template.loader.get_template', (['_template'], {}), '(_template)\n', (5653, 5664), False, 'from django import template\n'), ((4665, 4679), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4677, 4679), False, 'from django.utils import timezone\n'), ((5056, 5093), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Channel'], {}), '(Channel, **filters)\n', (5073, 5093), False, 'from django.shortcuts import get_object_or_404\n'), ((1613, 1643), 'django.contrib.sites.models.get_current_site', 'get_current_site', (['self.request'], {}), '(self.request)\n', (1629, 1643), False, 'from django.contrib.sites.models import get_current_site\n'), ((2723, 2754), 'opps.articles.models.Album.objects.filter', 'Album.objects.filter', ([], {}), '(**filters)\n', (2743, 2754), False, 'from opps.articles.models import Album\n'), ((4264, 4308), 'opps.channels.models.Channel.objects.get_homepage', 'Channel.objects.get_homepage', ([], {'site': 'self.site'}), '(site=self.site)\n', (4292, 4308), False, 'from opps.channels.models import Channel\n'), ((4916, 4966), 'django.http.Http404', 'Http404', (['"""Channel not found and fallback disabled"""'], {}), "('Channel not found and fallback disabled')\n", (4923, 4966), False, 'from django.http import Http404\n')]
|
from ariadne import QueryType, make_executable_schema, graphql_sync, MutationType
from ariadne import load_schema_from_path
from features.Songs.songTypes import songTypes, songObjectType, songQueries, songMutations
from ariadne.constants import PLAYGROUND_HTML
from features.Composers.composerTypes import composerTypes, composerObjectType, composerQueries, composerMutations
from flask import Flask, request, jsonify
from features.Composers.composer import resolve_composer, update_composer, create_composer
from features.Songs.song import create_song, resolve_song, update_song
queryTypes = load_schema_from_path("./root_types/queries.gql")
mutationTypes = load_schema_from_path("./root_types/mutations.gql")
schema = make_executable_schema(
[mutationTypes, queryTypes, songTypes, composerTypes],
[songObjectType, composerObjectType, songQueries, songMutations, composerQueries, composerMutations]
)
app = Flask(__name__)
@app.route("/graphql", methods=["GET"])
def graphql_playground():
return PLAYGROUND_HTML
@app.route("/graphql", methods=["POST"])
def graphql_server():
data = request.get_json()
success, result = graphql_sync(
schema,
data,
context_value=request,
debug=app.debug
)
return jsonify(result)
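# Optional convenience block, not part of the original file: lets the dev server
# be started with ``python`` directly instead of the ``flask`` CLI.
if __name__ == "__main__":
    app.run(debug=True)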
|
[
"ariadne.make_executable_schema",
"ariadne.graphql_sync",
"ariadne.load_schema_from_path",
"flask.Flask",
"flask.jsonify",
"flask.request.get_json"
] |
[((611, 660), 'ariadne.load_schema_from_path', 'load_schema_from_path', (['"""./root_types/queries.gql"""'], {}), "('./root_types/queries.gql')\n", (632, 660), False, 'from ariadne import load_schema_from_path\n'), ((677, 728), 'ariadne.load_schema_from_path', 'load_schema_from_path', (['"""./root_types/mutations.gql"""'], {}), "('./root_types/mutations.gql')\n", (698, 728), False, 'from ariadne import load_schema_from_path\n'), ((739, 927), 'ariadne.make_executable_schema', 'make_executable_schema', (['[mutationTypes, queryTypes, songTypes, composerTypes]', '[songObjectType, composerObjectType, songQueries, songMutations,\n composerQueries, composerMutations]'], {}), '([mutationTypes, queryTypes, songTypes, composerTypes\n ], [songObjectType, composerObjectType, songQueries, songMutations,\n composerQueries, composerMutations])\n', (761, 927), False, 'from ariadne import QueryType, make_executable_schema, graphql_sync, MutationType\n'), ((936, 951), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (941, 951), False, 'from flask import Flask, request, jsonify\n'), ((1121, 1139), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1137, 1139), False, 'from flask import Flask, request, jsonify\n'), ((1162, 1228), 'ariadne.graphql_sync', 'graphql_sync', (['schema', 'data'], {'context_value': 'request', 'debug': 'app.debug'}), '(schema, data, context_value=request, debug=app.debug)\n', (1174, 1228), False, 'from ariadne import QueryType, make_executable_schema, graphql_sync, MutationType\n'), ((1278, 1293), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (1285, 1293), False, 'from flask import Flask, request, jsonify\n')]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import runpy
import shutil
import subprocess
import sys
import tempfile
import unittest
import uuid
from contextlib import closing
from unittest import mock
from unittest.mock import Mock, patch
import torch.distributed.run as launch
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
sandcastle_skip_if,
)
def launch_in_proc(args):
launch.main(args)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
def get_child_pids(pid):
pgrep = subprocess.Popen(args=f"pgrep -P {pid}", shell=True, stdout=subprocess.PIPE)
pgrep.wait()
out = pgrep.stdout.read().decode("utf-8").rstrip().split("\n")
pids = []
for pid in out:
if pid:
pids.append(int(pid))
return pids
def pid_exists(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
class MockException(Exception):
pass
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
for env in os.environ.keys():
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
self._test_launch_user_script_python()
def _test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
world_size = 1
args = [
f"--nnodes={nnodes}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_BACKEND"] = "etcd"
os.environ["PET_RDZV_ENDPOINT"] = self._etcd_endpoint
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "spawn"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_type}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
        Asserts that when the worker program fails, the launcher raises an
        exception indicating that the worker process failed.
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--standalone",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
"--run_path",
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
self.assertTrue(min_nodes, max_nodes)
self.assertTrue(1, min_nodes)
min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
self.assertTrue(2, min_nodes)
self.assertTrue(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_is_torchelastic_launched(self):
# launch test script with torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns True
out_file = f"{os.path.join(self.test_dir, 'out')}"
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=1",
"--monitor_interval=1",
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
]
)
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
def test_is_not_torchelastic_launched(self):
# launch test script without torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns False
out_file = f"{os.path.join(self.test_dir, 'out')}"
# need to run the script with runpy in the same interpreter
# as the test because otherwise (depending on the environment)
# it will not find torch as a dependency
with patch.object(
sys,
"argv",
[
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("False", is_torchelastic_launched)
def test_init_method_tcp(self):
port = get_free_port()
with patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
"--rank=0",
"--world_size=1",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_tcp_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
]
)
# nothing to validate, just make sure it runs
def test_init_method_env(self):
port = get_free_port()
with patch.dict(
os.environ,
{
"RANK": "0",
"WORLD_SIZE": "1",
"MASTER_ADDR": "localhost",
"MASTER_PORT": str(port),
},
), patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
"--init_method=env://",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_env_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
"--init_method=env://",
]
)
# nothing to validate, just make sure it runs
def test_get_default_executable(self):
self.assertEqual(sys.executable, launch.get_executable())
def test_get_override_executable(self):
os.environ["PYTHON_EXEC"] = "python"
self._test_launch_user_script_python()
|
[
"torch.distributed.run.parse_min_max_nnodes",
"torch.distributed.run.get_executable",
"torch.distributed.run.main",
"shutil.rmtree",
"os.path.join",
"torch.distributed.elastic.rendezvous.etcd_server.EtcdServer",
"os.path.dirname",
"torch.testing._internal.common_utils.sandcastle_skip_if",
"tempfile.mkdtemp",
"os.environ.keys",
"contextlib.closing",
"subprocess.Popen",
"unittest.mock.patch",
"runpy.run_path",
"torch.distributed.elastic.utils.get_socket_with_port",
"torch.distributed.elastic.utils.distributed.get_free_port",
"os.listdir",
"uuid.uuid4",
"torch.distributed.elastic.agent.server.api.RunResult",
"unittest.mock.Mock",
"os.kill",
"os.cpu_count",
"multiprocessing.Process"
] |
[((1004, 1021), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (1015, 1021), True, 'import torch.distributed.run as launch\n'), ((1140, 1216), 'subprocess.Popen', 'subprocess.Popen', ([], {'args': 'f"""pgrep -P {pid}"""', 'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "(args=f'pgrep -P {pid}', shell=True, stdout=subprocess.PIPE)\n", (1156, 1216), False, 'import subprocess\n'), ((4314, 4399), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (4332, 4399), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((5412, 5497), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (5430, 5497), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((6420, 6505), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (6438, 6505), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((8494, 8579), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (8512, 8579), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((8705, 8790), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (8723, 8790), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((8902, 8987), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (8920, 8987), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((9154, 9239), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (9172, 9239), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((9241, 9292), 'unittest.mock.patch', 'patch', (['"""torch.cuda.is_available"""'], {'return_value': '(True)'}), "('torch.cuda.is_available', return_value=True)\n", (9246, 9292), False, 'from unittest.mock import Mock, patch\n'), ((9298, 9346), 'unittest.mock.patch', 'patch', (['"""torch.cuda.device_count"""'], {'return_value': '(3)'}), "('torch.cuda.device_count', return_value=3)\n", (9303, 9346), False, 'from unittest.mock import Mock, patch\n'), ((9534, 9619), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test 
incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (9552, 9619), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((10531, 10584), 'unittest.mock.patch', 'mock.patch', (['"""torch.distributed.elastic.events.record"""'], {}), "('torch.distributed.elastic.events.record')\n", (10541, 10584), False, 'from unittest import mock\n'), ((10590, 10675), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (10608, 10675), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((11550, 11635), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (11568, 11635), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((11637, 11741), 'unittest.mock.patch', 'mock.patch', (['"""torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"""'], {}), "(\n 'torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run'\n )\n", (11647, 11741), False, 'from unittest import mock\n'), ((11751, 11804), 'unittest.mock.patch', 'mock.patch', (['"""torch.distributed.elastic.events.record"""'], {}), "('torch.distributed.elastic.events.record')\n", (11761, 11804), False, 'from unittest import mock\n'), ((12754, 12839), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (12772, 12839), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((13527, 13612), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (13545, 13612), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((14296, 14381), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (14314, 14381), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((15997, 16054), 'unittest.mock.patch', 'patch', (['"""torch.distributed.launcher.api.LocalElasticAgent"""'], {}), "('torch.distributed.launcher.api.LocalElasticAgent')\n", (16002, 16054), False, 'from unittest.mock import Mock, patch\n'), ((16888, 16973), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (16906, 16973), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((19027, 19112), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test 
incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (19045, 19112), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((20261, 20346), 'torch.testing._internal.common_utils.sandcastle_skip_if', 'sandcastle_skip_if', (['TEST_WITH_DEV_DBG_ASAN', '"""test incompatible with dev/dbg asan"""'], {}), "(TEST_WITH_DEV_DBG_ASAN,\n 'test incompatible with dev/dbg asan')\n", (20279, 20346), False, 'from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, sandcastle_skip_if\n'), ((1066, 1091), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1081, 1091), False, 'import os\n'), ((1441, 1456), 'os.kill', 'os.kill', (['pid', '(0)'], {}), '(pid, 0)\n', (1448, 1456), False, 'import os\n'), ((1754, 1766), 'torch.distributed.elastic.rendezvous.etcd_server.EtcdServer', 'EtcdServer', ([], {}), '()\n', (1764, 1766), False, 'from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer\n'), ((2027, 2045), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2043, 2045), False, 'import tempfile\n'), ((2119, 2136), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (2134, 2136), False, 'import os\n'), ((2453, 2481), 'shutil.rmtree', 'shutil.rmtree', (['self.test_dir'], {}), '(self.test_dir)\n', (2466, 2481), False, 'import shutil\n'), ((3158, 3175), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (3169, 3175), True, 'import torch.distributed.run as launch\n'), ((3569, 3591), 'torch.distributed.elastic.utils.get_socket_with_port', 'get_socket_with_port', ([], {}), '()\n', (3589, 3591), False, 'from torch.distributed.elastic.utils import get_socket_with_port\n'), ((4060, 4077), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (4071, 4077), True, 'import torch.distributed.run as launch\n'), ((5144, 5175), 'torch.distributed.run.main', 'launch.main', (['(args + script_args)'], {}), '(args + script_args)\n', (5155, 5175), True, 'import torch.distributed.run as launch\n'), ((6152, 6183), 'torch.distributed.run.main', 'launch.main', (['(args + script_args)'], {}), '(args + script_args)\n', (6163, 6183), True, 'import torch.distributed.run as launch\n'), ((7377, 7401), 'torch.distributed.run.main', 'launch.main', (['script_args'], {}), '(script_args)\n', (7388, 7401), True, 'import torch.distributed.run as launch\n'), ((8180, 8211), 'torch.distributed.run.main', 'launch.main', (['(args + script_args)'], {}), '(args + script_args)\n', (8191, 8211), True, 'import torch.distributed.run as launch\n'), ((10277, 10294), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (10288, 10294), True, 'import torch.distributed.run as launch\n'), ((13273, 13290), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (13284, 13290), True, 'import torch.distributed.run as launch\n'), ((14042, 14059), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (14053, 14059), True, 'import torch.distributed.run as launch\n'), ((15187, 15204), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (15198, 15204), True, 'import torch.distributed.run as launch\n'), ((15633, 15665), 'torch.distributed.run.parse_min_max_nnodes', 'launch.parse_min_max_nnodes', (['"""1"""'], {}), "('1')\n", (15660, 15665), True, 'import torch.distributed.run as launch\n'), ((15781, 15816), 'torch.distributed.run.parse_min_max_nnodes', 'launch.parse_min_max_nnodes', 
(['"""2:20"""'], {}), "('2:20')\n", (15808, 15816), True, 'import torch.distributed.run as launch\n'), ((16446, 16452), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (16450, 16452), False, 'from unittest.mock import Mock, patch\n'), ((16491, 16523), 'torch.distributed.elastic.agent.server.api.RunResult', 'RunResult', (['WorkerState.SUCCEEDED'], {}), '(WorkerState.SUCCEEDED)\n', (16500, 16523), False, 'from torch.distributed.elastic.agent.server.api import RunResult, WorkerState\n'), ((16601, 16607), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (16605, 16607), False, 'from unittest.mock import Mock, patch\n'), ((18607, 18622), 'torch.distributed.elastic.utils.distributed.get_free_port', 'get_free_port', ([], {}), '()\n', (18620, 18622), False, 'from torch.distributed.elastic.utils.distributed import get_free_port\n'), ((19178, 19193), 'torch.distributed.elastic.utils.distributed.get_free_port', 'get_free_port', ([], {}), '()\n', (19191, 19193), False, 'from torch.distributed.elastic.utils.distributed import get_free_port\n'), ((19694, 19709), 'torch.distributed.elastic.utils.distributed.get_free_port', 'get_free_port', ([], {}), '()\n', (19707, 19709), False, 'from torch.distributed.elastic.utils.distributed import get_free_port\n'), ((20412, 20427), 'torch.distributed.elastic.utils.distributed.get_free_port', 'get_free_port', ([], {}), '()\n', (20425, 20427), False, 'from torch.distributed.elastic.utils.distributed import get_free_port\n'), ((3605, 3618), 'contextlib.closing', 'closing', (['sock'], {}), '(sock)\n', (3612, 3618), False, 'from contextlib import closing\n'), ((5088, 5134), 'torch.distributed.run.main', 'launch.main', (["(args + ['--module'] + script_args)"], {}), "(args + ['--module'] + script_args)\n", (5099, 5134), True, 'import torch.distributed.run as launch\n'), ((6096, 6142), 'torch.distributed.run.main', 'launch.main', (["(args + ['--module'] + script_args)"], {}), "(args + ['--module'] + script_args)\n", (6107, 6142), True, 'import torch.distributed.run as launch\n'), ((7304, 7328), 'torch.distributed.run.main', 'launch.main', (['script_args'], {}), '(script_args)\n', (7315, 7328), True, 'import torch.distributed.run as launch\n'), ((8683, 8697), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (8695, 8697), False, 'import os\n'), ((11484, 11501), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (11495, 11501), True, 'import torch.distributed.run as launch\n'), ((12689, 12706), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (12700, 12706), True, 'import torch.distributed.run as launch\n'), ((15085, 15128), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'launch.main', 'args': '[args]'}), '(target=launch.main, args=[args])\n', (15095, 15128), True, 'import multiprocessing as mp\n'), ((15952, 15990), 'torch.distributed.run.parse_min_max_nnodes', 'launch.parse_min_max_nnodes', (['"""2:20:30"""'], {}), "('2:20:30')\n", (15979, 15990), True, 'import torch.distributed.run as launch\n'), ((16621, 16698), 'unittest.mock.patch', 'patch', (['"""torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"""'], {}), "('torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler')\n", (16626, 16698), False, 'from unittest.mock import Mock, patch\n'), ((16804, 16821), 'torch.distributed.run.main', 'launch.main', (['args'], {}), '(args)\n', (16815, 16821), True, 'import torch.distributed.run as launch\n'), ((18337, 18385), 'runpy.run_path', 'runpy.run_path', (['sys.argv[0]'], {'run_name': 
'"""__main__"""'}), "(sys.argv[0], run_name='__main__')\n", (18351, 18385), False, 'import runpy\n'), ((18914, 18962), 'runpy.run_path', 'runpy.run_path', (['sys.argv[0]'], {'run_name': '"""__main__"""'}), "(sys.argv[0], run_name='__main__')\n", (18928, 18962), False, 'import runpy\n'), ((20148, 20196), 'runpy.run_path', 'runpy.run_path', (['sys.argv[0]'], {'run_name': '"""__main__"""'}), "(sys.argv[0], run_name='__main__')\n", (20162, 20196), False, 'import runpy\n'), ((20944, 20967), 'torch.distributed.run.get_executable', 'launch.get_executable', ([], {}), '()\n', (20965, 20967), True, 'import torch.distributed.run as launch\n'), ((2645, 2657), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2655, 2657), False, 'import uuid\n'), ((3369, 3394), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (3379, 3394), False, 'import os\n'), ((4271, 4296), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (4281, 4296), False, 'import os\n'), ((4461, 4473), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4471, 4473), False, 'import uuid\n'), ((5369, 5394), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (5379, 5394), False, 'import os\n'), ((5568, 5580), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5578, 5580), False, 'import uuid\n'), ((6377, 6402), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (6387, 6402), False, 'import os\n'), ((6564, 6576), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6574, 6576), False, 'import uuid\n'), ((7595, 7620), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (7605, 7620), False, 'import os\n'), ((7731, 7743), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7741, 7743), False, 'import uuid\n'), ((8451, 8476), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (8461, 8476), False, 'import os\n'), ((9672, 9684), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9682, 9684), False, 'import uuid\n'), ((10488, 10513), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (10498, 10513), False, 'import os\n'), ((10917, 10929), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10927, 10929), False, 'import uuid\n'), ((12046, 12058), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12056, 12058), False, 'import uuid\n'), ((13484, 13509), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (13494, 13509), False, 'import os\n'), ((14253, 14278), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (14263, 14278), False, 'import os\n'), ((14450, 14462), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14460, 14462), False, 'import uuid\n'), ((15524, 15549), 'os.listdir', 'os.listdir', (['self.test_dir'], {}), '(self.test_dir)\n', (15534, 15549), False, 'import os\n'), ((17171, 17205), 'os.path.join', 'os.path.join', (['self.test_dir', '"""out"""'], {}), "(self.test_dir, 'out')\n", (17183, 17205), False, 'import os\n'), ((17884, 17918), 'os.path.join', 'os.path.join', (['self.test_dir', '"""out"""'], {}), "(self.test_dir, 'out')\n", (17896, 17918), False, 'import os\n')]
|
#!/usr/bin/env python3
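# Benchmark driver: for each problem size and processor count it runs
# ./special-test.sh 50 times, averages the time reported on stderr, and writes
# a (processors, numbers, average time) row to results.csv.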
import csv
import subprocess
from subprocess import run
nums = 20
proc = 1
# result = run('./test.sh {} {}'.format(nums, proc),
# shell=True,
# check=True,
# stderr=subprocess.PIPE,
# universal_newlines=True)
# # print(result)
# parsed = str(result.stderr).split("\n")[-4:-1]
# print(parsed)
# with open('results.csv', 'w', newline='') as csvfile:
# spamwriter.writerow(['processors', 'numbers', 'time'])
# spamwriter.writerow(parsed)
with open('results.csv', 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['processors', 'numbers', 'time'])
for nums in [100, 500, 1000, 2500, 5000]:
for proc in range(1, 11):
time_spent = 0.0
experiment_count = 50
for i in range(experiment_count):
result = run('./special-test.sh {} {}'.format(nums, proc),
shell=True,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True)
time_spent += float(str(result.stderr).split("\n")[-2])
spamwriter.writerow([proc, nums, time_spent/experiment_count])
|
[
"csv.writer"
] |
[((584, 660), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (594, 660), False, 'import csv\n')]
|
import importlib
import inspect
from typing import Any, Dict, Generic, Type, TypeVar, Union
import entrypoints
import typing_inspect
from datahub import __package_name__
from datahub.configuration.common import ConfigurationError
T = TypeVar("T")
def import_key(key: str) -> Any:
assert "." in key, "import key must contain a ."
module_name, item_name = key.rsplit(".", 1)
item = getattr(importlib.import_module(module_name), item_name)
return item
class Registry(Generic[T]):
def __init__(self):
self._mapping: Dict[str, Union[Type[T], Exception]] = {}
def _get_registered_type(self) -> Type[T]:
cls = typing_inspect.get_generic_type(self)
tp = typing_inspect.get_args(cls)[0]
return tp
def _check_cls(self, cls: Type[T]) -> None:
if inspect.isabstract(cls):
raise ValueError(
f"cannot register an abstract type in the registry; got {cls}"
)
super_cls = self._get_registered_type()
if not issubclass(cls, super_cls):
raise ValueError(f"must be derived from {super_cls}; got {cls}")
def _register(self, key: str, tp: Union[Type[T], Exception]) -> None:
if key in self._mapping:
raise KeyError(f"key already in use - {key}")
if key.find(".") >= 0:
raise KeyError(f"key cannot contain '.' - {key}")
self._mapping[key] = tp
def register(self, key: str, cls: Type[T]) -> None:
self._check_cls(cls)
self._register(key, cls)
def register_disabled(self, key: str, reason: Exception) -> None:
self._register(key, reason)
def is_enabled(self, key: str) -> bool:
tp = self._mapping[key]
return not isinstance(tp, Exception)
def load(self, entry_point_key: str) -> None:
entry_point: entrypoints.EntryPoint
for entry_point in entrypoints.get_group_all(entry_point_key):
name = entry_point.name
try:
plugin_class = entry_point.load()
except (AssertionError, ModuleNotFoundError, ImportError) as e:
self.register_disabled(name, e)
continue
self.register(name, plugin_class)
@property
def mapping(self):
return self._mapping
def get(self, key: str) -> Type[T]:
if key.find(".") >= 0:
            # If the key contains a dot, we treat it as an import path and attempt
# to load it dynamically.
MyClass = import_key(key)
self._check_cls(MyClass)
return MyClass
if key not in self._mapping:
raise KeyError(f"Did not find a registered class for {key}")
tp = self._mapping[key]
if isinstance(tp, ModuleNotFoundError):
raise ConfigurationError(
f"{key} is disabled; try running: pip install '{__package_name__}[{key}]'"
) from tp
elif isinstance(tp, Exception):
raise ConfigurationError(
f"{key} is disabled due to an error in initialization"
) from tp
else:
# If it's not an exception, then it's a registered type.
return tp
def summary(self, verbose=True, col_width=15, verbose_col_width=20):
lines = []
for key in sorted(self._mapping.keys()):
line = f"{key}"
if not self.is_enabled(key):
# Plugin is disabled.
line += " " * (col_width - len(key))
details = "(disabled)"
if verbose:
details += " " * (verbose_col_width - len(details))
details += repr(self._mapping[key])
line += details
elif verbose:
# Plugin is enabled.
line += " " * (col_width - len(key))
line += self.get(key).__name__
lines.append(line)
return "\n".join(lines)
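
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). `Source` and
# `CsvSource` are hypothetical stand-ins for a real plugin base class and a
# concrete plugin; the registry calls below follow the methods defined above.
if __name__ == "__main__":

    class Source:
        """Hypothetical plugin base type."""

    class CsvSource(Source):
        """Hypothetical concrete plugin."""

    registry = Registry[Source]()
    registry.register("csv", CsvSource)
    assert registry.get("csv") is CsvSource
    assert registry.is_enabled("csv")
    print(registry.summary())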
|
[
"entrypoints.get_group_all",
"importlib.import_module",
"typing_inspect.get_generic_type",
"inspect.isabstract",
"datahub.configuration.common.ConfigurationError",
"typing_inspect.get_args",
"typing.TypeVar"
] |
[((237, 249), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (244, 249), False, 'from typing import Any, Dict, Generic, Type, TypeVar, Union\n'), ((405, 441), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (428, 441), False, 'import importlib\n'), ((651, 688), 'typing_inspect.get_generic_type', 'typing_inspect.get_generic_type', (['self'], {}), '(self)\n', (682, 688), False, 'import typing_inspect\n'), ((812, 835), 'inspect.isabstract', 'inspect.isabstract', (['cls'], {}), '(cls)\n', (830, 835), False, 'import inspect\n'), ((1889, 1931), 'entrypoints.get_group_all', 'entrypoints.get_group_all', (['entry_point_key'], {}), '(entry_point_key)\n', (1914, 1931), False, 'import entrypoints\n'), ((702, 730), 'typing_inspect.get_args', 'typing_inspect.get_args', (['cls'], {}), '(cls)\n', (725, 730), False, 'import typing_inspect\n'), ((2803, 2902), 'datahub.configuration.common.ConfigurationError', 'ConfigurationError', (['f"""{key} is disabled; try running: pip install \'{__package_name__}[{key}]\'"""'], {}), '(\n f"{key} is disabled; try running: pip install \'{__package_name__}[{key}]\'")\n', (2821, 2902), False, 'from datahub.configuration.common import ConfigurationError\n'), ((2994, 3068), 'datahub.configuration.common.ConfigurationError', 'ConfigurationError', (['f"""{key} is disabled due to an error in initialization"""'], {}), "(f'{key} is disabled due to an error in initialization')\n", (3012, 3068), False, 'from datahub.configuration.common import ConfigurationError\n')]
|
#! /usr/bin/python3
import requests
from base64 import b64encode
import json
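# Exercises the isitanime API three ways: posting a base64-encoded image as the
# request body (PNG, then JPEG), passing an image URL as a query parameter, and
# finally requesting a classification with the key returned by the URL call.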
with open('test.png', 'rb') as in_f:
encode_png = b64encode(in_f.read())
with open('test.jpg', 'rb') as in_f:
encode_jpg = b64encode(in_f.read())
url = 'https://api.isitanime.website/isitanime'
r = requests.post(url, data=encode_png)
print(r.text)
r = requests.post(url, data=encode_jpg)
print(r.text)
r = requests.post(url, params={'url': 'http://safebooru.org/includes/header.png'})
print(r.text)
j = json.loads(r.text)
r = requests.post(url, params={'classify': 'anime', 'key': j['key']})
print(r.text)
|
[
"requests.post",
"json.loads"
] |
[((287, 322), 'requests.post', 'requests.post', (['url'], {'data': 'encode_png'}), '(url, data=encode_png)\n', (300, 322), False, 'import requests\n'), ((342, 377), 'requests.post', 'requests.post', (['url'], {'data': 'encode_jpg'}), '(url, data=encode_jpg)\n', (355, 377), False, 'import requests\n'), ((397, 475), 'requests.post', 'requests.post', (['url'], {'params': "{'url': 'http://safebooru.org/includes/header.png'}"}), "(url, params={'url': 'http://safebooru.org/includes/header.png'})\n", (410, 475), False, 'import requests\n'), ((495, 513), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (505, 513), False, 'import json\n'), ((518, 583), 'requests.post', 'requests.post', (['url'], {'params': "{'classify': 'anime', 'key': j['key']}"}), "(url, params={'classify': 'anime', 'key': j['key']})\n", (531, 583), False, 'import requests\n')]
|
from pprint import pprint
from Interpretation_parser import InterpretationParser
ip = InterpretationParser(None)
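# The entity dicts below follow the Rasa CRFEntityExtractor output format
# (entity label, character span, value, confidence); fill_in_components is
# expected to assemble them into the components of the given query intent.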
entities = [{'confidence_entity': 0.9979640530540691,
'end': 18,
'entity': 'attribute',
'extractor': 'CRFEntityExtractor',
'start': 2,
'value': 'chemical formula'},
{'confidence_entity': 0.9999196831244241,
'end': 29,
'entity': 'class',
'extractor': 'CRFEntityExtractor',
'start': 22,
'value': 'alkanol'},
{'confidence_entity': 0.9999886772252127,
'end': 48,
'entity': 'attribute',
'extractor': 'CRFEntityExtractor',
'start': 35,
'value': 'heat capacity'},
{'confidence_entity': 0.9999800615698268,
'end': 58,
'entity': 'comparison',
'extractor': 'CRFEntityExtractor',
'start': 49,
'value': 'less than'},
{'confidence_entity': 0.9999210654370583,
'end': 61,
'entity': 'number',
'extractor': 'CRFEntityExtractor',
'start': 59,
'value': '15'}]
rst = ip.fill_in_components('batch_attribute_query', entities)
print('======================================')
pprint(rst)
print('======================================')
entities = [{'confidence_entity': 0.9999900483771906,
'end': 8,
'entity': 'attribute',
'extractor': 'CRFEntityExtractor',
'start': 0,
'value': 'geometry'},
{'confidence_entity': 0.989110556379127,
'end': 12,
'entity': 'species',
'extractor': 'CRFEntityExtractor',
'start': 9,
'value': 'ch4'}]
intent = 'item_attribute_query'
rst = ip.fill_in_components(intent, entities)
print('======================================')
pprint(rst)
print('======================================')
|
[
"pprint.pprint",
"Interpretation_parser.InterpretationParser"
] |
[((88, 114), 'Interpretation_parser.InterpretationParser', 'InterpretationParser', (['None'], {}), '(None)\n', (108, 114), False, 'from Interpretation_parser import InterpretationParser\n'), ((1023, 1034), 'pprint.pprint', 'pprint', (['rst'], {}), '(rst)\n', (1029, 1034), False, 'from pprint import pprint\n'), ((1527, 1538), 'pprint.pprint', 'pprint', (['rst'], {}), '(rst)\n', (1533, 1538), False, 'from pprint import pprint\n')]
|
import tensorflow as tf
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.convolutional.message_passing import MessagePassing
class AGNNConv(MessagePassing):
r"""
An Attention-based Graph Neural Network (AGNN) as presented by
[Thekumparampil et al. (2018)](https://arxiv.org/abs/1803.03735).
**Mode**: single, disjoint.
**This layer expects a sparse adjacency matrix.**
This layer computes:
$$
\Z = \P\X
$$
where
$$
\P_{ij} = \frac{
\exp \left( \beta \cos \left( \X_i, \X_j \right) \right)
}{
\sum\limits_{k \in \mathcal{N}(i) \cup \{ i \}}
\exp \left( \beta \cos \left( \X_i, \X_k \right) \right)
}
$$
and \(\beta\) is a trainable parameter.
**Input**
- Node features of shape `(N, F)`;
- Binary adjacency matrix of shape `(N, N)`.
**Output**
- Node features with the same shape of the input.
**Arguments**
- `trainable`: boolean, if True, then beta is a trainable parameter.
Otherwise, beta is fixed to 1;
- `activation`: activation function to use;
"""
def __init__(self, trainable=True, activation=None, **kwargs):
super().__init__(aggregate='sum', activation=activation, **kwargs)
self.trainable = trainable
def build(self, input_shape):
assert len(input_shape) >= 2
if self.trainable:
self.beta = self.add_weight(shape=(1,), initializer='ones', name='beta')
else:
self.beta = K.constant(1.)
self.built = True
def call(self, inputs, **kwargs):
X, A, E = self.get_inputs(inputs)
X_norm = K.l2_normalize(X, axis=-1)
output = self.propagate(X, A, E, X_norm=X_norm)
output = self.activation(output)
return output
def message(self, X, X_norm=None):
X_j = self.get_j(X)
X_norm_i = self.get_i(X_norm)
X_norm_j = self.get_j(X_norm)
alpha = self.beta * tf.reduce_sum(X_norm_i * X_norm_j, axis=-1)
alpha = ops.unsorted_segment_softmax(alpha, self.index_i, self.N)
alpha = alpha[:, None]
return alpha * X_j
def get_config(self):
config = {
'trainable': self.trainable,
}
base_config = super().get_config()
base_config.pop('aggregate') # Remove it because it's defined by constructor
return {**base_config, **config}
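
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), assuming single mode:
# node features of shape (N, F) plus a sparse binary adjacency matrix, as
# described in the docstring above. The toy 4-node cycle graph is illustrative.
if __name__ == '__main__':
    x = tf.random.uniform((4, 3))
    a = tf.sparse.from_dense(tf.constant([[0., 1., 0., 1.],
                                      [1., 0., 1., 0.],
                                      [0., 1., 0., 1.],
                                      [1., 0., 1., 0.]]))
    out = AGNNConv()([x, a])
    print(out.shape)  # expected to match the input node features: (4, 3)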
|
[
"tensorflow.keras.backend.constant",
"tensorflow.keras.backend.l2_normalize",
"spektral.layers.ops.unsorted_segment_softmax",
"tensorflow.reduce_sum"
] |
[((1708, 1734), 'tensorflow.keras.backend.l2_normalize', 'K.l2_normalize', (['X'], {'axis': '(-1)'}), '(X, axis=-1)\n', (1722, 1734), True, 'from tensorflow.keras import backend as K\n'), ((2087, 2144), 'spektral.layers.ops.unsorted_segment_softmax', 'ops.unsorted_segment_softmax', (['alpha', 'self.index_i', 'self.N'], {}), '(alpha, self.index_i, self.N)\n', (2115, 2144), False, 'from spektral.layers import ops\n'), ((1569, 1584), 'tensorflow.keras.backend.constant', 'K.constant', (['(1.0)'], {}), '(1.0)\n', (1579, 1584), True, 'from tensorflow.keras import backend as K\n'), ((2027, 2070), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(X_norm_i * X_norm_j)'], {'axis': '(-1)'}), '(X_norm_i * X_norm_j, axis=-1)\n', (2040, 2070), True, 'import tensorflow as tf\n')]
|
# -*- coding: UTF-8 -*-
#
# copyright: 2020-2022, <NAME>
# author: <NAME> <http://github.com/fscm>
# license: SPDX-License-Identifier: MIT
"""Tests for the Leu currency representation(s)."""
from decimal import Context
from pytest import raises
from multicurrency import Currency
from multicurrency import (
CurrencyMismatchException,
CurrencyTypeException)
CONTEXT = Context(prec=28, rounding='ROUND_HALF_EVEN').copy()
"""Tests for the Moldovan Leu representation."""
from multicurrency import MoldovanLeu
class TestMoldovanLeu:
"""MoldovanLeu currency tests."""
def test_moldovan_leu(self):
"""test_moldovan_leu."""
amount = CONTEXT.create_decimal(1) / CONTEXT.create_decimal(7)
moldovan_leu = MoldovanLeu(amount=amount)
decimal = CONTEXT.create_decimal(amount)
assert moldovan_leu.amount == decimal
assert moldovan_leu.numeric_code == '498'
assert moldovan_leu.alpha_code == 'MDL'
assert moldovan_leu.decimal_places == 2
assert moldovan_leu.decimal_sign == ','
assert moldovan_leu.grouping_places == 3
assert moldovan_leu.grouping_sign == '.'
assert not moldovan_leu.international
assert moldovan_leu.symbol == 'L'
assert not moldovan_leu.symbol_ahead
assert moldovan_leu.symbol_separator == '\u00A0'
assert moldovan_leu.localized_symbol == 'L'
assert moldovan_leu.convertion == ''
assert moldovan_leu.__hash__() == hash(
(moldovan_leu.__class__, decimal, 'MDL', '498'))
assert moldovan_leu.__repr__() == (
'MoldovanLeu(amount: 0.1428571428571428571428571429, '
'alpha_code: "MDL", '
'symbol: "L", '
'symbol_ahead: False, '
'symbol_separator: "\u00A0", '
'localized_symbol: "L", '
'numeric_code: "498", '
'decimal_places: "2", '
'decimal_sign: ",", '
'grouping_places: "3", '
'grouping_sign: ".", '
'convertion: "", '
'international: False)')
assert moldovan_leu.__str__() == '0,14 L'
def test_moldovan_leu_negative(self):
"""test_moldovan_leu_negative."""
amount = -100
moldovan_leu = MoldovanLeu(amount=amount)
decimal = CONTEXT.create_decimal(amount)
assert moldovan_leu.numeric_code == '498'
assert moldovan_leu.alpha_code == 'MDL'
assert moldovan_leu.decimal_places == 2
assert moldovan_leu.decimal_sign == ','
assert moldovan_leu.grouping_places == 3
assert moldovan_leu.grouping_sign == '.'
assert not moldovan_leu.international
assert moldovan_leu.symbol == 'L'
assert not moldovan_leu.symbol_ahead
assert moldovan_leu.symbol_separator == '\u00A0'
assert moldovan_leu.localized_symbol == 'L'
assert moldovan_leu.convertion == ''
assert moldovan_leu.__hash__() == hash(
(moldovan_leu.__class__, decimal, 'MDL', '498'))
assert moldovan_leu.__repr__() == (
'MoldovanLeu(amount: -100, '
'alpha_code: "MDL", '
'symbol: "L", '
'symbol_ahead: False, '
'symbol_separator: "\u00A0", '
'localized_symbol: "L", '
'numeric_code: "498", '
'decimal_places: "2", '
'decimal_sign: ",", '
'grouping_places: "3", '
'grouping_sign: ".", '
'convertion: "", '
'international: False)')
assert moldovan_leu.__str__() == '-100,00 L'
def test_moldovan_leu_custom(self):
"""test_moldovan_leu_custom."""
amount = 1000
moldovan_leu = MoldovanLeu(
amount=amount,
decimal_places=5,
decimal_sign='.',
grouping_places=2,
grouping_sign=',',
international=True,
symbol_ahead=False,
symbol_separator='_')
decimal = CONTEXT.create_decimal(amount)
assert moldovan_leu.amount == decimal
assert moldovan_leu.numeric_code == '498'
assert moldovan_leu.alpha_code == 'MDL'
assert moldovan_leu.decimal_places == 5
assert moldovan_leu.decimal_sign == '.'
assert moldovan_leu.grouping_places == 2
assert moldovan_leu.grouping_sign == ','
assert moldovan_leu.international
assert moldovan_leu.symbol == 'L'
assert not moldovan_leu.symbol_ahead
assert moldovan_leu.symbol_separator == '_'
assert moldovan_leu.localized_symbol == 'L'
assert moldovan_leu.convertion == ''
assert moldovan_leu.__hash__() == hash(
(moldovan_leu.__class__, decimal, 'MDL', '498'))
assert moldovan_leu.__repr__() == (
'MoldovanLeu(amount: 1000, '
'alpha_code: "MDL", '
'symbol: "L", '
'symbol_ahead: False, '
'symbol_separator: "_", '
'localized_symbol: "L", '
'numeric_code: "498", '
'decimal_places: "5", '
'decimal_sign: ".", '
'grouping_places: "2", '
'grouping_sign: ",", '
'convertion: "", '
'international: True)')
assert moldovan_leu.__str__() == 'MDL 10,00.00000'
def test_moldovan_leu_changed(self):
"""test_cmoldovan_leu_changed."""
moldovan_leu = MoldovanLeu(amount=1000)
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.amount = 999
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.alpha_code = 'EUR'
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.convertion = '0123456789,.'
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.symbol = '€'
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.symbol_ahead = False
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.symbol_separator = '_'
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.localized_symbol = '€'
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.numeric_code = '978'
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.decimal_places = 3
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.decimal_sign = ','
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.grouping_places = 4
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.grouping_sign = '.'
with raises(
AttributeError,
match='can\'t set attribute'):
moldovan_leu.international = True
def test_moldovan_leu_math_add(self):
"""test_moldovan_leu_math_add."""
moldovan_leu_one = MoldovanLeu(amount=1)
moldovan_leu_two = MoldovanLeu(amount=2)
moldovan_leu_three = MoldovanLeu(amount=3)
currency = Currency(amount=1, alpha_code='OTHER')
with raises(
CurrencyMismatchException,
match='unsupported operation between currency MDL and OTHER.'):
_ = moldovan_leu_one + currency
with raises(
CurrencyTypeException,
match=(
'unsupported operation between <class \'multicurrency.'
'leu.MoldovanLeu\'> '
'and <class \'str\'>.')):
_ = moldovan_leu_one.__add__('1.00')
assert (
moldovan_leu_one +
moldovan_leu_two) == moldovan_leu_three
def test_moldovan_leu_slots(self):
"""test_moldovan_leu_slots."""
moldovan_leu = MoldovanLeu(amount=1000)
with raises(
AttributeError,
match=(
'\'MoldovanLeu\' '
'object has no attribute \'new_variable\'')):
moldovan_leu.new_variable = 'fail' # pylint: disable=assigning-non-slot
"""Tests for the Leu representation."""
from multicurrency import Leu
class TestLeu:
"""Leu currency tests."""
def test_leu(self):
"""test_leu."""
amount = CONTEXT.create_decimal(1) / CONTEXT.create_decimal(7)
leu = Leu(amount=amount)
decimal = CONTEXT.create_decimal(amount)
assert leu.amount == decimal
assert leu.numeric_code == '946'
assert leu.alpha_code == 'RON'
assert leu.decimal_places == 2
assert leu.decimal_sign == ','
assert leu.grouping_places == 3
assert leu.grouping_sign == '.'
assert not leu.international
assert leu.symbol == 'L'
assert not leu.symbol_ahead
assert leu.symbol_separator == '\u00A0'
assert leu.localized_symbol == 'L'
assert leu.convertion == ''
assert leu.__hash__() == hash(
(leu.__class__, decimal, 'RON', '946'))
assert leu.__repr__() == (
'Leu(amount: 0.1428571428571428571428571429, '
'alpha_code: "RON", '
'symbol: "L", '
'symbol_ahead: False, '
'symbol_separator: "\u00A0", '
'localized_symbol: "L", '
'numeric_code: "946", '
'decimal_places: "2", '
'decimal_sign: ",", '
'grouping_places: "3", '
'grouping_sign: ".", '
'convertion: "", '
'international: False)')
assert leu.__str__() == '0,14 L'
def test_leu_negative(self):
"""test_leu_negative."""
amount = -100
leu = Leu(amount=amount)
decimal = CONTEXT.create_decimal(amount)
assert leu.numeric_code == '946'
assert leu.alpha_code == 'RON'
assert leu.decimal_places == 2
assert leu.decimal_sign == ','
assert leu.grouping_places == 3
assert leu.grouping_sign == '.'
assert not leu.international
assert leu.symbol == 'L'
assert not leu.symbol_ahead
assert leu.symbol_separator == '\u00A0'
assert leu.localized_symbol == 'L'
assert leu.convertion == ''
assert leu.__hash__() == hash(
(leu.__class__, decimal, 'RON', '946'))
assert leu.__repr__() == (
'Leu(amount: -100, '
'alpha_code: "RON", '
'symbol: "L", '
'symbol_ahead: False, '
'symbol_separator: "\u00A0", '
'localized_symbol: "L", '
'numeric_code: "946", '
'decimal_places: "2", '
'decimal_sign: ",", '
'grouping_places: "3", '
'grouping_sign: ".", '
'convertion: "", '
'international: False)')
assert leu.__str__() == '-100,00 L'
def test_leu_custom(self):
"""test_leu_custom."""
amount = 1000
leu = Leu(
amount=amount,
decimal_places=5,
decimal_sign='.',
grouping_places=2,
grouping_sign=',',
international=True,
symbol_ahead=False,
symbol_separator='_')
decimal = CONTEXT.create_decimal(amount)
assert leu.amount == decimal
assert leu.numeric_code == '946'
assert leu.alpha_code == 'RON'
assert leu.decimal_places == 5
assert leu.decimal_sign == '.'
assert leu.grouping_places == 2
assert leu.grouping_sign == ','
assert leu.international
assert leu.symbol == 'L'
assert not leu.symbol_ahead
assert leu.symbol_separator == '_'
assert leu.localized_symbol == 'L'
assert leu.convertion == ''
assert leu.__hash__() == hash(
(leu.__class__, decimal, 'RON', '946'))
assert leu.__repr__() == (
'Leu(amount: 1000, '
'alpha_code: "RON", '
'symbol: "L", '
'symbol_ahead: False, '
'symbol_separator: "_", '
'localized_symbol: "L", '
'numeric_code: "946", '
'decimal_places: "5", '
'decimal_sign: ".", '
'grouping_places: "2", '
'grouping_sign: ",", '
'convertion: "", '
'international: True)')
assert leu.__str__() == 'RON 10,00.00000'
def test_leu_changed(self):
"""test_cleu_changed."""
leu = Leu(amount=1000)
with raises(
AttributeError,
match='can\'t set attribute'):
leu.amount = 999
with raises(
AttributeError,
match='can\'t set attribute'):
leu.alpha_code = 'EUR'
with raises(
AttributeError,
match='can\'t set attribute'):
leu.convertion = '0123456789,.'
with raises(
AttributeError,
match='can\'t set attribute'):
leu.symbol = '€'
with raises(
AttributeError,
match='can\'t set attribute'):
leu.symbol_ahead = False
with raises(
AttributeError,
match='can\'t set attribute'):
leu.symbol_separator = '_'
with raises(
AttributeError,
match='can\'t set attribute'):
leu.localized_symbol = '€'
with raises(
AttributeError,
match='can\'t set attribute'):
leu.numeric_code = '978'
with raises(
AttributeError,
match='can\'t set attribute'):
leu.decimal_places = 3
with raises(
AttributeError,
match='can\'t set attribute'):
leu.decimal_sign = ','
with raises(
AttributeError,
match='can\'t set attribute'):
leu.grouping_places = 4
with raises(
AttributeError,
match='can\'t set attribute'):
leu.grouping_sign = '.'
with raises(
AttributeError,
match='can\'t set attribute'):
leu.international = True
def test_leu_math_add(self):
"""test_leu_math_add."""
leu_one = Leu(amount=1)
leu_two = Leu(amount=2)
leu_three = Leu(amount=3)
currency = Currency(amount=1, alpha_code='OTHER')
with raises(
CurrencyMismatchException,
match='unsupported operation between currency RON and OTHER.'):
_ = leu_one + currency
with raises(
CurrencyTypeException,
match=(
'unsupported operation between <class \'multicurrency.'
'leu.Leu\'> '
'and <class \'str\'>.')):
_ = leu_one.__add__('1.00')
assert (
leu_one +
leu_two) == leu_three
def test_leu_slots(self):
"""test_leu_slots."""
leu = Leu(amount=1000)
with raises(
AttributeError,
match=(
'\'Leu\' '
'object has no attribute \'new_variable\'')):
leu.new_variable = 'fail' # pylint: disable=assigning-non-slot
|
[
"decimal.Context",
"multicurrency.Currency",
"pytest.raises",
"multicurrency.MoldovanLeu",
"multicurrency.Leu"
] |
[((380, 424), 'decimal.Context', 'Context', ([], {'prec': '(28)', 'rounding': '"""ROUND_HALF_EVEN"""'}), "(prec=28, rounding='ROUND_HALF_EVEN')\n", (387, 424), False, 'from decimal import Context\n'), ((746, 772), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': 'amount'}), '(amount=amount)\n', (757, 772), False, 'from multicurrency import MoldovanLeu\n'), ((2272, 2298), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': 'amount'}), '(amount=amount)\n', (2283, 2298), False, 'from multicurrency import MoldovanLeu\n'), ((3725, 3896), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': 'amount', 'decimal_places': '(5)', 'decimal_sign': '"""."""', 'grouping_places': '(2)', 'grouping_sign': '""","""', 'international': '(True)', 'symbol_ahead': '(False)', 'symbol_separator': '"""_"""'}), "(amount=amount, decimal_places=5, decimal_sign='.',\n grouping_places=2, grouping_sign=',', international=True, symbol_ahead=\n False, symbol_separator='_')\n", (3736, 3896), False, 'from multicurrency import MoldovanLeu\n'), ((5429, 5453), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': '(1000)'}), '(amount=1000)\n', (5440, 5453), False, 'from multicurrency import MoldovanLeu\n'), ((7451, 7472), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': '(1)'}), '(amount=1)\n', (7462, 7472), False, 'from multicurrency import MoldovanLeu\n'), ((7500, 7521), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': '(2)'}), '(amount=2)\n', (7511, 7521), False, 'from multicurrency import MoldovanLeu\n'), ((7551, 7572), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': '(3)'}), '(amount=3)\n', (7562, 7572), False, 'from multicurrency import MoldovanLeu\n'), ((7592, 7630), 'multicurrency.Currency', 'Currency', ([], {'amount': '(1)', 'alpha_code': '"""OTHER"""'}), "(amount=1, alpha_code='OTHER')\n", (7600, 7630), False, 'from multicurrency import Currency\n'), ((8318, 8342), 'multicurrency.MoldovanLeu', 'MoldovanLeu', ([], {'amount': '(1000)'}), '(amount=1000)\n', (8329, 8342), False, 'from multicurrency import MoldovanLeu\n'), ((8864, 8882), 'multicurrency.Leu', 'Leu', ([], {'amount': 'amount'}), '(amount=amount)\n', (8867, 8882), False, 'from multicurrency import Leu\n'), ((10194, 10212), 'multicurrency.Leu', 'Leu', ([], {'amount': 'amount'}), '(amount=amount)\n', (10197, 10212), False, 'from multicurrency import Leu\n'), ((11460, 11622), 'multicurrency.Leu', 'Leu', ([], {'amount': 'amount', 'decimal_places': '(5)', 'decimal_sign': '"""."""', 'grouping_places': '(2)', 'grouping_sign': '""","""', 'international': '(True)', 'symbol_ahead': '(False)', 'symbol_separator': '"""_"""'}), "(amount=amount, decimal_places=5, decimal_sign='.', grouping_places=2,\n grouping_sign=',', international=True, symbol_ahead=False,\n symbol_separator='_')\n", (11463, 11622), False, 'from multicurrency import Leu\n'), ((12968, 12984), 'multicurrency.Leu', 'Leu', ([], {'amount': '(1000)'}), '(amount=1000)\n', (12971, 12984), False, 'from multicurrency import Leu\n'), ((14838, 14851), 'multicurrency.Leu', 'Leu', ([], {'amount': '(1)'}), '(amount=1)\n', (14841, 14851), False, 'from multicurrency import Leu\n'), ((14870, 14883), 'multicurrency.Leu', 'Leu', ([], {'amount': '(2)'}), '(amount=2)\n', (14873, 14883), False, 'from multicurrency import Leu\n'), ((14904, 14917), 'multicurrency.Leu', 'Leu', ([], {'amount': '(3)'}), '(amount=3)\n', (14907, 14917), False, 'from multicurrency import Leu\n'), ((14937, 14975), 'multicurrency.Currency', 'Currency', ([], {'amount': '(1)', 'alpha_code': 
'"""OTHER"""'}), "(amount=1, alpha_code='OTHER')\n", (14945, 14975), False, 'from multicurrency import Currency\n'), ((15583, 15599), 'multicurrency.Leu', 'Leu', ([], {'amount': '(1000)'}), '(amount=1000)\n', (15586, 15599), False, 'from multicurrency import Leu\n'), ((5467, 5518), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (5473, 5518), False, 'from pytest import raises\n'), ((5605, 5656), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (5611, 5656), False, 'from pytest import raises\n'), ((5749, 5800), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (5755, 5800), False, 'from pytest import raises\n'), ((5902, 5953), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (5908, 5953), False, 'from pytest import raises\n'), ((6040, 6091), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (6046, 6091), False, 'from pytest import raises\n'), ((6186, 6237), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (6192, 6237), False, 'from pytest import raises\n'), ((6334, 6385), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (6340, 6385), False, 'from pytest import raises\n'), ((6482, 6533), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (6488, 6533), False, 'from pytest import raises\n'), ((6628, 6679), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (6634, 6679), False, 'from pytest import raises\n'), ((6772, 6823), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (6778, 6823), False, 'from pytest import raises\n'), ((6916, 6967), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (6922, 6967), False, 'from pytest import raises\n'), ((7061, 7112), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (7067, 7112), False, 'from pytest import raises\n'), ((7206, 7257), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (7212, 7257), False, 'from pytest import raises\n'), ((7644, 7745), 'pytest.raises', 'raises', (['CurrencyMismatchException'], {'match': '"""unsupported operation between currency MDL and OTHER."""'}), "(CurrencyMismatchException, match=\n 'unsupported operation between currency MDL and OTHER.')\n", (7650, 7745), False, 'from pytest import raises\n'), ((7832, 7969), 'pytest.raises', 'raises', (['CurrencyTypeException'], {'match': '"""unsupported operation between <class \'multicurrency.leu.MoldovanLeu\'> and <class \'str\'>."""'}), '(CurrencyTypeException, match=\n "unsupported operation between <class 
\'multicurrency.leu.MoldovanLeu\'> and <class \'str\'>."\n )\n', (7838, 7969), False, 'from pytest import raises\n'), ((8356, 8445), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""\'MoldovanLeu\' object has no attribute \'new_variable\'"""'}), '(AttributeError, match=\n "\'MoldovanLeu\' object has no attribute \'new_variable\'")\n', (8362, 8445), False, 'from pytest import raises\n'), ((12998, 13049), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13004, 13049), False, 'from pytest import raises\n'), ((13127, 13178), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13133, 13178), False, 'from pytest import raises\n'), ((13262, 13313), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13268, 13313), False, 'from pytest import raises\n'), ((13406, 13457), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13412, 13457), False, 'from pytest import raises\n'), ((13535, 13586), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13541, 13586), False, 'from pytest import raises\n'), ((13672, 13723), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13678, 13723), False, 'from pytest import raises\n'), ((13811, 13862), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13817, 13862), False, 'from pytest import raises\n'), ((13950, 14001), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (13956, 14001), False, 'from pytest import raises\n'), ((14087, 14138), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (14093, 14138), False, 'from pytest import raises\n'), ((14222, 14273), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (14228, 14273), False, 'from pytest import raises\n'), ((14357, 14408), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (14363, 14408), False, 'from pytest import raises\n'), ((14493, 14544), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (14499, 14544), False, 'from pytest import raises\n'), ((14629, 14680), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (14635, 14680), False, 'from pytest import raises\n'), ((14989, 15090), 'pytest.raises', 'raises', (['CurrencyMismatchException'], {'match': '"""unsupported operation between currency RON and OTHER."""'}), "(CurrencyMismatchException, match=\n 'unsupported operation between currency RON and OTHER.')\n", (14995, 15090), False, 'from pytest import raises\n'), ((15168, 15297), 'pytest.raises', 'raises', 
(['CurrencyTypeException'], {'match': '"""unsupported operation between <class \'multicurrency.leu.Leu\'> and <class \'str\'>."""'}), '(CurrencyTypeException, match=\n "unsupported operation between <class \'multicurrency.leu.Leu\'> and <class \'str\'>."\n )\n', (15174, 15297), False, 'from pytest import raises\n'), ((15613, 15689), 'pytest.raises', 'raises', (['AttributeError'], {'match': '"""\'Leu\' object has no attribute \'new_variable\'"""'}), '(AttributeError, match="\'Leu\' object has no attribute \'new_variable\'")\n', (15619, 15689), False, 'from pytest import raises\n')]
|
#!/usr/bin/env python
# Copyright 2019-2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script tests the particle scraping for the embedded boundary in RZ.
# Particles are initialized between r=0.15 and r=0.2
# having a negative radial velocity.
# A cylindrical embedded surface is placed at r=0.1.
# Upon reaching the surface, particles should be removed.
# At the end of the simulation, i.e., at time step 37,
# there should be 512 particles left.
# In addition, the test checks the boundary scraping diagnostic
# by making sure that all removed particles are properly recorded.
# Possible errors: 0
# tolerance: 0
# Possible running time: < 1 s
import os
import sys
import numpy as np
from openpmd_viewer import OpenPMDTimeSeries
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
tolerance = 0
fn = sys.argv[1]
ds = yt.load( fn )
ad = ds.all_data()
x = ad['electron', 'particle_position_x'].v
error = len(x)-512
print('error = ', error)
print('tolerance = ', tolerance)
assert(error==tolerance)
# Check that all the removed particles are properly recorded
# by making sure that, at each iteration, the sum of the number of
# remaining particles and scraped particles is equal to the
# original number of particles
ts_full = OpenPMDTimeSeries('./diags/diag2/')
ts_scraping = OpenPMDTimeSeries('./diags/diag3/')
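# diag2 is the regular particle output (particles still present at each
# iteration); diag3 is the boundary-scraping diagnostic, whose 'timestamp'
# records the iteration at which each scraped particle was removed.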
def n_remaining_particles( iteration ):
w, = ts_full.get_particle(['w'], iteration=iteration)
return len(w)
def n_scraped_particles( iteration ):
timestamp = ts_scraping.get_particle( ['timestamp'] )
return (timestamp <= iteration).sum()
n_remaining = np.array([ n_remaining_particles(iteration) for iteration in ts_full.iterations ])
n_scraped = np.array([ n_scraped_particles(iteration) for iteration in ts_full.iterations ])
n_total = n_remaining[0]
assert np.all( n_scraped+n_remaining == n_total)
# Checksum test
test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn, do_particles=False)
|
[
"openpmd_viewer.OpenPMDTimeSeries",
"os.getcwd",
"sys.path.insert",
"checksumAPI.evaluate_checksum",
"yt.load",
"numpy.all"
] |
[((782, 842), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (797, 842), False, 'import sys\n'), ((900, 911), 'yt.load', 'yt.load', (['fn'], {}), '(fn)\n', (907, 911), False, 'import yt\n'), ((1310, 1345), 'openpmd_viewer.OpenPMDTimeSeries', 'OpenPMDTimeSeries', (['"""./diags/diag2/"""'], {}), "('./diags/diag2/')\n", (1327, 1345), False, 'from openpmd_viewer import OpenPMDTimeSeries\n'), ((1360, 1395), 'openpmd_viewer.OpenPMDTimeSeries', 'OpenPMDTimeSeries', (['"""./diags/diag3/"""'], {}), "('./diags/diag3/')\n", (1377, 1395), False, 'from openpmd_viewer import OpenPMDTimeSeries\n'), ((1873, 1915), 'numpy.all', 'np.all', (['(n_scraped + n_remaining == n_total)'], {}), '(n_scraped + n_remaining == n_total)\n', (1879, 1915), True, 'import numpy as np\n'), ((1974, 2038), 'checksumAPI.evaluate_checksum', 'checksumAPI.evaluate_checksum', (['test_name', 'fn'], {'do_particles': '(False)'}), '(test_name, fn, do_particles=False)\n', (2003, 2038), False, 'import checksumAPI\n'), ((1958, 1969), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1967, 1969), False, 'import os\n')]
|
import argparse
import json
import pickle
def prepare_annotations(annotations_per_image, images_info, net_input_size):
"""Prepare labels for training. For each annotated person calculates center
to perform crop around it during the training. Also converts data to the internal format.
:param annotations_per_image: all annotations for specified image id
:param images_info: auxiliary information about all images
:param net_input_size: network input size during training
:return: list of prepared annotations
"""
print('prepare_annotations\n')
prepared_annotations = []
for _, annotations in annotations_per_image.items():
previous_centers = []
for annotation in annotations[0]:
if (annotation['num_keypoints'] < 5
or annotation['area'] < 32 * 32):
print('[invalid] {} {}\n'.format(annotation['num_keypoints'], annotation['area']))
continue
person_center = [annotation['bbox'][0] + annotation['bbox'][2] / 2,
annotation['bbox'][1] + annotation['bbox'][3] / 2]
is_close = False
for previous_center in previous_centers:
distance_to_previous = ((person_center[0] - previous_center[0]) ** 2
+ (person_center[1] - previous_center[1]) ** 2) ** 0.5
if distance_to_previous < previous_center[2] * 0.3:
is_close = True
break
if is_close:
continue
prepared_annotation = {
'img_paths': images_info[annotation['image_id']]['file_name'],
'img_width': images_info[annotation['image_id']]['width'],
'img_height': images_info[annotation['image_id']]['height'],
'objpos': person_center,
'image_id': annotation['image_id'],
'bbox': annotation['bbox'],
'segment_area': annotation['area'],
'scale_provided': annotation['bbox'][3] / net_input_size,
'num_keypoints': annotation['num_keypoints'],
'segmentations': annotations[1]
}
keypoints = []
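            # Remap COCO-style visibility flags (0: not labeled, 1: labeled but
            # not visible, 2: labeled and visible) to the internal convention:
            # 1 -> 0, 2 -> 1, and unlabeled keypoints keep the default value 2.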
for i in range(len(annotation['keypoints']) // 3):
keypoint = [annotation['keypoints'][i * 3], annotation['keypoints'][i * 3 + 1], 2]
if annotation['keypoints'][i * 3 + 2] == 1:
keypoint[2] = 0
elif annotation['keypoints'][i * 3 + 2] == 2:
keypoint[2] = 1
keypoints.append(keypoint)
prepared_annotation['keypoints'] = keypoints
prepared_other_annotations = []
for other_annotation in annotations[0]:
if other_annotation == annotation:
continue
prepared_other_annotation = {
'objpos': [other_annotation['bbox'][0] + other_annotation['bbox'][2] / 2,
other_annotation['bbox'][1] + other_annotation['bbox'][3] / 2],
'bbox': other_annotation['bbox'],
'segment_area': other_annotation['area'],
'scale_provided': other_annotation['bbox'][3] / net_input_size,
'num_keypoints': other_annotation['num_keypoints']
}
keypoints = []
for i in range(len(other_annotation['keypoints']) // 3):
keypoint = [other_annotation['keypoints'][i * 3], other_annotation['keypoints'][i * 3 + 1], 2]
if other_annotation['keypoints'][i * 3 + 2] == 1:
keypoint[2] = 0
elif other_annotation['keypoints'][i * 3 + 2] == 2:
keypoint[2] = 1
keypoints.append(keypoint)
prepared_other_annotation['keypoints'] = keypoints
prepared_other_annotations.append(prepared_other_annotation)
prepared_annotation['processed_other_annotations'] = prepared_other_annotations
prepared_annotations.append(prepared_annotation)
previous_centers.append((person_center[0], person_center[1], annotation['bbox'][2], annotation['bbox'][3]))
return prepared_annotations
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--labels', type=str, required=True, help='path to json with keypoints train labels')
parser.add_argument('--output-name', type=str, default='prepared_train_annotation.pkl',
help='name of output file with prepared keypoints annotation')
parser.add_argument('--net-input-size', type=int, default=368, help='network input size')
args = parser.parse_args()
with open(args.labels, 'r') as f:
data = json.load(f)
annotations_per_image_mapping = {}
for annotation in data['annotations']:
if annotation['num_keypoints'] != 0 and not annotation['iscrowd']:
if annotation['image_id'] not in annotations_per_image_mapping:
annotations_per_image_mapping[annotation['image_id']] = [[], []]
annotations_per_image_mapping[annotation['image_id']][0].append(annotation)
crowd_segmentations_per_image_mapping = {}
for annotation in data['annotations']:
if annotation['iscrowd']:
#print('iscrowd\n')
if annotation['image_id'] not in crowd_segmentations_per_image_mapping:
crowd_segmentations_per_image_mapping[annotation['image_id']] = []
#print('not in crowd_segmentations_per_image_mapping\n')
crowd_segmentations_per_image_mapping[annotation['image_id']].append(annotation['segmentation'])
#print('annotation_segmentation: {}\n'.format(annotation['segmentation']))
for image_id, crowd_segmentations in crowd_segmentations_per_image_mapping.items():
if image_id in annotations_per_image_mapping:
annotations_per_image_mapping[image_id][1] = crowd_segmentations
#print('crowd_segmentations: {}\n'.format(crowd_segmentations))
#input("Press Enter to continue...")
print('set_images_info\n')
images_info = {}
for image_info in data['images']:
#print('add imageinfo: {} {}\n'.format(image_info['id'], image_info['file_name']))
images_info[image_info['id']] = image_info
prepared_annotations = prepare_annotations(annotations_per_image_mapping, images_info, args.net_input_size)
#print('prepared_annotations {}\n'.format(prepared_annotations))
with open(args.output_name, 'wb') as f:
pickle.dump(prepared_annotations, f)
|
[
"pickle.dump",
"json.load",
"argparse.ArgumentParser"
] |
[((4389, 4414), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4412, 4414), False, 'import argparse\n'), ((4882, 4894), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4891, 4894), False, 'import json\n'), ((6699, 6735), 'pickle.dump', 'pickle.dump', (['prepared_annotations', 'f'], {}), '(prepared_annotations, f)\n', (6710, 6735), False, 'import pickle\n')]
|
import ldap
from flask import current_app, jsonify, request
from flask_cors import cross_origin
from alerta.auth.utils import create_token, get_customers
from alerta.exceptions import ApiError
from alerta.models.permission import Permission
from alerta.models.user import User
from alerta.utils.audit import auth_audit_trail
from . import auth
@auth.route('/auth/login', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def login():
# Retrieve required fields from client request
try:
email = request.json.get('username', None) or request.json['email']
password = request.json['password']
except KeyError:
raise ApiError("must supply 'username' and 'password'", 401)
username = email.split('@')[0]
domain = email.split('@')[1]
# Validate LDAP domain
if domain not in current_app.config['LDAP_DOMAINS']:
raise ApiError('unauthorized domain', 403)
userdn = current_app.config['LDAP_DOMAINS'][domain] % username
# Attempt LDAP AUTH
try:
trace_level = 2 if current_app.debug else 0
ldap_connection = ldap.initialize(current_app.config['LDAP_URL'], trace_level=trace_level)
ldap_connection.simple_bind_s(userdn, password)
except ldap.INVALID_CREDENTIALS:
raise ApiError('invalid username or password', 401)
except Exception as e:
raise ApiError(str(e), 500)
# Create user if not yet there
user = User.find_by_email(email=email)
if not user:
user = User(username, email, '', ['user'], 'LDAP user', email_verified=True)
user.create()
# Check user is active
if user.status != 'active':
raise ApiError('user not active', 403)
# Assign customers & update last login time
groups = [user.domain]
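    # If the user's domain has entries in LDAP_DOMAINS_GROUP and
    # LDAP_DOMAINS_BASEDN, run an LDAP subtree search with the configured filter
    # template and append each result's cn to the groups used below for
    # customer and permission lookup.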
try:
groups_filters = current_app.config.get('LDAP_DOMAINS_GROUP', {})
base_dns = current_app.config.get('LDAP_DOMAINS_BASEDN', {})
if domain in groups_filters and domain in base_dns:
resultID = ldap_connection.search(
base_dns[domain],
ldap.SCOPE_SUBTREE,
groups_filters[domain].format(username=username, email=email, userdn=userdn),
['cn']
)
resultTypes, results = ldap_connection.result(resultID)
for _dn, attributes in results:
groups.append(attributes['cn'][0].decode('utf-8'))
except ldap.LDAPError as e:
raise ApiError(str(e), 500)
customers = get_customers(user.email, groups=groups)
user.update_last_login()
auth_audit_trail.send(current_app._get_current_object(), event='basic-ldap-login', message='user login via LDAP',
user=user.email, customers=customers, scopes=Permission.lookup(user.email, groups=user.roles),
resource_id=user.id, type='user', request=request)
# Generate token
token = create_token(user_id=user.id, name=user.name, login=user.email, provider='basic_ldap',
customers=customers, roles=user.roles, email=user.email, email_verified=user.email_verified)
return jsonify(token=token.tokenize)
|
[
"alerta.models.permission.Permission.lookup",
"flask.current_app.config.get",
"alerta.models.user.User",
"flask_cors.cross_origin",
"alerta.models.user.User.find_by_email",
"flask.current_app._get_current_object",
"alerta.auth.utils.create_token",
"flask.jsonify",
"alerta.auth.utils.get_customers",
"flask.request.json.get",
"alerta.exceptions.ApiError",
"ldap.initialize"
] |
[((406, 445), 'flask_cors.cross_origin', 'cross_origin', ([], {'supports_credentials': '(True)'}), '(supports_credentials=True)\n', (418, 445), False, 'from flask_cors import cross_origin\n'), ((1450, 1481), 'alerta.models.user.User.find_by_email', 'User.find_by_email', ([], {'email': 'email'}), '(email=email)\n', (1468, 1481), False, 'from alerta.models.user import User\n'), ((2513, 2553), 'alerta.auth.utils.get_customers', 'get_customers', (['user.email'], {'groups': 'groups'}), '(user.email, groups=groups)\n', (2526, 2553), False, 'from alerta.auth.utils import create_token, get_customers\n'), ((2934, 3122), 'alerta.auth.utils.create_token', 'create_token', ([], {'user_id': 'user.id', 'name': 'user.name', 'login': 'user.email', 'provider': '"""basic_ldap"""', 'customers': 'customers', 'roles': 'user.roles', 'email': 'user.email', 'email_verified': 'user.email_verified'}), "(user_id=user.id, name=user.name, login=user.email, provider=\n 'basic_ldap', customers=customers, roles=user.roles, email=user.email,\n email_verified=user.email_verified)\n", (2946, 3122), False, 'from alerta.auth.utils import create_token, get_customers\n'), ((3150, 3179), 'flask.jsonify', 'jsonify', ([], {'token': 'token.tokenize'}), '(token=token.tokenize)\n', (3157, 3179), False, 'from flask import current_app, jsonify, request\n'), ((897, 933), 'alerta.exceptions.ApiError', 'ApiError', (['"""unauthorized domain"""', '(403)'], {}), "('unauthorized domain', 403)\n", (905, 933), False, 'from alerta.exceptions import ApiError\n'), ((1114, 1186), 'ldap.initialize', 'ldap.initialize', (["current_app.config['LDAP_URL']"], {'trace_level': 'trace_level'}), "(current_app.config['LDAP_URL'], trace_level=trace_level)\n", (1129, 1186), False, 'import ldap\n'), ((1514, 1583), 'alerta.models.user.User', 'User', (['username', 'email', '""""""', "['user']", '"""LDAP user"""'], {'email_verified': '(True)'}), "(username, email, '', ['user'], 'LDAP user', email_verified=True)\n", (1518, 1583), False, 'from alerta.models.user import User\n'), ((1680, 1712), 'alerta.exceptions.ApiError', 'ApiError', (['"""user not active"""', '(403)'], {}), "('user not active', 403)\n", (1688, 1712), False, 'from alerta.exceptions import ApiError\n'), ((1823, 1871), 'flask.current_app.config.get', 'current_app.config.get', (['"""LDAP_DOMAINS_GROUP"""', '{}'], {}), "('LDAP_DOMAINS_GROUP', {})\n", (1845, 1871), False, 'from flask import current_app, jsonify, request\n'), ((1891, 1940), 'flask.current_app.config.get', 'current_app.config.get', (['"""LDAP_DOMAINS_BASEDN"""', '{}'], {}), "('LDAP_DOMAINS_BASEDN', {})\n", (1913, 1940), False, 'from flask import current_app, jsonify, request\n'), ((2610, 2643), 'flask.current_app._get_current_object', 'current_app._get_current_object', ([], {}), '()\n', (2641, 2643), False, 'from flask import current_app, jsonify, request\n'), ((535, 569), 'flask.request.json.get', 'request.json.get', (['"""username"""', 'None'], {}), "('username', None)\n", (551, 569), False, 'from flask import current_app, jsonify, request\n'), ((674, 728), 'alerta.exceptions.ApiError', 'ApiError', (['"""must supply \'username\' and \'password\'"""', '(401)'], {}), '("must supply \'username\' and \'password\'", 401)\n', (682, 728), False, 'from alerta.exceptions import ApiError\n'), ((1294, 1339), 'alerta.exceptions.ApiError', 'ApiError', (['"""invalid username or password"""', '(401)'], {}), "('invalid username or password', 401)\n", (1302, 1339), False, 'from alerta.exceptions import ApiError\n'), ((2773, 2821), 
'alerta.models.permission.Permission.lookup', 'Permission.lookup', (['user.email'], {'groups': 'user.roles'}), '(user.email, groups=user.roles)\n', (2790, 2821), False, 'from alerta.models.permission import Permission\n')]
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Planning and Learning Group"
__version__ = "1.0.1"
from game import Agent
from game import Directions
from game import GameStateData
import random
import sys
class RandomAgent(Agent):
"""
Random Agent
"""
def __init__( self, index = 0 ):
self.lastMove = Directions.STOP
        self.index = index
        # getMove() below checks membership in self.keys; initialize it to an
        # empty list so a purely random agent does not raise AttributeError.
        self.keys = []
def getAction( self, state):
legal = state.getLegalActions(self.index)
move = self.getMove(legal)
if move == Directions.STOP:
# Try to move in the same direction as before
if self.lastMove in legal:
move = self.lastMove
if move not in legal:
move = random.choice(legal)
self.lastMove = move
        ## Machine Learning
##print(str(state.data.food))
print(str(state.livingGhosts))
print(state.data.agentStates[0])
print(state.getNumFood())
print (state.getCapsules())
print(state.data.ghostDistances)
print(state.data)
width, height = state.data.layout.width, state.data.layout.height
print(width, height)
        ## Machine Learning
return move
def getMove(self, legal):
move = Directions.STOP
move_random = random.randint(0, 3)
if ( move_random == 0 or 'Left' in self.keys) and Directions.WEST in legal: move = Directions.WEST
if ( move_random == 1 or 'Right' in self.keys) and Directions.EAST in legal: move = Directions.EAST
if ( move_random == 2 or 'Up' in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
if ( move_random == 3 or 'Down' in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
|
[
"random.choice",
"random.randint"
] |
[((1308, 1328), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (1322, 1328), False, 'import random\n'), ((720, 740), 'random.choice', 'random.choice', (['legal'], {}), '(legal)\n', (733, 740), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: streamlit/proto/WidgetStates.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from streamlit.proto import Common_pb2 as streamlit_dot_proto_dot_Common__pb2
from streamlit.proto import Components_pb2 as streamlit_dot_proto_dot_Components__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='streamlit/proto/WidgetStates.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\"streamlit/proto/WidgetStates.proto\x1a\x1cstreamlit/proto/Common.proto\x1a streamlit/proto/Components.proto\"-\n\x0cWidgetStates\x12\x1d\n\x07widgets\x18\x01 \x03(\x0b\x32\x0c.WidgetState\"\xe8\x02\n\x0bWidgetState\x12\n\n\x02id\x18\x01 \x01(\t\x12\x17\n\rtrigger_value\x18\x02 \x01(\x08H\x00\x12\x14\n\nbool_value\x18\x03 \x01(\x08H\x00\x12\x16\n\x0c\x64ouble_value\x18\x04 \x01(\x01H\x00\x12\x13\n\tint_value\x18\x05 \x01(\x12H\x00\x12\x16\n\x0cstring_value\x18\x06 \x01(\tH\x00\x12*\n\x12\x64ouble_array_value\x18\x07 \x01(\x0b\x32\x0c.DoubleArrayH\x00\x12\'\n\x0fint_array_value\x18\x08 \x01(\x0b\x32\x0c.SInt64ArrayH\x00\x12*\n\x12string_array_value\x18\t \x01(\x0b\x32\x0c.StringArrayH\x00\x12\x14\n\njson_value\x18\n \x01(\tH\x00\x12\"\n\x0b\x61rrow_value\x18\x0b \x01(\x0b\x32\x0b.ArrowTableH\x00\x12\x15\n\x0b\x62ytes_value\x18\x0c \x01(\x0cH\x00\x42\x07\n\x05valueb\x06proto3'
,
dependencies=[streamlit_dot_proto_dot_Common__pb2.DESCRIPTOR,streamlit_dot_proto_dot_Components__pb2.DESCRIPTOR,])
_WIDGETSTATES = _descriptor.Descriptor(
name='WidgetStates',
full_name='WidgetStates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='widgets', full_name='WidgetStates.widgets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=102,
serialized_end=147,
)
_WIDGETSTATE = _descriptor.Descriptor(
name='WidgetState',
full_name='WidgetState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='WidgetState.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trigger_value', full_name='WidgetState.trigger_value', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bool_value', full_name='WidgetState.bool_value', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='double_value', full_name='WidgetState.double_value', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='int_value', full_name='WidgetState.int_value', index=4,
number=5, type=18, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='string_value', full_name='WidgetState.string_value', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='double_array_value', full_name='WidgetState.double_array_value', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='int_array_value', full_name='WidgetState.int_array_value', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='string_array_value', full_name='WidgetState.string_array_value', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='json_value', full_name='WidgetState.json_value', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='arrow_value', full_name='WidgetState.arrow_value', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bytes_value', full_name='WidgetState.bytes_value', index=11,
number=12, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='WidgetState.value',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=150,
serialized_end=510,
)
_WIDGETSTATES.fields_by_name['widgets'].message_type = _WIDGETSTATE
_WIDGETSTATE.fields_by_name['double_array_value'].message_type = streamlit_dot_proto_dot_Common__pb2._DOUBLEARRAY
_WIDGETSTATE.fields_by_name['int_array_value'].message_type = streamlit_dot_proto_dot_Common__pb2._SINT64ARRAY
_WIDGETSTATE.fields_by_name['string_array_value'].message_type = streamlit_dot_proto_dot_Common__pb2._STRINGARRAY
_WIDGETSTATE.fields_by_name['arrow_value'].message_type = streamlit_dot_proto_dot_Components__pb2._ARROWTABLE
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['trigger_value'])
_WIDGETSTATE.fields_by_name['trigger_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['bool_value'])
_WIDGETSTATE.fields_by_name['bool_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['double_value'])
_WIDGETSTATE.fields_by_name['double_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['int_value'])
_WIDGETSTATE.fields_by_name['int_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['string_value'])
_WIDGETSTATE.fields_by_name['string_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['double_array_value'])
_WIDGETSTATE.fields_by_name['double_array_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['int_array_value'])
_WIDGETSTATE.fields_by_name['int_array_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['string_array_value'])
_WIDGETSTATE.fields_by_name['string_array_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['json_value'])
_WIDGETSTATE.fields_by_name['json_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['arrow_value'])
_WIDGETSTATE.fields_by_name['arrow_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
_WIDGETSTATE.oneofs_by_name['value'].fields.append(
_WIDGETSTATE.fields_by_name['bytes_value'])
_WIDGETSTATE.fields_by_name['bytes_value'].containing_oneof = _WIDGETSTATE.oneofs_by_name['value']
DESCRIPTOR.message_types_by_name['WidgetStates'] = _WIDGETSTATES
DESCRIPTOR.message_types_by_name['WidgetState'] = _WIDGETSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WidgetStates = _reflection.GeneratedProtocolMessageType('WidgetStates', (_message.Message,), {
'DESCRIPTOR' : _WIDGETSTATES,
'__module__' : 'streamlit.proto.WidgetStates_pb2'
# @@protoc_insertion_point(class_scope:WidgetStates)
})
_sym_db.RegisterMessage(WidgetStates)
WidgetState = _reflection.GeneratedProtocolMessageType('WidgetState', (_message.Message,), {
'DESCRIPTOR' : _WIDGETSTATE,
'__module__' : 'streamlit.proto.WidgetStates_pb2'
# @@protoc_insertion_point(class_scope:WidgetState)
})
_sym_db.RegisterMessage(WidgetState)
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor.OneofDescriptor",
"google.protobuf.symbol_database.Default",
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.descriptor.FileDescriptor"
] |
[((433, 459), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (457, 459), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((641, 1821), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""streamlit/proto/WidgetStates.proto"""', 'package': '""""""', 'syntax': '"""proto3"""', 'serialized_options': 'None', 'create_key': '_descriptor._internal_create_key', 'serialized_pb': 'b\'\\n"streamlit/proto/WidgetStates.proto\\x1a\\x1cstreamlit/proto/Common.proto\\x1a streamlit/proto/Components.proto"-\\n\\x0cWidgetStates\\x12\\x1d\\n\\x07widgets\\x18\\x01 \\x03(\\x0b2\\x0c.WidgetState"\\xe8\\x02\\n\\x0bWidgetState\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x17\\n\\rtrigger_value\\x18\\x02 \\x01(\\x08H\\x00\\x12\\x14\\n\\nbool_value\\x18\\x03 \\x01(\\x08H\\x00\\x12\\x16\\n\\x0cdouble_value\\x18\\x04 \\x01(\\x01H\\x00\\x12\\x13\\n\\tint_value\\x18\\x05 \\x01(\\x12H\\x00\\x12\\x16\\n\\x0cstring_value\\x18\\x06 \\x01(\\tH\\x00\\x12*\\n\\x12double_array_value\\x18\\x07 \\x01(\\x0b2\\x0c.DoubleArrayH\\x00\\x12\\\'\\n\\x0fint_array_value\\x18\\x08 \\x01(\\x0b2\\x0c.SInt64ArrayH\\x00\\x12*\\n\\x12string_array_value\\x18\\t \\x01(\\x0b2\\x0c.StringArrayH\\x00\\x12\\x14\\n\\njson_value\\x18\\n \\x01(\\tH\\x00\\x12"\\n\\x0barrow_value\\x18\\x0b \\x01(\\x0b2\\x0b.ArrowTableH\\x00\\x12\\x15\\n\\x0bbytes_value\\x18\\x0c \\x01(\\x0cH\\x00B\\x07\\n\\x05valueb\\x06proto3\'', 'dependencies': '[streamlit_dot_proto_dot_Common__pb2.DESCRIPTOR,\n streamlit_dot_proto_dot_Components__pb2.DESCRIPTOR]'}), '(name=\'streamlit/proto/WidgetStates.proto\',\n package=\'\', syntax=\'proto3\', serialized_options=None, create_key=\n _descriptor._internal_create_key, serialized_pb=\n b\'\\n"streamlit/proto/WidgetStates.proto\\x1a\\x1cstreamlit/proto/Common.proto\\x1a streamlit/proto/Components.proto"-\\n\\x0cWidgetStates\\x12\\x1d\\n\\x07widgets\\x18\\x01 \\x03(\\x0b2\\x0c.WidgetState"\\xe8\\x02\\n\\x0bWidgetState\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x17\\n\\rtrigger_value\\x18\\x02 \\x01(\\x08H\\x00\\x12\\x14\\n\\nbool_value\\x18\\x03 \\x01(\\x08H\\x00\\x12\\x16\\n\\x0cdouble_value\\x18\\x04 \\x01(\\x01H\\x00\\x12\\x13\\n\\tint_value\\x18\\x05 \\x01(\\x12H\\x00\\x12\\x16\\n\\x0cstring_value\\x18\\x06 \\x01(\\tH\\x00\\x12*\\n\\x12double_array_value\\x18\\x07 \\x01(\\x0b2\\x0c.DoubleArrayH\\x00\\x12\\\'\\n\\x0fint_array_value\\x18\\x08 \\x01(\\x0b2\\x0c.SInt64ArrayH\\x00\\x12*\\n\\x12string_array_value\\x18\\t \\x01(\\x0b2\\x0c.StringArrayH\\x00\\x12\\x14\\n\\njson_value\\x18\\n \\x01(\\tH\\x00\\x12"\\n\\x0barrow_value\\x18\\x0b \\x01(\\x0b2\\x0b.ArrowTableH\\x00\\x12\\x15\\n\\x0bbytes_value\\x18\\x0c \\x01(\\x0cH\\x00B\\x07\\n\\x05valueb\\x06proto3\'\n , dependencies=[streamlit_dot_proto_dot_Common__pb2.DESCRIPTOR,\n streamlit_dot_proto_dot_Components__pb2.DESCRIPTOR])\n', (667, 1821), True, 'from google.protobuf import descriptor as _descriptor\n'), ((11172, 11339), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""WidgetStates"""', '(_message.Message,)', "{'DESCRIPTOR': _WIDGETSTATES, '__module__': 'streamlit.proto.WidgetStates_pb2'}"], {}), "('WidgetStates', (_message.Message,\n ), {'DESCRIPTOR': _WIDGETSTATES, '__module__':\n 'streamlit.proto.WidgetStates_pb2'})\n", (11212, 11339), True, 'from google.protobuf import reflection as _reflection\n'), ((11449, 11613), 'google.protobuf.reflection.GeneratedProtocolMessageType', 
'_reflection.GeneratedProtocolMessageType', (['"""WidgetState"""', '(_message.Message,)', "{'DESCRIPTOR': _WIDGETSTATE, '__module__': 'streamlit.proto.WidgetStates_pb2'}"], {}), "('WidgetState', (_message.Message,),\n {'DESCRIPTOR': _WIDGETSTATE, '__module__':\n 'streamlit.proto.WidgetStates_pb2'})\n", (11489, 11613), True, 'from google.protobuf import reflection as _reflection\n'), ((2068, 2446), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""widgets"""', 'full_name': '"""WidgetStates.widgets"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='widgets', full_name=\n 'WidgetStates.widgets', index=0, number=1, type=11, cpp_type=10, label=\n 3, has_default_value=False, default_value=[], message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (2095, 2446), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3300, 3690), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""trigger_value"""', 'full_name': '"""WidgetState.trigger_value"""', 'index': '(1)', 'number': '(2)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='trigger_value', full_name=\n 'WidgetState.trigger_value', index=1, number=2, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (3327, 3690), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3712, 4096), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""bool_value"""', 'full_name': '"""WidgetState.bool_value"""', 'index': '(2)', 'number': '(3)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='bool_value', full_name=\n 'WidgetState.bool_value', index=2, number=3, type=8, cpp_type=7, label=\n 1, has_default_value=False, default_value=False, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (3739, 4096), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4531, 4910), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""int_value"""', 'full_name': '"""WidgetState.int_value"""', 'index': '(4)', 'number': '(5)', 'type': '(18)', 'cpp_type': '(2)', 
'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='int_value', full_name=\n 'WidgetState.int_value', index=4, number=5, type=18, cpp_type=2, label=\n 1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (4558, 4910), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5356, 5758), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""double_array_value"""', 'full_name': '"""WidgetState.double_array_value"""', 'index': '(6)', 'number': '(7)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='double_array_value', full_name=\n 'WidgetState.double_array_value', index=6, number=7, type=11, cpp_type=\n 10, label=1, has_default_value=False, default_value=None, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (5383, 5758), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5779, 6173), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""int_array_value"""', 'full_name': '"""WidgetState.int_array_value"""', 'index': '(7)', 'number': '(8)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='int_array_value', full_name=\n 'WidgetState.int_array_value', index=7, number=8, type=11, cpp_type=10,\n label=1, has_default_value=False, default_value=None, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (5806, 6173), True, 'from google.protobuf import descriptor as _descriptor\n'), ((6196, 6598), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""string_array_value"""', 'full_name': '"""WidgetState.string_array_value"""', 'index': '(8)', 'number': '(9)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='string_array_value', full_name=\n 'WidgetState.string_array_value', index=8, number=9, type=11, cpp_type=\n 10, label=1, has_default_value=False, default_value=None, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n 
extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (6223, 6598), True, 'from google.protobuf import descriptor as _descriptor\n'), ((7040, 7428), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""arrow_value"""', 'full_name': '"""WidgetState.arrow_value"""', 'index': '(10)', 'number': '(11)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='arrow_value', full_name=\n 'WidgetState.arrow_value', index=10, number=11, type=11, cpp_type=10,\n label=1, has_default_value=False, default_value=None, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (7067, 7428), True, 'from google.protobuf import descriptor as _descriptor\n'), ((7451, 7837), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""bytes_value"""', 'full_name': '"""WidgetState.bytes_value"""', 'index': '(11)', 'number': '(12)', 'type': '(12)', 'cpp_type': '(9)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': "b''", 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='bytes_value', full_name=\n 'WidgetState.bytes_value', index=11, number=12, type=12, cpp_type=9,\n label=1, has_default_value=False, default_value=b'', message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (7478, 7837), True, 'from google.protobuf import descriptor as _descriptor\n'), ((8027, 8195), 'google.protobuf.descriptor.OneofDescriptor', '_descriptor.OneofDescriptor', ([], {'name': '"""value"""', 'full_name': '"""WidgetState.value"""', 'index': '(0)', 'containing_type': 'None', 'create_key': '_descriptor._internal_create_key', 'fields': '[]'}), "(name='value', full_name='WidgetState.value',\n index=0, containing_type=None, create_key=_descriptor.\n _internal_create_key, fields=[])\n", (8054, 8195), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
from distutils.core import setup
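# Minimal distutils packaging metadata for the InspectorAgent helper library.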
setup(
name = 'InspectorAgent',
packages = ['InspectorAgent'],
version = '0.1',
description = 'lib to assert perf test result to Inspector',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/kahalKevin/InspectorAgent',
download_url = 'https://github.com/kahalKevin/InspectorAgent/archive/0.1.tar.gz',
keywords = ['testing'],
classifiers = [],
)
|
[
"distutils.core.setup"
] |
[((33, 398), 'distutils.core.setup', 'setup', ([], {'name': '"""InspectorAgent"""', 'packages': "['InspectorAgent']", 'version': '"""0.1"""', 'description': '"""lib to assert perf test result to Inspector"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/kahalKevin/InspectorAgent"""', 'download_url': '"""https://github.com/kahalKevin/InspectorAgent/archive/0.1.tar.gz"""', 'keywords': "['testing']", 'classifiers': '[]'}), "(name='InspectorAgent', packages=['InspectorAgent'], version='0.1',\n description='lib to assert perf test result to Inspector', author=\n '<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/kahalKevin/InspectorAgent', download_url=\n 'https://github.com/kahalKevin/InspectorAgent/archive/0.1.tar.gz',\n keywords=['testing'], classifiers=[])\n", (38, 398), False, 'from distutils.core import setup\n')]
|
from __future__ import division, print_function
import numpy as np
from scipy import signal as sig
from matplotlib import pyplot as plt
import seaborn as sns
"""https://stackoverflow.com/questions/56551114/fully-monotone-interpolation-in-python """
# see also
# https://en.wikipedia.org/wiki/Monotone-spline aka I-spline
# https://scikit-learn.org/stable/modules/isotonic.html
# denis 2 March 2020
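# For comparison (assuming scikit-learn is installed), isotonic regression
# gives a monotone least-squares fit directly:
#   from sklearn.isotonic import IsotonicRegression
#   yiso = IsotonicRegression(increasing=True).fit_transform(np.arange(len(y)), y)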
def butter_filtfilt( x, Wn=0.5, axis=0 ):
""" butter( 2, Wn ), filtfilt
axis 0 each col, -1 each row
"""
b, a = sig.butter( N=2, Wn=Wn )
return sig.filtfilt( b, a, x, axis=axis, method="gust" )
# twice, forward backward
def ints( x ):
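    # round to the nearest integers and cast, for compact debug printing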
return x.round().astype(int)
def minavmax( x ):
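    # one-line "min average max" summary of an array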
return "min av max %.3g %.3g %.3g" % (
x.min(), x.mean(), x.max() )
def pvec( x ):
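    # show the tail of a vector, 25 values per row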
n = len(x) // 25 * 25
return "%s \n%s \n" % (
minavmax( x ),
ints( x[ - n : ]) .reshape( -1, 25 ))
#...............................................................................
def monofit( y, Wn=0.1 ):
""" monotone-increasing curve fit """
y = np.asarray(y).squeeze()
# print( "\n{ monofit: y %d %s Wn %.3g " % (
# len(y), minavmax( y ), Wn ))
ygrad = np.gradient( y )
# print( "grad y:", pvec( ygrad ))
# lowpass filter --
gradsmooth = butter_filtfilt( ygrad, Wn=Wn )
# print( "gradsmooth:", pvec( gradsmooth ))
ge0 = np.fmax( gradsmooth, 0 )
ymono = np.cumsum( ge0 ) # integrate, sensitive to first few
ymono += (y - ymono).mean()
err = y - ymono
# print( "y - ymono:", pvec( err ))
errstr = "average |y - monofit|: %.2g" % np.abs( err ).mean()
# print( errstr )
# print( "} \n" )
return ymono, err, errstr
#...............................................................................
if __name__ == "__main__":
import sys
# np.set_printoptions( threshold=20, edgeitems=15, linewidth=120,
# formatter = dict( float = lambda x: "%.2g" % x ))
# float arrays %.2g
# print( 80 * "=" )
thispy = sys.argv[0]
infile = sys.argv[1] if len(sys.argv) > 1 \
else "so-mono.txt"
Wn = 0.1
params = "%s %s Wn %g " % (thispy, infile, Wn)
# print( params )
y = np.array([0.1109157119023644, 0.20187393816931934, 0.14466318670239758,
0.16535159414166822, 0.05452708697483864, 0.2153046237959556,
0.2200300476272603, 0.21012762463269324, 0.15947100322395022,
0.2819691842129948, 0.15567770052985092, 0.24850595803020692,
0.1329341593280457, 0.15595107081606913, 0.3232021121832229,
0.23707961921686588, 0.2415887076540357, 0.32363506549779797,
0.3584089204036798, 0.29232772580068433, 0.22145994836140775,
0.22797587985241133, 0.2717787840603025, 0.3245255944762287,
0.29301098282789195, 0.32417076823344143, 0.3450906550996232,
0.34272097408024904, 0.3868714875012437, 0.41876692320045755,
0.3544198724867363, 0.33073960954801895, 0.3921033666371904,
0.33349050060172974, 0.3608862044547096, 0.37375822841635425,
0.5396399750708429, 0.4209201143798284, 0.42004773793166883,
0.5217725632679073, 0.5911731474218788, 0.43389609315065386,
0.4287288396176006, 0.43007525393257007, 0.5687062142675405,
0.6030811498722173, 0.5292225577714743, 0.47710974351051355,
0.6182720730381119, 0.6241033581931327, 0.6236788197617511,
0.6643161356364049, 0.5577616524049582, 0.6888440258481371,
0.6867893120660341, 0.6685257606057502, 0.599481675493677,
0.7309075091448749, 0.7644365338580481, 0.6176797601816733,
0.6751467827192018, 0.6452178017908761, 0.6684778262246701,
0.7003380077556168, 0.667035916425416, 0.8434451759113093,
0.8419343615815968, 0.8657695361433773, 0.7392487161484605,
0.8773282098364621, 0.8265679895117846, 0.7246599961191632,
0.7251899061730714, 0.9271640780410231, 0.9180581424305536,
0.8099033021701689, 0.8268585329594615, 0.8519967080830176,
0.8711231413093845, 0.8689802343798663, 0.8299523829217353,
1.0057741699770046, 0.8538130788729608, 0.9662784297225102,
1.023419780920539, 0.913146849759822, 0.9900885996579213,
0.8740638988529978, 0.8900285618419457, 0.9065474574434158,
1.0749522597307315, 1.0319120938258166, 1.0051369663172995,
0.9893558841613622, 1.051384986916457, 1.0327996870915341,
1.0945543972861898, 0.9716604944496021, 1.1490370559566179,
1.1379231481207432, 1.6836433783615088, 1.8162068766097395,
2.072155286917785, 2.0395966998366, 2.191064589600466,
2.1581974932543617, 2.163403843819597, 2.133441151300847,
2.1726053994136922, 2.1157865673629526, 2.2249636455682866,
2.2313062166802147, 2.1731708496472764, 2.315203950110816,
2.1601242661726827, 2.174940281421225, 2.2653635413275945,
2.337227057574145, 2.3645767548381618, 2.3084919291392527,
2.314014515926446, 2.25166717296155, 2.2621157708115778,
2.2644578546265586, 2.313504860292943, 2.398969190357051,
2.309443951779675, 2.278946047410807, 2.4080802287121146,
2.353652872018618, 2.35527529074088, 2.4233001060410784,
2.428767198055608, 2.35677123091093, 2.497135132404064,
2.3978099128437282, 2.3970802609341972, 2.4967434818740024,
2.511209192435555, 2.541001050440798, 2.5760248002036525,
2.5960512284192245, 2.4778408861721037, 2.5757724103530046,
2.631148267999664, 2.538327346218921, 2.4878734713248507,
2.6133797275761066, 2.6282561527857395, 2.6150327104952447,
3.102757164382848, 3.3318503012160905, 3.3907776288198193,
3.6065313558941936, 3.601180295875859, 3.560491539319038,
3.650095006265445, 3.574812155815713, 3.686227315374108,
3.6338261415040867, 3.5661194785086288, 3.5747332336054645,
3.560674343726918, 3.5678550481603635, 3.5342848534390967,
3.4929538312485913, 3.564544653619436, 3.6861775399566126,
3.6390300636595216, 3.6656336332413666, 3.5731185631923945,
3.5965520044069854, 3.537434489989021, 3.5590937423870144,
3.5331656424410083, 3.640652819618705, 3.5971240740252126,
3.641793843012055, 3.6064014089254295, 3.530378938786505,
3.613631139461306, 3.519542268056021, 3.5416251524576,
3.524789618934195, 3.5519951806099512, 3.6435695455293975,
3.6825670484650863, 3.5993379768209217, 3.628367553897596,
3.633290480934276, 3.5772841681579535, 3.602326323397947,
3.518180278272883, 3.531054006706696, 3.5566645495066167,
3.5410992153240985, 3.630762839301216, 3.5924649123201053,
3.646230633817883, 3.568290612034935, 3.638356129262967,
3.566083243271712, 3.6064978645771797, 3.4942864293427633,
3.595438454812999, 3.681726879126678, 3.6501308156903463,
3.5490717955938593, 3.598535359345363, 3.6328331698421654,
3.595159538698094, 3.556715819008055, 3.6292942886764554,
3.6362895697392856, 3.5965220100874093, 3.6103542985016266,
3.5715010140382493, 3.658769915445062, 3.5939686395400416,
3.4974461928859917, 3.5232691556732267, 3.6145687814416614,
3.5682054018341005, 3.648937250575395, 3.4912089018613384,
3.522426560340423, 3.6757968409374637, 3.651348691084845,
3.5395070091675973, 3.5306275536360383, 3.6153498246329883,
3.599762785949876, 3.5351931286962333, 3.6488316987683054,
3.5198301490992963, 3.5696570079786687, 3.561553836008927,
3.5659475947331423, 3.553147100256108, 3.5475591872743664,
3.6097226797553317, 3.6849600324757934, 3.5264731043844413,
3.506658609738451, 3.5535775980874114, 3.5487291053913554,
3.570651383823912, 3.552993371839188, 3.5054297764661846,
3.5723024888238792])
ymono, err, errstr = monofit( y, Wn=Wn )
if 1:
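        # plot the monotone fit and mark the misfit as vertical blue lines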
sns.set_style("whitegrid")
fig, ax = plt.subplots( figsize=[10, 5] )
plt.subplots_adjust( left=.05, right=.99, bottom=.05, top=.90 )
fig.suptitle(
"Easy monotone curve fit: np.gradient | lowpass filter | clip < 0 | integrate \n"
+ errstr, multialignment="left" )
ax.plot( ymono, color="orangered" )
j = np.where( ymono < y )[0]
xax = np.arange( len(y) )
plt.vlines( xax[j], ymono[j], y[j], color="blue", lw=1 )
j = np.where( ymono > y )[0]
plt.vlines( xax[j], y[j], ymono[j], color="blue", lw=1 )
# png = thispy.replace( ".py", ".png" )
# print( "writing", png )
# plt.savefig( png )
plt.show()
|
[
"seaborn.set_style",
"numpy.fmax",
"matplotlib.pyplot.show",
"numpy.abs",
"scipy.signal.filtfilt",
"numpy.asarray",
"matplotlib.pyplot.vlines",
"numpy.cumsum",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots",
"scipy.signal.butter",
"numpy.gradient"
] |
[((533, 555), 'scipy.signal.butter', 'sig.butter', ([], {'N': '(2)', 'Wn': 'Wn'}), '(N=2, Wn=Wn)\n', (543, 555), True, 'from scipy import signal as sig\n'), ((569, 616), 'scipy.signal.filtfilt', 'sig.filtfilt', (['b', 'a', 'x'], {'axis': 'axis', 'method': '"""gust"""'}), "(b, a, x, axis=axis, method='gust')\n", (581, 616), True, 'from scipy import signal as sig\n'), ((1224, 1238), 'numpy.gradient', 'np.gradient', (['y'], {}), '(y)\n', (1235, 1238), True, 'import numpy as np\n'), ((1417, 1439), 'numpy.fmax', 'np.fmax', (['gradsmooth', '(0)'], {}), '(gradsmooth, 0)\n', (1424, 1439), True, 'import numpy as np\n'), ((1455, 1469), 'numpy.cumsum', 'np.cumsum', (['ge0'], {}), '(ge0)\n', (1464, 1469), True, 'import numpy as np\n'), ((2247, 7621), 'numpy.array', 'np.array', (['[0.1109157119023644, 0.20187393816931934, 0.14466318670239758, \n 0.16535159414166822, 0.05452708697483864, 0.2153046237959556, \n 0.2200300476272603, 0.21012762463269324, 0.15947100322395022, \n 0.2819691842129948, 0.15567770052985092, 0.24850595803020692, \n 0.1329341593280457, 0.15595107081606913, 0.3232021121832229, \n 0.23707961921686588, 0.2415887076540357, 0.32363506549779797, \n 0.3584089204036798, 0.29232772580068433, 0.22145994836140775, \n 0.22797587985241133, 0.2717787840603025, 0.3245255944762287, \n 0.29301098282789195, 0.32417076823344143, 0.3450906550996232, \n 0.34272097408024904, 0.3868714875012437, 0.41876692320045755, \n 0.3544198724867363, 0.33073960954801895, 0.3921033666371904, \n 0.33349050060172974, 0.3608862044547096, 0.37375822841635425, \n 0.5396399750708429, 0.4209201143798284, 0.42004773793166883, \n 0.5217725632679073, 0.5911731474218788, 0.43389609315065386, \n 0.4287288396176006, 0.43007525393257007, 0.5687062142675405, \n 0.6030811498722173, 0.5292225577714743, 0.47710974351051355, \n 0.6182720730381119, 0.6241033581931327, 0.6236788197617511, \n 0.6643161356364049, 0.5577616524049582, 0.6888440258481371, \n 0.6867893120660341, 0.6685257606057502, 0.599481675493677, \n 0.7309075091448749, 0.7644365338580481, 0.6176797601816733, \n 0.6751467827192018, 0.6452178017908761, 0.6684778262246701, \n 0.7003380077556168, 0.667035916425416, 0.8434451759113093, \n 0.8419343615815968, 0.8657695361433773, 0.7392487161484605, \n 0.8773282098364621, 0.8265679895117846, 0.7246599961191632, \n 0.7251899061730714, 0.9271640780410231, 0.9180581424305536, \n 0.8099033021701689, 0.8268585329594615, 0.8519967080830176, \n 0.8711231413093845, 0.8689802343798663, 0.8299523829217353, \n 1.0057741699770046, 0.8538130788729608, 0.9662784297225102, \n 1.023419780920539, 0.913146849759822, 0.9900885996579213, \n 0.8740638988529978, 0.8900285618419457, 0.9065474574434158, \n 1.0749522597307315, 1.0319120938258166, 1.0051369663172995, \n 0.9893558841613622, 1.051384986916457, 1.0327996870915341, \n 1.0945543972861898, 0.9716604944496021, 1.1490370559566179, \n 1.1379231481207432, 1.6836433783615088, 1.8162068766097395, \n 2.072155286917785, 2.0395966998366, 2.191064589600466, \n 2.1581974932543617, 2.163403843819597, 2.133441151300847, \n 2.1726053994136922, 2.1157865673629526, 2.2249636455682866, \n 2.2313062166802147, 2.1731708496472764, 2.315203950110816, \n 2.1601242661726827, 2.174940281421225, 2.2653635413275945, \n 2.337227057574145, 2.3645767548381618, 2.3084919291392527, \n 2.314014515926446, 2.25166717296155, 2.2621157708115778, \n 2.2644578546265586, 2.313504860292943, 2.398969190357051, \n 2.309443951779675, 2.278946047410807, 2.4080802287121146, \n 2.353652872018618, 2.35527529074088, 2.4233001060410784, \n 
2.428767198055608, 2.35677123091093, 2.497135132404064, \n 2.3978099128437282, 2.3970802609341972, 2.4967434818740024, \n 2.511209192435555, 2.541001050440798, 2.5760248002036525, \n 2.5960512284192245, 2.4778408861721037, 2.5757724103530046, \n 2.631148267999664, 2.538327346218921, 2.4878734713248507, \n 2.6133797275761066, 2.6282561527857395, 2.6150327104952447, \n 3.102757164382848, 3.3318503012160905, 3.3907776288198193, \n 3.6065313558941936, 3.601180295875859, 3.560491539319038, \n 3.650095006265445, 3.574812155815713, 3.686227315374108, \n 3.6338261415040867, 3.5661194785086288, 3.5747332336054645, \n 3.560674343726918, 3.5678550481603635, 3.5342848534390967, \n 3.4929538312485913, 3.564544653619436, 3.6861775399566126, \n 3.6390300636595216, 3.6656336332413666, 3.5731185631923945, \n 3.5965520044069854, 3.537434489989021, 3.5590937423870144, \n 3.5331656424410083, 3.640652819618705, 3.5971240740252126, \n 3.641793843012055, 3.6064014089254295, 3.530378938786505, \n 3.613631139461306, 3.519542268056021, 3.5416251524576, \n 3.524789618934195, 3.5519951806099512, 3.6435695455293975, \n 3.6825670484650863, 3.5993379768209217, 3.628367553897596, \n 3.633290480934276, 3.5772841681579535, 3.602326323397947, \n 3.518180278272883, 3.531054006706696, 3.5566645495066167, \n 3.5410992153240985, 3.630762839301216, 3.5924649123201053, \n 3.646230633817883, 3.568290612034935, 3.638356129262967, \n 3.566083243271712, 3.6064978645771797, 3.4942864293427633, \n 3.595438454812999, 3.681726879126678, 3.6501308156903463, \n 3.5490717955938593, 3.598535359345363, 3.6328331698421654, \n 3.595159538698094, 3.556715819008055, 3.6292942886764554, \n 3.6362895697392856, 3.5965220100874093, 3.6103542985016266, \n 3.5715010140382493, 3.658769915445062, 3.5939686395400416, \n 3.4974461928859917, 3.5232691556732267, 3.6145687814416614, \n 3.5682054018341005, 3.648937250575395, 3.4912089018613384, \n 3.522426560340423, 3.6757968409374637, 3.651348691084845, \n 3.5395070091675973, 3.5306275536360383, 3.6153498246329883, \n 3.599762785949876, 3.5351931286962333, 3.6488316987683054, \n 3.5198301490992963, 3.5696570079786687, 3.561553836008927, \n 3.5659475947331423, 3.553147100256108, 3.5475591872743664, \n 3.6097226797553317, 3.6849600324757934, 3.5264731043844413, \n 3.506658609738451, 3.5535775980874114, 3.5487291053913554, \n 3.570651383823912, 3.552993371839188, 3.5054297764661846, \n 3.5723024888238792]'], {}), '([0.1109157119023644, 0.20187393816931934, 0.14466318670239758, \n 0.16535159414166822, 0.05452708697483864, 0.2153046237959556, \n 0.2200300476272603, 0.21012762463269324, 0.15947100322395022, \n 0.2819691842129948, 0.15567770052985092, 0.24850595803020692, \n 0.1329341593280457, 0.15595107081606913, 0.3232021121832229, \n 0.23707961921686588, 0.2415887076540357, 0.32363506549779797, \n 0.3584089204036798, 0.29232772580068433, 0.22145994836140775, \n 0.22797587985241133, 0.2717787840603025, 0.3245255944762287, \n 0.29301098282789195, 0.32417076823344143, 0.3450906550996232, \n 0.34272097408024904, 0.3868714875012437, 0.41876692320045755, \n 0.3544198724867363, 0.33073960954801895, 0.3921033666371904, \n 0.33349050060172974, 0.3608862044547096, 0.37375822841635425, \n 0.5396399750708429, 0.4209201143798284, 0.42004773793166883, \n 0.5217725632679073, 0.5911731474218788, 0.43389609315065386, \n 0.4287288396176006, 0.43007525393257007, 0.5687062142675405, \n 0.6030811498722173, 0.5292225577714743, 0.47710974351051355, \n 0.6182720730381119, 0.6241033581931327, 0.6236788197617511, \n 0.6643161356364049, 
0.5577616524049582, 0.6888440258481371, \n 0.6867893120660341, 0.6685257606057502, 0.599481675493677, \n 0.7309075091448749, 0.7644365338580481, 0.6176797601816733, \n 0.6751467827192018, 0.6452178017908761, 0.6684778262246701, \n 0.7003380077556168, 0.667035916425416, 0.8434451759113093, \n 0.8419343615815968, 0.8657695361433773, 0.7392487161484605, \n 0.8773282098364621, 0.8265679895117846, 0.7246599961191632, \n 0.7251899061730714, 0.9271640780410231, 0.9180581424305536, \n 0.8099033021701689, 0.8268585329594615, 0.8519967080830176, \n 0.8711231413093845, 0.8689802343798663, 0.8299523829217353, \n 1.0057741699770046, 0.8538130788729608, 0.9662784297225102, \n 1.023419780920539, 0.913146849759822, 0.9900885996579213, \n 0.8740638988529978, 0.8900285618419457, 0.9065474574434158, \n 1.0749522597307315, 1.0319120938258166, 1.0051369663172995, \n 0.9893558841613622, 1.051384986916457, 1.0327996870915341, \n 1.0945543972861898, 0.9716604944496021, 1.1490370559566179, \n 1.1379231481207432, 1.6836433783615088, 1.8162068766097395, \n 2.072155286917785, 2.0395966998366, 2.191064589600466, \n 2.1581974932543617, 2.163403843819597, 2.133441151300847, \n 2.1726053994136922, 2.1157865673629526, 2.2249636455682866, \n 2.2313062166802147, 2.1731708496472764, 2.315203950110816, \n 2.1601242661726827, 2.174940281421225, 2.2653635413275945, \n 2.337227057574145, 2.3645767548381618, 2.3084919291392527, \n 2.314014515926446, 2.25166717296155, 2.2621157708115778, \n 2.2644578546265586, 2.313504860292943, 2.398969190357051, \n 2.309443951779675, 2.278946047410807, 2.4080802287121146, \n 2.353652872018618, 2.35527529074088, 2.4233001060410784, \n 2.428767198055608, 2.35677123091093, 2.497135132404064, \n 2.3978099128437282, 2.3970802609341972, 2.4967434818740024, \n 2.511209192435555, 2.541001050440798, 2.5760248002036525, \n 2.5960512284192245, 2.4778408861721037, 2.5757724103530046, \n 2.631148267999664, 2.538327346218921, 2.4878734713248507, \n 2.6133797275761066, 2.6282561527857395, 2.6150327104952447, \n 3.102757164382848, 3.3318503012160905, 3.3907776288198193, \n 3.6065313558941936, 3.601180295875859, 3.560491539319038, \n 3.650095006265445, 3.574812155815713, 3.686227315374108, \n 3.6338261415040867, 3.5661194785086288, 3.5747332336054645, \n 3.560674343726918, 3.5678550481603635, 3.5342848534390967, \n 3.4929538312485913, 3.564544653619436, 3.6861775399566126, \n 3.6390300636595216, 3.6656336332413666, 3.5731185631923945, \n 3.5965520044069854, 3.537434489989021, 3.5590937423870144, \n 3.5331656424410083, 3.640652819618705, 3.5971240740252126, \n 3.641793843012055, 3.6064014089254295, 3.530378938786505, \n 3.613631139461306, 3.519542268056021, 3.5416251524576, \n 3.524789618934195, 3.5519951806099512, 3.6435695455293975, \n 3.6825670484650863, 3.5993379768209217, 3.628367553897596, \n 3.633290480934276, 3.5772841681579535, 3.602326323397947, \n 3.518180278272883, 3.531054006706696, 3.5566645495066167, \n 3.5410992153240985, 3.630762839301216, 3.5924649123201053, \n 3.646230633817883, 3.568290612034935, 3.638356129262967, \n 3.566083243271712, 3.6064978645771797, 3.4942864293427633, \n 3.595438454812999, 3.681726879126678, 3.6501308156903463, \n 3.5490717955938593, 3.598535359345363, 3.6328331698421654, \n 3.595159538698094, 3.556715819008055, 3.6292942886764554, \n 3.6362895697392856, 3.5965220100874093, 3.6103542985016266, \n 3.5715010140382493, 3.658769915445062, 3.5939686395400416, \n 3.4974461928859917, 3.5232691556732267, 3.6145687814416614, \n 3.5682054018341005, 3.648937250575395, 
3.4912089018613384, \n 3.522426560340423, 3.6757968409374637, 3.651348691084845, \n 3.5395070091675973, 3.5306275536360383, 3.6153498246329883, \n 3.599762785949876, 3.5351931286962333, 3.6488316987683054, \n 3.5198301490992963, 3.5696570079786687, 3.561553836008927, \n 3.5659475947331423, 3.553147100256108, 3.5475591872743664, \n 3.6097226797553317, 3.6849600324757934, 3.5264731043844413, \n 3.506658609738451, 3.5535775980874114, 3.5487291053913554, \n 3.570651383823912, 3.552993371839188, 3.5054297764661846, \n 3.5723024888238792])\n', (2255, 7621), True, 'import numpy as np\n'), ((8019, 8045), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (8032, 8045), True, 'import seaborn as sns\n'), ((8064, 8093), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[10, 5]'}), '(figsize=[10, 5])\n', (8076, 8093), True, 'from matplotlib import pyplot as plt\n'), ((8104, 8168), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.05)', 'right': '(0.99)', 'bottom': '(0.05)', 'top': '(0.9)'}), '(left=0.05, right=0.99, bottom=0.05, top=0.9)\n', (8123, 8168), True, 'from matplotlib import pyplot as plt\n'), ((8442, 8496), 'matplotlib.pyplot.vlines', 'plt.vlines', (['xax[j]', 'ymono[j]', 'y[j]'], {'color': '"""blue"""', 'lw': '(1)'}), "(xax[j], ymono[j], y[j], color='blue', lw=1)\n", (8452, 8496), True, 'from matplotlib import pyplot as plt\n'), ((8544, 8598), 'matplotlib.pyplot.vlines', 'plt.vlines', (['xax[j]', 'y[j]', 'ymono[j]'], {'color': '"""blue"""', 'lw': '(1)'}), "(xax[j], y[j], ymono[j], color='blue', lw=1)\n", (8554, 8598), True, 'from matplotlib import pyplot as plt\n'), ((8721, 8731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8729, 8731), True, 'from matplotlib import pyplot as plt\n'), ((1099, 1112), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1109, 1112), True, 'import numpy as np\n'), ((8375, 8394), 'numpy.where', 'np.where', (['(ymono < y)'], {}), '(ymono < y)\n', (8383, 8394), True, 'import numpy as np\n'), ((8511, 8530), 'numpy.where', 'np.where', (['(ymono > y)'], {}), '(ymono > y)\n', (8519, 8530), True, 'import numpy as np\n'), ((1647, 1658), 'numpy.abs', 'np.abs', (['err'], {}), '(err)\n', (1653, 1658), True, 'import numpy as np\n')]
|
from collections import namedtuple
from logging import getLogger
from subprocess import run
from pytest import fixture
from git_sh_sync.repo import Repository
@fixture(scope='function')
def initrepo(tmpdir, monkeypatch):
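    # Patch Repository.__init__ with a stub that points the class at a fresh
    # temporary folder and, when requested, runs `git init` there.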
folder = tmpdir.mkdir('initrepo.git')
repo = Repository
def init(_, *, init):
setattr(repo, 'folder', folder)
setattr(repo, 'location', str(folder))
setattr(repo, 'remote_name', 'origin')
setattr(repo, '_log', getLogger(repo.__class__.__name__))
if init:
run(['git', 'init'], cwd=str(folder))
monkeypatch.setattr(repo, '__init__', init)
yield repo
assert folder.remove() is None
@fixture(scope='function')
def gitrepo(tmpdir):
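    # A real Repository rooted in a temporary folder, bundled with small
    # helper callables that shell out to git for test setup.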
folder = tmpdir.mkdir('gitrepo.git')
repo = Repository(location=str(folder))
yield namedtuple('GitRepo', (
'repo', 'folder', 'write', 'remove',
'add', 'commit', 'rm', 'checkout_branch', 'checkout', 'merge', 'tag',
'make_bare'
))(
repo=repo, folder=folder,
write=lambda name, text: folder.join(name).write_text(
text, 'utf-8', ensure=True
),
remove=lambda name: folder.join(name).remove(),
add=lambda *files: run(
['git', 'add', *files], cwd=repo.location
),
commit=lambda msg: run(
['git', 'commit', '-m', '{}'.format(msg)], cwd=repo.location
),
rm=lambda *files: run(
['git', 'rm', *files], cwd=repo.location
),
checkout_branch=lambda name: run(
['git', 'checkout', '-b', name], cwd=repo.location
),
checkout=lambda name: run(
['git', 'checkout', name], cwd=repo.location
),
merge=lambda name: run(
['git', 'merge', name], cwd=repo.location
),
tag=lambda name: run(
['git', 'tag', name], cwd=repo.location
),
make_bare=lambda: run(
['git', 'config', '--bool', 'core.bare', 'true'], cwd=repo.location
),
)
assert folder.remove() is None
@fixture(scope='function')
def conflict():
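    # Factory fixture: drive a repository into a merge conflict by committing
    # different content to the same file on two branches and merging them.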
def make(gtrp, filename='file', brc_org='master', brc_sec='temp'):
assert gtrp.repo.status.clean is True
gtrp.checkout_branch(brc_org)
gtrp.write(filename, 'content 1')
gtrp.add(filename)
gtrp.commit('commit 1')
assert gtrp.repo.status.clean is True
gtrp.checkout_branch(brc_sec)
gtrp.write(filename, 'content 2')
gtrp.add(filename)
gtrp.commit('commit 2')
assert gtrp.repo.status.clean is True
gtrp.checkout(brc_org)
gtrp.write(filename, 'content 3')
gtrp.add(filename)
gtrp.commit('commit 3')
assert gtrp.repo.status.clean is True
gtrp.merge(brc_sec)
assert gtrp.repo.status.clean is False
yield make
|
[
"subprocess.run",
"pytest.fixture",
"collections.namedtuple",
"logging.getLogger"
] |
[((164, 189), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (171, 189), False, 'from pytest import fixture\n'), ((689, 714), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (696, 714), False, 'from pytest import fixture\n'), ((2094, 2119), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (2101, 2119), False, 'from pytest import fixture\n'), ((481, 515), 'logging.getLogger', 'getLogger', (['repo.__class__.__name__'], {}), '(repo.__class__.__name__)\n', (490, 515), False, 'from logging import getLogger\n'), ((832, 979), 'collections.namedtuple', 'namedtuple', (['"""GitRepo"""', "('repo', 'folder', 'write', 'remove', 'add', 'commit', 'rm',\n 'checkout_branch', 'checkout', 'merge', 'tag', 'make_bare')"], {}), "('GitRepo', ('repo', 'folder', 'write', 'remove', 'add', 'commit',\n 'rm', 'checkout_branch', 'checkout', 'merge', 'tag', 'make_bare'))\n", (842, 979), False, 'from collections import namedtuple\n'), ((1237, 1283), 'subprocess.run', 'run', (["['git', 'add', *files]"], {'cwd': 'repo.location'}), "(['git', 'add', *files], cwd=repo.location)\n", (1240, 1283), False, 'from subprocess import run\n'), ((1449, 1494), 'subprocess.run', 'run', (["['git', 'rm', *files]"], {'cwd': 'repo.location'}), "(['git', 'rm', *files], cwd=repo.location)\n", (1452, 1494), False, 'from subprocess import run\n'), ((1555, 1610), 'subprocess.run', 'run', (["['git', 'checkout', '-b', name]"], {'cwd': 'repo.location'}), "(['git', 'checkout', '-b', name], cwd=repo.location)\n", (1558, 1610), False, 'from subprocess import run\n'), ((1664, 1713), 'subprocess.run', 'run', (["['git', 'checkout', name]"], {'cwd': 'repo.location'}), "(['git', 'checkout', name], cwd=repo.location)\n", (1667, 1713), False, 'from subprocess import run\n'), ((1764, 1810), 'subprocess.run', 'run', (["['git', 'merge', name]"], {'cwd': 'repo.location'}), "(['git', 'merge', name], cwd=repo.location)\n", (1767, 1810), False, 'from subprocess import run\n'), ((1859, 1903), 'subprocess.run', 'run', (["['git', 'tag', name]"], {'cwd': 'repo.location'}), "(['git', 'tag', name], cwd=repo.location)\n", (1862, 1903), False, 'from subprocess import run\n'), ((1953, 2025), 'subprocess.run', 'run', (["['git', 'config', '--bool', 'core.bare', 'true']"], {'cwd': 'repo.location'}), "(['git', 'config', '--bool', 'core.bare', 'true'], cwd=repo.location)\n", (1956, 2025), False, 'from subprocess import run\n')]
|
import numpy
from matplotlib import pyplot
import chaospy
pyplot.rc("figure", figsize=[3, 2])
COLOR1 = "steelblue"
COLOR2 = "slategray"
def save(name):
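    # save the current figure as a transparent PNG and clear it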
pyplot.axis("off")
pyplot.savefig(
f"./{name}.png",
bbox_inches="tight",
transparent=True,
)
pyplot.clf()
def make_distribution():
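    # PDF and CDF of a normal distribution, each outlined and filled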
t = numpy.linspace(-1, 1, 100)
dist = chaospy.Normal(0, 0.5)
pyplot.fill_between(t, 0, dist.pdf(t), alpha=0.3, color=COLOR1)
pyplot.plot(t, dist.pdf(t), COLOR1, lw=4)
pyplot.fill_between(t, 0, dist.cdf(t), alpha=0.3, color=COLOR2)
pyplot.plot(t, dist.cdf(t), COLOR2, lw=4)
save("distribution")
def make_polynomial():
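    # cubic polynomial with the areas between curve and axis shaded in two colours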
q0 = chaospy.variable()
poly = 1.2*q0*(q0-1.8)*(q0+1.8)
t = numpy.linspace(-2, 2, 100)
t0 = numpy.linspace(-2, 0, 100)
pyplot.fill_between(t0, 0, poly(t0), alpha=0.3, color=COLOR1)
pyplot.plot(t0, poly(t0), COLOR1, lw=4)
t0 = numpy.linspace(0, 2, 100)
pyplot.fill_between(t0, poly(t0), 0, alpha=0.3, color=COLOR2)
pyplot.plot(t0, poly(t0), COLOR2, lw=4)
save("polynomial")
def make_sampling():
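    # 20 Sobol points on the unit square, alternating samples in two colours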
dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
samples = dist.sample(20, rule="sobol")
size = 80
pyplot.scatter(*samples[:, ::2], s=size, lw=3, color="w", edgecolors=COLOR1)
pyplot.scatter(*samples[:, ::2], s=size, color=COLOR1, alpha=0.6)
pyplot.scatter(*samples[:, 1::2], s=size, lw=3, color="w", edgecolor=COLOR2)
pyplot.scatter(*samples[:, 1::2], s=size, color=COLOR2, alpha=0.6)
save("sampling")
def make_quadrature():
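    # sparse Fejer quadrature nodes; marker size tracks |weight|, colour its sign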
dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
nodes, weights = chaospy.generate_quadrature(2, dist, growth=True, rule="fejer", sparse=True)
size = (weights*500).astype(int)
indices = weights < 0
pyplot.scatter(*nodes[:, indices], s=-size[indices], lw=3, color="w", edgecolors=COLOR2)
pyplot.scatter(*nodes[:, indices], s=-size[indices], color=COLOR2, alpha=0.6)
pyplot.scatter(*nodes[:, ~indices], s=size[~indices], lw=3, color="w", edgecolor=COLOR1)
pyplot.scatter(*nodes[:, ~indices], s=size[~indices], color=COLOR1, alpha=0.6)
save("quadrature")
def make_orthogonality():
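    # two mirrored parabolas with the regions between them shaded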
t = numpy.linspace(-2, 2, 200)
q0 = chaospy.variable()
poly1 = (q0-1.2)*(q0+1.2)
poly2 = -(q0-1.2)*(q0+1.2)
t0 = numpy.linspace(-2, -1.2)
pyplot.fill_between(t0, poly1(t0), poly2(t0), color=COLOR1, alpha=0.3)
t0 = numpy.linspace(1.2, 2)
pyplot.fill_between(t0, poly1(t0), poly2(t0), color=COLOR1, alpha=0.3)
pyplot.plot(t, poly1(t), COLOR1, lw=4)
t0 = numpy.linspace(-1.2, 1.2)
pyplot.fill_between(t, poly1(t), poly2(t), color=COLOR2, alpha=0.3)
pyplot.plot(t, poly2(t), COLOR2, lw=4)
save("orthogonality")
def make_recurrence():
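    # diagonal and off-diagonal lattice points joined by short horizontal segments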
dist = chaospy.Iid(chaospy.Uniform(0, 1), 2)
samples1 = numpy.array([[0, 1, 2, 3], [0, 1, 2, 3]])
samples2 = numpy.array([[0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]])
size = 100
pyplot.plot([.16, .84], [2, 2], COLOR2, lw=4)
pyplot.plot([.16, .84], [3, 3], COLOR2, lw=4)
pyplot.plot([1.16, 1.84], [3, 3], COLOR2, lw=4)
pyplot.scatter(*samples1, s=size, lw=3, color="w", edgecolors=COLOR1)
pyplot.scatter(*samples1, s=size, color=COLOR1, alpha=0.6)
pyplot.scatter(*samples2, s=size, lw=3, color="w", edgecolor=COLOR2)
pyplot.scatter(*samples2, s=size, color=COLOR2, alpha=0.6)
save("recurrence")
def make_descriptive():
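    # half doughnut wedge, two concentric circles, and two bars with error bars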
numpy.random.seed(1234)
dist1 = chaospy.Normal(0, 1)
samples1 = dist1.sample(40)
dist2 = chaospy.Exponential()
samples2 = dist2.sample(20)
x = y = numpy.linspace(0, 2*numpy.pi, 200)
x, y = numpy.cos(x), numpy.sin(y)
pyplot.pie([0.5], colors=[COLOR1], radius=1, normalize=False,
center=(-0.3, 0.3), startangle=45,
wedgeprops={"width": 0.5, "alpha": 0.5, "lw": 4})
pyplot.plot(x-0.3, y+0.3, COLOR1, lw=4)
pyplot.plot(x/2-0.3, y/2+0.3, COLOR1, lw=4)
pyplot.bar([0, 0.6], [0.5, 1], bottom=[-0.6, -0.6],
width=0.5, yerr=[0.2, 0.3], color=COLOR2)
save("descriptive")
if __name__ == "__main__":
make_distribution()
make_polynomial()
make_sampling()
make_quadrature()
make_orthogonality()
make_recurrence()
make_descriptive()
|
[
"numpy.random.seed",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.scatter",
"chaospy.Exponential",
"matplotlib.pyplot.bar",
"chaospy.Uniform",
"matplotlib.pyplot.axis",
"chaospy.variable",
"numpy.sin",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.linspace",
"chaospy.generate_quadrature",
"matplotlib.pyplot.pie",
"numpy.cos",
"chaospy.Normal",
"matplotlib.pyplot.savefig"
] |
[((59, 94), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""figure"""'], {'figsize': '[3, 2]'}), "('figure', figsize=[3, 2])\n", (68, 94), False, 'from matplotlib import pyplot\n'), ((160, 178), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (171, 178), False, 'from matplotlib import pyplot\n'), ((183, 253), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['f"""./{name}.png"""'], {'bbox_inches': '"""tight"""', 'transparent': '(True)'}), "(f'./{name}.png', bbox_inches='tight', transparent=True)\n", (197, 253), False, 'from matplotlib import pyplot\n'), ((289, 301), 'matplotlib.pyplot.clf', 'pyplot.clf', ([], {}), '()\n', (299, 301), False, 'from matplotlib import pyplot\n'), ((338, 364), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (352, 364), False, 'import numpy\n'), ((376, 398), 'chaospy.Normal', 'chaospy.Normal', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (390, 398), False, 'import chaospy\n'), ((689, 707), 'chaospy.variable', 'chaospy.variable', ([], {}), '()\n', (705, 707), False, 'import chaospy\n'), ((752, 778), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(2)', '(100)'], {}), '(-2, 2, 100)\n', (766, 778), False, 'import numpy\n'), ((789, 815), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(0)', '(100)'], {}), '(-2, 0, 100)\n', (803, 815), False, 'import numpy\n'), ((936, 961), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (950, 961), False, 'import numpy\n'), ((1231, 1307), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, ::2]'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolors': 'COLOR1'}), "(*samples[:, ::2], s=size, lw=3, color='w', edgecolors=COLOR1)\n", (1245, 1307), False, 'from matplotlib import pyplot\n'), ((1312, 1377), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, ::2]'], {'s': 'size', 'color': 'COLOR1', 'alpha': '(0.6)'}), '(*samples[:, ::2], s=size, color=COLOR1, alpha=0.6)\n', (1326, 1377), False, 'from matplotlib import pyplot\n'), ((1382, 1458), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, 1::2]'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolor': 'COLOR2'}), "(*samples[:, 1::2], s=size, lw=3, color='w', edgecolor=COLOR2)\n", (1396, 1458), False, 'from matplotlib import pyplot\n'), ((1463, 1529), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples[:, 1::2]'], {'s': 'size', 'color': 'COLOR2', 'alpha': '(0.6)'}), '(*samples[:, 1::2], s=size, color=COLOR2, alpha=0.6)\n', (1477, 1529), False, 'from matplotlib import pyplot\n'), ((1648, 1724), 'chaospy.generate_quadrature', 'chaospy.generate_quadrature', (['(2)', 'dist'], {'growth': '(True)', 'rule': '"""fejer"""', 'sparse': '(True)'}), "(2, dist, growth=True, rule='fejer', sparse=True)\n", (1675, 1724), False, 'import chaospy\n'), ((1793, 1885), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, indices]'], {'s': '(-size[indices])', 'lw': '(3)', 'color': '"""w"""', 'edgecolors': 'COLOR2'}), "(*nodes[:, indices], s=-size[indices], lw=3, color='w',\n edgecolors=COLOR2)\n", (1807, 1885), False, 'from matplotlib import pyplot\n'), ((1886, 1963), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, indices]'], {'s': '(-size[indices])', 'color': 'COLOR2', 'alpha': '(0.6)'}), '(*nodes[:, indices], s=-size[indices], color=COLOR2, alpha=0.6)\n', (1900, 1963), False, 'from matplotlib import pyplot\n'), ((1968, 2060), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, ~indices]'], {'s': 'size[~indices]', 'lw': '(3)', 'color': '"""w"""', 
'edgecolor': 'COLOR1'}), "(*nodes[:, ~indices], s=size[~indices], lw=3, color='w',\n edgecolor=COLOR1)\n", (1982, 2060), False, 'from matplotlib import pyplot\n'), ((2061, 2139), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*nodes[:, ~indices]'], {'s': 'size[~indices]', 'color': 'COLOR1', 'alpha': '(0.6)'}), '(*nodes[:, ~indices], s=size[~indices], color=COLOR1, alpha=0.6)\n', (2075, 2139), False, 'from matplotlib import pyplot\n'), ((2201, 2227), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(2)', '(200)'], {}), '(-2, 2, 200)\n', (2215, 2227), False, 'import numpy\n'), ((2237, 2255), 'chaospy.variable', 'chaospy.variable', ([], {}), '()\n', (2253, 2255), False, 'import chaospy\n'), ((2327, 2351), 'numpy.linspace', 'numpy.linspace', (['(-2)', '(-1.2)'], {}), '(-2, -1.2)\n', (2341, 2351), False, 'import numpy\n'), ((2436, 2458), 'numpy.linspace', 'numpy.linspace', (['(1.2)', '(2)'], {}), '(1.2, 2)\n', (2450, 2458), False, 'import numpy\n'), ((2586, 2611), 'numpy.linspace', 'numpy.linspace', (['(-1.2)', '(1.2)'], {}), '(-1.2, 1.2)\n', (2600, 2611), False, 'import numpy\n'), ((2843, 2884), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3], [0, 1, 2, 3]]'], {}), '([[0, 1, 2, 3], [0, 1, 2, 3]])\n', (2854, 2884), False, 'import numpy\n'), ((2900, 2953), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]]'], {}), '([[0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3]])\n', (2911, 2953), False, 'import numpy\n'), ((2974, 3021), 'matplotlib.pyplot.plot', 'pyplot.plot', (['[0.16, 0.84]', '[2, 2]', 'COLOR2'], {'lw': '(4)'}), '([0.16, 0.84], [2, 2], COLOR2, lw=4)\n', (2985, 3021), False, 'from matplotlib import pyplot\n'), ((3024, 3071), 'matplotlib.pyplot.plot', 'pyplot.plot', (['[0.16, 0.84]', '[3, 3]', 'COLOR2'], {'lw': '(4)'}), '([0.16, 0.84], [3, 3], COLOR2, lw=4)\n', (3035, 3071), False, 'from matplotlib import pyplot\n'), ((3074, 3121), 'matplotlib.pyplot.plot', 'pyplot.plot', (['[1.16, 1.84]', '[3, 3]', 'COLOR2'], {'lw': '(4)'}), '([1.16, 1.84], [3, 3], COLOR2, lw=4)\n', (3085, 3121), False, 'from matplotlib import pyplot\n'), ((3126, 3195), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples1'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolors': 'COLOR1'}), "(*samples1, s=size, lw=3, color='w', edgecolors=COLOR1)\n", (3140, 3195), False, 'from matplotlib import pyplot\n'), ((3200, 3258), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples1'], {'s': 'size', 'color': 'COLOR1', 'alpha': '(0.6)'}), '(*samples1, s=size, color=COLOR1, alpha=0.6)\n', (3214, 3258), False, 'from matplotlib import pyplot\n'), ((3263, 3331), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples2'], {'s': 'size', 'lw': '(3)', 'color': '"""w"""', 'edgecolor': 'COLOR2'}), "(*samples2, s=size, lw=3, color='w', edgecolor=COLOR2)\n", (3277, 3331), False, 'from matplotlib import pyplot\n'), ((3336, 3394), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['*samples2'], {'s': 'size', 'color': 'COLOR2', 'alpha': '(0.6)'}), '(*samples2, s=size, color=COLOR2, alpha=0.6)\n', (3350, 3394), False, 'from matplotlib import pyplot\n'), ((3450, 3473), 'numpy.random.seed', 'numpy.random.seed', (['(1234)'], {}), '(1234)\n', (3467, 3473), False, 'import numpy\n'), ((3486, 3506), 'chaospy.Normal', 'chaospy.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (3500, 3506), False, 'import chaospy\n'), ((3551, 3572), 'chaospy.Exponential', 'chaospy.Exponential', ([], {}), '()\n', (3570, 3572), False, 'import chaospy\n'), ((3618, 3654), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2 * numpy.pi)', '(200)'], {}), 
'(0, 2 * numpy.pi, 200)\n', (3632, 3654), False, 'import numpy\n'), ((3696, 3846), 'matplotlib.pyplot.pie', 'pyplot.pie', (['[0.5]'], {'colors': '[COLOR1]', 'radius': '(1)', 'normalize': '(False)', 'center': '(-0.3, 0.3)', 'startangle': '(45)', 'wedgeprops': "{'width': 0.5, 'alpha': 0.5, 'lw': 4}"}), "([0.5], colors=[COLOR1], radius=1, normalize=False, center=(-0.3,\n 0.3), startangle=45, wedgeprops={'width': 0.5, 'alpha': 0.5, 'lw': 4})\n", (3706, 3846), False, 'from matplotlib import pyplot\n'), ((3877, 3920), 'matplotlib.pyplot.plot', 'pyplot.plot', (['(x - 0.3)', '(y + 0.3)', 'COLOR1'], {'lw': '(4)'}), '(x - 0.3, y + 0.3, COLOR1, lw=4)\n', (3888, 3920), False, 'from matplotlib import pyplot\n'), ((3921, 3972), 'matplotlib.pyplot.plot', 'pyplot.plot', (['(x / 2 - 0.3)', '(y / 2 + 0.3)', 'COLOR1'], {'lw': '(4)'}), '(x / 2 - 0.3, y / 2 + 0.3, COLOR1, lw=4)\n', (3932, 3972), False, 'from matplotlib import pyplot\n'), ((3970, 4068), 'matplotlib.pyplot.bar', 'pyplot.bar', (['[0, 0.6]', '[0.5, 1]'], {'bottom': '[-0.6, -0.6]', 'width': '(0.5)', 'yerr': '[0.2, 0.3]', 'color': 'COLOR2'}), '([0, 0.6], [0.5, 1], bottom=[-0.6, -0.6], width=0.5, yerr=[0.2, \n 0.3], color=COLOR2)\n', (3980, 4068), False, 'from matplotlib import pyplot\n'), ((1142, 1163), 'chaospy.Uniform', 'chaospy.Uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1157, 1163), False, 'import chaospy\n'), ((1600, 1621), 'chaospy.Uniform', 'chaospy.Uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1615, 1621), False, 'import chaospy\n'), ((2802, 2823), 'chaospy.Uniform', 'chaospy.Uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2817, 2823), False, 'import chaospy\n'), ((3664, 3676), 'numpy.cos', 'numpy.cos', (['x'], {}), '(x)\n', (3673, 3676), False, 'import numpy\n'), ((3678, 3690), 'numpy.sin', 'numpy.sin', (['y'], {}), '(y)\n', (3687, 3690), False, 'import numpy\n')]
|
### This script reads the backup file created by the solmanager and outputs the
### objects it reads as JSON or CSV, one file per SOL type.
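###
### Example invocation (the script name "dump_backup.py", the backup path and
### the SOL type id below are placeholders, not values taken from this file):
###   python dump_backup.py ../solmanager.backup -f csv -t 40
### Note: the ./backup/ output directory must already exist.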
#============================ imports =========================================
import os
import argparse
import json
import time
from sensorobjectlibrary import Sol as sol, SolDefines
parser = argparse.ArgumentParser()
#============================ args ============================================
outputfile = 'solmanager.backup.json'
parser.add_argument("inputfile",
help="input file [../solmanager.backup]",
type=str)
parser.add_argument("-o", help="output file [solmanager.backup.json]", type=str)
parser.add_argument("-f", help="output format [json|csv]", type=str, default="json")
parser.add_argument("-t", help="filter SOL type (decimal type id)", type=int)
args = parser.parse_args()
if args.o is not None:
outputfile = args.o
#============================ main ============================================
# read the file
obj_list = sol.loadFromFile(args.inputfile)
# write the output
for obj in obj_list:
# skip if type is filtered
if args.t is not None:
if obj["type"] != args.t:
continue
# format object
str_type = SolDefines.sol_type_to_type_name(obj["type"])
if args.f == "json":
obj_formated = json.dumps(obj)
else:
if type(obj['value']) == list:
obj['value'] = obj['value'][0]
obj_formated = "|".join([
time.strftime("%a %d %b %Y %H:%M:%S UTC", time.localtime(obj["timestamp"])),
obj["mac"]
]) + "|" + "|".join([str(val) for val in obj["value"].values()])
# write object
outfile = "backup/" + str_type + "." + args.f
if not os.path.isfile(outfile) and args.f == "csv":
with open(outfile, 'w') as out:
out.write("|".join(["timestamp", "mac"]) + "|" +
"|".join([str(val) for val in obj["value"]]) + "\n")
with open(outfile, 'a') as out:
out.write(obj_formated+"\n")
|
[
"sensorobjectlibrary.Sol.loadFromFile",
"argparse.ArgumentParser",
"sensorobjectlibrary.SolDefines.sol_type_to_type_name",
"json.dumps",
"os.path.isfile",
"time.localtime"
] |
[((303, 328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (326, 328), False, 'import argparse\n'), ((1005, 1037), 'sensorobjectlibrary.Sol.loadFromFile', 'sol.loadFromFile', (['args.inputfile'], {}), '(args.inputfile)\n', (1021, 1037), True, 'from sensorobjectlibrary import Sol as sol, SolDefines\n'), ((1229, 1274), 'sensorobjectlibrary.SolDefines.sol_type_to_type_name', 'SolDefines.sol_type_to_type_name', (["obj['type']"], {}), "(obj['type'])\n", (1261, 1274), False, 'from sensorobjectlibrary import Sol as sol, SolDefines\n'), ((1323, 1338), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (1333, 1338), False, 'import json\n'), ((1731, 1754), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (1745, 1754), False, 'import os\n'), ((1519, 1551), 'time.localtime', 'time.localtime', (["obj['timestamp']"], {}), "(obj['timestamp'])\n", (1533, 1551), False, 'import time\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of MNASNet.
Architecture: https://arxiv.org/abs/1807.11626
Note:
  There are some differences from tensorflow/tpu/blob/master/models/official/mnasnet/.
  The b1 model gives 74.0% accuracy on ImageNet.
"""
import functools
import collections
import contextlib
import copy
import os
import tensorflow as tf
slim = tf.contrib.slim
def _make_divisible(v, divisor, min_value=None):
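  """Rounds `v` to the nearest multiple of `divisor`.

  The result is clamped to at least `min_value` (defaults to `divisor`) and is
  never more than 10% below the original value, e.g. _make_divisible(30, 8)
  returns 32.
  """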
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@slim.add_arg_scope
def depth_multiplier_fn(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
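  """Scales 'num_outputs' in `output_params` in place by `multiplier`.

  The scaled value is rounded with _make_divisible so it stays a multiple of
  `divisible_by` and at least `min_depth`. Used as the per-op multiplier
  function of the conv defs below; `divisible_by`/`min_depth` can be overridden
  through arg_scope.
  """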
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, **params):
multiplier = params.pop('multiplier_transorm', depth_multiplier_fn)
return _Op(opfunc, params=params, multiplier_func=multiplier)
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
def safe_arg_scope(funcs, **kwargs):
"""Returns `slim.arg_scope` with all None arguments removed.
Arguments:
funcs: Functions to pass to `arg_scope`.
**kwargs: Arguments to pass to `arg_scope`.
Returns:
arg_scope or No-op context manager.
Note: can be useful if None value should be interpreted as "do not overwrite
this parameter value".
"""
filtered_args = {name: value for name, value in kwargs.items()
if value is not None}
if filtered_args:
return slim.arg_scope(funcs, **filtered_args)
else:
return NoOpScope()
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
def se_block(input_tensor,
se_ratio=0.25,
scope=None):
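  """Squeeze-and-excitation block.

  Globally average-pools the input, squeezes the channel dimension by
  `se_ratio` with a 1x1 conv (relu), expands it back with a second 1x1 conv
  (sigmoid), and rescales the input channel-wise with the resulting gates.
  """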
with tf.variable_scope(scope, default_name='se_block') as s, \
tf.name_scope(s.original_name_scope):
num_channel = input_tensor.get_shape()[3]
num_reduced_channel = max(1, int(int(num_channel) * se_ratio))
squeeze = tf.reduce_mean(input_tensor, axis=[1,2], keepdims=True)
excitation_reduce = slim.conv2d(inputs=squeeze,
num_outputs=num_reduced_channel,
kernel_size=[1,1],
stride=1,
padding='same',
normalizer_fn=None,
activation_fn=tf.nn.relu,
scope='excitation_reduce')
excitation_expand = slim.conv2d(inputs=excitation_reduce,
num_outputs=num_channel,
kernel_size=[1,1],
stride=1,
padding='same',
normalizer_fn=None,
activation_fn=tf.nn.sigmoid,
scope='excitation_expand')
return excitation_expand * input_tensor
def sep_conv(input_tensor,
num_outputs,
stride=1,
rate=1,
kernel_size=[3,3],
use_explicit_padding=False,
padding='SAME',
scope=None):
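  """Depthwise-separable convolution.

  A depthwise convolution (batch norm + relu6) followed by a 1x1 pointwise
  projection to `num_outputs` channels (batch norm, no activation).
  """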
with tf.variable_scope(scope, default_name='sep_conv') as s, \
tf.name_scope(s.original_name_scope):
if use_explicit_padding:
if padding != 'SAME':
raise TypeError('`use_explicit_padding` should only be used with '
'"SAME" padding.')
padding = 'VALID'
net = slim.separable_conv2d(inputs=input_tensor,
num_outputs=None,
kernel_size=kernel_size,
depth_multiplier=1,
stride=stride,
rate=rate,
normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.relu6,
padding=padding,
scope='depthwise')
net = slim.conv2d(inputs=net,
num_outputs=num_outputs,
kernel_size=[1,1],
padding=padding,
normalizer_fn=slim.batch_norm,
activation_fn=tf.identity,
scope='project')
return tf.identity(net, name='output')
def mb_conv(input_tensor,
num_outputs,
stride=1,
rate=1,
kernel_size=(3,3),
expand_factor=3,
residual=True,
use_explicit_padding=False,
se_ratio=0,
padding='SAME',
scope=None):
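  """Inverted-residual (MBConv) block.

  Expands to `num_outputs * expand_factor` channels with a 1x1 conv (relu6),
  applies a depthwise convolution, projects back to `num_outputs` with a
  linear 1x1 conv, optionally applies squeeze-and-excitation (`se_ratio` > 0),
  and adds a residual connection when `residual` is set, the stride is 1 and
  the input/output channel counts match.
  """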
with tf.variable_scope(scope, default_name='mb_conv') as s, \
tf.name_scope(s.original_name_scope):
if use_explicit_padding:
if padding != 'SAME':
raise TypeError('`use_explicit_padding` should only be used with '
'"SAME" padding.')
padding = 'VALID'
net = slim.conv2d(inputs=input_tensor,
num_outputs=num_outputs*expand_factor,
kernel_size=[1,1],
padding=padding,
normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.relu6,
scope='expand')
net = slim.separable_conv2d(inputs=net,
num_outputs=None,
kernel_size=kernel_size,
depth_multiplier=1,
stride=stride,
rate=rate,
normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.relu6,
padding=padding,
scope='depthwise')
net = slim.conv2d(inputs=net,
num_outputs=num_outputs,
kernel_size=[1,1],
padding=padding,
normalizer_fn=slim.batch_norm,
activation_fn=tf.identity,
scope='project')
if se_ratio > 0 and se_ratio <= 1:
net = se_block(net,se_ratio)
elif se_ratio < 0 or se_ratio > 1:
raise ValueError('Invalid se_ratio.')
if residual and stride == 1 and net.get_shape().as_list()[3] == input_tensor.get_shape().as_list()[3]:
net += input_tensor
return tf.identity(net, name='output')
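# Conv definitions: 'defaults' holds arg_scope defaults applied while the
# network is built, and 'spec' is the ordered list of layer ops, from the stem
# conv through the MBConv stages to the final 1x1 conv.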
MNASNET_A1_DEF = dict(
defaults={
# Note: The implementation here is different from tensorflow/tpu/blob/master/models/official/mnasnet/.
# The paper does not specify whether activation_fn is relu or relu6.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(sep_conv,stride=1,num_outputs=16, kernel_size=[3, 3]),
op(mb_conv,stride=2,num_outputs=24, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=1,num_outputs=24, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=2,num_outputs=40, kernel_size=[5, 5],expand_factor=3,se_ratio=0.25),
op(mb_conv,stride=1,num_outputs=40, kernel_size=[5, 5],expand_factor=3,se_ratio=0.25),
op(mb_conv,stride=1,num_outputs=40, kernel_size=[5, 5],expand_factor=3,se_ratio=0.25),
op(mb_conv,stride=2,num_outputs=80, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=1,num_outputs=80, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=1,num_outputs=80, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=1,num_outputs=80, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=1,num_outputs=112, kernel_size=[3, 3],expand_factor=6,se_ratio=0.25),
op(mb_conv,stride=1,num_outputs=112, kernel_size=[3, 3],expand_factor=6,se_ratio=0.25),
op(mb_conv,stride=2,num_outputs=160, kernel_size=[5, 5],expand_factor=6,se_ratio=0.25),
op(mb_conv,stride=1,num_outputs=160, kernel_size=[5, 5],expand_factor=6,se_ratio=0.25),
op(mb_conv,stride=1,num_outputs=160, kernel_size=[5, 5],expand_factor=6,se_ratio=0.25),
op(mb_conv,stride=1,num_outputs=320, kernel_size=[3, 3],expand_factor=6),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
],
)
MNASNET_B1_DEF = dict(
defaults={
# Note: The implementation here is different from tensorflow/tpu/blob/master/models/official/mnasnet/.
# The paper does not specify whether activation_fn is relu or relu6.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
op(sep_conv,stride=1,num_outputs=16, kernel_size=[3, 3]),
op(mb_conv,stride=2,num_outputs=24, kernel_size=[3, 3],expand_factor=3),
op(mb_conv,stride=1,num_outputs=24, kernel_size=[3, 3],expand_factor=3),
op(mb_conv,stride=1,num_outputs=24, kernel_size=[3, 3],expand_factor=3),
op(mb_conv,stride=2,num_outputs=40, kernel_size=[5, 5],expand_factor=3),
op(mb_conv,stride=1,num_outputs=40, kernel_size=[5, 5],expand_factor=3),
op(mb_conv,stride=1,num_outputs=40, kernel_size=[5, 5],expand_factor=3),
op(mb_conv,stride=2,num_outputs=80, kernel_size=[5, 5],expand_factor=6),
op(mb_conv,stride=1,num_outputs=80, kernel_size=[5, 5],expand_factor=6),
op(mb_conv,stride=1,num_outputs=80, kernel_size=[5, 5],expand_factor=6),
op(mb_conv,stride=1,num_outputs=96, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=1,num_outputs=96, kernel_size=[3, 3],expand_factor=6),
op(mb_conv,stride=2,num_outputs=192, kernel_size=[5, 5],expand_factor=6),
op(mb_conv,stride=1,num_outputs=192, kernel_size=[5, 5],expand_factor=6),
op(mb_conv,stride=1,num_outputs=192, kernel_size=[5, 5],expand_factor=6),
op(mb_conv,stride=1,num_outputs=192, kernel_size=[5, 5],expand_factor=6),
op(mb_conv,stride=1,num_outputs=320, kernel_size=[3, 3],expand_factor=6,residual=False),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
],
)
@slim.add_arg_scope
def mnasnet_base(inputs,
conv_defs=None,
depth_multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""MNasNet base network.
Constructs a network from inputs to the given final endpoint. By default
the network is constructed in inference mode. To create network
in training mode use:
with slim.arg_scope(mnasnet.training_scope()):
logits, endpoints = mnasnet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A list of op(...) layers specifying the net architecture.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    final_endpoint: The name of the last layer to build; construction stops
      once this endpoint is reached. If None, the full network is built.
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
NOTE- output_stride relies on all consequent operators to support dilated
operators via "rate" parameter. This might require wrapping non-conv
operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
    is_training: How to set up batch_norm and other ops. Note: most of the time
      this does not need to be set directly. Use mnasnet.training_scope() to set
up training instead. This parameter is here for backward compatibility
only. It is safe to set it to the value matching
training_scope(is_training=...). It is also safe to explicitly set
      it to False, even if there is an outer training_scope set to training.
(The network will be built in inference mode). If this is set to None,
no arg_scope is added for slim.batch_norm's is_training parameter.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
with _scope_all(scope, default_scope='Mnasnet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
current_stride = 1
rate = 1
net = inputs
end_points = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, depth_multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1.
if layer_rate > 1:
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
        print('On opdef=' + str(opdef))
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
if final_endpoint is not None and end_point == final_endpoint:
break
return net, end_points
@slim.add_arg_scope
def mnasnet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mnasnet',
base_only=False,
**mnasnet_args):
"""MNASNet model for classification, supports both a1 and b1.
Note: default mode is inference, use mnasnet.training_scope to create
training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
**mnasnet_args: passed to mnasnet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
- output_stride: will ensure that the last layer has at most total stride.
If the architecture calls for more stride than that provided
(e.g. output_stride=16, but the architecture has 5 stride=2 operators),
it will replace output_stride with fractional convolutions using Atrous
Convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
"""
is_training = mnasnet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mnasnet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mnasnet_base(inputs, scope=scope, **mnasnet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
@slim.add_arg_scope
def mnasnet_a1(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='Mnasnet',
conv_defs=MNASNET_A1_DEF,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
**kwargs):
"""Creates mnasnet a1 network.
Inference mode is created by default. To create training use training_scope
below.
with tf.contrib.slim.arg_scope(mnasnet.training_scope()):
logits, endpoints = mnasnet.mnasnet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer. Note: this is called depth multiplier in the
paper but the name is kept for consistency with slim's model builder.
scope: Scope of the operator
conv_defs: Allows to override default conv def.
    finegrain_classification_mode: When set to True, the model
      will keep the last layer large even for small multipliers, which
      https://arxiv.org/abs/1801.04381
      suggests improves performance for ImageNet-type problems.
      *Note* ignored if final_endpoint makes the builder exit earlier.
    min_depth: If provided, will ensure that all layers have at least this
      many channels after application of the depth multiplier.
    divisible_by: If provided, will ensure that the number of channels in
      every layer is divisible by this number.
**kwargs: passed directly to mnasnet
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
depth_args = {}
# NB: do not set depth_args unless they are provided to avoid overriding
# whatever default depth_multiplier might have thanks to arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((depth_multiplier_fn,), **depth_args):
return mnasnet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
depth_multiplier=depth_multiplier,
**kwargs)
@slim.add_arg_scope
def mnasnet_b1(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='Mnasnet',
conv_defs=MNASNET_B1_DEF,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
**kwargs):
"""Creates mnasnet b1 network.
Inference mode is created by default. To create training use training_scope
below.
with tf.contrib.slim.arg_scope(mnasnet.training_scope()):
logits, endpoints = mnasnet.mnasnet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer. Note: this is called depth multiplier in the
paper but the name is kept for consistency with slim's model builder.
scope: Scope of the operator
conv_defs: Allows to override default conv def.
    finegrain_classification_mode: When set to True, the model
      will keep the last layer large even for small multipliers, which
      https://arxiv.org/abs/1801.04381
      suggests improves performance for ImageNet-type problems.
      *Note* ignored if final_endpoint makes the builder exit earlier.
    min_depth: If provided, will ensure that all layers have at least this
      many channels after application of the depth multiplier.
    divisible_by: If provided, will ensure that the number of channels in
      every layer is divisible by this number.
**kwargs: passed directly to mnasnet
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
depth_args = {}
# NB: do not set depth_args unless they are provided to avoid overriding
# whatever default depth_multiplier might have thanks to arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((depth_multiplier_fn,), **depth_args):
return mnasnet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
depth_multiplier=depth_multiplier,
**kwargs)
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
# Wrappers for mnasnet with depth multipliers. Note that
# 'finegrain_classification_mode' is set to True for the sub-1.0 multipliers,
# which means the embedding layer will not be shrunk when given a
# depth-multiplier < 1.0.
mnasnet_a1_140 = wrapped_partial(mnasnet_a1, depth_multiplier=1.4)
mnasnet_a1_050 = wrapped_partial(mnasnet_a1, depth_multiplier=0.5,finegrain_classification_mode=True)
mnasnet_a1_035 = wrapped_partial(mnasnet_a1, depth_multiplier=0.35,finegrain_classification_mode=True)
mnasnet_b1_140 = wrapped_partial(mnasnet_b1, depth_multiplier=1.4)
mnasnet_b1_050 = wrapped_partial(mnasnet_b1, depth_multiplier=0.5,finegrain_classification_mode=True)
mnasnet_b1_035 = wrapped_partial(mnasnet_b1, depth_multiplier=0.35,finegrain_classification_mode=True)
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
"""Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it has
baked in average pool which has better support across hardware.
Args:
input_tensor: input tensor
pool_op: pooling op (avg pool is default)
Returns:
a tensor batch_size x 1 x 1 x depth.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(
[1, tf.shape(input_tensor)[1],
tf.shape(input_tensor)[2], 1])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
# Recover output shape, for unknown shape.
output.set_shape([None, 1, 1, None])
return output
def training_scope(is_training=True,
weight_decay=0.00001,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.9997):
"""Defines MNASNet training scope.
Usage:
with tf.contrib.slim.arg_scope(mnasnet.training_scope()):
logits, endpoints = mnasnet_v2.mnasnet(input_tensor)
     # the network created will be trainable with dropout/batch norm
# initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
      value False is not needed. If this is set to None, the parameter is not
added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
stddev: Standard deviation for initialization, if negative uses xavier.
    dropout_keep_prob: dropout keep probability (not set if None).
    bn_decay: decay for the batch norm moving averages (not set if None).
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
if stddev < 0:
weight_intitializer = slim.initializers.xavier_initializer()
else:
weight_intitializer = tf.truncated_normal_initializer(stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
weights_initializer=weight_intitializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mnasnet_base, mnasnet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
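

if __name__ == '__main__':
  # Minimal usage sketch, not part of the original model code. The input
  # resolution (224x224), batch size and number of classes below are
  # illustrative assumptions; the builders accept any compatible values.
  # Requires TF1-style graph mode, which this file already assumes through
  # tf.contrib.slim.
  images = tf.placeholder(tf.float32, [1, 224, 224, 3], name='images')
  with slim.arg_scope(training_scope(is_training=False)):
    logits, end_points = mnasnet_a1(images, num_classes=1001)
  print('logits: %s' % logits.shape)
  print('number of endpoints: %d' % len(end_points))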
|
[
"functools.partial",
"copy.deepcopy",
"tensorflow.identity",
"tensorflow.pad",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"functools.update_wrapper",
"tensorflow.shape",
"collections.namedtuple",
"tensorflow.squeeze",
"tensorflow.zeros_initializer",
"tensorflow.name_scope",
"tensorflow.truncated_normal_initializer"
] |
[((1761, 1826), 'collections.namedtuple', 'collections.namedtuple', (['"""Op"""', "['op', 'params', 'multiplier_func']"], {}), "('Op', ['op', 'params', 'multiplier_func'])\n", (1783, 1826), False, 'import collections\n'), ((4602, 4690), 'tensorflow.pad', 'tf.pad', (['inputs', '[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]], [0, 0]]'], {}), '(inputs, [[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],\n [0, 0]])\n', (4608, 4690), True, 'import tensorflow as tf\n'), ((27445, 27485), 'functools.partial', 'functools.partial', (['func', '*args'], {}), '(func, *args, **kwargs)\n', (27462, 27485), False, 'import functools\n'), ((27488, 27532), 'functools.update_wrapper', 'functools.update_wrapper', (['partial_func', 'func'], {}), '(partial_func, func)\n', (27512, 27532), False, 'import functools\n'), ((2235, 2287), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'default_name': 'default_scope'}), '(scope, default_name=default_scope)\n', (2252, 2287), True, 'import tensorflow as tf\n'), ((2302, 2338), 'tensorflow.name_scope', 'tf.name_scope', (['s.original_name_scope'], {}), '(s.original_name_scope)\n', (2315, 2338), True, 'import tensorflow as tf\n'), ((4836, 4885), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'default_name': '"""se_block"""'}), "(scope, default_name='se_block')\n", (4853, 4885), True, 'import tensorflow as tf\n'), ((4903, 4939), 'tensorflow.name_scope', 'tf.name_scope', (['s.original_name_scope'], {}), '(s.original_name_scope)\n', (4916, 4939), True, 'import tensorflow as tf\n'), ((5085, 5141), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['input_tensor'], {'axis': '[1, 2]', 'keepdims': '(True)'}), '(input_tensor, axis=[1, 2], keepdims=True)\n', (5099, 5141), True, 'import tensorflow as tf\n'), ((6420, 6469), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'default_name': '"""sep_conv"""'}), "(scope, default_name='sep_conv')\n", (6437, 6469), True, 'import tensorflow as tf\n'), ((6487, 6523), 'tensorflow.name_scope', 'tf.name_scope', (['s.original_name_scope'], {}), '(s.original_name_scope)\n', (6500, 6523), True, 'import tensorflow as tf\n'), ((7667, 7698), 'tensorflow.identity', 'tf.identity', (['net'], {'name': '"""output"""'}), "(net, name='output')\n", (7678, 7698), True, 'import tensorflow as tf\n'), ((8007, 8055), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'default_name': '"""mb_conv"""'}), "(scope, default_name='mb_conv')\n", (8024, 8055), True, 'import tensorflow as tf\n'), ((8073, 8109), 'tensorflow.name_scope', 'tf.name_scope', (['s.original_name_scope'], {}), '(s.original_name_scope)\n', (8086, 8109), True, 'import tensorflow as tf\n'), ((9946, 9977), 'tensorflow.identity', 'tf.identity', (['net'], {'name': '"""output"""'}), "(net, name='output')\n", (9957, 9977), True, 'import tensorflow as tf\n'), ((17187, 17221), 'copy.deepcopy', 'copy.deepcopy', (['conv_defs_overrides'], {}), '(conv_defs_overrides)\n', (17200, 17221), False, 'import copy\n'), ((21577, 21625), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope', '"""Mnasnet"""'], {'reuse': 'reuse'}), "(scope, 'Mnasnet', reuse=reuse)\n", (21594, 21625), True, 'import tensorflow as tf\n'), ((21651, 21679), 'tensorflow.identity', 'tf.identity', (['inputs', '"""input"""'], {}), "(inputs, 'input')\n", (21662, 21679), True, 'import tensorflow as tf\n'), ((21820, 21854), 'tensorflow.identity', 'tf.identity', (['net'], {'name': '"""embedding"""'}), "(net, name='embedding')\n", (21831, 21854), True, 'import tensorflow as 
tf\n'), ((22399, 22425), 'tensorflow.squeeze', 'tf.squeeze', (['logits', '[1, 2]'], {}), '(logits, [1, 2])\n', (22409, 22425), True, 'import tensorflow as tf\n'), ((22442, 22476), 'tensorflow.identity', 'tf.identity', (['logits'], {'name': '"""output"""'}), "(logits, name='output')\n", (22453, 22476), True, 'import tensorflow as tf\n'), ((24324, 24348), 'copy.deepcopy', 'copy.deepcopy', (['conv_defs'], {}), '(conv_defs)\n', (24337, 24348), False, 'import copy\n'), ((26698, 26722), 'copy.deepcopy', 'copy.deepcopy', (['conv_defs'], {}), '(conv_defs)\n', (26711, 26722), False, 'import copy\n'), ((30702, 30748), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), '(stddev=stddev)\n', (30733, 30748), True, 'import tensorflow as tf\n'), ((21867, 21894), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Logits"""'], {}), "('Logits')\n", (21884, 21894), True, 'import tensorflow as tf\n'), ((22326, 22348), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (22346, 22348), True, 'import tensorflow as tf\n'), ((28835, 28857), 'tensorflow.shape', 'tf.shape', (['input_tensor'], {}), '(input_tensor)\n', (28843, 28857), True, 'import tensorflow as tf\n'), ((28871, 28893), 'tensorflow.shape', 'tf.shape', (['input_tensor'], {}), '(input_tensor)\n', (28879, 28893), True, 'import tensorflow as tf\n')]
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.estimators.estimator as estimator_lib
def _is_categorical_spec(spec):
return (tensor_spec.is_discrete(spec) and tensor_spec.is_bounded(spec) and
spec.shape == [] and spec.minimum == 0)
class TabularQLearning(object):
"""Approximate the density ratio using exact matrix solves."""
def __init__(self,
dataset_spec,
gamma: Union[float, tf.Tensor],
reward_fn: Callable = None,
solve_for_state_action_value: bool = True,
num_qvalues: Optional[int] = None,
bootstrap: bool = True,
perturbation_scale: Union[float, tf.Tensor] = 1.0,
default_reward_value: Union[float, tf.Tensor] = 0.0,
limit_episodes: Optional[int] = None):
"""Initializes the solver.
Args:
dataset_spec: The spec of the dataset that will be given.
gamma: The discount factor to use.
reward_fn: A function that takes in an EnvStep and returns the reward for
that step. If not specified, defaults to just EnvStep.reward.
solve_for_state_action_value: Whether to solve for Q-values (default) or
V-values, i.e., state-values.
num_qvalues: If specified, maintains an ensemble of Q-values for
confidence bound estimation.
bootstrap: Whether to bootstrap the dataset.
perturbation_scale: Scale of reward perturbation.
default_reward_value: Value to use for reward of unseen state-actions.
limit_episodes: How many episodes to take from the dataset. Defaults to
None (take all episodes).
"""
self._dataset_spec = dataset_spec
self._gamma = gamma
if reward_fn is None:
reward_fn = lambda env_step: env_step.reward
self._reward_fn = reward_fn
self._num_qvalues = num_qvalues
self._bootstrap = bootstrap
self._perturbation_scale = np.array(perturbation_scale)
if len(np.shape(self._perturbation_scale)) < 1:
self._perturbation_scale = np.reshape(self._perturbation_scale, [-1])
self._num_perturbations = len(self._perturbation_scale)
self._default_reward_value = default_reward_value
self._limit_episodes = limit_episodes
self._solve_for_state_action_value = solve_for_state_action_value
if (not self._solve_for_state_action_value and
not self._dataset_spec.has_log_probability()):
raise ValueError('Dataset must contain log-probability when '
'solve_for_state_action_value is False.')
# Get number of states/actions.
observation_spec = self._dataset_spec.observation
action_spec = self._dataset_spec.action
if not _is_categorical_spec(observation_spec):
raise ValueError('Observation spec must be discrete and bounded.')
self._num_states = observation_spec.maximum + 1
if not _is_categorical_spec(action_spec):
raise ValueError('Action spec must be discrete and bounded.')
self._num_actions = action_spec.maximum + 1
self._dimension = (
self._num_states * self._num_actions
if self._solve_for_state_action_value else self._num_states)
self._dimension += 1 # Add 1 for terminal absorbing state.
self._point_qvalues = np.zeros([self._dimension])
if self._num_qvalues is not None:
self._ensemble_qvalues = np.zeros([self._num_qvalues, self._dimension])
def _get_index(self, state, action):
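    """Maps (state, action) to a flat index into the Q-value table.

    When solving for state values instead of state-action values, the index
    is just the state.
    """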
if self._solve_for_state_action_value:
return state * self._num_actions + action
else:
return state
def solve(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
regularizer: float = 1e-8):
"""Solves for Q-values and then approximates target policy value.
Args:
dataset: The dataset to sample experience from.
target_policy: The policy whose value we want to estimate.
regularizer: A small constant to add before dividing.
Returns:
Estimated average per-step reward of the target policy.
"""
num_estimates = 1 + int(self._num_qvalues)
transition_matrix = np.zeros(
[self._dimension, self._dimension, num_estimates])
reward_vector = np.zeros(
[self._dimension, num_estimates, self._num_perturbations])
total_weights = np.zeros([self._dimension, num_estimates])
episodes, valid_steps = dataset.get_all_episodes(limit=self._limit_episodes)
#all_rewards = self._reward_fn(episodes)
#reward_std = np.ma.MaskedArray(all_rewards, valid_steps).std()
tfagents_episodes = dataset_lib.convert_to_tfagents_timestep(episodes)
sample_weights = np.array(valid_steps, dtype=np.int64)
if not self._bootstrap or self._num_qvalues is None:
sample_weights = (
sample_weights[:, :, None] * np.ones([1, 1, num_estimates]))
else:
probs = np.reshape(sample_weights, [-1]) / np.sum(sample_weights)
weights = np.random.multinomial(
np.sum(sample_weights), probs,
size=self._num_qvalues).astype(np.float32)
weights = np.reshape(
np.transpose(weights),
list(np.shape(sample_weights)) + [self._num_qvalues])
sample_weights = np.concatenate([sample_weights[:, :, None], weights],
axis=-1)
for episode_num in range(tf.shape(valid_steps)[0]):
      # Precompute probabilities for this episode.
this_episode = tf.nest.map_structure(lambda t: t[episode_num], episodes)
this_tfagents_episode = dataset_lib.convert_to_tfagents_timestep(
this_episode)
episode_target_log_probabilities = target_policy.distribution(
this_tfagents_episode).action.log_prob(this_episode.action)
episode_target_probs = target_policy.distribution(
this_tfagents_episode).action.probs_parameter()
for step_num in range(tf.shape(valid_steps)[1] - 1):
this_step = tf.nest.map_structure(lambda t: t[episode_num, step_num],
episodes)
next_step = tf.nest.map_structure(
lambda t: t[episode_num, step_num + 1], episodes)
this_tfagents_step = dataset_lib.convert_to_tfagents_timestep(this_step)
next_tfagents_step = dataset_lib.convert_to_tfagents_timestep(next_step)
this_weights = sample_weights[episode_num, step_num, :]
if this_step.is_last() or not valid_steps[episode_num, step_num]:
continue
weight = this_weights
this_index = self._get_index(this_step.observation, this_step.action)
reward_vector[this_index, :, :] += np.expand_dims(
self._reward_fn(this_step) * weight, -1)
if self._num_qvalues is not None:
random_noise = np.random.binomial(this_weights[1:].astype('int64'),
0.5)
reward_vector[this_index, 1:, :] += (
self._perturbation_scale[None, :] *
(2 * random_noise - this_weights[1:])[:, None])
total_weights[this_index] += weight
policy_ratio = 1.0
if not self._solve_for_state_action_value:
policy_ratio = tf.exp(episode_target_log_probabilities[step_num] -
this_step.get_log_probability())
# Need to weight next nu by importance weight.
next_weight = (
weight if self._solve_for_state_action_value else policy_ratio *
weight)
if next_step.is_absorbing():
next_index = -1 # Absorbing state.
transition_matrix[this_index, next_index] += next_weight
else:
next_probs = episode_target_probs[step_num + 1]
for next_action, next_prob in enumerate(next_probs):
next_index = self._get_index(next_step.observation, next_action)
transition_matrix[this_index, next_index] += next_prob * next_weight
print('Done processing data.')
transition_matrix /= (regularizer + total_weights)[:, None, :]
reward_vector /= (regularizer + total_weights)[:, :, None]
reward_vector[np.where(np.equal(total_weights,
0.0))] = self._default_reward_value
reward_vector[-1, :, :] = 0.0 # Terminal absorbing state has 0 reward.
self._point_qvalues = np.linalg.solve(
np.eye(self._dimension) - self._gamma * transition_matrix[:, :, 0],
reward_vector[:, 0])
if self._num_qvalues is not None:
self._ensemble_qvalues = np.linalg.solve(
(np.eye(self._dimension) -
self._gamma * np.transpose(transition_matrix, [2, 0, 1])),
np.transpose(reward_vector, [1, 0, 2]))
return self.estimate_average_reward(dataset, target_policy)
def estimate_average_reward(self, dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy):
"""Estimates value (average per-step reward) of policy.
Args:
dataset: The dataset to sample experience from.
target_policy: The policy whose value we want to estimate.
Returns:
Estimated average per-step reward of the target policy.
"""
def reward_fn(env_step, valid_steps, qvalues=self._point_qvalues):
"""Computes average initial Q-values of episodes."""
# env_step is an episode, and we just want the first step.
if tf.rank(valid_steps) == 1:
first_step = tf.nest.map_structure(lambda t: t[0, ...], env_step)
else:
first_step = tf.nest.map_structure(lambda t: t[:, 0, ...], env_step)
if self._solve_for_state_action_value:
indices = self._get_index(first_step.observation[:, None],
np.arange(self._num_actions)[None, :])
initial_qvalues = tf.cast(tf.gather(qvalues, indices), tf.float32)
tfagents_first_step = dataset_lib.convert_to_tfagents_timestep(
first_step)
initial_target_probs = target_policy.distribution(
tfagents_first_step).action.probs_parameter()
value = tf.reduce_sum(initial_qvalues * initial_target_probs, axis=-1)
else:
indices = self._get_index(first_step.observation, first_step.action)
value = tf.cast(tf.gather(qvalues, indices), tf.float32)
return value
def weight_fn(env_step, valid_steps):
return tf.ones([tf.shape(valid_steps)[0]], dtype=tf.float32)
if self._num_qvalues is None:
return (1 - self._gamma) * estimator_lib.get_fullbatch_average(
dataset,
limit=None,
by_steps=False,
truncate_episode_at=1,
reward_fn=reward_fn,
weight_fn=weight_fn)
else:
estimates = []
for i in range(self._num_qvalues):
estimates.append([])
for j in range(self._num_perturbations):
estimates[-1].append(
(1 - self._gamma) * estimator_lib.get_fullbatch_average(
dataset,
limit=None,
by_steps=False,
truncate_episode_at=1,
reward_fn=lambda *args: reward_fn(
*args, qvalues=self._ensemble_qvalues[i, :, j]),
weight_fn=weight_fn))
return np.array(estimates)
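

# Usage sketch (illustrative only, not part of the original module). The
# `dataset` and `target_policy` names below are assumptions: any dice_rl
# OffpolicyDataset with discrete, bounded observation/action specs and any
# TF-Agents policy over the same specs would work.
#
#   estimator = TabularQLearning(dataset.spec, gamma=0.99, num_qvalues=20)
#   per_step_reward = estimator.solve(dataset, target_policy)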
|
[
"tensorflow.compat.v2.nest.map_structure",
"numpy.sum",
"numpy.ones",
"numpy.shape",
"tensorflow.compat.v2.rank",
"numpy.arange",
"dice_rl.data.dataset.convert_to_tfagents_timestep",
"numpy.transpose",
"tensorflow.compat.v2.shape",
"numpy.equal",
"numpy.reshape",
"tf_agents.specs.tensor_spec.is_discrete",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.reduce_sum",
"numpy.concatenate",
"dice_rl.estimators.estimator.get_fullbatch_average",
"numpy.zeros",
"tf_agents.specs.tensor_spec.is_bounded",
"numpy.array",
"numpy.eye"
] |
[((1038, 1067), 'tf_agents.specs.tensor_spec.is_discrete', 'tensor_spec.is_discrete', (['spec'], {}), '(spec)\n', (1061, 1067), False, 'from tf_agents.specs import tensor_spec\n'), ((1072, 1100), 'tf_agents.specs.tensor_spec.is_bounded', 'tensor_spec.is_bounded', (['spec'], {}), '(spec)\n', (1094, 1100), False, 'from tf_agents.specs import tensor_spec\n'), ((2826, 2854), 'numpy.array', 'np.array', (['perturbation_scale'], {}), '(perturbation_scale)\n', (2834, 2854), True, 'import numpy as np\n'), ((4151, 4178), 'numpy.zeros', 'np.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (4159, 4178), True, 'import numpy as np\n'), ((5027, 5086), 'numpy.zeros', 'np.zeros', (['[self._dimension, self._dimension, num_estimates]'], {}), '([self._dimension, self._dimension, num_estimates])\n', (5035, 5086), True, 'import numpy as np\n'), ((5116, 5183), 'numpy.zeros', 'np.zeros', (['[self._dimension, num_estimates, self._num_perturbations]'], {}), '([self._dimension, num_estimates, self._num_perturbations])\n', (5124, 5183), True, 'import numpy as np\n'), ((5213, 5255), 'numpy.zeros', 'np.zeros', (['[self._dimension, num_estimates]'], {}), '([self._dimension, num_estimates])\n', (5221, 5255), True, 'import numpy as np\n'), ((5475, 5525), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['episodes'], {}), '(episodes)\n', (5515, 5525), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((5548, 5585), 'numpy.array', 'np.array', (['valid_steps'], {'dtype': 'np.int64'}), '(valid_steps, dtype=np.int64)\n', (5556, 5585), True, 'import numpy as np\n'), ((2940, 2982), 'numpy.reshape', 'np.reshape', (['self._perturbation_scale', '[-1]'], {}), '(self._perturbation_scale, [-1])\n', (2950, 2982), True, 'import numpy as np\n'), ((4248, 4294), 'numpy.zeros', 'np.zeros', (['[self._num_qvalues, self._dimension]'], {}), '([self._num_qvalues, self._dimension])\n', (4256, 4294), True, 'import numpy as np\n'), ((6102, 6164), 'numpy.concatenate', 'np.concatenate', (['[sample_weights[:, :, None], weights]'], {'axis': '(-1)'}), '([sample_weights[:, :, None], weights], axis=-1)\n', (6116, 6164), True, 'import numpy as np\n'), ((6331, 6388), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num])', 'episodes'], {}), '(lambda t: t[episode_num], episodes)\n', (6352, 6388), True, 'import tensorflow.compat.v2 as tf\n'), ((6419, 6473), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['this_episode'], {}), '(this_episode)\n', (6459, 6473), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((12090, 12109), 'numpy.array', 'np.array', (['estimates'], {}), '(estimates)\n', (12098, 12109), True, 'import numpy as np\n'), ((2866, 2900), 'numpy.shape', 'np.shape', (['self._perturbation_scale'], {}), '(self._perturbation_scale)\n', (2874, 2900), True, 'import numpy as np\n'), ((5707, 5737), 'numpy.ones', 'np.ones', (['[1, 1, num_estimates]'], {}), '([1, 1, num_estimates])\n', (5714, 5737), True, 'import numpy as np\n'), ((5763, 5795), 'numpy.reshape', 'np.reshape', (['sample_weights', '[-1]'], {}), '(sample_weights, [-1])\n', (5773, 5795), True, 'import numpy as np\n'), ((5798, 5820), 'numpy.sum', 'np.sum', (['sample_weights'], {}), '(sample_weights)\n', (5804, 5820), True, 'import numpy as np\n'), ((5992, 6013), 'numpy.transpose', 'np.transpose', (['weights'], {}), '(weights)\n', (6004, 6013), True, 'import numpy as np\n'), ((6233, 6254), 'tensorflow.compat.v2.shape', 'tf.shape', 
(['valid_steps'], {}), '(valid_steps)\n', (6241, 6254), True, 'import tensorflow.compat.v2 as tf\n'), ((6819, 6886), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num, step_num])', 'episodes'], {}), '(lambda t: t[episode_num, step_num], episodes)\n', (6840, 6886), True, 'import tensorflow.compat.v2 as tf\n'), ((6949, 7020), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num, step_num + 1])', 'episodes'], {}), '(lambda t: t[episode_num, step_num + 1], episodes)\n', (6970, 7020), True, 'import tensorflow.compat.v2 as tf\n'), ((7063, 7114), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['this_step'], {}), '(this_step)\n', (7103, 7114), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((7144, 7195), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['next_step'], {}), '(next_step)\n', (7184, 7195), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((8983, 9011), 'numpy.equal', 'np.equal', (['total_weights', '(0.0)'], {}), '(total_weights, 0.0)\n', (8991, 9011), True, 'import numpy as np\n'), ((9207, 9230), 'numpy.eye', 'np.eye', (['self._dimension'], {}), '(self._dimension)\n', (9213, 9230), True, 'import numpy as np\n'), ((9507, 9545), 'numpy.transpose', 'np.transpose', (['reward_vector', '[1, 0, 2]'], {}), '(reward_vector, [1, 0, 2])\n', (9519, 9545), True, 'import numpy as np\n'), ((10233, 10253), 'tensorflow.compat.v2.rank', 'tf.rank', (['valid_steps'], {}), '(valid_steps)\n', (10240, 10253), True, 'import tensorflow.compat.v2 as tf\n'), ((10281, 10333), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[0, ...])', 'env_step'], {}), '(lambda t: t[0, ...], env_step)\n', (10302, 10333), True, 'import tensorflow.compat.v2 as tf\n'), ((10367, 10422), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[:, 0, ...])', 'env_step'], {}), '(lambda t: t[:, 0, ...], env_step)\n', (10388, 10422), True, 'import tensorflow.compat.v2 as tf\n'), ((10715, 10767), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['first_step'], {}), '(first_step)\n', (10755, 10767), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((10914, 10976), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['(initial_qvalues * initial_target_probs)'], {'axis': '(-1)'}), '(initial_qvalues * initial_target_probs, axis=-1)\n', (10927, 10976), True, 'import tensorflow.compat.v2 as tf\n'), ((11329, 11470), 'dice_rl.estimators.estimator.get_fullbatch_average', 'estimator_lib.get_fullbatch_average', (['dataset'], {'limit': 'None', 'by_steps': '(False)', 'truncate_episode_at': '(1)', 'reward_fn': 'reward_fn', 'weight_fn': 'weight_fn'}), '(dataset, limit=None, by_steps=False,\n truncate_episode_at=1, reward_fn=reward_fn, weight_fn=weight_fn)\n', (11364, 11470), True, 'import dice_rl.estimators.estimator as estimator_lib\n'), ((9401, 9424), 'numpy.eye', 'np.eye', (['self._dimension'], {}), '(self._dimension)\n', (9407, 9424), True, 'import numpy as np\n'), ((10643, 10670), 'tensorflow.compat.v2.gather', 'tf.gather', (['qvalues', 'indices'], {}), '(qvalues, indices)\n', (10652, 10670), True, 'import tensorflow.compat.v2 as tf\n'), ((11090, 11117), 'tensorflow.compat.v2.gather', 'tf.gather', (['qvalues', 'indices'], {}), '(qvalues, indices)\n', (11099, 11117), True, 'import tensorflow.compat.v2 as tf\n'), ((5870, 5892), 
'numpy.sum', 'np.sum', (['sample_weights'], {}), '(sample_weights)\n', (5876, 5892), True, 'import numpy as np\n'), ((6030, 6054), 'numpy.shape', 'np.shape', (['sample_weights'], {}), '(sample_weights)\n', (6038, 6054), True, 'import numpy as np\n'), ((6768, 6789), 'tensorflow.compat.v2.shape', 'tf.shape', (['valid_steps'], {}), '(valid_steps)\n', (6776, 6789), True, 'import tensorflow.compat.v2 as tf\n'), ((9452, 9494), 'numpy.transpose', 'np.transpose', (['transition_matrix', '[2, 0, 1]'], {}), '(transition_matrix, [2, 0, 1])\n', (9464, 9494), True, 'import numpy as np\n'), ((10570, 10598), 'numpy.arange', 'np.arange', (['self._num_actions'], {}), '(self._num_actions)\n', (10579, 10598), True, 'import numpy as np\n'), ((11216, 11237), 'tensorflow.compat.v2.shape', 'tf.shape', (['valid_steps'], {}), '(valid_steps)\n', (11224, 11237), True, 'import tensorflow.compat.v2 as tf\n')]
|
import time
import pytest
from unit.applications.proto import TestApplicationProto
class TestReconfigure(TestApplicationProto):
prerequisites = {}
@pytest.fixture(autouse=True)
def setup_method_fixture(self):
assert 'success' in self.conf(
{
"listeners": {"*:7080": {"pass": "routes"}},
"routes": [{"action": {"return": 200}}],
"applications": {},
}
)
def clear_conf(self):
assert 'success' in self.conf({"listeners": {}, "applications": {}})
def test_reconfigure(self):
(_, sock) = self.http(
b"""GET / HTTP/1.1
""",
start=True,
raw=True,
no_recv=True,
)
self.clear_conf()
resp = self.http(
b"""Host: localhost
Connection: close
""",
sock=sock,
raw=True,
)
assert resp['status'] == 200, 'finish request'
def test_reconfigure_2(self):
(_, sock) = self.http(b'', raw=True, start=True, no_recv=True)
# Waiting for connection completion.
# Delay should be more than TCP_DEFER_ACCEPT.
time.sleep(1.5)
self.clear_conf()
assert self.get(sock=sock)['status'] == 408, 'request timeout'
|
[
"pytest.fixture",
"time.sleep"
] |
[((160, 188), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (174, 188), False, 'import pytest\n'), ((1176, 1191), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (1186, 1191), False, 'import time\n')]
|
from credentials import Credentials
from firestore import Firestore
credentials = Credentials()
firestore = Firestore()
|
[
"credentials.Credentials",
"firestore.Firestore"
] |
[((83, 96), 'credentials.Credentials', 'Credentials', ([], {}), '()\n', (94, 96), False, 'from credentials import Credentials\n'), ((110, 121), 'firestore.Firestore', 'Firestore', ([], {}), '()\n', (119, 121), False, 'from firestore import Firestore\n')]
|
# Copyright (c) 2013, 2014, 2015, 2016 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from xml.dom.minidom import parseString
from os import path
import re
import copy
import io
import csv
import logging
if sys.version_info >= (3, 3): # pragma: no cover
from ipaddress import (ip_address,
ip_network,
IPv4Address,
IPv4Network,
IPv6Address,
summarize_address_range,
collapse_addresses)
else: # pragma: no cover
from ipaddr import (IPAddress as ip_address,
IPNetwork as ip_network,
IPv4Address,
IPv4Network,
IPv6Address,
summarize_address_range,
collapse_address_list as collapse_addresses)
try: # pragma: no cover
from itertools import filterfalse
except ImportError: # pragma: no cover
from itertools import filterfalse as filterfalse
log = logging.getLogger(__name__)
IETF_RFC_REFERENCES = {
# IPv4
'RFC 1122, Section 3.2.1.3':
'http://tools.ietf.org/html/rfc1122#section-3.2.1.3',
'RFC 1918': 'http://tools.ietf.org/html/rfc1918',
'RFC 3927': 'http://tools.ietf.org/html/rfc3927',
'RFC 5736': 'http://tools.ietf.org/html/rfc5736',
'RFC 5737': 'http://tools.ietf.org/html/rfc5737',
'RFC 3068': 'http://tools.ietf.org/html/rfc3068',
'RFC 2544': 'http://tools.ietf.org/html/rfc2544',
'RFC 3171': 'http://tools.ietf.org/html/rfc3171',
'RFC 919, Section 7': 'http://tools.ietf.org/html/rfc919#section-7',
# IPv6
'RFC 4291, Section 2.7': 'http://tools.ietf.org/html/rfc4291#section-2.7',
'RFC 4291': 'http://tools.ietf.org/html/rfc4291',
'RFC 4291, Section 2.5.2':
'http://tools.ietf.org/html/rfc4291#section-2.5.2',
'RFC 4291, Section 2.5.3':
'http://tools.ietf.org/html/rfc4291#section-2.5.3',
'RFC 4291, Section 2.5.6':
'http://tools.ietf.org/html/rfc4291#section-2.5.6',
'RFC 4291, Section 2.5.7':
'http://tools.ietf.org/html/rfc4291#section-2.5.7',
'RFC 4193': 'https://tools.ietf.org/html/rfc4193'
}
IP_REGEX = (
r'(?P<ip>'
# IPv4
'(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.)){3}'
'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
# IPv6
'|\[?(((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:)'
'{6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|'
'2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]'
'{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d'
'\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|'
'((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|'
'2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]'
'{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(('
'(:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1'
'\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(('
'[0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4})'
'{0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]'
'?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:(('
'25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})'
')|:)))(%.+)?))\]?'
# Optional IPv4 Port
'((:(6553[0-5]|655[0-2]\d|65[0-4]\d{2}|6[0-4]\d{3}|[1-5]\d{4}|[1-9]\d{0,3}'
# Optional CIDR block
'))|(\/(?:[012]\d?|3[012]?|[4-9])))?'
')'
)
def ipv4_lstrip_zeros(address):
"""
The function to strip leading zeros in each octet of an IPv4 address.
Args:
address: An IPv4 address in string format.
Returns:
String: The modified IPv4 address string.
"""
# Split the octets.
obj = address.strip().split('.')
for x, y in enumerate(obj):
# Strip leading zeros. Split / here in case CIDR is attached.
obj[x] = y.split('/')[0].lstrip('0')
if obj[x] in ['', None]:
obj[x] = '0'
return '.'.join(obj)
def calculate_cidr(start_address, end_address):
"""
The function to calculate a CIDR range(s) from a start and end IP address.
Args:
start_address: The starting IP address in string format.
end_address: The ending IP address in string format.
Returns:
List: A list of calculated CIDR ranges.
"""
tmp_addrs = []
try:
tmp_addrs.extend(summarize_address_range(
ip_address(start_address),
ip_address(end_address)))
except (KeyError, ValueError, TypeError): # pragma: no cover
try:
tmp_addrs.extend(summarize_address_range(
ip_network(start_address).network_address,
ip_network(end_address).network_address))
except AttributeError: # pragma: no cover
tmp_addrs.extend(summarize_address_range(
ip_network(start_address).ip,
ip_network(end_address).ip))
return [i.__str__() for i in collapse_addresses(tmp_addrs)]
def get_countries(is_legacy_xml=False):
"""
The function to generate a dictionary containing ISO_3166-1 country codes
to names.
Args:
is_legacy_xml: Boolean for whether to use the older country code
list (iso_3166-1_list_en.xml).
Returns:
Dictionary: A dictionary with the country codes as the keys and the
country names as the values.
"""
# Initialize the countries dictionary.
countries = {}
# Set the data directory based on if the script is a frozen executable.
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
data_dir = path.dirname(sys.executable) # pragma: no cover
else:
data_dir = path.dirname(__file__)
if is_legacy_xml:
log.debug('Opening country code legacy XML: {0}'.format(
str(data_dir) + '/data/iso_3166-1_list_en.xml'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',
encoding='ISO-8859-1')
# Read the file.
data = f.read()
# Check if there is data.
if not data: # pragma: no cover
return {}
# Parse the data to get the DOM.
dom = parseString(data)
# Retrieve the country entries.
entries = dom.getElementsByTagName('ISO_3166-1_Entry')
# Iterate through the entries and add to the countries dictionary.
for entry in entries:
# Retrieve the country code and name from the DOM.
code = entry.getElementsByTagName(
'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
name = entry.getElementsByTagName(
'ISO_3166-1_Country_name')[0].firstChild.data
# Add to the countries dictionary.
countries[code] = name.title()
else:
log.debug('Opening country code CSV: {0}'.format(
str(data_dir) + '/data/iso_3166-1_list_en.xml'))
# Create the country codes file object.
f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',
encoding='utf-8')
# Create csv reader object.
csv_reader = csv.reader(f, delimiter=',', quotechar='"')
# Iterate through the rows and add to the countries dictionary.
for row in csv_reader:
# Retrieve the country code and name columns.
code = row[0]
name = row[1]
# Add to the countries dictionary.
countries[code] = name
return countries
def ipv4_is_defined(address):
"""
The function for checking if an IPv4 address is defined (does not need to
be resolved).
Args:
address: An IPv4 address in string format.
Returns:
Tuple:
:Boolean: True if given address is defined, otherwise False
:String: IETF assignment name if given address is defined, otherwise ''
:String: IETF assignment RFC if given address is defined, otherwise ''
"""
# Initialize the IP address object.
query_ip = IPv4Address(str(address))
# This Network
if query_ip in IPv4Network('0.0.0.0/8'):
return True, 'This Network', 'RFC 1122, Section 3.2.1.3'
# Loopback
elif query_ip.is_loopback:
return True, 'Loopback', 'RFC 1122, Section 3.2.1.3'
# Link Local
elif query_ip.is_link_local:
return True, 'Link Local', 'RFC 3927'
# IETF Protocol Assignments
elif query_ip in IPv4Network('192.0.0.0/24'):
return True, 'IETF Protocol Assignments', 'RFC 5736'
# TEST-NET-1
elif query_ip in IPv4Network('192.0.2.0/24'):
return True, 'TEST-NET-1', 'RFC 5737'
# 6to4 Relay Anycast
elif query_ip in IPv4Network('172.16.31.10/24'):
return True, '6to4 Relay Anycast', 'RFC 3068'
# Network Interconnect Device Benchmark Testing
elif query_ip in IPv4Network('198.18.0.0/15'):
return (True,
'Network Interconnect Device Benchmark Testing',
'RFC 2544')
# TEST-NET-2
elif query_ip in IPv4Network('198.51.100.0/24'):
return True, 'TEST-NET-2', 'RFC 5737'
# TEST-NET-3
elif query_ip in IPv4Network('203.0.113.0/24'):
return True, 'TEST-NET-3', 'RFC 5737'
# Multicast
elif query_ip.is_multicast:
return True, 'Multicast', 'RFC 3171'
# Limited Broadcast
elif query_ip in IPv4Network('255.255.255.255/32'):
return True, 'Limited Broadcast', 'RFC 919, Section 7'
# Private-Use Networks
elif query_ip.is_private:
return True, 'Private-Use Networks', 'RFC 1918'
return False, '', ''
def ipv6_is_defined(address):
"""
The function for checking if an IPv6 address is defined (does not need to
be resolved).
Args:
address: An IPv6 address in string format.
Returns:
Tuple:
:Boolean: True if address is defined, otherwise False
:String: IETF assignment name if address is defined, otherwise ''
:String: IETF assignment RFC if address is defined, otherwise ''
"""
# Initialize the IP address object.
query_ip = IPv6Address(str(address))
# Multicast
if query_ip.is_multicast:
return True, 'Multicast', 'RFC 4291, Section 2.7'
# Unspecified
elif query_ip.is_unspecified:
return True, 'Unspecified', 'RFC 4291, Section 2.5.2'
# Loopback.
elif query_ip.is_loopback:
return True, 'Loopback', 'RFC 4291, Section 2.5.3'
# Reserved
elif query_ip.is_reserved:
return True, 'Reserved', 'RFC 4291'
# Link-Local
elif query_ip.is_link_local:
return True, 'Link-Local', 'RFC 4291, Section 2.5.6'
# Site-Local
elif query_ip.is_site_local:
return True, 'Site-Local', 'RFC 4291, Section 2.5.7'
# Unique Local Unicast
elif query_ip.is_private:
return True, 'Unique Local Unicast', 'RFC 4193'
return False, '', ''
def unique_everseen(iterable, key=None):
"""
The generator to list unique elements, preserving the order. Remember all
elements ever seen. This was taken from the itertools recipes.
Args:
iterable: An iterable to process.
key: Optional function to run when checking elements (e.g., str.lower)
Returns:
Generator: Yields a generator object.
"""
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_addresses(data=None, file_path=None):
"""
The function to search an input string and/or file, extracting and
counting IPv4/IPv6 addresses/networks. Summarizes ports with sub-counts.
If both a string and file_path are provided, it will process them both.
Args:
data: A string to process.
file_path: An optional file path to process.
Returns:
Dictionary:
:ip address/network: Each address or network found is a dictionary w/\:
:count: Total number of times seen (Integer)
:ports: Dictionary with port numbers as keys and the number of
times seen for this ip as values (Dictionary)
Raises:
ValueError: Arguments provided are invalid.
"""
if not data and not file_path:
raise ValueError('No data or file path provided.')
ret = {}
base = {
'count': 0,
'ports': {}
}
file_data = None
if file_path:
log.debug('Opening file for unique address analysis: {0}'.format(
str(file_path)))
f = open(str(file_path), 'r')
# Read the file.
file_data = f.read()
pattern = re.compile(
str(IP_REGEX),
re.DOTALL
)
# Check if there is data.
log.debug('Analyzing input/file data'.format(
str(file_path)))
for input_data in [data, file_data]:
if input_data:
# Search for IPs.
for match in pattern.finditer(input_data):
is_net = False
port = None
try:
found = match.group('ip')
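                    # Split the match into address/network and optional port:
                    # handles "a.b.c.d:port", "[IPv6]:port", CIDR networks and bare addresses.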
if '.' in found and ':' in found:
split = found.split(':')
ip_or_net = split[0]
port = split[1]
elif '[' in found:
split = found.split(']:')
ip_or_net = split[0][1:]
port = split[1]
elif '/' in found:
is_net = True
ip_or_net = found
else:
ip_or_net = found
if is_net:
ip_obj = ip_network(ip_or_net)
else:
ip_obj = ip_address(ip_or_net)
obj_str = ip_obj.__str__()
if obj_str not in list(ret.keys()):
ret[obj_str] = copy.deepcopy(base)
ret[obj_str]['count'] += 1
if port:
try:
ret[obj_str]['ports'][str(port)] += 1
except KeyError:
ret[obj_str]['ports'][str(port)] = 1
except (KeyError, ValueError):
continue
return ret
|
[
"ipaddr.IPv4Network",
"itertools.filterfalse",
"csv.reader",
"xml.dom.minidom.parseString",
"copy.deepcopy",
"ipaddr.IPAddress",
"os.path.dirname",
"ipaddr.collapse_address_list",
"ipaddr.IPNetwork",
"logging.getLogger"
] |
[((2405, 2432), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2422, 2432), False, 'import logging\n'), ((7423, 7451), 'os.path.dirname', 'path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (7435, 7451), False, 'from os import path\n'), ((7507, 7529), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (7519, 7529), False, 'from os import path\n'), ((8074, 8091), 'xml.dom.minidom.parseString', 'parseString', (['data'], {}), '(data)\n', (8085, 8091), False, 'from xml.dom.minidom import parseString\n'), ((9055, 9098), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(f, delimiter=\',\', quotechar=\'"\')\n', (9065, 9098), False, 'import csv\n'), ((10038, 10062), 'ipaddr.IPv4Network', 'IPv4Network', (['"""0.0.0.0/8"""'], {}), "('0.0.0.0/8')\n", (10049, 10062), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((13505, 13545), 'itertools.filterfalse', 'filterfalse', (['seen.__contains__', 'iterable'], {}), '(seen.__contains__, iterable)\n', (13516, 13545), True, 'from itertools import filterfalse as filterfalse\n'), ((6735, 6764), 'ipaddr.collapse_address_list', 'collapse_addresses', (['tmp_addrs'], {}), '(tmp_addrs)\n', (6753, 6764), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((6168, 6193), 'ipaddr.IPAddress', 'ip_address', (['start_address'], {}), '(start_address)\n', (6178, 6193), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((6208, 6231), 'ipaddr.IPAddress', 'ip_address', (['end_address'], {}), '(end_address)\n', (6218, 6231), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((10406, 10433), 'ipaddr.IPv4Network', 'IPv4Network', (['"""192.0.0.0/24"""'], {}), "('192.0.0.0/24')\n", (10417, 10433), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((10541, 10568), 'ipaddr.IPv4Network', 'IPv4Network', (['"""192.0.2.0/24"""'], {}), "('192.0.2.0/24')\n", (10552, 10568), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((16144, 16165), 'ipaddr.IPNetwork', 'ip_network', (['ip_or_net'], {}), '(ip_or_net)\n', (16154, 16165), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((16229, 16250), 'ipaddr.IPAddress', 'ip_address', (['ip_or_net'], {}), '(ip_or_net)\n', (16239, 16250), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((16402, 16421), 'copy.deepcopy', 'copy.deepcopy', (['base'], {}), '(base)\n', (16415, 16421), False, 'import copy\n'), ((6393, 6418), 'ipaddr.IPNetwork', 'ip_network', 
(['start_address'], {}), '(start_address)\n', (6403, 6418), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((6453, 6476), 'ipaddr.IPNetwork', 'ip_network', (['end_address'], {}), '(end_address)\n', (6463, 6476), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((10669, 10699), 'ipaddr.IPv4Network', 'IPv4Network', (['"""172.16.31.10/24"""'], {}), "('172.16.31.10/24')\n", (10680, 10699), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((6623, 6648), 'ipaddr.IPNetwork', 'ip_network', (['start_address'], {}), '(start_address)\n', (6633, 6648), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((6670, 6693), 'ipaddr.IPNetwork', 'ip_network', (['end_address'], {}), '(end_address)\n', (6680, 6693), True, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((10835, 10863), 'ipaddr.IPv4Network', 'IPv4Network', (['"""198.18.0.0/15"""'], {}), "('198.18.0.0/15')\n", (10846, 10863), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((11027, 11057), 'ipaddr.IPv4Network', 'IPv4Network', (['"""198.51.100.0/24"""'], {}), "('198.51.100.0/24')\n", (11038, 11057), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((11150, 11179), 'ipaddr.IPv4Network', 'IPv4Network', (['"""203.0.113.0/24"""'], {}), "('203.0.113.0/24')\n", (11161, 11179), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n'), ((11379, 11412), 'ipaddr.IPv4Network', 'IPv4Network', (['"""255.255.255.255/32"""'], {}), "('255.255.255.255/32')\n", (11390, 11412), False, 'from ipaddr import IPAddress as ip_address, IPNetwork as ip_network, IPv4Address, IPv4Network, IPv6Address, summarize_address_range, collapse_address_list as collapse_addresses\n')]
|
# -*- coding: utf-8 -*-
import requests
import re
import json
API_URL_BASE = "https://speller.yandex.net/services/spellservice.json/checkText"
def capital(string: str) -> str:
"""
    Returns the matched groups written in capital letters
    :param string: The string to process
    :type string: str
    :return: The string where the matched groups are written in capital letters
:rtype: str
"""
return (string.group(1).upper() + string.group(2).upper())
def orthography(string: str) -> str:
"""
    Removes spaces before punctuation marks and capitalizes the letters that follow punctuation marks
    :param string: The string to process
    :type string: str
    :return: The string with correct punctuation
:rtype: str
"""
w_o_spaces = string.capitalize()
signs = [',', '!', ':', ';']
for sign in signs:
w_o_spaces = re.sub(' ' + sign, sign, w_o_spaces)
w_o_spaces = re.sub(' \.', '.', w_o_spaces)
w_o_spaces = re.sub(' \?', '?', w_o_spaces)
return (re.sub('(\. |\? |! )(.)', capital, w_o_spaces))
def correct_spelling(text, options=518):
"""
    Takes a text with errors and returns it with the errors corrected
    Detailed API documentation can be found here: https://yandex.ru/dev/speller/doc/dg/concepts/About-docpage/
    :param text: Input text
    :type text: str
    :param options: settings for the speller
    the value is the sum of the required flags:
    IGNORE_DIGITS 2 Skip words containing digits, e.g. "авп17х4534".
    IGNORE_URLS 4 Skip internet addresses, e-mail addresses and file names.
    FIND_REPEAT_WORDS 8 Highlight consecutive repeated words, e.g. "я полетел на на Кипр".
    IGNORE_CAPITALIZATION 512 Ignore incorrect use of UPPER/lower case, e.g. in the word "москва"
    :type options: int
    :return: The corrected text
:rtype: str
"""
lang = "ru"
data = {"text": text, "lang": lang, "options": options}
    postfixes_dash = ["то ", "либо ", "нибудь ", "таки", "то.", "либо.", "нибудь.", "таки.", "то,", "либо,", "нибудь,",
                      "то?", "либо?", "нибудь?", "то!", "либо!", "нибудь!", "то:", "либо:", "нибудь:",
                      "то;", "либо;", "нибудь;", "то\"", "либо\"", "нибудь\"", "то/", "либо/", "нибудь/",
                      "то'", "либо'",
                      "нибудь'"]
prefix_dash = ["Кое"]
try:
        response = requests.post(API_URL_BASE, data) # POST request
result = response.json()
for arr in result:
            text = text.replace(arr["word"], arr["s"][0]) # substitute the suggested corrections into the text
for word in postfixes_dash:
            text = text.replace(" " + word, "-" + word) # fix hyphenation issues
for word in prefix_dash:
text = text.replace(word + " ", word + '-')
except:
pass
text = orthography(text)
return text
|
[
"requests.post",
"re.sub"
] |
[((895, 926), 're.sub', 're.sub', (['""" \\\\."""', '"""."""', 'w_o_spaces'], {}), "(' \\\\.', '.', w_o_spaces)\n", (901, 926), False, 'import re\n'), ((943, 974), 're.sub', 're.sub', (['""" \\\\?"""', '"""?"""', 'w_o_spaces'], {}), "(' \\\\?', '?', w_o_spaces)\n", (949, 974), False, 'import re\n'), ((986, 1034), 're.sub', 're.sub', (['"""(\\\\. |\\\\? |! )(.)"""', 'capital', 'w_o_spaces'], {}), "('(\\\\. |\\\\? |! )(.)', capital, w_o_spaces)\n", (992, 1034), False, 'import re\n'), ((841, 877), 're.sub', 're.sub', (["(' ' + sign)", 'sign', 'w_o_spaces'], {}), "(' ' + sign, sign, w_o_spaces)\n", (847, 877), False, 'import re\n'), ((2455, 2488), 'requests.post', 'requests.post', (['API_URL_BASE', 'data'], {}), '(API_URL_BASE, data)\n', (2468, 2488), False, 'import requests\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 17 10:49:15 2016
@author: yxl
"""
from imagepy import IPy
import numpy as np
from imagepy.ui.canvasframe import CanvasFrame
from imagepy.core.manager import ImageManager, WindowsManager
from imagepy.core.engine import Simple
from skimage import color
class SplitRGB(Simple):
title = 'Split RGB Channels'
note = ['rgb']
para = {'copy':False, 'destory':True}
view = {(bool, 'copy', 'Copy data from view'),
(bool, 'destory', 'Destory current image')}
#process
def run(self, ips, imgs, para = None):
r,g,b = [],[],[]
for i,n in zip(imgs,list(range(ips.get_nslices()))):
for c,ci in zip((r,g,b),(0,1,2)):
if self.para['copy']:c.append(i[:,:,ci].copy())
else: c.append(i[:,:,ci])
self.progress(i, n)
for im, tl in zip([r,g,b],['red','green','blue']):
IPy.show_img(im, ips.title+'-'+tl)
if self.para['destory']:
ImageManager.close(ips.title)
class ToRGB(Simple):
title = 'RGB to RGB'
note = ['all']
#parameter
para = {'red':'','green':'','blue':'','destory':True}
def load(self, ips):
r, g, b = self.titles()[1:]
self.view = [('img', r, 'red', ''),
('img', g, 'green', ''),
('img', b, 'blue', ''),
(bool, 'destory', 'destory')]
return True
def titles(self): return 'RGB-Merge', 'red', 'green', 'blue'
def trans(self, img1, img2, img3):
return np.array([img1.T, img2.T, img3.T], dtype=np.uint8).T
def run(self, ips, imgs, para = None):
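        # Fetch the three source images by title, require matching 8-bit type,
        # size and slice count, then merge them slice by slice into an RGB stack.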
idx = ['red','green','blue']
print(para)
imr,img,imb = [ImageManager.get(para[i]) for i in idx]
sr,sg,sb = [i.get_nslices() for i in [imr,img,imb]]
if imr.imgtype!='8-bit' or img.imgtype!='8-bit' or imb.imgtype!='8-bit' or \
imr.size!=img.size or img.size!=imb.size or sr!=sg or sg!=sb:
IPy.alert('three images must be 8-bit image, with the same size and slices!')
return
rgbs = []
w,h = imr.size
for i in range(sr):
self.progress(i,sr)
rgbs.append(self.trans(imr.imgs[i], img.imgs[i], imb.imgs[i]))
IPy.show_img(rgbs, self.titles()[0])
if self.para['destory']:
for title in [para[i] for i in idx]:
WindowsManager.get(title).close()
class RGB2(Simple):
title = 'RGB To RGB'
note = ['rgb']
#process
def titles(self): return 'Red', 'Green', 'Blue'
def trans(self, img):
return img
def run(self, ips, imgs, para = None):
nr, ng, nb = [],[],[]
for i in range(ips.get_nslices()):
nrgb = self.trans(imgs[i])
nr.append(nrgb[:,:,0])
ng.append(nrgb[:,:,1])
nb.append(nrgb[:,:,2])
self.progress(i, len(imgs))
for im, tl in zip([nr, ng, nb], self.titles()):
IPy.show_img(im, ips.title+'-'+tl)
class MergeRGB(ToRGB):
title = 'Merge RGB Channels'
# ============= RGB - HSV ============
class RGB2HSV(RGB2):
title = 'RGB To HSV'
def titles(self):
return 'Hue', 'Saturation', 'Value'
def trans(self, img):
rst = color.rgb2hsv(img)
rst *= 255
print('============', rst.min(), rst.max())
return rst.astype(np.uint8)
class HSV2RGB(ToRGB):
title = 'HSV To RGB'
#process
def titles(self):
return 'HSV2RGB-Merge', 'H', 'S', 'V'
def trans(self, img1, img2, img3):
rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
rst /= 255.0
rst = color.hsv2rgb(rst.T)
rst *= 255
return rst.astype(np.uint8)
# ============= RGB - CIE ============
class RGB2CIE(RGB2):
title = 'RGB To CIERGB'
#process
def titles(self):
return 'Red', 'Green', 'Blue'
def trans(self, img):
rst = color.rgb2rgbcie(img)
np.maximum(rst, 0, out=rst)
print('============', rst.min(axis=(0,1)), rst.max(axis=(0,1)))
rst *= 255/50*255
return rst.astype(np.uint8)
class CIE2RGB(ToRGB):
title = 'CIERGB To RGB'
#process
def titles(self):
return 'CIE2RGB-Merge', 'R', 'G', 'B'
def trans(self, img1, img2, img3):
rst = np.maximum((img1.T, img2.T, img3.T), 0, dtype=np.float64)
rst /= 255/50*255
rst = color.rgbcie2rgb(rst.T)
rst *= 255
return (rst).astype(np.uint8)
# ============= RGB - LUV ============
class RGB2LUV(RGB2):
title = 'RGB To LUV'
#process
def titles(self):
return 'Luminance', 'UColor', 'VColor'
def trans(self, img):
rst = color.rgb2luv(img)+128
#print('============', rst.min(), rst.max())
return rst.astype(np.uint8)
class LUV2RGB(ToRGB):
title = 'LUV To RGB'
#process
def titles(self):
return 'LUV2RGB-Merge', 'L', 'U', 'V'
def trans(self, img1, img2, img3):
rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
rst -= 128
rst = color.luv2rgb(rst.T)
rst *= 255
return (rst).astype(np.uint8)
# ============= RGB - Lab ============
class RGB2Lab(RGB2):
title = 'RGB To Lab'
#process
def titles(self):
return 'Luminance', 'AColor', 'BColor'
def trans(self, img):
rst = color.rgb2lab(img)
print('============', rst.min(), rst.max())
rst+=100; rst*=(255/200.0)
return (rst).astype(np.uint8)
class Lab2RGB(ToRGB):
title = 'Lab To RGB'
#process
def titles(self):
return 'Lab2RGB-Merge', 'L', 'A', 'B'
def trans(self, img1, img2, img3):
rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
rst *= (200/255.0); rst -= 100
rst = color.lab2rgb(rst.T)
rst *= 255
return (rst).astype(np.uint8)
class RGB2Gray(Simple):
title = 'RGB To Gray'
note = ['rgb']
def run(self, ips, imgs, para = None):
gray = []
for i in range(ips.get_nslices()):
gray.append(color.rgb2gray(imgs[i])*255)
self.progress(i, len(imgs))
IPy.show_img(gray, ips.title+'-Gray')
# ============= RGB - XYZ ============
class RGB2XYZ(RGB2):
title = 'RGB To XYZ'
#process
def titles(self):
return 'X', 'Y', 'Z'
def trans(self, img):
rst = color.rgb2xyz(img)
print('============', rst.min(), rst.max())
return (rst*(200)).astype(np.uint8)
class XYZ2RGB(ToRGB):
title = 'XYZ To RGB'
#process
def titles(self):
return 'XYZ2RGB-Merge', 'X', 'Y', 'Z'
def trans(self, img1, img2, img3):
rst = color.xyz2rgb(np.array((img1.T, img2.T, img3.T)).T/200.0)*255
#print('============', rst.min(), rst.max())
return rst.astype(np.uint8)
plgs = [RGB2Gray, '-', SplitRGB, MergeRGB, '-', RGB2HSV, HSV2RGB, '-', RGB2CIE, CIE2RGB, '-', RGB2LUV, LUV2RGB, '-', RGB2Lab, Lab2RGB, '-', RGB2XYZ, XYZ2RGB]
|
[
"skimage.color.rgbcie2rgb",
"numpy.maximum",
"skimage.color.hsv2rgb",
"skimage.color.rgb2gray",
"skimage.color.rgb2luv",
"skimage.color.rgb2hsv",
"imagepy.core.manager.ImageManager.close",
"imagepy.IPy.alert",
"skimage.color.lab2rgb",
"skimage.color.rgb2rgbcie",
"skimage.color.rgb2xyz",
"numpy.array",
"skimage.color.luv2rgb",
"imagepy.core.manager.ImageManager.get",
"imagepy.core.manager.WindowsManager.get",
"imagepy.IPy.show_img",
"skimage.color.rgb2lab"
] |
[((3325, 3343), 'skimage.color.rgb2hsv', 'color.rgb2hsv', (['img'], {}), '(img)\n', (3338, 3343), False, 'from skimage import color\n'), ((3635, 3687), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), dtype=np.float64)\n', (3643, 3687), True, 'import numpy as np\n'), ((3723, 3743), 'skimage.color.hsv2rgb', 'color.hsv2rgb', (['rst.T'], {}), '(rst.T)\n', (3736, 3743), False, 'from skimage import color\n'), ((4002, 4023), 'skimage.color.rgb2rgbcie', 'color.rgb2rgbcie', (['img'], {}), '(img)\n', (4018, 4023), False, 'from skimage import color\n'), ((4032, 4059), 'numpy.maximum', 'np.maximum', (['rst', '(0)'], {'out': 'rst'}), '(rst, 0, out=rst)\n', (4042, 4059), True, 'import numpy as np\n'), ((4381, 4438), 'numpy.maximum', 'np.maximum', (['(img1.T, img2.T, img3.T)', '(0)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), 0, dtype=np.float64)\n', (4391, 4438), True, 'import numpy as np\n'), ((4479, 4502), 'skimage.color.rgbcie2rgb', 'color.rgbcie2rgb', (['rst.T'], {}), '(rst.T)\n', (4495, 4502), False, 'from skimage import color\n'), ((5066, 5118), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), dtype=np.float64)\n', (5074, 5118), True, 'import numpy as np\n'), ((5152, 5172), 'skimage.color.luv2rgb', 'color.luv2rgb', (['rst.T'], {}), '(rst.T)\n', (5165, 5172), False, 'from skimage import color\n'), ((5440, 5458), 'skimage.color.rgb2lab', 'color.rgb2lab', (['img'], {}), '(img)\n', (5453, 5458), False, 'from skimage import color\n'), ((5768, 5820), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {'dtype': 'np.float64'}), '((img1.T, img2.T, img3.T), dtype=np.float64)\n', (5776, 5820), True, 'import numpy as np\n'), ((5874, 5894), 'skimage.color.lab2rgb', 'color.lab2rgb', (['rst.T'], {}), '(rst.T)\n', (5887, 5894), False, 'from skimage import color\n'), ((6228, 6267), 'imagepy.IPy.show_img', 'IPy.show_img', (['gray', "(ips.title + '-Gray')"], {}), "(gray, ips.title + '-Gray')\n", (6240, 6267), False, 'from imagepy import IPy\n'), ((6458, 6476), 'skimage.color.rgb2xyz', 'color.rgb2xyz', (['img'], {}), '(img)\n', (6471, 6476), False, 'from skimage import color\n'), ((926, 964), 'imagepy.IPy.show_img', 'IPy.show_img', (['im', "(ips.title + '-' + tl)"], {}), "(im, ips.title + '-' + tl)\n", (938, 964), False, 'from imagepy import IPy\n'), ((1006, 1035), 'imagepy.core.manager.ImageManager.close', 'ImageManager.close', (['ips.title'], {}), '(ips.title)\n', (1024, 1035), False, 'from imagepy.core.manager import ImageManager, WindowsManager\n'), ((1570, 1620), 'numpy.array', 'np.array', (['[img1.T, img2.T, img3.T]'], {'dtype': 'np.uint8'}), '([img1.T, img2.T, img3.T], dtype=np.uint8)\n', (1578, 1620), True, 'import numpy as np\n'), ((1751, 1776), 'imagepy.core.manager.ImageManager.get', 'ImageManager.get', (['para[i]'], {}), '(para[i])\n', (1767, 1776), False, 'from imagepy.core.manager import ImageManager, WindowsManager\n'), ((2031, 2109), 'imagepy.IPy.alert', 'IPy.alert', (['"""three images must be 8-bit image, with the same size and slices!"""'], {}), "('three images must be 8-bit image, with the same size and slices!')\n", (2040, 2109), False, 'from imagepy import IPy\n'), ((3040, 3078), 'imagepy.IPy.show_img', 'IPy.show_img', (['im', "(ips.title + '-' + tl)"], {}), "(im, ips.title + '-' + tl)\n", (3052, 3078), False, 'from imagepy import IPy\n'), ((4770, 4788), 'skimage.color.rgb2luv', 'color.rgb2luv', (['img'], {}), '(img)\n', (4783, 4788), False, 'from 
skimage import color\n'), ((6151, 6174), 'skimage.color.rgb2gray', 'color.rgb2gray', (['imgs[i]'], {}), '(imgs[i])\n', (6165, 6174), False, 'from skimage import color\n'), ((2461, 2486), 'imagepy.core.manager.WindowsManager.get', 'WindowsManager.get', (['title'], {}), '(title)\n', (2479, 2486), False, 'from imagepy.core.manager import ImageManager, WindowsManager\n'), ((6771, 6805), 'numpy.array', 'np.array', (['(img1.T, img2.T, img3.T)'], {}), '((img1.T, img2.T, img3.T))\n', (6779, 6805), True, 'import numpy as np\n')]
|
# Copyright (C) 2020-2022, <NAME>.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import Any, Callable, Dict, List, Optional
import torch
import torch.nn as nn
from holocron.nn import GlobalAvgPool2d
from ..presets import IMAGENETTE
from ..utils import conv_sequence, load_pretrained_params
from .resnet import ResNet, _ResBlock
__all__ = ["SoftAttentionLayer", "SKConv2d", "SKBottleneck", "sknet50", "sknet101", "sknet152"]
default_cfgs: Dict[str, Dict[str, Any]] = {
"sknet50": {
**IMAGENETTE,
"input_shape": (3, 224, 224),
"url": "https://github.com/frgfm/Holocron/releases/download/v0.1.3/sknet50_224-5d2160f2.pth",
},
"sknet101": {
**IMAGENETTE,
"input_shape": (3, 224, 224),
"url": None,
},
"sknet152": {
**IMAGENETTE,
"input_shape": (3, 224, 224),
"url": None,
},
}
class SoftAttentionLayer(nn.Sequential):
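    # Soft channel attention: global average pooling, a bottleneck 1x1 conv block,
    # then a 1x1 conv with a sigmoid producing channels * out_multiplier gate values.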
def __init__(
self,
channels: int,
sa_ratio: int = 16,
out_multiplier: int = 1,
act_layer: Optional[nn.Module] = None,
norm_layer: Optional[Callable[[int], nn.Module]] = None,
drop_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__(
GlobalAvgPool2d(flatten=False),
*conv_sequence(
channels,
max(channels // sa_ratio, 32),
act_layer,
norm_layer,
drop_layer,
kernel_size=1,
stride=1,
bias=(norm_layer is None),
),
*conv_sequence(
max(channels // sa_ratio, 32),
channels * out_multiplier,
nn.Sigmoid(),
None,
drop_layer,
kernel_size=1,
stride=1,
),
)
class SKConv2d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
m: int = 2,
sa_ratio: int = 16,
act_layer: Optional[nn.Module] = None,
norm_layer: Optional[Callable[[int], nn.Module]] = None,
drop_layer: Optional[Callable[..., nn.Module]] = None,
**kwargs: Any,
) -> None:
super().__init__()
self.path_convs = nn.ModuleList(
[
nn.Sequential(
*conv_sequence(
in_channels,
out_channels,
act_layer,
norm_layer,
drop_layer,
kernel_size=3,
bias=(norm_layer is None),
dilation=idx + 1,
padding=idx + 1,
**kwargs,
)
)
for idx in range(m)
]
)
self.sa = SoftAttentionLayer(out_channels, sa_ratio, m, act_layer, norm_layer, drop_layer)
def forward(self, x: torch.Tensor) -> torch.Tensor:
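        # Run every kernel branch, derive per-branch attention from the summed
        # branch outputs, then fuse the branches as a softmax-weighted sum.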
paths = torch.stack([path_conv(x) for path_conv in self.path_convs], dim=1)
b, m, c = paths.shape[:3]
z = self.sa(paths.sum(dim=1)).view(b, m, c, 1, 1)
attention_factors = torch.softmax(z, dim=1)
out = (attention_factors * paths).sum(dim=1)
return out
class SKBottleneck(_ResBlock):
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 32,
base_width: int = 64,
dilation: int = 1,
act_layer: Optional[nn.Module] = None,
norm_layer: Optional[Callable[[int], nn.Module]] = None,
drop_layer: Optional[Callable[..., nn.Module]] = None,
conv_layer: Optional[Callable[..., nn.Module]] = None,
**kwargs: Any,
) -> None:
width = int(planes * (base_width / 64.0)) * groups
super().__init__(
[
*conv_sequence(
inplanes,
width,
act_layer,
norm_layer,
drop_layer,
conv_layer,
kernel_size=1,
stride=1,
bias=(norm_layer is None),
**kwargs,
),
SKConv2d(width, width, 2, 16, act_layer, norm_layer, drop_layer, groups=groups, stride=stride),
*conv_sequence(
width,
planes * self.expansion,
None,
norm_layer,
drop_layer,
conv_layer,
kernel_size=1,
stride=1,
bias=(norm_layer is None),
**kwargs,
),
],
downsample,
act_layer,
)
def _sknet(
arch: str,
pretrained: bool,
progress: bool,
num_blocks: List[int],
out_chans: List[int],
**kwargs: Any,
) -> ResNet:
# Build the model
model = ResNet(SKBottleneck, num_blocks, out_chans, **kwargs) # type: ignore[arg-type]
model.default_cfg = default_cfgs[arch] # type: ignore[assignment]
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, default_cfgs[arch]["url"], progress)
return model
def sknet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""SKNet-50 from
`"Selective Kernel Networks" <https://arxiv.org/pdf/1903.06586.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _sknet("sknet50", pretrained, progress, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)
def sknet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""SKNet-101 from
`"Selective Kernel Networks" <https://arxiv.org/pdf/1903.06586.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _sknet("sknet101", pretrained, progress, [3, 4, 23, 3], [64, 128, 256, 512], **kwargs)
def sknet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""SKNet-152 from
`"Selective Kernel Networks" <https://arxiv.org/pdf/1903.06586.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
Returns:
torch.nn.Module: classification model
"""
return _sknet("sknet152", pretrained, progress, [3, 8, 86, 3], [64, 128, 256, 512], **kwargs)
|
[
"holocron.nn.GlobalAvgPool2d",
"torch.nn.Sigmoid",
"torch.softmax"
] |
[((3377, 3400), 'torch.softmax', 'torch.softmax', (['z'], {'dim': '(1)'}), '(z, dim=1)\n', (3390, 3400), False, 'import torch\n'), ((1391, 1421), 'holocron.nn.GlobalAvgPool2d', 'GlobalAvgPool2d', ([], {'flatten': '(False)'}), '(flatten=False)\n', (1406, 1421), False, 'from holocron.nn import GlobalAvgPool2d\n'), ((1856, 1868), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1866, 1868), True, 'import torch.nn as nn\n')]
|
import re, sys, os
file = open("output", "r")
lines = file.readlines()
file.close()
variable = []
eta1 = []
eta2 = []
mean = []
error= []
effS = []
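# Parse the tab-separated fit results; the fields used are the variable name,
# the range cut string, the mean, its error and the effective sigma.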
for line in lines:
elements = re.split("\t", line)
variable += [elements[1],]
eta1 += [re.split(">", re.split("&&", elements[2])[0])[1],]
eta2 += [re.split("<", elements[2])[1],]
mean += [elements[3],]
error+= [elements[4],]
effS += [elements[5][:-1],]
header = """void plot_MeanVsET(){
TCanvas *c1 = new TCanvas("c1","Mean vs ET", 800, 600);
TH1F* h_emCorr_et = new TH1F("h_emCorr_et","",300,0,300);
TH1F* h_em_et = new TH1F("h_em_et","",300,0,300);
c1->cd();
"""
file = open("plot_MeanVsET.C", "w")
file.write(header)
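# Fill each histogram bin at the midpoint of the parsed range with the fitted mean and its error.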
for i in ("emCorr_et", "em_et"):
for j in range(0, len(eta1)):
if variable[j] != i:
continue
bin = str(int((float(eta1[j]) + float(eta2[j]))/2))
file.write(" h_" + i + "->SetBinContent(" + bin + ", " + mean[j] + ");\n")
file.write(" h_" + i + "->SetBinError (" + bin + ", " + error[j] +");\n")
file.write(" h_emCorr_et->SetMarkerStyle(23);\n")
file.write(" h_em_et ->SetMarkerStyle(20);\n")
file.write(" h_emCorr_et->SetMarkerColor(4);\n")
file.write(" h_em_et ->SetMarkerColor(1);\n")
file.write(" gStyle->SetOptStat(0);\n")
file.write(" h_em_et ->Draw();\n")
file.write(" h_emCorr_et->Draw(\"SAME\");\n")
file.write(" TLine* line = new TLine(0,1,300,1);\n")
file.write(" line->Draw();\n")
header ="""
TAxis* ax = h_em_et->GetXaxis();
ax->SetTitle("Et (GeV)");
TAxis* ay = h_em_et->GetYaxis();
ay->SetTitle("E_{T}^{RECO}/E_{T}^{MC}");
ay->SetRangeUser(0.9,1.05);
TLegend *leg = new TLegend(0.2, 0.2, 0.4, 0.4);
leg->AddEntry(h_em_et, "Before correction");
leg->AddEntry(h_emCorr_et, "After correction ");
leg->Draw();
TLine* line = new TLine(0,1,1.5,1);
line->SetLineWidth(2);
line->SetLineColor(2);
line->Draw();
c1->Print("MeanVsET.ps");
gROOT->ProcessLine(".q");
"""
file.write(header)
file.write("}\n")
|
[
"re.split"
] |
[((186, 206), 're.split', 're.split', (['"""\t"""', 'line'], {}), "('\\t', line)\n", (194, 206), False, 'import re, sys, os\n'), ((320, 346), 're.split', 're.split', (['"""<"""', 'elements[2]'], {}), "('<', elements[2])\n", (328, 346), False, 'import re, sys, os\n'), ((270, 297), 're.split', 're.split', (['"""&&"""', 'elements[2]'], {}), "('&&', elements[2])\n", (278, 297), False, 'import re, sys, os\n')]
|
#!/usr/bin/env python
from .usb import *
# High-performance C pcap library
# Python 2 only, no longer maintained
try:
import pcap
except ImportError:
pcap = None
# Slow but reliable pure Python
try:
import pcapng
except ImportError:
pcapng = None
import sys
"""
Quick hack to detect packet format
I don't think the API i'm using
"""
def guess_linux(buff):
"""
linux heuristics
0x8: one of SCE
0x1C:0x1F (urb status): 0 => success, almost always
windows:
"""
if len(buff) < 0x30:
return False
return sum(buff[0x1C:0x20]) == 0
def guess_windows(buff):
"""
windows heuristics
0xA:0xD (error code): 0 => success, almost always
linux: endpoint, device, bus id. Unlikely to be 0
0x10 (IRP information): either 0 or 1
"""
if len(buff) < 0x24:
return False
return sum(buff[0x0A:0x0E]) == 0
class PcapParser(object):
def __init__(self, fn, use_pcapng=None):
self.fn = fn
# Select library
self.use_pcapng = use_pcapng
if self.use_pcapng is None:
# User higher performance library if available
self.use_pcapng = False if pcap else True
# self.pcapng = "pcapng" in argsj["parser"]
if self.use_pcapng:
assert pcapng, "pcapng library requested but no pcapng library"
else:
assert pcap, "pcap library requested but no pcap library"
# Initialize library
if self.use_pcapng:
self.fp = open(fn, 'rb')
self.scanner = pcapng.FileScanner(self.fp)
self.scanner_iter = self.scanner.__iter__()
else:
self.pcap = pcap.pcapObject()
self.pcap.open_offline(fn)
def next(self, loop_cb):
"""return True if there was data and might be more, False if nothing was processed"""
if self.use_pcapng:
while True:
try:
block = next(self.scanner_iter)
except StopIteration:
return False
if not isinstance(block, pcapng.blocks.EnhancedPacket):
continue
loop_cb(block.captured_len, block.packet_data, block.timestamp)
return True
else:
got = [False]
# return code isn't given to indicate end
def my_loop_cb(*args, **kwargs):
got[0] = True
loop_cb(*args, **kwargs)
self.pcap.loop(1, my_loop_cb)
return got[0]
def load_pcap(fn, loop_cb, lim=float('inf'), use_pcapng=None):
parser = PcapParser(fn, use_pcapng=use_pcapng)
i = 0
while parser.next(loop_cb):
i += 1
if i >= lim:
break
def guess_parser(fn):
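    """
    Guess the capture flavour by applying the Windows and Linux heuristics to the
    first few packets; returns 'win-pcapng', 'win-pcap', 'lin-pcapng' or 'lin-pcap'.
    """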
windows = [0]
linux = [0]
def loop_cb_guess(caplen, packet, ts):
packet = bytearray(packet)
if guess_linux(packet):
linux[0] += 1
if guess_windows(packet):
windows[0] += 1
parser = PcapParser(fn)
i = 0
while parser.next(loop_cb_guess):
i += 1
if i >= 3:
break
if windows[0]:
assert linux[0] == 0
if parser.use_pcapng:
return "win-pcapng"
else:
return "win-pcap"
if linux[0]:
assert windows[0] == 0
if parser.use_pcapng:
return "lin-pcapng"
else:
return "lin-pcap"
assert 0, "failed to identify packet format"
|
[
"pcap.pcapObject",
"pcapng.FileScanner"
] |
[((1563, 1590), 'pcapng.FileScanner', 'pcapng.FileScanner', (['self.fp'], {}), '(self.fp)\n', (1581, 1590), False, 'import pcapng\n'), ((1685, 1702), 'pcap.pcapObject', 'pcap.pcapObject', ([], {}), '()\n', (1700, 1702), False, 'import pcap\n')]
|
"""The main script for training the model."""
from arima_model import ARIMA
import torch
import numpy as np
import plotly.graph_objects as go
trainSize = 14
sampleData = torch.tensor(np.load('data.npy'))
sampleSize = len(sampleData)
trainData = sampleData[:trainSize]
predictionModel = ARIMA(p=0, d=1, q=1)
predictionModel.fit(trainData, epochs=100, learningRate=0.01)
testData = sampleData[trainSize:]
inference = torch.zeros(sampleSize)
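# Seed the rolling forecast with the last two observed training points.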
inference[0] = trainData[-2]
inference[1] = trainData[-1]
errors = torch.tensor(np.random.normal(
loc=0, scale=1, size=sampleSize), dtype=torch.float32)
with torch.no_grad():
for i in range(len(testData) - 2):
inference[i+2] = predictionModel.forward(
inference[0:i+2], errors[0:i+2])
fig = go.Figure()
fig.add_trace(go.Scatter(x=torch.arange(sampleSize), y=sampleData,
mode='lines',
name='sampleData'))
fig.add_trace(go.Scatter(x=torch.arange(len(testData))+trainSize,
y=inference.detach().numpy(),
mode='lines+markers',
name='predicted'))
fig.show()
|
[
"numpy.load",
"plotly.graph_objects.Figure",
"torch.arange",
"numpy.random.normal",
"torch.zeros",
"torch.no_grad",
"arima_model.ARIMA"
] |
[((290, 310), 'arima_model.ARIMA', 'ARIMA', ([], {'p': '(0)', 'd': '(1)', 'q': '(1)'}), '(p=0, d=1, q=1)\n', (295, 310), False, 'from arima_model import ARIMA\n'), ((420, 443), 'torch.zeros', 'torch.zeros', (['sampleSize'], {}), '(sampleSize)\n', (431, 443), False, 'import torch\n'), ((764, 775), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (773, 775), True, 'import plotly.graph_objects as go\n'), ((186, 205), 'numpy.load', 'np.load', (['"""data.npy"""'], {}), "('data.npy')\n", (193, 205), True, 'import numpy as np\n'), ((524, 573), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'sampleSize'}), '(loc=0, scale=1, size=sampleSize)\n', (540, 573), True, 'import numpy as np\n'), ((606, 621), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (619, 621), False, 'import torch\n'), ((803, 827), 'torch.arange', 'torch.arange', (['sampleSize'], {}), '(sampleSize)\n', (815, 827), False, 'import torch\n')]
|
from django.urls import path, include
from frontadmin import views
urlpatterns = [
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/profile/', views.profile, name='profile'),
]
|
[
"django.urls.path",
"django.urls.include"
] |
[((148, 204), 'django.urls.path', 'path', (['"""accounts/profile/"""', 'views.profile'], {'name': '"""profile"""'}), "('accounts/profile/', views.profile, name='profile')\n", (152, 204), False, 'from django.urls import path, include\n'), ((106, 141), 'django.urls.include', 'include', (['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (113, 141), False, 'from django.urls import path, include\n')]
|
#!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration."""
import platform
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
import setuptools
# Configure the required packages and scripts to install, depending on
# Python version and OS.
REQUIRED_PACKAGES = [
'httplib2>=0.8',
'fasteners>=0.14',
'oauth2client>=1.4.12',
'six>=1.12.0',
]
CLI_PACKAGES = [
'python-gflags>=3.0.6',
]
TESTING_PACKAGES = [
'mock>=1.0.1',
]
CONSOLE_SCRIPTS = [
'gen_client = apitools.gen.gen_client:main',
]
py_version = platform.python_version()
_APITOOLS_VERSION = '0.5.32'
with open('README.rst') as fileobj:
README = fileobj.read()
setuptools.setup(
name='google-apitools',
version=_APITOOLS_VERSION,
description='client libraries for humans',
long_description=README,
url='http://github.com/google/apitools',
author='<NAME>',
author_email='<EMAIL>',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
# Contained modules and scripts.
packages=setuptools.find_packages(include=['apitools']),
entry_points={'console_scripts': CONSOLE_SCRIPTS},
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + CLI_PACKAGES + TESTING_PACKAGES,
extras_require={
'cli': CLI_PACKAGES,
'testing': TESTING_PACKAGES,
},
# Add in any packaged data.
include_package_data=True,
package_data={
'apitools.data': ['*'],
},
exclude_package_data={
'': [
'*_test.py',
'*/testing/*',
'*/testdata/*',
'base/protorpclite/test_util.py',
'gen/test_utils.py',
],
},
# PyPI package information.
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='apitools',
)
|
[
"platform.python_version",
"setuptools.find_packages",
"ez_setup.use_setuptools"
] |
[((1187, 1212), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (1210, 1212), False, 'import platform\n'), ((755, 771), 'ez_setup.use_setuptools', 'use_setuptools', ([], {}), '()\n', (769, 771), False, 'from ez_setup import use_setuptools\n'), ((1680, 1726), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'include': "['apitools']"}), "(include=['apitools'])\n", (1704, 1726), False, 'import setuptools\n')]
|
import logging
from django.shortcuts import render
from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.core.exceptions import ValidationError
from django.http import Http404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from . import models
from . import forms
from fruitsales.utils import generate_sales_from_csv
logger = logging.getLogger(__name__)
class SaleListView(LoginRequiredMixin, ListView):
    """List view that displays sales in descending order of sale datetime."""
model = models.Sale
template_name = 'fruitsales/fruitsale_list.html'
login_url = 'login'
def get_queryset(self):
return self.model.objects.order_by('-sold_at')
class SaleCreateView(LoginRequiredMixin, CreateView):
    """Sale registration view
    * Computes the sales amount from the entered quantity and the unit price in the fruit master, and registers it
    """
model = models.Sale
form_class = forms.SaleForm
template_name = 'fruitsales/fruitsale_new.html'
success_url = reverse_lazy('fruitsale_list')
login_url = 'login'
def form_valid(self, form):
response = super().form_valid(form)
if self.object.fruit:
            # Compute the sales amount at registration time from the fruit's unit price and the quantity
price = self.object.fruit.price
self.object.amount = price * self.object.number
else:
            # If no fruit is specified, the amount is 0
self.object.amount = 0
self.object.save()
return response
# class SaleDetailView(DetailView):
# model = models.Sale
# template_name = 'fruitsales/fruitsale_detail.html'
class SaleUpdateView(LoginRequiredMixin, UpdateView):
    """Sale edit view."""
model = models.Sale
template_name = 'fruitsales/fruitsale_edit.html'
fields = ['fruit', 'number', 'sold_at']
login_url = 'login'
class SaleDeleteView(LoginRequiredMixin, DeleteView):
    """Sale delete view."""
model = models.Sale
template_name = 'fruitsales/fruitsale_delete.html'
success_url = reverse_lazy('fruitsale_list')
login_url = 'login'
@login_required
def sales_csv_input_view(request):
    """View that reads a POSTed CSV file and adds its rows to the DB as Sale records
    * If the addition succeeds, displays the added records as a list
    * If content validation fails, displays an error page
"""
import csv
if request.method == 'POST':
logger.debug('received files: %s', list(request.FILES.items()))
if 'records-csv' in request.FILES:
try:
sales = generate_sales_from_csv(request.FILES['records-csv'])
except ValidationError as e:
return render(request, 'fruitsales/fruitsale_csv_input_failure.html', {'message': e.args[0]})
return render(request, 'fruitsales/fruitsale_csv_input_success.html', {'object_list': sales})
return Http404()
|
[
"django.urls.reverse_lazy",
"django.http.Http404",
"fruitsales.utils.generate_sales_from_csv",
"django.shortcuts.render",
"logging.getLogger"
] |
[((486, 513), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (503, 513), False, 'import logging\n'), ((1023, 1053), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""fruitsale_list"""'], {}), "('fruitsale_list')\n", (1035, 1053), False, 'from django.urls import reverse_lazy\n'), ((1969, 1999), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""fruitsale_list"""'], {}), "('fruitsale_list')\n", (1981, 1999), False, 'from django.urls import reverse_lazy\n'), ((2769, 2778), 'django.http.Http404', 'Http404', ([], {}), '()\n', (2776, 2778), False, 'from django.http import Http404\n'), ((2658, 2749), 'django.shortcuts.render', 'render', (['request', '"""fruitsales/fruitsale_csv_input_success.html"""', "{'object_list': sales}"], {}), "(request, 'fruitsales/fruitsale_csv_input_success.html', {\n 'object_list': sales})\n", (2664, 2749), False, 'from django.shortcuts import render\n'), ((2408, 2461), 'fruitsales.utils.generate_sales_from_csv', 'generate_sales_from_csv', (["request.FILES['records-csv']"], {}), "(request.FILES['records-csv'])\n", (2431, 2461), False, 'from fruitsales.utils import generate_sales_from_csv\n'), ((2527, 2617), 'django.shortcuts.render', 'render', (['request', '"""fruitsales/fruitsale_csv_input_failure.html"""', "{'message': e.args[0]}"], {}), "(request, 'fruitsales/fruitsale_csv_input_failure.html', {'message':\n e.args[0]})\n", (2533, 2617), False, 'from django.shortcuts import render\n')]
|
"""
Module for testing flask api with unittest
"""
import json
import unittest
import urllib
from urllib import request
from urllib.error import HTTPError
class TestGet(unittest.TestCase):
"""
Test functions of request to flask server
"""
def setUp(self):
self.data = {'list_frame_contour': 'data/bounding_box.json', "frame_path": 'data/image/'}
self.data_bb_missing = {"frame_path": 'data/image/'}
self.data_frame_missing = {'list_frame_contour': 'data/bounding_boxes/'}
self.data_all_missing = {}
self.url = "http://0.0.0.0:5000/Track_from_JSON/"
def test_json_output(self):
"""
        Test the types of the request output.
        Test the values and length of the request output.
:return:
"""
url_values = urllib.parse.urlencode(self.data)
full_url = self.url + '?' + url_values
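        # Build the request and declare a JSON content type before calling the endpoint.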
req = request.Request(full_url)
req.add_header('Content-Type', 'application/json; charset=utf-8')
result = request.urlopen(full_url).read()
output = json.loads(result.decode("utf8"))
self.assertEqual(type(output), dict)
for k in output.keys():
self.assertEqual(type(output[k]), list)
for item in output[k]:
self.assertEqual(type(item), int)
self.assertEqual(output["frame 11"], [1])
self.assertEqual(output["frame 18"], [1, 3])
self.assertEqual(output["frame 518"], [101])
self.assertNotIn("frame 519", output.keys())
self.assertEqual(len(output), 519)
def test_json_output_with_dict_input(self):
"""
        Same checks as test_json_output, but the bounding boxes are passed
        inline as a JSON string instead of a file path.
:return:
"""
with open('data/bounding_box.json', 'r') as file:
data_file = json.load(file)
json_data = json.dumps(data_file)
data = {'list_frame_contour': json_data, "frame_path": 'data/image/'}
url_values = urllib.parse.urlencode(data)
full_url = self.url + '?' + url_values
req = request.Request(full_url)
req.add_header('Content-Type', 'application/json; charset=utf-8')
result = request.urlopen(full_url).read()
output = json.loads(result.decode("utf8"))
self.assertEqual(type(output), dict)
for k in output.keys():
self.assertEqual(type(output[k]), list)
for item in output[k]:
self.assertEqual(type(item), int)
self.assertEqual(output["frame 11"], [1])
self.assertEqual(output["frame 18"], [1, 3])
self.assertEqual(output["frame 518"], [101])
self.assertNotIn("frame 519", output.keys())
self.assertEqual(len(output), 519)
def test_error_bb_missing(self):
"""
        Test that an HTTPError is raised when list_frame_contour is missing.
:return:
"""
url_values = urllib.parse.urlencode(self.data_bb_missing)
full_url = self.url + '?' + url_values
req = request.Request(full_url)
with self.assertRaises(HTTPError):
request.urlopen(req).read()
def test_error_frame_missing(self):
"""
        Test that an HTTPError is raised when frame_path is missing.
:return:
"""
url_values = urllib.parse.urlencode(self.data_frame_missing)
full_url = self.url + '?' + url_values
req = request.Request(full_url)
with self.assertRaises(HTTPError):
request.urlopen(req).read()
def test_error_all_missing(self):
"""
        Test that an HTTPError is raised when both list_frame_contour and frame_path are missing.
:return:
"""
url_values = urllib.parse.urlencode(self.data_all_missing)
url = "http://0.0.0.0:5000/Track_from_JSON/"
full_url = url + '?' + url_values
req = request.Request(full_url)
with self.assertRaises(HTTPError):
request.urlopen(req).read()
|
[
"json.load",
"urllib.request.Request",
"urllib.parse.urlencode",
"urllib.request.urlopen",
"json.dumps"
] |
[((789, 822), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['self.data'], {}), '(self.data)\n', (811, 822), False, 'import urllib\n'), ((885, 910), 'urllib.request.Request', 'request.Request', (['full_url'], {}), '(full_url)\n', (900, 910), False, 'from urllib import request\n'), ((1850, 1871), 'json.dumps', 'json.dumps', (['data_file'], {}), '(data_file)\n', (1860, 1871), False, 'import json\n'), ((1971, 1999), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['data'], {}), '(data)\n', (1993, 1999), False, 'import urllib\n'), ((2063, 2088), 'urllib.request.Request', 'request.Request', (['full_url'], {}), '(full_url)\n', (2078, 2088), False, 'from urllib import request\n'), ((2904, 2948), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['self.data_bb_missing'], {}), '(self.data_bb_missing)\n', (2926, 2948), False, 'import urllib\n'), ((3011, 3036), 'urllib.request.Request', 'request.Request', (['full_url'], {}), '(full_url)\n', (3026, 3036), False, 'from urllib import request\n'), ((3285, 3332), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['self.data_frame_missing'], {}), '(self.data_frame_missing)\n', (3307, 3332), False, 'import urllib\n'), ((3395, 3420), 'urllib.request.Request', 'request.Request', (['full_url'], {}), '(full_url)\n', (3410, 3420), False, 'from urllib import request\n'), ((3691, 3736), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['self.data_all_missing'], {}), '(self.data_all_missing)\n', (3713, 3736), False, 'import urllib\n'), ((3847, 3872), 'urllib.request.Request', 'request.Request', (['full_url'], {}), '(full_url)\n', (3862, 3872), False, 'from urllib import request\n'), ((1814, 1829), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1823, 1829), False, 'import json\n'), ((1003, 1028), 'urllib.request.urlopen', 'request.urlopen', (['full_url'], {}), '(full_url)\n', (1018, 1028), False, 'from urllib import request\n'), ((2181, 2206), 'urllib.request.urlopen', 'request.urlopen', (['full_url'], {}), '(full_url)\n', (2196, 2206), False, 'from urllib import request\n'), ((3092, 3112), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (3107, 3112), False, 'from urllib import request\n'), ((3476, 3496), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (3491, 3496), False, 'from urllib import request\n'), ((3928, 3948), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (3943, 3948), False, 'from urllib import request\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import numpy as np
import pdb
from functools import partial
from opts import parser
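# Command-line options are parsed at import time; args.batch_size and args.num_segments are used in ResNet.forward().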
args = parser.parse_args()
from ops.rstg import *
__all__ = [
'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200',
]
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
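    # Type-'A' shortcut: average-pool to the new resolution, then zero-pad the extra channels.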
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
conv_op = None
offset_groups = 1
def __init__(self, dim_in, dim_out, stride, dim_inner, group=1, use_temp_conv=1, temp_stride=1, dcn=False,
shortcut_type='B'):
super(Bottleneck, self).__init__()
# 1 x 1 layer
self.with_dcn = dcn
self.conv1 = self.Conv3dBN(dim_in, dim_inner, (1 + use_temp_conv * 2, 1, 1), (temp_stride, 1, 1),
(use_temp_conv, 0, 0))
self.relu = nn.ReLU(inplace=True)
# 3 x 3 layer
self.conv2 = self.Conv3dBN(dim_inner, dim_inner, (1, 3, 3), (1, stride, stride), (0, 1, 1))
# 1 x 1 layer
self.conv3 = self.Conv3dBN(dim_inner, dim_out, (1, 1, 1), (1, 1, 1), (0, 0, 0))
self.shortcut_type = shortcut_type
self.dim_in = dim_in
self.dim_out = dim_out
self.temp_stride = temp_stride
self.stride = stride
# nn.Conv3d(dim_in, dim_out, (1,1,1),(temp_stride,stride,stride),(0,0,0))
if self.shortcut_type == 'B':
if self.dim_in == self.dim_out and self.temp_stride == 1 and self.stride == 1: # or (self.dim_in == self.dim_out and self.dim_in == 64 and self.stride ==1):
pass
else:
# pass
self.shortcut = self.Conv3dBN(dim_in, dim_out, (1, 1, 1), (temp_stride, stride, stride), (0, 0, 0))
# nn.Conv3d(dim_in,dim_inner,kernel_size=(1+use_temp_conv*2,1,1),stride = (temp_stride,1,1),padding = )
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.conv3(out)
if self.dim_in == self.dim_out and self.temp_stride == 1 and self.stride == 1:
pass
else:
residual = self.shortcut(residual)
out += residual
out = self.relu(out)
return out
def Conv3dBN(self, dim_in, dim_out, kernels, strides, pads, group=1):
if self.with_dcn and kernels[0] > 1:
# use deformable conv
return nn.Sequential(
self.conv_op(dim_in, dim_out, kernel_size=kernels, stride=strides, padding=pads, bias=False,
offset_groups=self.offset_groups),
nn.BatchNorm3d(dim_out)
)
else:
return nn.Sequential(
nn.Conv3d(dim_in, dim_out, kernel_size=kernels, stride=strides, padding=pads, bias=False),
nn.BatchNorm3d(dim_out)
)
class ResNet(nn.Module):
def __init__(self,
block,
layers,
use_temp_convs_set,
temp_strides_set,
sample_size,
sample_duration,
shortcut_type='B',
num_classes=400,
stage_with_dcn=(False, False, False, False),
extract_features=False,
loss_type='softmax'):
super(ResNet, self).__init__()
self.extract_features = extract_features
self.stage_with_dcn = stage_with_dcn
self.group = 1
self.width_per_group = 64
self.dim_inner = self.group * self.width_per_group
# self.shortcut_type = shortcut_type
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=(1 + use_temp_convs_set[0][0] * 2, 7, 7),
stride=(temp_strides_set[0][0], 2, 2),
padding=(use_temp_convs_set[0][0], 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0))
with_dcn = True if self.stage_with_dcn[0] else False
self.layer1 = self._make_layer(block, 64, 256, shortcut_type, stride=1, num_blocks=layers[0],
dim_inner=self.dim_inner, group=self.group, use_temp_convs=use_temp_convs_set[1],
temp_strides=temp_strides_set[1], dcn=with_dcn)
self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
with_dcn = True if self.stage_with_dcn[1] else False
self.layer2 = self._make_layer(block, 256, 512, shortcut_type, stride=2, num_blocks=layers[1],
dim_inner=self.dim_inner * 2, group=self.group,
use_temp_convs=use_temp_convs_set[2], temp_strides=temp_strides_set[2],
dcn=with_dcn)
with_dcn = True if self.stage_with_dcn[2] else False
self.layer3 = self._make_layer(block, 512, 1024, shortcut_type, stride=2, num_blocks=layers[2],
dim_inner=self.dim_inner * 4, group=self.group,
use_temp_convs=use_temp_convs_set[3], temp_strides=temp_strides_set[3],
dcn=with_dcn)
with_dcn = True if self.stage_with_dcn[3] else False
self.layer4 = self._make_layer(block, 1024, 2048, shortcut_type, stride=1, num_blocks=layers[3],
dim_inner=self.dim_inner * 8, group=self.group,
use_temp_convs=use_temp_convs_set[4], temp_strides=temp_strides_set[4],
dcn=with_dcn)
last_duration = int(math.ceil(sample_duration / 2)) # int(math.ceil(sample_duration / 8))
last_size = int(math.ceil(sample_size / 16))
# self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1) #nn.AdaptiveAvgPool3d((1, 1, 1)) #
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.dropout = torch.nn.Dropout(p=0.5)
self.classifier = nn.Linear(2048, num_classes)
for m in self.modules():
# if isinstance(m, nn.Conv3d):
# m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
# elif isinstance(m,nn.Linear):
# m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
# elif
if isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, dim_in, dim_out, shortcut_type, stride, num_blocks, dim_inner=None, group=None,
use_temp_convs=None, temp_strides=None, dcn=False):
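        # Stack num_blocks residual blocks; only the first block applies the spatial stride.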
if use_temp_convs is None:
use_temp_convs = np.zeros(num_blocks).astype(int)
if temp_strides is None:
temp_strides = np.ones(num_blocks).astype(int)
if len(use_temp_convs) < num_blocks:
for _ in range(num_blocks - len(use_temp_convs)):
use_temp_convs.append(0)
temp_strides.append(1)
layers = []
for idx in range(num_blocks):
block_stride = 2 if (idx == 0 and stride == 2) else 1
layers.append(
block(dim_in, dim_out, block_stride, dim_inner, group, use_temp_convs[idx], temp_strides[idx], dcn))
dim_in = dim_out
return nn.Sequential(*layers)
def forward_single(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.layer3(x)
features = self.layer4(x)
x = self.avgpool(features)
y = x
# x = x.view(x.size(0), -1)
# x = self.dropout(x)
# y = self.classifier(x)
if self.extract_features:
return y, features
else:
return y
def forward_multi(self, x):
clip_preds = []
# import ipdb;ipdb.set_trace()
for clip_idx in range(x.shape[1]): # B, 10, 3, 3, 32, 224, 224
spatial_crops = []
for crop_idx in range(x.shape[2]):
clip = x[:, clip_idx, crop_idx]
clip = self.forward_single(clip)
spatial_crops.append(clip)
spatial_crops = torch.stack(spatial_crops, 1).mean(1) # (B, 400)
clip_preds.append(spatial_crops)
clip_preds = torch.stack(clip_preds, 1).mean(1) # (B, 400)
return clip_preds
def forward(self, x):
# pdb.set_trace()
# x: BT x 3 x H x W -> B x T x 3 x H x W
# pdb.set_trace()
x = x.view([args.batch_size, args.num_segments, x.shape[-3], x.shape[-2], x.shape[-1]])
x = x.permute([0,2,1,3,4])
# 5D tensor == single clip
if x.dim() == 5:
pred = self.forward_single(x)
# 7D tensor == 3 crops/10 clips
elif x.dim() == 7:
pred = self.forward_multi(x)
# loss_dict = {}
# if 'label' in batch:
# loss = F.cross_entropy(pred, batch['label'], reduction='none')
# loss_dict = {'clf': loss}
return pred
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
# import ipdb;ipdb.set_trace()
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def obtain_arc(arc_type):
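    # Returns per-stage temporal kernel flags and temporal strides for the C2D/I3D ResNet-50/101 variants.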
# c2d, ResNet50
if arc_type == 1:
use_temp_convs_1 = [0]
temp_strides_1 = [2]
use_temp_convs_2 = [0, 0, 0]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [0, 0, 0, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = [0, ] * 6
temp_strides_4 = [1, ] * 6
use_temp_convs_5 = [0, 0, 0]
temp_strides_5 = [1, 1, 1]
# i3d, ResNet50
if arc_type == 2:
use_temp_convs_1 = [2]
temp_strides_1 = [1]
use_temp_convs_2 = [1, 1, 1]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [1, 0, 1, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = [1, 0, 1, 0, 1, 0]
temp_strides_4 = [1, 1, 1, 1, 1, 1]
use_temp_convs_5 = [0, 1, 0]
temp_strides_5 = [1, 1, 1]
# c2d, ResNet101
if arc_type == 3:
use_temp_convs_1 = [0]
temp_strides_1 = [2]
use_temp_convs_2 = [0, 0, 0]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [0, 0, 0, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = [0, ] * 23
temp_strides_4 = [1, ] * 23
use_temp_convs_5 = [0, 0, 0]
temp_strides_5 = [1, 1, 1]
# i3d, ResNet101
if arc_type == 4:
use_temp_convs_1 = [2]
temp_strides_1 = [2]
use_temp_convs_2 = [1, 1, 1]
temp_strides_2 = [1, 1, 1]
use_temp_convs_3 = [1, 0, 1, 0]
temp_strides_3 = [1, 1, 1, 1]
use_temp_convs_4 = []
for i in range(23):
if i % 2 == 0:
use_temp_convs_4.append(1)
else:
use_temp_convs_4.append(0)
temp_strides_4 = [1, ] * 23
use_temp_convs_5 = [0, 1, 0]
temp_strides_5 = [1, 1, 1]
use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5]
temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5]
return use_temp_convs_set, temp_strides_set
def resnet10(**kwargs):
"""Constructs a ResNet-18 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(BasicBlock, [1, 1, 1, 1], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet18(**kwargs):
"""Constructs a ResNet-18 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(BasicBlock, [2, 2, 2, 2], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet34(**kwargs):
"""Constructs a ResNet-34 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(BasicBlock, [3, 4, 6, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet50(extract_features, **kwargs):
"""Constructs a ResNet-50 model.
"""
use_temp_convs_set, temp_strides_set = obtain_arc(2)
model = ResNet(Bottleneck, [3, 4, 6, 3], use_temp_convs_set, temp_strides_set,
extract_features=extract_features, **kwargs)
return model
def resnet101(**kwargs):
"""Constructs a ResNet-101 model.
"""
use_temp_convs_set, temp_strides_set = obtain_arc(4)
model = ResNet(Bottleneck, [3, 4, 23, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet152(**kwargs):
"""Constructs a ResNet-101 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(Bottleneck, [3, 8, 36, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def resnet200(**kwargs):
"""Constructs a ResNet-101 model.
"""
use_temp_convs_set = []
temp_strides_set = []
model = ResNet(Bottleneck, [3, 24, 36, 3], use_temp_convs_set, temp_strides_set, **kwargs)
return model
def Net(num_classes, extract_features=False, loss_type='softmax',
weights=None, freeze_all_but_cls=False):
net = globals()['resnet' + str(50)](
num_classes=num_classes,
sample_size=50,
sample_duration=32,
extract_features=extract_features,
loss_type=loss_type,
)
if weights is not None:
kinetics_weights = torch.load(weights)['state_dict']
print("Found weights in {}.".format(weights))
cls_name = 'fc'
else:
kinetics_weights = torch.load('kinetics-res50.pth')
cls_name = 'fc'
        print('\n Restoring Kinetics \n')
new_weights = {}
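    # Strip the DataParallel 'module.' prefix and skip the old classifier head weights.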
for k, v in kinetics_weights.items():
if not k.startswith('module.' + cls_name):
new_weights[k.replace('module.', '')] = v
else:
print(f"!!! Smt wrong with restore {k}")
net.load_state_dict(new_weights, strict=False)
if freeze_all_but_cls:
for name, par in net.named_parameters():
if not name.startswith('classifier'):
par.requires_grad = False
return net
|
[
"torch.nn.Dropout",
"torch.nn.BatchNorm3d",
"torch.nn.ReLU",
"torch.nn.AdaptiveAvgPool3d",
"torch.stack",
"torch.nn.Sequential",
"torch.nn.Conv3d",
"math.ceil",
"torch.load",
"numpy.zeros",
"torch.cat",
"numpy.ones",
"torch.nn.functional.avg_pool3d",
"torch.nn.Linear",
"opts.parser.parse_args",
"torch.nn.MaxPool3d"
] |
[((208, 227), 'opts.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (225, 227), False, 'from opts import parser\n'), ((469, 558), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (478, 558), True, 'import torch.nn as nn\n'), ((663, 708), 'torch.nn.functional.avg_pool3d', 'F.avg_pool3d', (['x'], {'kernel_size': '(1)', 'stride': 'stride'}), '(x, kernel_size=1, stride=stride)\n', (675, 708), True, 'import torch.nn.functional as F\n'), ((947, 986), 'torch.cat', 'torch.cat', (['[out.data, zero_pads]'], {'dim': '(1)'}), '([out.data, zero_pads], dim=1)\n', (956, 986), False, 'import torch\n'), ((1242, 1264), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1256, 1264), True, 'import torch.nn as nn\n'), ((1285, 1306), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1292, 1306), True, 'import torch.nn as nn\n'), ((1373, 1395), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['planes'], {}), '(planes)\n', (1387, 1395), True, 'import torch.nn as nn\n'), ((2310, 2331), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2317, 2331), True, 'import torch.nn as nn\n'), ((5136, 5308), 'torch.nn.Conv3d', 'nn.Conv3d', (['(3)', '(64)'], {'kernel_size': '(1 + use_temp_convs_set[0][0] * 2, 7, 7)', 'stride': '(temp_strides_set[0][0], 2, 2)', 'padding': '(use_temp_convs_set[0][0], 3, 3)', 'bias': '(False)'}), '(3, 64, kernel_size=(1 + use_temp_convs_set[0][0] * 2, 7, 7),\n stride=(temp_strides_set[0][0], 2, 2), padding=(use_temp_convs_set[0][0\n ], 3, 3), bias=False)\n', (5145, 5308), True, 'import torch.nn as nn\n'), ((5392, 5410), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {}), '(64)\n', (5406, 5410), True, 'import torch.nn as nn\n'), ((5431, 5452), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5438, 5452), True, 'import torch.nn as nn\n'), ((5477, 5549), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(1, 3, 3)', 'stride': '(1, 2, 2)', 'padding': '(0, 0, 0)'}), '(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0))\n', (5489, 5549), True, 'import torch.nn as nn\n'), ((5945, 6017), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(2, 1, 1)', 'stride': '(2, 1, 1)', 'padding': '(0, 0, 0)'}), '(kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))\n', (5957, 6017), True, 'import torch.nn as nn\n'), ((7563, 7594), 'torch.nn.AdaptiveAvgPool3d', 'nn.AdaptiveAvgPool3d', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (7583, 7594), True, 'import torch.nn as nn\n'), ((7618, 7641), 'torch.nn.Dropout', 'torch.nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (7634, 7641), False, 'import torch\n'), ((7668, 7696), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'num_classes'], {}), '(2048, num_classes)\n', (7677, 7696), True, 'import torch.nn as nn\n'), ((8990, 9012), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (9003, 9012), True, 'import torch.nn as nn\n'), ((15741, 15773), 'torch.load', 'torch.load', (['"""kinetics-res50.pth"""'], {}), "('kinetics-res50.pth')\n", (15751, 15773), False, 'import torch\n'), ((7294, 7324), 'math.ceil', 'math.ceil', (['(sample_duration / 2)'], {}), '(sample_duration / 2)\n', (7303, 7324), False, 'import math\n'), ((7389, 7416), 'math.ceil', 'math.ceil', (['(sample_size / 16)'], {}), '(sample_size / 16)\n', (7398, 7416), False, 'import 
math\n'), ((15592, 15611), 'torch.load', 'torch.load', (['weights'], {}), '(weights)\n', (15602, 15611), False, 'import torch\n'), ((4126, 4149), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['dim_out'], {}), '(dim_out)\n', (4140, 4149), True, 'import torch.nn as nn\n'), ((4228, 4322), 'torch.nn.Conv3d', 'nn.Conv3d', (['dim_in', 'dim_out'], {'kernel_size': 'kernels', 'stride': 'strides', 'padding': 'pads', 'bias': '(False)'}), '(dim_in, dim_out, kernel_size=kernels, stride=strides, padding=\n pads, bias=False)\n', (4237, 4322), True, 'import torch.nn as nn\n'), ((4335, 4358), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['dim_out'], {}), '(dim_out)\n', (4349, 4358), True, 'import torch.nn as nn\n'), ((10079, 10105), 'torch.stack', 'torch.stack', (['clip_preds', '(1)'], {}), '(clip_preds, 1)\n', (10090, 10105), False, 'import torch\n'), ((8365, 8385), 'numpy.zeros', 'np.zeros', (['num_blocks'], {}), '(num_blocks)\n', (8373, 8385), True, 'import numpy as np\n'), ((8458, 8477), 'numpy.ones', 'np.ones', (['num_blocks'], {}), '(num_blocks)\n', (8465, 8477), True, 'import numpy as np\n'), ((9963, 9992), 'torch.stack', 'torch.stack', (['spatial_crops', '(1)'], {}), '(spatial_crops, 1)\n', (9974, 9992), False, 'import torch\n')]
|
import json
from os.path import join as pjoin
import ctl
import nibabel as nib
import numpy as np
from utils import DATA_DIRS
NUM_VIEWS = 360
SDD = 1000.
SID = 750.
NUM_DET_PIXELS = 1024
DET_PIXEL_DIM = 1.
def create_fdk(filename: str):
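    # Load the NIfTI volume, simulate NUM_VIEWS cone-beam projections, and save an FDK reconstruction.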
nib_volume = nib.load(pjoin(DATA_DIRS['datasets'], filename))
nib_shape = nib_volume.header.get_data_shape()
nib_dims = tuple([float(f) for f in nib_volume.header['pixdim'][1:4]])
nib_volume = nib_volume.get_fdata()
print(nib_dims)
system = ctl.CTSystem()
system.add_component(ctl.FlatPanelDetector(
(NUM_DET_PIXELS, NUM_DET_PIXELS),
(DET_PIXEL_DIM, DET_PIXEL_DIM),
))
system.add_component(ctl.TubularGantry(SDD, SID))
system.add_component(ctl.XrayTube())
setup = ctl.AcquisitionSetup(system, NUM_VIEWS)
setup.apply_preparation_protocol(ctl.protocols.AxialScanTrajectory())
ctl_volume = ctl.VoxelVolumeF.from_numpy(nib_volume.transpose())
ctl_volume.set_voxel_size(nib_dims)
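    # Forward-project the volume, then reconstruct it with the FDK (filtered back-projection) algorithm.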
projector = ctl.ocl.RayCasterProjector()
projections = projector.configure_and_project(setup, ctl_volume)
rec = ctl.ocl.FDKReconstructor()
reco = ctl.VoxelVolumeF(nib_shape, nib_dims)
reco.fill(0)
rec.configure_and_reconstruct_to(setup, projections, reco)
img = nib.Nifti1Image(reco, np.eye(4))
nib.save(img, f'fdk{NUM_VIEWS}/{filename}')
def main():
with open('train_valid.json', 'r') as json_file:
json_dict = json.load(json_file)
dataset_files = json_dict['train_files'] \
+ json_dict['valid_files'] \
+ json_dict['test_files']
for filename in dataset_files:
print(filename)
create_fdk(filename)
if __name__ == "__main__":
main()
|
[
"ctl.AcquisitionSetup",
"json.load",
"ctl.VoxelVolumeF",
"ctl.XrayTube",
"ctl.FlatPanelDetector",
"ctl.TubularGantry",
"nibabel.save",
"ctl.CTSystem",
"ctl.ocl.RayCasterProjector",
"ctl.ocl.FDKReconstructor",
"numpy.eye",
"os.path.join",
"ctl.protocols.AxialScanTrajectory"
] |
[((509, 523), 'ctl.CTSystem', 'ctl.CTSystem', ([], {}), '()\n', (521, 523), False, 'import ctl\n'), ((769, 808), 'ctl.AcquisitionSetup', 'ctl.AcquisitionSetup', (['system', 'NUM_VIEWS'], {}), '(system, NUM_VIEWS)\n', (789, 808), False, 'import ctl\n'), ((1010, 1038), 'ctl.ocl.RayCasterProjector', 'ctl.ocl.RayCasterProjector', ([], {}), '()\n', (1036, 1038), False, 'import ctl\n'), ((1119, 1145), 'ctl.ocl.FDKReconstructor', 'ctl.ocl.FDKReconstructor', ([], {}), '()\n', (1143, 1145), False, 'import ctl\n'), ((1157, 1194), 'ctl.VoxelVolumeF', 'ctl.VoxelVolumeF', (['nib_shape', 'nib_dims'], {}), '(nib_shape, nib_dims)\n', (1173, 1194), False, 'import ctl\n'), ((1323, 1366), 'nibabel.save', 'nib.save', (['img', 'f"""fdk{NUM_VIEWS}/{filename}"""'], {}), "(img, f'fdk{NUM_VIEWS}/{filename}')\n", (1331, 1366), True, 'import nibabel as nib\n'), ((269, 307), 'os.path.join', 'pjoin', (["DATA_DIRS['datasets']", 'filename'], {}), "(DATA_DIRS['datasets'], filename)\n", (274, 307), True, 'from os.path import join as pjoin\n'), ((549, 640), 'ctl.FlatPanelDetector', 'ctl.FlatPanelDetector', (['(NUM_DET_PIXELS, NUM_DET_PIXELS)', '(DET_PIXEL_DIM, DET_PIXEL_DIM)'], {}), '((NUM_DET_PIXELS, NUM_DET_PIXELS), (DET_PIXEL_DIM,\n DET_PIXEL_DIM))\n', (570, 640), False, 'import ctl\n'), ((686, 713), 'ctl.TubularGantry', 'ctl.TubularGantry', (['SDD', 'SID'], {}), '(SDD, SID)\n', (703, 713), False, 'import ctl\n'), ((740, 754), 'ctl.XrayTube', 'ctl.XrayTube', ([], {}), '()\n', (752, 754), False, 'import ctl\n'), ((846, 881), 'ctl.protocols.AxialScanTrajectory', 'ctl.protocols.AxialScanTrajectory', ([], {}), '()\n', (879, 881), False, 'import ctl\n'), ((1308, 1317), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1314, 1317), True, 'import numpy as np\n'), ((1454, 1474), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1463, 1474), False, 'import json\n')]
|
import json
import datetime
from shapely.geometry import Point
import pandas as pd
from shapely_geojson import dumps
from ipyleaflet import TileLayer
import ipyvuetify as v
import sepal_ui.sepalwidgets as sw
import component.parameter as param
from component.scripts.scripts import PlanetKey, build_request, get_items
from component.model import AlertModel
from component.message import cm
__all__ = ["PlanetView"]
CHIPS = {
# The key is the name attribute name in the model : [name, icon, unit]
"max_images": [cm.planet.chips.max_images, "mdi-checkbox-multiple-blank", "img"],
"days_before": [cm.planet.chips.days_before, "mdi-arrow-left-circle", "d"],
"days_after": [cm.planet.chips.days_after, "mdi-arrow-right-circle", "d"],
"cloud_cover": [cm.planet.chips.cloud_cover, "mdi-cloud", "%"],
}
class CustomPanel(v.ExpansionPanel, sw.SepalWidget):
def __init__(self, model, widgets):
# link with model
self.model = model
self.title = f"{cm.planet.advanced_title}: "
# create a header, and display the default values
self.header = v.ExpansionPanelHeader()
self.shrunk_content()
self.content = v.ExpansionPanelContent(children=[w for w in widgets])
self.children = [self.header, self.content]
super().__init__()
def expand_content(self):
"""Set title when content is expanded"""
self.header.children = [self.title]
def shrunk_content(self):
"""Display chips when content is shrunk"""
# create chips
chips = [
sw.Tooltip(
v.Chip(
class_="ml-1 mr-1",
x_small=True,
children=[
v.Icon(class_="mr-1", x_small=True, children=[CHIPS[prop][1]]),
# Concatenate the value and the units
str(getattr(self.model, prop)) + f" {CHIPS[prop][2]}",
],
),
CHIPS[prop][0],
bottom=True,
)
for prop in CHIPS
]
self.header.children = [self.title] + chips
class PlanetView(v.Card, sw.SepalWidget):
"""Stand-alone component to get the user planet inputs and validate its
configuration.
Args:
model (Model): Model to store Planet parameters
"""
def __init__(self, model, map_, *args, **kwargs):
super().__init__(**kwargs)
self.model = model
self.map_ = map_
self.valid_api = False
self.client = None
self.w_api_alert = sw.Alert(
children=[cm.planet.default_api], type_="info"
).show()
self.w_api_key = sw.PasswordField(
label=cm.planet.insert_api, v_model=self.model.api_key
)
self.w_api_btn = sw.Btn(
cm.planet.check_btn,
small=True,
)
self.w_days_before = sw.NumberField(
label=cm.planet.label.days_before,
max_=5,
v_model=self.model.days_before,
disabled=True,
)
self.w_days_after = sw.NumberField(
label=cm.planet.label.days_after,
max_=5,
v_model=self.model.days_after,
disabled=True,
)
self.w_max_images = sw.NumberField(
label=cm.planet.label.max_images,
max_=6,
min_=1,
v_model=self.model.max_images,
disabled=True,
)
self.w_cloud_cover = v.Slider(
label=cm.planet.label.cloud_cover,
thumb_label=True,
v_model=self.model.cloud_cover,
disabled=True,
)
self.components = [
self.w_max_images,
self.w_days_after,
self.w_days_before,
self.w_cloud_cover,
]
self.panels = v.ExpansionPanels(
v_model=None,
class_="mt-2",
children=[CustomPanel(self.model, self.components)],
)
# Capture parameters and bind them to the model
self.model.bind(self.w_api_key, "api_key").bind(
self.w_days_before, "days_before"
).bind(self.w_days_after, "days_after").bind(
self.w_max_images, "max_images"
).bind(
self.w_cloud_cover, "cloud_cover"
)
# Button events
self.w_api_btn.on_event("click", self.validate_api_event)
self.children = [
v.CardTitle(children=[cm.planet.card_title]),
v.Flex(
class_="d-flex align-center mb-2",
row=True,
children=[self.w_api_key, self.w_api_btn],
),
self.w_api_alert,
self.panels,
]
# Interactions with Map
self.map_.reload_btn.on_click(self.add_planet_imagery)
# ui events
self.panels.observe(self._on_panel_change, "v_model")
def _on_panel_change(self, change):
"""Expand or shrunk content"""
if change["new"] == 0:
self.panels.children[0].expand_content()
else:
self.panels.children[0].shrunk_content()
def _toggle_planet_setts(self, on=True):
"""Toggle planet widgets"""
for w in self.components:
setattr(w, "disabled", False) if on else setattr(w, "disabled", True)
def validate_api_event(self, widget, change, data):
"""Event to validate the Planet API Key input and activate/deactivate
view widgets.
"""
api_key = self.w_api_key.v_model
planet_key = PlanetKey(api_key)
self.model.client = planet_key.client()
self.model.valid_api = planet_key.is_active()
if self.model.valid_api:
self.w_api_alert.add_msg(
cm.planet.success_api.msg, cm.planet.success_api.type
)
self._toggle_planet_setts(on=True)
else:
self.w_api_alert.add_msg(cm.planet.fail_api.msg, cm.planet.fail_api.type)
self._toggle_planet_setts(on=False)
def _get_items(self):
"""Get planet items based on the current coordinates"""
# Get current map coordinates
lat = self.map_.lat
lon = self.map_.lon
geom = json.loads(dumps(Point(lon, lat).buffer(0.001, cap_style=3)))
acqdate = self.model.aoi_alerts.loc[self.model.current_alert].acq_date
now = datetime.datetime.strptime(acqdate, "%Y-%m-%d")
days_before = self.model.days_before
days_after = self.model.days_after
start_date = now - datetime.timedelta(days=days_before)
future = now + datetime.timedelta(days=days_after + 1)
req = build_request(
geom, start_date, future, cloud_cover=self.model.cloud_cover / 100
)
return get_items("Alert", req, self.model.client)
def _prioritize_items(self, items):
"""Prioritize planet items"""
items = [
(
item["properties"]["item_type"],
item["id"],
pd.to_datetime(item["properties"]["acquired"]).strftime(
"%Y-%m-%d-%H:%M"
),
)
for item in items[1]
]
items_df = pd.DataFrame(data=items, columns=["item_type", "id", "date"])
items_df.sort_values(by=["item_type"])
items_df.drop_duplicates(subset=["date", "id"])
# If more than one day is selected, get one image per day.
if self.model.days_before:
items_df.date = pd.to_datetime(items_df.date)
items_df = (
items_df.groupby([items_df.date.dt.year, items_df.date.dt.day])
.nth(1)
.reset_index(drop=True)
)
if self.model.max_images:
items_df = items_df.head(self.model.max_images)
if len(items_df) == 1:
self.map_.w_state_bar.add_msg(
cm.map.status.one_image.format(len(items_df)), loading=False
)
elif len(items_df):
self.map_.w_state_bar.add_msg(
cm.map.status.number_images.format(len(items_df)), loading=False
)
else:
self.map_.w_state_bar.add_msg(cm.map.status.no_planet, loading=False)
return items_df
def add_planet_imagery(self, event=None):
"""Search planet imagery and add them to self
Args:
event (optional): If the button is clicked, we need to pass this
parameter, otherwise, we could trigger this function from
outside.
"""
        # Check that the Planet API key is valid
        # and that alert coordinates have already been selected.
if self.model.aoi_alerts is None:
self.map_.w_state_bar.add_msg(cm.map.status.no_alerts, loading=False)
return
if self.validate_state_bar():
self.map_.w_state_bar.add_msg(cm.map.status.searching_planet, loading=True)
items = self._get_items()
items_df = self._prioritize_items(items)
# remove all previous loaded assets
self.map_.remove_layers_if("attribution", "Imagery © Planet Labs Inc.")
for i, row in items_df.iterrows():
layer = TileLayer(
url=param.PLANET_TILES_URL.format(
row.item_type, row.id, self.model.api_key
),
name=f"{row.item_type}, {row.date}",
attribution="Imagery © Planet Labs Inc.",
)
layer.__setattr__("_metadata", {"type": row.item_type, "id": row.id})
if row.id not in [
layer._metadata["id"]
for layer in self.map_.layers
if hasattr(layer, "_metadata")
]:
self.map_ + layer
def validate_state_bar(self):
if not self.model.valid_api:
self.map_.w_state_bar.add_msg(cm.planet.no_key, loading=False)
elif not all((self.model.valid_api, self.map_.lat, self.map_.lon)):
self.map_.w_state_bar.add_msg(cm.planet.no_latlon, loading=False)
else:
return True
|
[
"ipyvuetify.CardTitle",
"sepal_ui.sepalwidgets.Btn",
"ipyvuetify.Flex",
"component.scripts.scripts.build_request",
"ipyvuetify.ExpansionPanelContent",
"pandas.DataFrame",
"shapely.geometry.Point",
"ipyvuetify.Slider",
"datetime.timedelta",
"ipyvuetify.Icon",
"component.scripts.scripts.get_items",
"sepal_ui.sepalwidgets.PasswordField",
"datetime.datetime.strptime",
"pandas.to_datetime",
"component.parameter.PLANET_TILES_URL.format",
"sepal_ui.sepalwidgets.Alert",
"component.scripts.scripts.PlanetKey",
"ipyvuetify.ExpansionPanelHeader",
"sepal_ui.sepalwidgets.NumberField"
] |
[((1103, 1127), 'ipyvuetify.ExpansionPanelHeader', 'v.ExpansionPanelHeader', ([], {}), '()\n', (1125, 1127), True, 'import ipyvuetify as v\n'), ((1182, 1236), 'ipyvuetify.ExpansionPanelContent', 'v.ExpansionPanelContent', ([], {'children': '[w for w in widgets]'}), '(children=[w for w in widgets])\n', (1205, 1236), True, 'import ipyvuetify as v\n'), ((2718, 2790), 'sepal_ui.sepalwidgets.PasswordField', 'sw.PasswordField', ([], {'label': 'cm.planet.insert_api', 'v_model': 'self.model.api_key'}), '(label=cm.planet.insert_api, v_model=self.model.api_key)\n', (2734, 2790), True, 'import sepal_ui.sepalwidgets as sw\n'), ((2839, 2878), 'sepal_ui.sepalwidgets.Btn', 'sw.Btn', (['cm.planet.check_btn'], {'small': '(True)'}), '(cm.planet.check_btn, small=True)\n', (2845, 2878), True, 'import sepal_ui.sepalwidgets as sw\n'), ((2944, 3053), 'sepal_ui.sepalwidgets.NumberField', 'sw.NumberField', ([], {'label': 'cm.planet.label.days_before', 'max_': '(5)', 'v_model': 'self.model.days_before', 'disabled': '(True)'}), '(label=cm.planet.label.days_before, max_=5, v_model=self.\n model.days_before, disabled=True)\n', (2958, 3053), True, 'import sepal_ui.sepalwidgets as sw\n'), ((3137, 3244), 'sepal_ui.sepalwidgets.NumberField', 'sw.NumberField', ([], {'label': 'cm.planet.label.days_after', 'max_': '(5)', 'v_model': 'self.model.days_after', 'disabled': '(True)'}), '(label=cm.planet.label.days_after, max_=5, v_model=self.model\n .days_after, disabled=True)\n', (3151, 3244), True, 'import sepal_ui.sepalwidgets as sw\n'), ((3328, 3443), 'sepal_ui.sepalwidgets.NumberField', 'sw.NumberField', ([], {'label': 'cm.planet.label.max_images', 'max_': '(6)', 'min_': '(1)', 'v_model': 'self.model.max_images', 'disabled': '(True)'}), '(label=cm.planet.label.max_images, max_=6, min_=1, v_model=\n self.model.max_images, disabled=True)\n', (3342, 3443), True, 'import sepal_ui.sepalwidgets as sw\n'), ((3540, 3653), 'ipyvuetify.Slider', 'v.Slider', ([], {'label': 'cm.planet.label.cloud_cover', 'thumb_label': '(True)', 'v_model': 'self.model.cloud_cover', 'disabled': '(True)'}), '(label=cm.planet.label.cloud_cover, thumb_label=True, v_model=self.\n model.cloud_cover, disabled=True)\n', (3548, 3653), True, 'import ipyvuetify as v\n'), ((5627, 5645), 'component.scripts.scripts.PlanetKey', 'PlanetKey', (['api_key'], {}), '(api_key)\n', (5636, 5645), False, 'from component.scripts.scripts import PlanetKey, build_request, get_items\n'), ((6459, 6506), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['acqdate', '"""%Y-%m-%d"""'], {}), "(acqdate, '%Y-%m-%d')\n", (6485, 6506), False, 'import datetime\n'), ((6739, 6824), 'component.scripts.scripts.build_request', 'build_request', (['geom', 'start_date', 'future'], {'cloud_cover': '(self.model.cloud_cover / 100)'}), '(geom, start_date, future, cloud_cover=self.model.cloud_cover /\n 100)\n', (6752, 6824), False, 'from component.scripts.scripts import PlanetKey, build_request, get_items\n'), ((6859, 6901), 'component.scripts.scripts.get_items', 'get_items', (['"""Alert"""', 'req', 'self.model.client'], {}), "('Alert', req, self.model.client)\n", (6868, 6901), False, 'from component.scripts.scripts import PlanetKey, build_request, get_items\n'), ((7297, 7358), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'items', 'columns': "['item_type', 'id', 'date']"}), "(data=items, columns=['item_type', 'id', 'date'])\n", (7309, 7358), True, 'import pandas as pd\n'), ((4503, 4547), 'ipyvuetify.CardTitle', 'v.CardTitle', ([], {'children': '[cm.planet.card_title]'}), 
'(children=[cm.planet.card_title])\n', (4514, 4547), True, 'import ipyvuetify as v\n'), ((4561, 4660), 'ipyvuetify.Flex', 'v.Flex', ([], {'class_': '"""d-flex align-center mb-2"""', 'row': '(True)', 'children': '[self.w_api_key, self.w_api_btn]'}), "(class_='d-flex align-center mb-2', row=True, children=[self.\n w_api_key, self.w_api_btn])\n", (4567, 4660), True, 'import ipyvuetify as v\n'), ((6624, 6660), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'days_before'}), '(days=days_before)\n', (6642, 6660), False, 'import datetime\n'), ((6684, 6723), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(days_after + 1)'}), '(days=days_after + 1)\n', (6702, 6723), False, 'import datetime\n'), ((7594, 7623), 'pandas.to_datetime', 'pd.to_datetime', (['items_df.date'], {}), '(items_df.date)\n', (7608, 7623), True, 'import pandas as pd\n'), ((2606, 2662), 'sepal_ui.sepalwidgets.Alert', 'sw.Alert', ([], {'children': '[cm.planet.default_api]', 'type_': '"""info"""'}), "(children=[cm.planet.default_api], type_='info')\n", (2614, 2662), True, 'import sepal_ui.sepalwidgets as sw\n'), ((6319, 6334), 'shapely.geometry.Point', 'Point', (['lon', 'lat'], {}), '(lon, lat)\n', (6324, 6334), False, 'from shapely.geometry import Point\n'), ((7107, 7153), 'pandas.to_datetime', 'pd.to_datetime', (["item['properties']['acquired']"], {}), "(item['properties']['acquired'])\n", (7121, 7153), True, 'import pandas as pd\n'), ((9374, 9446), 'component.parameter.PLANET_TILES_URL.format', 'param.PLANET_TILES_URL.format', (['row.item_type', 'row.id', 'self.model.api_key'], {}), '(row.item_type, row.id, self.model.api_key)\n', (9403, 9446), True, 'import component.parameter as param\n'), ((1744, 1806), 'ipyvuetify.Icon', 'v.Icon', ([], {'class_': '"""mr-1"""', 'x_small': '(True)', 'children': '[CHIPS[prop][1]]'}), "(class_='mr-1', x_small=True, children=[CHIPS[prop][1]])\n", (1750, 1806), True, 'import ipyvuetify as v\n')]
|