input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>dragg/reformat.py
import os
import sys
import json
import toml
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import itertools as it
import random
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.io as pio
import plotly
from prettytable import PrettyTable
from dragg.logger import Logger
class Reformat:
def __init__(self):
    """Discover simulation outputs and prepare plotting state.

    Reads DATA_DIR, OUTPUT_DIR and CONFIG_FILE from the environment (with
    defaults), loads the TOML config, then resolves the output folders and
    results files the plotting methods will draw from. Quits immediately if
    the outputs directory does not exist.
    """
    self.log = Logger("reformat")
    self.data_dir = os.path.expanduser(os.environ.get('DATA_DIR','data'))
    self.outputs_dir = os.path.expanduser(os.environ.get('OUTPUT_DIR','outputs'))
    if not os.path.isdir(self.outputs_dir):
        self.log.logger.error("No outputs directory found.")
        quit()
    self.config_file = os.path.join(self.data_dir, os.environ.get('CONFIG_FILE', 'config.toml'))
    self.config = self._import_config()
    # Derive search parameters from the config before locating folders/files.
    self.add_date_ranges()
    self.add_mpc_params()
    self.date_folders = self.set_date_folders()
    self.mpc_folders = self.set_mpc_folders()
    self.files = self.set_files()
    self.fig_list = None  # placeholder; not used by the visible methods
    # Timestamped destination folder used by save_images().
    self.save_path = os.path.join('outputs', 'images', datetime.now().strftime("%m%dT%H%M%S"))
def main(self):
    """Entry point: render the default set of figures."""
    # Home whose traces plot_single_home will draw.
    self.sample_home = "Crystal-RXXFA"
    # Plotting callables to run, in order.
    self.plots = [
        self.rl2baseline,
        self.plot_single_home,
    ]
    self.images = self.plot_all()
def plot_all(self, save_images=False):
    """Run every configured plotting function and return the figures.

    Each callable in self.plots receives a fresh two-axis subplot figure,
    mutates it, and returns it; every figure is shown as it is produced.
    The save_images flag is kept for interface compatibility.
    """
    rendered = []
    for plot_fn in self.plots:
        figure = make_subplots(specs=[[{"secondary_y": True}]])
        figure.update_layout(font=dict(size=65))
        figure.update_xaxes(title_standoff=80)
        figure.update_yaxes(title_standoff=60)
        figure = plot_fn(figure)
        figure.show()
        rendered.append(figure)
    return rendered
def save_images(self):
    """Write each rendered figure to a timestamped PNG under self.save_path.

    Best-effort: a figure that fails to export is logged and skipped.
    """
    if not os.path.isdir(self.save_path):
        os.makedirs(self.save_path)
    # Log once, not once per image (the original re-logged inside the loop).
    self.log.logger.info(f"Saving images of outputs to timestamped folder at {self.save_path}.")
    for img in self.images:
        try:
            path = os.path.join(self.save_path, f"{img.layout.title.text}.png")
            pio.write_image(img, path, width=1024, height=768)
        except Exception as e:
            # Narrowed from a bare except (which also swallowed KeyboardInterrupt)
            # and include the reason so failures are diagnosable.
            self.log.logger.error(f"Could not save plotly image(s) to outputs directory: {e}")
def add_date_ranges(self):
    """Parse the configured start/end datetimes into self.date_ranges.

    Values are singleton sets so set_date_folders can take a product over
    multiple candidate dates.
    """
    fmt = '%Y-%m-%d %H'
    sim = self.config['simulation']
    self.date_ranges = {
        "start_datetime": {datetime.strptime(sim['start_datetime'], fmt)},
        "end_datetime": {datetime.strptime(sim['end_datetime'], fmt)},
    }
def add_mpc_params(self):
    """Collect MPC/aggregator parameters from the config as singleton sets.

    The sets feed the permutation product in set_mpc_folders/set_files.
    Also records the configured named version in self.versions.
    """
    hems = self.config['home']['hems']
    sim = self.config['simulation']
    self.mpc_params = {
        "n_houses": {self.config['community']['total_number_homes']},
        "mpc_prediction_horizons": {hems['prediction_horizon']},
        "mpc_hourly_steps": {hems['sub_subhourly_steps']},
        "check_type": {sim['check_type']},
        "agg_interval": {self.config['agg']['subhourly_steps']},
        "solver": {hems['solver']},
    }
    self.versions = {sim['named_version']}
def set_date_folders(self):
    """Find existing output folders matching every configured date range.

    Returns a list of dicts (folder path, simulated hours, start datetime),
    ordered by end datetime descending. Exits if nothing matches.
    """
    keys, values = zip(*self.date_ranges.items())
    combos = [dict(zip(keys, combo)) for combo in it.product(*values)]
    combos.sort(key=lambda c: c['end_datetime'], reverse=True)
    found = []
    for combo in combos:
        start, end = combo['start_datetime'], combo['end_datetime']
        folder = os.path.join(
            self.outputs_dir,
            f"{start.strftime('%Y-%m-%dT%H')}_{end.strftime('%Y-%m-%dT%H')}"
        )
        self.log.logger.info(f"Looking for files in: {folder}.")
        if os.path.isdir(folder):
            n_hours = int((end - start).total_seconds() / 3600)
            found.append({"folder": folder, "hours": n_hours, "start_dt": start})
    if len(found) == 0:
        self.log.logger.error("No files found for the date ranges specified.")
        exit()
    return found
def set_mpc_folders(self):
    """Find existing MPC output subfolders for every parameter combination.

    Returns a list of dicts per existing folder: its path, the aggregator
    timestep count over the run, the aggregator interval, and the x-axis
    datetimes used for plotting.
    """
    folders = []
    keys, values = zip(*self.mpc_params.items())
    permutations = [dict(zip(keys, v)) for v in it.product(*values)]
    for date_info in self.date_folders:
        for params in permutations:
            mpc_folder = os.path.join(
                date_info["folder"],
                f"{params['check_type']}-homes_{params['n_houses']}"
                f"-horizon_{params['mpc_prediction_horizons']}"
                f"-interval_{60 // params['agg_interval']}"
                f"-{60 // params['mpc_hourly_steps'] // params['agg_interval']}"
                f"-solver_{params['solver']}"
            )
            if os.path.isdir(mpc_folder):
                timesteps = date_info['hours'] * params['agg_interval']
                minutes = 60 // params['agg_interval']
                x_lims = [date_info['start_dt'] + timedelta(minutes=minutes * x)
                          for x in range(timesteps)]
                # BUG FIX: the original tested `mpc_folder in temp`, but temp
                # held dicts, so the membership test never matched and
                # duplicate paths could be appended. Compare stored paths.
                if all(entry['path'] != mpc_folder for entry in folders):
                    folders.append({'path': mpc_folder,
                                    'agg_dt': params['agg_interval'],
                                    'ts': timesteps,
                                    'x_lims': x_lims})
    for entry in folders:
        print(entry['path'])
    return folders
def set_files(self):
    """Collect every results.json under each MPC folder and version,
    assigning each file a plot color and dash style.

    Returns a list of dicts: results path, display name, parent MPC-folder
    record, and the line color/dash used for this file's traces.
    """
    temp = []
    keys, values = zip(*self.mpc_params.items())
    permutations = [dict(zip(keys, v)) for v in it.product(*values)]
    # Four sequential color families (greens/purples/greys/oranges);
    # c selects the family, d the shade within it.
    color_families = [['rgb(204,236,230)','rgb(153,216,201)','rgb(102,194,164)','rgb(65,174,118)','rgb(35,139,69)','rgb(0,88,36)'],
                      ['rgb(191,211,230)','rgb(158,188,218)','rgb(140,150,198)','rgb(140,107,177)','rgb(136,65,157)','rgb(110,1,107)'],
                      ['rgb(217,217,217)','rgb(189,189,189)','rgb(150,150,150)','rgb(115,115,115)','rgb(82,82,82)','rgb(37,37,37)'],
                      ['rgb(253,208,162)','rgb(253,174,107)','rgb(253,141,60)','rgb(241,105,19)','rgb(217,72,1)','rgb(140,45,4)'],]
    c = 0
    d = 0
    dash = ["solid", "dash", "dot", "dashdot"]
    for j in self.mpc_folders:
        path = j['path']
        for i in permutations:
            for k in self.versions:
                dir = os.path.join(path, f"version-{k}")
                # NOTE(review): no isdir() guard — os.listdir raises if the
                # version folder is absent; confirm intended.
                for case_dir in os.listdir(dir):
                    file = os.path.join(dir, case_dir, "results.json")
                    if os.path.isfile(file):
                        name = f"{case_dir}, v = {k}"
                        set = {"results": file, "name": name, "parent": j, "color": color_families[c][d], "dash":dash[c]}
                        temp.append(set)
                        self.log.logger.info(f"Adding baseline file at {file}")
                # NOTE(review): the source's indentation was lost; the shade
                # index d is assumed to advance once per version directory,
                # and the family index c once per parameter permutation —
                # confirm against the original file.
                d = (d + 1) % len(color_families[c])
            c = (c + 1) % len(color_families)
    return temp
def get_type_list(self, type):
    """Return the set of home names of the given type present in ALL files.

    Args:
        type: Home type string to match (e.g. "base").

    Returns:
        Set of home names whose "type" equals `type` in every results file
        (intersection across files).
    """
    type_list = set()
    for i, file in enumerate(self.files):
        with open(file["results"]) as f:
            data = json.load(f)
        temp = set()
        for name, house in data.items():
            # Entries like "Summary" have no "type" key; skip them instead
            # of the original bare try/except.
            if isinstance(house, dict) and house.get("type") == type:
                temp.add(name)
        # BUG FIX: the original never incremented its counter, so it always
        # overwrote type_list with the LAST file's homes instead of
        # intersecting across files.
        if i == 0:
            type_list = temp
        else:
            type_list = type_list.intersection(temp)
    self.log.logger.info(f"{len(type_list)} homes found of type {type}: {type_list}")
    return type_list
def _import_config(self):
    """Load and return the TOML config; exit(1) with a log if it is missing."""
    if not os.path.exists(self.config_file):
        self.log.logger.error(f"Configuration file does not exist: {self.config_file}")
        sys.exit(1)
    with open(self.config_file, 'r') as f:
        return toml.load(f)
def plot_environmental_values(self, name, fig, summary, file, fname):
    """Add OAT, GHI and TOU-price traces, then the thermal comfort bounds."""
    x = file['parent']['x_lims']
    ts = file["parent"]["ts"]
    fig.add_trace(go.Scatter(x=x, y=summary["OAT"][0:ts], name=f"OAT (C)", visible='legendonly'))
    fig.add_trace(go.Scatter(x=x, y=summary["GHI"][0:ts], name=f"GHI", line={'color':'goldenrod', 'width':8}, visible='legendonly'))
    # TOU price goes on the secondary y-axis.
    fig.add_trace(go.Scatter(x=x, y=summary["TOU"][0:ts], name=f"TOU Price ($/kWh)", line_shape='hv', visible='legendonly'), secondary_y=True)
    return self.plot_thermal_bounds(fig, x, name, fname)
def plot_thermal_bounds(self, fig, x_lims, name, fname):
    """Shade the HVAC and water-heater temperature comfort bands for one home."""
    ah_file = os.path.join(self.outputs_dir, f"all_homes-{self.config['community']['total_number_homes']}-config.json")
    with open(ah_file) as f:
        all_homes = json.load(f)
    # Mirror the original lookup: the LAST entry whose name matches wins.
    home = all_homes
    for entry in all_homes:
        if entry['name'] == name:
            home = entry
    ones = np.ones(len(x_lims))
    fig.add_trace(go.Scatter(x=x_lims, y=home['hvac']['temp_in_min'] * ones, name=f"Tin_min", fill=None, showlegend=False, mode='lines', line_color='lightsteelblue'))
    fig.add_trace(go.Scatter(x=x_lims, y=home['hvac']['temp_in_max'] * ones, name=f"Tin_bounds", fill='tonexty' , mode='lines', line_color='lightsteelblue'))
    fig.add_trace(go.Scatter(x=x_lims, y=home['wh']['temp_wh_min'] * ones, name=f"Twh_min", fill=None, showlegend=False, mode='lines', line_color='pink'))
    fig.add_trace(go.Scatter(x=x_lims, y=home['wh']['temp_wh_max'] * ones, name=f"Twh_bounds", fill='tonexty' , mode='lines', line_color='pink'))
    return fig
def plot_base_home(self, name, fig, data, summary, fname, file, plot_price=True):
    """Add indoor-air and water-heater temperature traces for one home.

    plot_price is currently unused but kept for interface compatibility.
    """
    x = file['parent']['x_lims']
    dash = file['dash']
    fig.add_trace(go.Scatter(x=x, y=data["temp_in_opt"], name=f"Tin - {fname}", legendgroup='tin', line={'color':'blue', 'width':8, 'dash':dash}))
    fig.add_trace(go.Scatter(x=x, y=data["temp_wh_opt"], showlegend=True, legendgroup='twh', name=f"Twh - {fname}", line={'color':'firebrick', 'width':8, 'dash':dash}))
    fig.update_layout(
        legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.03, font=dict(size=65)),
        yaxis_title="Temperature (deg C)"
    )
    return fig
def plot_pv(self, name, fig, data, fname, file):
    """Add PV generation and curtailment traces (hidden until toggled)."""
    x = file['parent']['x_lims']
    fig.add_trace(go.Scatter(x=x, y=data["p_pv_opt"], name=f"Ppv (kW)", line_color='orange', line_shape='hv', visible='legendonly'))
    fig.add_trace(go.Scatter(x=x, y=data["u_pv_curt_opt"], name=f"U_pv_curt (kW) - {fname}", line_shape='hv', visible='legendonly'))
    return fig
def plot_battery(self, name, fig, data, fname, file):
    """Add battery state-of-charge, charge and discharge traces (hidden until toggled)."""
    x = file['parent']['x_lims']
    fig.add_trace(go.Scatter(x=x, y=data["e_batt_opt"], name=f"SOC (kW) - {fname}", line_shape='hv', visible='legendonly'))
    fig.add_trace(go.Scatter(x=x, y=data["p_batt_ch"], name=f"Pch (kW) - {fname}", line_shape='hv', visible='legendonly'))
    fig.add_trace(go.Scatter(x=x, y=data["p_batt_disch"], name=f"Pdis (kW) - {fname}", line_shape='hv', visible='legendonly'))
    return fig
def plot_single_home(self, fig, type=None):
    """Plot temperature (and PV/battery, if present) traces for one home.

    Args:
        fig: Plotly figure (with secondary y-axis) to draw on.
        type: Optional home type used to pick a random home when
            self.sample_home is unset; defaults to "base". (New optional
            parameter — backward compatible.)

    Returns:
        The figure, or None if the home is absent from a results file.
    """
    if self.sample_home is None:
        if type is None:
            type = "base"
            self.log.logger.warning("Specify a home type or name. Proceeding with home of type: \"base\".")
        # BUG FIX: the original called a nonexistent self._type_list and
        # logged an undefined `name`; also random.sample requires a
        # sequence (sets are rejected on Python 3.11+).
        type_list = self.get_type_list(type)
        self.sample_home = random.sample(list(type_list), 1)[0]
        self.log.logger.info(f"Proceeding with home: {self.sample_home}")
    flag = False
    for file in self.files:
        with open(file["results"]) as f:
            comm_data = json.load(f)
        try:
            data = comm_data[self.sample_home]
        except KeyError:
            self.log.logger.error(f"No home with name: {self.sample_home}")
            return
        type = data["type"]
        summary = comm_data["Summary"]
        if not flag:
            # Environmental traces are shared across files; add them once.
            fig = self.plot_environmental_values(self.sample_home, fig, summary, file, file["name"])
            flag = True
        fig.update_xaxes(title_text="Time of Day (hour)")
        fig.update_layout(title_text=f"{self.sample_home} - {type} type")
        fig = self.plot_base_home(self.sample_home, fig, data, summary, file["name"], file)
        if 'pv' in type:
            fig = self.plot_pv(self.sample_home, fig, data, file["name"], file)
        if 'batt' in type:
            fig = self.plot_battery(self.sample_home, fig, data, file["name"], file)
    return fig
def plot_all_homes(self, fig=None):
    """Render a single-home figure for each home in a fixed sample list.

    The fig argument is ignored; a fresh figure is created per home.
    """
    homes = ["Crystal-RXXFA","Myles-XQ5IA","Lillie-NMHUH","Robert-2D73X","Serena-98EPE","Gary-U95TS","Bruno-PVRNB","Dorothy-9XMNY","Jason-INS3S","Alvin-4BAYB",]
    for home in homes:
        self.sample_home = home  # plot_single_home reads self.sample_home
        home_fig = make_subplots(specs=[[{"secondary_y": True}]])
        home_fig.update_layout(font=dict(size=12))
        home_fig = self.plot_single_home(home_fig)
    return
def plot_baseline(self, fig):
    """Add aggregate-load traces (instant, cumulative, running average) per file."""
    for file in self.files:
        with open(file["results"]) as f:
            data = json.load(f)
        agg_load = data["Summary"]["p_grid_aggregate"]
        ts = len(agg_load) - 1
        x = file['parent']['x_lims']
        base_line = {'color': file['color'], 'width': 4}
        fig.add_trace(go.Scatter(x=x, y=agg_load, name=f"Agg Load - {file['name']}", line_shape='hv', line=dict(base_line, dash='solid')))
        fig.add_trace(go.Scatter(x=x, y=np.cumsum(np.divide(agg_load, file['parent']['agg_dt'])), name=f"Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly', line=dict(base_line, dash='dash')))
        fig.add_trace(go.Scatter(x=x, y=np.divide(np.cumsum(agg_load), np.arange(ts + 1) + 1), name=f"Avg Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly', line=dict(base_line, dash='dashdot')))
    return fig
def plot_typ_day(self, fig):
    """Plot each file's average ("composite") daily aggregate-load profile.

    Loads are truncated to whole days, reshaped to (days, steps-per-day),
    and averaged across days. Files with less than one day of data add no
    trace.
    """
    for file in self.files:
        with open(file["results"]) as f:
            data = json.load(f)
        name = file["name"]
        steps_per_day = 24 * file['parent']['agg_dt']
        loads = np.array(data["Summary"]["p_grid_aggregate"])
        # Truncate to an integer number of days before reshaping.
        loads = loads[:len(loads) // steps_per_day * steps_per_day]
        if len(loads) > 24:
            composite_day = np.average(loads.reshape(-1, steps_per_day), axis=0)
            # BUG FIX: the original referenced undefined locals `clr` and
            # `dash` here (NameError on any file with >= 1 day of data);
            # use this file's assigned color/dash. Dead per-file
            # computations (daily max/min/range/std, unused counters and
            # rl_setpoint clipping) were removed.
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=composite_day, name=f"{name}", opacity=0.5, showlegend=True, line={'color': file['color'], 'width': 8, 'dash': file['dash']}))
    fig.update_layout(legend=dict(yanchor="top", y=0.45, xanchor="left", x=0.7))
    fig.update_layout(
        font=dict(size=65),
        title="Avg Daily Load Profile",
        xaxis_title="Time of Day",
        yaxis_title="Agg. Demand (kW)"
    )
    fig.update_xaxes(title_standoff=80)
    fig.update_yaxes(title_standoff=60)
    return fig
def plot_max_and_12hravg(self, fig):
for file in self.files:
# all_avgs.add_column()
clr = file['color']
with open(file["results"]) as f:
data = json.load(f)
name = file["name"]
ts = len(data['Summary']['p_grid_aggregate'])-1
rl_setpoint = data['Summary']['p_grid_setpoint']
if 'clipped' in file['name']:
rl_setpoint = np.clip(rl_setpoint, 45, 60)
loads = np.array(data["Summary"]["p_grid_aggregate"])
loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']]
if len(loads) > 24:
daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_range_loads = np.subtract(daily_max_loads, daily_min_loads)
daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))]
daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_std_loads = np.repeat(np.std(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads, name=f"{name} - Daily Max", line_shape='hv', opacity=1, legendgroup="first", line={'color':'firebrick', 'dash':dash, 'width':8}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"{name} - 12 Hr Avg", opacity=0.5, legendgroup="second", line={'color':'blue', 'dash':dash, 'width':8}))
fig.update_layout(legend=dict(
yanchor="top",
y=0.8,
xanchor="left",
x=0.7
))
fig.update_layout(
font=dict(
size=65,
),
title="12 Hour Avg and | |
"Laotian",
"Lao People's Democratic Republic",
        "Laos People's Democratic Republic"
],
"latvia": [
"Latvia",
"Latvian",
"Latvians"
],
"lebanon": [
"Lebanon",
"Lebanese"
],
"lesotho": [
"Lesotho",
"Basotho",
"Basothos"
],
"liberia": [
"Liberia",
"Liberian",
"Liberians"
],
"libya": [
"Libya",
"Libyan",
"Libyans"
],
"liechtenstein": [
"Liechtenstein",
"Liechtensteiner",
"Liechtensteiners"
],
"lithuania": [
"Lithuania",
"Lithuanian",
"Lithuanians"
],
"luxembourg": [
"Luxembourg",
"Luxembourger",
"Luxembourgers"
],
"madagascar": [
"Madagascar",
"Malagasy",
"Malagsys"
],
"malawi": [
"Malawi",
"Malawian",
"Malawians"
],
"malaysia": [
"Malaysia",
"Malaysian",
"Malaysians"
],
"maldives": [
"Maldives",
"Maldivan",
"Maldivans"
],
"mali": [
"Mali",
"Malian",
"Malians"
],
"malta": [
"Malta",
"Maltan",
"Maltans"
],
"marshallislands": [
"Marshall Islands",
"Marshallese"
],
"mauritania": [
"Mauritania",
"Mauritanian",
"Mauritanians"
],
"mauritius": [
"Mauritius",
"Mauritian",
"Mauritians",
"Island of Mauritius",
        "Island of Mauritious"
],
"mexico": [
"Mexico",
"Mexican",
"Mexicans"
],
"micronesia": [
"Federated States of Micronesia",
"Micronesia",
"Micronesian",
"Micronesians"
],
"moldovan1republicpridnestrovian": [
"Republic of Moldova",
"Moldova",
"Moldovan",
"Moldovans"
],
"moldova2": [
"Pridnestrovian Moldovan Republic",
"Transnistria",
"Transnistrian",
"Transnistrians"
],
"monaco": [
"Monaco",
"Monacan",
"Monacans"
],
"mongolia": [
"Mongolia",
"Mongolian",
"Mongolians"
],
"montenegro": [
"Montenegro",
"Montenegrin",
"Montenegrins"
],
"morocco": [
"Morocco",
"Moroccan",
"Moroccans"
],
"mozambique": [
"Mozambique",
"Mozambican"
],
"myanmar": [
"Myanmar",
"Myanmarn",
"Myanmarns",
"Burma",
"Burman",
"Burmans",
"Burmese"
],
"namibia": [
"Namibia",
"Namibian",
"Namibians"
],
"nauru": [
"Nauru",
"Naurun",
"Nauruns",
"Island of Nauru"
],
"nepal": [
"Nepal",
"Nepalese"
],
"netherlands": [
"Netherlands",
"Dutch",
"Holland"
],
"newzealand": [
"New Zealand",
"New Zealander",
"New Zealanders",
"Kiwi",
"Kiwis",
"Aotearoa",
"Aotearoan",
"Aotearoans"
],
"nicaragua": [
"Nicaragua",
"Nicaraguan",
"Nicaraguans"
],
"niger": [
"Niger",
"Nigerien"
],
"nigeria": [
"Nigeria",
"Nigerian",
"Nigerians"
],
"northcyprus": [
"North Cyprus",
"Northern Cypriot",
"Kuzey Kibris"
],
"northmacedonia": [
"North Macedonia",
"Northern Macedonian",
"North Macedonians",
"Rep. of Macedonia",
"Macedonia",
"Macedonian",
"Republic of North Macedonia",
"The former Yugoslav Republic of Macedonia"
],
"norway": [
"Norway",
"Norwegian",
"Norwegians"
],
"oman": [
"Oman",
"Omani",
"Omanis"
],
"pakistan": [
"Pakistan",
"Pakistani",
"Pakistanis"
],
"palau": [
"Palau",
"Palauan",
"Palauns"
],
"palestine": [
"State of Palestine",
"Palestine",
"Palestinian",
"Palestinians",
"Palestine, State of"
],
"panama": [
"Panama",
"Panamanian",
"Panamanians"
],
"papuanewguinea": [
"Papua New Guinea",
"Papua New Guinean",
"Papua New Guineans"
],
"paraguay": [
"Paraguay",
"Paraguayan",
"Paraguayans"
],
"peru": [
"Peru",
"Peruvian",
"Peruvians"
],
"philippines": [
"Philippines",
"Filipino",
"Filipinos"
],
"poland": [
"Poland",
"Polish",
"Poles",
"Pole"
],
"portugal": [
"Portugal",
"Portuguese",
"Portugalns"
],
"puntland": [
"Puntland",
"Puntlandn",
"Puntlandns"
],
"qatar": [
"Qatar",
"Qatari",
"Qataris"
],
"romania": [
"Romania",
"Romanian",
"Romanians"
],
"russia": [
"Russian Federation",
"Russian Federations",
"Russia",
"Russian",
"Russians"
],
"rwanda": [
"Rwanda",
"Rwandan",
"Rwandans"
],
"sahrawiarabdemocraticrepublic": [
"Sahrawi Arab Democratic Republic",
"Sahrawi Arab Democratic Republicn",
"Sahrawi Arab Democratic Republicns",
"Sahrawi Republic",
"Sahrawi Republican",
"Sahrawi Republicans",
"Western Sahara",
"Western Saharan",
"Western Saharans"
],
"saintkittsandnevis": [
"Saint Kitts and Nevis",
"Saint Kitts and Nevian",
"Saint Kitts and Nevians",
"St. Kitts",
"Nevis"
],
"saintlucia": [
"Saint Lucia",
"Saint Lucian",
"Saint Lucians",
"St. Lucia"
],
"saintvincentandthegrenadines": [
"Saint Vincent and the Grenadines",
"Vincentian",
"Saint Vincent and the Grenadinesns",
"St. Vincent",
"Grenadines"
],
"samoa": [
"Samoa",
"Samoan",
"Samoans"
],
"sanmarino": [
"San Marino",
"San Marinon",
"San Marinons"
],
"saotomeandprincipe": [
"S\u00e3o Tom\u00e9 and Pr\u00edncipe",
"S\u00e3o Tom\u00e9 and Pr\u00edncipen",
"S\u00e3o Tom\u00e9 and Pr\u00edncipens",
"Sao Tome and Principe",
"Sao Tome and Principen",
"Sao Tome and Principens",
"Sao Tome"
],
"saudiarabia": [
"Saudi Arabia",
"Saudi Arabian",
"Saudi Arabians"
],
"senegal": [
"Senegal",
"Senegalese"
],
"serbia": [
"Serbia",
"Serbian",
"Serbians",
"Serbs"
],
"seychelles": [
"Seychelles",
"Seychellois"
],
"sierraleone": [
"Sierra Leone",
"Sierra Leonen",
"Sierra Leonens"
],
"singapore": [
"Singapore",
"Singaporean",
"Singaporeans"
],
"slovakia": [
"Slovakia",
"Slovakian",
"Slovakians",
"Slovaks"
],
"slovenia": [
"Slovenia",
"Slovenian",
"Slovenians"
],
"solomonislands": [
"Solomon Islands",
"Solomon Islander",
"Solomon Islanders"
],
"somalia": [
"Somalia",
"Somalian",
"Somalians"
],
"somaliland": [
"Somaliland",
"Somalilandn",
"Somalilandns"
],
"southafrica": [
"South Africa",
"South African",
"South Africans",
"Republic of South Africa",
"Republic of South African",
"Republic of South Africans",
"RSA"
],
"southkorea": [
"South Korea",
"South Korean",
"South Koreans"
],
"southossetia": [
"South Ossetia",
"South Ossetian",
"South Ossetians",
"the State of Alania",
"Alania",
"the State of Alanian",
"the State of Alanians"
],
"southsudan": [
"South Sudan",
"South Sudanese"
],
"spain": [
"Spain",
"Spanish",
"Spaniard",
"Spaniards",
"Castillians"
],
"srilanka": [
"Sri Lanka",
"Sri Lankan",
"Sri Lankans"
],
"sudan": [
"Sudan",
"Sudanese",
"Sudanns"
],
"suriname": [
"Suriname",
"Surinamer",
"Surinamers"
],
"sweden": [
"Sweden",
"Swedish",
"Swedes"
],
"switzerland": [
"Switzerland",
"Swiss",
"Suisses",
"Suisse"
],
"syria": [
"Syrian Arab Republic",
"Syrian Arab Republican",
"Syrian Arab Republicans",
"Syria",
"Syrian",
"Syrians"
],
"tajikistan": [
"Tajikistan",
"Tajik",
"Tajiks"
],
"tanzania": [
"United Republic of Tanzania",
"Tanzania",
"Tanzanian",
"Tanzanians",
"Zanzibar"
],
"thailand": [
"Thailand",
"Thai"
],
"tibet": [
"Tibet",
"Tibetan",
"Tibetans"
],
"togo": [
"Togo",
"Togolese"
],
"tonga": [
"Tonga",
"Tongan",
"Tongans"
],
"trinidadandtobago": [
"Trinidad and Tobago",
"Trinidadian",
"Trinidadians",
"Trinidad",
"Tobago"
],
"tunisia": [
"Tunisia",
"Tunisian",
"Tunisians"
],
"turkey": [
"Turkey",
"Turkish",
"Turk",
"Turks"
],
"turkmenistan": [
"Turkmenistan",
"Turkmen",
"Turkmens"
],
"tuvalu": [
"Tuvalu",
"Tuvaluan",
"Tuvaluns"
],
"uganda": [
"Uganda",
"Ugandan",
"Ugandans"
],
"ukraine": [
"Ukraine",
"Ukrainian",
"Ukrainians"
],
"unitedarabemirates": [
"United Arab Emirates",
"Emirian",
"Emirians",
"UAE"
],
"unitedkingdom": [
"United Kingdom",
"British",
"Brits",
"Great Britain",
"Northern Ireland"
],
"unitedstates": [
"United States",
"American"
],
"uruguay": [
"Uruguay",
"Uruguayan",
"Uruguayans"
],
"uzbekistan": [
"Uzbekistan",
"Uzbekistani",
"uzbeks",
"Uzbeks"
],
"vanuatu": [
"Vanuatu",
"Ni-Vanuatu",
"Ni-Vanuatus",
"Vanuatus"
],
"vaticancity": [
"Holy See",
"Vanticanien",
"Vanticaniens",
"Vatican",
"Vatican City"
],
"venezuela": [
"Venezuela",
"Venezuelan",
"Venezuelans",
"Bolivarian Republic of Venezuela",
"Bolivarian Republic of Venezuelan",
"Bolivarian Republic of Venezuelans"
],
"vietnam": [
"Viet Nam",
"Viet Nams",
"Viet Namn",
"Vietnam",
"Vietnamese",
"Kinh"
],
"yemen": [
"Yemen",
"Yemeni"
],
"zambia": [
"Zambia",
"Zambian",
"Zambians"
],
"zimbabwe": [
"Zimbabwe",
"Zimbabwean",
"Zimbabweans"
],
"africa": [
"Africa",
"AFR"
],
"arab_world": [
"Arab world"
],
"asia_pacific": [
"Asia-Pacific"
],
"caribbean": [
"Caribbean"
],
"central_africa": [
"Central Africa"
],
"central_america": [
"Central America"
],
"central_asia": [
"Central Asia"
],
"east_africa": [
"East Africa"
],
"east_africa_community": [
"East Africa Community",
"EAC"
],
"east_asia": [
"East Asia"
],
"east_asia_and_pacific": [
"East Asia and Pacific",
"EAP"
],
"eastern_africa": [
"Eastern Africa"
],
"eastern_asia": [
"Eastern Asia"
],
"europe_and_central_asia": [
"Europe and Central Asia",
"ECA"
],
"horn_of_africa": [
"Horn of Africa"
],
"latin_america": [
"Latin America"
],
"latin_america_and_caribbean": [
"Latin America and Caribbean",
"LAC"
],
"???": [
"SRI"
],
"middle_africa": [
"Middle Africa"
],
"middle_east": [
"Middle East"
],
"middle_east_and_north_africa": [
"Middle East and North Africa",
"MENA"
],
"north_africa": [
"North Africa"
],
"northern_africa": [
"Northern Africa"
],
"pacific": [
"Pacific"
],
"polynesia": [
"Polynesia"
],
"small_island_development_states": [
"Small Island Development States",
"SIDS"
],
"southern_african_development_community": [
"Southern African Development Community",
"SADC"
],
"south_america": [
"South America"
],
"south_asia": [
"South Asia"
],
"south_asia_region": [
"South Asia Region",
"SAR"
],
"southeast_asia": [
"Southeast Asia"
],
"southeastern_asia": [
"Southeastern Asia"
],
"southern_africa": [
"Southern Africa"
],
"southern_asia": [
"Southern Asia"
],
"sub_saharan_africa": [
"SSA",
"Sub-Saharan Africa"
],
"west_africa": [
"West Africa"
],
"western_africa": [
"Western Africa"
],
"western_asia": [
"Western Asia"
],
"conflict_affected_areas": [
"conflict affected areas",
"conflict-affected areas"
],
"conflict_affected_regions": [
"conflict affected regions",
"conflict-affected regions",
"conflict zones"
],
"developing_context": [
"developing context"
],
"developing_countries": [
"developing countries"
],
"developing_country": [
"developing country"
],
"developing_economy": [
"developing economies",
"developing economy"
],
"developing_market_countries": [
"developing market countries",
"developing market country"
],
"developing_markets": [
"developing markets"
],
"developing_nation": [
"developing nation"
],
"developing_region": [
"developing region"
],
"developing_state": [
"developing state"
],
"developing_world": [
"developing world"
],
"emergent_nation": [
"emergent nation",
"emergent nations"
],
"emerging_economies": [
"emerging economies"
],
"emerging_market_countries": [
"emerging market countries",
"emerging market country"
],
"emerging_nation": [
"emerging nation"
],
"emerging_world": [
"emerging world"
],
"fragile_and_conflict_affected_areas": [
"fragile and conflict affected areas"
],
"fragile_and_conflict_affected_regions": [
"fragile and conflict affected regions"
],
"fragile_areas": [
"fragile areas"
],
"fragile_contexts": [
"fragile contexts"
],
"fragile_regions": [
"fragile regions"
],
"global_south": [
"Global South",
"global south"
],
"growing_economies": [
"growing economies"
],
"less_developed_countries": [
"less developed countries",
"less developed country"
],
"lmic": [
"LMIC",
"LMICs",
"low and middle income countries"
],
"low_income_countries1": [
"low income countries",
"low income country"
],
"low_income_environment1": [
"low income | |
[self.target_variable_name]:
mice_data.set_imputer(
var, model_class=discrete_model.Logit, fit_kwds={"disp": False}
)
return mice_data
def _run_mice_loop(self, split_i: int, fold: str, mice_data: MICEData):
    """Run the MICE chain and store each retained imputation.

    'Burn-in' and 'skip' imputations are discarded: the chain advances
    n_mice_burn_in times before the first stored imputation, and
    n_mice_skip extra updates are discarded between stored ones.
    """
    for _ in range(self.n_mice_burn_in):
        mice_data.update_all()
    for imputation_i in range(self.n_mice_imputations):
        # Every stored imputation after the first advances the chain
        # (n_mice_skip + 1) times so consecutive draws are less correlated.
        if imputation_i > 0:
            mice_data.update_all(self.n_mice_skip + 1)
        self._store_imputed(split_i, fold, imputation_i, mice_data.data)
def _store_imputed(
self, split_i: int, fold: str, imputation_i: int, imp_df: pd.DataFrame
):
"""Store just the imputed values from a single MICE iteration."""
self.imputed[fold][split_i][imputation_i] = {}
for var_name, missing_i in self.missing_i[fold][split_i].items():
self.imputed[fold][split_i][imputation_i][var_name] = (
imp_df.iloc[
missing_i,
imp_df.columns.get_loc(var_name)
].copy().values
)
def winsorize_after_get_imputed_variables(
    self, fold_name: str, split_i: int, imp_i: int
) -> pd.DataFrame:
    """Extends .get_imputed_variables() in base class with winsorization."""
    imputed = self.get_imputed_variables(fold_name, split_i, imp_i)
    winsorized, _ = winsorize_novel(
        imputed, thresholds=self.winsor_thresholds[split_i]
    )
    return winsorized
def _find_missing_indices(
    self,
    split_i: int,
    train: pd.DataFrame,
    test: pd.DataFrame,
    variable_names: List[str]
):
    """Record per-variable missing-value indices for one train-test split.

    Abstract hook: subclasses must override (see CategoricalImputer, which
    calls it from _single_train_test_split).
    """
    raise NotImplementedError
class CategoricalImputer(Imputer):
"""Imputes missing values of non-binary categorical variables, using
output of earlier MICE."""
def __init__(
    self,
    df: pd.DataFrame,
    splitter_winsor_mice: SplitterWinsorMICE,
    cat_vars: List[str],
    random_seed,
):
    """Set up categorical imputation on top of earlier MICE output.

    Args:
        df: All continuous variables (except lactate- and albumin-related
            ones), all binary variables, the non-binary discrete variables
            imputed at this stage, and the target (mortality labels).
            Missing values are still present, i.e. nothing imputed yet.
        splitter_winsor_mice: Pickled SplitterWinsorMICE object holding
            MICE results for the continuous (except lactate/albumin) and
            binary variables.
        cat_vars: Non-binary categorical variables to impute here.
        random_seed: For reproducibility.
    """
    super().__init__(
        df,
        splitter_winsor_mice.tts,
        splitter_winsor_mice.target_variable_name
    )
    self.swm = splitter_winsor_mice
    self.cat_vars = cat_vars
    self.random_seed = random_seed
def impute(self):
    """Impute missing values for every non-binary categorical variable,
    in every MICE imputation, in every train-test split."""
    for split_i in pb(range(self.tts.n_splits), prefix="Split iteration"):
        self._single_train_test_split(split_i)
def _single_train_test_split(self, split_i: int):
"""Impute missing values for every non-binary categorical variable,
in every MICE imputation, for a single train-test split."""
train, test = self._split_then_join_Xy(split_i)
self._find_missing_indices(split_i, train, test, self.cat_vars)
self._initialise_subdicts_for_imputation_storage(split_i)
for mice_imp_i in range(self.swm.n_mice_imputations):
self._single_mice_imp(
split_i=split_i,
mice_imp_i=mice_imp_i,
train_cat_vars=train[self.cat_vars]
)
def _initialise_subdicts_for_imputation_storage(self, split_i: int):
for fold_name in self.imputed.keys():
self.imputed[fold_name][split_i] = {}
for mice_imp_i in range(self.swm.n_mice_imputations):
self.imputed[fold_name][split_i][mice_imp_i] = {}
def _single_mice_imp(
self,
split_i: int,
mice_imp_i: int,
train_cat_vars: pd.DataFrame,
):
"""Impute missing values for every non-binary categorical variable, for
a single MICE imputation, in a single train-test split."""
cont_bin_target_vars = {
"train": self.swm.get_imputed_variables(
"train", split_i, mice_imp_i
),
"test": self.swm.get_imputed_variables(
"test", split_i, mice_imp_i
),
}
cont_bin_target_vars = self._scale(cont_bin_target_vars)
self._impute_all_cat_vars(
split_i=split_i,
mice_imp_i=mice_imp_i,
imp_folds_features=cont_bin_target_vars,
imp_train_targets=train_cat_vars
)
def _scale(self, dfs: Dict[str, pd.DataFrame]) -> Dict[str, pd.DataFrame]:
scalers = self._fit_scalers(dfs["train"])
for fold_name in dfs.keys():
dfs[fold_name] = self._scale_fold(
fold=dfs[fold_name],
scalers=scalers
)
return dfs
def _fit_scalers(self, train: pd.DataFrame) -> RobustScaler:
    """Fit a RobustScaler on the train fold's continuous features.

    Scaling is needed so the fast sklearn solvers for multinomial
    logistic regression behave well.
    """
    # RobustScaler.fit returns the scaler itself, so this is one object.
    return RobustScaler().fit(train.loc[:, self.swm.cont_vars].values)
def _scale_fold(
    self,
    fold: pd.DataFrame,
    scalers: RobustScaler
) -> pd.DataFrame:
    """Scale continuous features with the already-fitted scalers.

    BUG FIX: the original called scalers.fit_transform() here, which
    silently RE-FIT the scaler on each fold — leaking test-fold statistics
    and discarding the train-fold fit produced by _fit_scalers().
    transform() applies the train-fitted scaling as intended.
    """
    fold.loc[:, self.swm.cont_vars] = scalers.transform(
        fold.loc[:, self.swm.cont_vars].values
    )
    return fold
def _impute_all_cat_vars(
self,
split_i: int,
mice_imp_i: int,
imp_folds_features: Dict[str, pd.DataFrame],
imp_train_targets: pd.DataFrame,
):
"""Impute values for all non-binary categorical variables, in
a single MICE imputation, in a single train-test split.
Args:
split_i: Index of the train-test split
mice_imp_i: Index of the MICE imputation
imp_folds_features: Keys are 'train' and 'test'. Values are the
(partly MICE-imputed) continuous, binary and target variables
from that fold, which will be used as the imputation model's
features
imp_train_targets: All non-binary categorical variables from the
train fold
"""
imputers = self._fit_imputers(
split_i=split_i,
imp_train_features=imp_folds_features["train"],
imp_train_targets=imp_train_targets
)
for fold_name in imp_folds_features:
for cat_var_name in imp_train_targets.columns:
self._impute_single_cat_var(
split_i=split_i,
mice_imp_i=mice_imp_i,
fold_name=fold_name,
cat_var_name=cat_var_name,
imp_features=imp_folds_features[fold_name],
imputer=imputers[cat_var_name]
)
def _fit_imputers(
self,
split_i: int,
imp_train_features: pd.DataFrame,
imp_train_targets: pd.DataFrame,
) -> Dict[str, Union[LogisticRegression, None]]:
"""Fit imputation models for all non-binary categorical variables, in
a single MICE imputation, in a single train-test split.
Args:
split_i: Index of the train-test split
imp_train_features: The (partly MICE-imputed) continuous, binary and
target variables used as the imputation model's features
imp_train_targets: All non-binary categorical variables
"""
imputers = {}
for cat_var_name in self.cat_vars:
imputers[cat_var_name] = self._fit_single_imputer(
split_i=split_i,
imp_train_features=imp_train_features,
imp_train_target=imp_train_targets[cat_var_name]
)
return imputers
    def _fit_single_imputer(
        self,
        split_i: int,
        imp_train_features: pd.DataFrame,
        imp_train_target: pd.Series,
    ) -> Union[LogisticRegression, None]:
        """Fit imputation model of a single non-binary categorical variable, in
        a single MICE imputation, in a single train-test split. We only fit
        the model if there is at least one missing value of cat_var in the
        train or test fold (otherwise there would be nothing for the model
        to impute later). Returns None if imputation model not fit.

        Args:
            split_i: Index of the train-test split
            imp_train_features: The (partly MICE-imputed) continuous, binary and
                target variables used as the imputation model's features
            imp_train_target: The non-binary categorical variable being modelled
        """
        # Fit only if this variable has at least one missing value in
        # either fold of this split.
        if any((
            self.missing_i["train"][split_i][imp_train_target.name].shape[0],
            self.missing_i["test"][split_i][imp_train_target.name].shape[0],
        )):
            # Rows of the train fold where the target is observed.
            # NOTE(review): obs_i holds index *labels*, but is passed to
            # .iloc below — correct only if the fold has a default
            # RangeIndex (labels == positions). TODO confirm upstream.
            obs_i = imp_train_features.index.difference(
                self.missing_i["train"][split_i][imp_train_target.name]
            )
            # NOTE(review): penalty="none" (the string) was deprecated in
            # scikit-learn 1.2 and removed in 1.4; newer versions require
            # penalty=None — pin the sklearn version or update this.
            imputer = LogisticRegression(
                penalty="none",
                solver="sag",
                max_iter=3000,
                multi_class="multinomial",
                n_jobs=-1,
                random_state=self.random_seed,
            )
            imputer.fit(
                imp_train_features.iloc[obs_i].values,
                imp_train_target.iloc[obs_i].values
            )
            return imputer
        # Falls through to an implicit None when there is nothing to impute.
    def _impute_single_cat_var(
        self,
        split_i: int,
        mice_imp_i: int,
        fold_name: str,
        cat_var_name: str,
        imp_features: pd.DataFrame,
        imputer: Union[LogisticRegression, None]
    ):
        """Impute the missing values of one non-binary categorical variable
        in one fold, storing the result in self.imputed.

        pred_probs is of shape (n_missing_values, n_classes), where each row
        corresponds to a missing value of cat_var_name, and each columns is
        the predicted probability that the missing value is that class.
        Rather than imputing each missing value using idxmax(pred_probs), we
        impute each missing value probabilistically using pred_probs."""
        missing_i = self.missing_i[fold_name][split_i][cat_var_name]
        if missing_i.shape[0]:
            # imputer is only None when neither fold had missing values for
            # this variable, so it is non-None whenever this branch runs.
            pred_probs = imputer.predict_proba(
                imp_features.loc[missing_i].values
            )
            # Offset the seed by the MICE iteration index so each imputation
            # draws a different (but reproducible) sample.
            rnd = np.random.RandomState(self.random_seed + mice_imp_i)
            # NOTE(review): empty_like(missing_i) gives pred_classes the
            # dtype of the *index* array — correct only if the categorical
            # classes are integer-encoded. TODO confirm.
            pred_classes = np.empty_like(missing_i)
            for i in range(missing_i.shape[0]):
                pred_classes[i] = rnd.choice(
                    imputer.classes_,
                    p=pred_probs[i, :]
                )
            self.imputed[fold_name][split_i][mice_imp_i][cat_var_name] = (
                pred_classes
            )
def get_imputed_df(
self, fold_name: str, split_i: int, imp_i: int
) -> pd.DataFrame:
"""Constructs DataFrame containing the continuous and binary variables
(except those related to lactate and albumin), the target variable
and the non-binary categorical variables, including their imputed
missing values for a given fold, train-test split and imputation
(MICE and categorical imputation) iteration."""
cont_bin_target_df = self.swm.winsorize_after_get_imputed_variables(
fold_name=fold_name,
split_i=split_i,
imp_i=imp_i
)
cat_df = self.get_imputed_variables(
fold_name=fold_name,
split_i=split_i,
imp_i=imp_i
)
return cont_bin_target_df.join(cat_df)
class LactateAlbuminImputer(Imputer):
"""Impute missing values of lactate or albumin. There is no simple way
to random seed GAM model fitting so imputation models will be different
on each training iteration."""
    def __init__(
        self,
        df: pd.DataFrame,
        categorical_imputer: CategoricalImputer,
        lacalb_variable_name: str,
        imputation_model_factory: Callable[
            [pd.Index, Dict[str, Tuple], str, bool], LinearGAM],
        winsor_quantiles: Tuple[float, float],
        multi_cat_vars: Dict[str, Tuple],
        indication_var_name: str,
        mortality_as_feature: bool,
        random_seed):
        """
        Args:
            df: Must just contain the variable to impute, plus the mortality
                variable (latter needed for compatibility with Splitter).
            categorical_imputer: With pre-fit imputers for all categorical
                variables
            lacalb_variable_name: Name of lactate or albumin variable
            imputation_model_factory: Function which returns specified (but not
                yet fitted) models of the transformed imputation target
            winsor_quantiles: Lower and upper quantiles to winsorize
                continuous variables at by default
            multi_cat_vars: Keys are non-binary discrete variables, values are
                the categories (excluding null values) prior to integer encoding
            indication_var_name: Name of the indication column
            mortality_as_feature: If True, uses mortality labels as a feature
                in this lactate / albumin imputation model (providing that
                mortality is a feature in the GAM specification in
                imputation_model_factory)
            random_seed: Used for QuantileTransformer
        """
        super().__init__(
            df,
            categorical_imputer.tts,
            categorical_imputer.target_variable_name
        )
        self.cat_imputer = categorical_imputer
        self.cont_vars = NOVEL_MODEL_VARS["cont"]  # TODO: Remove if unused?
        self.lacalb_variable_name = lacalb_variable_name
        self.model_factory = imputation_model_factory
        self.winsor_quantiles = winsor_quantiles
        self.multi_cat_vars = multi_cat_vars
        self.ind_var_name = indication_var_name
        self.mortality_as_feature = mortality_as_feature
        self.random_seed = random_seed
        self.imputed = None  # Override base class. This var shouldn't be used
        self._check_df(df)
        # Per-split state populated during fit(), all keyed by the
        # train-test split index:
        self.winsor_thresholds: Dict[
            int,  # train-test split index
            Tuple[float, float]
        ] = {}
        self.transformers: Dict[
            int,  # train-test split index
            QuantileTransformer
        ] = {}
        self.imputers: Dict[
            int,  # train-test split index
            LinearGAM
        ] = {}
def _check_df(self, df: pd.DataFrame):
"""Check that passed DataFrame has correct columns, and no others."""
assert set(df.columns) == {
self.target_variable_name,
self.lacalb_variable_name
}
    def fit(self):
        """Fit albumin or lactate imputation models for every train-test
        split."""
        # pb wraps the split indices in a progress bar.
        for i in pb(range(self.tts.n_splits), prefix="Split iteration"):
            self._single_train_test_split(i)
    def _single_train_test_split(self, split_i: int):
        """Fit albumin or lactate imputation models for a single train-test
        split. lacalb_train and lacalb_test are DataFrames with a single
        column of lactate / albumin values."""
        # The 2nd and 4th return values (the targets) are unused here.
        lacalb_train, _, lacalb_test, _ = self._split(split_i)
        self._find_missing_indices(
            split_i=split_i,
            train=lacalb_train,
            test=lacalb_test,
            variable_names=[self.lacalb_variable_name]
        )
        # Models are fit on the observed (non-missing) train values only.
        obs_lacalb_train = self._get_observed_values(
            fold="train",
            split_i=split_i,
            X=lacalb_train
        )
        # Winsorize, then transform the target before fitting the GAMs.
        obs_lacalb_train = self._winsorize(split_i, obs_lacalb_train)
        obs_lacalb_train = self._fit_transform(split_i, obs_lacalb_train)
        self._fit_combine_gams(split_i, obs_lacalb_train)
def _get_observed_values(
| |
data_dict[old_name]['end_line'] - data_dict[old_name]['start_line']
df = make_df(lines, start, nrows, df_header)
data_dict[old_name]['Data'] = df
varchange = True
elif "Analysis begun on" in lines[i]: # Catch last item to be parsed
main_logger.info("EPASWMM Model: " + lines[i].strip())
data_dict[old_name]['end_line'] = i - 3
df_header = data_dict[old_name]['df_header']
start = data_dict[old_name]['start_line']
nrows = data_dict[old_name]['end_line'] - data_dict[old_name]['start_line']
df = make_df(lines, start, nrows, df_header)
data_dict[old_name]['Data'] = df
elif "Analysis ended on" in lines[i]:
main_logger.info("EPASWMM Model: " + lines[i].strip())
elif "Total elapsed time" in lines[i]:
main_logger.info("EPASWMM Model: " + lines[i].strip())
break
else:
old_name = new_name
except Exception:
main_logger.error(
"While parsing SWMM output RPT file, error encountered on line# {0}: {1}".format(str(i + 1),
lines[i]))
stop_program()
if len(data_dict) == 0:
main_logger.error(
"Error raised due to detected empty Time Series. Check result file from EPA SWMM model output: %s" % (
rpt_input_file))
stop_program()
else:
main_logger.debug("Done parsing *.rpt file.")
return data_dict
except Exception:
main_logger.error("Error encountered while opening: {0}.".format(rpt_input_file))
stop_program()
def read_run_info(run_info_file):
    """
    Read FEWS run_info.xml file.

    Parses the FEWS-provided run metadata (diagnostic/work locations, run
    times, optional rating curve and control rule files, and extra
    properties) into the module-level ``run_info`` dict, which is also
    returned.
    """
    global run_info
    run_info = {}
    if not os.path.exists(run_info_file):
        main_logger.error("Failed to find run_info file: " + str(run_info_file))
        print("Failed to parse run_info file: {0}.\nCheck the adapter log: {1}.".format(str(run_info_file),
                                                                                        logger_filename))  # can't write run_diagnostics, because if run_info is not found, then we don't know where to write
        sys.exit(1)
    try:
        tree = ET.parse(run_info_file)
        root = tree.getroot()
    except Exception:
        main_logger.error("Failed to parse run_info file.")
        print("Failed to parse run_info file.; check:" + logger_filename)
        sys.exit(1)
    # NOTE(review): this call passes the Element itself, while workDir and
    # the rating-curve branch below pass `.text` — confirm file_element
    # accepts both forms, or add the missing `.text` here.
    run_info["diagnostic_xml"] = file_element(root.find("pi:outputDiagnosticFile", namespace), exists=False)
    run_info["workDir"] = dir_element(root.find("pi:workDir", namespace).text, exists=True)
    st = time_element(root.find("pi:startDateTime", namespace))
    et = time_element(root.find("pi:endDateTime", namespace))
    t0 = time_element(root.find("pi:time0", namespace))
    lobs = time_element(root.find("pi:lastObservationDateTime", namespace))
    tz = root.find("pi:timeZone", namespace).text
    run_info["start_time"] = pd.Timestamp(st)
    run_info["end_time"] = pd.Timestamp(et)
    run_info["time0"] = pd.Timestamp(t0)
    run_info["last_obs_time"] = pd.Timestamp(lobs)
    run_info["time_zone"] = float(tz)
    # The Rating Curve and the Control Rule files are optional inputs
    if root.find("pi:inputRatingCurveFile", namespace) is not None:
        run_info["dam_rating_curve"] = file_element(root.find("pi:inputRatingCurveFile", namespace).text, exists=True)
    else:
        main_logger.info("No rating curve file provided in the run_info.xml; rating curves will not be updated.")
        print("No rating curve file provided in the run_info.xml; rating curves will not be updated.")
    if root.find("pi:inputTimeSeriesFile", namespace) is not None:
        if os.path.basename(root.find("pi:inputTimeSeriesFile", namespace).text) == "Control_rules.xml":
            # NOTE(review): passes the Element (no .text), unlike the
            # rating-curve branch above — confirm this is intentional.
            run_info["control_rule"] = file_element(root.find("pi:inputTimeSeriesFile", namespace),exists=True)
        else:
            main_logger.info("Time series file was provided, but is not is not considered a Control Rules file. Control_rules.xml is expected.")
            print("Time series file was provided, but is not is not considered a Control Rules file. Control_rules.xml is expected.")
    else:
        main_logger.info("No control rule file (Control_rules.xml) provided in the run_info.xml; control rules will not be updated.")
        print("No control rule file (Control_rules.xml) provided in the run_info.xml; control rules will not be updated.")
    run_info["netcdf"] = file_element(root.find("pi:inputNetcdfFile", namespace))
    # To keep the number of configuration files to a minimum,
    # we put extra properties in the run_info.xml
    properties = root.find("pi:properties", namespace)
    run_info["properties"] = {}
    for e in properties.findall("pi:string", namespace):
        key = e.get("key")
        val = e.get("value")
        path = file_element(val, exists=True)  # currently, the two files in the properties section are: 1) SWMM exe, 2) SWMM inp file; both should exist
        run_info["properties"][key] = path
    # Hardwired properties: derived output paths built from the SWMM input
    # file name next to the run_info.xml location.
    swmm_input_path = run_info["properties"]["swmm_input_file"]
    swmm_input_fn = os.path.splitext(os.path.basename(swmm_input_path))[0]
    run_info["properties"]["UDUNITS"] = file_element(
        str(Path(run_info_file).parents[0]) + "//model//UDUNITS_lookup.csv", exists=True)
    run_info["properties"]["out_nodes_netcdf"] = file_element(
        str(Path(run_info_file).parents[0]) + "//output//" + swmm_input_fn + "_output_nodes.nc", exists=False)
    run_info["properties"]["out_links_netcdf"] = file_element(
        str(Path(run_info_file).parents[0]) + "//output//" + swmm_input_fn + "_output_links.nc", exists=False)
    run_info["properties"]["swmm_output_file"] = file_element(
        str(Path(run_info_file).parents[0]) + "//model//" + swmm_input_fn + ".rpt", exists=False)
    return run_info
def read_rating_curve(rating_curve_file):
    """
    Reads the dam rating curve XML file, and returns a dictionary with pairs of a) location and b) a string (formatted for use with SWMM)
    """
    try:
        root = ET.parse(rating_curve_file).getroot()
        curves = root.findall("pi:ratingCurve", namespace)
    except Exception:
        main_logger.error("Failed to parse rating curve file.")
        stop_program()
    if len(curves) == 0:
        main_logger.error("No rating curves provided in {0}.".format(rating_curve_file))
        stop_program()
        raise ValueError("No rating curves found in the rating curve XML file {0}.".format(rating_curve_file))
    dict_rc = {}
    loc_list = []
    for curve in curves:
        for header in curve.findall("pi:header", namespace):
            loc = header.find("pi:locationId", namespace).text
            try:
                loc_list.append(loc)
                # Read (but do not otherwise use) the stage unit; a missing
                # element triggers the except branch below.
                unit = header.find("pi:stageUnit", namespace).text
                for table in curve.findall("pi:table", namespace):
                    # First row carries the "Rating" keyword; subsequent
                    # rows are continuation lines (location + padding).
                    row_strings = []
                    for row_i, row in enumerate(table.findall("pi:row", namespace)):
                        if row_i == 0:
                            row_strings.append("{0} Rating {1} {2}\n".format(
                                loc, row.get('stage'), row.get('discharge')))
                        else:
                            row_strings.append("{0}   {1} {2}\n".format(
                                loc, row.get('stage'), row.get('discharge')))
                    dict_rc[loc] = "".join(row_strings)
            except Exception:
                print("Failed to extract rating curve for: " + loc)
                main_logger.error("Failed to extract rating curve for: " + loc)
                stop_program()
    if len(loc_list) != len(set(loc_list)):
        main_logger.error("Multiple curves with the same name found in {0}".format(rating_curve_file))
        stop_program()
    if len(loc_list) >= 1:
        main_logger.info(
            "{0} rating curves provided in {1}: {2}".format(len(loc_list), str(rating_curve_file), loc_list))
    else:
        main_logger.error("No rating curves provided in {0}.".format(rating_curve_file))
    return dict_rc
def read_control_rules(control_rule_file):
    """
    Read the FEWS Control_rules.xml file and return a dict mapping each
    'location-parameter' rule ID to a SWMM rules-section string, with one
    'Rule AdapterRule<j>.<i>' entry per non-missing time series event.
    """
    tree = ET.parse(control_rule_file)
    root = tree.getroot()
    tseries = root.findall("pi:series", namespace)
    dict_rules = dict()
    rule_list = []
    j = 0  # index of the current time series (1-based in the rule names)
    for s in tseries:
        rating_curve_string = ''
        for item in s.findall("pi:header", namespace):
            param = item.find("pi:parameterId", namespace).text
            loc = item.find("pi:locationId", namespace).text
            missing_value = item.find("pi:missVal", namespace).text
            rule_id = loc + '-' + param  # unique identifier of these time series rules
            rule_list = rule_list + [rule_id]
            events = s.findall("pi:event", namespace)
            i = 0  # counts only events with a non-missing value
            for e in events:
                # convert date format for epaswmm
                if e.get('value') != missing_value:
                    d = time_element(e)
                    rating_curve_string = rating_curve_string + 'Rule ' + 'AdapterRule' + str(j + 1) + '.' + str(
                        i + 1) + '\n' + \
                                          'IF SIMULATION DATE = ' + d.strftime('%m/%d/%Y') + '\n' \
                                          'AND SIMULATION CLOCKTIME = ' + d.strftime("%H:%M:%S") + '\n' \
                                          'THEN' + ' ' + param + ' ' + loc + ' ' + 'SETTING = ' + e.get('value') + '\n\n'
                    i += 1
            dict_rules[rule_id] = rating_curve_string
        j += 1
    if len(rule_list) != len(set(rule_list)):
        main_logger.error("Multiple rule time series for the same location-type pair (e.g. OL341-OUTLET) found in {0}".format(
            control_rule_file))
        stop_program()
    return dict_rules
def read_units(units_input_file):
    """
    Read the relate table between EPA-SWMM and UDUNITS + attributes information.

    The file is a CSV whose first column holds the EPA-SWMM unit name and
    whose remaining three columns hold the UDUNITS / attribute values.

    Returns:
        dict: Maps each SWMM unit name to a dict of the three attribute
        values, keyed by the corresponding CSV header names.
    """
    swmm_unit_dict = {}
    try:
        with open(units_input_file) as f:
            lines = f.readlines()
        header = lines[0].strip().split(',')
        # Skip the header row; one dict entry per remaining line.
        # (Previous version kept a dead `temp = {}` store and indexed
        # `lines` by position; iterate the rows directly instead.)
        for line in lines[1:]:
            fields = line.strip().split(',')
            swmm_unit_dict[fields[0]] = {
                header[1]: fields[1],
                header[2]: fields[2],
                header[3]: fields[3],
            }
    except Exception:
        main_logger.error("Error parsing UDUNITS input file: %s" % units_input_file)
        stop_program()
    return swmm_unit_dict
def stop_program():
    """
    Used when an error is encountered:
    - Read the adapter log
    - Write errors to run_diagnostics.xml
    - Exit program execution.
    """
    # Under pytest there is no parsed run_info yet, so write diagnostics to
    # a fixed test-cases path instead of the FEWS-provided location.
    if "pytest" in sys.modules:
        xml = r"log/run_diagnostic_test_cases.xml"
    else:
        xml = run_info["diagnostic_xml"]
    main_logger.error(
        "STOPPING ADAPTER : Error encountered while running the adapter. Reading Adapter Log, and writing the Diagnostics File and exiting.")
    df = read_errors_warnings([logger_filename])
    write_run_diagnostics(df, xml)
    sys.exit(1)
def time_element(elem):
    """
    Get datetime from XML element with date and time attributes
    """
    combined = elem.get("date") + " " + elem.get("time")
    try:
        parsed = datetime.datetime.strptime(combined, "%Y-%m-%d %H:%M:%S")
    except Exception:
        main_logger.error("Failed to parse date/time:" + str(combined))
        stop_program()
    return parsed
def write_netcdf(ds, ds_fn):
    """
    Write a DataSet to NetCDF format.

    Args:
        ds: Dataset to serialize (presumably an xarray Dataset — anything
            exposing ``to_netcdf``; TODO confirm).
        ds_fn: Destination file path; overwritten if it exists (mode='w').
    """
    try:
        ds.to_netcdf(ds_fn, mode='w')
    except Exception:
        main_logger.error("Failed to write dataset to:" + str(ds_fn))
        stop_program()
def write_rainfall(rainfall_net_cdf, rainfall_dat, col_to_convert=None):
    """
    Reads the rainfall NetCDF file, converts column type (unicode).
    Write the rainfall in SWMM .DAT format.

    Args:
        rainfall_net_cdf: Path to the input rainfall NetCDF file.
        rainfall_dat: Path of the SWMM .DAT file to write.
        col_to_convert: Columns whose type needs conversion; defaults to
            ['station_id', 'station_names'].
    """
    # Bug fix: the default was previously a mutable list literal, which is
    # shared across calls; use a None sentinel and build it per call.
    if col_to_convert is None:
        col_to_convert = ['station_id', 'station_names']
    df_rain = read_netcdf(rainfall_net_cdf, col_to_convert)
    df_rain = df_rain.reset_index()[["station_id", "time", "P"]]
    # Expand the timestamp into the separate columns SWMM's .DAT format uses.
    df_rain['year'] = df_rain['time'].dt.year
    df_rain['month'] = df_rain['time'].dt.month
    df_rain['day'] = df_rain['time'].dt.day
    df_rain['hour'] = df_rain['time'].dt.hour
    df_rain['minute'] = df_rain['time'].dt.minute
    df_rain = df_rain[["station_id", "year", "month", "day", "hour", "minute", "P"]]
    # Only the first header cell carries ";Rainfall" (a SWMM comment line).
    df_rain.to_csv(rainfall_dat, sep=" ", header=[";Rainfall"] + (len(df_rain.columns) - 1) * [""],
                   escapechar="\\", quoting=csv.QUOTE_NONE, index=False)
    main_logger.info(
        "Converted the NetCDF rainfall file ({0}) to EPASWMM .DAT format ({1}).".format(rainfall_net_cdf, rainfall_dat))
def write_run_diagnostics(df_err_warn, run_diagnostics):
"""
Write the dataframe that contains both Python and EPASWMM errors to the run diagnostics file, in FEWS PI XML format.
"""
try:
with open(run_diagnostics, 'w') as xf:
# Write Header
xf.write('<?xml version="1.0" encoding="UTF-8"?>\n')
xf.write('<Diag xmlns="http://www.wldelft.nl/fews/PI"\n')
xf.write('xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
xf.write(
'xsi:schemaLocation="http://www.wldelft.nl/fews/PI http://fews.wldelft.nl/schemas/version1.0/pi-schemas/pi_diag.xsd" version="1.2">\n')
# Write Warnings Errors
if len(df_err_warn) > 0:
for index, row in df_err_warn.iterrows():
loc_text = str(' <line level="%s" description="%s"/>\n') % (
row["level"], row["description"])
xf.write(loc_text)
else:
xf.write(
' <line level="2" description="No errors, warnings or info | |
<gh_stars>1-10
import numpy as np
import torch
from value_iteration.pendulum import BaseSystem
from value_iteration.cost_functions import ArcTangent, SineQuadraticCost, BarrierCost
CUDA_AVAILABLE = torch.cuda.is_available()
class Cartpole(BaseSystem):
    """Cartpole dynamics for value iteration. State layout is
    (x, theta, x_dot, theta_dot) with theta = 0 meaning the pole points
    upward; the single action is the horizontal force on the cart."""
    name = "Cartpole"
    labels = ('x', 'theta', 'x_dot', 'theta_dot')
    def __init__(self, cuda=CUDA_AVAILABLE, **kwargs):
        super(Cartpole, self).__init__()
        # Define Duration:
        self.T = kwargs.get("T", 7.5)
        self.dt = kwargs.get("dt", 1./500.)
        # Define the System:
        self.n_state = 4
        self.n_dof = 2
        self.n_act = 1
        self.n_parameter = 5
        # Continuous Joints:
        # Right now only one continuous joint is supported
        self.wrap, self.wrap_i = True, 1
        # State Constraints:
        # theta = 0, means the pendulum is pointing upward
        self.x_target = torch.tensor([0.0, 0.0, 0.0, 0.0])
        self.x_start = torch.tensor([0.0, np.pi, 0.0, 0.0])
        self.x_start_var = torch.tensor([1.e-3, 5.e-2, 1.e-6, 1.e-6])
        self.x_lim = torch.tensor([0.5, np.pi, 5.0, 20.0])
        self.x_penalty = torch.tensor([0.4, 1.1 * np.pi, 1.1 * 5.0, 1.1 * 20.0])
        self.x_init = torch.tensor([0.15, np.pi, 0.01, 0.01])
        self.u_lim = torch.tensor([20., ])
        # Define dynamics:
        self.g = 9.81  # Gravitational acceleration [m/s^2]
        mc = 0.57  # Mass of the cart [kg]
        mp = 0.127  # Mass of the pole [kg]
        pl = 0.3365 / 2.  # Half of the pole length [m]
        Beq = 0.1  # Equivalent Viscous damping Coefficient 5.4
        Bp = 1.e-3  # Viscous coefficient at the pole 0.0024
        # Dynamics parameter (order: mc, mp, pl, Beq, Bp), with +/- 50%
        # bounds used to clip parameter disturbances in dyn():
        self.theta = torch.tensor([mc, mp, pl, Beq, Bp]).view(1, self.n_parameter, 1)
        self.theta_min = 0.5 * torch.tensor([mc, mp, pl, Beq, Bp]).view(1, self.n_parameter, 1)
        self.theta_max = 1.5 * torch.tensor([mc, mp, pl, Beq, Bp]).view(1, self.n_parameter, 1)
        # Compute Linearized System:
        # dyn(..., gradient=True) returns (a, B, dadx, dBdx); A is the
        # transposed state Jacobian at the target, B the input matrix.
        out = self.dyn(self.x_target, gradient=True)
        self.A = out[2].view(1, self.n_state, self.n_state).transpose(dim0=1, dim1=2).numpy()
        self.B = out[1].view(1, self.n_state, self.n_act).numpy()
        # Test Dynamics:
        self.check_dynamics()
        self.device = None
        # Explicit Cartpole.cuda/cpu calls so a subclass override (whose
        # extra attributes may not exist yet) is not invoked during
        # construction.
        Cartpole.cuda(self) if cuda else Cartpole.cpu(self)
    def dyn(self, x, dtheta=None, gradient=False):
        """Evaluate the control-affine dynamics x_dot = a(x) + B(x) u.

        Args:
            x: States, reshaped to (n_samples, n_state, 1); numpy input is
                converted to torch (and the outputs converted back).
            dtheta: Optional per-sample disturbance of the 5 dynamics
                parameters, clipped to [theta_min, theta_max].
            gradient: If True, additionally return the state Jacobians
                dadx and dBdx.

        Returns:
            (a, B) or (a, B, dadx, dBdx).
        """
        cat = torch.cat
        is_numpy = True if isinstance(x, np.ndarray) else False
        x = torch.from_numpy(x) if isinstance(x, np.ndarray) else x
        x = x.view(-1, self.n_state, 1)
        n_samples = x.shape[0]
        q, q_dot = x[:, :self.n_dof], x[:, self.n_dof:]
        xc, th = x[:, 0].view(-1, 1, 1), x[:, 1].view(-1, 1, 1)
        x_dot, th_dot = x[:, 2].view(-1, 1, 1), x[:, 3].view(-1, 1, 1)
        sin_th, cos_th = torch.sin(th), torch.cos(th)
        ones_1, zeros_1, zeros_n_dof = torch.ones_like(th), torch.zeros_like(th), torch.zeros((n_samples, 2, 1)).to(x.device)
        # Update the dynamics parameters with disturbance:
        if dtheta is not None:
            dtheta = torch.from_numpy(dtheta).float() if isinstance(dtheta, np.ndarray) else dtheta
            dtheta = dtheta.view(n_samples, self.n_parameter, 1)
            theta = self.theta + dtheta
            theta = torch.min(torch.max(theta, self.theta_min), self.theta_max)
        else:
            theta = self.theta
        # Define mass matrix M = [[a, b], [b, c]]
        H_00 = (theta[:, 1:2] + theta[:, 0:1]) * ones_1
        H_01 = theta[:, 1:2] * theta[:, 2:3] * cos_th
        H_11 = theta[:, 1:2] * theta[:, 2:3] ** 2 * ones_1
        # H = cat([cat([H_00, H_01], dim=2), cat([H_01, H_11], dim=2)], dim=1)
        # 2x2 inverse in closed form (adjugate over determinant):
        invH = cat([cat([H_11, -H_01], dim=2), cat([-H_01, H_00], dim=2)], dim=1) / (H_00 * H_11 - H_01 * H_01)
        # Calculate vector n = C(q, qd) + g(q):
        n = cat([-theta[:, 1:2] * theta[:, 2:3] * sin_th * th_dot**2,
                 -theta[:, 1:2] * theta[:, 2:3] * self.g * sin_th], dim=1)
        # Viscous friction forces:
        f = cat([-theta[:, 3:4] * x_dot, -theta[:, 4:5] * th_dot], dim=1)
        # Construct Dynamics:
        a = cat([q_dot, torch.matmul(invH, f - n)], dim=1)
        # Force acts on the cart only, hence the first column of invH:
        B = cat([torch.zeros((n_samples, self.n_dof, 1)).to(x.device), invH[:, :, :1]], dim=1)
        assert a.shape == (n_samples, self.n_state, 1)
        assert B.shape == (n_samples, self.n_state, self.n_act)
        out = (a, B)
        if gradient:
            zeros_nxn = torch.zeros((n_samples, self.n_dof, self.n_dof)).to(x.device)
            ones_nxn = torch.ones((n_samples, self.n_dof, self.n_dof)).to(x.device)
            # d(inv(H))/dq via the identity d(inv(H)) = -inv(H) dH inv(H):
            dH_00_dq = zeros_n_dof.view(n_samples, self.n_dof, 1, 1)
            dH_01_dq = cat([zeros_1.view((n_samples, 1, 1, 1)), (-theta[:, 1] * theta[:, 2] * sin_th).view((-1, 1, 1, 1))], dim=1)
            dH_11_dq = zeros_n_dof.view(n_samples, self.n_dof, 1, 1)
            dHdq = cat([cat([dH_00_dq, dH_01_dq], dim=3), cat([dH_01_dq, dH_11_dq], dim=3)], dim=2)
            dinvH_dq = -torch.matmul(invH.view(-1, 1, self.n_dof, self.n_dof), torch.matmul(dHdq, invH.view(-1, 1, self.n_dof, self.n_dof)))
            # Partials of n and f w.r.t. each state component:
            dn_dx = zeros_n_dof.view(n_samples, 2, 1)
            dn_dth = cat([-theta[:, 1] * theta[:, 2] * cos_th * th_dot ** 2, -theta[:, 1] * theta[:, 2] * self.g * cos_th], dim=1)
            dn_dxd = zeros_n_dof
            dn_dthd = cat([-2. * theta[:, 1] * theta[:, 2] * sin_th * th_dot, zeros_1], dim=1)
            dn_dq = cat([dn_dx, dn_dth], dim=2)
            dn_dqd = cat([dn_dxd, dn_dthd], dim=2)
            df_dqd = cat([cat([-theta[:, 3] * ones_1, zeros_1], dim=1), cat([zeros_1, -theta[:, 4] * ones_1], dim=1)], dim=2)
            # Construct da/dx:
            A_00 = zeros_nxn
            A_01 = torch.eye(self.n_dof).view(1, self.n_dof, self.n_dof).to(x.device) * ones_nxn
            A_10 = torch.matmul(dinvH_dq, (f - n).view(-1, 1, self.n_dof, 1)).squeeze(-1).transpose(dim0=1, dim1=2) - torch.matmul(invH, dn_dq)
            A_11 = torch.matmul(invH, df_dqd - dn_dqd)
            dadx = cat([cat([A_00, A_01], dim=2), cat([A_10, A_11], dim=2)], dim=1).transpose(dim0=1, dim1=2)
            dBdx = cat([cat([zeros_nxn.view(n_samples, self.n_dof, self.n_dof, 1), dinvH_dq[:, :, :, :self.n_act]], dim=2),
                        torch.zeros(n_samples, self.n_dof, self.n_state, 1).to(x.device)], dim=1)
            assert dadx.shape == (n_samples, self.n_state, self.n_state,)
            assert dBdx.shape == (n_samples, self.n_state, self.n_state, self.n_act)
            out = (a, B, dadx, dBdx)
        if is_numpy:
            # NOTE(review): .numpy() requires CPU tensors without grad;
            # unlike grad_dyn_theta this skips .cpu().detach() — confirm
            # numpy inputs only ever occur on CPU.
            out = [array.numpy() for array in out]
        return out
    def grad_dyn_theta(self, x):
        """Gradients of the drift a and input matrix B w.r.t. the 5
        dynamics parameters (mc, mp, pl, Beq, Bp), evaluated with the
        nominal (undisturbed) parameters. Returns (dadp, dBdp) of shapes
        (n_samples, n_parameter, n_state) and
        (n_samples, n_parameter, n_state, n_act)."""
        cat = torch.cat
        is_numpy = True if isinstance(x, np.ndarray) else False
        x = torch.from_numpy(x) if isinstance(x, np.ndarray) else x
        x = x.view(-1, self.n_state, 1)
        n_samples = x.shape[0]
        xc, th = x[:, 0].view(-1, 1, 1), x[:, 1].view(-1, 1, 1)
        x_dot, th_dot = x[:, 2].view(-1, 1, 1), x[:, 3].view(-1, 1, 1)
        sin_th, cos_th = torch.sin(th), torch.cos(th)
        ones_1, zeros_1, zeros_n_dof = torch.ones_like(th), torch.zeros_like(th), torch.zeros((n_samples, 2, 1)).to(x.device)
        # Define mass matrix M = [[a, b], [b, c]]
        H_00 = (self.theta[:, 1] + self.theta[:, 0]) * ones_1
        H_01 = self.theta[:, 1] * self.theta[:, 2] * cos_th
        H_11 = self.theta[:, 1] * self.theta[:, 2] ** 2 * ones_1
        # H = cat([cat([H_00, H_01], dim=2), cat([H_01, H_11], dim=2)], dim=1)
        invH = cat([cat([H_11, -H_01], dim=2), cat([-H_01, H_00], dim=2)], dim=1) / (H_00 * H_11 - H_01 * H_01)
        # Calculate vector n = C(q, qd) + g(q):
        n = cat([-self.theta[:, 1] * self.theta[:, 2] * sin_th * th_dot**2,
                 -self.theta[:, 1] * self.theta[:, 2] * self.g * sin_th], dim=1).view(-1, 1, self.n_dof, 1)
        f = cat([-self.theta[:, 3] * x_dot, -self.theta[:, 4] * th_dot], dim=1).view(-1, 1, self.n_dof, 1)
        # Per-parameter partials of H, n and f (zero where independent):
        dHdp = torch.zeros(n_samples, self.n_parameter, self.n_dof, self.n_dof).to(x.device)
        dndp = torch.zeros(n_samples, self.n_parameter, self.n_dof, 1).to(x.device)
        dfdp = torch.zeros(n_samples, self.n_parameter, self.n_dof, 1).to(x.device)
        # dM/dm_c
        dHdp[:, 0, 0:1, 0:1] = ones_1
        # dM/dm_p
        dHdp[:, 1, 0:1, 0:1] = ones_1
        dHdp[:, 1, 0:1, 1:2] = self.theta[:, 2] * cos_th
        dHdp[:, 1, 1:2, 0:1] = self.theta[:, 2] * cos_th
        dHdp[:, 1, 1:2, 1:2] = self.theta[:, 2]**2
        # dM/dl_p
        dHdp[:, 2, 0:1, 0:1] = zeros_1
        dHdp[:, 2, 0:1, 1:2] = self.theta[:, 1] * cos_th
        dHdp[:, 2, 1:2, 0:1] = self.theta[:, 1] * cos_th
        dHdp[:, 2, 1:2, 1:2] = self.theta[:, 1] * self.theta[:, 2] * 2
        # dn/dm_p
        dndp[:, 1, 0:1] = -self.theta[:, 2] * sin_th * th_dot**2
        dndp[:, 1, 1:2] = -self.theta[:, 2] * self.g * sin_th
        # dn/dl_p
        dndp[:, 2, 0:1] = -self.theta[:, 1] * sin_th * th_dot**2
        dndp[:, 2, 1:2] = -self.theta[:, 1] * self.g * sin_th
        # df/dB_c
        dfdp[:, 3, 0:1] = -x_dot
        dfdp[:, 4, 1:2] = -th_dot
        invH_4d = invH.view(-1, 1, self.n_dof, self.n_dof)
        # d(inv(H))/dp = -inv(H) dH/dp inv(H):
        dinvHdp = -torch.matmul(invH_4d, torch.matmul(dHdp, invH_4d))
        dadp = torch.zeros(n_samples, self.n_parameter, self.n_state).to(x.device)
        dadp[:, :, self.n_dof:, ] = (torch.matmul(dinvHdp, f - n) + torch.matmul(invH_4d, dfdp - dndp)).view(-1, self.n_parameter, self.n_dof)
        dBdp = torch.zeros(n_samples, self.n_parameter, self.n_state, self.n_act).to(x.device)
        dBdp[:, :, self.n_dof:, ] = dinvHdp[:, :, :, :self.n_act]
        out = (dadp, dBdp)
        if is_numpy:
            out = [array.cpu().detach().numpy() for array in out]
        return out
    def cuda(self, device=None):
        """Move the actuation limit and dynamics parameters to the GPU.
        NOTE(review): x_target / x_lim / x_penalty etc. are NOT moved —
        confirm downstream code expects them on CPU."""
        self.u_lim = self.u_lim.cuda(device=device)
        self.theta_min = self.theta_min.cuda(device=device)
        self.theta = self.theta.cuda(device=device)
        self.theta_max = self.theta_max.cuda(device=device)
        self.device = self.theta.device
        return self
    def cpu(self):
        """Move the actuation limit and dynamics parameters to the CPU
        (mirror of cuda())."""
        self.u_lim = self.u_lim.cpu()
        self.theta_min = self.theta_min.cpu()
        self.theta = self.theta.cpu()
        self.theta_max = self.theta_max.cpu()
        self.device = self.theta.device
        return self
class CartpoleLogCos(Cartpole):
name = "Cartpole_LogCosCost"
    def __init__(self, Q, R, cuda=False, **kwargs):
        """
        Args:
            Q: Per-state cost weights, length n_state, all positive.
            R: Per-action cost weights, length n_act, all positive.
            cuda: Move tensors to the GPU if True.
            **kwargs: Forwarded to the Cartpole dynamics (e.g. T, dt).
        """
        # Create the dynamics:
        super(CartpoleLogCos, self).__init__(cuda=cuda, **kwargs)
        # NOTE(review): overrides the 20 N limit set in Cartpole.__init__.
        self.u_lim = torch.tensor([12., ])
        # Create the Reward Function:
        assert Q.size == self.n_state and np.all(Q > 0.0)
        self.Q = np.diag(Q).reshape((self.n_state, self.n_state))
        assert R.size == self.n_act and np.all(R > 0.0)
        self.R = np.diag(R).reshape((self.n_act, self.n_act))
        # State cost: sine-squashed quadratic (second mask entry marks the
        # wrapped angle), wrapped in a barrier on the x_penalty bounds.
        self._q = SineQuadraticCost(self.Q, np.array([0.0, 1.0, 0.0, 0.0]), cuda=cuda)
        self.q = BarrierCost(self._q, self.x_penalty, cuda)
        # Determine beta s.t. the curvature at u = 0 is identical to 2R
        beta = 4. * self.u_lim[0] ** 2 / np.pi * self.R
        self.r = ArcTangent(alpha=self.u_lim.numpy()[0], beta=beta.numpy()[0, 0])
    def rwd(self, x, u):
        # Sum of the barrier-wrapped state cost q(x) and the ArcTangent
        # action cost r(u). NOTE(review): despite the name "rwd", whether
        # this is a reward or a cost depends on the sign convention of the
        # cost classes — TODO confirm.
        return self.q(x) + self.r(u)
    def cuda(self, device=None):
        """Move the base dynamics tensors and the state-cost module to the
        GPU. NOTE(review): self.r (ArcTangent) is not moved — presumably it
        holds no tensors; confirm."""
        super(CartpoleLogCos, self).cuda(device=device)
        self.q.cuda(device=device)
        return self
def cpu(self):
super(CartpoleLogCos, | |
<reponame>glanyx/segachan<gh_stars>0
import datetime
import typing
import discord
from discord.ext import commands
from sqlalchemy.sql import func
from sweeperbot.cogs.utils import log, time
from sweeperbot.cogs.utils.timer import Timer
from sweeperbot.db import models
from sweeperbot.utilities.helpers import has_guild_permissions, set_sentry_scope
class Mute(commands.Cog):
"""Handle Mutes"""
    def __init__(self, bot):
        """On cog load, restore an expiry Timer for every mute in the
        database that has not yet expired, so mutes survive bot restarts."""
        self.bot = bot
        # {guild_discord_id: {user_discord_id: Timer}} for active mutes.
        self.current_mutes = {}
        # NOTE(review): session is not closed if the query raises —
        # consider wrapping in try/finally.
        session = self.bot.helpers.get_db_session()
        # "created" is the mute's last update time, falling back to its
        # creation time.
        mutes = (
            session.query(
                func.coalesce(models.Mute.updated, models.Mute.created).label(
                    "created"
                ),
                models.Mute.id,
                models.User,
                models.Server,
                models.Mute.expires,
                models.Mute.old_roles,
            )
            .join(models.Server, models.Server.id == models.Mute.server_id)
            .join(models.User, models.User.id == models.Mute.user_id)
            .filter(models.Mute.expires > datetime.datetime.now(datetime.timezone.utc))
            .all()
        )
        for mute in mutes:
            # Add timer to remove mute
            timer = Timer.temporary(
                mute.Server.discord_id,
                mute.User.discord_id,
                mute.old_roles,
                event=self._unmute,
                expires=mute.expires,
                created=mute.created,
            )
            timer.start(self.bot.loop)
            if mute.Server.discord_id not in self.current_mutes:
                self.current_mutes[mute.Server.discord_id] = {}
            self.current_mutes[mute.Server.discord_id][mute.User.discord_id] = timer
        session.close()
    @commands.command(aliases=["m"])
    @has_guild_permissions(manage_messages=True)
    @commands.guild_only()
    async def mute(
        self,
        ctx,
        user_id: str,
        mute_time: time.UserFriendlyTime(commands.clean_content, default="\u2026"),
        *,
        reason: str,
    ):
        """Mute a user.
        If no time or note specified default values will be used. Time can be a human readable string, many formats are understood.
        To unmute someone see unmute
        Requires Permission: Manage Messages
        Parameters
        -----------
        ctx: context
            The context message involved.
        user_id: str
            The Discord ID or user mention the command is being run on.
        mute_time: time
            How long the mute will be for.
        reason: str
            The reason for the mute. This will be sent to the user and added to the logs.
        """
        self.bot.log.info(
            f"CMD {ctx.command} called by {ctx.message.author} ({ctx.message.author.id})"
        )
        # If we were provided an ID, let's try and use it
        if user_id:
            member = await self.bot.helpers.get_member_or_user(
                user_id, ctx.message.guild
            )
            if not member:
                return await ctx.send(
                    f"Unable to find the requested user. Please make sure the user ID or @ mention is valid."
                )
            elif isinstance(member, discord.User):
                # A discord.User (rather than Member) means they are not in the
                # guild right now; the mute is still recorded for their return.
                await ctx.send(
                    f"The user specified does not appear to be in the server. Proceeding with mute in case they return."
                )
        else:
            return await ctx.send(
                f"A user ID or Mention must be provided for who to mute."
            )
        # Normalize mute_time to a datetime: the converter can yield the
        # literal string "20m" (treated as 20 minutes from now), an actual
        # datetime, or an object exposing the parsed time as `.dt`.
        if mute_time == "20m":
            mute_time = datetime.datetime.now(
                datetime.timezone.utc
            ) + datetime.timedelta(minutes=20)
        elif isinstance(mute_time, datetime.datetime):
            mute_time = mute_time
        else:
            mute_time = mute_time.dt
        mute_length_human = time.human_timedelta(mute_time)
        # Guild-specific configuration: modmail server, muted role, mod channel.
        settings = self.bot.guild_settings.get(ctx.message.guild.id)
        has_modmail_server = settings.modmail_server_id
        muted_role_id = settings.muted_role
        mod_channel = discord.utils.get(
            ctx.message.guild.text_channels, id=settings.mod_channel
        )
        if not mod_channel:
            await ctx.send(
                "Please set a mod channel using `config modchannel #channel`"
            )
        # delete the message we used to invoke it
        if mod_channel and ctx.message.channel.id != mod_channel.id:
            try:
                await ctx.message.delete()
            except discord.HTTPException as err:
                self.bot.log.warning(
                    f"Couldn't delete command message for {ctx.command}: {err}"
                )
        log_channel = discord.utils.get(
            ctx.message.guild.text_channels, name="bot-logs"
        )
        if not log_channel:
            # If there is no normal logs channel, try the sweeper (legacy) logs channel
            log_channel = discord.utils.get(
                ctx.message.guild.text_channels, name="sweeper-logs"
            )
            if not log_channel:
                return await ctx.send(
                    f"No log channel setup. Please create a channel called #bot-logs"
                )
        muted_role = ctx.message.guild.get_role(muted_role_id)
        if not muted_role:
            return await ctx.send("Mute role is not yet configured. Unable to proceed.")
        footer_text = (
            self.bot.constants.footer_with_modmail.format(guild=ctx.message.guild)
            if has_modmail_server
            else self.bot.constants.footer_no_modmail.format(guild=ctx.message.guild)
        )
        sweeper_emoji = self.bot.get_emoji(
            self.bot.constants.reactions["animated_sweeperbot"]
        )
        session = self.bot.helpers.get_db_session()
        try:
            # If the user already has an active mute, remember its remaining
            # length/expiry (for messaging and the DB update) and cancel its
            # scheduled unmute timer before replacing it.
            old_mute_len = None
            old_mute_dt = None
            if not isinstance(member, discord.User):
                if muted_role in member.roles:
                    if (
                        ctx.message.guild.id in self.current_mutes
                        and member.id in self.current_mutes[ctx.message.guild.id]
                    ):
                        old_mute_len = self.current_mutes[ctx.message.guild.id][
                            member.id
                        ].human_delta
                        old_mute_dt = self.current_mutes[ctx.message.guild.id][
                            member.id
                        ].expires
                        self.current_mutes[ctx.message.guild.id][member.id].stop()
                        del self.current_mutes[ctx.message.guild.id][member.id]
                # Guard rails: never mute the guild owner, bots, yourself, or
                # anyone ranked above the bot's top role.
                if (
                    member is ctx.message.guild.owner
                    or member.bot
                    or member is ctx.message.author
                ):
                    return await ctx.send("You may not use this command on that user.")
                if member.top_role > ctx.me.top_role:
                    return await ctx.send(
                        "The user has higher permissions than the bot, can't use this command on that user."
                    )
            actionMsg = await ctx.send("Initiating action. Please wait.")
            self.bot.log.info(
                f"Initiating mute for user: {member} ({member.id}) in guild {ctx.message.guild} ({ctx.message.guild.id})"
            )
            # Strip the member's roles (saving them so they can be restored on
            # unmute) and apply the muted role. Skipped for non-members.
            old_roles = []
            old_roles_snow = []
            if not isinstance(member, discord.User):
                for role in member.roles:
                    if role.managed or role.name == "@everyone":
                        # Managed (integration) roles and @everyone cannot be
                        # removed, so they are left untouched.
                        continue
                    else:
                        old_roles_snow.append(role)
                        old_roles.append(role.id)
                # Remove all non-managed roles
                await member.remove_roles(
                    *old_roles_snow,
                    reason=f"Muted by request of {ctx.message.author} ({ctx.message.author.id})",
                    atomic=True,
                )
                # Assign mute role
                await member.add_roles(
                    muted_role,
                    reason=f"Muted by request of {ctx.message.author} ({ctx.message.author.id})",
                    atomic=True,
                )
                # If in voice, kick
                try:
                    if member.voice and member.voice.channel:
                        await member.move_to(
                            channel=None,
                            reason=f"Muted by request of {ctx.message.author.mention}",
                        )
                except discord.errors.Forbidden:
                    await ctx.send(
                        f"Missing permissions to drop user from voice channel."
                    )
            self.bot.log.info(
                f"Muted user: {member} ({member.id}) in guild {ctx.message.guild} ({ctx.message.guild.id}) for {mute_length_human}"
            )
            # Try to DM the user about the mute; failure (e.g. closed DMs) is
            # reported to the mod channel instead of aborting the mute.
            informed_user = False
            try:
                # Format the message
                text = self.bot.constants.infraction_header.format(
                    action_type="mute", guild=ctx.message.guild
                )
                # Reduces the text to 1,800 characters to leave enough buffer for header and footer text
                text += f"This mute is for **{mute_length_human}** with the reason:\n\n"
                text += reason[:1800]
                text += footer_text
                await member.send(text)
                self.bot.log.info(
                    f"Informed user of their mute: {member} ({member.id}) in guild {ctx.message.guild}"
                )
                informed_user = True
                if mod_channel and actionMsg.channel.id == mod_channel.id:
                    await actionMsg.edit(
                        content=f"Mute successful for {member.mention}. **Time:** *{mute_length_human}*. {sweeper_emoji}"
                    )
                    if old_mute_len:
                        await ctx.send(
                            f"**Note**: This user was previously muted until {old_mute_len}."
                        )
                else:
                    await actionMsg.edit(
                        content=f"That action was successful. {sweeper_emoji}"
                    )
            except Exception as e:
                if mod_channel:
                    await mod_channel.send(
                        f"Mute successful for {member.mention}. **Time:** *{mute_length_human}*. {sweeper_emoji}\n"
                        f"However, user couldn't be informed: {e}"
                    )
                # Discord error code 50007 is "Cannot send messages to this
                # user" - expected when DMs are closed, so no stack trace.
                if not (type(e) == discord.errors.Forbidden and e.code == 50007):
                    self.bot.log.exception(
                        f"There was an error while informing {member} ({member.id}) about their mute"
                    )
            if informed_user:
                reason += "| **Msg Delivered: Yes**"
            else:
                reason += "| **Msg Delivered: No**"
            # Log action
            await log.user_action(
                self.bot,
                log_channel.name,
                member,
                "Mute",
                f"**Length:** {mute_length_human}\n" f"**Reason:** {reason}",
                ctx.message.author,
                ctx.message.guild,
            )
            # Get the DB profile for the guild
            db_guild = await self.bot.helpers.db_get_guild(
                session, ctx.message.guild.id
            )
            # Get the DB profile for the user
            db_user = await self.bot.helpers.db_get_user(session, member.id)
            # Get mod's DB profile
            db_mod = await self.bot.helpers.db_get_user(session, ctx.message.author.id)
            db_action = models.Action(mod=db_mod, server=db_guild)
            # If this replaces an existing mute, update that row in place;
            # otherwise create a fresh Mute record.
            db_mute = None
            if old_mute_len:
                db_mute = (
                    session.query(models.Mute)
                    .filter(models.Mute.server == db_guild)
                    .filter(models.Mute.user == db_user)
                    .filter(models.Mute.expires == old_mute_dt)
                    .one_or_none()
                )
            if db_mute:
                session.add(db_action)
                session.commit()
                # NOTE(review): the attribute updates below happen after the
                # commit with no further commit in this branch before the
                # session is closed - confirm they are actually persisted.
                db_mute.action_id = db_action.id
                db_mute.text = reason
                db_mute.expires = mute_time
                db_mute.updated = datetime.datetime.now(datetime.timezone.utc)
            else:
                db_mute = models.Mute(
                    text=reason,
                    user=db_user,
                    server=db_guild,
                    action=db_action,
                    expires=mute_time,
                    old_roles=old_roles,
                )
                session.add(db_mute)
                session.commit()
            # Add timer to remove mute
            timer = Timer.temporary(
                ctx.message.guild.id,
                member.id,
                old_roles,
                event=self._unmute,
                expires=mute_time,
                created=datetime.datetime.now(datetime.timezone.utc),
            )
            timer.start(self.bot.loop)
            if ctx.message.guild.id not in self.current_mutes:
                self.current_mutes[ctx.message.guild.id] = {}
            self.current_mutes[ctx.message.guild.id][member.id] = timer
        except Exception as e:
            set_sentry_scope(ctx)
            if mod_channel:
                await mod_channel.send(
                    f"There was an error while creating mute for {member.mention}\n"
                    f"**Error**: {e}"
                )
            self.bot.log.exception(
                f"There was an error while creating mute for {member} ({member.id})"
            )
        finally:
            session.close()
@commands.command(aliases=["um"])
@has_guild_permissions(manage_messages=True)
@commands.guild_only()
async def unmute(self, ctx, member: discord.Member):
"""Removes a mute for specified user.
To Mute someone see mute
"""
self.bot.log.info(
f"CMD {ctx.command} called by {ctx.message.author} ({ctx.message.author.id})"
)
settings = self.bot.guild_settings.get(ctx.message.guild.id)
muted_role_id = settings.muted_role
muted_role = member.guild.get_role(muted_role_id)
mod_channel = discord.utils.get(
ctx.message.guild.text_channels, id=settings.mod_channel
)
if not mod_channel:
await ctx.send(
"Please set a mod channel using `config modchannel #channel`"
)
# delete the message we used to invoke it
if mod_channel and ctx.message.channel.id != mod_channel.id:
try:
await ctx.message.delete()
except discord.HTTPException as err:
self.bot.log.warning(
f"Couldn't delete command message for {ctx.command}: {err}"
)
session = self.bot.helpers.get_db_session()
try:
if muted_role is None:
return await ctx.send("Mute role is not yet configured.")
if muted_role not in member.roles:
return await ctx.send("User is not muted")
if (
member is member.guild.owner
or member.bot
or member is ctx.message.author
):
return await ctx.send("You may not use this command on that user.")
old_mute_dt = None
old_roles = []
if (
member.guild.id in self.current_mutes
and member.id in self.current_mutes[member.guild.id]
):
old_mute_dt = self.current_mutes[member.guild.id][member.id].expires
query = (
session.query(models.Mute.old_roles)
.filter(models.Mute.expires == old_mute_dt)
.first()
)
if query:
old_roles = query.old_roles
if self._unmute(member.guild.id, member.id, old_roles, ctx.message.author):
if ctx.message.channel.id == mod_channel.id:
await ctx.send(f"Successfully unmuted {member.mention}.")
else:
await ctx.send(f"That action was successful.")
else:
await mod_channel.send(
f"Successfully unmuted {member.mention}. However, user could not be informed."
)
if old_mute_dt:
# Get the DB profile for the guild
db_guild = await self.bot.helpers.db_get_guild(
session, ctx.message.guild.id
)
# Get the DB profile for the user
db_user = await self.bot.helpers.db_get_user(session, member.id)
# Get mod's DB profile
db_mod = await self.bot.helpers.db_get_user(
session, ctx.message.author.id
)
db_action = models.Action(mod=db_mod, server=db_guild)
db_mute = (
session.query(models.Mute)
.filter(models.Mute.server == db_guild)
.filter(models.Mute.user == db_user)
.filter(models.Mute.expires == old_mute_dt)
.one_or_none()
)
if db_mute:
session.add(db_action)
session.commit()
db_mute.action_id = db_action.id
db_mute.expires = datetime.datetime.now(datetime.timezone.utc)
db_mute.updated = datetime.datetime.now(datetime.timezone.utc)
session.add(db_mute)
session.commit()
else:
self.bot.log.warning(
f"Couldn't find mute for {member} | |
"failed".
* DeleteClusterSnapshot returns status as "deleted".
- **Port** *(integer) --*
The port that the cluster is listening on.
- **AvailabilityZone** *(string) --*
The Availability Zone in which the cluster was created.
- **ClusterCreateTime** *(datetime) --*
The time (UTC) when the cluster was originally created.
- **MasterUsername** *(string) --*
The master user name for the cluster.
- **ClusterVersion** *(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **SnapshotType** *(string) --*
The snapshot type. Snapshots created using CreateClusterSnapshot and CopyClusterSnapshot are of type "manual".
- **NodeType** *(string) --*
The node type of the nodes in the cluster.
- **NumberOfNodes** *(integer) --*
The number of nodes in the cluster.
- **DBName** *(string) --*
The name of the database that was created when the cluster was created.
- **VpcId** *(string) --*
The VPC identifier of the cluster if the snapshot is from a cluster in a VPC. Otherwise, this field is not in the output.
- **Encrypted** *(boolean) --*
If ``true`` , the data in the snapshot is encrypted at rest.
- **KmsKeyId** *(string) --*
The AWS Key Management Service (KMS) key ID of the encryption key that was used to encrypt data in the cluster from which the snapshot was taken.
- **EncryptedWithHSM** *(boolean) --*
A boolean that indicates whether the snapshot data is encrypted using the HSM keys of the source cluster. ``true`` indicates that the data is encrypted using HSM keys.
- **AccountsWithRestoreAccess** *(list) --*
A list of the AWS customer accounts authorized to restore the snapshot. Returns ``null`` if no accounts are authorized. Visible only to the snapshot owner.
- *(dict) --*
Describes an AWS customer account authorized to restore a snapshot.
- **AccountId** *(string) --*
The identifier of an AWS customer account authorized to restore a snapshot.
- **AccountAlias** *(string) --*
The identifier of an AWS support account authorized to restore a snapshot. For AWS support, the identifier is ``amazon-redshift-support`` .
- **OwnerAccount** *(string) --*
For manual snapshots, the AWS customer account used to create or copy the snapshot. For automatic snapshots, the owner of the cluster. The owner can perform all snapshot actions, such as sharing a manual snapshot.
- **TotalBackupSizeInMegaBytes** *(float) --*
The size of the complete set of backup data that would be used to restore the cluster.
- **ActualIncrementalBackupSizeInMegaBytes** *(float) --*
The size of the incremental backup.
- **BackupProgressInMegaBytes** *(float) --*
The number of megabytes that have been transferred to the snapshot backup.
- **CurrentBackupRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second being transferred to the snapshot backup. Returns ``0`` for a completed backup.
- **EstimatedSecondsToCompletion** *(integer) --*
The estimate of the time remaining before the snapshot backup will complete. Returns ``0`` for a completed backup.
- **ElapsedTimeInSeconds** *(integer) --*
The amount of time an in-progress snapshot backup has been running, or the amount of time it took a completed backup to finish.
- **SourceRegion** *(string) --*
The source region from which the snapshot was copied.
- **Tags** *(list) --*
The list of tags for the cluster snapshot.
- *(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **RestorableNodeTypes** *(list) --*
The list of node types that this cluster snapshot is able to restore into.
- *(string) --*
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track for the snapshot.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
- **ManualSnapshotRemainingDays** *(integer) --*
The number of days until a manual snapshot will pass its retention period.
- **SnapshotRetentionStartTime** *(datetime) --*
A timestamp representing the start of the retention period for the snapshot.
:type SourceSnapshotIdentifier: string
:param SourceSnapshotIdentifier: **[REQUIRED]**
The identifier for the source snapshot.
Constraints:
* Must be the identifier for a valid automated snapshot whose state is ``available`` .
:type SourceSnapshotClusterIdentifier: string
:param SourceSnapshotClusterIdentifier:
The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
Constraints:
* Must be the identifier for a valid cluster.
:type TargetSnapshotIdentifier: string
:param TargetSnapshotIdentifier: **[REQUIRED]**
The identifier given to the new manual snapshot.
Constraints:
* Cannot be null, empty, or blank.
* Must contain from 1 to 255 alphanumeric characters or hyphens.
* First character must be a letter.
* Cannot end with a hyphen or contain two consecutive hyphens.
* Must be unique for the AWS account that is making the request.
:type ManualSnapshotRetentionPeriod: integer
:param ManualSnapshotRetentionPeriod:
The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
The default value is -1.
:rtype: dict
:returns:
"""
pass
def create_cluster(self, ClusterIdentifier: str, NodeType: str, MasterUsername: str, MasterUserPassword: str, DBName: str = None, ClusterType: str = None, ClusterSecurityGroups: List = None, VpcSecurityGroupIds: List = None, ClusterSubnetGroupName: str = None, AvailabilityZone: str = None, PreferredMaintenanceWindow: str = None, ClusterParameterGroupName: str = None, AutomatedSnapshotRetentionPeriod: int = None, ManualSnapshotRetentionPeriod: int = None, Port: int = None, ClusterVersion: str = None, AllowVersionUpgrade: bool = None, NumberOfNodes: int = None, PubliclyAccessible: bool = None, Encrypted: bool = None, HsmClientCertificateIdentifier: str = None, HsmConfigurationIdentifier: str = None, ElasticIp: str = None, Tags: List = None, KmsKeyId: str = None, EnhancedVpcRouting: bool = None, AdditionalInfo: str = None, IamRoles: List = None, MaintenanceTrackName: str = None, SnapshotScheduleIdentifier: str = None) -> Dict:
"""
Creates a new cluster.
To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to `Amazon Redshift Clusters <https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html>`__ in the *Amazon Redshift Cluster Management Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/CreateCluster>`_
**Request Syntax**
::
response = client.create_cluster(
DBName='string',
ClusterIdentifier='string',
ClusterType='string',
NodeType='string',
MasterUsername='string',
MasterUserPassword='<PASSWORD>',
ClusterSecurityGroups=[
'string',
],
VpcSecurityGroupIds=[
'string',
],
ClusterSubnetGroupName='string',
AvailabilityZone='string',
PreferredMaintenanceWindow='string',
ClusterParameterGroupName='string',
AutomatedSnapshotRetentionPeriod=123,
ManualSnapshotRetentionPeriod=123,
Port=123,
ClusterVersion='string',
AllowVersionUpgrade=True|False,
NumberOfNodes=123,
PubliclyAccessible=True|False,
Encrypted=True|False,
HsmClientCertificateIdentifier='string',
HsmConfigurationIdentifier='string',
ElasticIp='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
KmsKeyId='string',
EnhancedVpcRouting=True|False,
AdditionalInfo='string',
IamRoles=[
'string',
],
MaintenanceTrackName='string',
SnapshotScheduleIdentifier='string'
)
**Response Syntax**
::
{
'Cluster': {
'ClusterIdentifier': 'string',
'NodeType': 'string',
'ClusterStatus': 'string',
'ModifyStatus': 'string',
'MasterUsername': 'string',
'DBName': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ClusterCreateTime': datetime(2015, 1, 1),
'AutomatedSnapshotRetentionPeriod': 123,
'ManualSnapshotRetentionPeriod': 123,
'ClusterSecurityGroups': [
{
'ClusterSecurityGroupName': 'string',
'Status': 'string'
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'ClusterParameterGroups': [
{
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'ClusterParameterStatusList': [
{
'ParameterName': 'string',
'ParameterApplyStatus': 'string',
'ParameterApplyErrorDescription': 'string'
},
]
},
],
'ClusterSubnetGroupName': 'string',
'VpcId': 'string',
'AvailabilityZone': 'string',
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'MasterUserPassword': '<PASSWORD>',
'NodeType': 'string',
'NumberOfNodes': 123,
'ClusterType': 'string',
'ClusterVersion': 'string',
'AutomatedSnapshotRetentionPeriod': 123,
'ClusterIdentifier': 'string',
'PubliclyAccessible': True|False,
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'EncryptionType': 'string'
| |
#!/usr/bin/env python
# coding: utf-8
# In this notebook, I delete a triple from the neighbourhood of the target triple based on the **IJCAI deletion scores**
#
# - neighbourhood refers to the triples that share the entities with target's entities
# - I get the deletion from both neighbourhood of s and o, then choose the one with higher score
#
#
# In[1]:
import pickle
from typing import Dict, Tuple, List
import os
import numpy as np
import pandas as pd
from collections import defaultdict
import operator
import json
import logging
import argparse
import math
from pprint import pprint
import errno
import time
import torch
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
import torch.autograd as autograd
from evaluation import evaluation
from model import Distmult, Complex, Conve, Transe
import utils
def get_nghbr_s_deletion(per_tr, test_trip, model, epsilon, lambda1):
    """Pick the subject-side neighbour whose deletion has the highest
    IJCAI influence score for the target triple.

    Returns a tuple of (index within the neighbour list, score of that
    neighbour, index within the training set, the triple itself).
    """
    subject = test_trip[0]
    # Neighbours of the subject: training triples containing it as head or tail.
    shares_subject = np.isin(per_tr[:, 0], [subject]) | np.isin(per_tr[:, 2], [subject])
    neighbour_idx = np.where(shares_subject)[0]
    neighbour_triples = per_tr[neighbour_idx]

    # Perturb the subject embedding along the negative gradient of the
    # target triple's score.
    target = torch.from_numpy(test_trip).to(device)[None, :]
    head, rel, tail = target[:, 0], target[:, 1], target[:, 2]
    emb_head = model.emb_e(head)
    emb_rel = model.emb_rel(rel)
    emb_tail = model.emb_e(tail)
    target_score = model.score_emb(emb_head, emb_rel, emb_tail)
    grad_head = autograd.grad(target_score, emb_head)
    shifted_head = emb_head + (-epsilon * grad_head[0])

    # Score every neighbour before and after the perturbation, in batches.
    if args.attack_batch_size == -1:
        batch_size = neighbour_triples.shape[0]
    else:
        batch_size = args.attack_batch_size
    scores = []
    start = 0
    while start < neighbour_triples.shape[0]:
        batch = torch.from_numpy(neighbour_triples[start:start + batch_size]).to(device)
        b_head, b_rel, b_tail = batch[:, 0], batch[:, 1], batch[:, 2]
        b_emb_rel = model.emb_rel(b_rel)
        b_emb_tail = model.emb_e(b_tail)
        plain = emb_head.repeat(b_head.shape[0], 1)
        shifted = shifted_head.repeat(b_head.shape[0], 1)
        before = model.score_emb(plain, b_emb_rel, b_emb_tail)
        after = model.score_emb(shifted, b_emb_rel, b_emb_tail)
        scores += (before - lambda1 * after).detach().cpu().numpy().tolist()
        start += batch_size

    ranked = torch.from_numpy(np.array(scores)).to(device)
    # Delete the neighbour with the maximum influence score.
    top_vals, order = torch.sort(ranked, -1, descending=True)
    del_idx = order[0].item()          # position within the neighbour list
    max_val = top_vals[0].item()       # influence score of that neighbour
    trip_idx = neighbour_idx[del_idx]  # position within the training set
    del_trip = neighbour_triples[del_idx]
    return del_idx, max_val, trip_idx, del_trip
def get_nghbr_o_deletion(per_tr, test_trip, model, epsilon, lambda1):
    """Pick the object-side neighbour whose deletion has the highest
    IJCAI influence score for the target triple.

    Returns a tuple of (index within the neighbour list, score of that
    neighbour, index within the training set, the triple itself).
    """
    obj = test_trip[2]
    # Neighbours of the object: training triples containing it as head or tail.
    shares_object = np.isin(per_tr[:, 0], [obj]) | np.isin(per_tr[:, 2], [obj])
    neighbour_idx = np.where(shares_object)[0]
    neighbour_triples = per_tr[neighbour_idx]

    # Perturb the object embedding along the negative gradient of the
    # target triple's score.
    target = torch.from_numpy(test_trip).to(device)[None, :]
    head, rel, tail = target[:, 0], target[:, 1], target[:, 2]
    emb_head = model.emb_e(head)
    emb_rel = model.emb_rel(rel)
    emb_tail = model.emb_e(tail)
    target_score = model.score_emb(emb_head, emb_rel, emb_tail)
    grad_tail = autograd.grad(target_score, emb_tail)
    shifted_tail = emb_tail + (-epsilon * grad_tail[0])

    # Score every neighbour before and after the perturbation, in batches.
    if args.attack_batch_size == -1:
        batch_size = neighbour_triples.shape[0]
    else:
        batch_size = args.attack_batch_size
    scores = []
    start = 0
    while start < neighbour_triples.shape[0]:
        batch = torch.from_numpy(neighbour_triples[start:start + batch_size]).to(device)
        b_head, b_rel, b_tail = batch[:, 0], batch[:, 1], batch[:, 2]
        b_emb_head = model.emb_e(b_head)
        b_emb_rel = model.emb_rel(b_rel)
        plain = emb_tail.repeat(b_head.shape[0], 1)
        shifted = shifted_tail.repeat(b_head.shape[0], 1)
        before = model.score_emb(b_emb_head, b_emb_rel, plain)
        after = model.score_emb(b_emb_head, b_emb_rel, shifted)
        scores += (before - lambda1 * after).detach().cpu().numpy().tolist()
        start += batch_size

    ranked = torch.from_numpy(np.array(scores)).to(device)
    # Delete the neighbour with the maximum influence score.
    top_vals, order = torch.sort(ranked, -1, descending=True)
    del_idx = order[0].item()          # position within the neighbour list
    max_val = top_vals[0].item()       # influence score of that neighbour
    trip_idx = neighbour_idx[del_idx]  # position within the training set
    del_trip = neighbour_triples[del_idx]
    return del_idx, max_val, trip_idx, del_trip
def generate_nghbrs(test_set, train_set):
    """For every triple in the test set, return the indices (into the
    training set) of its neighbouring triples, i.e. training triples that
    share the subject or the object entity with the test triple.

    Returns a dict mapping test-set row index -> np.ndarray of
    training-set row indices.
    """
    heads, tails = train_set[:, 0], train_set[:, 2]
    neighbours = {}
    for idx, triple in enumerate(test_set):
        entities = [triple[0], triple[2]]
        shares_entity = np.isin(heads, entities) | np.isin(tails, entities)
        neighbours[idx] = np.where(shares_entity)[0]
    return neighbours
def get_deletions(train_data, test_data, neighbours, model, attack_batch_size, args):
    """Choose one adversarial deletion per target triple.

    For each target triple, score candidate deletions on both the subject
    side and the object side (IJCAI influence scores) and keep the
    higher-scoring candidate.

    Parameters
    -----------
    train_data: np.ndarray of training triples, shape (n, 3).
    test_data: np.ndarray of target triples, shape (m, 3).
    neighbours: dict of target index -> training-set neighbour indices.
        Kept for interface compatibility; the per-side helpers recompute
        their own neighbourhoods, so it is not consumed here.
    model: trained KGE model used to score candidates.
    attack_batch_size: unused here; the helpers read args.attack_batch_size.
    args: parsed arguments providing epsilon and lambda1.

    Returns a list with one triple (np.ndarray row) to delete per target.
    """
    logger.info('------ Generating edits per target triple ------')
    start_time = time.time()
    logger.info('Start time: {0}'.format(str(start_time)))
    triples_to_delete = []
    for test_idx, test_trip in enumerate(test_data):
        # Fix: dropped the per-iteration `neighbours[test_idx]` /
        # `train_data[...]` slicing - it was computed but never used, wasting
        # an O(n) copy per target triple.
        _, max_val_s, _, del_trip_s = get_nghbr_s_deletion(train_data, test_trip,
                                                           model, args.epsilon, args.lambda1)
        _, max_val_o, _, del_trip_o = get_nghbr_o_deletion(train_data, test_trip,
                                                           model, args.epsilon, args.lambda1)
        # Keep whichever side offers the more influential deletion.
        if max_val_s > max_val_o:
            del_trip = del_trip_s
        else:
            del_trip = del_trip_o
        triples_to_delete.append(del_trip)
        if test_idx % 100 == 0 or test_idx == test_data.shape[0] - 1:
            logger.info('Processed test triple {0}'.format(str(test_idx)))
            logger.info('Time taken: {0}'.format(str(time.time() - start_time)))
    logger.info('Time taken to generate edits: {0}'.format(str(time.time() - start_time)))
    return triples_to_delete
if __name__ == '__main__':
parser = utils.get_argument_parser()
parser.add_argument('--target-split', type=str, default='0_100_1', help='Ranks to use for target set. Values are 0 for ranks==1; 1 for ranks <=10; 2 for ranks>10 and ranks<=100. Default: 1')
parser.add_argument('--budget', type=int, default=1, help='Budget for each target triple for each corruption side')
parser.add_argument('--rand-run', type=int, default=1, help='A number assigned to the random run of experiment')
parser.add_argument('--attack-batch-size', type=int, default=-1, help='Batch size for processing neighbours of target')
parser.add_argument('--epsilon', type=int, default=1, help='Value of epsilon multiplier in IJCAI delete attack')
parser.add_argument('--lambda1', type=int, default=1, help='Value of lambda1 in IJCAI delete attack')
# In[5]:
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# args.target_split = '0_100_1' # which target split to use
# #Values are 1 for ranks <=10; 2 for ranks>10 and ranks<=100.
# args.budget = 1 #indicates the num of adversarial edits for each target triple for each corruption side
# args.rand_run = 1 # a number assigned to the random run of the experiment
args.seed = args.seed + (args.rand_run - 1) # default seed is 17
# args.model = 'distmult'
# args.data = 'WN18RR'
# args.reproduce_results = True
if args.reproduce_results:
args = utils.set_hyperparams(args)
# In[7]:
# Fixing random seeds for reproducibility -https://pytorch.org/docs/stable/notes/randomness.html
torch.manual_seed(args.seed)
cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)
rng = np.random.default_rng(seed=args.seed)
args.epochs = -1 #no training here
model_name = '{0}_{1}_{2}_{3}_{4}'.format(args.model, args.embedding_dim, args.input_drop, args.hidden_drop, args.feat_drop)
model_path = 'saved_models/{0}_{1}.model'.format(args.data, model_name)
log_path = 'logs/attack_logs/ijcai_del_{0}_{1}_{2}_{3}_{4}'.format( args.model, args.data,
args.target_split, args.budget, args.rand_run)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO,
filename = log_path
)
logger = logging.getLogger(__name__)
data_path = 'data/target_{0}_{1}_{2}'.format(args.model, args.data, args.target_split)
n_ent, n_rel, ent_to_id, rel_to_id = utils.generate_dicts(data_path)
##### load data####
data = utils.load_data(data_path)
train_data, valid_data, test_data = data['train'], data['valid'], data['test']
inp_f = open(os.path.join(data_path, 'to_skip_eval.pickle'), 'rb')
to_skip_eval: Dict[str, Dict[Tuple[int, int], List[int]]] = pickle.load(inp_f)
inp_f.close()
to_skip_eval['lhs'] = {(int(k[0]), int(k[1])): v for k,v in to_skip_eval['lhs'].items()}
to_skip_eval['rhs'] = {(int(k[0]), int(k[1])): v for k,v in to_skip_eval['rhs'].items()}
model = utils.load_model(model_path, args, n_ent, n_rel, device)
neighbours = generate_nghbrs(test_data, train_data)
# test set is the target set because we loaded data from target_...
triples_to_delete = get_deletions(train_data, test_data, neighbours,
model, args.attack_batch_size,
args)
df = pd.DataFrame(data=triples_to_delete)
df = df.drop_duplicates()
# print(df.shape)
trips_to_delete = df.values
# print(trips_to_delete.shape)
num_duplicates = len(triples_to_delete) - trips_to_delete.shape[0]
# print(num_duplicates)
per_tr_1, n_ignored_edits = utils.perturb_data(train_data,
trips_to_delete)
logger.info('Shape of perturbed training set: {0}'.format(per_tr_1.shape))
logger.info('Number of adversarial deletions ignored (because of singleton nodes): {0}'.format(n_ignored_edits))
logger.info('Number of duplicate adversarial deletions : {0}'.format(num_duplicates))
logger.info ('Length of original training set: ' + str(train_data.shape[0]))
logger.info ('Length of new poisoned training set: ' + str(per_tr_1.shape[0]))
save_path = 'data/ijcai_del_{0}_{1}_{2}_{3}_{4}'.format( args.model, args.data,
args.target_split, args.budget, args.rand_run)
try :
os.makedirs(save_path)
except OSError as e:
if e.errno == errno.EEXIST:
logger.info(e)
logger.info('Using the existing folder {0} for processed data'.format(save_path))
else:
raise
new_train = per_tr_1
num_en_or = np.unique(np.concatenate((train_data[:,0], train_data[:,2]))).shape[0]
num_en_pos = np.unique(np.concatenate((new_train[:,0], new_train[:,2]))).shape[0]
with open(os.path.join(save_path, 'train.txt'), 'w') as out:
for item in new_train:
out.write("%s\n" % "\t".join(map(str, item)))
out = open(os.path.join(save_path, 'train.pickle'), 'wb')
pickle.dump(new_train.astype('uint64'), out)
out.close()
with open(os.path.join(save_path, 'entities_dict.json'), 'w') as f:
f.write(json.dumps(ent_to_id) + '\n')
with open(os.path.join(save_path, 'relations_dict.json'), 'w') | |
= value
# --- Generated accessors for the uncertain_state_set and set child-element lists ---
# NOTE: despite the name, insert_* overwrites the item at `index` (list item
# assignment); it does not shift elements like list.insert would.
def get_uncertain_state_set(self): return self.uncertain_state_set
def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
def get_set(self): return self.set
def set_set(self, set): self.set = set
def add_set(self, value): self.set.append(value)
def insert_set(self, index, value): self.set[index] = value
def export(self, outfile, level, namespace_='', name_='StandardStates', namespacedef_=''):
    """Serialize this element as XML to `outfile` at the given indent level."""
    showIndent(outfile, level)
    outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
    # already_processed tracks attribute names so they are emitted only once.
    already_processed = []
    self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardStates')
    if self.hasContent_():
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    else:
        # No children: emit a self-closing tag.
        outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardStates'):
    """Delegate attribute serialization to the superclass (no attributes of its own)."""
    super(StandardStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardStates')
def exportChildren(self, outfile, level, namespace_='', name_='StandardStates', fromsubclass_=False):
    """Serialize each child-element list, in schema order, as nested XML."""
    for meta_ in self.get_meta():
        meta_.export(outfile, level, namespace_, name_='meta')
    for state_ in self.state:
        state_.export(outfile, level, namespace_, name_='state')
    for polymorphic_state_set_ in self.polymorphic_state_set:
        polymorphic_state_set_.export(outfile, level, namespace_, name_='polymorphic_state_set')
    for uncertain_state_set_ in self.uncertain_state_set:
        uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
    for set_ in self.set:
        set_.export(outfile, level, namespace_, name_='set')
def hasContent_(self):
    """Report whether this element has any child content to serialize."""
    own_children = bool(
        self.meta
        or self.state
        or self.polymorphic_state_set
        or self.uncertain_state_set
        or self.set
    )
    # Fall back to inherited content when none of the local lists is populated.
    return own_children or bool(super(StandardStates, self).hasContent_())
def exportLiteral(self, outfile, level, name_='StandardStates'):
    """Write this element as Python-literal constructor text (attributes, then children)."""
    level += 1
    self.exportLiteralAttributes(outfile, level, [], name_)
    if self.hasContent_():
        self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
    """Delegate literal attribute output to the superclass (no attributes of its own)."""
    super(StandardStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
    """Write the child lists as Python-literal constructor text.

    Each child list is rendered as ``<field>=[ model_.<Class>( ... ), ],``.
    The original generated code repeated the same stanza five times; this
    data-driven loop produces byte-identical output.
    """
    super(StandardStates, self).exportLiteralChildren(outfile, level, name_)
    # (field name, literal class name, items) in schema order.
    child_specs = (
        ('meta', 'Meta', self.meta),
        ('state', 'StandardState', self.state),
        ('polymorphic_state_set', 'StandardPolymorphicStateSet', self.polymorphic_state_set),
        ('uncertain_state_set', 'StandardUncertainStateSet', self.uncertain_state_set),
        ('set', 'StateSet', self.set),
    )
    for field_name, class_name, children in child_specs:
        showIndent(outfile, level)
        outfile.write('%s=[\n' % field_name)
        level += 1
        for child in children:
            showIndent(outfile, level)
            outfile.write('model_.%s(\n' % class_name)
            child.exportLiteral(outfile, level, name_=class_name)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node):
    """Populate this object from an ElementTree node and its children."""
    self.buildAttributes(node, node.attrib, [])
    for child in node:
        # Strip any namespace prefix from the tag to get the local element name.
        nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
        self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
    """Delegate attribute parsing to the superclass (no attributes of its own)."""
    super(StandardStates, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    """Dispatch a parsed child XML node to the matching typed child list."""
    if nodeName_ == 'meta':
        # An xsi:type (or bare "type") attribute selects the concrete Meta
        # subclass by name; the class must exist in this module's globals.
        type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
        if type_name_ is None:
            type_name_ = child_.attrib.get('type')
        if type_name_ is not None:
            # Drop an optional namespace prefix ("ns:Class" -> "Class").
            type_names_ = type_name_.split(':')
            if len(type_names_) == 1:
                type_name_ = type_names_[0]
            else:
                type_name_ = type_names_[1]
            class_ = globals()[type_name_]
            obj_ = class_.factory()
            obj_.build(child_)
        else:
            raise NotImplementedError(
                'Class not implemented for <meta> element')
        self.meta.append(obj_)
    elif nodeName_ == 'state':
        obj_ = StandardState.factory()
        obj_.build(child_)
        self.state.append(obj_)
    elif nodeName_ == 'polymorphic_state_set':
        obj_ = StandardPolymorphicStateSet.factory()
        obj_.build(child_)
        self.polymorphic_state_set.append(obj_)
    elif nodeName_ == 'uncertain_state_set':
        obj_ = StandardUncertainStateSet.factory()
        obj_.build(child_)
        self.uncertain_state_set.append(obj_)
    elif nodeName_ == 'set':
        obj_ = StateSet.factory()
        obj_.build(child_)
        self.set.append(obj_)
# end class StandardStates
class StandardState(AbstractState):
    """This is a concrete implementation of the state element, which
    requires a symbol element, in this case restricted to integers,
    and optional mapping elements to refer to other states."""
    subclass = None
    superclass = AbstractState

    def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
        super(StandardState, self).__init__(about, meta, label, id, symbol, )
        self.symbol = _cast(None, symbol)
        # Each instance gets its own meta list so no mutable default is shared.
        if meta is None:
            self.meta = []
        else:
            self.meta = meta

    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed, else this class.
        if StandardState.subclass:
            return StandardState.subclass(*args_, **kwargs_)
        else:
            return StandardState(*args_, **kwargs_)
    factory = staticmethod(factory)

    # --- Generated accessors ---
    def get_meta(self): return self.meta
    def set_meta(self, meta): self.meta = meta
    def add_meta(self, value): self.meta.append(value)
    def insert_meta(self, index, value): self.meta[index] = value
    def get_symbol(self): return self.symbol
    def set_symbol(self, symbol): self.symbol = symbol

    def validate_StandardToken(self, value):
        # Validate type StandardToken, a restriction on xs:integer.
        pass  # generated stub: no constraint enforced

    def export(self, outfile, level, namespace_='', name_='StandardState', namespacedef_=''):
        """Serialize this element as XML to `outfile` at the given indent level."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='StandardState')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StandardState'):
        """Write inherited attributes, then the local `symbol` attribute."""
        super(StandardState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StandardState')
        if self.symbol is not None and 'symbol' not in already_processed:
            already_processed.append('symbol')
            outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))

    def exportChildren(self, outfile, level, namespace_='', name_='StandardState', fromsubclass_=False):
        """Serialize nested meta children."""
        for meta_ in self.get_meta():
            meta_.export(outfile, level, namespace_, name_='meta')

    def hasContent_(self):
        """Report whether this element has any child content to serialize."""
        if (
            self.meta or
            super(StandardState, self).hasContent_()
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='StandardState'):
        """Write this element as Python-literal constructor text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.symbol is not None and 'symbol' not in already_processed:
            already_processed.append('symbol')
            showIndent(outfile, level)
            outfile.write('symbol = %d,\n' % (self.symbol,))
        super(StandardState, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(StandardState, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('meta=[\n')
        level += 1
        for meta_ in self.meta:
            showIndent(outfile, level)
            outfile.write('model_.Meta(\n')
            meta_.exportLiteral(outfile, level, name_='Meta')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node):
        """Populate this object from an ElementTree node and its children."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        """Parse the `symbol` attribute (integer), then inherited attributes."""
        value = find_attr_value_('symbol', node)
        if value is not None and 'symbol' not in already_processed:
            already_processed.append('symbol')
            try:
                self.symbol = int(value)
            # FIX: was Python-2-only "except ValueError, exp:"; the "as" form
            # is valid on Python 2.6+ and required on Python 3.
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
            self.validate_StandardToken(self.symbol)    # validate type StandardToken
        super(StandardState, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch a parsed <meta> child to the typed meta list."""
        if nodeName_ == 'meta':
            # An xsi:type (or bare "type") attribute selects the concrete Meta
            # subclass by name from this module's globals.
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <meta> element')
            self.meta.append(obj_)
# end class StandardState
class RNAChar(AbstractChar):
"""A concrete implementation of the AbstractChar element, i.e. a single
column in an alignment."""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
    """Build an RNAChar, casting XML attribute values and defaulting meta to a fresh list."""
    super(RNAChar, self).__init__(about, meta, label, id, tokens, states, codon, )
    self.tokens = _cast(None, tokens)
    self.states = _cast(None, states)
    self.codon = _cast(None, codon)
    self.id = _cast(None, id)
    # Each instance gets its own meta list so no mutable default is shared.
    if meta is None:
        self.meta = []
    else:
        self.meta = meta
def factory(*args_, **kwargs_):
    # Instantiate the registered subclass when one is installed, else RNAChar itself.
    if RNAChar.subclass:
        return RNAChar.subclass(*args_, **kwargs_)
    else:
        return RNAChar(*args_, **kwargs_)
factory = staticmethod(factory)  # expose as a no-instance constructor hook
# --- Generated accessors for child elements and XML attributes ---
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value  # NOTE: overwrites at index, does not shift
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
    # Validate type MSTokenLength, a restriction on xs:positiveInteger.
    pass  # generated stub: no constraint enforced
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
    # Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
    pass  # generated stub: no constraint enforced
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, outfile, level, namespace_='', name_='RNAChar', namespacedef_=''):
    """Serialize this element as XML to `outfile` at the given indent level."""
    showIndent(outfile, level)
    outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
    already_processed = []
    self.exportAttributes(outfile, level, already_processed, namespace_, name_='RNAChar')
    if self.hasContent_():
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    else:
        # No children: emit a self-closing tag.
        outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RNAChar'):
    """Write inherited attributes, then tokens/states/codon/id if present."""
    super(RNAChar, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RNAChar')
    if self.tokens is not None and 'tokens' not in already_processed:
        already_processed.append('tokens')
        outfile.write(' tokens=%s' % (quote_attrib(self.tokens), ))
    if self.states is not None and 'states' not in already_processed:
        already_processed.append('states')
        # String-valued attributes are quoted then encoded via ExternalEncoding.
        outfile.write(' states=%s' % (self.gds_format_string(quote_attrib(self.states).encode(ExternalEncoding), input_name='states'), ))
    if self.codon is not None and 'codon' not in already_processed:
        already_processed.append('codon')
        outfile.write(' codon=%s' % (quote_attrib(self.codon), ))
    if self.id is not None and 'id' not in already_processed:
        already_processed.append('id')
        outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RNAChar', fromsubclass_=False):
    """Serialize nested meta children."""
    for meta_ in self.get_meta():
        meta_.export(outfile, level, namespace_, name_='meta')
def hasContent_(self):
    """Report whether this element has any child content to serialize."""
    if self.meta:
        return True
    # Fall back to inherited content when the local meta list is empty.
    return bool(super(RNAChar, self).hasContent_())
def exportLiteral(self, outfile, level, name_='RNAChar'):
    """Write this element as Python-literal constructor text (attributes, then children)."""
    level += 1
    self.exportLiteralAttributes(outfile, level, [], name_)
    if self.hasContent_():
        self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tokens is not None and 'tokens' not in already_processed:
already_processed.append('tokens')
showIndent(outfile, level)
outfile.write('tokens = %d,\n' % (self.tokens,))
if self.states is not None and 'states' not in already_processed:
already_processed.append('states')
showIndent(outfile, level)
outfile.write('states = "%s",\n' % (self.states,))
if self.codon is not None and 'codon' not in already_processed:
already_processed.append('codon')
showIndent(outfile, level)
outfile.write('codon = %d,\n' % | |
import asyncio
import copy
import logging
import talib as ta
from .exceptions import NotImplementedException
from sklearn.cluster import KMeans, DBSCAN, MeanShift
from sklearn.metrics import silhouette_score
import pandas as pd
import numpy as np
from itertools import groupby
from operator import itemgetter
from .utils import time_scale_to_milisecond
class Analyzer():
"""
The duty of Analyzer class is to provide analysis objects.
It is configurable via the config file
Use case does not require multiple instance
"""
# This initiation may not be needed
# TODO: Normally lambda functions would be quite useful to have one-liner functions,
# however they are not "awaitable". Thus each one-liner lambda expression should be an awaitable method
def __init__(self, _config):
    """Store the configuration and prepare per-run analysis state."""
    self.logger = logging.getLogger('app.{}'.format(__name__))
    self.config = _config
    # Holds the candle DataFrame currently being analyzed (set per cycle).
    self.current_time_df = {}
async def sample_analyzer(self, data_dict):
    """Run every configured indicator coroutine per pair/time-scale.

    Returns {pair: {time_scale: {indicator_name: output}}} following the
    order of config['analysis']['indicators'].
    """
    analysis_dict = dict()
    for pair, data_obj in data_dict.items():
        analysis_obj = dict()
        for time_scale, time_df in data_obj.items():
            self.current_time_df = copy.deepcopy(time_df)
            # Coroutines are single-use, so they are rebuilt on every cycle.
            coroutines = []
            for indicator_key in self.config['analysis']['indicators'].keys():
                method_name = '_ind_' + indicator_key
                if not hasattr(self, method_name):
                    raise RuntimeError(f'Unknown indicator: "{method_name}"')
                coroutines.append(getattr(self, method_name)())
            outputs = list(await asyncio.gather(*coroutines))
            # Pair each configured indicator key with its computed output.
            # (pd.Series results are expected to be cast to list by the indicators.)
            analysis_obj[time_scale] = dict(zip(self.config['analysis']['indicators'].keys(), outputs))
        analysis_dict[pair] = analysis_obj
    return analysis_dict
async def visual_analysis(self, data_dict):
    """Run the visualization-configured indicator AND pattern coroutines.

    Returns {pair: {time_scale: {name: output}}}; indicator keys come first,
    then pattern names, matching the gather order.
    """
    analysis_dict = dict()
    for pair, data_obj in data_dict.items():
        analysis_obj = dict()
        for time_scale, time_df in data_obj.items():
            self.current_time_df = copy.deepcopy(time_df)
            # Coroutines are single-use, so they are rebuilt on every cycle.
            coroutines = []
            for indicator_key in self.config['visualization']['indicators'].keys():
                method_name = '_ind_' + indicator_key
                if not hasattr(self, method_name):
                    raise RuntimeError(f'Unknown indicator: "{method_name}"')
                coroutines.append(getattr(self, method_name)())
            # Patterns take no configuration arguments.
            for pattern_key in self.config['visualization']['patterns']:
                method_name = '_pat_' + pattern_key
                if not hasattr(self, method_name):
                    raise RuntimeError(f'Unknown pattern: "{method_name}"')
                coroutines.append(getattr(self, method_name)())
            outputs = list(await asyncio.gather(*coroutines))
            result_keys = list(self.config['visualization']['indicators'].keys()) + self.config['visualization']['patterns']
            analysis_obj[time_scale] = dict(zip(result_keys, outputs))
        analysis_dict[pair] = analysis_obj
    return analysis_dict
# Analyzers
async def _ind_market_classifier(self):
    """Classify candles into uptrend/downtrend spans via a configured indicator.

    The underlying indicator is chosen by
    config['visualization']['indicators']['market_classifier']; the branches
    below handle '_ind_aroonosc' and '_ind_fractal_aroon'. Returns a dict of
    {'downtrend': [...], 'uptrend': [...], 'is_daydiff': int, 'is_lastidx': int}
    (implicitly None when the configured indicator method does not exist).
    """
    # TODO: Market status receives the name of some other indicators and runs
    # a secondary analysis.
    # Maybe the secondary analysis such as S/R levels should be put under
    # another category
    analyzer = "_ind_" + self.config['visualization']['indicators']['market_classifier']
    if hasattr(self, analyzer):
        analysis_output = await getattr(self, analyzer)()
        classification = {}
        if analyzer == '_ind_aroonosc':
            # Oscillator sign splits candle indices into trend classes.
            uptrend_filter = np.where(np.array(analysis_output) > 0)[0]
            downtrend_filter = np.where(np.array(analysis_output) < 0)[0]
            classification = {'downtrend':downtrend_filter, 'uptrend':uptrend_filter}
        elif analyzer == '_ind_fractal_aroon':
            # Aroon up/down above 80 marks strong-trend candles (NaNs -> 0).
            uptrend_filter = np.where(np.nan_to_num(analysis_output['aroonup']) > 80)[0]
            downtrend_filter = np.where(np.nan_to_num(analysis_output['aroondown']) > 80)[0]
            classification = {'downtrend':downtrend_filter, 'uptrend':uptrend_filter}
        ts_index = self.current_time_df.index
        result = {}
        # TODO: Make validation counter generic
        validation_counter = 5
        for class_name, filter_idx in classification.items():
            class_item_list = []
            # Group consecutive candle indices into contiguous runs
            # (index - value is constant within a consecutive run).
            for k, g in groupby(enumerate(filter_idx), lambda ix: ix[0] - ix[1]):
                seq_idx = list(map(itemgetter(1), g))
                # NOTE: If the seq. length is 1 it will not be displayed. Apply "seq_idx[-1]+1" if you need to
                #if len(seq_idx) >= validation_counter:
                #    class_item = {'start':ts_index[seq_idx[0]], 'end':ts_index[seq_idx[-1]], 'validation_point':ts_index[seq_idx[0]+validation_counter -1]}
                #    class_item_list.append(class_item)
                class_item = {'start':ts_index[seq_idx[0]], 'end':ts_index[seq_idx[-1]]}
                class_item_list.append(class_item)
            result[class_name] = class_item_list
        '''
        Sample: result
        {
            downtrend:[
                {
                    start_ts:
                    end_ts:
                    validation_point:
                },
                ...
            ]
        }
        '''
        # If the last closed candle is in an uptrend, its 'end' equals the last
        # timestamp, so the day-diff is 1 (timestamps assumed in milliseconds).
        # NOTE(review): the two lines below assume the '_ind_fractal_aroon'
        # branch ran (dict output with 'aroonup') and that at least one uptrend
        # span exists — KeyError/IndexError otherwise. Confirm intended usage.
        result['is_daydiff']=int((self.current_time_df.index[-1] - result['uptrend'][-1]['end'])/time_scale_to_milisecond('1d'))
        result['is_lastidx']=int(analysis_output['aroonup'][-1] > 80)
        return result
async def _ind_fractal_aroon(self):
    """Aroon(25) computed on backfilled 3-candle fractal lines instead of raw high/low."""
    fractal_line = await self._ind_fractal_line_3()
    # ta.AROON takes (high, low): bearish fractals act as highs, bullish as lows.
    aroondown, aroonup = ta.AROON(pd.Series(fractal_line['bearish']), pd.Series(fractal_line['bullish']), timeperiod=25)
    return {'aroonup':list(aroonup), 'aroondown': list(aroondown)}
async def _ind_fractal_aroonosc(self):
    """Aroon oscillator (25) on the backfilled 3-candle fractal lines."""
    fractal_line = await self._ind_fractal_line_3()
    return list(ta.AROONOSC(pd.Series(fractal_line['bearish']), pd.Series(fractal_line['bullish']), timeperiod=25))
async def _ind_fractal_line_3(self):
    """Backfill 3-candle fractal points into continuous bearish/bullish lines."""
    lines = {}
    # bfill() carries the next known fractal backwards over the NaN gaps.
    lines['bearish'] = list(pd.Series(await self._pat_bearish_fractal_3()).bfill())
    lines['bullish'] = list(pd.Series(await self._pat_bullish_fractal_3()).bfill())
    return lines
async def _ind_support_dbscan(self):
    """Cluster bullish 3-candle fractal prices with DBSCAN into support levels.

    Returns a list of {'validation_point': last-candle-index, 'centroids': [prices]}
    dicts, one per cluster; DBSCAN's noise label (-1) is skipped.
    """
    # NaNs become 0 so array indices stay aligned with candle positions.
    bullish_frac = np.nan_to_num(await self._pat_bullish_fractal_3()).reshape(-1,1)
    #bullish_frac = bullish_frac[~np.isnan(bullish_frac)].reshape(-1,1)
    # Perform unidimentional clustering
    eps = float(max(bullish_frac)* 0.005) # NOTE: Band of %0.5 unless optimized
    dbscan = DBSCAN(eps=eps, min_samples=3) # It requires at least 3 point to call a cluster a region of s/r
    dbscan_bull = dbscan.fit_predict(bullish_frac)
    cls_tokens = np.unique(dbscan_bull)
    sup_levels = []
    for token in cls_tokens:
        if token != -1:  # -1 is DBSCAN's noise label
            indices = np.where(dbscan_bull == token)
            sup_level = {}
            sup_level['validation_point'] = indices[0][-1]  # last member's candle index
            sup_level['centroids'] = bullish_frac[indices].reshape(1,-1)[0].tolist()
            sup_levels.append(sup_level)
    return sup_levels
async def _ind_resistance_dbscan(self):
    """Cluster bearish 3-candle fractal prices with DBSCAN into resistance levels.

    Mirrors _ind_support_dbscan: returns a list of
    {'validation_point': last-candle-index, 'centroids': [prices]} per cluster.
    """
    # NOTE: In order to yield validation points, nan values are assigned to 0.
    # They are visualized but not in the appeared window
    bearish_frac = np.nan_to_num(await self._pat_bearish_fractal_3()).reshape(-1,1)
    #bearish_frac = bearish_frac[~np.isnan(bearish_frac)].reshape(-1,1)
    # Perform unidimentional clustering
    # TODO: NEXT: Find an algorithmic way of calculating epsilon
    #       Because the epsilon values needs to be changed based on timeframe
    eps = float(max(bearish_frac)* 0.005) # NOTE: Band of %0.5 unless optimized
    dbscan = DBSCAN(eps=eps, min_samples=3) # It requires at least 3 point to call a cluster a region of s/r
    dbscan_bear = dbscan.fit_predict(bearish_frac)
    cls_tokens = np.unique(dbscan_bear)
    res_levels = []
    for token in cls_tokens:
        if token != -1:  # -1 is DBSCAN's noise label
            indices = np.where(dbscan_bear == token)
            res_level = {}
            res_level['validation_point'] = indices[0][-1]  # last member's candle index
            res_level['centroids'] = bearish_frac[indices].reshape(1,-1)[0].tolist()
            res_levels.append(res_level)
    return res_levels
async def _ind_support_mshift(self):
    """Cluster bullish fractal prices with MeanShift; return member prices per cluster."""
    bullish_frac = np.array(await self._pat_bullish_fractal_3())
    # Unlike the DBSCAN variant, NaNs are dropped here (no index alignment kept).
    bullish_frac = bullish_frac[~np.isnan(bullish_frac)].reshape(-1,1)
    # Perform unidimentional clustering
    ms = MeanShift()
    ms_bull = ms.fit_predict(bullish_frac)
    #aa = ms.cluster_centers_
    cls_tokens = np.unique(ms_bull)
    bullish_centroids = []
    for token in cls_tokens:
        if token != -1:  # MeanShift labels are non-negative; guard kept for symmetry
            bullish_centroids.append(bullish_frac[np.where(ms_bull == token)].reshape(1,-1)[0].tolist())
    return bullish_centroids
async def _ind_resistance_mshift(self):
    """Cluster bearish fractal prices with MeanShift; return member prices per cluster."""
    bearish_frac = np.array(await self._pat_bearish_fractal_3())
    # NaNs are dropped (no index alignment kept), unlike the DBSCAN variant.
    bearish_frac = bearish_frac[~np.isnan(bearish_frac)].reshape(-1,1)
    # Perform unidimentional clustering
    ms = MeanShift()
    ms_bear = ms.fit_predict(bearish_frac)
    #aa = ms.cluster_centers_
    cls_tokens = np.unique(ms_bear)
    bearish_centroids = []
    for token in cls_tokens:
        if token != -1:  # MeanShift labels are non-negative; guard kept for symmetry
            bearish_centroids.append(bearish_frac[np.where(ms_bear == token)].reshape(1,-1)[0].tolist())
    return bearish_centroids
async def _ind_kmeans(self):
    """KMeans(5) clustering of low and high prices separately.

    Returns {'high_cls': [...], 'low_cls': [...]} where each entry is a list
    of clusters, each cluster being the list of member prices.
    """
    # Obtain the (time,high) and (time,low) pairs and merge
    lows = np.array(self.current_time_df['low']).reshape(-1,1)
    highs = np.array(self.current_time_df['high']).reshape(-1,1)
    # Perform unidimentional clustering
    km = KMeans(
        n_clusters=5, init='random',
        n_init=13, max_iter=300,
        tol=1e-04, random_state=0
    )
    # TODO: Filter out the anomalies
    # Low Cluster
    y_km = km.fit_predict(lows)
    low_clusters = km.cluster_centers_[:,0]  # NOTE: computed but not returned
    cls_tokens = np.unique(y_km)
    cls_centroids = []
    for token in cls_tokens:
        cls_centroids.append(lows[np.where(y_km == token)].reshape(1,-1)[0].tolist())
    # High Cluster (the same estimator object is refit on highs)
    y_km = km.fit_predict(highs)
    high_clusters = km.cluster_centers_[:,0]  # NOTE: computed but not returned
    high_cls_tokens = np.unique(y_km)
    high_cls_centroids = []
    for token in high_cls_tokens:
        high_cls_centroids.append(highs[np.where(y_km == token)].reshape(1,-1)[0].tolist())
    return {'high_cls':high_cls_centroids, 'low_cls':cls_centroids}
def is_resistance(serie):
    """Return the local-maximum price of a 3- or 5-candle window, else NaN.

    Used as a rolling-apply callback (note: no `self`; it is referenced as
    `Analyzer.is_resistance`). A 3-window is a peak at the middle candle; a
    5-window requires strictly rising then falling highs around the center.
    """
    if len(serie) == 3 and serie.iloc[0] < serie.iloc[1] > serie.iloc[2]:
        return serie.iloc[1]
    elif len(serie) == 5 and serie.iloc[0] < serie.iloc[1] < serie.iloc[2] > serie.iloc[3] > serie.iloc[4]:
        return serie.iloc[2]
    # FIX: the np.NaN alias was removed in NumPy 2.0; np.nan is the portable spelling.
    return np.nan
def is_support(serie):
    """Return the local-minimum price of a 3- or 5-candle window, else NaN.

    Used as a rolling-apply callback (note: no `self`; it is referenced as
    `Analyzer.is_support`). A 3-window is a trough at the middle candle; a
    5-window requires strictly falling then rising lows around the center.
    """
    if len(serie) == 3 and serie.iloc[0] > serie.iloc[1] < serie.iloc[2]:
        return serie.iloc[1]
    elif len(serie) == 5 and serie.iloc[0] > serie.iloc[1] > serie.iloc[2] < serie.iloc[3] < serie.iloc[4]:
        return serie.iloc[2]
    # FIX: the np.NaN alias was removed in NumPy 2.0; np.nan is the portable spelling.
    return np.nan
# Fractal patterns: rolling local extrema over highs (bearish/resistance) or
# lows (bullish/support). np.roll(..., -1) shifts values one candle left
# (wrapping the first element to the end) so the fractal is stamped one bar
# earlier than the window end.
async def _pat_bearish_fractal_5(self): return list(np.roll(self.current_time_df['high'].rolling(5).apply(Analyzer.is_resistance), -1))
async def _pat_bullish_fractal_5(self): return list(np.roll(self.current_time_df['low'].rolling(5).apply(Analyzer.is_support), -1))
async def _pat_bearish_fractal_3(self): return list(np.roll(self.current_time_df['high'].rolling(3).apply(Analyzer.is_resistance), -1))
async def _pat_bullish_fractal_3(self): return list(np.roll(self.current_time_df['low'].rolling(3).apply(Analyzer.is_support), -1))
# Custom Indicators
# Trivial pass-through indicators over the current candle DataFrame.
async def _ind_low(self): return list(self.current_time_df['low'])
async def _ind_high(self): return list(self.current_time_df['high'])
async def _ind_llow(self): return self.current_time_df['low'].min()    # lowest low
async def _ind_hhigh(self): return self.current_time_df['high'].max()  # highest high
async def _ind_close(self):
    # Last closing price. FIX: float(Series.tail(1)) relied on float() of a
    # one-element Series, which is deprecated/removed in modern pandas;
    # .iloc[-1] extracts the scalar explicitly with identical value.
    return float(self.current_time_df['close'].iloc[-1])
# TODO: Find a way to standardize the low/high/close
# Overlap Studies
async def _ind_bband(self):
    """Bollinger Bands on close, with period/deviations from the config."""
    upperband, middleband, lowerband = ta.BBANDS(self.current_time_df['close'],
            timeperiod=self.config['analysis']['indicators']['bband']['timeperiod'],
            nbdevup=self.config['analysis']['indicators']['bband']['nbdevup'],
            nbdevdn=self.config['analysis']['indicators']['bband']['nbdevdn'],
            matype=0) # No config option for matype yet!
    return {'upper':list(upperband), 'middle': list(middleband), 'lower':list(lowerband)}
# Placeholder overlap-study indicators that are not wired up yet.
async def _ind_dema(self): raise NotImplementedException('indicator')
async def _ind_ema(self): raise NotImplementedException('indicator')
async def _ind_ht_trendline(self): raise NotImplementedException('indicator')
async def _ind_kama(self): raise NotImplementedException('indicator')
async def _ind_ma(self):
    """One moving average (matype=0) per configured period, keyed by the period value."""
    ma = {}
    for param in self.config['analysis']['indicators']['ma']:
        ma[param] = list(ta.MA(self.current_time_df['close'], timeperiod=param, matype=0))
    return ma
# Placeholder overlap-study indicators that are not wired up yet.
async def _ind_mama(self): raise NotImplementedException('indicator')
async def _ind_mavp(self): raise NotImplementedException('indicator')
async def _ind_midpoint(self): raise NotImplementedException('indicator')
async def _ind_midprice(self): raise NotImplementedException('indicator')
async def _ind_sar(self): raise NotImplementedException('indicator')
async def _ind_sarext(self): raise NotImplementedException('indicator')
async def _ind_sma(self): raise NotImplementedException('indicator')
async def _ind_t3(self): raise NotImplementedException('indicator')
async def _ind_tema(self): raise NotImplementedException('indicator')
async def _ind_trima(self): raise NotImplementedException('indicator')
async def _ind_wma(self): raise NotImplementedException('indicator')
# Momentum Indicators
# 14-period trend-strength indicators over high/low/close.
async def _ind_adx(self): return list(ta.ADX(self.current_time_df['high'], self.current_time_df['low'], self.current_time_df['close'], timeperiod=14))
async def _ind_adxr(self): return list(ta.ADXR(self.current_time_df['high'], self.current_time_df['low'], self.current_time_df['close'], timeperiod=14))
# NOTE(review): APO is conventionally computed on close prices; this feeds
# 'high' — confirm whether that is intentional.
async def _ind_apo(self): return list(ta.APO(self.current_time_df['high'], fastperiod=12, slowperiod=26, matype=0))
async def _ind_aroon(self):
    """14-period Aroon up/down lines on raw high/low, as plain lists."""
    aroondown, aroonup = ta.AROON(self.current_time_df['high'], self.current_time_df['low'], timeperiod=14)
    return {'aroonup':list(aroonup), 'aroondown': list(aroondown)}
async def | |
# /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@email: <EMAIL>
"""
from utils import *
# Fig.~S1a&c
def Analysis_halfyear(interactions):
    """Plot Fig. S1a/c: per-phase log(interaction+1), grouped by phase-1 tie range.

    `interactions` selects the input file and axis labeling; the visible
    branches handle 'Frequency' and 'Duration'.
    """
    Data = read('Results/Graph_Halfyear_TR_{}.txt'.format(interactions))
    # Bucket rows by tie range in phase 1 (column 2); rows containing '-1'
    # are dropped. Columns 6+ hold the per-phase interaction counts.
    avg2, avg3, avg4, avg5, avg6 = [], [], [], [], []
    for data in Data:
        if data.count('-1') == 0:
            values = list(map(int, map(float, data[6:])))
            if data[2] == '2':
                avg2.append(values)
            if data[2] == '3':
                avg3.append(values)
            if data[2] == '4':
                avg4.append(values)
            if data[2] == '5':
                avg5.append(values)
            if int(data[2]) >= 6 and int(data[2]) < 100:
                avg6.append(values)
    fig = plt.figure(figsize=(7, 7))
    ax = plt.axes()
    # One errorbar series per tie-range bucket (previously five copy-pasted
    # stanzas): (samples, color, legend label, marker).
    series_specs = [
        (avg2, '#34495e', '$=2$', '^'),
        (avg3, '#2980b9', '$=3$', 's'),
        (avg4, '#7f8c8d', '$=4$', 'p'),
        (avg5, '#c0392b', '$=5$', 'H'),
        (avg6, '#8e44ad', '$\geq6$', '8'),
    ]
    X = [1, 2, 3, 4]
    for samples, color, label, marker in series_specs:
        Y = []
        Err = []
        for i in range(4):
            # log(x+1) per sample for phase i; mean with its confidence interval.
            logged = [mt.log(sample[i] + 1) for sample in samples]
            Y.append(np.mean(logged))
            Err.append(mean_confidence_interval(logged))
        plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color=color, ls='-', label=label,
                     ecolor=color, marker=marker, markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.xticks([1, 2, 3, 4], ['1', '2', '3', '4'], fontsize=20)
    plt.yticks(fontsize=20)
    plt.xlabel("Phase", fontsize=25)
    plt.ylabel("$log$(Interaction {})".format(interactions), fontsize=25)
    legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 1', fontsize=20)
    legend.get_title().set_fontsize(fontsize=20)
    # Axis limits and margins tuned per metric.
    if interactions == 'Frequency':
        plt.ylim([0, 2.5])
        plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.97)
        # plt.savefig('Plots/SI/RobustnessCheck_HY_F.pdf', format='pdf')
    if interactions == 'Duration':
        plt.ylim([0, 7])
        plt.subplots_adjust(left=0.11, bottom=0.11, right=0.98, top=0.97)
        # plt.savefig('Plots/SI/RobustnessCheck_HY_D.pdf', format='pdf')
    plt.show()
# Fig.~S1b&d
def Analysis_month(interactions):
    """Robustness check (Fig. S1b & S1d): plot log-transformed interaction
    values per monthly phase, grouped by the tie range measured in phase 1.

    interactions: 'Frequency' or 'Duration' -- selects the input file, the
    y-axis label, the y-limits and the subplot margins.
    """
    Data = read('opendata/Results/Graph_Month_TR_{}.txt'.format(interactions))
    avg2, avg3, avg4, avg5, avg6 = [], [], [], [], []
    for data in Data:
        # Skip records containing a '-1' marker (missing data).
        if data.count('-1') == 0:
            # data[2] is the tie range in phase 1; columns 26+ hold the
            # 24 monthly interaction values.
            if data[2] == '2':
                avg2.append(list(map(int, map(float, data[26:]))))
            if data[2] == '3':
                avg3.append(list(map(int, map(float, data[26:]))))
            if data[2] == '4':
                avg4.append(list(map(int, map(float, data[26:]))))
            if data[2] == '5':
                avg5.append(list(map(int, map(float, data[26:]))))
            if int(data[2]) >= 6 and int(data[2]) < 100:
                avg6.append(list(map(int, map(float, data[26:]))))
    fig = plt.figure(figsize=(14, 7))
    ax = plt.axes()
    X = list(range(1, 25))
    # One error-bar series per tie-range group: (data, color, label, marker).
    # This replaces five copy-pasted loops that differed only in these values.
    series = [(avg2, '#34495e', '$=2$', '^'),
              (avg3, '#2980b9', '$=3$', 's'),
              (avg4, '#7f8c8d', '$=4$', 'p'),
              (avg5, '#c0392b', '$=5$', 'H'),
              (avg6, '#8e44ad', r'$\geq6$', '8')]
    for avg, color, label, marker in series:
        # log(x + 1) per tie, then mean +/- confidence interval across ties
        # for each of the 24 monthly phases.
        H = [[mt.log(row[i] + 1) for row in avg] for i in range(24)]
        Y = [np.mean(h) for h in H]
        Err = [mean_confidence_interval(h) for h in H]
        plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color=color, ls='-', label=label,
                     ecolor=color, marker=marker, markersize=10, linewidth=2,
                     elinewidth=0.5, capsize=4)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.xticks(X, [str(x) for x in X], fontsize=20)
    plt.yticks(fontsize=20)
    plt.xlabel("Phase", fontsize=25)
    plt.ylabel("$log$(Interaction {})".format(interactions), fontsize=25)
    legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 1', fontsize=20)
    legend.get_title().set_fontsize(fontsize=20)
    if interactions == 'Frequency':
        plt.ylim([0, 2])
        plt.subplots_adjust(left=0.09, bottom=0.11, right=0.98, top=0.97)
        # plt.savefig('Plots/SI/RobustnessCheck_M_F.pdf', format='pdf')
    if interactions == 'Duration':
        plt.ylim([0, 6])
        plt.subplots_adjust(left=0.06, bottom=0.11, right=0.98, top=0.97)
        # plt.savefig('Plots/SI/RobustnessCheck_M_D.pdf', format='pdf')
    plt.show()
# Fig.~S2(a)
def Persistent_ProbabilityHY():
    """Plot persistence probability over phases 1-4, grouped by the tie range
    measured in phase 1 (half-year frequency networks, Fig. S2a).

    Probabilities are computed from the data and printed; the plotted Y
    values are the pre-computed results hard-coded below (as in the
    original figure script).
    """
    fig = plt.figure(figsize=(7, 7))
    ax = plt.axes()
    Data = read('Results/Graph_Halfyear_TR_Frequency.txt')
    avg2, avg3, avg4, avg5, avg6 = [], [], [], [], []
    # Restrict to ties whose endpoints are both in the largest connected
    # component of the first half-year graph.
    GH = nx.read_gexf('Graph/Halfyear/Frequency/G_Frequency_Halfyear_1.gexf')
    Large = max(nx.connected_components(GH), key=len)
    for data in Data:
        if data.count('-1') == 0:
            if data[0] in Large and data[1] in Large:
                if data[2] == '2':
                    avg2.append(list(map(int, map(float, data[6:]))))
                if data[2] == '3':
                    avg3.append(list(map(int, map(float, data[6:]))))
                if data[2] == '4':
                    avg4.append(list(map(int, map(float, data[6:]))))
                if data[2] == '5':
                    avg5.append(list(map(int, map(float, data[6:]))))
                if int(data[2]) >= 6 and int(data[2]) <= 100:
                    avg6.append(list(map(int, map(float, data[6:]))))
    groups = [(avg2, '$=2$'), (avg3, '$=3$'), (avg4, '$=4$'),
              (avg5, '$=5$'), (avg6, r'$\geq6$')]
    # H rows: [persistence probability, tie-range label, phase number].
    # This consolidates five copy-pasted counting blocks into one loop.
    H = []
    for avg, label in groups:
        zero_counts = [0, 0, 0]
        for row in avg:
            for k in range(3):
                if row[k + 1] == 0:
                    zero_counts[k] += 1
        for k in range(3):
            H.append([1 - zero_counts[k] / len(avg), label, k + 2])
        # By construction a tie always exists in phase 1.
        H.append([1, label, 1])
    TR = ['$=2$', '$=3$', '$=4$', '$=5$', r'$\geq6$']
    result = [0 for _ in range(4)]
    for tr in TR:
        for h in H:
            if h[1] == tr:
                result[h[2] - 1] = h[0]
        print(tr, result)
    # Plot the pre-computed persistence probabilities.
    X = [1, 2, 3, 4]
    series = [([1, 0.5198435241847288, 0.42654463428219314, 0.3555069087329271], '#34495e', '$=2$', '^'),
              ([1, 0.24877016233119698, 0.1885505247553232, 0.14666597830175454], '#2980b9', '$=3$', 's'),
              ([1, 0.28445115376320895, 0.2249730429156782, 0.18266120336424407], '#7f8c8d', '$=4$', 'p'),
              ([1, 0.46226415094339623, 0.4231805929919138, 0.39622641509433965], '#c0392b', '$=5$', 'H'),
              ([1, 0.5, 0.5333333333333333, 0.3666666666666667], '#8e44ad', r'$\geq6$', '8')]
    for Y, color, label, marker in series:
        plt.plot(X, Y, color=color, label=label, marker=marker, markersize=10, linewidth=2)
    plt.xlabel('Phase', fontsize=25)
    plt.ylabel('Persistence Probability', fontsize=25)
    plt.xticks([1, 2, 3, 4], ['1', '2', '3', '4'], fontsize=20)
    plt.yticks(fontsize=20)
    plt.ylim([0, 1.05])
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 1', fontsize=20)
    legend.get_title().set_fontsize(fontsize=20)
    plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.97)
| |
from apps.lockto.utils import get_lockto_coll
from apps.fields.fields import unflatten_name, field_item_types
from apps.record.errors import ValidationError
from apps.shortcuts import getparam, getintparam, getparamlist, url
from apps.store.models import Collection
from apps.store.search import Collection as SearchCollection
import math
from restpose.query import And, Or
from restpose.errors import RestPoseError
import restkit
from utils import highlight
class SearchParams(object):
    """The parameters describing a search to be performed.
    """
    def __init__(self, stash=None, params=None):
        """Read the form parameters describing a search.

        Updates stash as appropriate, and returns an object with parameters
        as attributes.
        """
        self.stash = stash
        # First, get the search action.
        # No stash, since we don't want to repeat it.
        self.action = getparam('act', 'entry', params=params)
        if self.action in ('search', 'select'):
            # Except, we do stash the search and select actions.
            stash['act'] = [self.action]
            self.search_in_progress = True
        else:
            self.search_in_progress = False
        # Get the types to be searched.
        ttypes = getparam('ttypes', 'record', stash, params=params)
        if self.action == 'select':
            ttypes = 'record'
        self.ttypes = list(sorted(set(ttypes.split(','))))
        stash['ttypes'] = self.ttypes
        # Get the collections to search; '*' means "all collections".
        self.colls = getparamlist('collid', [], stash, params=params)
        if len(self.colls) == 0:
            self.colls.append(u'*')
            stash['collid'] = self.colls
        # Handle modifications to the list of collections.
        if self.action == 'add_collection':
            self.colls.append(u'*')
            stash['collid'] = self.colls
            self.action = 'entry'
        if self.action.startswith('del_collection_'):
            num = int(self.action[len('del_collection_'):])
            del self.colls[num]
            stash['collid'] = self.colls
            self.action = 'entry'
        # Get the basic query. This is for queries entered by users in a
        # single input box.
        self.q = getparam('q', None, stash, params=params)
        # Get the numbers of the components of the filter query.  Wrap
        # filter() in list() so that len() below is guaranteed to work.
        self.qsnums = list(filter(None, getparam('qs', '', stash, params=params).split(',')))
        if len(self.qsnums) < 1:
            self.qsnums = [1]
        else:
            self.qsnums = [int(x) for x in self.qsnums]
        # Handle modifications to the list of components of the filter query.
        if self.action == 'add_field':
            self.qsnums.append(max(self.qsnums) + 1)
            self.action = 'entry'
        if self.action.startswith('del_field_'):
            num = int(self.action[len('del_field_'):])
            del self.qsnums[num - 1]
            self.action = 'entry'
        # Stash the numbers of the components of the filter query.
        stash['qs'] = [','.join(str(x) for x in self.qsnums)]
        # Read the components of the query: (number, type, field, values).
        self.fields = []
        for num in self.qsnums:
            prefix = 'q%d' % num
            param = getparam(prefix + 'f', None, stash, params=params)
            if param is None or param == u'':
                continue
            values = getparamlist(prefix + 'm', None, stash, params=params)
            if param == u'*':
                # "Any field": drop empty / wildcard values.
                values = filter(lambda x: not((x is None) or (unicode(x).strip() in (u'', u'*'))), values)
                self.fields.append((num, u'*', u'*', values))
            else:
                field, type = param.rsplit('_', 1)
                field = unflatten_name(field)
                self.fields.append((num, type, field, values))
        # Get the order to return results in.  'score' means relevance;
        # anything else is a field name, optionally prefixed with '+'/'-'.
        order = getparam('order', 'score', stash, params=params)
        if order == 'score':
            self.order = 'score'
            self.order_asc = False
            self.order_field = None
        else:
            if order[0] in '+-':
                self.order = order
                self.order_asc = (self.order[0] == '+')
                self.order_field = self.order[1:]
            else:
                self.order = '+' + order
                self.order_asc = True
                # Bug fix: store the bare field name.  Previously this was
                # set to self.order ('+field'), inconsistent with the signed
                # branch above which strips the sign.
                self.order_field = order
        # Get the offsets within the search results.
        self.hpp = getintparam('hpp', 100, stash, params=params)
        self.startrank = getintparam('startrank', None, stash, params=params)
        self.rank = getintparam('rank', None, stash, params=params)
        # Ensure that the offsets are integers, aligned to pages.
        if self.startrank is None:
            if self.rank is None:
                self.startrank = self.rank = 0
            else:
                self.rank = int(self.rank)
                self.startrank = int(self.rank / self.hpp) * self.hpp
        else:
            self.startrank = int(int(self.startrank) / self.hpp) * self.hpp
            if self.rank is None:
                self.rank = self.startrank
            else:
                self.rank = int(self.rank)
class Search(object):
"""A search to be performed.
"""
def __init__(self, params, url_fn=url):
"""Initialise the set of results.
"""
#: The parameters used for the search.
self.params = params
#: Function used for making urls. (Different one used for exports.)
self.url_fn = url_fn
#: Flag, True if query is empty, set when building the query.
self._empty = True
#: Words used (positively) in the query, for highlighting.
self._words = []
#: The query being performed.
self._query = None
#: Cached description of the whole query being performed.
self._query_desc = None
#: The base query being performed, unrestricted by document type.
self._base_query = None
#: Cached query for the collections being searched.
self._coll_query = None
#: Cached description of the collections being searched.
self._coll_desc = None
#: Cached query for the fields being searched.
self._field_query = None
#: Cached description of the fields being searched.
self._field_desc = None
#: Cached query restricted by collection lock.
self._lockedto_query = None
#: The types of objects matching the search (and their counts).
self._matching_types = None
#: The collections related to the search results.
self._relevant_colls = None
#: A list of result objects for the current page.
self._resultlist = None
#: Flag - true if previous and next result objects have been built.
self._built_prevnext_result = False
#: Cache of fields in the collections being searched.
self._relevant_fields = None
#: Highlighter used for building summaries.
self._highlighter = highlight.Highlighter()
self._restrict_coll = None
self.error = None
# The auto action runs a query if there is one, or goes to the search
# entry page if there isn't.
if self.params.action == 'auto':
if self.empty:
self.params.action = 'entry'
else:
self.params.action = 'search'
self.params.stash['act'] = [self.params.action]
    def restrict_to_collection(self, collid):
        """Restrict the results of a search to those in a collection.

        `collid` identifies the collection; it is consulted when the query
        is built.
        """
        self._restrict_coll = collid
    def add_to_context(self, context):
        """Expose this search (and any stored error) to a template context."""
        context['search'] = self
        # Stringify the error so templates get plain text.
        if self.error is not None:
            context['error'] = str(self.error)
def validate(self):
"""Check that the search parameters are valid."""
try:
list(self.base_query[:0])
return True
except ValidationError, e:
self.error = e
return False
except RestPoseError, e:
self.error = e
return False
except restkit.ResourceError, e:
self.error = e
return False
except restkit.RequestError, e:
self.error = e
return False
    @property
    def hpp(self):
        """Hits per page (page size used for pagination)."""
        return self.params.hpp
    @property
    def startrank(self):
        """Rank at the start of the current page."""
        return self.params.startrank
    @property
    def rank(self):
        """Rank of the current search result."""
        return self.params.rank
    @property
    def order(self):
        """Order of search results, as supplied in the search parameters."""
        return self.params.order
    @property
    def resultlist(self):
        """Return a list of results for the query.
        Returns the results in the current page only.
        """
        # Built lazily; _build_resultlist also populates _resultids and
        # _match_count.
        if self._resultlist is None:
            self._build_resultlist()
        return self._resultlist
    @property
    def resultids(self):
        """Return a list of ids of items matching the query.
        Returns the results in the current page only.
        """
        if self._resultlist is None:
            self._build_resultlist()
        return self._resultids
    @property
    def needs_pagination(self):
        """Return True if the search has sufficient results to require
        pagination.
        """
        # Being on any page other than the first implies pagination.
        if self.params.startrank != 0:
            return True
        if self._resultlist is None:
            self._build_resultlist()
        return self._has_next_page
    @property
    def page_endrank(self):
        """The rank of the last result on the page.
        """
        return self.params.startrank + len(self.resultids) - 1
    @property
    def match_count(self):
        """The number of matching results.

        This is the engine's estimate (matches_estimated), computed when
        the result list is built.
        """
        if self._resultlist is None:
            self._build_resultlist()
        return self._match_count
@property
def prev_page_url(self):
"""Return URL of the previous page, or '' if on first page.
"""
if self.params.startrank == 0:
return ''
return self.url_fn("search",
newstartrank=self.params.startrank - self.params.hpp,
**self.params.stash)
@property
def next_page_url(self):
"""Return URL of the next page, or '' if on last page.
"""
if self._resultlist is None:
self._build_resultlist()
print "Search.next_page_url(): %r" % ((
self.params.startrank,
self.params.hpp,
self.params.stash
),)
if self._has_next_page:
return self.url_fn("search",
newstartrank=self.params.startrank + self.params.hpp,
**self.params.stash)
return ''
    @property
    def pagination_links(self):
        """A list of pagination links.
        Each item is a 3-tuple of (url, number, current):
        - url is the url of the link target
        - number is the page number of the link
        - current is a flag, True iff the page is the current page.
        """
        result = []
        pages = int(self.match_count / self.hpp) + 1
        # _build_resultlist fetches up to 25% extra results per page
        # ("slop"); if the leftover beyond the last full page fits in the
        # slop it is shown on that page, so drop the would-be final page.
        slop = int(math.ceil(self.params.hpp * 1.25)) - self.params.hpp
        if self.match_count % self.params.hpp <= slop:
            pages -= 1
        for num in range(pages):
            result.append((self.url_fn("search",
                                       newstartrank = num * self.hpp,
                                       **self.params.stash),
                           num + 1,
                           num * self.hpp == self.startrank))
        return result
def _build_resultobj(self, item):
summary = []
title = item.data.get('title', [''])[0]
for k, v in item.data.iteritems():
if k in ('mtime', 'type', ):
continue
for bit in v:
bit = unicode(bit)
if bit != title:
summary.append(bit.strip())
summary = u' '.join(summary)
item.summary = self._highlighter.makeSample(summary, self.words,
maxlen=300,
hl=[u'<b>', u'</b>'])
return item
def _build_resultlist(self):
max_hpp = int(math.ceil(self.params.hpp * 1.25))
search = self.query[self.params.startrank:
self.params.startrank + max_hpp] \
.check_at_least(-1)
self._match_count = search.matches_estimated
if search.has_more:
# If there are still more results after the slop, | |
<gh_stars>1-10
from __future__ import division, absolute_import
from struct import unpack as _unpack, pack as _pack
import os.path
from sys import byteorder as _BYTEORDER
import warnings
import numpy as np
from .spacegroups import GetSpaceGroup
from ._extensions import extend_to_p1
class Volume(object):
    """A 3D density map on a regular grid together with unit-cell metadata.

    The array is indexed (z, y, x); lattice parameters are therefore derived
    from the reversed shape.
    """

    def __init__(self, array, voxelspacing=1.0, origin=(0, 0, 0),
                 angles=(90, 90, 90), offset=(0, 0, 0), spacegroup=None, cell_shape=None):
        self.array = array
        # Accept a scalar spacing (int or float -- previously only float,
        # so an int scalar crashed in the zip below) and expand it to 3 axes.
        if isinstance(voxelspacing, (int, float)):
            voxelspacing = tuple([voxelspacing] * 3)
        self.voxelspacing = voxelspacing
        self.origin = origin
        self.angles = angles
        self.offset = offset
        # Cell edge lengths: shape is (z, y, x), so reverse for (x, y, z).
        self.lattice_parameters = [x * vs
                                   for x, vs in zip(self.array.shape[::-1], self.voxelspacing)]
        self.a, self.b, self.c = self.lattice_parameters
        self.alpha, self.beta, self.gamma = self.angles
        if spacegroup is not None:
            self.spacegroup = GetSpaceGroup(spacegroup)
        else:
            self.spacegroup = None
        self.cell_shape = cell_shape
        # Triclinic cell volume factor omega (1.0 for an orthogonal cell).
        cos_alpha = np.cos(np.deg2rad(self.alpha))
        cos_beta = np.cos(np.deg2rad(self.beta))
        cos_gamma = np.cos(np.deg2rad(self.gamma))
        omega = np.sqrt(
            1 + 2 * cos_alpha * cos_beta * cos_gamma -
            cos_alpha * cos_alpha - cos_beta * cos_beta -
            cos_gamma * cos_gamma)
        # np.prod replaces np.product, which was removed in NumPy 2.0.
        self.voxel_volume = np.prod(self.voxelspacing) * omega
        self.volume = np.prod(self.lattice_parameters) * omega

    @classmethod
    def fromfile(cls, fid, fmt=None):
        """Build a Volume by parsing *fid* (file object or filename).

        Bug fix: *fmt* used to be accepted but silently ignored; it is now
        forwarded to the parser.
        """
        p = parse_volume(fid, fmt=fmt)
        return cls(p.density, voxelspacing=p.voxelspacing,
                   origin=p.origin, angles=p.angles, offset=p.offset,
                   spacegroup=p.spacegroup, cell_shape=p.cell_shape)

    @classmethod
    def zeros(cls, shape, voxelspacing=1.0, origin=(0, 0, 0),
              angles=(90, 90, 90), offset=(0, 0, 0), spacegroup=None, cell_shape=None):
        """Return a zero-filled float64 Volume of the given shape."""
        return cls(np.zeros(shape, dtype=np.float64), voxelspacing,
                   origin, angles, offset, spacegroup, cell_shape)

    @classmethod
    def zeros_like(cls, volume):
        """Return a zero-filled Volume with the metadata of *volume*."""
        return cls(np.zeros_like(volume.array), volume.voxelspacing,
                   volume.origin, volume.angles, volume.offset,
                   volume.spacegroup, volume.cell_shape)

    @property
    def shape(self):
        return self.array.shape

    def duplicate(self):
        """Return a deep copy of the grid with the same geometry.

        NOTE(review): spacegroup/cell_shape are not carried over here
        (unlike zeros_like) -- confirm whether that is intentional.
        """
        return Volume(self.array.copy(), voxelspacing=self.voxelspacing,
                      origin=self.origin, angles=self.angles, offset=self.offset)

    def fill_unit_cell(self):
        """Expand the map to the full unit cell by applying all symmetry
        operators of the spacegroup (requires cell_shape and spacegroup).
        """
        if self.cell_shape is None:
            raise ValueError("cell_shape attribute is None.")
        out = Volume.zeros(self.cell_shape, voxelspacing=self.voxelspacing,
                           origin=self.origin, angles=self.angles, offset=(0, 0, 0),
                           spacegroup=self.spacegroup, cell_shape=self.cell_shape)
        offset = np.asarray(self.offset, np.int32)
        for symop in self.spacegroup.symop_list:
            # Build a 3x4 [R | t] operator; translation is in fractional
            # units, so scale by the output grid size (x fastest -> reverse).
            trans = np.hstack((symop.R, symop.t.reshape(3, -1)))
            trans[:, -1] *= out.shape[::-1]
            extend_to_p1(self.array, offset, trans, out.array)
        return out

    def set_spacegroup(self, spacegroup):
        """Set (or replace) the spacegroup from a name/number."""
        self.spacegroup = GetSpaceGroup(spacegroup)

    def tofile(self, fid, fmt=None):
        """Write the volume to *fid*; format inferred from the extension
        when *fmt* is None.  Raises ValueError on unknown formats.
        """
        if fmt is None:
            fmt = os.path.splitext(fid)[-1][1:]
        if fmt in ('ccp4', 'map', 'mrc'):
            to_mrc(fid, self)
        elif fmt in ('xplor', 'cns'):
            to_xplor(fid, self)
        else:
            raise ValueError("Format is not supported.")
# Volume parsers
def parse_volume(fid, fmt=None):
    """Parse a density file and return the parser object.

    *fid* may be a file object or a filename; the format is taken from the
    filename extension when *fmt* is None.  Raises ValueError for formats
    other than ccp4/map/mrc.
    """
    fname = getattr(fid, 'name', fid)
    if fmt is None:
        fmt = os.path.splitext(fname)[-1][1:]
    if fmt in ('ccp4', 'map'):
        parser = CCP4Parser(fname)
    elif fmt == 'mrc':
        parser = MRCParser(fname)
    else:
        raise ValueError('Extension of file is not supported.')
    return parser
class CCP4Parser(object):
    """Parser for CCP4-format binary density maps.

    Reads the 1024-byte header, the symmetry table and the density grid,
    then reorders the axes so that x is the fastest-changing dimension.
    """

    HEADER_SIZE = 1024
    HEADER_TYPE = ('i' * 10 + 'f' * 6 + 'i' * 3 + 'f' * 3 + 'i' * 3 +
                   'f' * 27 + 'c' * 8 + 'f' * 1 + 'i' * 1 + 'c' * 800)
    HEADER_FIELDS = (
          'nc nr ns mode ncstart nrstart nsstart nx ny nz xlength ylength '
          'zlength alpha beta gamma mapc mapr maps amin amax amean ispg '
          'nsymbt lskflg skwmat skwtrn extra xstart ystart zstart map '
          'machst rms nlabel label'
          ).split()
    HEADER_CHUNKS = [1] * 25 + [9, 3, 12] + [1] * 3 + [4, 4, 1, 1, 800]

    def __init__(self, fid):
        if isinstance(fid, str):
            # Bug fix: open in binary mode -- the header and density are
            # binary data and text mode corrupts them on some platforms.
            fhandle = open(fid, 'rb')
        elif isinstance(fid, file):
            fhandle = fid
        else:
            raise ValueError("Input should either be a file or filename.")
        self.fhandle = fhandle
        self.fname = fhandle.name
        # first determine the endianness of the file
        self._get_endiannes()
        # get the header
        self._get_header()
        self.params = tuple(self.header[key] for key in ('xlength', 'ylength', 'zlength'))
        self.angles = tuple(self.header[key] for key in ('alpha', 'beta', 'gamma'))
        self.shape = tuple(self.header[key] for key in ('nx', 'ny', 'nz'))
        self.voxelspacing = tuple(length / n
                                  for length, n in zip(self.params, self.shape))
        self.spacegroup = int(self.header['ispg'])
        self.cell_shape = [self.header[key] for key in 'nz ny nx'.split()]
        self._get_offset()
        self._get_origin()
        # Get the symbol table and ultimately the density
        self._get_symbt()
        self._get_density()

    def _get_endiannes(self):
        """Determine byte order from the machine stamp at offset 212."""
        self.fhandle.seek(212)
        m_stamp = hex(ord(self.fhandle.read(1)))
        # 0x44 -> little endian, 0x11 -> big endian (CCP4 MACHST convention).
        if m_stamp == '0x44':
            endian = '<'
        elif m_stamp == '0x11':
            endian = '>'
        else:
            raise ValueError('Endiannes is not properly set in file. Check the file format.')
        self._endian = endian
        self.fhandle.seek(0)

    def _get_header(self):
        """Unpack the 1024-byte header into the self.header dict."""
        header = _unpack(self._endian + self.HEADER_TYPE,
                         self.fhandle.read(self.HEADER_SIZE))
        self.header = {}
        index = 0
        # Walk the flat unpacked tuple, grouping multi-value fields.
        for field, nchunks in zip(self.HEADER_FIELDS, self.HEADER_CHUNKS):
            end = index + nchunks
            if nchunks > 1:
                self.header[field] = header[index: end]
            else:
                self.header[field] = header[index]
            index = end
        self.header['label'] = ''.join(self.header['label'])

    def _get_offset(self):
        """Map the column/row/section start values onto x/y/z offsets."""
        self.offset = [0] * 3
        self.offset[self.header['mapc'] - 1] = self.header['ncstart']
        self.offset[self.header['mapr'] - 1] = self.header['nrstart']
        self.offset[self.header['maps'] - 1] = self.header['nsstart']

    def _get_origin(self):
        # CCP4 files carry the origin in the offsets; keep (0, 0, 0) here.
        self.origin = (0, 0, 0)

    def _get_symbt(self):
        """Read the raw symmetry table (nsymbt bytes after the header)."""
        self.symbt = self.fhandle.read(self.header['nsymbt'])

    def _get_density(self):
        # Determine the dtype of the file based on the mode
        mode = self.header['mode']
        if mode == 0:
            dtype = 'i1'
        elif mode == 1:
            dtype = 'i2'
        elif mode == 2:
            dtype = 'f4'
        else:
            # Bug fix: an unrecognised mode previously fell through and
            # raised a confusing NameError on `dtype` below.
            raise ValueError("Mode {:} is not supported.".format(mode))
        # Read the density in file (section, row, column) order.
        storage_shape = tuple(self.header[key] for key in ('ns', 'nr', 'nc'))
        self.density = np.fromfile(self.fhandle,
                                   dtype=self._endian + dtype).reshape(storage_shape)
        # Reorder axis so that nx is fastest changing.
        maps, mapr, mapc = [self.header[key] for key in ('maps', 'mapr', 'mapc')]
        if maps == 3 and mapr == 2 and mapc == 1:
            pass
        elif maps == 3 and mapr == 1 and mapc == 2:
            self.density = np.swapaxes(self.density, 1, 2)
        elif maps == 2 and mapr == 1 and mapc == 3:
            self.density = np.swapaxes(self.density, 1, 2)
            self.density = np.swapaxes(self.density, 1, 0)
        elif maps == 1 and mapr == 2 and mapc == 3:
            self.density = np.swapaxes(self.density, 0, 2)
        else:
            raise ValueError("Density storage order ({:} {:} {:}) not supported.".format(maps, mapr, mapc))
        self.density = np.ascontiguousarray(self.density, dtype=np.float64)
class MRCParser(CCP4Parser):
    """CCP4Parser variant reading the origin from the MRC-style
    xstart/ystart/zstart header records.
    """
    def _get_origin(self):
        # Bug fix: assign to self.origin (the base-class contract, consumed
        # by parse_volume callers) instead of returning the value, which
        # left self.origin unset.
        origin_fields = 'xstart ystart zstart'.split()
        self.origin = [self.header[field] for field in origin_fields]
def to_mrc(fid, volume, labels=[], fmt=None):
    """Write *volume* to filename *fid* in CCP4/MRC format.

    The format is inferred from the extension when *fmt* is None; only
    'ccp4', 'mrc' and 'map' are accepted.  User-supplied *labels* are not
    written yet (see TODO below).
    """
    if fmt is None:
        fmt = os.path.splitext(fid)[-1][1:]
    if fmt not in ('ccp4', 'mrc', 'map'):
        raise ValueError('Format is not recognized. Use ccp4, mrc, or map.')
    # Map the array dtype onto the MRC mode word.
    dtype = volume.array.dtype.name
    if dtype == 'int8':
        mode = 0
    elif dtype in ('int16', 'int32'):
        mode = 1
    elif dtype in ('float32', 'float64'):
        mode = 2
    else:
        raise TypeError("Data type ({:})is not supported.".format(dtype))
    # CCP4 stores the grid offset; MRC uses the origin record instead.
    if fmt in ('ccp4', 'map'):
        nxstart, nystart, nzstart = volume.offset
    else:
        nxstart, nystart, nzstart = [0, 0, 0]
    voxelspacing = volume.voxelspacing
    nz, ny, nx = volume.shape
    xl, yl, zl = volume.lattice_parameters
    alpha, beta, gamma = volume.angles
    mapc, mapr, maps = [1, 2, 3]
    ispg = 1
    nsymbt = 0
    lskflg = 0
    skwmat = [0.0]*9
    skwtrn = [0.0]*3
    fut_use = [0.0]*12
    if fmt == 'mrc':
        origin = volume.origin
    else:
        origin = [0, 0, 0]
    str_map = list('MAP ')
    # Machine stamp encoding the byte order (CCP4 MACHST convention:
    # 0x44 0x41 for little endian, 0x11 0x11 for big endian).
    if _BYTEORDER == 'little':
        machst = list('\x44\x41\x00\x00')
    elif _BYTEORDER == 'big':
        # Bug fix: the big-endian stamp previously repeated the
        # little-endian bytes.
        machst = list('\x11\x11\x00\x00')
    else:
        # Bug fix: the message previously referenced an undefined name
        # `byteorder` instead of the imported _BYTEORDER.
        raise ValueError("Byteorder {:} is not recognized".format(_BYTEORDER))
    labels = [' '] * 800
    nlabels = 0
    min_density = volume.array.min()
    max_density = volume.array.max()
    mean_density = volume.array.mean()
    std_density = volume.array.std()
    with open(fid, 'wb') as out:
        out.write(_pack('i', nx))
        out.write(_pack('i', ny))
        out.write(_pack('i', nz))
        out.write(_pack('i', mode))
        out.write(_pack('i', nxstart))
        out.write(_pack('i', nystart))
        out.write(_pack('i', nzstart))
        out.write(_pack('i', nx))
        out.write(_pack('i', ny))
        out.write(_pack('i', nz))
        out.write(_pack('f', xl))
        out.write(_pack('f', yl))
        out.write(_pack('f', zl))
        out.write(_pack('f', alpha))
        out.write(_pack('f', beta))
        out.write(_pack('f', gamma))
        out.write(_pack('i', mapc))
        out.write(_pack('i', mapr))
        out.write(_pack('i', maps))
        out.write(_pack('f', min_density))
        out.write(_pack('f', max_density))
        out.write(_pack('f', mean_density))
        out.write(_pack('i', ispg))
        out.write(_pack('i', nsymbt))
        out.write(_pack('i', lskflg))
        for f in skwmat:
            out.write(_pack('f', f))
        for f in skwtrn:
            out.write(_pack('f', f))
        for f in fut_use:
            out.write(_pack('f', f))
        for f in origin:
            out.write(_pack('f', f))
        for c in str_map:
            out.write(_pack('c', c))
        for c in machst:
            out.write(_pack('c', c))
        out.write(_pack('f', std_density))
        # max 10 labels
        # nlabels = min(len(labels), 10)
        # TODO labels not handled correctly
        #for label in labels:
        #    list_label = [c for c in label]
        #    llabel = len(list_label)
        #    if llabel < 80:
        #
        #    # max 80 characters
        #    label = min(len(label), 80)
        out.write(_pack('i', nlabels))
        for c in labels:
            out.write(_pack('c', c))
        # write density
        modes = [np.int8, np.int16, np.float32]
        volume.array.astype(modes[mode]).tofile(out)
class XPLORParser(object):
"""
Class for reading XPLOR volume files created by NIH-XPLOR or CNS.
"""
def __init__(self, fid):
if isinstance(fid, file):
fname = fid.name
elif isinstance(fid, str):
fname = fid
fid = open(fid)
else:
raise TypeError('Input should either be a file or filename')
self.source = fname
self._get_header()
def _get_header(self):
header = {}
with open(self.source) as volume:
# first line is blank
volume.readline()
line = volume.readline()
nlabels = int(line.split()[0])
label = [volume.readline() for n in range(nlabels)]
header['label'] = label
line = volume.readline()
header['nx'] = int(line[0:8])
header['nxstart'] | |
<filename>dbinit/patch.py
from utility import *
def patchMissilesDamage(env):
    """Register the passive character effect that multiplies all four missile
    damage types by the pilot's missileDamageMultiplier.
    """
    addEffect(10000, 'characterDamageMissiles', EffectCategory.passive, False, False, env)
    addEffectGroup('Character', 'characterDamageMissiles', env)
    damage_attrs = ('emDamage', 'explosiveDamage', 'kineticDamage', 'thermalDamage')
    modifiers = [OwnerRequiredSkillModifier(Domain.charID, 'Missile Launcher Operation',
                                            attr, Operation.postMul, 'missileDamageMultiplier')
                 for attr in damage_attrs]
    updateEffect('characterDamageMissiles', env, modifiers)
def patchTacticalDestroyer(name, env):
    """Wire up the three tactical-mode modules for the destroyer *name*:
    each gets the tacticalMode effect and is restricted to fit that hull.
    """
    modules = ['{} {} Mode'.format(name, mode)
               for mode in ('Defense', 'Sharpshooter', 'Propulsion')]
    for module in modules:
        addItemEffect(module, 'tacticalMode', env)
    ship_type_id = getTypeID(name, env)
    for module in modules:
        addItemAttribute(module, 'canFitShipType1', ship_type_id, env)
    addItemAttribute(name, 'tacticalModes', 1, env)
def patchTacticalModes(env):
    """Register the tacticalMode effect/attribute and patch every tactical
    destroyer hull.
    """
    addEffect(10000, 'tacticalMode', EffectCategory.passive, False, False, env)
    addAttribute(10000, 'tacticalModes', 4, 122, True, True, 0, env)
    for ship in ('Confessor', 'Svipul', 'Jackdaw', 'Hecate'):
        patchTacticalDestroyer(ship, env)
def patchAncillaryArmorRepairer(env):
    # Register the effects and attribute needed to model ancillary armor
    # repairers (modules whose repair amount is boosted while charged with
    # Nanite Repair Paste).
    addEffect(10001, 'naniteRepairPasteArmorDamageBonus', EffectCategory.passive, False, False, env)
    addEffect(10003, 'fueledArmorRepairBonus', EffectCategory.passive, False, False, env)
    addAttribute(10002, 'chargedArmorDamageMultiplierPostDiv', 5, 0, True, True, 0, env)
    for typeID, type in env['typeDogma'].items():
        for attribute in type['dogmaAttributes']:
            # Copy the value of attribute 1886 into the new post-div
            # attribute for this type.  NOTE(review): 1886 is presumably the
            # charged armor damage multiplier -- confirm against the dump.
            if attribute['attributeID'] == 1886:
                addItemAttribute(env['typeIDs'][typeID]['typeName'], 'chargedArmorDamageMultiplierPostDiv', attribute['value'], env)
                break
        for effect in type['dogmaEffects']:
            # NOTE(review): effect 5275 presumably marks fueled armor
            # repairers; no break here, so a duplicate entry would add the
            # effect twice -- confirm that effect lists are unique.
            if effect['effectID'] == 5275:
                addItemEffect(env['typeIDs'][typeID]['typeName'], 'fueledArmorRepairBonus', env)
    addItemEffect('Nanite Repair Paste', 'naniteRepairPasteArmorDamageBonus', env)
    addItemAttribute('Nanite Repair Paste', 'chargedArmorDamageMultiplierPostDiv', 1, env)
def patchFitting(env):
    """Patch the fitting-related effects: CPU/power load from online modules
    and slot/hardpoint count modifiers.
    """
    online_category = EffectCategory.online.name
    env['effectNames']['online']['effectCategory'] = online_category
    env['effectNames']['onlineForStructures']['effectCategory'] = online_category
    online_mods = [ItemModifier(Domain.shipID, 'cpuLoad', Operation.modAdd, 'cpu'),
                   ItemModifier(Domain.shipID, 'powerLoad', Operation.modAdd, 'power')]
    updateEffect('online', env, online_mods)
    slot_mods = [ItemModifier(Domain.shipID, attr, Operation.modAdd, src)
                 for attr, src in (('hiSlots', 'hiSlotModifier'),
                                   ('medSlots', 'medSlotModifier'),
                                   ('lowSlots', 'lowSlotModifier'))]
    updateEffect('slotModifier', env, slot_mods)
    hardpoint_mods = [ItemModifier(Domain.shipID, attr, Operation.modAdd, src)
                      for attr, src in (('launcherSlotsLeft', 'launcherHardPointModifier'),
                                        ('turretSlotsLeft', 'turretHardPointModifier'))]
    updateEffect('hardPointModifierEffect', env, hardpoint_mods)
def patchHardeners(env):
    """Patch the adaptive armor hardener so it multiplies each of the four
    armor resonance attributes on the ship by its own attribute of the same
    name.
    """
    resonances = ('armorEmDamageResonance', 'armorExplosiveDamageResonance',
                  'armorKineticDamageResonance', 'armorThermalDamageResonance')
    updateEffect('adaptiveArmorHardener', env,
                 [ItemModifier(Domain.shipID, attr, Operation.postMul, attr)
                  for attr in resonances])
def patchRepairers(env):
    # Patch local/remote armor, shield and hull repair effects so each
    # adds/subtracts the repaired amount as a rate on the right domain
    # (shipID = self, targetID = remote target, itemID/otherID = the module
    # and its loaded charge).
    #
    # Ancillary armor repairers: the paste charge divides the multiplier on
    # the module (otherID), and the module scales its own repair amount.
    updateEffect('fueledArmorRepairBonus', env, [ItemModifier(Domain.itemID, 'chargedArmorDamageMultiplier', Operation.postDiv, 'chargedArmorDamageMultiplierPostDiv'),
                                                 ItemModifier(Domain.itemID, 'armorDamageAmount', Operation.postMul, 'chargedArmorDamageMultiplier')])
    updateEffect('naniteRepairPasteArmorDamageBonus', env, [ItemModifier(Domain.otherID, 'chargedArmorDamageMultiplierPostDiv', Operation.postAssignment, 'chargedArmorDamageMultiplierPostDiv')])
    # Armor repairers (local, targeted, fueled, remote).
    updateEffect('armorRepair', env, [ItemModifier(Domain.shipID, 'armorDamage', Operation.subRate, 'armorDamageAmount')])
    updateEffect('targetArmorRepair', env, [ItemModifier(Domain.targetID, 'armorDamage', Operation.subRate, 'armorDamageAmount')])
    #updateEffect('remoteArmorRepairFalloff', env, [ItemModifier(Domain.targetID, 'armorDamage', Operation.subRate, 'armorDamageAmount')])
    updateEffect('fueledArmorRepair', env, [ItemModifier(Domain.shipID, 'armorDamage', Operation.subRate, 'armorDamageAmount')])
    updateEffect('shipModuleRemoteArmorRepairer', env, [ItemModifier(Domain.targetID, 'armorDamage', Operation.subRate, 'armorDamageAmount')])
    updateEffect('shipModuleAncillaryRemoteArmorRepairer', env, [ItemModifier(Domain.targetID, 'armorDamage', Operation.subRate, 'armorDamageAmount')])
    # Shield boosters (local, targeted, fueled, remote).
    updateEffect('shieldBoosting', env, [ItemModifier(Domain.shipID, 'shieldCharge', Operation.addRate, 'shieldBonus')])
    updateEffect('shieldTransfer', env, [ItemModifier(Domain.targetID, 'shieldCharge', Operation.addRate, 'shieldBonus')])
    #updateEffect('remoteShieldTransferFalloff', env, [ItemModifier(Domain.targetID, 'shieldCharge', Operation.addRate, 'shieldBonus')])
    updateEffect('fueledShieldBoosting', env, [ItemModifier(Domain.shipID, 'shieldCharge', Operation.addRate, 'shieldBonus')])
    updateEffect('shipModuleRemoteShieldBooster', env, [ItemModifier(Domain.targetID, 'shieldCharge', Operation.addRate, 'shieldBonus')])
    updateEffect('shipModuleAncillaryRemoteShieldBooster', env, [ItemModifier(Domain.targetID, 'shieldCharge', Operation.addRate, 'shieldBonus')])
    # Hull repairers (local and remote).
    updateEffect('structureRepair', env, [ItemModifier(Domain.shipID, 'damage', Operation.subRate, 'structureDamageAmount')])
    updateEffect('shipModuleRemoteHullRepairer', env, [ItemModifier(Domain.targetID, 'damage', Operation.subRate, 'structureDamageAmount')])
def patchEnergyTransfers(env):
    """Rewrite capacitor transfer/neutralizer/nosferatu effects on the target's charge."""
    def charge_mod(operation):
        # All three effects move the target's 'charge' by 'powerTransferAmount'.
        return [ItemModifier(Domain.targetID, 'charge', operation, 'powerTransferAmount')]
    updateEffect('shipModuleRemoteCapacitorTransmitter', env, charge_mod(Operation.addRate))
    updateEffect('energyNeutralizerFalloff', env, charge_mod(Operation.subRate))
    updateEffect('energyNosferatuFalloff', env, charge_mod(Operation.subRate))
def patchRemoteTrackingComputer(env):
    """Rewrite the remote tracking computer as Gunnery-skill-scoped bonuses on the target."""
    pairs = (('trackingSpeed', 'trackingSpeedBonus'),
             ('maxRange', 'maxRangeBonus'),
             ('falloff', 'falloffBonus'))
    mods = [LocationRequiredSkillModifier(Domain.targetID, 'Gunnery', attr,
                                          Operation.postPercent, bonus)
            for attr, bonus in pairs]
    updateEffect('shipModuleRemoteTrackingComputer', env, mods)
def patchConfessor(env):
    """Rewrite the Confessor tactical-mode bonuses as post-division modifiers."""
    ship = Domain.shipID
    updateEffect('modeVelocityPostDiv', env,
                 [ItemModifier(ship, 'maxVelocity', Operation.postDiv, 'modeVelocityPostDiv')])
    updateEffect('modeAgilityPostDiv', env,
                 [ItemModifier(ship, 'agility', Operation.postDiv, 'modeAgilityPostDiv')])
    # Defensive mode: one post-div modifier per armor resonance.
    resists = (('armorEmDamageResonance', 'modeEmResistancePostDiv'),
               ('armorExplosiveDamageResonance', 'modeExplosiveResistancePostDiv'),
               ('armorKineticDamageResonance', 'modeKineticResistancePostDiv'),
               ('armorThermalDamageResonance', 'modeThermicResistancePostDiv'))
    updateEffect('modeArmorResonancePostDiv', env,
                 [ItemModifier(ship, attr, Operation.postDiv, src) for attr, src in resists])
    updateEffect('modeSigRadiusPostDiv', env,
                 [ItemModifier(ship, 'signatureRadius', Operation.postDiv, 'modeSignatureRadiusPostDiv')])
    updateEffect('shipModeScanStrengthPostDiv', env,
                 [ItemModifier(ship, 'scanRadarStrength', Operation.postDiv, 'modeRadarStrengthPostDiv')])
    # Sharpshooter mode: Small Energy Turret optimal range and lock range.
    updateEffect('shipModeSETOptimalRangePostDiv', env,
                 [LocationRequiredSkillModifier(ship, 'Small Energy Turret', 'maxRange',
                                                Operation.postDiv, 'modeMaxRangePostDiv')])
    updateEffect('shipModeMaxTargetRangePostDiv', env,
                 [ItemModifier(ship, 'maxTargetRange', Operation.postDiv, 'modeMaxTargetRangePostDiv')])
def patchSvipul(env):
    """Rewrite the Svipul tactical-mode bonuses as post-division modifiers."""
    ship = Domain.shipID
    # Defensive mode: one post-div modifier per shield resonance.
    resists = (('shieldEmDamageResonance', 'modeEmResistancePostDiv'),
               ('shieldExplosiveDamageResonance', 'modeExplosiveResistancePostDiv'),
               ('shieldKineticDamageResonance', 'modeKineticResistancePostDiv'),
               ('shieldThermalDamageResonance', 'modeThermicResistancePostDiv'))
    updateEffect('modeShieldResonancePostDiv', env,
                 [ItemModifier(ship, attr, Operation.postDiv, src) for attr, src in resists])
    updateEffect('shipModeSPTTrackingPostDiv', env,
                 [LocationRequiredSkillModifier(ship, 'Small Projectile Turret', 'trackingSpeed',
                                                Operation.postDiv, 'modeTrackingPostDiv')])
    updateEffect('modeMWDSigRadiusPostDiv', env,
                 [LocationRequiredSkillModifier(ship, 'High Speed Maneuvering', 'signatureRadiusBonus',
                                                Operation.postDiv, 'modeMWDSigPenaltyPostDiv')])
def patchJackdaw(env):
    """Rewrite the Jackdaw propulsion-mode missile velocity bonus."""
    mod = OwnerRequiredSkillModifier(Domain.charID, 'Missile Launcher Operation',
                                     'maxVelocity', Operation.postDiv, 'modeMaxRangePostDiv')
    updateEffect('shipModeMissileVelocityPostDiv', env, [mod])
def patchHecate(env):
    """Rewrite the Hecate tactical-mode bonuses as post-division modifiers."""
    ship = Domain.shipID
    # Defensive mode: one post-div modifier per hull resonance.
    resists = (('emDamageResonance', 'modeEmResistancePostDiv'),
               ('explosiveDamageResonance', 'modeExplosiveResistancePostDiv'),
               ('kineticDamageResonance', 'modeKineticResistancePostDiv'),
               ('thermalDamageResonance', 'modeThermicResistancePostDiv'))
    updateEffect('modeHullResonancePostDiv', env,
                 [ItemModifier(ship, attr, Operation.postDiv, src) for attr, src in resists])
    # Skill-scoped mode bonuses: (effect, required skill, modified attr, modifying attr).
    skill_mods = (('modeArmorRepDurationPostDiv', 'Repair Systems',
                   'duration', 'modeArmorRepDurationPostDiv'),
                  ('modeMWDBoostPostDiv', 'High Speed Maneuvering',
                   'speedFactor', 'modeMWDVelocityPostDiv'),
                  ('modeMWDCapPostDiv', 'High Speed Maneuvering',
                   'capacitorNeed', 'modeMWDCapPostDiv'),
                  ('shipModeSHTOptimalRangePostDiv', 'Small Hybrid Turret',
                   'maxRange', 'modeMaxRangePostDiv'))
    for effect, skill, attr, src in skill_mods:
        updateEffect(effect, env,
                     [LocationRequiredSkillModifier(ship, skill, attr, Operation.postDiv, src)])
def patchSpeedBoost(env):
    """Install helper attributes/effects modelling afterburner/MWD speed boosts."""
    item = Domain.itemID
    ship = Domain.shipID
    # Intermediate attributes that hold the partial speed-boost calculation.
    addAttribute(576, 'speedBoostFactorCalc', 9, 0, True, True, 0.01, env)
    addAttribute(578, 'speedBoostFactorCalc2', 9, 0, True, True, 1.0, env)
    # Passive helper effects that feed the intermediates, plus a ship mass factor.
    addEffect(710, 'speedBoostFactorCalculator', EffectCategory.passive, False, False, env)
    addEffect(712, 'speedBoostFactorCalculator2', EffectCategory.passive, False, False, env)
    addEffect(1171, 'massFactor', EffectCategory.passive, False, False, env)
    for helper in ('speedBoostFactorCalculator', 'speedBoostFactorCalculator2'):
        addEffectGroup('Propulsion Module', helper, env)
    updateEffect('speedBoostFactorCalculator', env,
                 [ItemModifier(item, 'speedBoostFactorCalc', Operation.postMul, 'speedFactor'),
                  ItemModifier(item, 'speedBoostFactorCalc', Operation.postMul, 'speedBoostFactor')])
    updateEffect('speedBoostFactorCalculator2', env,
                 [ItemModifier(item, 'speedBoostFactorCalc2', Operation.modAdd, 'speedBoostFactorCalc')])
    # Afterburner/MWD: scale max velocity by the computed factor, add module mass;
    # the MWD additionally blooms signature radius.
    updateEffect('moduleBonusAfterburner', env,
                 [ItemModifier(ship, 'maxVelocity', Operation.postMul, 'speedBoostFactorCalc2'),
                  ItemModifier(ship, 'mass', Operation.modAdd, 'massAddition')])
    updateEffect('moduleBonusMicrowarpdrive', env,
                 [ItemModifier(ship, 'maxVelocity', Operation.postMul, 'speedBoostFactorCalc2'),
                  ItemModifier(ship, 'mass', Operation.modAdd, 'massAddition'),
                  ItemModifier(ship, 'signatureRadius', Operation.postPercent, 'signatureRadiusBonus')])
    # Divide the boost factor by ship mass for every fitted propulsion module.
    updateEffect('massFactor', env,
                 [LocationGroupModifier(item, 'Propulsion Module', 'speedBoostFactorCalc',
                                        Operation.postDiv, 'mass')])
    addEffectCategory('Ship', 'massFactor', env)
def patchMissing(env):
    """Fill in effects absent from the export: drone damage, RoF, hardpoint use, missile damage, cloak delay."""
    char = Domain.charID
    ship = Domain.shipID
    item = Domain.itemID
    updateEffect('droneDmgBonus', env,
                 [LocationRequiredDomainSkillModifier(char, 'damageMultiplier',
                                                      Operation.postPercent,
                                                      'damageMultiplierBonus', item)])
    updateEffect('selfRof', env,
                 [LocationRequiredDomainSkillModifier(ship, 'speed',
                                                      Operation.postPercent,
                                                      'rofBonus', item)])
    # Fitting a launcher/turret consumes one free hardpoint.
    for effect, attr in (('launcherFitted', 'launcherSlotsLeft'),
                         ('turretFitted', 'turretSlotsLeft')):
        updateEffect(effect, env, [ItemModifier(ship, attr, Operation.modSub, 'slots')])
    # Per-damage-type missile damage bonuses.
    for effect, attr in (('missileEMDmgBonus', 'emDamage'),
                         ('missileExplosiveDmgBonus', 'explosiveDamage'),
                         ('missileThermalDmgBonus', 'thermalDamage'),
                         ('missileKineticDmgBonus2', 'kineticDamage')):
        updateEffect(effect, env,
                     [LocationRequiredDomainSkillModifier(char, attr, Operation.postPercent,
                                                          'damageMultiplierBonus', item)])
    updateEffect('cloakingTargetingDelayBonusPostPercentCloakingTargetingDelayBonusForShipModulesRequiringCloaking',
                 env,
                 [LocationRequiredDomainSkillModifier(ship, 'cloakingTargetingDelay',
                                                      Operation.postPercent,
                                                      'cloakingTargetingDelayBonus', item)])
def patchGangBoost(env):
addEffect(1004, 'gangBoost', EffectCategory.activation, False, False, env)
addEffectGroup('Command Burst', 'gangBoost', env)
addEffectGroup('Titan Phenomena Generator', 'gangBoost', env)
warfareBuffs = dict()
for typeID, type in env['typeDogma'].items():
for attribute in type['dogmaAttributes']:
name = env['dogmaAttributes'][attribute['attributeID']]['name']
if name.startswith('warfareBuff'):
bufID = int(attribute['value'])
if bufID in warfareBuffs:
continue
modifyingAttribute = 'none'
if bufID == 10:
id = 'shieldHarmonizingChargeBuff'
attributes = ["shieldEmDamageResonance", "shieldKineticDamageResonance", "shieldThermalDamageResonance", "shieldExplosiveDamageResonance"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 11:
id = 'activeShieldingChargeBuf'
modifiers = list()
for modifiedAttribute in ["capacitorNeed", "duration"]:
for skill in ["Shield Operation", "Shield Emission Systems"]:
modifiers.append(LocationRequiredSkillModifier(Domain.shipID, skill, modifiedAttribute, Operation.postPercent, modifyingAttribute))
elif bufID == 12:
id = 'shieldExtensionChargeBuff'
modifiers = [ItemModifier(Domain.shipID, 'shieldCapacity', Operation.postPercent, modifyingAttribute)]
elif bufID == 13:
id = 'armorEnergizingChargeBuff'
attributes = ["armorEmDamageResonance", "armorKineticDamageResonance", "armorThermalDamageResonance", "armorExplosiveDamageResonance"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 14:
id = 'rapidRepairChargeBuff'
modifiers = list()
for modifiedAttribute in ["capacitorNeed", "duration"]:
for skill in ["Repair Systems", "Remote Armor Repair Systems"]:
modifiers.append(LocationRequiredSkillModifier(Domain.shipID, skill, modifiedAttribute, Operation.postPercent, modifyingAttribute))
elif bufID == 15:
id = 'armorReinforcementChargeBuff'
modifiers = [ItemModifier(Domain.shipID, 'armorHP', Operation.postPercent, modifyingAttribute)]
elif bufID == 16:
id = 'sensorOptimizationChargeBuff1'
modifiers = [ItemModifier(Domain.shipID, 'scanResolution', Operation.postPercent, modifyingAttribute)]
elif bufID == 17:
id = 'electronicSuperiorityChargeBuff'
modifiers = list()
for modifiedAttribute in ["maxRange", "falloffEffectiveness"]:
for group in ["ECM", "Sensor Dampener", "Weapon Disruptor", "Target Painter"]:
modifiers.append(LocationGroupModifier(Domain.shipID, group, modifiedAttribute, Operation.postPercent, modifyingAttribute))
for modifiedAttribute in ["scanGravimetricStrengthBonus", "scanLadarStrengthBonus", "scanMagnetometricStrengthBonus", "scanRadarStrengthBonus"]:
modifiers.append(LocationGroupModifier(Domain.shipID, 'ECM', modifiedAttribute, Operation.postPercent, modifyingAttribute))
for modifiedAttribute in ["missileVelocityBonus", "explosionDelayBonus", "aoeVelocityBonus", "falloffBonus", "maxRangeBonus", "aoeCloudSizeBonus", "trackingSpeedBonus"]:
modifiers.append(LocationGroupModifier(Domain.shipID, 'Weapon Disruptor', modifiedAttribute, Operation.postPercent, modifyingAttribute))
for modifiedAttribute in ["maxTargetRangeBonus", "scanResolutionBonus"]:
modifiers.append(LocationGroupModifier(Domain.shipID, 'Sensor Dampener', modifiedAttribute, Operation.postPercent, modifyingAttribute))
modifiers.append(LocationGroupModifier(Domain.shipID, 'Target Painter', 'signatureRadiusBonus', Operation.postPercent, modifyingAttribute))
elif bufID == 18:
id = 'electronicHardeningChargeBuff1'
attributes = ["scanGravimetricStrength", "scanRadarStrength", "scanLadarStrength", "scanMagnetometricStrength"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 19:
id = 'electronicHardeningChargeBuff2'
attributes = ["sensorDampenerResistance", "weaponDisruptionResistance"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 20:
id = 'evasiveManeuversChargeBuff1'
modifiers = [ItemModifier(Domain.shipID, 'signatureRadius', Operation.postPercent, modifyingAttribute)]
elif bufID == 21:
id = 'interdictionManeuversChargeBuff'
groups = ["Stasis Web", "Warp Scrambler"]
modifiers = [LocationGroupModifier(Domain.shipID, x, 'maxRange', Operation.postPercent, modifyingAttribute) for x in groups]
elif bufID == 22:
id = 'rapidDeploymentChargeBuff'
skills = ["Afterburner", "High Speed Maneuvering"]
modifiers = [LocationRequiredSkillModifier(Domain.shipID, x, 'speedFactor', Operation.postPercent, modifyingAttribute) for x in skills]
elif bufID == 23:
id = 'miningLaserFieldEnhancementChargeBuff'
skills = ["Mining", "Ice Harvesting", "Gas Cloud Harvesting"]
modifiers = [LocationRequiredSkillModifier(Domain.shipID, x, 'maxRange', Operation.postPercent, modifyingAttribute) for x in skills]
modifiers.append(LocationRequiredSkillModifier(Domain.shipID, 'CPU Management', 'surveyScanRange', Operation.postPercent, modifyingAttribute))
elif bufID == 24:
id = 'miningLaserOptimizationChargeBuff'
modifiers = list()
for modifiedAttribute in ["capacitorNeed", "duration"]:
for skill in ["Mining", "Ice Harvesting", "Gas Cloud Harvesting"]:
modifiers.append(LocationRequiredSkillModifier(Domain.shipID, skill, modifiedAttribute, Operation.postPercent, modifyingAttribute))
elif bufID == 25:
id = 'miningEquipmentPreservationChargeBuff1'
modifiers = [LocationRequiredSkillModifier(Domain.shipID, 'Mining', 'crystalVolatilityChance', Operation.postPercent, modifyingAttribute)]
elif bufID == 26:
id = 'sensorOptimizationChargeBuff2'
modifiers = [ItemModifier(Domain.shipID, 'maxTargetRange', Operation.postPercent, modifyingAttribute)]
elif bufID == 39:
id = 'amarrPhenomenaGeneratorBuff1'
modifiers = [ItemModifier(Domain.shipID, 'rechargeRate', Operation.postPercent, modifyingAttribute)]
elif bufID == 40:
id = 'amarrPhenomenaGeneratorBuff2'
attributes = ["armorKineticDamageResonance", "shieldKineticDamageResonance", "kineticDamageResonance"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 41:
id = 'amarrPhenomenaGeneratorBuff3'
attributes = ["armorEmDamageResonance", "shieldEmDamageResonance", "emDamageResonance"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 42:
id = 'gallentePhenomenaGeneratorBuff1'
modifiers = [ItemModifier(Domain.shipID, 'armorHP', Operation.postPercent, modifyingAttribute)]
elif bufID == 43:
id = 'gallentePhenomenaGeneratorBuff2'
attributes = ["armorExplosiveDamageResonance", "shieldExplosiveDamageResonance", "explosiveDamageResonance"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 44:
id = 'gallentePhenomenaGeneratorBuff3'
attributes = ["armorThermalDamageResonance", "shieldThermalDamageResonance", "thermalDamageResonance"]
modifiers = [ItemModifier(Domain.shipID, x, Operation.postPercent, modifyingAttribute) for x in attributes]
elif bufID == 45:
id = 'minmatarPhenomenaGeneratorBuff1'
modifiers = [ItemModifier(Domain.shipID, 'signatureRadius', Operation.postPercent, modifyingAttribute)]
elif bufID == 46:
id = 'minmatarPhenomenaGeneratorBuff2'
attributes = ["armorThermalDamageResonance", "shieldThermalDamageResonance", "thermalDamageResonance"]
| |
y_self_bool['hat'] - y
failures = diff != 0
print('^' * 80)
print(type(failures))
print(failures.describe())
print(failures[:5])
failures_df = Series([False] * len(df), index=df.index)
for idx, val in failures.iteritems():
failures_df[idx] = val
df = df[failures_df]
y_self_df = Series([0.0] * len(df), index=df.index, dtype=float)
y_self_df_bool = Series([0] * len(df), index=df.index, dtype=int)
for idx in y_self_df.index:
y_self_df[idx] = y_self['hat'][idx]
y_self_df_bool[idx] = y_self_bool['hat'][idx]
df['probability'] = y_self_df
df['predicted'] = y_self_df_bool
columns = list(df.columns[-3:]) + list(df.columns[:-3])
df2 = DataFrame()
for col in columns:
df2[col] = df[col]
df2.sort_values('hat', ascending=False, inplace=True)
df2.to_csv('%s.failures.csv' % name, index_label='job_id')
def show_predicted(df, y_test, out_path):
    """Write the rows of *df* indexed by *y_test*, with predictions in a
    leading 'hat' column, to '<out_path>-<len(y_test)>.predicted.csv'.

    The file is indexed by job_id. *df* itself is not modified.
    """
    print('show_predicted: df=%s,y_test=%s,out_path="%s"' %
          (S(df), S(y_test), out_path))
    name = '%s-%d' % (out_path, len(y_test))
    print('~' * 80)
    # Copy the slice: assigning into a .loc view would mutate the caller's
    # frame and trigger pandas chained-assignment warnings.
    df = df.loc[y_test.index, :].copy()
    df['hat'] = y_test
    # Move 'hat' to the front; selecting the column list copies in one step
    # instead of rebuilding the frame column by column.
    columns = ['hat'] + [col for col in df.columns if col != 'hat']
    df2 = df[columns]
    df2.to_csv('%s.predicted.csv' % name, index_label='job_id')
def show_predicted_prob(df, y_test, out_path):
    """Write the rows of *df* indexed by *y_test* to CSV, sorted by descending
    prediction probability, with the probabilities in a leading 'hat' column.

    Output file: '<out_path>-<len(y_test)>.predicted.csv', indexed by job_id.
    *df* itself is not modified.
    """
    print('show_predicted_prob: df=%s,y_test=%s,out_path="%s"' %
          (S(df), S(y_test), out_path))
    name = '%s-%d' % (out_path, len(y_test))
    print('~' * 80)
    # Copy the slice: assigning into a .loc view would mutate the caller's
    # frame and trigger pandas chained-assignment warnings.
    df = df.loc[y_test.index, :].copy()
    df['hat'] = y_test
    # Move 'hat' to the front and sort; one selection replaces the manual
    # column-by-column rebuild of the original.
    columns = ['hat'] + [col for col in df.columns if col != 'hat']
    df2 = df[columns].sort_values('hat', ascending=False)
    df2.to_csv('%s.predicted.csv' % name, index_label='job_id')
def build_model001(df):
    """Model 001: features are salary_min plus one-hot encoded subclasses.

    Returns (X, y, X_test) ready for fitting and prediction.
    """
    x_cols = ['salary_min', 'subclasses']
    # Drop rows missing any feature or the label before splitting.
    df2 = remove_nulls(df, x_cols + ['hat'])
    df_train, df_test = split_train_test(df2)
    X, y = getXy(df_train, x_cols)
    X_test, _ = getXy(df_test, x_cols)
    # Dummy-encode 'subclasses' against the combined train+test frame so both
    # splits end up with identical dummy columns.
    X_all = pd.concat([X, X_test])
    X = getDummy(X_all, X, 'subclasses')
    X_test = getDummy(X_all, X_test, 'subclasses')
    # Debug scaffolding: verify the split/encode steps preserved row indices.
    # NOTE(review): these asserts compare against the *original* df index,
    # which only holds if remove_nulls dropped nothing -- confirm intent.
    print('df', df.shape)
    print('X_all', X_all.shape)
    assert frozenset(df.index) == frozenset(X_all.index)
    X_all = pd.concat([X, X_test])
    print('X_all', X_all.shape)
    assert frozenset(df.index) == frozenset(X_all.index)
    # assert False
    return X, y, X_test
def build_model002(df):
    """Model 002: numeric-only features, salary_min and salary_max.

    Returns (X, y, X_test) ready for fitting and prediction.
    """
    feature_cols = ['salary_min', 'salary_max']
    # Drop rows missing any feature or the label before splitting.
    clean = remove_nulls(df, feature_cols + ['hat'])
    train_df, test_df = split_train_test(clean)
    X, y = getXy(train_df, feature_cols)
    X_test, _ = getXy(test_df, feature_cols)
    return X, y, X_test
def build_model003(df):
    """Model 003: features are salary_max plus one-hot encoded subclasses.

    Returns (X, y, X_test) ready for fitting and prediction.
    """
    feature_cols = ['salary_max', 'subclasses']
    clean = remove_nulls(df, feature_cols + ['hat'])
    train_df, test_df = split_train_test(clean)
    X, y = getXy(train_df, feature_cols)
    X_test, _ = getXy(test_df, feature_cols)
    # Encode against the union of train and test so both share dummy columns.
    combined = pd.concat([X, X_test])
    X = getDummy(combined, X, 'subclasses')
    X_test = getDummy(combined, X_test, 'subclasses')
    return X, y, X_test
def add_keywords(df, column, keywords):
    """Replace text column *column* with binary keyword-indicator columns.

    For each kw in *keywords*, a new column '<column>_<kw>' is 1 when kw
    appears as a whole (lowercased, punctuation-stripped) word in the row's
    text, else 0. Non-string (NaN) cells count as containing no keywords.

    Returns a new DataFrame; *df* is not modified.
    """
    data = np.zeros((len(df), len(keywords)), dtype=np.int8)
    nan_count = 0
    for i, text in enumerate(df[column]):
        if not isinstance(text, str):
            # NaN / non-string cell: leave the indicator row all-zero.
            nan_count += 1
            continue
        words = set(RE_SPACE.split(text.lower()))
        words = {w.replace("'", '').replace("!", '').replace("?", '')
                 for w in words}
        for j, kw in enumerate(keywords):
            if kw in words:
                data[i, j] = 1
    print('column=%s,df=%s,nan_count=%d=%.2f' % (column, list(df.shape), nan_count,
                                                 nan_count / len(df)))
    # drop() returns a copy, so the indicator assignments below cannot hit the
    # original frame (the old column-list slice was a view, and the .loc
    # assignments raised SettingWithCopyWarning).
    df_out = df.drop(columns=[column])
    for j, kw in enumerate(keywords):
        df_out['%s_%s' % (column, kw)] = data[:, j]
    # Fixed: the original asserted isinstance(df, DataFrame), checking the
    # input instead of the value actually returned.
    assert isinstance(df_out, DataFrame)
    return df_out
def build_model004(df):
    """Model 004: keyword-indicator features from title and abstract text.

    Returns (X, y, X_test) ready for fitting and prediction.
    """
    from keywords import get_keywords
    text_cols = ['title', 'abstract']
    train_df, test_df = split_train_test(df)
    X, y = getXy(train_df, text_cols)
    X_test, _ = getXy(test_df, text_cols)
    keywords = get_keywords(50)
    print('keywords=%s' % keywords)
    print('X before=%s:%s' % (list(X.shape), X.columns))
    # Expand each text column into per-keyword 0/1 columns, train then test.
    for col in text_cols:
        X = add_keywords(X, col, keywords[col])
    for col in text_cols:
        X_test = add_keywords(X_test, col, keywords[col])
    print('X after =%s:%s' % (list(X.shape), X.columns))
    return X, y, X_test
def build_model005(df):
    """Model 005: salary bounds plus title/abstract keyword indicators.

    Rows lacking either salary bound are dropped before splitting.
    Returns (X, y, X_test) ready for fitting and prediction.
    """
    from keywords import get_keywords
    x_cols = ['salary_min', 'salary_max', 'title', 'abstract']
    # Keep only rows where both salary bounds are present.
    has_minmax = df['salary_min'].notnull() & df['salary_max'].notnull()
    df = df[has_minmax]
    df_train, df_test = split_train_test(df)
    X, y = getXy(df_train, x_cols)
    X_test, _ = getXy(df_test, x_cols)
    keywords = get_keywords(50)
    # Expand each text column into per-keyword 0/1 columns, train then test.
    X = add_keywords(X, 'title', keywords['title'])
    X = add_keywords(X, 'abstract', keywords['abstract'])
    X_test = add_keywords(X_test, 'title', keywords['title'])
    X_test = add_keywords(X_test, 'abstract', keywords['abstract'])
    return X, y, X_test
def build_model006(df):
    """Model 006: positive/negative keyword indicators from title and abstract.

    Returns (X, y, X_test) ready for fitting and prediction.
    """
    from keywords import get_keywords_pos_neg
    text_cols = ['title', 'abstract']
    train_df, test_df = split_train_test(df)
    X, y = getXy(train_df, text_cols)
    X_test, _ = getXy(test_df, text_cols)
    keywords = get_keywords_pos_neg(50)
    print('keywords=%s' % keywords)
    print('X before=%s:%s' % (list(X.shape), X.columns))
    # Expand each text column into per-keyword 0/1 columns, train then test.
    for col in text_cols:
        X = add_keywords(X, col, keywords[col])
    for col in text_cols:
        X_test = add_keywords(X_test, col, keywords[col])
    print('X after =%s:%s' % (list(X.shape), X.columns))
    return X, y, X_test
def build_model007(df):
    """Model 007: second-generation pos/neg keyword indicators from title and abstract.

    Returns (X, y, X_test) ready for fitting and prediction.
    """
    from keywords import get_keywords_pos_neg2
    text_cols = ['title', 'abstract']
    train_df, test_df = split_train_test(df)
    X, y = getXy(train_df, text_cols)
    X_test, _ = getXy(test_df, text_cols)
    keywords = get_keywords_pos_neg2(50)
    print('keywords=%s' % keywords)
    print('X before=%s:%s' % (list(X.shape), X.columns))
    # Expand each text column into per-keyword 0/1 columns, train then test.
    for col in text_cols:
        X = add_keywords(X, col, keywords[col])
    for col in text_cols:
        X_test = add_keywords(X_test, col, keywords[col])
    print('X after =%s:%s' % (list(X.shape), X.columns))
    return X, y, X_test
def build_model008_zeros(df):
    """Baseline model 008: predict 0 for every test row and write the CSV."""
    _, df_test = split_train_test(df)
    X_test, _ = getXy(df_test, [])
    # All-zero predictions, indexed like the test split.
    zeros = DataFrame([0] * len(X_test), columns=['hat'], index=X_test.index)
    zeros.to_csv('model008_zeros.y_test.csv', index_label='job_id')
def build_model009(df):
    """Model 009: pos/neg keywords from title/abstract plus raw_job_type keywords.

    Returns (X, y, X_test) ready for fitting and prediction.
    """
    from keywords import get_keywords_pos_neg2, get_keywords_column, kw_raw_job_type
    text_cols = ['title', 'abstract', 'raw_job_type']
    train_df, test_df = split_train_test(df)
    X, y = getXy(train_df, text_cols)
    X_test, _ = getXy(test_df, text_cols)
    keywords = get_keywords_pos_neg2(50)
    print('keywords=%s' % keywords)
    job_type_words = get_keywords_column(50, kw_raw_job_type)
    print('words_raw_job_type=%s' % job_type_words)
    print('X before=%s:%s' % (list(X.shape), X.columns))
    # raw_job_type has its own keyword list; title/abstract come from keywords.
    keyword_map = {'title': keywords['title'],
                   'abstract': keywords['abstract'],
                   'raw_job_type': job_type_words}
    for col in text_cols:
        X = add_keywords(X, col, keyword_map[col])
    for col in text_cols:
        X_test = add_keywords(X_test, col, keyword_map[col])
    print('X after =%s:%s' % (list(X.shape), X.columns))
    return X, y, X_test
# Tokens ignored when counting/contrasting words: common English function
# words plus separator characters that survive tokenization.
STOP_WORDS = {
    '-',
    'and',
    'for',
    '/',
    '|',
    '&',
    '<',
    '>',
    'of',
    'in',
    'a',
    'as',
    'i',
    'on',
    'the',
    'this',
    'their',
    'an',
    'to',
    'or',
    'is'
}
# Splits free text into word tokens on whitespace and common punctuation.
RE_SPACE = re.compile(r'[\s\.,;:\(\)\[\]/\+&\-\|]+')
def show_words_column(df, column, n_top):
    """Print words in *column* whose frequency contrasts most between the
    hat==0 and hat==1 classes.

    Builds per-hat word counts, filters to words frequent enough in either
    class, then ranks by smoothed log-ratio deviation from the overall base
    rate. NOTE(review): *n_top* is currently unused; output size is governed
    by the hard-coded min_n / 0.3 thresholds below -- confirm intent.
    """
    print('=' * 80)
    print('show_words:', df.shape, column)
    # Word -> occurrence count, one map per hat label.
    hat_counts = {}
    for hat in [-1, 0, 1]:
        counts = defaultdict(int)
        df2 = df[df['hat'] == hat]
        for title in df2[column]:
            title = title.lower()
            words = set(RE_SPACE.split(title))
            words = {w.replace("'", '').replace("!", '') .replace("?", '')
                     for w in words}
            for w in words:
                if w and (w not in STOP_WORDS) and ('$' not in w):
                    counts[w] += 1
        hat_counts[hat] = counts
    if False:  # debug: dump the raw top-90 words per hat label
        for hat in [-1, 0, 1]:
            print('-' * 80)
            print('hat=%d' % hat)
            counts = hat_counts[hat]
            for i, w in enumerate(sorted(counts, key=lambda k: -counts[k])[:90]):
                print('%3d: %4d: %s' % (i, counts[w], w))
    # Removed dead code: top0/top1 were computed but never used, and top1's
    # sort key read hat_counts[0] (the wrong class), silently inserting
    # zero-count keys into the defaultdict.
    # N/P: total word occurrences in the negative/positive class; R is the
    # base rate used to normalize per-class counts.
    N = sum(hat_counts[0].values())
    P = sum(hat_counts[1].values())
    R = P / N
    min_n = 200
    min_p = min_n * R
    def log_ratio(n, p):
        # +10 smoothing keeps rare words from producing extreme ratios.
        return np.log10((p + 10) / (n / R + 10))
    log_ratio_0 = log_ratio(N, P)
    key_words = set()
    for hat in [0, 1]:
        key_words |= set(hat_counts[hat].keys())
    # contrasts: word -> [count in hat==0, count in hat==1]
    contrasts = {w: [0, 0] for w in key_words}
    for i, hat in enumerate([0, 1]):
        for w, n in hat_counts[hat].items():
            contrasts[w][i] += n
    # Keep only words frequent enough in at least one class.
    contrasts = {w: [n, p] for w, (n, p) in contrasts.items()
                 if n >= min_n or p >= min_p}
    print('before:', len(contrasts))
    # Keep only words whose log-ratio deviates meaningfully from the base rate.
    contrasts = {w: [n, p] for w, (n, p) in contrasts.items()
                 if np.abs(log_ratio(n, p) - log_ratio_0) > 0.3}
    print('after:', len(contrasts))
    contrasts = {w: (n, p, log_ratio(n, p),
                     log_ratio(n, p) - log_ratio_0) for w, (n, p) in contrasts.items()}
    def sort_key(w, npr):
        # Sort by deviation from the base-rate log ratio (ascending).
        n, p, r, r0 = npr
        return r0
    pretty_list = sorted(contrasts.items(), key=lambda kv: sort_key(*kv))
    print('*' * 80)
    pprint(pretty_list)
def show_words(df, n_top):
    """Fill missing abstracts with '' and report contrast words for title and abstract."""
    df['abstract'] = df['abstract'].fillna('')
    # Sanity check: every abstract cell must now be a string.
    col_pos = list(df.columns).index('abstract')
    for row in df.itertuples():
        value = row[col_pos + 1]
        assert isinstance(value, str), (type(value), value, row)
    for column in ('title', 'abstract'):
        show_words_column(df, column, n_top)
def show_words_job(df, n_top):
    """Fill missing raw_job_type with '' and report its contrast words.

    Example raw_job_type values:
        Casual
        Internship/Work Experience
        Full Time
        Full-time
    """
    df['raw_job_type'] = df['raw_job_type'].fillna('')
    # Sanity check: every raw_job_type cell must now be a string.
    col_pos = list(df.columns).index('raw_job_type')
    for row in df.itertuples():
        value = row[col_pos + 1]
        assert isinstance(value, str), (type(value), value, row)
    show_words_column(df, 'raw_job_type', n_top)
# Matches the numeric part of a model output filename, e.g. 'model003.y_test.csv'.
RE_MODEL = re.compile(r'model(\d+)\.')

def model_num(path):
    """Return the integer model number embedded in *path* (asserts if absent)."""
    match = RE_MODEL.search(path)
    assert match, path
    return int(match.group(1))
def combine_models():
model_nums = [1, 2, 3]
# model_nums = [2, 3]
model_paths = ['model%03d.y_test.csv' % i for i in model_nums]
assert all(os.path.exists(path) for path in model_paths)
# y1 = pd.read_csv('model001.y_test.csv').set_index('job_id')
# y2 = pd.read_csv('model002.y_test.csv').set_index('job_id')
# y3 = pd.read_csv('model003.y_test.csv').set_index('job_id')
models = [pd.read_csv(path).set_index('job_id') for path in model_paths]
path = 'all/jobs_all.csv'
df = get_data(path)
df_train, df_test = split_train_test(df)
y_data = np.ones((len(df_test), len(models)), dtype=int) * -1
y = DataFrame(y_data, columns=model_nums, index=df_test.index)
for d in [y] + models:
| |
<filename>processing/processor.py<gh_stars>0
'''
Description: Implements the processors which generate the products the system
is capable of producing.
License: NASA Open Source Agreement 1.3
'''
import os
from os.path import expanduser
import shutil
import glob
import json
import datetime
import copy
import subprocess
from collections import namedtuple
import re
from espa import Metadata
import settings
import utilities
from logging_tools import EspaLogging
import sensor
import initialization
import parameters
import landsat_metadata
import staging
import transfer
import distribution
import product_formatting
from espa_exception import ESPAException
class ProductProcessor(object):
"""Provides the super class for all product request processing
It performs the tasks needed by all processors.
It initializes the logger object and keeps it around for all the
child-classes to use.
It implements initialization of the order and product directory
structures.
It also implements the cleanup of the product directory.
"""
def __init__(self, cfg, parms):
"""Initialization for the object.
"""
self._logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)
# Some minor enforcement for what parms should be
if isinstance(parms, dict):
self._parms = parms
self._logger.debug('PARMS: {0}'.format(self._parms))
else:
raise ESPAException('Input parameters was of type [{}],'
' where dict is required'.format(type(parms)))
self._cfg = cfg
# Log the distribution method that will be used
self._logger.info('Using distribution method [{}]'.
format(self._cfg.get('espa_distribution_method')))
# Establish the product owner
self._user = self._cfg.get('espa_user')
self._group = self._cfg.get('espa_group')
# Validate the parameters
self.validate_parameters()
# Initialize these, which are set by other methods
self._product_name = None
self._product_dir = None
self._stage_dir = None
self._work_dir = None
self._output_dir = None
# Ship resource report
self._include_resource_report = self._cfg.get('include_resource_report')
def validate_parameters(self):
"""Validates the parameters required for the processor
"""
# Test for presence of required top-level parameters
keys = ['orderid', 'scene', 'product_type', 'options']
for key in keys:
if not parameters.test_for_parameter(self._parms, key):
raise RuntimeError('Missing required input parameter [{}]'
.format(key))
# Set the download URL to None if not provided
if not parameters.test_for_parameter(self._parms, 'download_url'):
self._parms['download_url'] = None
# TODO - Remove this once we have converted
if not parameters.test_for_parameter(self._parms, 'product_id'):
self._logger.warning('[product_id] parameter missing defaulting'
' to [scene]')
self._parms['product_id'] = self._parms['scene']
# Make sure the bridge mode parameter is defined
if not parameters.test_for_parameter(self._parms, 'bridge_mode'):
self._parms['bridge_mode'] = False
# Validate the options
options = self._parms['options']
# Default these so they are not kept, they should only be present and
# turned on for developers
if not parameters.test_for_parameter(options, 'keep_directory'):
options['keep_directory'] = False
if not parameters.test_for_parameter(options,
'keep_intermediate_data'):
options['keep_intermediate_data'] = False
# Verify or set the destination information
if not parameters.test_for_parameter(options, 'destination_username'):
options['destination_username'] = 'localhost'
if not parameters.test_for_parameter(options, 'destination_pw'):
options['destination_pw'] = 'localhost'
def log_order_parameters(self):
"""Log the order parameters in json format
"""
# Override the usernames and passwords for logging
parms = copy.deepcopy(self._parms)
parms['options']['source_username'] = 'XXXXXXX'
parms['options']['destination_username'] = 'XXXXXXX'
parms['options']['source_pw'] = 'XXXXXXX'
parms['options']['destination_pw'] = 'XXXXXXX'
self._logger.info('MAPPER OPTION LINE {}'
.format(json.dumps(parms, sort_keys=True)))
del parms
def snapshot_resources(self):
""" Delivers (to logger) a current resource snapshot in JSON format
"""
# Likely to be turned off duing operations
if not self._include_resource_report:
return
resources = dict(current_workdir_size=utilities.current_disk_usage(self._work_dir),
peak_memory_usage=utilities.peak_memory_usage(),
entity={k: self._parms.get(k) for k in ('scene', 'orderid')})
self._logger.info('*** RESOURCE SNAPSHOT {} ***'
.format(json.dumps(resources, sort_keys=True)))
def initialize_processing_directory(self):
"""Initializes the processing directory
Creates the following directories.
.../output
.../stage
.../work
Note:
order_id and product_id along with the espa_work_dir processing
configuration provide the path to the processing locations.
"""
product_id = self._parms['product_id']
order_id = self._parms['orderid']
# Get the absolute path to the directory, and default to the current one
base_work_dir = self.check_mesos_sandbox(self._cfg.get('espa_work_dir'))
# Create the product directory name
product_dirname = '-'.join([str(order_id), str(product_id)])
# Add the product directory name to the path
self._product_dir = os.path.join(base_work_dir, product_dirname)
# Just incase remove it, and we don't care about errors if it
# doesn't exist (probably only needed for developer runs)
shutil.rmtree(self._product_dir, ignore_errors=True)
# Create each of the sub-directories
self._stage_dir = initialization.create_stage_directory(self._product_dir)
self._logger.info('Created directory [{}]'.format(self._stage_dir))
self._work_dir = initialization.create_work_directory(self._product_dir)
self._logger.info('Created directory [{}]'.format(self._work_dir))
# Will return the espa_distribution_dir if distribution_method is local
self._output_dir = initialization.create_output_directory(self._product_dir)
def remove_product_directory(self):
"""Remove the product directory
"""
options = self._parms['options']
# We don't care about this failing, we just want to attempt to free
# disk space to be nice to the whole system. If this processing
# request failed due to a processing issue. Otherwise, with
# successfull processing, hadoop cleans up after itself.
if self._product_dir is not None and not options['keep_directory']:
shutil.rmtree(self._product_dir, ignore_errors=True)
def get_product_name(self):
"""Build the product name from the product information and current
time
Note:
Not implemented here.
"""
raise NotImplementedError('[{}] Requires implementation in the child'
' class'.format(self.get_product_name
.__name__))
def distribute_product(self):
"""Does both the packaging and distribution of the product using
the distribution module
"""
product_name = self.get_product_name()
# Deliver the product files
product_file = 'ERROR'
cksum_file = 'ERROR'
try:
immutability = utilities.str2bool(self._cfg.get('immutable_distribution'))
(product_file, cksum_file) = \
distribution.distribute_product(immutability=immutability,
product_name=product_name,
source_path=self._work_dir,
packaging_path=self._output_dir,
parms=self._parms,
user=self._user,
group=self._group)
except (Exception, ESPAException):
msg = 'An Exception occurred delivering the product'
self._logger.exception(msg)
raise ESPAException(msg)
self._logger.info('*** Product Delivery Complete ***')
# Let the caller know where we put these on the destination system
return (product_file, cksum_file)
def process_product(self):
"""Perform the processor specific processing to generate the
requested product
Note:
Not implemented here.
Note:
Must return the destination product and cksum file names.
"""
raise NotImplementedError('[{}] Requires implementation in the child'
' class'.format(self.process_product
.__name__))
def process(self):
"""Generates a product through a defined process
This method must cleanup everything it creates by calling the
remove_product_directory() method.
Note:
Must return the destination product and cksum file names.
"""
# Logs the order parameters that can be passed to the mapper for this
# processor
self.log_order_parameters()
# Initialize the processing directory.
self.initialize_processing_directory()
try:
(destination_product_file, destination_cksum_file) = \
self.process_product()
finally:
# Remove the product directory
# Free disk space to be nice to the whole system.
self.remove_product_directory()
return (destination_product_file, destination_cksum_file)
def check_mesos_sandbox(self, path):
"""
Make sure that the base work dir exists. If not, try setting it
to the current user's home directory.
Args:
path (str): The full path to a base working directory
Returns:
str
"""
default = '/home/espa'
path = os.path.abspath(path)
if os.path.exists(path):
self._logger.info('Working directory is set to {}'.format(path))
return path
else:
path = os.path.abspath(default)
self._logger.warning('Mesos sandbox not found, setting base work directory to {}'.format(path))
return path
class CustomizationProcessor(ProductProcessor):
    """Super class for processors that customize (warp) products.

    Adds validation and execution of user-requested reprojection,
    resizing, and image-extent subsetting on top of ProductProcessor.
    """

    # Per-projection extra command line arguments for espa_reprojection.py,
    # as (flag, options-key) pairs in the exact order the script expects.
    _PROJECTION_ARGS = {
        'utm': (('--zone', 'utm_zone'),
                ('--north-south', 'utm_north_south')),
        'aea': (('--datum', 'datum'),
                ('--central-meridian', 'central_meridian'),
                ('--origin-latitude', 'origin_lat'),
                ('--std-parallel-1', 'std_parallel_1'),
                ('--std-parallel-2', 'std_parallel_2'),
                ('--false-easting', 'false_easting'),
                ('--false-northing', 'false_northing')),
        'ps': (('--latitude-true-scale', 'latitude_true_scale'),
               ('--longitude-pole', 'longitude_pole'),
               ('--origin-latitude', 'origin_lat'),
               ('--false-easting', 'false_easting'),
               ('--false-northing', 'false_northing')),
        'sinu': (('--central-meridian', 'central_meridian'),
                 ('--false-easting', 'false_easting'),
                 ('--false-northing', 'false_northing')),
        # 'lonlat' and 'none' take no extra arguments
    }

    def __init__(self, cfg, parms):
        # Subclasses decide whether any products are actually built
        self._build_products = False
        super(CustomizationProcessor, self).__init__(cfg, parms)

    def validate_parameters(self):
        """Validate reprojection parameters on top of the base checks."""
        super(CustomizationProcessor, self).validate_parameters()
        product_id = self._parms['product_id']
        options = self._parms['options']
        self._logger.info('Validating [CustomizationProcessor] parameters')
        parameters.validate_reprojection_parameters(options, product_id)
        # The metadata XML file is named after the product id
        self._xml_filename = '.'.join([product_id, 'xml'])

    def build_reprojection_cmd_line(self, options):
        """Translate the order options into espa_reprojection.py arguments.

        Returns:
            An iterable of strings forming the full command line.
        """
        cmd = ['espa_reprojection.py', '--xml', self._xml_filename]
        # The target projection is the sub-command of the executable;
        # 'none' means no reprojection is performed.
        if not options['reproject']:
            cmd.append('none')
        else:
            projection = options['target_projection']
            cmd.append(projection)
            for flag, key in self._PROJECTION_ARGS.get(projection, ()):
                cmd.extend([flag, options[key]])
        # Default to nearest-neighbor resampling when unspecified
        cmd.extend(['--resample-method',
                    options['resample_method'] or 'near'])
        if options['resize'] or options['reproject'] or options['image_extents']:
            cmd.extend(['--pixel-size', options['pixel_size'],
                        '--pixel-size-units', options['pixel_size_units']])
        if options['image_extents']:
            cmd.extend(['--extent-minx', options['minx'],
                        '--extent-maxx', options['maxx'],
                        '--extent-miny', options['miny'],
                        '--extent-maxy', options['maxy'],
                        '--extent-units', options['image_extents_units']])
        # ESPA reprojection always works in envi format; the requested
        # output format is applied in a later step
        cmd.extend(['--output-format', 'envi'])
        return map(str, cmd)

    def customize_products(self):
        """Warp/resize/subset the products when the order requests it."""
        # Nothing to do when no products were built
        if not self._build_products:
            return
        options = self._parms['options']
        needs_customization = (options['reproject'] or
                               options['resize'] or
                               options['image_extents'] or
                               options['projection'] is not None)
        if not needs_customization:
            return
        previous_directory = os.getcwd()
        # The reprojection script expects to run inside the work directory
        os.chdir(self._work_dir)
        try:
            cmd = self.build_reprojection_cmd_line(options)
            output = ''
            try:
                output = subprocess.check_output(cmd)
            except subprocess.CalledProcessError as error:
                self._logger.info(error.output)
                msg = 'An exception occurred during product customization'
                self._logger.exception(msg)
                raise ESPAException(msg)
            if len(output) > 0:
                self._logger.info(output)
        finally:
            # Always restore the previous working directory
            os.chdir(previous_directory)
class CDRProcessor(CustomizationProcessor):
"""Provides the super class implementation for generating | |
Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCertificateMapEntriesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
    def get_certificate_map_entry(
        self,
        request: Optional[
            Union[certificate_manager.GetCertificateMapEntryRequest, dict]
        ] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> certificate_manager.CertificateMapEntry:
        r"""Gets details of a single CertificateMapEntry.

        .. code-block:: python

            from google.cloud import certificate_manager_v1

            def sample_get_certificate_map_entry():
                # Create a client
                client = certificate_manager_v1.CertificateManagerClient()

                # Initialize request argument(s)
                request = certificate_manager_v1.GetCertificateMapEntryRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_certificate_map_entry(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.certificate_manager_v1.types.GetCertificateMapEntryRequest, dict]):
                The request object. Request for the
                ``GetCertificateMapEntry`` method.
            name (str):
                Required. A name of the certificate map entry to
                describe. Must be in the format
                ``projects/*/locations/*/certificateMaps/*/certificateMapEntries/*``.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.certificate_manager_v1.types.CertificateMapEntry:
                Defines a certificate map entry.

        Raises:
            ValueError: If ``request`` is provided together with the
                flattened ``name`` argument.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # Minor optimization to avoid making a copy if the user passes
        # in a certificate_manager.GetCertificateMapEntryRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, certificate_manager.GetCertificateMapEntryRequest):
            request = certificate_manager.GetCertificateMapEntryRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.get_certificate_map_entry
        ]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    def create_certificate_map_entry(
        self,
        request: Optional[
            Union[certificate_manager.CreateCertificateMapEntryRequest, dict]
        ] = None,
        *,
        parent: Optional[str] = None,
        certificate_map_entry: Optional[certificate_manager.CertificateMapEntry] = None,
        certificate_map_entry_id: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Creates a new CertificateMapEntry in a given project
        and location.

        .. code-block:: python

            from google.cloud import certificate_manager_v1

            def sample_create_certificate_map_entry():
                # Create a client
                client = certificate_manager_v1.CertificateManagerClient()

                # Initialize request argument(s)
                certificate_map_entry = certificate_manager_v1.CertificateMapEntry()
                certificate_map_entry.hostname = "hostname_value"

                request = certificate_manager_v1.CreateCertificateMapEntryRequest(
                    parent="parent_value",
                    certificate_map_entry_id="certificate_map_entry_id_value",
                    certificate_map_entry=certificate_map_entry,
                )

                # Make the request
                operation = client.create_certificate_map_entry(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.certificate_manager_v1.types.CreateCertificateMapEntryRequest, dict]):
                The request object. Request for the
                ``CreateCertificateMapEntry`` method.
            parent (str):
                Required. The parent resource of the certificate map
                entry. Must be in the format
                ``projects/*/locations/*/certificateMaps/*``.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            certificate_map_entry (google.cloud.certificate_manager_v1.types.CertificateMapEntry):
                Required. A definition of the
                certificate map entry to create.
                This corresponds to the ``certificate_map_entry`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            certificate_map_entry_id (str):
                Required. A user-provided name of the
                certificate map entry.
                This corresponds to the ``certificate_map_entry_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.certificate_manager_v1.types.CertificateMapEntry`
                Defines a certificate map entry.

        Raises:
            ValueError: If ``request`` is provided together with any of
                the flattened ``parent``, ``certificate_map_entry`` or
                ``certificate_map_entry_id`` arguments.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any(
            [parent, certificate_map_entry, certificate_map_entry_id]
        )
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # Minor optimization to avoid making a copy if the user passes
        # in a certificate_manager.CreateCertificateMapEntryRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request, certificate_manager.CreateCertificateMapEntryRequest
        ):
            request = certificate_manager.CreateCertificateMapEntryRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if certificate_map_entry is not None:
            request.certificate_map_entry = certificate_map_entry
        if certificate_map_entry_id is not None:
            request.certificate_map_entry_id = certificate_map_entry_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.create_certificate_map_entry
        ]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            certificate_manager.CertificateMapEntry,
            metadata_type=certificate_manager.OperationMetadata,
        )

        # Done; return the response.
        return response
def update_certificate_map_entry(
self,
request: Union[
certificate_manager.UpdateCertificateMapEntryRequest, dict
] = None,
*,
certificate_map_entry: certificate_manager.CertificateMapEntry = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates a CertificateMapEntry.
.. code-block:: python
from google.cloud import certificate_manager_v1
def sample_update_certificate_map_entry():
# Create a client
client = certificate_manager_v1.CertificateManagerClient()
# Initialize request argument(s)
certificate_map_entry = certificate_manager_v1.CertificateMapEntry()
certificate_map_entry.hostname = "hostname_value"
request = certificate_manager_v1.UpdateCertificateMapEntryRequest(
certificate_map_entry=certificate_map_entry,
)
# Make the request
operation = client.update_certificate_map_entry(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.certificate_manager_v1.types.UpdateCertificateMapEntryRequest, dict]):
The request object. Request for the
`UpdateCertificateMapEntry` method.
certificate_map_entry (google.cloud.certificate_manager_v1.types.CertificateMapEntry):
Required. A definition of the
certificate map entry to create map
entry.
This corresponds to the ``certificate_map_entry`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.certificate_manager_v1.types.CertificateMapEntry`
Defines a certificate map entry.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([certificate_map_entry, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a certificate_manager.UpdateCertificateMapEntryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, certificate_manager.UpdateCertificateMapEntryRequest
):
request = certificate_manager.UpdateCertificateMapEntryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if certificate_map_entry is not None:
request.certificate_map_entry = certificate_map_entry
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error | |
# <gh_stars>0
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from numpy import array, zeros, sqrt, power, around, exp, multiply, maximum, minimum
from dsorlib.vehicles.thrusters.deprecated.abstract_thruster_model import AbstractThrusterModel
# noinspection PyPep8Naming
class QuadraticThrusterModel(AbstractThrusterModel):
"""
QuadraticThrusterModel is an implementation for the thruster models of the seabotics
thrusters or any other thruster whose map between the (%RPM) units [-100, 100] (aka the input of the thruster model)
and the input in [kgf] is achieved using quadratic models of the type:
thrust [kgf] = a (%RPM)^2 + b (%RPM)
Since the thruster dynamics class that this class inherits requires the conversion of the input of the motor
from some unit to the S.I standard Newton, a conversion from [kgf] to [N] is further applied in each method
Furthermore, this model assumes that the thruster dynamics are modelled by a first order model
with a delay and a pole
This class inherits the AbstractThrusterModel class. For more information refer to the documentation for AbstractThrusterModel
"""
    def __init__(self,
                 number_of_thrusters: int,  # The number of thrusters
                 B: array,  # The thruster allocation matrix (6 x num_thrusters)
                 min_thrust: float,  # Minimum thrust that can be applied [N]
                 max_thrust: float,  # Maximum thrust that can be applied [N]
                 thruster_map_positive: array,
                 # Quadratic params (a, b) mapping %RPM in [0, 100] to [kgf]
                 thruster_map_negative: array,
                 # Quadratic params (a, b) mapping %RPM in [-100, 0] to [kgf]
                 tau: float,  # Delay of the continuous-time thruster model
                 thrusters_poles: array,  # Continuous-time pole, one per thruster
                 period: float):  # Sampling period of the discretized model
        """Construct a quadratic thruster model.

        Stores the quadratic %RPM<->kgf map parameters and pre-allocates
        the discrete-time state (input delay buffer and previous output)
        used by the thruster dynamics.

        Args:
            number_of_thrusters: Number of thrusters on the vehicle.
            B: Thruster allocation matrix with size (6 x num_thrusters).
            min_thrust: Minimum thrust that can be applied in Newton [N].
            max_thrust: Maximum thrust that can be applied in Newton [N].
            thruster_map_positive: Two params for the quadratic mapping
                between %RPM [0, 100] and [kgf] units.
            thruster_map_negative: Two params for the quadratic mapping
                between %RPM [-100, 0] and [kgf] units.
            tau: Delay in the continuous-time model of the thrusters.
            thrusters_poles: Pole position (in continuous time) for each
                thruster, with size (num_thrusters,).
            period: Sampling period for this discretized model.
        """
        # Initialize the super class elements (allocation matrix, limits)
        super().__init__(number_of_thrusters, B, min_thrust, max_thrust)
        # Save the other parameters, coerced to numpy arrays / floats
        self.thruster_map_positive = array(thruster_map_positive)
        self.thruster_map_negative = array(thruster_map_negative)
        self.tau = float(tau)
        self.thrusters_poles = array(thrusters_poles).reshape((self.number_of_thrusters,))
        self.period = float(period)
        # Number of whole sampling periods in the delay tau: discretizing
        # the continuous-time model turns tau into this many input delays
        self.num_delays_input = int(round(self.tau / self.period))
        # Circular buffer of each thruster's previous inputs
        # (k-1, k-2, ..., k-num_delays)
        self.circular_buffer = zeros((self.number_of_thrusters, self.num_delays_input))
        # Previous output y[k-1] of each thruster model
        self.y_1 = zeros(self.number_of_thrusters)
def __copy__(self):
"""
Implements a deep copy of the QuadraticThrusterModel
because in this class the variables in the buffers must be different for different AUVs
therefore sharing the same thruster model/object with several AUV at the same time
could introduce bugs
:return: A deep copy of the QuadraticThrusterModel
"""
return QuadraticThrusterModel(number_of_thrusters=self.number_of_thrusters,
B=self.B,
min_thrust=self.min_thrust,
max_thrust=self.max_thrust,
thruster_map_positive=self.thruster_map_positive,
thruster_map_negative=self.thruster_map_negative,
tau=self.tau,
thrusters_poles=self.thrusters_poles,
period=self.period)
def thrust_to_input(self, thrust: array = array([0.0, 0.0, 0.0, 0.0])):
"""
The goal of this method is to convert a given vector of thrusts for each individual motor
expressed in [N] and convert it to an input signal to the actual motors - the input signal
is expressed in (%RPM) -> [-100, 1000]
Note: Our model of the thrusters is defined in the relation [kgm] -> [%RPM]
so in this method we just do an extra conversion [N] -> [kgm] by dividing
the thrust in [N] by 9.8
params:
thrust - a vector of the thrust for each individual motor, expressed in Newton [N]
returns:
a vector with the corresponding input signals to the motors (RPM%)
"""
# Convert the Thrust vector to a numpy array
thrust = array(thrust) # [N]
# Saturate between min_thrust and max_thrust
thrust = maximum(thrust, self.min_thrust)
thrust = minimum(thrust, self.max_thrust)
# Divide the thrust by 9.8 to convert it to [kgm]
# Note that our model was identified between [%RPM] and [kgm units]
thrust = 1 / 9.8 * thrust # [kgf]
# Positive side of the model
a_positive = self.thruster_map_positive[0]
b_positive = self.thruster_map_positive[1]
# Negative side of the model
a_negative = self.thruster_map_negative[0]
b_negative = self.thruster_map_negative[1]
# Create an empty vector of thrusts in [RPM%]
input = zeros(thrust.shape)
# Calculate the values for the inputs that are >= 0 [kgf]
input[thrust >= 0] = (-b_positive + sqrt(power(b_positive, 2) + 4 * a_positive * thrust[thrust >= 0])) / (
2 * a_positive)
# Calculate the values for the inputs that are < 0 [kgf]
input[thrust < 0] = (-b_negative + sqrt(power(b_negative, 2) + 4 * a_negative * thrust[thrust < 0])) / (
2 * a_negative)
return input
def input_to_thrust(self, input: array = array([0.0, 0.0, 0.0, 0.0])):
"""
The goal of this method is to convert a given vector of inputs for each individual motor
(RPM%) [-100, 100] and convert it to thrust in Newton [N]
Note: Our model of the thrusters is defined in the relation [%RPM] -> [kgm]
so in this method we just do an extra conversion [kgm] -> [N] by multiplying
the result in [kgm] by 9.8
params:
input - a vector of the input signals to the motors (RPM%)
returns:
a vector with the corresponding thrust expressed in Newton [N]
"""
# Convert the input to a numpy array
input = array(input)
# Saturate the input between [-100, 100] (RPM%)
input = maximum(input, -100.0)
input = minimum(input, 100.0)
# Positive side of the model
a_positive = self.thruster_map_positive[0]
b_positive = self.thruster_map_positive[1]
# Negative side of the model
a_negative = self.thruster_map_negative[0]
b_negative = self.thruster_map_negative[1]
# Create an empty vector to store the thrusts in [kgm]
output = zeros(input.shape)
# Calculate the outputs for the inputs that are >= than 0%
output[input >= 0] = a_positive * power(input[input >= 0], 2) + b_positive * input[input >= 0] # [kgf]
# Calculate the outputs for the inputs that are < than 0%
output[input < 0] = a_negative * power(input[input < 0], 2) + b_negative * input[input < 0] # [kgf]
# Multiply the thrust by 9.8 to convert it to [N]
# Note that our model was identified between [%RPM] and [kgm units]
output = output * 9.8
return output
def thrusters_dynamic_model(self, input: array = array([0.0, 0.0, 0.0, 0.0])):
"""
The goal of this | |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2013-2014, gamesun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of gamesun nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GAMESUN "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GAMESUN BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys, os
import wx
import GUI as ui
import threading
import re
import serial
# import time
from wx.lib.wordwrap import wordwrap
import itertools
import icon32, icon16
import pkg_resources
import zipfile
from cStringIO import StringIO
import webbrowser
import appInfo
import glob
import subprocess
import ConfigParser
# Python 2 hack: reload(sys) restores setdefaultencoding (removed from the
# module namespace at startup) so the default str encoding can be forced
# to UTF-8 for mixed str/unicode serial data.
# NOTE(review): sys.setdefaultencoding can mask real encoding bugs --
# confirm it is still required.
reload(sys)
sys.setdefaultencoding('utf-8')
# Record-type tags used by the MenuDefs structure below
MAINMENU = 0
SUBMENU = 1
MENUITEM = 2
CHECKITEM = 3
SEPARATOR = 4
RADIOITEM = 5
# Receive-view display modes (see the 'Rx view as' menu entries below)
ASCII = 0
HEX_LOWERCASE = 1
HEX_UPPERCASE = 2
# Timeouts, in seconds
THREAD_TIMEOUT = 0.5
SERIAL_WRITE_TIMEOUT = 0.5
SASHPOSITION = 180  # presumably an initial splitter sash position (pixels) -- confirm
# Platform-dependent path separator
if sys.platform == 'win32':
    DIRECTORY_SEPARATER = '\\'
elif sys.platform.startswith('linux'):
    DIRECTORY_SEPARATER = '/'
SERIALRXCNT = wx.NewEventType()  # custom event type for RX counter updates
EVT_SERIALRXCNT = wx.PyEventBinder(SERIALRXCNT, 0)  # binder for subscribing to it
class SerialRxCntEvent(wx.PyCommandEvent):
    """Event posted to the GUI thread when the serial RX counter changes."""
    eventType = SERIALRXCNT

    def __init__(self, windowID):
        wx.PyCommandEvent.__init__(self, self.eventType, windowID)

    def Clone(self):
        """Return a copy of this event.

        wx requires Clone to return the new event object.
        """
        # BUG FIX: the original built the clone but never returned it,
        # so wx received None whenever it cloned the event.
        return self.__class__(self.GetId())
SERIALEXCEPT = wx.NewEventType()  # custom event type for serial port exceptions
EVT_SERIALEXCEPT = wx.PyEventBinder(SERIALEXCEPT, 0)  # binder for subscribing to it
class SerialExceptEvent(wx.PyCommandEvent):
    """Event carrying a serial-port exception (in ``param``) to the GUI thread."""
    eventType = SERIALEXCEPT

    def __init__(self, windowID, param):
        wx.PyCommandEvent.__init__(self, self.eventType, windowID)
        self.param = param

    def Clone(self):
        """Return a copy of this event.

        wx requires Clone to return the new event object.
        """
        # BUG FIX: the original built the clone but never returned it,
        # so wx received None whenever it cloned the event.
        return self.__class__(self.GetId(), self.param)
# Extracts the tty device name (e.g. 'ttyUSB0') from a sysfs path such as
# '/sys/class/tty/ttyUSB0/device/driver'.  Raw string avoids the invalid
# '\w' escape-sequence DeprecationWarning; the pattern value is unchanged.
regex_matchTTY = re.compile(r'/tty/(?P<tty>\w+)')
def EnumerateSerialPorts():
    """Yield the names of the serial ports available on this machine.

    Windows: enumerates HKLM\HARDWARE\DEVICEMAP\SERIALCOMM from the
    registry, falling back to COM1..COM16 when the registry cannot be
    read at all.  Linux: yields /dev/<tty> for every /sys/class/tty
    entry that has a driver bound.  Other platforms yield nothing.
    """
    if sys.platform == 'win32':
        """ Uses the Win32 registry to return an
            iterator of serial (COM) ports
            existing on this computer.
        """
        pathDevi = r'HARDWARE\DEVICEMAP'
        # Python 2 registry module (renamed winreg in Python 3)
        import _winreg as winreg
        try:
            # Probe DEVICEMAP first; failure here means the registry is
            # unreadable, not merely that no ports exist.
            key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, pathDevi)
        except WindowsError:
            # failed in reading registry.
            # return COM1 ~ COM16
            for i in range(1, 17):
                yield "COM" + str(i)
            return
        pathCOMM = r'HARDWARE\DEVICEMAP\SERIALCOMM'
        try:
            key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, pathCOMM)
        except WindowsError:
            # when getting none serial port,
            # SERIALCOMM is not exist in "HARDWARE\DEVICEMAP\".
            # return nothing.
            return
        for i in itertools.count():
            try:
                # EnumValue returns (name, data, type); the data field
                # holds the port name (e.g. 'COM3')
                val = winreg.EnumValue(key, i)
                yield str(val[1])
            except EnvironmentError:
                # raised once we enumerate past the last registry value
                break
    elif sys.platform.startswith('linux'):
        # Only tty entries with a bound driver are real serial devices
        for t in glob.glob('/sys/class/tty/*/device/driver'):
            r = regex_matchTTY.search(t)
            if r:
                yield '/dev/%s' % r.group('tty')
# Menu-item ids kept in module constants because they are checked/toggled
# from code (LoadSettings / the corresponding handlers), not just built once.
MENU_ID_LOCAL_ECHO = wx.NewId()
MENU_ID_RX_ASCII = wx.NewId()
MENU_ID_RX_HEX_L = wx.NewId()
MENU_ID_RX_HEX_U = wx.NewId()
MENU_ID_TRNST_HEX_PNL = wx.NewId()
# Declarative menu description consumed by MyApp.MakeMenu.
# Leaf entries are (kind, id, label, help-text, handler-expression-string);
# SUBMENU entries nest another tuple of items, SEPARATOR entries carry no data.
# The handler strings are eval'ed by MakeMenu to bind wx.EVT_MENU.
MenuDefs = (
MAINMENU,
('&File', (
    (MENUITEM, wx.NewId(), '&Save', 'Save the log to a file' , 'self.OnSave' ),
    (SEPARATOR,),
    (MENUITEM, wx.NewId(), '&Exit MyTerm', 'Exit MyTerm', 'self.OnExitApp' ),
)),
('&Port', (
    (MENUITEM, wx.NewId(), '&Open', 'Open the Port' , 'self.OnOpenPort' ),
    (MENUITEM, wx.NewId(), '&Close', 'Close the Port', 'self.OnClosePort' ),
)),
('&Display', (
    (MENUITEM, wx.NewId(), '&Show Setting Bar', 'Show Setting Bar', 'self.OnShowSettingBar' ),
    (CHECKITEM, wx.NewId(), '&Always on top', 'always on most top', 'self.OnAlwayOnTop' ),
    (CHECKITEM, MENU_ID_LOCAL_ECHO, '&Local echo','echo what you typed', 'self.OnLocalEcho' ),
    (SUBMENU, '&Rx view as', (
        (RADIOITEM, MENU_ID_RX_ASCII, '&Ascii', '', 'self.OnRxAsciiMode' ),
        (RADIOITEM, MENU_ID_RX_HEX_L, '&hex(lowercase)', '', 'self.OnRxHexModeLowercase' ),
        (RADIOITEM, MENU_ID_RX_HEX_U, '&HEX(UPPERCASE)', '', 'self.OnRxHexModeUppercase' ),
    )),
    (CHECKITEM, MENU_ID_TRNST_HEX_PNL, 'Show Transmit &Hex', 'Show Transmit Hex Panel', 'self.OnTransmitHexPanel' ),
    # (SUBMENU, 'Tx view as', (
    #     (RADIOITEM, wx.NewId(), 'ASCII', '', 'self.OnTxAsciiMode' ),
    #     (RADIOITEM, wx.NewId(), 'HEX', '', 'self.OnTxHexMode' ),
    # )),
    # (CHECKITEM, wx.NewId(), 'S&tatus Bar', 'Show Status Bar', 'self.OnShowStatusBar' ),
)),
('&Help', (
    (MENUITEM, wx.NewId(), '&About', 'About MyTerm', 'self.OnAbout' ),
))
)
# Extract the numeric part of a Windows port name such as "COM3".
# Raw string avoids the invalid "\d" escape-sequence warning on modern Pythons.
regex_matchPort = re.compile(r'COM(?P<port>\d+)')
serialport = serial.Serial()  # module-wide port object, shared by the app and the reader thread
class MyApp(wx.App):
    def OnInit(self):
        """wx entry point: build the frame, icons, menus, event bindings and
        restore saved settings.  Returns True so the wx main loop starts."""
        self.frame = ui.MyFrame(None, wx.ID_ANY, "")
        if sys.platform == 'win32':
            # On Windows the icon ships inside library.zip (frozen/py2exe
            # layout), so it is read back out of the archive at runtime.
            my_data = pkg_resources.resource_string(__name__,"library.zip")
            filezip = StringIO(my_data)
            zf = zipfile.ZipFile(filezip)
            data = zf.read("media/icon16.ico")
            icon = wx.EmptyIcon()
            icon.CopyFromBitmap(wx.ImageFromStream(StringIO(data), wx.BITMAP_TYPE_ICO).ConvertToBitmap())
            self.frame.SetIcon(icon)    # for the app's title of main window
            self.frame.SetIcon(icon32.geticon32Icon())  # for the app's task bar
        elif sys.platform.startswith('linux'):
            self.frame.SetIcon(icon32.geticon32Icon())
            # self.frame.SetIcon(wx.Icon("media\icon16.ico", wx.BITMAP_TYPE_ICO, 16, 16))
        self.frame.SplitterWindow.SetSashSize(2)
        self.frame.SplitterWindow.SetSashPosition(SASHPOSITION, True)
        # self.frame.cmbPort.AppendItems(('COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8'))
        self.OnEnumPorts()
        # Make a menu from the declarative MenuDefs table
        self.menuBar = wx.MenuBar()
        self.MakeMenu(self.menuBar, MenuDefs)
        self.frame.SetMenuBar(self.menuBar)
        # initial variables (may be overridden below by LoadSettings)
        self.rxmode = ASCII
        self.txmode = ASCII
        self.localEcho = False
        self.rxCount = 0
        self.txCount = 0
        self.transmitHexPanel = False
        self.HideTransmitHexPanel()
        # bind events
        self.frame.btnHideBar.Bind(wx.EVT_BUTTON, self.OnHideSettingBar)
        self.frame.btnOpen.Bind(wx.EVT_BUTTON, self.OnBtnOpen)
        self.frame.btnEnumPorts.Bind(wx.EVT_BUTTON, self.OnEnumPorts)
        self.frame.btnClear.Bind(wx.EVT_BUTTON, self.OnClear)
        self.frame.btnTransmitHex.Bind(wx.EVT_BUTTON, self.OnTransmitHex)
        # self.frame.Bind(wx.EVT_WINDOW_DESTROY, self.Cleanup)
        self.frame.Bind(wx.EVT_CLOSE, self.Cleanup)
        self.Bind(EVT_SERIALRXCNT, self.OnSerialRxCnt)
        self.Bind(EVT_SERIALEXCEPT, self.OnSerialExcept)
        self.frame.txtctlMain.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.frame.txtctlMain.Bind(wx.EVT_CHAR, self.OnSerialWrite)
        self.frame.txtctlMain.Bind(wx.EVT_TEXT_PASTE, self.OnPaste)
        self.frame.txtctlMain.Bind(wx.EVT_TEXT_URL, self.OnURL)
        self.frame.SplitterWindow.Bind(wx.EVT_SPLITTER_DCLICK, self.OnSplitterDClick)
        self.SetTopWindow(self.frame)
        self.frame.SetTitle( appInfo.title )
        self.frame.Show()
        self.config = ConfigParser.RawConfigParser()
        self.LoadSettings()
        # set by the reader thread machinery to signal "port is open"
        self.evtPortOpen = threading.Event()
        return True
def OnTransmitHex(self, evt = None):
hexStr = self.frame.txtctlHex.GetValue()
str = HexToByte(hexStr)
if serialport.isOpen():
try:
serialport.write(str)
except serial.SerialException, e:
evt = SerialExceptEvent(self.frame.GetId(), e)
self.frame.GetEventHandler().AddPendingEvent(evt)
else:
self.txCount += len(str)
self.frame.statusbar.SetStatusText('Tx:%d' % self.txCount, 2)
def OnTransmitHexPanel(self, evt = None):
if evt.Selection == 1:
self.ShowTransmitHexPanel()
elif evt.Selection == 0:
self.HideTransmitHexPanel()
def HideTransmitHexPanel(self):
self.transmitHexPanel = False
self.frame.pnlTransmitHex.Hide()
self.frame.pnlData.GetSizer().Layout()
# call SetScrollbars() just for refreshing the scroll bar.
# self.frame.window_1_pane_1.SetScrollbars(10, 10, 30, 300)
def ShowTransmitHexPanel(self):
self.transmitHexPanel = True
self.frame.pnlTransmitHex.Show()
self.frame.pnlData.GetSizer().Layout()
# call SetScrollbars() just for refreshing the scroll bar.
# self.frame.window_1_pane_1.SetScrollbars(10, 10, 30, 300)
def OnSplitterDClick(self, evt):
evt.Veto() # disable the feature "unsplit a splitter"
def LoadSettings(self):
self.config.read("%s\\setting.ini" % os.path.dirname(os.path.realpath(__file__)))
# use unicode(self.config.get('...', '...'), 'utf-8') to convert the ASCII string to utf8 if needed.
try:
if self.config.has_section('serial'):
self.frame.cmbPort.SetStringSelection(self.config.get('serial', 'port'))
self.frame.cmbBaudRate.SetStringSelection(self.config.get('serial', 'baudrate'))
self.frame.choiceDataBits.SetStringSelection(self.config.get('serial', 'databits'))
self.frame.choiceParity.SetStringSelection(self.config.get('serial', 'parity'))
self.frame.choiceStopBits.SetStringSelection(self.config.get('serial', 'stopbits'))
if self.config.get('serial', 'rtscts') == 'on':
self.frame.chkboxrtscts.SetValue(True)
else:
self.frame.chkboxrtscts.SetValue(False)
if self.config.get('serial', 'xonxoff') == 'on':
self.frame.chkboxxonxoff.SetValue(True)
else:
self.frame.chkboxxonxoff.SetValue(False)
if self.config.has_section('display'):
{'ASCII': self.OnRxAsciiMode,
'hex': self.OnRxHexModeLowercase,
'HEX': self.OnRxHexModeUppercase,
}[self.config.get('display', 'rx_view_as')]()
self.menuBar.Check({ASCII: MENU_ID_RX_ASCII,
HEX_LOWERCASE: MENU_ID_RX_HEX_L,
HEX_UPPERCASE: MENU_ID_RX_HEX_U,
}.get(self.rxmode),
True)
if self.config.get('display', 'local_echo') == 'on':
self.menuBar.Check(MENU_ID_LOCAL_ECHO, True)
self.localEcho = True
self.frame.statusbar.SetStatusText('Local echo:On', 4)
else:
self.menuBar.Check(MENU_ID_LOCAL_ECHO, False)
self.localEcho = False
self.frame.statusbar.SetStatusText('Local echo:Off', 4)
if self.config.get('display', 'transmit_hex_panel') == 'on':
self.transmitHexPanel = True
self.menuBar.Check(MENU_ID_TRNST_HEX_PNL, True)
self.ShowTransmitHexPanel()
else:
self.transmitHexPanel = False
self.menuBar.Check(MENU_ID_TRNST_HEX_PNL, False)
self.HideTransmitHexPanel()
except:
pass
def SaveSettings(self):
if not self.config.has_section('serial'):
self.config.add_section('serial')
self.config.set('serial', 'port', str(self.frame.cmbPort.GetStringSelection()))
self.config.set('serial', 'baudrate', str(self.frame.cmbBaudRate.GetStringSelection()))
self.config.set('serial', 'databits', str(self.frame.choiceDataBits.GetStringSelection()))
self.config.set('serial', 'parity', str(self.frame.choiceParity.GetStringSelection()))
self.config.set('serial', 'stopbits', str(self.frame.choiceStopBits.GetStringSelection()))
self.config.set('serial', 'rtscts',
self.frame.chkboxrtscts.IsChecked() and 'on' or 'off' )
self.config.set('serial', 'xonxoff',
self.frame.chkboxxonxoff.IsChecked() and 'on' or 'off' )
if not self.config.has_section('display'):
self.config.add_section('display')
self.config.set('display', 'rx_view_as',
{ASCII: 'ASCII',
HEX_LOWERCASE:'hex',
HEX_UPPERCASE:'HEX',
}.get(self.rxmode)
)
self.config.set('display', 'local_echo', self.localEcho and 'on' or 'off')
self.config.set('display', 'transmit_hex_panel', self.transmitHexPanel and 'on' or 'off')
with open("%s\\setting.ini" % os.path.dirname(os.path.realpath(__file__)), 'w') as configfile:
self.config.write(configfile)
def OnURL(self, evt):
if evt.MouseEvent.LeftUp():
s = evt.GetURLStart()
e = evt.GetURLEnd()
strURL = self.frame.txtctlMain.GetRange(s, e)
webbrowser.open(strURL)
return
evt.Skip()
def OnClear(self, evt = None):
self.frame.txtctlMain.Clear()
self.rxCount = 0
self.txCount = 0
self.frame.statusbar.SetStatusText('Rx:%d' % self.rxCount, 1)
self.frame.statusbar.SetStatusText('Tx:%d' % self.txCount, 2)
def OnSave(self, evt = None):
dlg = wx.FileDialog(self.frame,
message="Save file as ...",
defaultDir = os.getcwd(),
wildcard = "Text Files|*.txt",
style = wx.SAVE | wx.CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
print "You selected %s\n" % path,
f = open(path, 'w')
f.write(self.frame.txtctlMain.GetValue())
f.close()
dlg.Destroy()
def GetPort(self):
if sys.platform == 'win32':
r = regex_matchPort.search(self.frame.cmbPort.GetValue())
if r:
return int(r.group('port')) - 1
return
elif sys.platform.startswith('linux'):
return self.frame.cmbPort.GetValue()
def GetBaudRate(self):
return int(self.frame.cmbBaudRate.GetValue())
def GetDataBits(self):
s = self.frame.choiceDataBits.GetStringSelection()
if s == '5':
return serial.FIVEBITS
elif s == '6':
return serial.SIXBITS
elif s == '7':
return serial.SEVENBITS
elif s == '8':
return serial.EIGHTBITS
def GetParity(self):
s = self.frame.choiceParity.GetStringSelection()
if s == 'None':
return serial.PARITY_NONE
elif s == 'Even':
return serial.PARITY_EVEN
elif s == 'Odd':
return serial.PARITY_ODD
elif s == 'Mark':
return serial.PARITY_MARK
elif s == 'Space':
return serial.PARITY_SPACE
def GetStopBits(self):
s = self.frame.choiceStopBits.GetStringSelection()
if s == '1':
return serial.STOPBITS_ONE
elif s == '1.5':
return serial.STOPBITS_ONE_POINT_FIVE
elif s == '2':
return serial.STOPBITS_TWO
def MakeMenu(self, menuBar, args, menu = None):
if args[0] == MENUITEM:
menu.Append(args[1], args[2], args[3])
eval('self.frame.Bind(wx.EVT_MENU,' + args[4] + ', id = args[1])')
elif args[0] == CHECKITEM:
menu.AppendCheckItem(args[1], args[2], args[3])
eval('self.frame.Bind(wx.EVT_MENU,' + args[4] + ', id = args[1])')
elif args[0] == SEPARATOR:
menu.AppendSeparator()
elif args[0] == RADIOITEM:
menu.AppendRadioItem(args[1], args[2], args[3])
eval('self.frame.Bind(wx.EVT_MENU,' + args[4] + ', id = args[1])')
elif args[0] == SUBMENU:
submenu = wx.Menu()
| |
import enum
import pickle
from dataclasses import dataclass, replace
from typing import Callable, List
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from einops import rearrange
from jax.example_libraries.optimizers import l2_norm
from jax.random import PRNGKey
from jax.tree_util import tree_map
from tqdm import tqdm
def safe_clip_grads(grad_tree, max_norm):
    """Rescale every leaf of `grad_tree` so the global l2 norm is at most `max_norm`.

    Leaves are untouched while the global norm is below the cap; otherwise
    each leaf is scaled by max_norm / (norm + eps).
    """
    total_norm = l2_norm(grad_tree)
    eps = 1e-9

    def clip_leaf(leaf):
        return jnp.where(total_norm < max_norm,
                         leaf,
                         leaf * max_norm / (total_norm + eps))

    return tree_map(clip_leaf, grad_tree)
class NodeType(enum.IntEnum):
    """Mesh node categories used to build one-hot node features.

    SIZE is the one-hot width (matching the literal 9 used in
    _build_graph), not a node type; values 7 and 8 are unused.
    """
    NORMAL = 0
    OBSTACLE = 1
    AIRFOIL = 2
    HANDLE = 3
    INFLOW = 4
    OUTFLOW = 5
    WALL_BOUNDARY = 6
    SIZE = 9
@dataclass
class EdgeSet:
    """A named set of directed edges with per-edge feature vectors."""
    name: str
    features: jnp.ndarray   # per-edge features, indexed in step with senders/receivers
    senders: jnp.ndarray    # source node index of each edge
    receivers: jnp.ndarray  # destination node index of each edge
@dataclass
class MultiGraph:
    """A graph with one shared node-feature array and one or more edge sets."""
    node_features: jnp.ndarray
    edge_sets: List[EdgeSet]
def triangles_to_edges(faces):
    """Derive two-way mesh connectivity (sources, dests) from triangle faces.

    `faces` is an int array [n_faces, 3].  The result is a pair of index
    arrays of length 2 * (3 * n_faces); because deduplication uses
    jnp.unique with a static output size, removed duplicates appear as
    -1 pads scattered through the output.
    """
    # Each triangle (a, b, c) contributes the edges (a,b), (b,c), (c,a).
    raw_edges = jnp.concatenate(
        [faces[:, 0:2],
         faces[:, 1:3],
         jnp.stack([faces[:, 2], faces[:, 0]], axis=1)],
        axis=0)
    # Canonicalise each edge as (max, min) so both orientations collide.
    canonical = jnp.stack([raw_edges.max(axis=1), raw_edges.min(axis=1)], axis=1)
    # Deduplicate with a fixed output size (JIT-friendly); empty slots are
    # filled with -1.
    deduped = jnp.unique(canonical, axis=0,
                         size=raw_edges.shape[0], fill_value=-1)
    hi, lo = deduped[:, 0], deduped[:, 1]
    # Emit both directions so message passing is symmetric.
    sources = jnp.concatenate([hi, lo], axis=0)
    dests = jnp.concatenate([lo, hi], axis=0)
    return sources, dests
class MLPEncoder(hk.Module):
    """MLP with ReLU between hidden layers and an optional final LayerNorm."""

    def __init__(self, output_sizes, layer_norm=True, name=None):
        super().__init__(name=name)
        last = len(output_sizes) - 1
        self.layers = []
        for idx, width in enumerate(output_sizes):
            parts = [hk.Linear(width, name=f'linear_{idx}')]
            if idx != last:
                # No activation after the final (output) layer.
                parts.append(jax.nn.relu)
            self.layers.append(hk.Sequential(parts))
        if layer_norm:
            self.norm = hk.LayerNorm(axis=-1,
                                     create_scale=True,
                                     create_offset=True)

    def __call__(self, x):
        out = x
        for layer in self.layers:
            out = layer(out)
        # `norm` only exists when layer_norm=True was requested.
        if hasattr(self, 'norm'):
            out = self.norm(out)
        return out
class GraphEncoder(hk.Module):
    """Embed raw node and edge features into 128-d latents."""

    def __init__(self, n_edge_sets=1, name=None):
        super().__init__(name=name)
        self.node_encoder = MLPEncoder(output_sizes=[128, 128],
                                       name='node_encoder')
        # One encoder per edge set, matched by position in __call__.
        self.edge_encoders = [
            MLPEncoder(output_sizes=[128, 128], name=f'edge_encoder_{i}')
            for i in range(n_edge_sets)
        ]

    def __call__(self, graph):
        encoded_nodes = self.node_encoder(graph.node_features)
        encoded_sets = []
        for i, edge_set in enumerate(graph.edge_sets):
            encoded = self.edge_encoders[i](edge_set.features)
            encoded_sets.append(replace(edge_set, features=encoded))
        return MultiGraph(node_features=encoded_nodes,
                          edge_sets=encoded_sets)
class GraphNetBlock(hk.Module):
    """One message-passing step: update edges, then nodes, with residuals.

    Edges are updated from (sender, receiver, edge) features; nodes from
    their own features plus a scatter-sum of incoming edge features.
    Both updates end with a residual add of the pre-update features.
    """
    def __init__(self, n_edge_sets=1, name=None):
        super().__init__(name=name)
        self.edge_updaters = []
        for i in range(n_edge_sets):
            self.edge_updaters.append(MLPEncoder(output_sizes=[128, 128],
                                                 name=f'edge_updater_{i}'))
        self.node_updater = MLPEncoder(output_sizes=[128, 128],
                                       name='node_updater')
    def _update_edges(self, node_feats, edge_set, i):
        # Gather endpoint features for every edge.
        sender_feats = jnp.take(node_feats, edge_set.senders, axis=0)
        receiver_feats = jnp.take(node_feats, edge_set.receivers, axis=0)
        feats_list = [sender_feats, receiver_feats, edge_set.features]
        feats = jnp.concatenate(feats_list, axis=-1)
        # Zero out NaNs (inputs are NaN-padded upstream) before the MLP.
        feats_nan_mask = jnp.isnan(feats)
        feats = jnp.where(feats_nan_mask, 0, feats)
        return self.edge_updaters[i](feats)
    def _update_nodes(self, node_features, edge_sets):
        feats_list = [node_features]
        for edge_set in edge_sets:
            # n_feats = edge_set.features.shape[-1]
            feats = jnp.zeros(node_features.shape)
            # index = repeat(edge_set.receivers, 'n -> n f', f=n_feats)
            # idx = jnp.meshgrid(*(jnp.arange(n)
            #                    for n in feats.shape), sparse=True)
            # idx[0] = edge_set.receivers  # Double check: What happens when idx contains -1?
            # Scatter-add incoming edge features onto their receiver nodes.
            # NOTE(review): receivers padded with -1 (see triangles_to_edges)
            # index the *last* node row via negative indexing — confirm the
            # pad edges carry zero features so this is harmless.
            feats = feats.at[edge_set.receivers].add(edge_set.features)
            feats_list.append(feats)
        feats = jnp.concatenate(feats_list, axis=-1)
        # Zero out NaNs before the MLP, mirroring _update_edges.
        feats_nan_mask = jnp.isnan(feats)
        feats = jnp.where(feats_nan_mask, 0, feats)
        return self.node_updater(feats)
    def __call__(self, graph):
        # Apply edge functions
        new_edge_sets = []
        for i, edge_set in enumerate(graph.edge_sets):
            # FIXME: unique encoder in each iteration
            updated_feats = self._update_edges(
                graph.node_features, edge_set, i)
            new_edge_sets.append(replace(edge_set, features=updated_feats))
        # Apply node function
        new_node_feats = self._update_nodes(graph.node_features, new_edge_sets)
        # Add residual connections
        new_node_feats = new_node_feats + graph.node_features
        new_edge_sets_2 = []
        for es, old_es in zip(new_edge_sets, graph.edge_sets):
            new_es = replace(es, features=es.features + old_es.features)
            new_edge_sets_2.append(new_es)
        graph = MultiGraph(node_features=new_node_feats,
                           edge_sets=new_edge_sets_2)
        return graph
class GraphProcessor(hk.Module):
    """Encode -> N message-passing blocks -> decode to a 2-d per-node output."""
    def __init__(self, n_message_passing_steps=15, name=None):
        super().__init__(name=name)
        self.graph_encoder = GraphEncoder(name='graph_encoder')
        self.layers = []
        for i in range(n_message_passing_steps):
            self.layers.append(GraphNetBlock(name=f'graph_layer_{i}'))
        # NOTE(review): the decoder is named 'node_updater', which is
        # misleading but load-bearing — haiku parameter paths (and any
        # saved checkpoints) depend on it, so do not rename casually.
        self.graph_decoder = MLPEncoder(output_sizes=[128, 2],
                                        layer_norm=False,
                                        name='node_updater')
    def __call__(self, graph):
        graph = self.graph_encoder(graph)
        for layer in self.layers:
            graph = layer(graph)
        # Decode node latents only; edge latents are discarded here.
        out = self.graph_decoder(graph.node_features)
        return out
class Normalizer(hk.Module):
    """Online feature normalizer backed by haiku state.

    Accumulates running sum / sum-of-squares / count across calls (NaN
    entries are ignored) and standardizes inputs with the statistics seen
    so far.  Inputs of shape [batch, ..., h] are flattened to
    [batch * ..., h] for accumulation and restored afterwards.
    """
    def __init__(self, size, max_accumulations=10**6, std_epsilon=1e-8, name=None):
        super().__init__(name=name)
        # Cap on accumulation steps; currently unused in __call__ (the
        # hk.cond guard is commented out there).
        self.max_accumulations = max_accumulations
        # Per-channel floor on std to avoid division blow-ups.
        self.std_epsilon = jnp.full(size, std_epsilon)
        # Set on each call by _pool_dims; consumed by _unpool_dims.
        self.dim_sizes = None
        self.size = size
    @property
    def sum(self):
        # NOTE(review): declared int32 although it accumulates float sums —
        # jnp promotes on the first accumulate, but the dtype looks
        # unintended; confirm against saved state dtypes before changing.
        return hk.get_state('sum', shape=self.size, dtype=jnp.int32,
                            init=jnp.zeros)
    @property
    def sum_squared(self):
        # NOTE(review): same int32-dtype concern as `sum`.
        return hk.get_state('sum_squared', shape=self.size, dtype=jnp.int32,
                            init=jnp.zeros)
    @property
    def count(self):
        # Number of (non-NaN) rows accumulated so far.
        return hk.get_state('count', shape=[], dtype=jnp.int32,
                            init=jnp.zeros)
    @property
    def n_accumulations(self):
        # Number of _accumulate calls made so far.
        return hk.get_state('n_accumulations', shape=[], dtype=jnp.int32,
                            init=jnp.zeros)
    def _accumulate(self, x):
        # Rows are counted as valid if their first channel is non-NaN;
        # nansum ignores NaN entries channel-wise.
        x_count = jnp.count_nonzero(~jnp.isnan(x[..., 0]))
        x_sum = jnp.nansum(x, axis=0)
        x_sum_squared = jnp.nansum(x**2, axis=0)
        hk.set_state("sum", self.sum + x_sum)
        hk.set_state("sum_squared", self.sum_squared + x_sum_squared)
        hk.set_state("count", self.count + x_count)
        hk.set_state("n_accumulations", self.n_accumulations + 1)
        return x
    def _pool_dims(self, x):
        # Remember the middle dims so inverse/unpool can restore them.
        _, *dim_sizes, _ = x.shape
        self.dim_sizes = dim_sizes
        if self.dim_sizes:
            x = rearrange(x, 'b ... h -> (b ...) h')
        return x
    def _unpool_dims(self, x):
        if len(self.dim_sizes) == 1:
            x = rearrange(x, '(b m) h -> b m h', m=self.dim_sizes[0])
        elif len(self.dim_sizes) == 2:
            m, n = self.dim_sizes
            x = rearrange(x, '(b m n) h -> b m n h', m=m, n=n)
        return x
    def __call__(self, x):
        x = self._pool_dims(x)
        # x.shape == [batch_size, latent_dim]
        # Update running statistics, then standardize with them.
        self._accumulate(x)
        # hk.cond(self.n_accumulations < self.max_accumulations,
        #         lambda x: self._accumulate(x), lambda x: x, x)
        x = (x - self.mean) / self.std
        x = self._unpool_dims(x)
        return x
    def inverse(self, x, channel=None):
        """Undo normalization, optionally for a single channel's statistics."""
        x = self._pool_dims(x)
        if channel is None:
            x = x * self.std + self.mean
        else:
            x = x * self.std[channel] + self.mean[channel]
        x = self._unpool_dims(x)
        return x
    @property
    def mean(self):
        # NOTE(review): divides by count with no zero guard — NaN/undefined
        # before the first accumulate (see commented-out safe_count).
        # safe_count = max(self.count, self.one)
        return self.sum / self.count
    @property
    def std(self):
        # safe_count = max(self.count, self.one)
        std = jnp.sqrt(self.sum_squared / self.count - self.mean**2)
        return jnp.maximum(std, self.std_epsilon)
class TrainedNormalizer(Normalizer):
    """Normalizer that applies frozen statistics without accumulating new ones."""

    def __call__(self, x):
        pooled = self._pool_dims(x)
        # Standardize with the already-accumulated mean/std only.
        normalized = (pooled - self.mean) / self.std
        return self._unpool_dims(normalized)
class MeshGraphNet:
    def __init__(self,
                 optimizer: Callable,
                 node_dim: int = 11,
                 edge_dim: int = 3,
                 output_dim: int = 2,
                 max_accumulations: int = int(1e5),
                 n_layers: int = 15,
                 clip_val: float = 0.1):
        """Wrap a GraphProcessor in a vmapped, stateful haiku transform.

        optimizer: an optax-style optimizer (provides .update).
        node_dim/edge_dim/output_dim: feature widths for the normalizers.
        clip_val: global-norm cap applied to gradients in step().
        """
        self.n_layers = n_layers
        self.optimizer = optimizer
        self.params = None          # populated by init() / load_lightning_model_state()
        self.state = None           # haiku state (normalizer accumulators)
        self.node_dim = node_dim
        self.edge_dim = edge_dim
        self.output_dim = output_dim
        self.max_accumulations = max_accumulations
        self.clip_val = clip_val
        def model(batch):
            # Built fresh inside hk.transform; closes over the dims above.
            processor = GraphProcessor(name='processor')
            node_normalizer = Normalizer(
                [node_dim], max_accumulations, name='node_normalizer')
            edge_normalizer = Normalizer(
                [edge_dim], max_accumulations, name='edge_normalizer')
            output_normalizer = Normalizer(
                [output_dim], max_accumulations, name='output_normalizer')
            graph = self._build_graph(batch, node_normalizer, edge_normalizer)
            preds = processor(graph)
            # Target is the per-step velocity delta.
            targets = batch['target_velocity'] - batch['velocity']
            # targets = output_normalizer(targets)
            # Zero both preds and targets where targets are NaN-padded so
            # padded nodes contribute nothing to the loss.
            targets_nan_mask = jnp.isnan(targets)
            targets = jnp.where(targets_nan_mask, 0, targets)
            preds = jnp.where(targets_nan_mask, 0, preds)
            return {'preds': preds, 'targets': targets}
        # vmap over the leading batch axis; rng-free apply.
        self.model = hk.without_apply_rng(
            hk.transform_with_state(jax.vmap(model)))
def step(self, params, opt_state, batch):
loss_value, grads = jax.value_and_grad(
self.loss_fn)(params, batch)
grads = safe_clip_grads(grads, self.clip_val)
updates, opt_state = self.optimizer.update(
grads, opt_state, params)
params = optax.apply_updates(params, updates)
return params, opt_state, loss_value
    def _build_graph(self, batch, node_normalizer, edge_normalizer):
        """Assemble a MultiGraph from a raw batch dict.

        Node features are [velocity, one-hot node type]; edge features are
        [relative position, its norm] over edges derived from the cell
        triangles.  NaN pads in either are zeroed before returning.  The
        normalizer arguments are currently unused (their calls are
        commented out below).
        """
        # Each node has a type: 0 (normal), 4 (inflow), 5 (outflow), 6 (wall)
        node_types = jax.nn.one_hot(batch['node_type'], 9)
        # node_types.shape == [n_nodes, 9]
        node_features = jnp.concatenate(
            [batch['velocity'], node_types], axis=-1)
        # node_features.shape == [n_nodes, 11] nan padded
        senders, receivers = triangles_to_edges(batch['cells'])
        # senders.shape == receivers.shape == [n_edges]
        sender_pos = jnp.take(batch['mesh_pos'], senders, axis=0)
        receiver_pos = jnp.take(batch['mesh_pos'], receivers, axis=0)
        # sender_pos.shape == receiver_pos.shape == [n_edges, 2] nan padded
        rel_pos = sender_pos - receiver_pos
        # rel_mesh_pos.shape == [n_edges, 2] nan padded
        norms = jnp.linalg.norm(rel_pos, axis=-1, keepdims=True)
        # norms.shape == [n_edges, 1] nan padded
        edge_features = jnp.concatenate([rel_pos, norms], axis=-1)
        # edge_features.shape == [n_edges, 3] nan padded
        # edge_features = edge_normalizer(edge_features)
        edge_nan_mask = jnp.isnan(edge_features)
        edge_features = jnp.where(edge_nan_mask, 0, edge_features)
        mesh_edges = EdgeSet(name='mesh_edges',
                             features=edge_features,
                             receivers=receivers,
                             senders=senders)
        # node_features = node_normalizer(node_features)
        node_nan_mask = jnp.isnan(node_features)
        node_features = jnp.where(node_nan_mask, 0, node_features)
        graph = MultiGraph(node_features=node_features,
                           edge_sets=[mesh_edges])
        return graph
    def load_lightning_model_state(self, path, *args, **kwargs):
        """Load pickled parameters from `path` into self.params.

        *args/**kwargs are accepted for interface compatibility and ignored.
        SECURITY NOTE: pickle.load executes arbitrary code from the file —
        only load checkpoints from trusted sources.
        """
        with open(path, 'rb') as f:
            params = pickle.load(f)
        self.params = params
def init(self, seed, datamodule):
rng = PRNGKey(seed)
train_batches = iter(datamodule.train_dataloader())
with tqdm(train_batches, unit="batch") as tepoch:
for i, batch in enumerate(tepoch):
if i == 0:
params, self.state = self.model.init(rng, batch)
_, self.state = self.model.apply(params, self.state, batch)
break
self.params = params
return params
def loss_fn(self, params, batch):
out, self.state = self.model.apply(
params, self.state, batch)
loss = optax.l2_loss(out['targets'], out['preds']).sum(axis=-1)
loss = jnp.nanmean(loss)
return loss
def valid_step(self, params, batch):
# Roll out for 50 steps
keys = ['velocity']
inputs = {k: v[:, 0] if k in keys else v for k, v in batch.items()}
@jax.jit
def rollout(params, state, | |
bool(state["Paused"]):
print("Container is in Paused State")
elif bool(state["Running"]):
print("Container is in Running State")
elif int(state["ExitCode"]) == 0:
print("Container is in Stopped State")
else:
print("Container is in Crashed State")
print("Ip Address of the container: " +detail['NetworkSettings']['IPAddress'])
if bool(detail["State"]["Running"]):
container_id = detail['Id']
cpu_usage = {}
cur_usage = 0
last_usage = 0
for i in range(2):
with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/cpuacct.stat', 'r') as f:
for line in f:
m = re.search(r"(system|user)\s+(\d+)", line)
if m:
cpu_usage[m.group(1)] = int(m.group(2))
cpu = cpu_usage["system"] + cpu_usage["user"]
last_usage = cur_usage
cur_usage = cpu
time.sleep(1)
cpu_percent = (cur_usage - last_usage)*100.0/user_hz
print("CPU Usage: %.2f %%" %(cpu_percent))
else:
print(0)
if bool(detail["State"]["Running"]):
container_id = detail['Id']
print("Docker Port Info:")
cmd = "sudo docker port {}".format(container_id)
os.system(cmd)
if bool(detail["State"]["Running"]):
container_id = detail['Id']
with open('/sys/fs/cgroup/memory/docker/' + container_id + '/memory.stat', 'r') as f:
for line in f:
m = re.search(r"total_rss\s+(\d+)", line)
if m:
mem = int(m.group(1))
print("Memory: %s KB "%(mem/1024.0))
o = re.search(r"usage\s+(\d+)", line)
if o:
print("Usage: %s "%(o.group(1)))
p = re.search(r"max_usage\s+(\d+)", line)
if p:
print("Max Usage: %s "%(p.group(1)))
if bool(detail["State"]["Running"]):
container_id = detail['Id']
with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/cpuacct.stat', 'r') as f:
for line in f:
m = re.search(r"user\s+(\d+)", line)
if m:
user_ticks = int(m.group(1))
print("Time spent by running processes: %.2f ms"%(user_ticks*1000.0/user_hz))
print("List Networks:")
cmd = "docker network ls"
os.system(cmd)
return 0
def buildImages(args):
    """Build the requested docker image(s), tagged 'candidate'.

    args.image selects 'quagga', 'radius', 'test' or 'all'; args.prefix is
    prepended (with a '/') to each image name.  Always returns 0.
    """
    tag = 'candidate'
    prefix = args.prefix
    if prefix:
        prefix += '/'
    # (selector, container class) pairs, in the original build order.
    for selector, cls in (('quagga', Quagga),
                          ('radius', Radius),
                          ('test', CordTester)):
        if args.image in ('all', selector):
            cls.build_image('{}{}:{}'.format(prefix, cls.IMAGE, tag))
    return 0
def startImages(args):
    """Start the ONOS/quagga/radius containers selected by args.image.

    The ONOS image spec may carry an explicit ":tag"; a colon that belongs
    to a registry path (the suffix contains '/') is kept in the image name.
    Always returns 0.
    """
    onos_image, onos_tag = args.onos, 'latest'
    pieces = args.onos.rsplit(':', 1)
    if len(pieces) > 1 and '/' not in pieces[1]:
        # A real tag: tags cannot contain slashes.
        onos_image, onos_tag = pieces[0], pieces[1]
    if args.image in ('all', 'onos'):
        onos = Onos(image = onos_image, tag = onos_tag)
        print('ONOS started with ip %s' %(onos.ip()))
    if args.image in ('all', 'quagga'):
        quagga = Quagga(prefix = args.prefix)
        print('Quagga started with ip %s' %(quagga.ip()))
    if args.image in ('all', 'radius'):
        radius = Radius(prefix = args.prefix)
        print('Radius started with ip %s' %(radius.ip()))
    return 0
def xosCommand(args):
    """Drive the XOS service profile: build, start, stop or update.

    Always returns 0.
    """
    profile = args.profile
    # 'update' is a modifier applied while constructing the profile object.
    xos = XosServiceProfile(profile = profile,
                            update = (args.command == 'update'))
    if args.command == 'build':
        xos.build_images(force = True)
    elif args.command == 'start':
        xos.start_services()
    elif args.command == 'stop':
        xos.stop_services(rm = True)
    return 0
if __name__ == '__main__':
parser = ArgumentParser(description='Cord Tester')
subparser = parser.add_subparsers()
parser_run = subparser.add_parser('run', help='Run cord tester')
parser_run.add_argument('-t', '--test-type', default=test_type_default, help='Specify test type or test case to run')
parser_run.add_argument('-o', '--onos', default=onos_image_default, type=str, help='ONOS container image')
parser_run.add_argument('-q', '--quagga',action='store_true',help='Provision quagga container for vrouter')
parser_run.add_argument('-a', '--app', default=onos_app_file, type=str, help='Cord ONOS app filename')
parser_run.add_argument('-l', '--olt', action='store_true', help='Use OLT config')
parser_run.add_argument('-olt-config', '--olt-config', default=olt_config_default, type=str, help='Provide OLT configuration')
parser_run.add_argument('-e', '--test-controller', default='', type=str, help='External test controller ip for Onos and/or radius server. '
'Eg: 10.0.0.2/10.0.0.3 to specify ONOS and Radius ip to connect')
parser_run.add_argument('-r', '--server', default=cord_test_server_address, type=str,
help='ip:port address to connect for cord test server for container requests')
parser_run.add_argument('-k', '--keep', action='store_true', help='Keep test container after tests')
parser_run.add_argument('-s', '--start-switch', action='store_true', help='Start OVS when running under OLT config')
parser_run.add_argument('-dh', '--setup-dhcpd', action='store_true', help='Start dhcpd Server in cord-tester test container')
parser_run.add_argument('-u', '--update', default='none', choices=['test','quagga','radius', 'all'], type=str, help='Update cord tester container images. '
'Eg: --update=quagga to rebuild quagga image.'
' --update=radius to rebuild radius server image.'
' --update=test to rebuild cord test image.(Default)'
' --update=all to rebuild all cord tester images.')
parser_run.add_argument('-n', '--num-containers', default=1, type=int,
help='Specify number of test containers to spawn for tests')
parser_run.add_argument('-c', '--container', default='', type=str, help='Test container name for running tests')
parser_run.add_argument('-m', '--manifest', default='', type=str, help='Provide test configuration manifest')
parser_run.add_argument('-p', '--prefix', default='', type=str, help='Provide container image prefix')
parser_run.add_argument('-d', '--no-switch', action='store_true', help='Dont start test switch.')
parser_run.add_argument('-i', '--identity-file', default=identity_file_default,
type=str, help='ssh identity file to access compute nodes from test container')
parser_run.add_argument('-j', '--onos-instances', default=1, type=int,
help='Specify number to test onos instances to form cluster')
parser_run.add_argument('-v', '--shared-volume', action='store_true', help='Start ONOS cluster instances with shared volume')
parser_run.add_argument('-async', '--async-mode', action='store_true',
help='Start ONOS cluster instances in async mode')
parser_run.add_argument('-log', '--log-level', default=onos_log_level,
choices=['DEBUG','TRACE','ERROR','WARN','INFO'],
type=str,
help='Specify the log level for the test cases')
parser_run.add_argument('-jvm-heap-size', '--jvm-heap-size', default='', type=str, help='ONOS JVM heap size')
parser_run.add_argument('-network', '--network', default='', type=str, help='Docker network to attach')
parser_run.add_argument('-onos-cord', '--onos-cord', default='', type=str,
help='Specify config location for ONOS cord when running on podd')
parser_run.add_argument('-service-profile', '--service-profile', default='', type=str,
help='Specify config location for ONOS cord service profile when running on podd.'
'Eg: $HOME/service-profile/cord-pod')
parser_run.add_argument('-synchronizer', '--synchronizer', default='', type=str,
help='Specify the synchronizer to use for ONOS cord instance when running on podd.'
'Eg: vtn,fabric,cord')
parser_run.add_argument('-karaf', '--karaf', default='3.0.8', type=str, help='Karaf version for ONOS')
parser_run.add_argument('-voltha-loc', '--voltha-loc', default='', type=str,
help='Specify the voltha location in order to start voltha')
parser_run.add_argument('-voltha-intf', '--voltha-intf', default='eth0', type=str,
help='Specify the voltha interface for voltha to listen')
parser_run.add_argument('-voltha-enable', '--voltha-enable', action='store_true',
help='Run the tests with voltha environment enabled')
parser_run.add_argument('-voltha-container-mode', '--voltha-container-mode', action='store_true',
help='Run the tests with voltha container environment enabled')
parser_run.add_argument('-expose-port', '--expose-port', action='store_true',
help='Start ONOS by exposing the controller ports to the host.'
'Add +1 for every other onos/cluster instance when running more than 1 ONOS instances')
parser_run.add_argument('-skip-onos-restart', '--skip-onos-restart', action='store_true',
help = 'Skips restarting/configuring of onoscord')
parser_run.set_defaults(func=runTest)
parser_setup = subparser.add_parser('setup', help='Setup cord tester environment')
parser_setup.add_argument('-o', '--onos', default=onos_image_default, type=str, help='ONOS container image')
parser_setup.add_argument('-r', '--server', default=cord_test_server_address, type=str,
help='ip:port address for cord test server to listen for container restart requests')
parser_setup.add_argument('-q', '--quagga',action='store_true',help='Provision quagga container for vrouter')
parser_setup.add_argument('-a', '--app', default=onos_app_file, type=str, help='Cord ONOS app filename')
parser_setup.add_argument('-e', '--test-controller', default='', type=str, help='External test controller ip for Onos and/or radius server. '
'Eg: 10.0.0.2/10.0.0.3 to specify ONOS and Radius ip to connect')
parser_setup.add_argument('-u', '--update', default='none', choices=['quagga','radius', 'all'], type=str, help='Update cord tester container images. '
'Eg: --update=quagga to rebuild quagga image.'
' --update=radius to rebuild radius server image.'
' --update=all to rebuild all cord tester images.')
parser_setup.add_argument('-d', '--dont-provision', action='store_true', help='Dont start test container.')
parser_setup.add_argument('-l', '--olt', action='store_true', help='Use OLT config')
parser_setup.add_argument('-olt-config', '--olt-config', default=olt_config_default, type=str, help='Provide OLT configuration')
parser_setup.add_argument('-log', '--log-level', default=onos_log_level, type=str,
choices=['DEBUG','TRACE','ERROR','WARN','INFO'],
help='Specify the log level for the test cases')
parser_setup.add_argument('-s', '--start-switch', action='store_true', help='Start OVS when running under OLT config')
parser_setup.add_argument('-dh', '--setup-dhcpd', action='store_true', help='Start dhcpd Server in cord-tester container')
parser_setup.add_argument('-onos-cord', '--onos-cord', default='', type=str,
help='Specify config location for ONOS cord when running on podd')
parser_setup.add_argument('-service-profile', '--service-profile', default='', type=str,
help='Specify config location for ONOS cord service profile when running on podd.'
'Eg: $HOME/service-profile/cord-pod')
parser_setup.add_argument('-synchronizer', '--synchronizer', default='', type=str,
help='Specify the synchronizer to use for ONOS cord instance when running on podd.'
'Eg: vtn,fabric,cord')
parser_setup.add_argument('-m', '--manifest', default='', type=str, help='Provide test configuration manifest')
parser_setup.add_argument('-p', '--prefix', default='', type=str, help='Provide container image prefix')
parser_setup.add_argument('-i', '--identity-file', default=identity_file_default,
type=str, help='ssh identity file to access compute nodes from test container')
parser_setup.add_argument('-n', '--onos-instances', default=1, type=int,
help='Specify number of test onos instances to spawn')
parser_setup.add_argument('-v', '--shared-volume', action='store_true',
help='Start ONOS cluster instances with shared volume')
parser_setup.add_argument('-async', '--async-mode', action='store_true',
help='Start ONOS cluster instances in async mode')
parser_setup.add_argument('-f', '--foreground', action='store_true', help='Run in foreground')
parser_setup.add_argument('-jvm-heap-size', '--jvm-heap-size', default='', type=str, help='ONOS JVM heap size')
parser_setup.add_argument('-network', '--network', default='', type=str, help='Docker network to attach')
parser_setup.add_argument('-karaf', '--karaf', default='3.0.8', type=str, help='Karaf version for ONOS')
parser_setup.add_argument('-voltha-loc', '--voltha-loc', default='', type=str,
help='Specify the voltha location in order to start voltha')
parser_setup.add_argument('-voltha-intf', '--voltha-intf', default='eth0', type=str,
help='Specify the voltha interface for voltha to listen')
parser_setup.add_argument('-voltha-enable', '--voltha-enable', action='store_true',
help='Run the tests with voltha environment enabled')
parser_setup.add_argument('-voltha-container-mode', '--voltha-container-mode', action='store_true',
help='Run the tests with voltha container environment enabled')
parser_setup.add_argument('-expose-port', '--expose-port', action='store_true',
help='Start ONOS by exposing the controller ports to the host.'
'Add +1 for every other onos/cluster instance when running more than 1 ONOS instances')
parser_setup.add_argument('-skip-onos-restart', '--skip-onos-restart', action='store_true',
help = 'Skips restarting/configuring of onoscord')
parser_setup.set_defaults(func=setupCordTester)
parser_xos = subparser.add_parser('xos', help='Building xos into cord tester environment')
parser_xos.add_argument('command', choices=['build', 'update', 'start', 'stop'])
parser_xos.add_argument('-p', '--profile', default='cord-pod', type=str, help='Provide service profile')
parser_xos.set_defaults(func=xosCommand)
parser_list = subparser.add_parser('list', help='List test cases')
parser_list.add_argument('-t', '--test', default='all', help='Specify test type to list test cases. '
'Eg: -t tls to list tls test cases.'
' -t tls-dhcp-vrouter to list tls,dhcp and vrouter test cases.'
' -t all to list all test | |
fit_types = self.fit_types
num_gen = self.num_gen
num_pop = self.num_pop
num_var = self.num_var
try:
fitl = [list(x) for x in zip(*self.parent_pop['fit_values'])]
best = [self.get_sorting_indices(l, reverse=False)[0] if self.fit_types[i] == 'min' else self.get_sorting_indices(l, reverse=True)[0] for i, l in enumerate(fitl)]
fit = [min(x) if self.fit_types[i] == 'min' else max(x) for i, x in enumerate(fitl)]
except(Exception):
best = None
fit = None
return TPL.format(fit_names, fit_types, num_gen, num_pop, num_var, best, fit)
def summary(self):
"""Print a summary of the MOGA."""
print(self)
    def moga_optimize(self):
        """Run the multi-objective GA (NSGA-II style) optimization loop.

        Initializes (or restores) the parent population, then for each
        generation: evaluates fitness, merges parents and children, performs
        non-dominated sorting and crowding-distance selection, writes the
        generation's results to a ``.pareto`` file, and applies the genetic
        operators (tournament selection, crossover, mutation).
        """
        self.write_moga_json_file()
        if self.start_from_gen:
            # Resume: restore the parent population from a previous run's
            # Pareto-front file and continue from the following generation.
            self.parent_pop = self.get_pop_from_pf_file()
            start_gen_number = self.start_from_gen + 1
        else:
            start_gen_number = 0
            self.parent_pop['binary'] = self.generate_random_bin_pop()
            self.parent_pop['decoded'] = self.decode_binary_pop(self.parent_pop['binary'])
            self.parent_pop['scaled'] = self.scale_population(self.parent_pop['decoded'])
            if self.fixed_start_pop:
                # Seed the first individuals with user-supplied starting points.
                for i in range(self.fixed_start_pop['num_pop']):
                    self.parent_pop['binary'][i] = self.fixed_start_pop['binary'][i]
                    self.parent_pop['decoded'][i] = self.fixed_start_pop['decoded'][i]
                    self.parent_pop['scaled'][i] = self.fixed_start_pop['scaled'][i]
            self.parent_pop['fit_values'] = [[[]] * self.num_fit_func for i in range(self.num_pop)]
            for i in range(self.num_pop):
                for j in range(self.num_fit_func):
                    fit_func = self.fit_functions[j]
                    self.parent_pop['fit_values'][i][j] = fit_func(self.parent_pop['scaled'][i], **self.fkwargs)
        self.current_pop['binary'] = self.generate_random_bin_pop()
        for generation in range(start_gen_number, self.num_gen):
            print('generation ', generation)
            self.current_pop['decoded'] = self.decode_binary_pop(self.current_pop['binary'])
            self.current_pop['scaled'] = self.scale_population(self.current_pop['decoded'])
            self.current_pop['fit_values'] = [[[]] * self.num_fit_func for i in range(self.num_pop)]
            for i in range(self.num_pop):
                for j in range(self.num_fit_func):
                    fit_func = self.fit_functions[j]
                    self.current_pop['fit_values'][i][j] = fit_func(self.current_pop['scaled'][i], **self.fkwargs)
            # NSGA-II selection: merge parents and children, rank by Pareto
            # front level, then order the last front by crowding distance.
            self.combine_populations()
            self.non_dom_sort()
            for u in range(len(self.pareto_front_indices) - 1):
                self.extract_pareto_front(u)
                self.calculate_crowding_distance()
            self.crowding_distance_sorting()
            self.parent_reseting()
            self.write_out_file(generation)
            if generation < self.num_gen - 1:
                # Produce the next generation's children.
                self.nsga_tournament()
                self.create_mating_pool()
                self.simple_crossover()
                self.random_mutation()
            else:
                print(self)
def evaluate_fitness(self, index, fit_func):
chromo = ''.join(str(y) for x in self.current_pop['binary'][index] for y in x)
fit = self.ind_fit_dict.setdefault(chromo, None)
if not fit:
fit = fit_func(self.current_pop['scaled'][index], *self.fargs, **self.fkwargs)
self.ind_fit_dict[chromo] = fit
return fit
def write_out_file(self, generation):
"""This function writes a file containing all of the population data for
the given ``generation``.
Parameters
----------
generation: int
The generation to write the population data of.
"""
filename = 'generation ' + "%03d" % generation + '_pareto_front' + ".pareto"
pf_file = open(self.output_path + (str(filename)), "wb")
pf_file.write('Generation \n')
pf_file.write(str(generation) + '\n')
pf_file.write('\n')
pf_file.write('Number of individuals per generation\n')
pf_file.write(str(self.num_pop))
pf_file.write('\n')
pf_file.write('\n')
pf_file.write('Population scaled variables \n')
for i in range(self.num_pop):
pf_file.write(str(i) + ',')
for f in range(self.num_var):
pf_file.write(str(self.parent_pop['scaled'][i][f]))
pf_file.write(',')
pf_file.write('\n')
pf_file.write('\n')
pf_file.write('Population fitness values \n')
for i in range(self.num_pop):
pf_file.write(str(i) + ',')
for f in range(self.num_fit_func):
pf_file.write(str(self.parent_pop['fit_values'][i][f]))
pf_file.write(',')
pf_file.write('\n')
pf_file.write('\n')
pf_file.write('Population Pareto front indices \n')
for i in range(self.num_pop):
pf_file.write(str(i) + ',')
pf_file.write(str(self.parent_pop['pf'][i]) + '\n')
pf_file.write('\n')
pf_file.write('\n')
pf_file.close()
def generate_random_bin_pop(self):
"""This function generates a random binary population
Returns
-------
rendom_bin_pop: dict
The generated random binary population dictionary.
"""
random_bin_pop = [[[]] * self.num_var for i in range(self.num_pop)]
for j in range(self.num_pop):
for i in range(self.num_var):
random_bin_pop[j][i] = [random.randint(0, 1) for u in range(self.num_bin_dig[i])]
return random_bin_pop
def decode_binary_pop(self, bin_pop):
"""This function decodes the given binary population
Parameters
----------
bin_pop: dict
The binary population to decode.
Returns
-------
decoded_pop: dict
The decoded population dictionary.
"""
decoded_pop = [[[]] * self.num_var for i in range(self.num_pop)]
for j in range(len(bin_pop)):
decoded_pop[j] = {}
for i in range(self.num_var):
value = 0
chrom = bin_pop[j][i]
for u, gene in enumerate(chrom):
if gene == 1:
value = value + 2**u
decoded_pop[j][i] = value
return decoded_pop
def scale_population(self, decoded_pop):
"""Scales the decoded population, variable values are scaled according to each
of their bounds contained in ``GA.boundaries``.
Parameters
----------
decoded_pop: list
The decoded population list.
Returns
-------
scaled_pop: list
The scaled ppopulation list.
"""
# scaled_pop = [[[]] * self.num_var for i in range(self.num_pop)]
# for j in range(self.num_pop):
# for i in range(self.num_var):
# maxbin = float((2 ** self.num_bin_dig[i]) - 1)
# scaled_pop[j][i] = self.boundaries[i][0] + (self.boundaries[i][1] - self.boundaries[i][0]) * decoded_pop[j][i] / maxbin
# return scaled_pop
scaled_pop = [[[]] * self.num_var for i in range(self.num_pop)]
for j in range(self.num_pop):
for i in range(self.num_var):
maxbin = (2 ** self.num_bin_dig[i]) - 1
scaled_pop[j][i] = decoded_pop[j][i] * (self.boundaries[i][1] - self.boundaries[i][0]) / float((maxbin + self.boundaries[i][0]))
return scaled_pop
def combine_populations(self):
"""This function combines the parent population with the current population
to create a 2 x ``MOGA.num_pop`` long current population.
"""
self.combined_pop['binary'] = [[[]] * self.num_var for i in range(self.num_pop * 2)]
self.combined_pop['decoded'] = [[[]] * self.num_var for i in range(self.num_pop * 2)]
self.combined_pop['scaled'] = [[[]] * self.num_var for i in range(self.num_pop * 2)]
self.combined_pop['fit_values'] = [[[]] * self.num_fit_func for i in range(self.num_pop * 2)]
for i in range(self.num_pop):
self.combined_pop['binary'][i] = self.parent_pop['binary'][i]
self.combined_pop['binary'][i + self.num_pop] = self.current_pop['binary'][i]
self.combined_pop['decoded'][i] = self.parent_pop['decoded'][i]
self.combined_pop['decoded'][i + self.num_pop] = self.current_pop['decoded'][i]
self.combined_pop['scaled'][i] = self.parent_pop['scaled'][i]
self.combined_pop['scaled'][i + self.num_pop] = self.current_pop['scaled'][i]
self.combined_pop['fit_values'][i] = self.parent_pop['fit_values'][i]
self.combined_pop['fit_values'][i + self.num_pop] = self.current_pop['fit_values'][i]
    def non_dom_sort(self):
        """Perform the non-dominated sorting operator of the NSGA-II algorithm.

        Assigns each individual of the combined (parent + current) population
        to a Pareto-front level according to its fitness values. Results are
        stored on the instance:

        - ``domination_count``: per individual, how many others dominate it
          (decremented to -1 once the individual has been placed in a front).
        - ``dominated_set`` / ``dominating_individuals``: parallel lists; each
          position ``h`` encodes one "dominating_individuals[h] dominates
          dominated_set[h]" pair.
        - ``pareto_front_individuals``: individuals listed front by front.
        - ``pareto_front_indices``: boundaries of each front inside
          ``pareto_front_individuals``.
        """
        self.domination_count = {}
        self.dominated_set = []
        self.dominating_individuals = []
        self.pareto_front_indices = []
        self.pareto_front_individuals = []
        for i in range(self.num_pop * 2):
            self.domination_count[i] = 0
            for k in range(self.num_pop * 2):
                if i == k:
                    continue
                # Count in how many objectives i beats k (count_sup) and in
                # how many it loses (count_inf), honoring each objective's
                # 'min'/'max' sense.
                count_sup = 0
                count_inf = 0
                for j in range(self.num_fit_func):
                    if self.fit_types[j] == 'min':
                        if self.combined_pop['fit_values'][i][j] < self.combined_pop['fit_values'][k][j]:
                            count_sup += 1
                        elif self.combined_pop['fit_values'][i][j] > self.combined_pop['fit_values'][k][j]:
                            count_inf += 1
                    elif self.fit_types[j] == 'max':
                        if self.combined_pop['fit_values'][i][j] > self.combined_pop['fit_values'][k][j]:
                            count_sup += 1
                        elif self.combined_pop['fit_values'][i][j] < self.combined_pop['fit_values'][k][j]:
                            count_inf += 1
                if count_sup < 1 and count_inf >= 1:
                    # k is better in at least one objective and worse in none:
                    # k dominates i.
                    self.domination_count[i] += 1
                elif count_sup >= 1 and count_inf < 1:
                    # i is better in at least one objective and worse in none:
                    # i dominates k.
                    self.dominated_set.append(k)
                    self.dominating_individuals.append(i)
        pareto_front_number = 0
        self.pareto_front_indices.append(0)
        # Peel off fronts until at least num_pop individuals have been ranked.
        while len(self.pareto_front_individuals) < self.num_pop:
            index_count = 0
            for i in range(self.num_pop * 2):
                if self.domination_count[i] == 0:
                    # Dominated by no remaining individual: current front.
                    self.pareto_front_individuals.append(i)
                    # Drop to -1 so this individual is never selected again.
                    self.domination_count[i] -= 1
                    index_count += 1
            index = index_count + self.pareto_front_indices[pareto_front_number]
            self.pareto_front_indices.append(index)
            a = self.pareto_front_indices[pareto_front_number]
            b = self.pareto_front_indices[pareto_front_number + 1]
            # Remove the current front's influence: decrement the domination
            # count of everyone it dominates (skipping already-ranked ones,
            # whose count is negative).
            for k in range(a, b):
                for h in range(len(self.dominating_individuals)):
                    if self.pareto_front_individuals[k] == self.dominating_individuals[h]:
                        if self.domination_count[self.dominated_set[h]] >= 0:
                            self.domination_count[self.dominated_set[h]] = self.domination_count[self.dominated_set[h]] - 1
            pareto_front_number += 1
def extract_pareto_front(self, u):
"""Adds each new level of pareto front individuals to the ``MOGA.i_pareto_front`` list.
"""
self.i_pareto_front = []
for i in range(self.pareto_front_indices[u], self.pareto_front_indices[u + 1]):
self.i_pareto_front.append(self.pareto_front_individuals[i])
    def calculate_crowding_distance(self):
        """Calculate the crowding distance of the current Pareto front.

        The crowding distance of an individual measures the size of the
        hypercube bounded by its closest neighbors in objective space. NSGA-II
        uses it to spread the population along the Pareto front and avoid
        crowded areas, thus better representing the variety of solutions in
        the front. The extreme individuals of each objective get an infinite
        distance so they are always preserved. Distances are appended to
        ``self.new_pop_cd``.
        """
        self.num_i_pareto_front = len(self.i_pareto_front)
        self.pf_values = [0] * self.num_i_pareto_front
        self.crowding_distance = [0] * self.num_i_pareto_front
        for i in range(self.num_fit_func):
            # Normalization span of objective i over the whole combined
            # population. NOTE(review): delta == 0 (all individuals equal in
            # this objective) would divide by zero below -- confirm the
            # inputs rule this out.
            ind_fit_values_list = [fit_val[i] for fit_val in self.combined_pop['fit_values']]
            delta = max(ind_fit_values_list) - min(ind_fit_values_list)
            for k in range(self.num_i_pareto_front):
                self.pf_values[k] = (self.combined_pop['fit_values'][self.i_pareto_front[k]][i])
            # Sort the front by this objective, best first.
            if self.fit_types[i] == 'max':
                self.sorted_indices = self.get_sorting_indices(self.pf_values, reverse=True)
            else:
                self.sorted_indices = self.get_sorting_indices(self.pf_values, reverse=False)
            # Boundary individuals of the objective are always kept.
            self.crowding_distance[self.sorted_indices[0]] = float('inf')
            self.crowding_distance[self.sorted_indices[self.num_i_pareto_front - 1]] = float('inf')
            for j in range(1, self.num_i_pareto_front - 1):
                # Normalized gap between individual j's two neighbors.
                formula = (self.pf_values[self.sorted_indices[j + 1]] - self.pf_values[self.sorted_indices[j - 1]]) / delta
                self.crowding_distance[self.sorted_indices[j]] += formula
        for i in range(self.num_i_pareto_front):
            self.new_pop_cd.append(self.crowding_distance[i])
def get_sorting_indices(self, l, reverse=False):
"""Reurns the indices that would sort a list of floats.
Parameters
----------
l: list
The list of floats to be sorted.
reverse: bool
If true the sorting will be done from top to bottom.
Returns
-------
sorting_index: list
The list of indices that would sort the given list of floats.
"""
sorting_index = [i for (v, i) in sorted((v, i) for (i, v) in enumerate(l))]
if reverse is True:
sorting_index = list(reversed(sorting_index))
return sorting_index
    def crowding_distance_sorting(self):
        """Sort the last extracted Pareto front by descending crowding distance.

        Replaces the tail of ``self.new_pop_cd`` with the front's distances in
        descending order, and the matching tail of
        ``self.pareto_front_individuals`` with the front's members reordered
        to correspond.
        """
        cd_sorted_last_pf_index = []
        # Distances of the last front, largest (least crowded) first.
        sorted_last_pf_cd = sorted(self.crowding_distance)
        sorted_last_pf_cd = list(reversed(sorted_last_pf_cd))
        sorting_index = self.get_sorting_indices(self.crowding_distance, reverse=True)
        for i in range(self.num_i_pareto_front):
            cd_sorted_last_pf_index.append(self.i_pareto_front[sorting_index[i]])
        # Overwrite the tail slots occupied by the last front.
        self.new_pop_cd[len(self.new_pop_cd) - self.num_i_pareto_front:len(self.new_pop_cd)] = sorted_last_pf_cd[:]
        # NOTE(review): this slice is positioned by len(self.new_pop_cd), not
        # by the front's own offset inside pareto_front_individuals -- it
        # relies on both lists being kept aligned by the callers; verify.
        self.pareto_front_individuals[len(self.new_pop_cd) - self.num_i_pareto_front: len(self.new_pop_cd)] = cd_sorted_last_pf_index[:]
def parent_reseting(self):
"""This function updates the patent population, selecting the individuals that are higher
in the pareto front level, and have the largest crowding distance.
"""
self.parent_pop['scaled'] = []
self.parent_pop['decoded'] = []
self.parent_combined_dict = {}
for | |
x = cx - w/2
y = cy - h/2
pygame.draw.rect(screen, DARKRED, [x, y, w, h])
pygame.draw.rect(screen, DARKRED, [x, y, w, h], 4)
if mistakes < 5:
pygame.draw.rect(screen, LIGHTRED, [x+1, y+1, w-(w*(mistakes/(strikes-1)))-1, h-1])
## this life bar is pretty simple. I use center-x and center-y so it would be easier to
## put in the same place as the timer. The (w-(w*(mistakes/(strikes-1)))-1) thing is
## the width of the red bar: The full width, minus whatever fraction of it is used up.
## So, if they've made 2 out of their 5 mistakes (the strikes variable is 6 because
## it's the sixth that kills them, hence the -6), and the width is 480 pixels, then
## it will become (480 - (480*2/5)), or two fifths less than 480. The negative one on
## the w and h is just to make it fit nicer in the outer box.
screen.blit(mistakesLeft, [x + (w*1/100), y - (h*2/3)])
def drawKeys(color):
    ## Draws every key of the on-screen keyboard as a 52x52 square in the
    ## given color (top row first, then home row, then bottom row), and adds
    ## the F/J home-row bumps in a matching darker shade.
    for row in (key_locations_top, key_locations_mid, key_locations_bot):
        for key_x, key_y in row:
            pygame.draw.rect(screen, color, [key_x, key_y, 52, 52])
    if color == LIGHTGREEN:
        bump_color = GREEN
    elif color == LIGHTRED:
        bump_color = RED
    else:
        bump_color = None
    if bump_color is not None:
        pygame.draw.line(screen, bump_color, (360, 630), (396, 630), 2) #F key's bump
        pygame.draw.line(screen, bump_color, (528, 630), (564, 630), 2) #J key's bump
def getCORKey():
    ## Picks a random lowercase letter: ASCII codes 97..122 are "a".."z".
    CORkey = random.randint(ord('a'), ord('z'))
    return CORkey
def getCORSpot(key):
    ## Maps an ASCII letter code to its keyboard position: CORKeyRow is the
    ## row (0 = top, 1 = home, 2 = bottom) and CORKeyN is the key's index
    ## counted from the left of that row. This makes it very easy to use
    ## with the key-location lists. Non-letter codes leave both globals
    ## untouched, exactly like the old if/elif ladder did.
    global CORKeyRow
    global CORKeyN
    keyboard_rows = ("qwertyuiop", "asdfghjkl", "zxcvbnm")
    letter = chr(key)
    for row_number, row in enumerate(keyboard_rows):
        spot = row.find(letter)
        if spot != -1:
            CORKeyRow = row_number
            CORKeyN = spot
            return
def scoresDEF(): ##<< This function grabs the scores from the highscores.txt and puts the new score in its place
    ## Returns a list of 10 (int score, initials) tuples, highest first, with
    ## the player's current global `score`/`name` merged in if it qualifies.
    ## The file itself is not rewritten here (see scoresWRITE).
    highscores = []
    highscoresINT = []
    file = open("highscores.txt", "r+") ## r+ means it gets opened in read/write mode
    for i in range(0, 10):
        data = file.readline() ## this command runs the next line each time it is run
        highscores.append(data) ## hence it being put in a loop
    file.close() ## once the data is in a list, it doesn't need to be open, using resources
    for i in range(0, 10): ## because the lines are imported as "11111:ABC\n", they have to be turned into (11111, "ABC").
        highscores[i] = ( int(highscores[i].split(":")[0]), ((highscores[i]).split(":")[1]).split("\n")[0] )
        ## highscores[i] = "11111:ABC", so split(":")[0] is just 11111, and [1] is ABC. The ABC then has \n hacked off.
        highscoresINT.append((highscores[i])[0])
    for i in range(0, 10): #checks each item individually in the list against a list of just the numbers
        if score > (highscores[i])[0] and score not in highscoresINT:
            oldscore = highscores[i] # saves the score being replaced so it can move down in the list
            highscores[i] = (score, name)
            highscores.append(oldscore)
            highscoresINT.append(score)
            highscoresINT = (sorted(highscoresINT, reverse=True))[:10]
            highscores = (sorted(highscores, reverse=True))[:10]
    highscoresINT.append(score)
    ## NOTE(review): `score` may already have been appended to highscoresINT
    ## inside the loop above; the duplicate is harmless for the return value
    ## (only `highscores` is returned) but confirm it is intentional.
    highscoresINT = (sorted(highscoresINT, reverse=True))[:10] #[:10] keeps only the first 10 items.
    ## the score sorting works by putting the replaced score at the 11th spot, then sorting to get it into place, then taking off the last one.
    return highscores
def scoresSTRING(): ##<< returns the high-score list with every entry stringified so it can be printed
    highscores = scoresDEF()
    for i in range(0, 10):
        entry_score, entry_name = highscores[i]
        if entry_score == score:
            ## tag the freshly-earned score with ":b" so the caller knows to draw it bold
            highscores[i] = (str(entry_score) + ":b", entry_name)
        else:
            highscores[i] = (str(entry_score), entry_name)
    return highscores
def scoresWRITE(): ##<< persists the current high-score list back to highscores.txt
    highscores = scoresDEF()
    ## rebuild the "11111:ABC\n" line format the file uses
    lines = [str(entry[0]) + ":" + entry[1] + "\n" for entry in highscores[:10]]
    file = open("highscores.txt", "r+")
    file.seek(0)
    file.truncate()
    file.writelines(lines)
    file.close()
def scoresRESET(): ##<< restores highscores.txt to the preset default scores
    resetList = [
        '20000:aaa\n',
        '17500:bbb\n',
        '15000:ccc\n',
        '12500:ddd\n',
        '10000:eee\n',
        '7500:fff\n',
        '5000:ggg\n',
        '2500:hhh\n',
        '1000:iii\n',
        '50:jjj\n'
    ]
    ## "w" both creates the file if it is missing and truncates it; the old
    ## "r+" + seek(0) + truncate() dance crashed with FileNotFoundError when
    ## highscores.txt did not exist yet.
    with open("highscores.txt", "w") as file:
        file.writelines(resetList)
    ## NOTE(review): the previous local `score = 0` / `name = [...]`
    ## assignments were dead code -- they shadowed the module globals without
    ## resetting them, so they were removed. Confirm the caller resets the
    ## live score/name itself.
## Main Game: ----------------------------------------------------------------------------------------------------------------------------------------------------------!!!
while True:
## initializes the key's locations --
key_locations = []
## Top row
global key_locations_top
key_locations_top = []
topRowX = 162
topRowY = 532
for i in range(0,10):
key_locations_top.append(((topRowX + 56 * i), topRowY))
## Middle Row
global key_locations_mid
key_locations_mid = []
midRowX = 184
midRowY = 590
#F = 352, 590
#J = 520, 590
for i in range(0,9):
key_locations_mid.append(((midRowX + 56 * i), midRowY))
## Bottom Row
global key_locations_bot
key_locations_bot = []
botRowX = 207
botRowY = 648
for i in range(0,7):
key_locations_bot.append(((botRowX + 56 * i), botRowY))
## Whole Keyboard
key_locations.append(key_locations_top)
key_locations.append(key_locations_mid)
key_locations.append(key_locations_bot)
## --
## Introduction: -----------------------------------------------
opening = True
while opening:
for event in pygame.event.get():
if event.type == pygame.QUIT: pygame.quit(), sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE: pygame.quit(), sys.exit()
elif event.key == pygame.K_BACKQUOTE and meme_hack == False: meme_hack = True
elif event.key == pygame.K_BACKQUOTE and meme_hack == True: meme_hack = False
else:
opening = False
screen.blit(imgBKGD, [0,0]) ##Background
pygame.draw.rect(screen, BLACK, [140, 80, 720, 355]) ##Monitor Screen
drawKeys(LIGHTRED)
explainText1 = monitorFont.render(">>> In this game, you will have to press keys as they turn red on", True, WHITE)
explainText2 = monitorFont.render(">>> the keyboard you see below you.", True, WHITE)
explainText3 = monitorFont.render(">>> You have 30 seconds, although each key you press will", True, WHITE)
explainText4 = monitorFont.render(">>> earn you 0.5 extra.", True, WHITE)
explainText5 | |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Command module."""
import os
import sys
import argparse
import mindinsight
from mindinsight.mindconverter.converter import main
from mindinsight.mindconverter.graph_based_converter.common.utils import get_framework_type
from mindinsight.mindconverter.graph_based_converter.constant import ARGUMENT_LENGTH_LIMIT, EXPECTED_NUMBER, \
FrameworkType
from mindinsight.mindconverter.graph_based_converter.framework import main_graph_base_converter
from mindinsight.mindconverter.common.log import logger as log, logger_console as log_console
class ArgsCheck:
    """Helpers for validating parsed command-line arguments."""

    @staticmethod
    def check_repeated(namespace, dest, default, option_string, parser_in):
        """Abort parsing when an option has already been assigned a value.

        Args:
            namespace (Namespace): Namespace object holding parsed arguments.
            dest (str): Destination attribute name of the option.
            default (object): The option's default value.
            option_string (str): The option string, used in the error message.
            parser_in (ArgumentParser): Parser used to report the error.
        """
        current = getattr(namespace, dest, default)
        if current is not default:
            parser_in.error(f'Parameter `{option_string}` is set repeatedly.')
class FileDirAction(argparse.Action):
    """Argparse action that validates a directory path argument."""

    @staticmethod
    def check_path(parser_in, values, option_string=None):
        """
        Normalize and validate a path argument.

        Args:
            parser_in (ArgumentParser): Passed-in argument parser.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Optional string for specific argument name. Default: None.
        """
        path = values
        if len(path) > ARGUMENT_LENGTH_LIMIT:
            parser_in.error(
                f"The length of {option_string}{path} should be no more than {ARGUMENT_LENGTH_LIMIT}.")
        # Expand "~" first, then anchor any still-relative path at the CWD.
        if path.startswith('~'):
            path = os.path.realpath(os.path.expanduser(path))
        if not path.startswith('/'):
            path = os.path.realpath(os.path.join(os.getcwd(), path))
        # Only existing paths can be checked for readability.
        if os.path.exists(path) and not os.access(path, os.R_OK):
            parser_in.error(f'{option_string} {path} not accessible')
        return path

    def __call__(self, parser_in, namespace, values, option_string=None):
        """
        Inherited __call__ method from argparse.Action.

        Args:
            parser_in (ArgumentParser): Passed-in argument parser.
            namespace (Namespace): Namespace object to hold arguments.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Optional string for specific argument name. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        target = self.check_path(parser_in, values, option_string)
        if os.path.isfile(target):
            parser_in.error(f'{option_string} {target} is a file')
        setattr(namespace, self.dest, target)
class OutputDirAction(argparse.Action):
    """Argparse action that validates the output directory argument."""

    def __call__(self, parser_in, namespace, values, option_string=None):
        """
        Inherited __call__ method from argparse.Action.

        Args:
            parser_in (ArgumentParser): Passed-in argument parser.
            namespace (Namespace): Namespace object to hold arguments.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Optional string for specific argument name. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        target = values
        if len(target) > ARGUMENT_LENGTH_LIMIT:
            parser_in.error(
                f"The length of {option_string}{target} should be no more than {ARGUMENT_LENGTH_LIMIT}.")
        # Expand "~" first, then anchor any still-relative path at the CWD.
        if target.startswith('~'):
            target = os.path.realpath(os.path.expanduser(target))
        if not target.startswith('/'):
            target = os.path.realpath(os.path.join(os.getcwd(), target))
        # A nonexistent path is accepted as-is; an existing one must be a
        # readable non-file.
        if os.path.exists(target):
            if not os.access(target, os.R_OK):
                parser_in.error(f'{option_string} {target} not accessible')
            if os.path.isfile(target):
                parser_in.error(f'{option_string} {target} is a file')
        setattr(namespace, self.dest, target)
class ProjectPathAction(argparse.Action):
    """Argparse action that validates an existing project directory."""

    def __call__(self, parser_in, namespace, values, option_string=None):
        """
        Inherited __call__ method from argparse.Action.

        Args:
            parser_in (ArgumentParser): Passed-in argument parser.
            namespace (Namespace): Namespace object to hold arguments.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Optional string for specific argument name. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        project_dir = FileDirAction.check_path(parser_in, values, option_string)
        # The project path must already exist and be a directory.
        if not os.path.exists(project_dir):
            parser_in.error(f'{option_string} {project_dir} not exists')
        if not os.path.isdir(project_dir):
            parser_in.error(f'{option_string} [{project_dir}] should be a directory.')
        setattr(namespace, self.dest, project_dir)
class InFileAction(argparse.Action):
    """Argparse action that validates an input Python source file."""

    def __call__(self, parser_in, namespace, values, option_string=None):
        """
        Validate that the value names an existing, readable ``.py`` file and
        store it on the namespace.

        Args:
            parser_in (ArgumentParser): Passed-in argument parser.
            namespace (Namespace): Namespace object to hold arguments.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Optional string for specific argument name. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        in_file = FileDirAction.check_path(parser_in, values, option_string)
        if not os.path.exists(in_file):
            parser_in.error(f'{option_string} {in_file} not exists')
        if not os.path.isfile(in_file):
            parser_in.error(f'{option_string} {in_file} is not a file')
        # Check the full ".py" extension; the previous suffix test ("py")
        # also accepted names like "scrapy" or "model.numpy".
        if not os.path.basename(in_file).endswith(".py"):
            parser_in.error(f'{option_string} {in_file} is not a valid python file')
        setattr(namespace, self.dest, in_file)
class ModelFileAction(argparse.Action):
    """Argparse action validating the --model_file argument.

    The value must point at an existing file whose framework can be
    identified as TensorFlow (pb) or PyTorch (pth).
    """

    def __call__(self, parser_in, namespace, values, option_string=None):
        """Validate *values* as a supported model file and store it.

        Args:
            parser_in (ArgumentParser): Parser used to report errors.
            namespace (Namespace): Namespace receiving the parsed value.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Option name that triggered this action. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        model_path = FileDirAction.check_path(parser_in, values, option_string)
        if not os.path.exists(model_path):
            parser_in.error(f'{option_string} {model_path} not exists')
        if not os.path.isfile(model_path):
            parser_in.error(f'{option_string} {model_path} is not a file')
        # Reject files whose framework cannot be identified from their content.
        if get_framework_type(model_path) == FrameworkType.UNKNOWN.value:
            parser_in.error(f'{option_string} {model_path} should be an valid '
                            f'TensorFlow pb or PyTorch pth model file')
        setattr(namespace, self.dest, model_path)
class LogFileAction(argparse.Action):
    """Argparse action validating the --report argument.

    The path is allowed to be absent (it is created later by cli_entry),
    but if it already exists it must be a directory.
    """

    def __call__(self, parser_in, namespace, values, option_string=None):
        """Validate *values* as a usable report directory and store it.

        Args:
            parser_in (ArgumentParser): Parser used to report errors.
            namespace (Namespace): Namespace receiving the parsed value.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Option name that triggered this action. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        report_dir = FileDirAction.check_path(parser_in, values, option_string)
        if os.path.exists(report_dir) and not os.path.isdir(report_dir):
            parser_in.error(f'{option_string} {report_dir} is not a directory')
        setattr(namespace, self.dest, report_dir)
class ShapeAction(argparse.Action):
    """Argparse action parsing the --shape argument.

    Converts a comma-separated integer list (a single tensor shape, e.g.
    "1,3,244,244") into a Python list of ints.
    """

    def __call__(self, parser_in, namespace, values, option_string=None):
        """Parse *values* into a list of ints and store it.

        Args:
            parser_in (ArgumentParser): Parser used to report errors.
            namespace (Namespace): Namespace receiving the parsed value.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Option name that triggered this action. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        shape_str = values
        # Multiple shapes would be colon-separated; only one is supported.
        shape_list = shape_str.split(':')
        if len(shape_list) != EXPECTED_NUMBER:
            parser_in.error(f"Only support one shape now, but get {len(shape_list)}.")
        parsed_shape = None
        try:
            parsed_shape = [int(dim) for dim in shape_list[0].split(',')]
        except ValueError:
            parser_in.error(
                f"{option_string} {shape_str} should be a list of integer split by ',', check it please.")
        setattr(namespace, self.dest, parsed_shape)
class NodeAction(argparse.Action):
    """Argparse action validating --input_nodes / --output_nodes.

    Enforces a maximum argument length and that exactly one node group is
    supplied per option; the raw string is stored unchanged.
    """

    def __call__(self, parser_in, namespace, values, option_string=None):
        """Validate the node-name string in *values* and store it.

        Args:
            parser_in (ArgumentParser): Parser used to report errors.
            namespace (Namespace): Namespace receiving the parsed value.
            values (object): Argument values with type depending on argument definition.
            option_string (str): Option name that triggered this action. Default: None.
        """
        ArgsCheck.check_repeated(namespace, self.dest, self.default, option_string, parser_in)
        node_str = values
        if len(node_str) > ARGUMENT_LENGTH_LIMIT:
            # Bug fix: add the missing space between the option name and its
            # value so the error message is readable (was "{option}{value}").
            parser_in.error(
                f"The length of {option_string} {node_str} should be no more than {ARGUMENT_LENGTH_LIMIT}."
            )
        # NOTE(review): this rejects comma-separated values whenever
        # EXPECTED_NUMBER == 1, which appears to conflict with the help text
        # usage "--input_nodes input_1:0,input_2:0" — confirm against callers.
        node_list = node_str.split(',')
        if not len(node_list) == EXPECTED_NUMBER:
            parser_in.error(f"Only support one {option_string} now, but get {len(node_list)}.")
        setattr(namespace, self.dest, node_str)
parser = argparse.ArgumentParser(
prog='mindconverter',
description='MindConverter CLI entry point (version: {})'.format(mindinsight.__version__),
allow_abbrev=False)
parser.add_argument(
'--version',
action='version',
version='%(prog)s ({})'.format(mindinsight.__version__))
parser.add_argument(
'--in_file',
type=str,
action=InFileAction,
required=False,
default=None,
help="""
Specify path for script file to use AST schema to
do script conversation.
""")
parser.add_argument(
'--model_file',
type=str,
action=ModelFileAction,
required=False,
help="""
PyTorch .pth or Tensorflow .pb model file path to use graph
based schema to do script generation. When
`--in_file` and `--model_file` are both provided,
use AST schema as default.
""")
parser.add_argument(
'--shape',
type=str,
action=ShapeAction,
default=None,
required=False,
help="""
Optional, expected input tensor shape of
`--model_file`. It's required when use graph based
schema.
Usage: --shape 1,3,244,244
""")
parser.add_argument(
'--input_nodes',
type=str,
action=NodeAction,
default=None,
required=False,
help="""
Optional, input node(s) name of `--model_file`. It's required when use Tensorflow model.
Usage: --input_nodes input_1:0,input_2:0
""")
parser.add_argument(
'--output_nodes',
type=str,
action=NodeAction,
default=None,
required=False,
help="""
Optional, output node(s) name of `--model_file`. It's required when use Tensorflow model.
Usage: --output_nodes output_1:0,output_2:0
""")
parser.add_argument(
'--output',
type=str,
action=OutputDirAction,
default=os.path.join(os.getcwd(), 'output'),
help="""
Optional, specify path for converted script file
directory. Default output directory is `output` folder
in the current working directory.
""")
parser.add_argument(
'--report',
type=str,
action=LogFileAction,
default=None,
help="""
Optional, specify report directory. Default is
converted script directory.
""")
parser.add_argument(
'--project_path',
type=str,
action=ProjectPathAction,
required=False,
default=None,
help="""
Optional, PyTorch scripts project path. If PyTorch
project is not in PYTHONPATH, please assign
`--project_path` when use graph based schema.
Usage: --project_path ~/script_file/
""")
def cli_entry():
    """Entry point for mindconverter CLI."""
    # Mask group/other bits so every file we create is owner-only (0o700).
    permissions = os.R_OK | os.W_OK | os.X_OK
    os.umask(permissions << 3 | permissions)

    cli_args = sys.argv[1:]
    if cli_args:
        args = parser.parse_args()
    else:
        # No arguments given: print the help text instead of failing.
        args = parser.parse_args(['-h'])

    # Owner rwx directory mode for the output/report directories.
    dir_mode = permissions << 6
    os.makedirs(args.output, mode=dir_mode, exist_ok=True)
    if args.report is None:
        args.report = args.output
    os.makedirs(args.report, mode=dir_mode, exist_ok=True)
    _run(args.in_file, args.model_file, args.shape,
         args.input_nodes, args.output_nodes,
         args.output, args.report, args.project_path)
def _run(in_files, model_file, shape, input_nodes, output_nodes, out_dir, report, project_path):
"""
Run converter command.
Args:
in_files (str): The file path or directory to convert.
model_file(str): The pytorch .pth to convert on graph based schema.
shape(list): The input tensor shape of module_file.
input_nodes(str): The input node(s) name of Tensorflow model, split by ','.
output_nodes(str): The output node(s) name of | |
<gh_stars>100-1000
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.utils import resize
from mmseg.accuracy import accuracy
import math
from utils.coco_dataset import transform_preds
def cross_entropy(pred,
                  label,
                  weight=None,
                  class_weight=None,
                  reduction='mean',
                  avg_factor=None,
                  ignore_index=255):  # TODO(Mingyu): By default, ignore 255
    """Wrapper around :func:`F.cross_entropy` with mmseg-style reduction.

    Computes the elementwise loss first, then delegates per-sample
    weighting and reduction to :func:`weight_reduce_loss`.
    """
    # class_weight is a manual rescaling weight given to each class;
    # if given it must be a tensor of size C.
    raw_loss = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(
        raw_loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels):
    """Expand integer labels into one-hot targets of the prediction size.

    Label ``0`` is treated as background and yields an all-zero row; label
    ``k`` (k >= 1) sets column ``k - 1``.  Weights, when given, are repeated
    across all channels.
    """
    onehot = labels.new_full((labels.size(0), label_channels), 0)
    foreground = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
    if foreground.numel() > 0:
        onehot[foreground, labels[foreground] - 1] = 1
    if label_weights is None:
        expanded_weights = None
    else:
        expanded_weights = label_weights.view(-1, 1).expand(
            label_weights.size(0), label_channels)
    return onehot, expanded_weights
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None):
    """Binary cross-entropy with optional one-hot expansion and weighting.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): 'none', 'mean' or 'sum'.
        avg_factor (int, optional): Divisor used when averaging the loss.
            Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # Expand integer labels to one-hot when dimensionalities differ.
    if pred.dim() != label.dim():
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
    if weight is not None:
        weight = weight.float()
    elementwise = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight=class_weight, reduction='none')
    # Apply sample weights and reduce.
    return weight_reduce_loss(
        elementwise, weight, reduction=reduction, avg_factor=avg_factor)
def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None):
    """Binary cross-entropy on the mask channel selected by *label*.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): Class label of each ROI's object; selects
            which mask channel is scored when the prediction is not
            class-agnostic.
        reduction (str, optional): Must be 'mean' (reserved argument).
        avg_factor (int, optional): Must be None (reserved argument).
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss, with a leading unit dimension.
    """
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    roi_count = pred.size()[0]
    roi_ids = torch.arange(0, roi_count, dtype=torch.long, device=pred.device)
    selected = pred[roi_ids, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        selected, target, weight=class_weight, reduction='mean')[None]
class CrossEntropyLoss(nn.Module):
    """Cross-entropy loss supporting softmax, sigmoid and mask variants.

    Args:
        use_sigmoid (bool, optional): Use sigmoid (binary) cross-entropy
            instead of softmax. Defaults to False.
        use_mask (bool, optional): Use mask cross entropy loss.
            Defaults to False.
        reduction (str, optional): 'none', 'mean' or 'sum'.
            Defaults to 'mean'.
        class_weight (list[float], optional): Weight of each class.
            Defaults to None.
        loss_weight (float, optional): Global scale of the loss.
            Defaults to 1.0.
        align_corners (bool, optional): Forwarded to the resize op.
            Defaults to False.
    """

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0,
                 align_corners=False):
        super(CrossEntropyLoss, self).__init__()
        # Sigmoid and mask modes are mutually exclusive.
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.align_corners = align_corners
        # Pick the criterion once at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Resize logits to the label size, then return (loss, accuracy)."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        # Upsample the logits to the spatial size of the labels.
        cls_score = resize(
            input=cls_score,
            size=label.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        class_weight = (cls_score.new_tensor(self.class_weight)
                        if self.class_weight is not None else None)
        label = label.squeeze(1)
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls, accuracy(cls_score, label)
def reduce_loss(loss, reduction):
    """Reduce an elementwise loss tensor as requested.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): One of "none", "mean" or "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    # Validate the mode exactly like PyTorch's built-in losses do;
    # get_enum raises ValueError on unknown strings.
    # Mapping: none -> 0, elementwise_mean -> 1, sum -> 2.
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 1:
        return loss.mean()
    if reduction_enum == 2:
        return loss.sum()
    # 0 ('none'): hand the elementwise loss back untouched.
    return loss
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply an elementwise weight to *loss*, then reduce it.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights; must match the loss rank.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Custom divisor used with reduction='mean'.

    Returns:
        Tensor: Processed loss values.
    """
    # If a weight is specified, apply it element-wise.
    if weight is not None:
        assert weight.dim() == loss.dim()
        if weight.dim() > 1:
            assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight

    if avg_factor is None:
        # No custom normalisation: fall back to the plain reduction.
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # Average by the caller-supplied factor, not the element count.
        return loss.sum() / avg_factor
    if reduction != 'none':
        # avg_factor only makes sense with 'mean' (or as a no-op with 'none').
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
def weighted_loss(loss_func):
    """Decorator adding ``weight``/``reduction``/``avg_factor`` handling.

    The wrapped function must compute an elementwise loss with the
    signature ``loss_func(pred, target, **kwargs)`` and perform no
    reduction itself.  The decorated function gains the signature
    ``loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`` and delegates weighting/reduction to
    :func:`weight_reduce_loss`.

    :Example:

        >>> import torch
        >>> @weighted_loss
        >>> def l1_loss(pred, target):
        >>>     return (pred - target).abs()

        >>> pred = torch.Tensor([0, 2, 3])
        >>> target = torch.Tensor([1, 1, 1])
        >>> weight = torch.Tensor([1, 0, 1])

        >>> l1_loss(pred, target)
        tensor(1.3333)
        >>> l1_loss(pred, target, weight)
        tensor(1.)
        >>> l1_loss(pred, target, reduction='none')
        tensor([1., 1., 2.])
        >>> l1_loss(pred, target, weight, avg_factor=2)
        tensor(1.5000)
    """
    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Compute the raw elementwise loss, then weight and reduce it.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
class JointsMSELoss(nn.Module):
    """Mean-squared-error loss over per-joint pose heatmaps.

    Each joint's flattened heatmap contributes 0.5 * MSE; the result is
    averaged over the number of joints.  When ``use_target_weight`` is
    set, both prediction and target are scaled by the per-joint weight
    before the MSE is taken.
    """

    def __init__(self, use_target_weight):
        super(JointsMSELoss, self).__init__()
        self.criterion = nn.MSELoss(reduction='mean')
        self.use_target_weight = use_target_weight

    def forward(self, output, target, target_weight):
        batch = output.size(0)
        joints = output.size(1)
        # Split into per-joint flattened heatmaps: tuples of (batch, 1, H*W).
        pred_maps = output.reshape((batch, joints, -1)).split(1, 1)
        gt_maps = target.reshape((batch, joints, -1)).split(1, 1)
        total = 0
        for j in range(joints):
            pred = pred_maps[j].squeeze()
            gt = gt_maps[j].squeeze()
            if self.use_target_weight:
                w = target_weight[:, j]
                total += 0.5 * self.criterion(pred.mul(w), gt.mul(w))
            else:
                total += 0.5 * self.criterion(pred, gt)
        return total / joints
def get_max_preds(batch_heatmaps):
    """Locate the per-joint maximum in a batch of score maps.

    Args:
        batch_heatmaps: numpy.ndarray([batch_size, num_joints, height, width]).

    Returns:
        Tuple of (preds, maxvals): predicted (x, y) locations with shape
        (batch, joints, 2) and the peak scores with shape (batch, joints, 1).
        Joints whose peak score is not positive are zeroed out.
    """
    assert isinstance(batch_heatmaps, np.ndarray), \
        'batch_heatmaps should be numpy.ndarray'
    assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'

    n_batch = batch_heatmaps.shape[0]
    n_joints = batch_heatmaps.shape[1]
    map_width = batch_heatmaps.shape[3]

    flat = batch_heatmaps.reshape((n_batch, n_joints, -1))
    best_idx = np.argmax(flat, 2).reshape((n_batch, n_joints, 1))
    maxvals = np.amax(flat, 2).reshape((n_batch, n_joints, 1))

    # Convert flat argmax indices into (x, y) heatmap coordinates.
    preds = np.tile(best_idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = (preds[:, :, 0]) % map_width
    preds[:, :, 1] = np.floor((preds[:, :, 1]) / map_width)

    # Zero out joints with a non-positive peak score.
    visible = np.tile(np.greater(maxvals, 0.0), (1, 1, 2)).astype(np.float32)
    return preds * visible, maxvals
def get_final_preds(batch_heatmaps, center, scale):
    """Decode heatmaps into image-space keypoint coordinates.

    Finds per-joint argmax locations, nudges each one a quarter pixel
    towards the stronger neighbouring activation, then maps the result
    back into the original image frame via ``transform_preds``.
    """
    coords, maxvals = get_max_preds(batch_heatmaps)
    hm_height = batch_heatmaps.shape[2]
    hm_width = batch_heatmaps.shape[3]

    # Quarter-pixel refinement towards the higher neighbouring activation
    # (skipped at the heatmap borders where neighbours are missing).
    for sample in range(coords.shape[0]):
        for joint in range(coords.shape[1]):
            heatmap = batch_heatmaps[sample][joint]
            px = int(math.floor(coords[sample][joint][0] + 0.5))
            py = int(math.floor(coords[sample][joint][1] + 0.5))
            if 1 < px < hm_width - 1 and 1 < py < hm_height - 1:
                gradient = np.array(
                    [
                        heatmap[py][px + 1] - heatmap[py][px - 1],
                        heatmap[py + 1][px] - heatmap[py - 1][px]
                    ]
                )
                coords[sample][joint] += np.sign(gradient) * .25

    preds = coords.copy()
    # Transform heatmap coordinates back into the original image space.
    for sample in range(coords.shape[0]):
        preds[sample] = transform_preds(
            coords[sample], center[sample], scale[sample], [hm_width, hm_height]
        )
    return preds, maxvals
def calc_dists(preds, target, normalize):
    """Normalised L2 distance per (joint, sample); -1 marks invalid targets.

    A target is considered valid only when both of its coordinates exceed 1
    (i.e. the joint is actually annotated); otherwise the distance is -1.
    Returns an array of shape (num_joints, batch_size).
    """
    preds = preds.astype(np.float32)
    target = target.astype(np.float32)
    dists = np.zeros((preds.shape[1], preds.shape[0]))
    for sample in range(preds.shape[0]):
        for joint in range(preds.shape[1]):
            if target[sample, joint, 0] > 1 and target[sample, joint, 1] > 1:
                scaled_pred = preds[sample, joint, :] / normalize[sample]
                scaled_target = target[sample, joint, :] / normalize[sample]
                dists[joint, sample] = np.linalg.norm(scaled_pred - scaled_target)
            else:
                dists[joint, sample] = -1
    return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
dist_cal = np.not_equal(dists, -1)
| |
<gh_stars>1-10
import time
import json
from service_fabrik_backup_restore import parse_options, create_iaas_client
def main():
    """Back up a service instance's persistent data to the IaaS blob store.

    Supports two modes from the parsed configuration: 'online' (snapshot
    the live persistent volume) and 'offline' (stop the service job first,
    archive, then restart).  Aws/Azure/Gcp/Ali take snapshot-based paths;
    every other landscape copies data through temporarily attached volumes.
    Every failed step aborts the run via iaas_client.exit().
    """
    # +-> Definition of constants
    DIRECTORY_PERSISTENT = '/var/vcap/store'
    DIRECTORY_SNAPSHOT = '/tmp/service-fabrik-backup/snapshot'
    DIRECTORY_UPLOADS = '/tmp/service-fabrik-backup/uploads'
    # +-> Initialization: Argument Parsing, IaaS-Client Creation
    configuration = parse_options('backup')
    iaas_client = create_iaas_client('backup', configuration, DIRECTORY_PERSISTENT, [
        DIRECTORY_SNAPSHOT, DIRECTORY_UPLOADS], 10, 18000)
    # ------------------------------------------ BACKUP START ----------------------------------------------------------
    backup_guid = configuration['backup_guid']
    backup_type = configuration['type']
    instance_id = configuration['instance_id']
    # .title() normalizes e.g. 'aws' -> 'Aws' for the comparisons below.
    landscape = configuration['iaas'].title()
    iaas_client.initialize()
    try:
        tarball_files_name = 'blueprint-files.tar.gz.gpg'
        tarball_files_path = DIRECTORY_UPLOADS + '/' + tarball_files_name
        metadata_files_name = 'blueprint-metadata.json'
        metadata_files_path = '/tmp' + '/' + metadata_files_name
        # +-> Get the id of the persistent volume attached to this instance
        volume_persistent = iaas_client.get_persistent_volume_for_instance(
            instance_id)
        if not volume_persistent:
            iaas_client.exit(
                'Could not find the persistent volume attached to this instance: {}.'.format(instance_id))
        if backup_type == 'online':
            # +-> Create a snapshot of the persistent volume
            snapshot_store = iaas_client.create_snapshot(volume_persistent.id)
            if not snapshot_store:
                iaas_client.exit('Could not find the snapshot of the persistent volume {}.'
                                 .format(DIRECTORY_PERSISTENT))
            # Generic IaaS path (e.g. OpenStack): copy data via attached volumes.
            if landscape != 'Aws' and landscape != 'Azure' and landscape != 'Gcp' and landscape != 'Ali':
                # +-> Create a volume from this snapshot whose contents will be backed-up
                volume_snapshot = iaas_client.create_volume(
                    snapshot_store.size, snapshot_store.id)
                if not volume_snapshot:
                    iaas_client.exit(
                        'Could not create a volume from the {} snapshot.'.format(DIRECTORY_PERSISTENT))
                # +-> Create a volume where the encrypted tarballs/files will be stored on (to be uploaded)
                volume_uploads = iaas_client.create_volume(snapshot_store.size)
                if not volume_uploads:
                    iaas_client.exit(
                        'Could not create a volume for the uploads.')
                # +-> Attach the snapshot volume to the instance
                attachment_volume_snapshot = iaas_client.create_attachment(
                    volume_snapshot.id, instance_id)
                if not attachment_volume_snapshot:
                    iaas_client.exit('Could not attach the snapshot volume with id {} to instance with id {}.'
                                     .format(volume_snapshot.id, instance_id))
                # +-> Attach the upload volume to the instance
                attachment_volume_uploads = iaas_client.create_attachment(
                    volume_uploads.id, instance_id)
                if not attachment_volume_uploads:
                    iaas_client.exit('Could not attach the upload volume with id {} to instance with id {}.'
                                     .format(volume_uploads.id, instance_id))
                # +-> Find the mountpoint of the snapshot volume
                mountpoint_volume_snapshot = iaas_client.get_mountpoint(
                    volume_snapshot.id, '1')
                if not mountpoint_volume_snapshot:
                    iaas_client.exit('Could not determine the mountpoint for the snapshot volume (id: {}).'
                                     .format(volume_snapshot.id))
                # +-> Find the mountpoint of the upload volume
                mountpoint_volume_uploads = iaas_client.get_mountpoint(
                    volume_uploads.id)
                if not mountpoint_volume_uploads:
                    iaas_client.exit('Could not determine the mountpoint for the upload volume (id: {}).'
                                     .format(volume_uploads.id))
                # +-> Create temporary directories, format the upload volume and mount them to these directories
                if not iaas_client.delete_directory(DIRECTORY_SNAPSHOT):
                    iaas_client.exit(
                        'Could not remove the following directory: {}.'.format(DIRECTORY_SNAPSHOT))
                if not iaas_client.delete_directory(DIRECTORY_UPLOADS):
                    iaas_client.exit(
                        'Could not remove the following directory: {}.'.format(DIRECTORY_UPLOADS))
                if not iaas_client.create_directory(DIRECTORY_SNAPSHOT):
                    iaas_client.exit(
                        'Could not create the following directory: {}'.format(DIRECTORY_SNAPSHOT))
                if not iaas_client.create_directory(DIRECTORY_UPLOADS):
                    iaas_client.exit(
                        'Could not create the following directory: {}'.format(DIRECTORY_UPLOADS))
                if not iaas_client.format_device(mountpoint_volume_uploads):
                    iaas_client.exit('Could not format the following device: {}'.format(
                        mountpoint_volume_uploads))
                if not iaas_client.mount_device(mountpoint_volume_snapshot, DIRECTORY_SNAPSHOT):
                    iaas_client.exit('Could not mount the device {} to the directory {}.'
                                     .format(mountpoint_volume_snapshot, DIRECTORY_SNAPSHOT))
                if not iaas_client.mount_device(mountpoint_volume_uploads, DIRECTORY_UPLOADS):
                    iaas_client.exit('Could not mount the device {} to the directory {}.'
                                     .format(mountpoint_volume_uploads, DIRECTORY_UPLOADS))
                # +-> Create tarball of the contents of the persistent volume, encrypt it, and upload it to blob store
                # +-> Service Fabrik forces the services to store their blobs in a pseudo-folder named with the backup_guid
                if not iaas_client.create_and_encrypt_tarball_of_directory('{}/blueprint/files'
                                                                           .format(DIRECTORY_PERSISTENT),
                                                                           tarball_files_path):
                    iaas_client.exit('Could not create and encrypt a tarball of the directory {}'
                                     .format(DIRECTORY_PERSISTENT))
                if not iaas_client.upload_to_blobstore(tarball_files_path, '{}/{}'.format(backup_guid, tarball_files_name)):
                    iaas_client.exit(
                        'Could not upload the tarball {}.'.format(tarball_files_path))
                # +-> Unmount the volumes and remove the temporary directories
                if not iaas_client.unmount_device(mountpoint_volume_uploads):
                    iaas_client.exit('Could not unmount the device {}.'.format(
                        mountpoint_volume_uploads))
                if not iaas_client.unmount_device(mountpoint_volume_snapshot):
                    iaas_client.exit('Could not unmount the device {}.'.format(
                        mountpoint_volume_snapshot))
                if not iaas_client.delete_directory(DIRECTORY_SNAPSHOT):
                    iaas_client.exit(
                        'Could not remove the following directory: {}.'.format(DIRECTORY_SNAPSHOT))
                if not iaas_client.delete_directory(DIRECTORY_UPLOADS):
                    iaas_client.exit(
                        'Could not remove the following directory: {}.'.format(DIRECTORY_UPLOADS))
                # +-> Detach the snapshot volume and the upload volume from the instance
                if not iaas_client.delete_attachment(attachment_volume_uploads.volume_id, instance_id):
                    iaas_client.exit('Could not detach the upload volume with id {} to instance with id {}.'
                                     .format(attachment_volume_uploads.volume_id, instance_id))
                if not iaas_client.delete_attachment(attachment_volume_snapshot.volume_id, instance_id):
                    iaas_client.exit('Could not detach the snapshot with id {} to instance with id {}.'
                                     .format(attachment_volume_snapshot.volume_id, instance_id))
                # +-> Delete the upload volume and the snapshot volume
                if not iaas_client.delete_volume(volume_uploads.id):
                    iaas_client.exit(
                        'Could not delete the upload volume with id {}.'.format(volume_uploads.id))
                if not iaas_client.delete_volume(volume_snapshot.id):
                    iaas_client.exit(
                        'Could not delete the snapshot volume with id {}.'.format(volume_snapshot.id))
            if landscape == 'Aws':
                # +-> Copy Snapshot Function to encrypt the Snapshot
                snapshot_store_encrypted = iaas_client.copy_snapshot(
                    snapshot_store.id)
                if not snapshot_store_encrypted:
                    iaas_client.exit('Could not create the encrypted copy of the snapshot {}.'
                                     .format(DIRECTORY_PERSISTENT))
                with open(metadata_files_path, 'w') as f:
                    f.write(json.dumps(
                        {'snapshotId': snapshot_store_encrypted.id}))
                # +-> Keep agent metadata
                if not iaas_client.upload_to_blobstore(metadata_files_path, '{}/{}'.format(backup_guid, metadata_files_name)):
                    iaas_client.exit(
                        'Could not upload the tarball {}.'.format(metadata_files_path))
            if landscape == 'Azure' or landscape == 'Gcp' or landscape == 'Ali':
                with open(metadata_files_path, 'w') as f:
                    f.write(json.dumps(
                        {'snapshotId': snapshot_store.id}))
                # +-> Keep agent metadata
                if not iaas_client.upload_to_blobstore(metadata_files_path, '{}/{}'.format(backup_guid, metadata_files_name)):
                    iaas_client.exit(
                        'Could not upload the tarball {}.'.format(metadata_files_path))
            # +-> Delete the snapshot of the persistent volume
            # (Azure/Gcp/Ali keep their snapshot: it IS the backup referenced in the metadata.)
            if landscape != 'Azure' and landscape != 'Gcp' and landscape != 'Ali' and not iaas_client.delete_snapshot(snapshot_store.id):
                iaas_client.exit(
                    'Could not delete the snapshot with id {}.'.format(snapshot_store.id))
        elif backup_type == 'offline':
            # +-> Stop the service job
            iaas_client.stop_service_job()
            # Generic IaaS path (e.g. OpenStack): copy data via an attached upload volume.
            if landscape != 'Aws' and landscape != 'Azure' and landscape != 'Gcp' and landscape != 'Ali':
                # +-> Create a volume where the encrypted tarballs/files will be stored on (to be uploaded)
                volume_uploads = iaas_client.create_volume(
                    volume_persistent.size)
                if not volume_uploads:
                    iaas_client.exit(
                        'Could not create a volume for the uploads.')
                # +-> Attach the upload volume to the instance
                attachment_volume_uploads = iaas_client.create_attachment(
                    volume_uploads.id, instance_id)
                if not attachment_volume_uploads:
                    iaas_client.exit('Could not attach the upload volume with id {} to instance with id {}.'
                                     .format(volume_uploads.id, instance_id))
                # +-> Find the mountpoint of the upload volume
                mountpoint_volume_uploads = iaas_client.get_mountpoint(
                    volume_uploads.id)
                if not mountpoint_volume_uploads:
                    iaas_client.exit('Could not determine the mountpoint for the upload volume (id: {}).'
                                     .format(volume_uploads.id))
                # +-> Create temporary directory, format the upload volume and mount it to this directory
                if not iaas_client.delete_directory(DIRECTORY_UPLOADS):
                    iaas_client.exit(
                        'Could not remove the following directory: {}.'.format(DIRECTORY_UPLOADS))
                if not iaas_client.create_directory(DIRECTORY_UPLOADS):
                    iaas_client.exit(
                        'Could not create the following directory: {}'.format(DIRECTORY_UPLOADS))
                if not iaas_client.format_device(mountpoint_volume_uploads):
                    iaas_client.exit('Could not format the following device: {}'.format(
                        mountpoint_volume_uploads))
                if not iaas_client.mount_device(mountpoint_volume_uploads, DIRECTORY_UPLOADS):
                    iaas_client.exit('Could not mount the device {} to the directory {}.'
                                     .format(mountpoint_volume_uploads, DIRECTORY_UPLOADS))
                # +-> Wait for the service job to be stopped before starting the content encryption
                if not iaas_client.wait_for_service_job_status('not monitored'):
                    iaas_client.exit('Could not stop the service job.')
                # +-> Create tarball of the contents of the persistent volume and encrypt it
                if not iaas_client.create_and_encrypt_tarball_of_directory('{}/blueprint/files'
                                                                           .format(DIRECTORY_PERSISTENT),
                                                                           tarball_files_path):
                    iaas_client.exit('Could not create and encrypt a tarball of the directory {}'
                                     .format(DIRECTORY_PERSISTENT))
                # +-> Upload the tarball to the blob store
                if not iaas_client.upload_to_blobstore(tarball_files_path, '{}/{}'.format(backup_guid, tarball_files_name)):
                    iaas_client.exit(
                        'Could not upload the tarball {}.'.format(tarball_files_path))
                # +-> Unmount the volumes and remove the temporary directories
                if not iaas_client.unmount_device(mountpoint_volume_uploads):
                    iaas_client.exit('Could not unmount the device {}.'.format(
                        mountpoint_volume_uploads))
                if not iaas_client.delete_directory(DIRECTORY_UPLOADS):
                    iaas_client.exit(
                        'Could not remove the following directory: {}.'.format(DIRECTORY_UPLOADS))
                # +-> Detach the upload volume from the instance
                if not iaas_client.delete_attachment(attachment_volume_uploads.volume_id, instance_id):
                    iaas_client.exit('Could not detach the upload volume with id {} to instance with id {}.'
                                     .format(attachment_volume_uploads.volume_id, instance_id))
                # +-> Delete the upload volume and the snapshot volume
                if not iaas_client.delete_volume(volume_uploads.id):
                    iaas_client.exit(
                        'Could not delete the upload volume with id {}.'.format(volume_uploads.id))
            if landscape == 'Aws' or landscape == 'Azure' or landscape == 'Gcp' or landscape == 'Ali':
                snapshot_store = None
                if landscape == 'Aws':
                    # +-> Copy Snapshot Function to encrypt the Snapshot
                    snapshot_store = iaas_client.copy_snapshot(
                        volume_persistent.id)
                    if not snapshot_store:
                        iaas_client.exit('Could not create the encrypted copy of the snapshot {}.'
                                         .format(DIRECTORY_PERSISTENT))
                if landscape == 'Azure' or landscape == 'Gcp' or landscape == 'Ali':
                    # +-> Create a snapshot of the persistent volume
                    snapshot_store = iaas_client.create_snapshot(
                        volume_persistent.id)
                    if not snapshot_store:
                        iaas_client.exit('Could not find the snapshot of the persistent volume {}.'
                                         .format(DIRECTORY_PERSISTENT))
                with open(metadata_files_path, 'w') as f:
                    f.write(json.dumps(
                        {'snapshotId': snapshot_store.id}))
                # +-> Keep agent metadata
                if not iaas_client.upload_to_blobstore(metadata_files_path, '{}/{}'.format(backup_guid, metadata_files_name)):
                    iaas_client.exit(
                        'Could not upload the tarball {}.'.format(metadata_files_path))
            # +-> Start the service job
            iaas_client.start_service_job()
            # +-> Wait for the service job to be running again
            if not iaas_client.wait_for_service_job_status('running'):
                iaas_client.exit(
                    'Could not get the service job to be running again.')
        iaas_client.finalize()
    except Exception as error:
        iaas_client.exit('An unexpected exception occurred: {}'.format(error))
# ------------------------------------------- | |
<reponame>gxdai/solt<filename>tests/test_transforms.py
import copy
import random
from contextlib import ExitStack as does_not_raise
import cv2
import numpy as np
import pytest
import solt.core as slc
import solt.transforms as slt
from solt.constants import ALLOWED_INTERPOLATIONS, ALLOWED_PADDINGS
from .fixtures import *
def test_img_mask_vertical_flip(img_3x4, mask_3x4):
    """A vertical flip must transform the image and the mask identically."""
    container = slc.DataContainer((img_3x4, mask_3x4), "IM")
    pipeline = slc.Stream([slt.Flip(p=1, axis=0)])
    container = pipeline(container, return_torch=False)

    flipped_img = container[0][0]
    flipped_mask = container[1][0]

    rows, cols = mask_3x4.shape
    assert np.array_equal(cv2.flip(img_3x4, 0).reshape(rows, cols, 1), flipped_img)
    assert np.array_equal(cv2.flip(mask_3x4, 0), flipped_mask)
def test_flip_invalid_axis():
    """Flip must reject an out-of-range axis at construction time."""
    with pytest.raises(ValueError):
        slt.Flip(p=1, axis=100)
def test_img_mask_mask_vertical_flip(img_3x4, mask_3x4):
    """Vertical flip on an 'IMM' container flips the image and BOTH masks.

    The original test built a three-item container but never inspected the
    second mask (dc[2]); it is now asserted as well.
    """
    img, mask = img_3x4, mask_3x4
    dc = slc.DataContainer((img, mask, mask), "IMM")
    stream = slc.Stream([slt.Flip(p=1, axis=0)])
    dc = stream(dc, return_torch=False)
    img_res, _, _ = dc[0]
    mask_res, _, _ = dc[1]
    mask2_res, _, _ = dc[2]
    h, w = mask.shape
    assert np.array_equal(cv2.flip(img, 0).reshape(h, w, 1), img_res)
    assert np.array_equal(cv2.flip(mask, 0), mask_res)
    # Previously unchecked: the second mask must receive the identical flip.
    assert np.array_equal(cv2.flip(mask, 0), mask2_res)
def test_img_mask_horizontal_flip(img_3x4, mask_3x4):
    """A horizontal flip must be applied identically to an image and its mask."""
    image, target = img_3x4, mask_3x4
    pipeline = slc.Stream([slt.Flip(p=1, axis=1)])
    out = pipeline(slc.DataContainer((image, target), "IM"), return_torch=False)
    rows, cols = target.shape
    assert np.array_equal(cv2.flip(image, 1).reshape(rows, cols, 1), out[0][0])
    assert np.array_equal(cv2.flip(target, 1), out[1][0])
def test_img_mask_vertical_horizontal_flip(img_3x4, mask_3x4):
    """Two chained flips (vertical then horizontal) compose on image and mask."""
    image, target = img_3x4, mask_3x4
    pipeline = slc.Stream([slt.Flip(p=1, axis=0), slt.Flip(p=1, axis=1)])
    out = pipeline(slc.DataContainer((image, target), "IM"), return_torch=False)
    rows, cols = target.shape
    expected_img = cv2.flip(cv2.flip(image, 0), 1).reshape(rows, cols, 1)
    assert np.array_equal(expected_img, out[0][0])
    assert np.array_equal(cv2.flip(cv2.flip(target, 0), 1), out[1][0])
def test_img_mask_vertical_horizontal_flip_negative_axes(img_3x4, mask_3x4):
    """axis=-1 flips the image and mask along both axes in one transform."""
    image, target = img_3x4, mask_3x4
    out = slt.Flip(p=1, axis=-1)(slc.DataContainer((image, target), "IM"))
    rows, cols = target.shape
    expected_img = cv2.flip(cv2.flip(image, 0), 1).reshape(rows, cols, 1)
    assert np.array_equal(expected_img, out[0][0])
    assert np.array_equal(cv2.flip(cv2.flip(target, 0), 1), out[1][0])
def test_img_mask__kptsvertical_horizontal_flip_negative_axes(img_3x4, mask_3x4):
    """axis=-1 flips image, mask and keypoints along both axes."""
    image, target = img_3x4, mask_3x4
    pts = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).reshape((4, 2))
    kpts = slc.Keypoints(pts.copy(), 3, 4)
    out = slt.Flip(p=1, axis=-1)(slc.DataContainer((image, target, kpts), "IMP"))
    flipped_img = out[0][0]
    flipped_mask = out[1][0]
    flipped_kpts = out[2][0]
    rows, cols = target.shape
    expected_img = cv2.flip(cv2.flip(image, 0), 1).reshape(rows, cols, 1)
    assert np.array_equal(expected_img, flipped_img)
    assert np.array_equal(cv2.flip(cv2.flip(target, 0), 1), flipped_mask)
    # Mirror coordinates inside the 4x3 (w x h) frame along both axes.
    expected_pts = np.stack([4 - 1 - pts[:, 0], 3 - 1 - pts[:, 1]], axis=1)
    assert np.array_equal(expected_pts, flipped_kpts.data)
def test_keypoints_vertical_flip():
    """Vertical flip reflects keypoints of a 2x2 grid about the horizontal axis."""
    pts = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).reshape((4, 2))
    dc_out = slt.Flip(p=1, axis=0)(slc.DataContainer((slc.Keypoints(pts, 2, 2),), "P"))
    expected = np.array([[0, 1], [0, 0], [1, 1], [1, 0]]).reshape((4, 2))
    assert np.array_equal(dc_out[0][0].data, expected)
def test_keypoints_horizontal_flip_within_stream():
    """A Stream-wrapped horizontal flip reflects keypoints about the vertical axis."""
    pts = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).reshape((4, 2))
    pipeline = slc.Stream([slt.Flip(p=1, axis=1)])
    dc_out = pipeline(slc.DataContainer((slc.Keypoints(pts, 2, 2),), "P"), return_torch=False)
    expected = np.array([[1, 0], [1, 1], [0, 0], [0, 1]]).reshape((4, 2))
    assert np.array_equal(dc_out[0][0].data, expected)
def test_keypoints_vertical_flip_within_stream():
    """A Stream-wrapped vertical flip reflects keypoints about the horizontal axis."""
    pts = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).reshape((4, 2))
    pipeline = slc.Stream([slt.Flip(p=1, axis=0)])
    dc_out = pipeline(slc.DataContainer((slc.Keypoints(pts, 2, 2),), "P"), return_torch=False)
    expected = np.array([[0, 1], [0, 0], [1, 1], [1, 0]]).reshape((4, 2))
    assert np.array_equal(dc_out[0][0].data, expected)
def test_rotate_range_none():
    """A None angle range collapses to the degenerate interval (0, 0)."""
    assert slt.Rotate(None).angle_range == (0, 0)
@pytest.mark.parametrize("angle", [1, 2.5])
def test_rotate_range_conversion_from_number(angle):
    """A scalar angle becomes the symmetric interval (-angle, angle)."""
    assert slt.Rotate(angle_range=angle).angle_range == (-angle, angle)
def test_shear_range_none():
    """None shear ranges default to (0, 0) on both axes."""
    shear = slt.Shear(None, None)
    assert shear.range_x == (0, 0)
    assert shear.range_y == (0, 0)
@pytest.mark.parametrize("ignore_state", [True, False])
@pytest.mark.parametrize(
    "transform_settings",
    [
        None,
        {0: {"interpolation": "nearest", "padding": "z"}},
        {0: {"interpolation": "nearest", "padding": "r"}},
        {0: {"interpolation": "bilinear", "padding": "z"}},
        {0: {"interpolation": "bilinear", "padding": "r"}},
        {0: {"interpolation": "bicubic", "padding": "z"}},
        {0: {"interpolation": "bicubic", "padding": "r"}},
        {0: {"interpolation": "area", "padding": "z"}},
        {0: {"interpolation": "area", "padding": "r"}},
        {0: {"interpolation": "lanczos", "padding": "z"}},
        {0: {"interpolation": "lanczos", "padding": "r"}},
    ],
)
def test_rotate_90_img_mask_keypoints_destructive(img_3x3, mask_3x3, transform_settings, ignore_state):
    """A 90-degree Rotate must reproduce OpenCV's warpAffine result for the
    image and mask, rotate the keypoints accordingly, and pass the label
    through unchanged, under every per-item interpolation/padding override.
    """
    # Setting up the data
    kpts_data = np.array([[0, 0], [0, 2], [2, 2], [2, 0]]).reshape((4, 2))
    kpts = slc.Keypoints(kpts_data, 3, 3)
    img, mask = img_3x3, mask_3x3
    H, W = mask.shape
    # deepcopy: the container may consume/modify the settings dict in place
    dc = slc.DataContainer((img, mask, kpts, 1), "IMPL", transform_settings=copy.deepcopy(transform_settings),)
    # Defining the 90 degrees transform (clockwise)
    stream = slt.Rotate(angle_range=(90, 90), p=1, ignore_state=ignore_state)
    dc_res = stream(dc)
    img_res, _, _ = dc_res[0]
    mask_res, _, _ = dc_res[1]
    kpts_res, _, _ = dc_res[2]
    label_res, _, _ = dc_res[3]
    # OpenCV's positive angles rotate counter-clockwise, hence -90 here.
    M = cv2.getRotationMatrix2D((W // 2, H // 2), -90, 1)
    # Expected defaults when no per-item transform_settings are supplied.
    img_inter = ALLOWED_INTERPOLATIONS["bicubic"]
    img_pad = ALLOWED_PADDINGS["z"]
    if transform_settings is not None:
        img_inter = ALLOWED_INTERPOLATIONS[transform_settings[0]["interpolation"]]
        img_pad = ALLOWED_PADDINGS[transform_settings[0]["padding"]]
    expected_img_res = cv2.warpAffine(img, M, (W, H), flags=img_inter, borderMode=img_pad).reshape((H, W, 1))
    expected_mask_res = cv2.warpAffine(mask, M, (W, H))
    expected_kpts_res = np.array([[2, 0], [0, 0], [0, 2], [2, 2]]).reshape((4, 2))
    assert np.array_equal(expected_img_res, img_res)
    assert np.array_equal(expected_mask_res, mask_res)
    # almost_equal: keypoint rotation goes through float matrix math
    np.testing.assert_array_almost_equal(expected_kpts_res, kpts_res.data)
    assert label_res == 1
@pytest.mark.parametrize("k", list(range(-4, 5)))
def test_rotate_90_img_mask_nondestructive(k, img_3x3, mask_3x3):
    """Rotate90 by k quarter-turns must match np.rot90 with the opposite sign."""
    image, target = img_3x3, mask_3x3
    rows, cols = target.shape
    res = slt.Rotate90(k=k, p=1)(slc.DataContainer((image, target), "IM"))
    assert np.array_equal(np.rot90(image, -k).reshape((rows, cols, 1)), res[0][0])
    assert np.array_equal(np.rot90(target, -k), res[1][0])
@pytest.mark.parametrize("k", [None, "123", 123.0])
def test_rotate_nondestructive_does_not_accept_non_int_k(k):
    """Rotate90 requires an integer number of quarter-turns."""
    with pytest.raises(TypeError):
        slt.Rotate90(k=k)
@pytest.mark.parametrize("k", list(range(-4, 5)))
def test_rotate_90_transforms_have_same_behaviour(k, img_6x6_rgb):
    """Rotate(k*90) and Rotate90(k) must sample identical transform matrices."""
    container = slc.DataContainer(img_6x6_rgb, "I")
    generic = slt.Rotate(angle_range=(k * 90, k * 90), p=1)
    generic.sample_transform(container)
    quarter = slt.Rotate90(k=k, p=1)
    quarter.sample_transform(container)
    assert np.array_equal(generic.state_dict["transform_matrix"], quarter.state_dict["transform_matrix"])
def test_zoom_x_axis_odd(img_5x5):
    """Halving x only: width halves (floor division), height is unchanged."""
    transform = slt.Scale(range_x=(0.5, 0.5), range_y=(1, 1), same=False, p=1, ignore_fast_mode=True)
    rows, cols = img_5x5.shape[0], img_5x5.shape[1]
    out = transform(slc.DataContainer((img_5x5,), "I"))[0][0]
    assert out.shape[0] == rows
    assert out.shape[1] == cols // 2
def test_scale_x_axis_even(img_6x6):
    """Halving x on an even-sized image: width halves exactly, height unchanged."""
    transform = slt.Scale((0.5, 0.5), (1, 1), same=False, p=1, ignore_fast_mode=True)
    rows, cols = img_6x6.shape[0], img_6x6.shape[1]
    out = transform(slc.DataContainer((img_6x6,), "I"))[0][0]
    assert out.shape[0] == rows
    assert out.shape[1] == cols // 2
def test_scale_xy_axis_odd(img_5x5):
    """x halves (floor) while y triples on an odd-sized image."""
    transform = slt.Scale((0.5, 0.5), (3, 3), same=False, p=1, ignore_fast_mode=True)
    rows, cols = img_5x5.shape[0], img_5x5.shape[1]
    out = transform(slc.DataContainer((img_5x5,), "I"))[0][0]
    assert out.shape[0] == rows * 3
    assert out.shape[1] == cols // 2
def test_scale_xy_axis_even(img_6x6):
    """x halves while y doubles on an even-sized image."""
    transform = slt.Scale((0.5, 0.5), (2, 2), same=False, p=1, ignore_fast_mode=True)
    rows, cols = img_6x6.shape[0], img_6x6.shape[1]
    out = transform(slc.DataContainer((img_6x6,), "I"))[0][0]
    assert out.shape[0] == rows * 2
    assert out.shape[1] == cols // 2
def test_scale_img_mask(img_3x4, mask_3x4):
    """Scale (0.5 in x, 2 in y) resizes the image and its mask identically.

    Fixes two defects in the original test:
    * ``assert a == b, c == d`` treated the second comparison as the assert
      *message*, so the width was never actually checked;
    * the stream was executed twice (once per retrieved item), transforming
      the container a second time for no reason.
    """
    img_mask_3x4 = img_3x4, mask_3x4
    stream = slt.Scale((0.5, 0.5), (2, 2), same=False, p=1, ignore_fast_mode=True)
    dc = slc.DataContainer(img_mask_3x4, "IM")
    H, W = img_mask_3x4[0].shape[0], img_mask_3x4[0].shape[1]
    res = stream(dc)
    img_res = res[0][0]
    mask_res = res[1][0]
    assert H * 2 == img_res.shape[0]
    assert W // 2 == img_res.shape[1]
    assert H * 2 == mask_res.shape[0]
    assert W // 2 == mask_res.shape[1]
def test_keypoints_assert_reflective(img_3x3, mask_3x3):
    """Reflective padding is incompatible with keypoints and must raise."""
    pts = np.array([[0, 0], [0, 2], [2, 2], [2, 0]]).reshape((4, 2))
    kpts = slc.Keypoints(pts, 3, 3)
    dc = slc.DataContainer((img_3x3, mask_3x3, kpts,), "IMP")
    rotate = slt.Rotate(angle_range=(20, 20), p=1, padding="r")
    with pytest.raises(ValueError):
        rotate(dc)
def test_padding_img_2x2_4x4(img_2x2):
    """A 2x2 image padded to a 4x4 target grows to 4x4."""
    res = slt.Pad((4, 4))(slc.DataContainer((img_2x2,), "I"))
    padded = res[0][0]
    assert padded.shape[0] == 4
    assert padded.shape[1] == 4
def test_padding_img_2x2_2x2(img_2x2):
    """Padding to the image's own size is a no-op on shape."""
    res = slt.Pad((2, 2))(slc.DataContainer((img_2x2,), "I"))
    padded = res[0][0]
    assert padded.shape[0] == 2
    assert padded.shape[1] == 2
def test_padding_img_mask_2x2_4x4(img_2x2, mask_2x2):
    """Image and mask are padded from 2x2 to 4x4 together."""
    res = slt.Pad((4, 4))(slc.DataContainer((img_2x2, mask_2x2), "IM"))
    for item_idx in (0, 1):
        assert res[item_idx][0].shape[:2] == (4, 4)
def test_padding_img_2x2_3x3(img_2x2):
    """A 2x2 image can be padded to an odd 3x3 target."""
    res = slt.Pad((3, 3))(slc.DataContainer((img_2x2,), "I"))
    padded = res[0][0]
    assert padded.shape[0] == 3
    assert padded.shape[1] == 3
def test_padding_img_mask_2x2_3x3(img_2x2, mask_2x2):
    """Image and mask are padded from 2x2 to an odd 3x3 target together."""
    res = slt.Pad((3, 3))(slc.DataContainer((img_2x2, mask_2x2), "IM"))
    for item_idx in (0, 1):
        assert res[item_idx][0].shape[:2] == (3, 3)
def test_padding_img_mask_3x4_3x4(img_3x4, mask_3x4):
    """Pad((4, 3)) on a 3x4 pair leaves the (rows=3, cols=4) shape intact."""
    res = slt.Pad((4, 3))(slc.DataContainer((img_3x4, mask_3x4), "IM"))
    for item_idx in (0, 1):
        assert res[item_idx][0].shape[:2] == (3, 4)
def test_padding_img_mask_3x4_5x5(img_3x4, mask_3x4):
    """A non-square 3x4 image/mask pair is padded up to 5x5."""
    res = slt.Pad((5, 5))(slc.DataContainer((img_3x4, mask_3x4), "IM"))
    for item_idx in (0, 1):
        assert res[item_idx][0].shape[:2] == (5, 5)
def test_pad_to_20x20_img_mask_keypoints_3x3(img_3x3, mask_3x3):
# Setting up the data
kpts_data = np.array([[0, 0], [0, 2], [2, 2], [2, 0]]).reshape((4, 2))
kpts = slc.Keypoints(kpts_data, 3, 3)
img, mask = img_3x3, mask_3x3
dc = slc.DataContainer((img, mask, kpts,), "IMP")
transf = slt.Pad((20, 20))
res = transf(dc)
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 10:07:16 2020
PSDM Tools
----------
This file contains tools and associated functions geared at aiding in the
analysis of GAC adsorption data.
Functions Contained:
isotherm_fit()
predict_full_scale()
specific_throughput()
@author: <NAME>
EPA Disclaimer
==============
The United States Environmental Protection Agency (EPA) GitHub project code is
provided on an "as is" basis and the user assumes responsibility for its use.
EPA has relinquished control of the information and no longer has
responsibility to protect the integrity, confidentiality, or availability of
the information. Any reference to specific commercial products, processes, or
services by service mark, trademark, manufacturer, or otherwise, does not
constitute or imply their endorsement, recommendation or favoring by EPA. The
EPA seal and logo shall not be used in any manner to imply endorsement of any
commercial product or activity by EPA or the United States Government.
By submitting a pull request, you make an agreement with EPA that you will not
submit a claim of compensation for services rendered to EPA or any other
federal agency. Further, you agree not to charge the time you spend developing
software code related to this project to any federal grant or cooperative
agreement.
"""
import pandas as pd
import warnings
warnings.simplefilter("ignore")
import numpy as np
import pylab as plt
from scipy import stats
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import multiprocessing as mp
import PSDM
def specific_throughput(column_specs, filter_pfas, k_data, c0, ct,
                        compound_data,
                        ebct_range=np.arange(5,46,5),
                        chem_type=['halogenated alkenes'],
                        wat_type=['Organic Free'],
                        nr=14, nz=18):
    '''
    Calculate specific throughput (volume treated per mass of carbon) for
    each compound across a range of empty bed contact times (EBCTs).

    Parameters
    ----------
    column_specs : pandas dataframe
        Dataframe with column specifications. Must provide 'L', 'diam',
        'flrt', 'carbon', 'influentID' and 'duration'.
    filter_pfas : list of strings
        List of compounds to model.
    k_data : pandas dataframe
        Dataframe that contains K & 1/n values to be used in the simulation.
        Must provide a k_data structure that has all the compounds listed in
        filter_pfas. Can contain more species, but must contain all requested
        by filter_pfas.
    c0 : float
        Initial concentration of chemicals in influent. Units: ng/L
        Assumes all chemicals have the same initial concentration.
        Does not allow for variable influent concentrations.
    ct : float
        Threshold concentration for when a bed is removed. Units: ng/L
        Defines carbon usage rate. Must be less than c0.
    compound_data : pandas dataframe
        Dataframe that contains physical parameters associated with chemical
        species included in simulation.
    ebct_range : list/iterable, optional
        Values of empty bed contact time to consider.
        The default is np.arange(5,46,5).
    chem_type : list/string, optional
        Type of chemical species to model. The default is ['halogenated alkenes'].
        Related to fouling parameters
    wat_type : list/string, optional
        Type of water to model. The default is ['Organic Free'].
        Related to fouling parameters
    nr : int, optional
        Number of radial collocation points. The default is 14.
    nz : int, optional
        Number of axial collocation points. The default is 18.

    Returns
    -------
    compound_store : dict
        Maps each compound name to a list of specific throughput values
        (volume treated per mass of carbon), one entry per EBCT in
        ebct_range, in the same order.
    '''
    # EBCT implied by the as-built column: bed volume / flow rate.
    orig_ebct = column_specs['L'] * np.pi * (column_specs['diam']**2)/\
        (4. * column_specs['flrt'])
    orig_flrt = column_specs['flrt'] * 1  # value copy, not a reference
    types = [column_specs['carbon'], column_specs['influentID']]
    multi_idx = pd.MultiIndex.from_tuples([(typ, comp)
                                           for typ in types
                                           for comp in filter_pfas],
                                          names=['type','compound'])
    idx = [0, column_specs['duration']]
    # Constant influent concentration c0 across the whole duration.
    raw_data = pd.DataFrame(c0, columns=multi_idx, index=idx)
    #Initiate storage dictionary (returned object)
    compound_store = {}
    for comp in filter_pfas:
        print(comp)
        ebct_store = []
        for ebct in ebct_range:
            ratio = orig_ebct / ebct
            #rescale flow rate of system to desired EBCT value
            column_specs['flrt'] = ratio * orig_flrt
            #need to rework this to support this step...
            column = PSDM.PSDM(column_specs, compound_data, raw_data,\
                               nz=nz, nr=nr, chem_type=chem_type,\
                               water_type=wat_type, k_data=k_data,\
                               xn_range=[k_data[comp]['1/n']],
                               test_range=[k_data[comp]['K']],
                               optimize=False)
            _, _, _, _, results = column.run_psdm_kfit(comp)
            # Last simulated day on which effluent stays below threshold ct.
            # NOTE(review): raises IndexError on immediate breakthrough
            # (no results below ct) -- confirm that is acceptable.
            treat_days = results[results < ct].dropna().index[-1]
            spec_throughput = (column.flrt/1e6 * PSDM.min_per_day * \
                               treat_days) / (column.wt/1e3)
            ebct_store.append(spec_throughput)
        compound_store[comp] = ebct_store
    # Restore the caller's flow rate: the loop above mutated the shared
    # column_specs in place, which previously leaked the last EBCT's value.
    column_specs['flrt'] = orig_flrt
    return compound_store
def predict_full_scale(PSDM_obj, filter_pfas, target_conc, \
                       total_beds, beds_per_cycle, plot=True):
    '''
    Parameters
    ----------
    PSDM_obj : PSDM class object
        Column information created from PSDM.PSDM()
        Must have 'k_data=' supplied on object creation. Should only use
        user-supplied k_values (or initial estimates will be used)
    filter_pfas : list of strings
        Example: ['compound1', 'compound2',...]
        List of compounds to model and use to esablish the target_conc.
        if only a single compound is needed : ['compound'] must be supplied
    target_conc : float
        The target concentration for cummulative modeled effluent from
        filter_pfas. Units are in ng/L (ppt).
    total_beds : INT
        Number of beds in rotation.
    beds_per_cycle : INT
        Number of beds rotated in/out for each cycle.
    plot : TYPE, optional
        DESCRIPTION. The default is True.
    Returns
    -------
    best_val : float
        Number of days per bed rotation interval. This highlights how many
        days between bed replacements.
        Example: best_val = 100 (days),
                  for 8 total beds and 2 beds per cycle
                  means 2 beds are cycled in every 100 days, for total life
                  of any 1 bed of 400 days (8/2 * 100 days)
    best_cycle : interpolating function
        Blended effluent concentration for best_val case.
        y = concentration (ng/L)
        x = time (days)
    Plots and Excel files are also generated
    '''
    # Snapshot the PSDM object's state so it can be restored before return;
    # this function temporarily overwrites these attributes.
    init_optflag = PSDM_obj.optimize_flag
    init_testrange = PSDM_obj.test_range
    init_xnrange = PSDM_obj.xn_range
    init_compounds = PSDM_obj.compounds
    init_xdata = PSDM_obj.xdata
    init_xn = PSDM_obj.xn
    PSDM_obj.optimize_flag = False
    PSDM_obj.compounds = filter_pfas
    # Stretch the data index out to max_days if it is shorter.
    # NOTE(review): idx is the underlying numpy array of the index, mutated
    # in place; DataFrame.set_index() is NOT in-place and its return value
    # is discarded here, so the call on the next line has no effect --
    # confirm whether `PSDM_obj.data_df = PSDM_obj.data_df.set_index(idx)`
    # was intended.
    idx = PSDM_obj.data_df.index.values
    if np.max(idx) < PSDM_obj.max_days:
        idx[-1] = PSDM_obj.max_days
        PSDM_obj.data_df.set_index(idx)
    #this assumes that the concentrations in the dataframe are just
    #averages, so no time variability is impacted
    PSDM_obj.xdata = PSDM_obj.data_df.index.values
    time_idx = np.arange(PSDM_obj.max_days+1)
    # Daily effluent concentration per compound (filled in the loop below).
    data_df = pd.DataFrame(columns=filter_pfas, index=time_idx)
    for comp in filter_pfas:
        # Pin the fit to the user-supplied K and 1/n for this compound.
        PSDM_obj.test_range = np.array([PSDM_obj.k_data[comp]['K']])
        PSDM_obj.xn_range = np.array([PSDM_obj.k_data[comp]['1/n']])
        PSDM_obj.xn = PSDM_obj.k_data[comp]['1/n']
        comp, k_v, xn_v, ssqs, md = PSDM_obj.run_psdm_kfit(comp)
        md[md<0.] = 0.
        # NOTE(review): data_df[comp] is still all-NaN at this point, so this
        # comparison is False everywhere and the cap never applies --
        # presumably the influent concentration was the intended ceiling.
        md[md>data_df[comp][0]] = data_df[comp][0]
        if plot:
            plt.plot(md.index.values, md.values, label=comp)
        # Resample the model output onto the daily time grid.
        out_f = interp1d(md.index.values,\
                         md.transpose().values,\
                         fill_value='extrapolate')
        data_df[comp] = out_f(time_idx)[0]
    if plot:
        plt.legend(loc='center right')
        plt.ylabel('Concentration (ng/L)')
        plt.xlabel('Time (days)')
        plt.xlim((0,1095)) #limits to 3 years, rather than 3000 days
        #may change to 1000 days
        plt.savefig('full_scale_'+PSDM_obj.carbon+'.png',dpi=300)
        plt.close()
    data_df[data_df<0]=0. #resets negatives to zero
    # NOTE(review): ExcelWriter.save() is deprecated/removed in recent
    # pandas; a `with` block or writer.close() is the modern equivalent.
    writer = pd.ExcelWriter(PSDM_obj.project_name+'_'+PSDM_obj.carbon+'.xlsx')
    data_df.to_excel(writer, 'model_fit')
    writer.save()
    # Weight of each bed-age group in the blended effluent. A non-zero
    # remainder means one smaller group of beds exists.
    small_rotation = total_beds%beds_per_cycle
    if small_rotation == 0:
        #all cycles the same
        weights = int(total_beds/beds_per_cycle) *\
                  [float(beds_per_cycle)/total_beds]
    else:
        weights = [float(small_rotation)/total_beds] + \
                  int(total_beds/beds_per_cycle)*[float(beds_per_cycle)/total_beds]
        # having small rotation be first, is the worst case scenario
        # it means the smallest percentage of beds is new
        # having small rotation last is the best case scenario, not used.
    # Total effluent across all modeled compounds, per day.
    summed = data_df.transpose().sum()
    min_cycle = 1 #days
    num_cycles = np.ceil(float(total_beds)/beds_per_cycle)
    if num_cycles*beds_per_cycle > total_beds:
        print('number of beds per cycle may result in non-uniform cycling. assuming new beds have fewest numbers')
    # bed_info assigns each bed to a cycle group (1, 2, ...).
    bed_info = []
    count = 0
    bed_c = 1
    for bed in range(total_beds):
        if count <= beds_per_cycle:
            bed_info.append(bed_c)
            count+=1
            if count == beds_per_cycle:
                count = 0
                bed_c+=1
    # Extend the summed series past max_days so older bed-age windows can
    # still be sampled during the search below.
    function = interp1d(summed.index,summed.values,fill_value='extrapolate')
    aa = np.arange(5*PSDM_obj.max_days)
    summed = pd.Series(function(aa),index = aa)
    best_cycle = np.zeros(min_cycle)
    best_val = min_cycle*1
    try:
        # Grow the rotation interval i until the weighted blend of bed-age
        # windows first exceeds target_conc; the last passing i wins.
        for i in range(min_cycle,PSDM_obj.max_days,1):
            tmp = np.zeros(i)
            for j in range(max(bed_info)):
                tmp += weights[j]*(summed[(summed.index>=(j*i))&(summed.index<((j+1)*i))].values)
            if tmp.max() <= target_conc:
                best_cycle = tmp*1.
                best_val = i
            else:
                break
    except Exception:
        # Any failure (e.g. window/weight misalignment) falls back to the
        # maximum simulated duration.
        best_val = PSDM_obj.max_days
        best_cycle = np.zeros(PSDM_obj.max_days)
    #reset object parameters to initial values
    PSDM_obj.optimize_flag = init_optflag
    PSDM_obj.test_range = init_testrange
    PSDM_obj.xn_range = init_xnrange
    PSDM_obj.compounds = init_compounds
    PSDM_obj.xdata = init_xdata
    PSDM_obj.xn = init_xn
    return best_val, best_cycle
def isotherm_fit(data, isotherm='freundlich', plot=True, save_plot=False, filename='test'):
'''
Parameters
----------
data : TYPE
DESCRIPTION.
isotherm : TYPE, optional
DESCRIPTION. The default is 'freundlich'.
plot : TYPE, optional
DESCRIPTION. The default is True.
save_plot : TYPE, optional
DESCRIPTION. The default is False.
Returns
-------
TYPE
DESCRIPTION.
'''
def langmuir(c, K, N):
'''
Returns results of Langmuir Isotherm
Parameters
----------
c : array
array of liquid concentrations.
K : float
N : float
K & N are parameter values
Returns
-------
array, of solid phase concentrations
'''
return (K*c*N)/(1. + K*c)
def freundlich(c, k, invN):
'''
Parameters
----------
c : array
array of liquid concentrations.
k : float
Freundlich K parameter.
invN : float
1/n parameter
Returns
-------
TYPE
DESCRIPTION.
'''
# k, invN = array
return k * c**invN
def RedlichPeterson(c, A, B, M):
| |
<reponame>asrozar/python-netrecon<gh_stars>1-10
from netrecon import shared
from pexpect import TIMEOUT
from re import search, compile as recompile
from netaddr import IPAddress
from ipaddress import IPv4Network, IPv4Address
import time
def host_discovery(system_address, username, password):
    """Open an SSH session to a device, detect its OS, and collect host data.

    Returns the OS-specific parsed data (or the string 'linux'), ``None``
    when nothing was collected, or an integer error code propagated
    unchanged from the SSH layer.
    """
    ssh_session, prompt = shared.get_ssh_session(system_address, username, password)
    data = None
    # Error codes bubbled straight up from shared.get_ssh_session:
    # 99 timed out, 98 connection refused, 97 bad ssh key, 96 using ssh v1,
    # 95 password denied, 94 permission denied, 93 EOF,
    # 92 network unreachable, 1 could not connect
    if ssh_session in (99, 98, 97, 96, 95, 94, 93, 92, 1):
        return ssh_session
    infrastructure_os, ssh_session, prompt = shared.discover_os(ssh_session, prompt, password)
    collectors = {'panos': get_panos_hosts,
                  'ios': get_ios_hosts,
                  'nxos': get_nxos_hosts,
                  'asaos': get_asa_hosts}
    try:
        os_name = infrastructure_os['os']
        if os_name:
            if os_name == 'linux':
                data = os_name
            elif os_name in collectors:
                data = collectors[os_name](ssh_session, prompt)
            shared.kill_ssh_session(ssh_session)
    except KeyError as e:
        print('HOST_DISCOVERY EXCEPTION ERROR: %s' % str(e))
    return data
def get_panos_hosts(ssh_session, prompt):
    """Collect interface, ARP, subnet, system-info and NAT data from a
    PAN-OS firewall over an already-open interactive SSH session.

    Returns a dict with keys: nat_list, system_ip_list, system_info,
    subnet_list, host_list. The SSH session is killed after the last
    command's output has been captured, before parsing begins.
    """
    data = dict()
    host_list = list()
    subnet_list = list()
    system_ip_list = list()
    nat_list = list()
    model = None
    serial = None
    sw_version = None
    # --- command phase: run each 'show' command and capture its raw output ---
    ssh_session.expect([TIMEOUT, prompt])
    ssh_session.sendline(shared.PAN_SHOW_INTERFACES_LOGICAL)
    ssh_session.expect([TIMEOUT, prompt])
    show_interfaces_buff = ssh_session.before
    ssh_session.expect([TIMEOUT, prompt])
    ssh_session.sendline(shared.PAN_SHOW_ARP)
    ssh_session.expect([TIMEOUT, prompt])
    arp_buff = ssh_session.before
    ssh_session.expect([TIMEOUT, prompt])
    ssh_session.sendline(shared.PAN_SHOW_SUBNETS)
    ssh_session.expect([TIMEOUT, prompt])
    subnets_buff = ssh_session.before
    ssh_session.sendline(shared.PAN_SHOW_SYS_INFO)
    ssh_session.expect([TIMEOUT, prompt])
    sys_info_buff = ssh_session.before
    ssh_session.sendline(shared.PAN_SHOW_RUN_NAT)
    ssh_session.expect([TIMEOUT, prompt])
    nat_buff = ssh_session.before
    shared.kill_ssh_session(ssh_session)
    # --- logical interfaces: rows follow the dashed column-header rule ---
    interfaces = show_interfaces_buff.split('------------------- ----- ---- ---------------- ------------------------ ------ ------------------')
    interfaces_split = interfaces[1].split('\r\n')
    for e in interfaces_split:
        if e != '':
            entry = e.rstrip().split()
            # only fully-populated 7-column rows carry an assigned address
            if len(entry) == 7:
                d = {'system_ip_address': entry[6], 'vlan': int(entry[5]), 'name': entry[0], 'zone': entry[3]}
                if d not in system_ip_list:
                    system_ip_list.append(d)
    # --- ARP table: rows follow the 80-dash separator ---
    arp = arp_buff.split('--------------------------------------------------------------------------------')
    split_arp = arp[1].split('\r\n')
    for e in split_arp:
        if e != '':
            entry = e.rstrip().split()
            if len(entry) == 6:
                # keep only rows whose status column is 'c'
                # (presumably 'complete' ARP entries -- confirm)
                if entry[4] == 'c':
                    mac_addr = entry[2].replace(':', '')
                    mac_vendor = shared.lookup_mac_vendor(mac_addr)
                    host_dict = {'adjacency_interface': entry[0],
                                 'host_address': entry[1],
                                 'mac_address': mac_addr,
                                 'port': entry[3],
                                 'mac_vendor': mac_vendor
                                 }
                    host_list.append(host_dict)
    # --- connected subnets: drop trailing blank, skip two header lines ---
    subnets_buff_split = subnets_buff.split('\r\n')
    subnets_buff_split.pop(-1)
    for i in subnets_buff_split[2:]:
        sn = i.split()
        if shared.is_subnet(sn[0]):
            local_subnet_dict = {'subnet': '%s' % sn[0],
                                 'source_interface': sn[-1]}
            if local_subnet_dict not in subnet_list:
                subnet_list.append(local_subnet_dict)
    # --- model / serial / sw-version out of the system info dump ---
    # NOTE(review): '^(\r\n)' is a literal split token, not a regex, so this
    # usually yields a single element; the regex searches below still work
    # against that whole blob -- confirm the split was intentional.
    sys_info_buff_splt = sys_info_buff.split('^(\r\n)')
    for i in sys_info_buff_splt:
        model_match = search(r'(model:\s+)([^\s]+)', i)
        serial_match = search(r'(serial:\s+)([^\s]+)', i)
        sw_version_match = search(r'(sw-version:\s+)([^\s]+)', i)
        if model_match:
            model = model_match.group(2)
        if serial_match:
            serial = serial_match.group(2)
        if sw_version_match:
            sw_version = sw_version_match.group(2)
    # --- NAT rules: each '{ ... }' body in the running config is one rule ---
    nat_buff_split = nat_buff.strip().split('{')
    nat_buff_split.pop(0)
    for i in nat_buff_split:
        i_split = i.split('}')
        i_split.pop(1)
        nat_line = i_split[0].split('\r\n')
        d = {}
        for line in nat_line:
            if line:
                line_split = line.split()
                if line_split[0] == 'nat-type':
                    d['nat_type'] = line_split[1].rstrip(';')
                if line_split[0] == 'from':
                    d['from'] = line_split[1].rstrip(';')
                if line_split[0] == 'source':
                    # bracketed value means a multi-member list
                    if '[' in line_split[1]:
                        line_split.pop(-1)
                        d['source'] = line_split[2:]
                    else:
                        d['source'] = line_split[1].rstrip(';')
                if line_split[0] == 'to':
                    d['to'] = line_split[1].rstrip(';')
                if line_split[0] == 'to-interface':
                    d['to_interface'] = line_split[1].rstrip(';')
                if line_split[0] == 'destination':
                    if '[' in line_split[1]:
                        line_split.pop(-1)
                        d['destination'] = line_split[2:]
                    else:
                        d['destination'] = line_split[1].rstrip(';')
                if line_split[0] == 'service':
                    d['service'] = line_split[1].rstrip(';')
                if line_split[0] == 'translate-to':
                    # quoted value: re-split the raw line on double quotes
                    line_split = line.split('"')
                    line_split.pop(-1)
                    d['translate_to'] = line_split[1]
        if d not in nat_list:
            nat_list.append(d)
    system_info = {'system_model': model,
                   'system_serial': serial,
                   'license': None,
                   'system_sw_version': 'PAN-OS %s' % sw_version}
    data['nat_list'] = nat_list
    data['system_ip_list'] = system_ip_list
    data['system_info'] = system_info
    data['subnet_list'] = subnet_list
    data['host_list'] = host_list
    return data
def get_asa_hosts(ssh_session, prompt):
    """Inventory a Cisco ASA over an interactive SSH session.

    Issues a series of 'show' commands and parses interfaces, local
    subnets/IP pools, ARP entries, model/serial/OS version, NAT (xlate)
    entries and IPsec tunnel networks.

    Fixes applied to the original:
    * the SSH session was killed *before* the crypto/IPsec command was
      sent; the kill now happens after the last command's output is read;
    * interface dicts checked the never-set key 'tag' instead of 'vlan',
      so every parsed VLAN was clobbered to 0;
    * the 'show xlate' fallback never waited for its output, leaving a
      stale buffer in ssh_session.before;
    * remote-ident parsing called str methods on a regex *groups tuple*
      and discarded str.replace() results, raising at runtime.

    Returns a dict with keys: ipsec_tunnels, nat_list, system_ip_list,
    system_info, subnet_list, host_list.
    """
    system_info = dict()
    data = dict()
    host_list = list()
    subnet_list = list()
    system_ip_list = list()
    nat_list = list()
    ipsec_tunnels = list()
    # --- command phase: run each 'show' command and capture its output ---
    ssh_session.expect([TIMEOUT, prompt])
    ssh_session.sendline(shared.ASA_SHOW_OS)
    ssh_session.expect([TIMEOUT, prompt])
    os_software = ssh_session.before
    ssh_session.sendline(shared.ASA_SHOW_SERIALNUM)
    ssh_session.expect([TIMEOUT, prompt])
    time.sleep(.1)
    asa_sn = ssh_session.before
    ssh_session.sendline(shared.ASA_SHOW_MODEL)
    ssh_session.expect([TIMEOUT, prompt])
    time.sleep(.1)
    asa_model_buff = ssh_session.before
    ssh_session.sendline(shared.ASA_SHOW_INTERFACE)
    ssh_session.expect([TIMEOUT, prompt])
    time.sleep(.1)
    asa_interfaces_buff = ssh_session.before
    ssh_session.sendline(shared.ASA_SHOW_LOCAL_CONNECTIONS)
    ssh_session.expect([TIMEOUT, prompt])
    time.sleep(.1)
    local_subnets_buff = ssh_session.before
    local_subnets_lines = local_subnets_buff.split('\r\n')
    ssh_session.sendline(shared.ASA_SHOW_IP_LOCAL_POOLS)
    ssh_session.expect([TIMEOUT, prompt])
    time.sleep(.1)
    local_ip_local_pools_buff = ssh_session.before
    local_ip_local_pools_lines = local_ip_local_pools_buff.split('\r\n')
    ssh_session.sendline(shared.ASA_SHOWARP)
    ssh_session.expect([TIMEOUT, prompt])
    time.sleep(.1)
    arp_buff = ssh_session.before
    arp_lines = arp_buff.split('\t')
    ssh_session.sendline(shared.ASA_SHOW_XLATE)
    if ssh_session.expect([TIMEOUT, '.Invalid input detected.', prompt]) == 1:
        # Older images reject the detailed form; fall back to a plain
        # 'show xlate' and wait for its output (the original captured a
        # stale buffer here).
        ssh_session.expect([TIMEOUT, prompt])
        ssh_session.sendline('show xlate')
        ssh_session.expect([TIMEOUT, prompt])
    nat_buff = ssh_session.before
    ssh_session.sendline(shared.ASA_SHOW_CRYPTO_IPSEC_SA_DETAIL)
    ssh_session.expect([TIMEOUT, prompt])
    time.sleep(.1)
    ipsec_sa_buff = ssh_session.before
    # All commands have been issued; only now tear the session down
    # (the original killed the session before the crypto command above).
    shared.kill_ssh_session(ssh_session)
    ipsec_sa_intefaces = ipsec_sa_buff.split('interface:')
    # --- interfaces: each '!'-delimited stanza describes one interface ---
    asa_interfaces_split = asa_interfaces_buff.split('!')
    asa_interfaces_split.pop(0)
    for l in asa_interfaces_split:
        int_line = l.split('\r\n')
        d = {}
        for line in int_line:
            if line:
                x = line.split()
                if x[0] == 'nameif':
                    nameif = x[1]
                    d['nameif'] = nameif
                if x[0] == 'ip':
                    ip_addr = x[2]
                    net_mask = x[3]
                    d['system_ip_address'] = ip_addr
                    d['net_mask'] = net_mask
                if x[0] == 'vlan':
                    tag = int(x[1])
                    d['vlan'] = tag
                if x[0] == 'interface':
                    interface = x[1]
                    d['interface'] = interface
        if 'nameif' in d:
            # Default untagged interfaces to vlan 0. The original tested
            # the never-present key 'tag', which reset every parsed vlan.
            if 'vlan' not in d:
                d['vlan'] = 0
            if d not in system_ip_list:
                system_ip_list.append(d)
    # --- VPN address pools: derive the pool's network and mask ---
    try:
        local_ip_local_pools_lines.pop(-1)
    except IndexError:
        pass
    for ip_local_pool in local_ip_local_pools_lines[1:]:
        ip_local_pool_split = ip_local_pool.split()
        name = ip_local_pool_split[3]
        ip_range = ip_local_pool_split[4]
        f_host = ip_range.split('-')[0].split('.')[:-1]
        network = '.'.join(f_host) + '.0'
        try:
            netmask = ip_local_pool_split[6]
        except IndexError:
            # No explicit mask configured: infer one from the RFC1918
            # block the pool's network falls into.
            CLASS_A = '255.0.0.0'
            CLASS_B = '255.240.0.0'
            CLASS_C = '255.255.0.0'
            if IPv4Address(network) in IPv4Network(("10.0.0.0", CLASS_A)):
                netmask = CLASS_A
            elif IPv4Address(network) in IPv4Network(("172.16.0.0", CLASS_B)):
                netmask = CLASS_B
            elif IPv4Address(network) in IPv4Network(("192.168.0.0", CLASS_C)):
                netmask = CLASS_C
            else:
                netmask = None
        if shared.is_subnet(netmask):
            local_subnet_dict = {'subnet': '%s/%s' % (network, IPAddress(netmask).netmask_bits()),
                                 'source_interface': name}
            if local_subnet_dict not in subnet_list:
                subnet_list.append(local_subnet_dict)
    # --- directly-connected subnets ---
    for subnet_line in local_subnets_lines:
        subnet_line_list = list(filter(None, subnet_line.split(' ')))
        try:
            ip_addr = subnet_line_list[1]
            mask = subnet_line_list[2]
            interface = subnet_line_list[-1:][0]
        except IndexError:
            continue
        if shared.is_valid_ip(ip_addr):
            if shared.is_subnet(mask):
                local_subnet_dict = {'subnet': '%s/%s' % (ip_addr, IPAddress(mask).netmask_bits()),
                                     'source_interface': interface}
                if local_subnet_dict not in subnet_list:
                    subnet_list.append(local_subnet_dict)
    # --- ARP entries -> discovered hosts ---
    for a in arp_lines:
        arp_split = a.split(' ')
        try:
            mac_addr = arp_split[2].replace('.', '')
        except IndexError:
            continue
        mac_vendor = shared.lookup_mac_vendor(mac_addr)
        host_dict = {'host_address': arp_split[1],
                     'mac_address': mac_addr,
                     'adjacency_interface': arp_split[0],
                     'mac_vendor': mac_vendor}
        if host_dict not in host_list:
            host_list.append(host_dict)
    # --- model / serial / OS version ---
    asa_model_split = asa_model_buff.split('\r\n')
    for asa_model in asa_model_split:
        asa_model_match = search(r'^Hardware:\s+([^\s]+)', asa_model)
        if asa_model_match:
            # strip the trailing comma the CLI prints after the model name
            if asa_model_match.group(1).endswith(','):
                asa_model = asa_model_match.group(1)[:-1]
            else:
                asa_model = asa_model_match.group(1)
            system_info['system_model'] = asa_model
    asa_sn_split = asa_sn.split('\r\n')
    for asa_sn in asa_sn_split:
        asa_serial_match = search(r'^Serial\s+Number\s*:\s+([^\s]+)', asa_sn)
        if asa_serial_match:
            system_info['system_serial'] = asa_serial_match.group(1)
    os_software_split = os_software.split('\r\n')
    for os_software in os_software_split:
        asa_os_ver_match = search(r'(^Cisco\s+Adaptive\s+Security\s+Appliance)', os_software)
        if asa_os_ver_match:
            system_info['system_sw_version'] = os_software.rstrip()
    # --- NAT (xlate) entries: only static ('s' flag) translations kept ---
    nat_lines = recompile(r'TCP|UDP|NAT').split(nat_buff)
    if not nat_lines:
        nat_lines = nat_buff.split('\r\n')
    nat_lines.pop(-1)
    nat_lines = nat_lines[2:]
    for l in nat_lines:
        d = {}
        split_flags = l.split('flags')
        if 's' in split_flags[1]:
            split_to = str(split_flags[0]).split('to')
            to_int = split_to[1].split(':')
            d['to_int'] = to_int[0].strip()
            d['to_list'] = [x.rstrip().strip() for x in to_int[1].split(',')]
            split_from = str(split_to[0]).split('from')
            from_int = split_from[1].split(':')
            d['from_int'] = from_int[0].strip()
            d['from_list'] = [x.rstrip().strip() for x in from_int[1].split(',')]
            nat_list.append(d)
    # --- IPsec tunnels: remote protected networks per crypto map ---
    for int_element in ipsec_sa_intefaces:
        if int_element:
            tunnel = int_element.split('Crypto map tag:')
            remote_ident_lines = recompile(r'(remote ident \(addr/mask/prot/port\):)(\s*)(\(.*\))')
            networks = list()
            for tunnel_element in tunnel:
                s = remote_ident_lines.search(tunnel_element)
                if s:
                    # group(3) is the parenthesised "(net/mask/prot/port)"
                    # blob; the original called .replace() on the whole
                    # groups() tuple (a TypeError) and discarded the
                    # replace results even for strings.
                    net_element = s.group(3).replace('(', '').replace(')', '')
                    net_element_split = net_element.split('/')
                    net = net_element_split[0]
                    mask = net_element_split[1]
                    networks.append({'network': net, 'subnet_mask': mask})
            ipsec_tunnels.append({'tunnel_name': tunnel[0].strip(), 'networks': networks})
    data['ipsec_tunnels'] = ipsec_tunnels
    data['nat_list'] = nat_list
    data['system_ip_list'] = system_ip_list
    data['system_info'] = system_info
    data['subnet_list'] = subnet_list
    data['host_list'] = host_list
    return data
def get_nxos_hosts(ssh_session, prompt):
data = dict()
system_ip_list = list()
host_list = list()
subnet_list = list()
mac_list = list()
system_info = dict()
sw_version = None
ssh_session.sendline(shared.SHOW_OS)
ssh_session.expect([TIMEOUT, prompt])
os_software = ssh_session.before
os_software_split = os_software.split('\r\n')
for i in os_software_split:
nxos_ver = search(r'(^Cisco\s+Nexus\s+Operating)', i)
if nxos_ver:
sw_version = i.rstrip()
system_info['system_sw_version'] = sw_version
ssh_session.sendline('show version | include "Processor Board ID"')
ssh_session.expect([TIMEOUT, prompt])
switch_sn = ssh_session.before
system_info['system_serial'] = switch_sn.split('show version | include "Processor Board ID"')[1].split('\r\n')[1].split('ID')[1].strip()
ssh_session.sendline('show version | section Hardware')
ssh_session.expect([TIMEOUT, prompt])
switch_model = ssh_session.before
system_info['system_model'] = switch_model.split('show version | section Hardware')[1].split('Hardware')[1].split('\r\n')[1].strip()
ssh_session.sendline(shared.IOS_SHOWIPINTBR)
ssh_session.expect([TIMEOUT, prompt])
system_ip_address_buff = ssh_session.before
ssh_session.sendline(shared.NXOS_SHOW_LOCAL_CONNECTIONS)
ssh_session.expect([TIMEOUT, prompt])
local_subnets_buff = ssh_session.before
ssh_session.sendline(shared.IOS_SHOW_CDP_DETAIL)
ssh_session.expect([TIMEOUT, prompt])
cdp_buff = ssh_session.before
cdp_data = str(cdp_buff).split('-------------------------')
ssh_session.sendline(shared.NXOS_SHOW_ARP)
ssh_session.expect([TIMEOUT, prompt])
arp_buff = ssh_session.before
ssh_session.sendline(shared.IOS_SHOW_CAM)
ssh_session.expect([TIMEOUT, prompt])
cam_buff = ssh_session.before
shared.kill_ssh_session(ssh_session)
addr_line_split = system_ip_address_buff.split('Interface IP Address Interface Status')
addrs_nl_split = addr_line_split[1].split('\r\n')
addrs_nl_split.pop(-1)
for addrs_line in addrs_nl_split:
if addrs_line:
line = addrs_line.split()
d = {'system_ip_address': line[1],
'name': line[0],
'status': line[2]
}
if d not in system_ip_list:
system_ip_list.append(d)
local_subnets = local_subnets_buff.split('show ip route direct | include attached')[1]
local_subnets_lines = local_subnets.split('\r\n')
local_subnets_lines.pop(0)
local_subnets_lines.pop(-1)
for subnet_line in local_subnets_lines:
if subnet_line:
subnet_split | |
"""Submodule provinding GAN model architectures."""
import torch
import torch.optim as optim
from torch import nn
from typing import Callable
class GANModel(torch.nn.Module):
    """Generator pipeline: latent vector -> sample -> mask -> preprocess -> encode.

    Wraps a trainable generator together with a frozen encoding model, a
    masking layer, and optional pre-processing callables for images and
    responses.
    """

    def __init__(
        self,
        generator_model: torch.nn.Module,
        encoding_model: torch.nn.Module,
        masking_layer: torch.nn.Module,
        image_preprocessing: Callable = None,
        response_preprocessing: Callable = None,
        device: str = None,
        *args,
        **kwargs,
    ):
        """Assemble the GAN pipeline.

        Args:
            generator_model: maps latent vectors to samples.
            encoding_model: produces activations from preprocessed samples;
                frozen (no gradients) on construction.
            masking_layer: applied to the raw generated sample.
            image_preprocessing: optional callable applied to the masked
                sample; identity when None.
            response_preprocessing: optional callable for responses;
                identity when None.
            device: torch device string; auto-selects CUDA when None.
        """
        super().__init__()
        # Default to GPU when available unless the caller pins a device.
        if device is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.generator_model = generator_model
        # Freeze the encoder: only the generator is meant to be trained.
        self.encoding_model = encoding_model.requires_grad_(False)
        self.masking_layer = masking_layer
        # BUG FIX: the original defined local identity functions when a
        # preprocessing callable was omitted but never bound them to the
        # instance, so forward() raised AttributeError.  Fall back to an
        # identity function and always assign the attribute.
        if image_preprocessing is None:
            def image_preprocessing(x):
                return x
        self.image_preprocessing = image_preprocessing
        if response_preprocessing is None:
            def response_preprocessing(x):
                return x
        self.response_preprocessing = response_preprocessing

    def forward(self, latent_vector: torch.Tensor) -> torch.Tensor:
        """Run the full pipeline on a batch of latent vectors.

        Returns:
            Tuple of (activations, preprocessed_sample, masked_sample, sample).
        """
        sample = self.generator_model(latent_vector)  # latent vector -> sample
        masked_sample = self.masking_layer(sample)  # sample -> masked sample
        preprocessed_sample = self.image_preprocessing(
            masked_sample
        )  # masked sample -> preprocessed sample
        activations = self.encoding_model(
            preprocessed_sample
        )  # preprocessed sample -> activations
        return activations, preprocessed_sample, masked_sample, sample
class ExampleGAN:
"""Create gan model class.
Create gan model and hold it for usage in the experiments.
This class is based on Lazarou 2020: PyTorch and GANs: A Micro Tutorial.
tutorial, which can be found at:
https://towardsdatascience.com/\
pytorch-and-gans-a-micro-tutorial-804855817a6b
Most in-line comments are direct quotes from the tutorial. The code is
copied and slightly adapted.
"""
def __init__(
self,
generator,
discriminator,
noise_fn,
data_fn,
batch_size=32,
device="cpu",
lr_d=1e-3,
lr_g=2e-4,
):
"""A GAN class for holding and training a generator and discriminator.
Args:
generator (torch.nn.Module): A Generator network.
discriminator (torch.nn.Module): A Discriminator network.
noise_fn (function f(num: int)): A noise function.
This is the function used to sample latent vectors Z,
which our Generator will map to generated samples X.
This function must accept an integer num as input and
return a 2D Torch tensor with shape (num, latent_dim).
data_fn (function f(num: int)): A data function.
This is the function that our Generator is tasked with
learning. This function must accept an integer num as
input and return a 2D Torch tensor with shape (num, data_dim),
where data_dim is the dimension of the data we are trying
to generate, the input_dim of our Discriminator.
batch_size (int, optional): training batch size. Defaults to 32.
device (string, optional): cpu or CUDA. Defaults to cpu.
lr_d (float, optional): learning rate for the discriminator.
Defaults to 1e-3.
lr_g (float, optional): learning rate for the generator.
Defaults to 2e-4.
"""
self.generator = generator
self.generator = self.generator.to(device)
self.discriminator = discriminator
self.discriminator = self.discriminator.to(device)
self.noise_fn = noise_fn
self.data_fn = data_fn
self.batch_size = batch_size
self.device = device
self.lr_d = lr_d
self.lr_g = lr_g
self.betas = (0.5, 0.999)
# The optimization criterion used is binary cross entropy loss.
self.criterion = nn.BCELoss()
# An optimizer for the discriminator part of the model.
self.optim_d = optim.Adam(
discriminator.parameters(), lr=self.lr_d, betas=self.betas
)
# An optimizer for the generator part of the model.
self.optim_g = optim.Adam(
generator.parameters(), lr=self.lr_g, betas=self.betas
)
# A vector of ones, the discriminator target.
self.target_ones = torch.ones((batch_size, 1)).to(device)
# A vector of zeros, the generators target.
self.target_zeros = torch.zeros((batch_size, 1)).to(device)
def generate_samples(self, latent_vec=None, num=None):
"""Generate samples from the generator.
If latent_vec and num are None then us self.batch_size random latent
vectors.
Args:
latent_vec (torch.Tensor, optional): A pytorch latent vector or
None. Defaults to None.
num (int, optional): The number of samples to generate if
latent_vec is None. Defaults to None.
"""
num = self.batch_size if num is None else num
latent_vec = self.noise_fn(num) if latent_vec is None else latent_vec
# no_grad tells torch not to compute gradients here
with torch.no_grad():
samples = self.generator(latent_vec)
return samples
def train_step_generator(self):
"""Train the generator one step and return the loss."""
# Clear the gradients. The coolest thing about PyTorch is that the
# gradient automatically accumulates in each parameter as the network is
# used. However, we typically want to clear these gradients between each
# step of the optimizer; the zero_grad method does just that.
self.generator.zero_grad()
# Sample batch_size latent vectors from the noise-generating function.
latent_vec = self.noise_fn(self.batch_size)
# Feed the latent vectors into the Generator and get the generated
# samples as output (under the hood, the generator.forward method is
# called here). Remember, PyTorch is define-by-run, so this is the point
# where the generator’s computational graph is built.
generated = self.generator(latent_vec)
# Feed the generated samples into the Discriminator and get its
# confidence that each sample is real. Remember, the Discriminator is
# trying to classify these samples as fake (0) while the Generator is
# trying trick it into thinking they’re real (1). Just as in the previous
# line, this is where the Discriminator’s computational graph is built,
# and because it was given the generated samples generated as input, this
# computational graph is stuck on the end of the Generator’s
# computational graph.
classifications = self.discriminator(generated)
# Calculate the loss for the Generator. Our loss function is
# Binary Cross Entropy, so the loss for each of the batch_size samples is
# calculated and averaged into a single value. loss is a PyTorch tensor
# with a single value in it, so it’s still connected to the full
# computational graph.
loss = self.criterion(classifications, self.target_ones)
# This is where the magic happens. Or rather, this is where the prestige
# happens, since the magic has been happening invisibly this whole time.
# Here, the backward method calculates the gradient d_loss/d_x for every
# parameter x in the computational graph.
loss.backward()
# Apply one step of the optimizer, nudging each parameter down the
# gradient. If you’ve built a GAN in Keras before, you’re probably
# familiar with having to set my_network.trainable = False. One of the
# advantages of PyTorch is that you don’t have to bother with that,
# because optim_g was told to only concern itself with our Generator’s
# parameters.
self.optim_g.step()
# Return the loss. We will be storing these in a list for later
# visualization. However, it’s vital that we use the item method to
# return it as a float, not as a PyTorch tensor. This is because, if we
# keep a reference to that tensor object in a list, Python will also hang
# on to the entire computational graph. This is a big waste of memory,
# so we need to make sure that we only keep what we need (the value)
# so that Python’s garbage collector can clean up the rest.
return loss.item()
def train_step_discriminator(self):
"""Train the discriminator one step and return the losses."""
# Clear the gradients. The coolest thing about PyTorch is that the
# gradient automatically accumulates in each parameter as the network is
# used. However, we typically want to clear these gradients between each
# step of the optimizer; the zero_grad method does just that.
self.discriminator.zero_grad()
# real samples
# Sample some real samples from the target function, get the
# Discriminator’s confidences that they’re real (the Discriminator wants
# to maximize this!), and calculate the loss. This is very similar to the
# generator’s training step.
real_samples = self.data_fn(self.batch_size)
pred_real = self.discriminator(real_samples)
loss_real = self.criterion(pred_real, self.target_ones)
# generated samples
# Sample some generated samples from the generator, get the
# Discriminator’s confidences that they’re real (the Discriminator wants
# to minimize this!), and calculate the loss. Because we’re training the
# Discriminator here, we don’t care about the gradients in the Generator
# and as such we use the no_grad context manager. Alternatively, you
# could ditch the no_grad and substitute in the line
# pred_fake = self.discriminator(fake_samples.detach())
# and detach fake_samples from the Generator’s computational graph after
# the fact, but why bother calculating it in the first place?
latent_vec = self.noise_fn(self.batch_size)
with torch.no_grad():
fake_samples = self.generator(latent_vec)
pred_fake = self.discriminator(fake_samples)
loss_fake = self.criterion(pred_fake, self.target_zeros)
# combine
# Average the computational graphs for the real samples and the generated
# samples. Yes, that’s really it. This | |
# -*- coding: utf-8 -*-
"""
Interface into SQL for the IBEIS Controller
TODO; need to use some sort of sticky bit so
sql files are created with reasonable permissions.
"""
import functools
import logging
import collections
import os
import parse
import re
import uuid
from collections.abc import Mapping, MutableMapping
from contextlib import contextmanager
from os.path import join, exists
import six
import sqlalchemy
import utool as ut
from deprecated import deprecated
from sqlalchemy.engine import LegacyRow
from sqlalchemy.schema import Table
from sqlalchemy.sql import bindparam, text, ClauseElement
from wbia.dtool import lite
from wbia.dtool.dump import dumps
from wbia.dtool.types import Integer, TYPE_TO_SQLTYPE
from wbia.dtool.types import initialize_postgresql_types
import tqdm
# utool injection: reload-aware print/profile helpers scoped to this module.
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
# Command-line flags controlling write access and SQL logging verbosity.
READ_ONLY = ut.get_argflag(('--readonly-mode', '--read-only', '--readonly'))
VERBOSE_SQL = ut.get_argflag(('--print-sql', '--verbose-sql', '--verb-sql', '--verbsql'))
NOT_QUIET = not (ut.QUIET or ut.get_argflag('--quiet-sql'))
VERBOSE = ut.VERBOSE
VERYVERBOSE = ut.VERYVERBOSE
TIMEOUT = 600  # Wait for up to 600 seconds for the database to return from a locked state
# Number of rows handled per batch in bulk operations.
BATCH_SIZE = int(1e4)
# Column description record: (id, name, type, not-null flag, default, primary-key flag).
SQLColumnRichInfo = collections.namedtuple(
    'SQLColumnRichInfo', ('column_id', 'name', 'type_', 'notnull', 'dflt_value', 'pk')
)
# FIXME (31-Jul-12020) Duplicate definition of wbia.constants.METADATA_TABLE
# Use this definition as the authority because it's within the context of its use.
METADATA_TABLE_NAME = 'metadata'
# Defines the columns used within the metadata table.
METADATA_TABLE_COLUMNS = {
    # Dictionary of metadata column names pair with:
    # - is_coded_data: bool showing if the value is a data type (True) or string (False)
    # <column-name>: <info-dict>
    'dependson': dict(is_coded_data=True),
    'docstr': dict(is_coded_data=False),
    'relates': dict(is_coded_data=True),
    'shortname': dict(is_coded_data=True),
    'superkeys': dict(is_coded_data=True),
    'extern_tables': dict(is_coded_data=True),
    'dependsmap': dict(is_coded_data=True),
    'primary_superkey': dict(is_coded_data=True),
    'constraint': dict(is_coded_data=False),
}
METADATA_TABLE_COLUMN_NAMES = list(METADATA_TABLE_COLUMNS.keys())
def create_engine(uri, POSTGRESQL_POOL_SIZE=20, ENGINES={}, timeout=TIMEOUT):
    """Return a (cached) SQLAlchemy engine for *uri*.

    The mutable default ``ENGINES`` is deliberate: it is a per-process cache
    keyed by uri, tagged with the creating pid so that engines inherited
    from a parent process after a fork are discarded.  In-memory sqlite
    databases are never cached, and each call returns a fresh engine.
    """
    pid = os.getpid()
    if ENGINES.get('pid') != pid:
        # Engines created in the parent process can't be reused here.
        ENGINES.clear()
        ENGINES['pid'] = pid
    kw = {
        # The echo flag is a shortcut to set up SQLAlchemy logging
        'echo': False,
        'connect_args': {
            'timeout': timeout,
        },
    }
    if uri.startswith('sqlite:') and ':memory:' in uri:
        # Don't share engines for in-memory sqlite databases.
        return sqlalchemy.create_engine(uri, **kw)
    try:
        return ENGINES[uri]
    except KeyError:
        pass
    if uri.startswith('postgresql:'):
        # pool_size is not available for sqlite, and postgres spells its
        # timeout argument differently.
        kw['pool_size'] = POSTGRESQL_POOL_SIZE
        kw['connect_args'] = {
            'connect_timeout': timeout,
        }
    engine = ENGINES[uri] = sqlalchemy.create_engine(uri, **kw)
    return engine
def compare_coldef_lists(coldef_list1, coldef_list2):
    """Compare two lists of (name, coldef) column definitions.

    Definitions are normalized so that equivalent sqlite and postgresql
    schemas compare equal: case is folded, postgresql auto-increment
    defaults are stripped, bigint is treated as integer, and double
    precision as real.  The postgresql-only "rowid" column is ignored.

    Returns:
        None when the lists are equivalent, otherwise a tuple with the two
        normalized lists for inspection.
    """

    def normalize(coldef_list):
        normalized = []
        for name, coldef in coldef_list:
            if name == 'rowid':
                # "rowid" is added to postgresql tables; sqlite has it implicitly.
                continue
            coldef_ = coldef.lower()
            # Strip "default nextval" auto-increment defaults (sqlite needs none).
            coldef_ = re.sub(r' default \(nextval\(.*', '', coldef_)
            if 'bigint' in coldef_:
                # Consider bigint and integer the same.
                coldef_ = re.sub(r"'([^']*)'::bigint", r'\1', coldef_)
                coldef_ = re.sub(r'\bbigint\b', 'integer', coldef_)
            if 'double precision' in coldef_:
                # Consider double precision and real the same.
                coldef_ = re.sub(r'\bdouble precision\b', 'real', coldef_)
            normalized.append((name.lower(), coldef_))
        return normalized

    normalized1 = normalize(coldef_list1)
    normalized2 = normalize(coldef_list2)
    if normalized1 != normalized2:
        return normalized1, normalized2
    return None
def _unpacker(results):
""" HELPER: Unpacks results if unpack_scalars is True. """
if not results: # Check for None or empty list
results = None
else:
assert len(results) <= 1, 'throwing away results! { %r }' % (results,)
results = results[0]
return results
def tuplize(list_):
    """Wrap every scalar element of *list_* in a length-1 tuple.

    Elements that are already iterable are passed through unchanged.
    """
    return [x if ut.isiterable(x) else (x,) for x in list_]
def sanitize_sql(db, tablename_, columns=None):
    """Sanitize an SQL table name and optional column names.  Use sparingly.

    Args:
        db: controller exposing ``get_table_names()`` and ``get_column_names()``.
        tablename_ (str): untrusted table name.
        columns (list, optional): untrusted column names. Defaults to None.

    Returns:
        str: the sanitized table name when ``columns`` is None, otherwise a
        ``(tablename, columns)`` tuple.

    Raises:
        Exception: when the table or any column is not known to ``db``.
    """
    # Strip everything except alphanumerics and underscores.
    tablename = re.sub('[^a-zA-Z_0-9]', '', tablename_)
    valid_tables = db.get_table_names()
    if tablename not in valid_tables:
        logger.info('tablename_ = %r' % (tablename_,))
        logger.info('valid_tables = %r' % (valid_tables,))
        raise Exception(
            'UNSAFE TABLE: tablename=%r. '
            'Column names and table names should be different' % tablename
        )
    if columns is None:
        return tablename

    # Hoisted out of the helper: the valid column set doesn't change between
    # columns, so query it once instead of once per column.
    valid_columns = db.get_column_names(tablename)

    def _sanitize_sql_helper(column):
        # Strip everything except alphanumerics and underscores.
        column_ = re.sub('[^a-zA-Z_0-9]', '', column)
        if column_ not in valid_columns:
            # (The original also had an unreachable `return None` after this
            # raise; it has been removed.)
            raise Exception(
                'UNSAFE COLUMN: must be all lowercase. '
                'tablename={}, column={}, valid_columns={} column_={}'.format(
                    tablename, column, valid_columns, column_
                )
            )
        return column_

    columns = [_sanitize_sql_helper(column) for column in columns]
    # BUG FIX: the original filtered on `columns is not None` (always true
    # for a list); the intent was to drop per-column None entries.
    columns = [column for column in columns if column is not None]
    return tablename, columns
@six.add_metaclass(ut.ReloadingMetaclass)
class SQLDatabaseController(object):
"""
Interface to an SQL database
"""
class Metadata(Mapping):
"""Metadata is an attribute of the ``SQLDatabaseController`` that
facilitates easy usages by internal and exteral users.
Each metadata attributes represents a table (i.e. an instance of ``TableMetadata``).
Each ``TableMetadata`` instance has metadata names as attributes.
The ``TableMetadata`` can also be adapated to a dictionary for compatability.
The the ``database`` attribute is a special case that results
in a ``DatabaseMetadata`` instance rather than ``TableMetadata``.
This primarily give access to the version and initial UUID,
respectively as ``database.version`` and ``database.init_uuid``.
Args:
ctrlr (SQLDatabaseController): parent controller object
"""
class DatabaseMetadata(MutableMapping):
"""Special metadata for database information"""
__fields = (
'version',
'init_uuid',
)
def __init__(self, ctrlr):
self.ctrlr = ctrlr
@property
def version(self):
stmt = text(
f'SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key'
)
try:
return self.ctrlr.executeone(
stmt, {'key': 'database_version'}, use_fetchone_behavior=True
)[0]
except TypeError: # NoneType
return None
@version.setter
def version(self, value):
if not value:
raise ValueError(value)
dialect = self.ctrlr._engine.dialect.name
if dialect == 'sqlite':
stmt = text(
f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value)'
'VALUES (:key, :value)'
)
elif dialect == 'postgresql':
stmt = text(
f"""\
INSERT INTO {METADATA_TABLE_NAME}
(metadata_key, metadata_value)
VALUES (:key, :value)
ON CONFLICT (metadata_key) DO UPDATE
SET metadata_value = EXCLUDED.metadata_value"""
)
else:
raise RuntimeError(f'Unknown dialect {dialect}')
params = {'key': 'database_version', 'value': value}
self.ctrlr.executeone(stmt, params)
@property
def init_uuid(self):
stmt = text(
f'SELECT metadata_value FROM {METADATA_TABLE_NAME} WHERE metadata_key = :key'
)
try:
value = self.ctrlr.executeone(
stmt, {'key': 'database_init_uuid'}, use_fetchone_behavior=True
)[0]
except TypeError: # NoneType
return None
if value is not None:
value = uuid.UUID(value)
return value
@init_uuid.setter
def init_uuid(self, value):
if not value:
raise ValueError(value)
elif isinstance(value, uuid.UUID):
value = str(value)
dialect = self.ctrlr._engine.dialect.name
if dialect == 'sqlite':
stmt = text(
f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} (metadata_key, metadata_value) '
'VALUES (:key, :value)'
)
elif dialect == 'postgresql':
stmt = text(
f"""\
INSERT INTO {METADATA_TABLE_NAME}
(metadata_key, metadata_value)
VALUES (:key, :value)
ON CONFLICT (metadata_key) DO UPDATE
SET metadata_value = EXCLUDED.metadata_value"""
)
else:
raise RuntimeError(f'Unknown dialect {dialect}')
params = {'key': 'database_init_uuid', 'value': value}
self.ctrlr.executeone(stmt, params)
# collections.abc.MutableMapping abstract methods
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError as exc:
raise KeyError(*exc.args)
def __setitem__(self, key, value):
if key not in self.__fields:
raise AttributeError(key)
setattr(self, key, value)
def __delitem__(self, key):
raise RuntimeError(f"'{key}' cannot be deleted")
def __iter__(self):
for name in self.__fields:
yield name
def __len__(self):
return len(self.__fields)
class TableMetadata(MutableMapping):
"""Metadata on a particular SQL table"""
            def __init__(self, ctrlr, table_name):
                # Bypass this class's custom __setattr__ (which writes to the
                # metadata table): these two attributes must land directly on
                # the instance itself.
                super().__setattr__('ctrlr', ctrlr)
                super().__setattr__('table_name', table_name)
def _get_key_name(self, name):
"""Because keys are `<table-name>_<name>`"""
return '_'.join([self.table_name, name])
def update(self, **kwargs):
"""Update or insert the value into the metadata table with the given keyword arguments of metadata field names"""
for keyword, value in kwargs.items():
if keyword not in METADATA_TABLE_COLUMN_NAMES:
# ignore unknown keywords
continue
setattr(self, keyword, value)
            def __getattr__(self, name):
                """Look up the metadata value ``<table_name>_<name>``.

                Returns None when no row exists.  Values flagged as coded
                data are evaluated back into Python objects.
                """
                # Query the database for the value represented as name
                key = '_'.join([self.table_name, name])
                statement = text(
                    'SELECT metadata_value '
                    f'FROM {METADATA_TABLE_NAME} '
                    'WHERE metadata_key = :key'
                )
                try:
                    value = self.ctrlr.executeone(
                        statement, {'key': key}, use_fetchone_behavior=True
                    )[0]
                except TypeError:  # NoneType
                    return None
                if METADATA_TABLE_COLUMNS[name]['is_coded_data']:
                    # NOTE(review): eval of stored text — assumes only trusted
                    # writers populate the metadata table.
                    value = eval(value)
                if name == 'superkeys' and isinstance(value, list):
                    # superkeys looks like [('image_rowid, encounter_rowid',)]
                    # instead of [('image_rowid',), ('encounter_rowid',)]
                    if len(value) == 1 and len(value[0]) == 1:
                        value = [tuple(value[0][0].split(', '))]
                return value
            def __getattribute__(self, name):
                # Pure delegation to the default implementation; present only
                # to make attribute access explicit alongside the custom
                # __getattr__/__setattr__ pair (no behavior change).
                return super().__getattribute__(name)
            def __setattr__(self, name, value):
                """Persist ``name = value`` as a metadata-table row.

                Only names listed in METADATA_TABLE_COLUMNS are accepted;
                assigning None deletes the record instead.
                """
                try:
                    info = METADATA_TABLE_COLUMNS[name]
                except KeyError:
                    # This prevents setting of any attributes outside of the known names
                    raise AttributeError
                # Delete the record if given None
                if value is None:
                    return self.__delattr__(name)
                if info['is_coded_data']:
                    # Treat the data as code.
                    value = repr(value)
                key = self._get_key_name(name)
                # Insert or update the record
                # The upsert syntax differs between the supported dialects.
                dialect = self.ctrlr._engine.dialect.name
                if dialect == 'sqlite':
                    statement = text(
                        f'INSERT OR REPLACE INTO {METADATA_TABLE_NAME} '
                        f'(metadata_key, metadata_value) VALUES (:key, :value)'
                    )
                elif dialect == 'postgresql':
                    statement = text(
                        f"""\
                        INSERT INTO {METADATA_TABLE_NAME}
                        (metadata_key, metadata_value)
                        VALUES (:key, :value)
                        ON CONFLICT (metadata_key) DO UPDATE
                        SET metadata_value = EXCLUDED.metadata_value"""
                    )
                else:
                    raise RuntimeError(f'Unknown dialect {dialect}')
                params = {
                    'key': key,
                    'value': value,
                }
                self.ctrlr.executeone(statement, params)
def | |
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': 'invalid',
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.private_project.reload()
assert_not_in(self.user_two, self.private_project.contributors)
    @assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
    def test_adds_none_permission_contributor_private_project_admin_uses_default_permissions(self):
        # A null "permission" attribute should fall back to the project's
        # default contributor permissions rather than being rejected.
        data = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'bibliographic': True,
                    'permission': None
                },
                'relationships': {
                    'users': {
                        'data': {
                            'id': self.user_two._id,
                            'type': 'users'
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.private_url, data, auth=self.user.auth)
        assert_equal(res.status_code, 201)
        self.private_project.reload()
        assert_in(self.user_two, self.private_project.contributors)
        # Every default permission must have been granted to the new contributor.
        for permission in permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS:
            assert_true(self.private_project.has_permission(self.user_two, permission))
def test_adds_already_existing_contributor_private_project_admin(self):
self.private_project.add_contributor(self.user_two, auth=Auth(self.user), save=True)
self.private_project.reload()
res = self.app.post_json_api(self.private_url, self.data_user_two,
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
    def test_adds_non_existing_user_private_project_admin(self):
        # Payload references a user id that does not exist -> 404.
        data = {
            'data': {
                'type': 'contributors',
                'attributes': {
                    'bibliographic': True
                },
                'relationships': {
                    'users': {
                        'data': {
                            'id': 'FAKE',
                            'type': 'users'
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.private_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
        self.private_project.reload()
        # Contributor list must be unchanged (only the admin remains).
        assert_equal(len(self.private_project.contributors), 1)
def test_adds_contributor_private_project_non_admin(self):
self.private_project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], auth=Auth(self.user))
res = self.app.post_json_api(self.private_url, self.data_user_three,
auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.private_project.reload()
assert_not_in(self.user_three, self.private_project.contributors)
def test_adds_contributor_private_project_non_contributor(self):
res = self.app.post_json_api(self.private_url, self.data_user_three,
auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.private_project.reload()
assert_not_in(self.user_three, self.private_project.contributors)
def test_adds_contributor_private_project_not_logged_in(self):
res = self.app.post_json_api(self.private_url, self.data_user_two, expect_errors=True)
assert_equal(res.status_code, 401)
self.private_project.reload()
assert_not_in(self.user_two, self.private_project.contributors)
class TestNodeContributorBulkCreate(NodeCRUDTestCase):
    """Validation and permission tests for bulk-creating node contributors."""
    def setUp(self):
        super(TestNodeContributorBulkCreate, self).setUp()
        self.user_three = AuthUserFactory()
        self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
        self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
        # Valid payload: bibliographic admin contributor (user_two).
        self.payload_one = {
            'type': 'contributors',
            'attributes': {
                'bibliographic': True,
                'permission': "admin"
            },
            'relationships': {
                'users': {
                    'data': {
                        'id': self.user_two._id,
                        'type': 'users'
                    }
                }
            }
        }
        # Valid payload: non-bibliographic read-only contributor (user_three).
        self.payload_two = {
            'type': 'contributors',
            'attributes': {
                'bibliographic': False,
                'permission': "read"
            },
            'relationships': {
                'users': {
                    'data': {
                        'id': self.user_three._id,
                        'type': 'users'
                    }
                }
            }
        }
    def test_bulk_create_contributors_blank_request(self):
        # Empty bulk POST body -> 400.
        res = self.app.post_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
    def test_node_contributor_bulk_create_contributor_exists(self):
        # Bulk request containing an already-existing contributor fails whole.
        self.public_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
        res = self.app.post_json_api(self.public_url, {'data': [self.payload_two, self.payload_one]},
                                     auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert "is already a contributor" in res.json['errors'][0]['detail']
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 2)
    def test_node_contributor_bulk_create_logged_out_public_project(self):
        res = self.app.post_json_api(self.public_url, {'data': [self.payload_one, self.payload_two]},
                                     expect_errors=True, bulk=True)
        assert_equal(res.status_code, 401)
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 1)
    def test_node_contributor_bulk_create_logged_in_public_project_project(self):
        res = self.app.post_json_api(self.public_url, {'data': [self.payload_one, self.payload_two]},
                                     auth=self.user.auth, bulk=True)
        assert_equal(res.status_code, 201)
        # Response order is unspecified; compare attribute multisets.
        assert_items_equal([res.json['data'][0]['attributes']['bibliographic'], res.json['data'][1]['attributes']['bibliographic']],
                           [True, False])
        assert_items_equal([res.json['data'][0]['attributes']['permission'], res.json['data'][1]['attributes']['permission']],
                           ['admin', 'read'])
        assert_equal(res.content_type, 'application/vnd.api+json')
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 3)
    def test_node_contributor_bulk_create_logged_out_private_project(self):
        res = self.app.post_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
                                     expect_errors=True, bulk=True)
        assert_equal(res.status_code, 401)
        res = self.app.get(self.private_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 1)
    def test_node_contributor_bulk_create_logged_in_contrib_private_project(self):
        # NOTE(review): expect_errors=True with an asserted 201 is harmless
        # (it only relaxes error raising) but looks like a copy-paste leftover.
        res = self.app.post_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
                                     auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 201)
        assert_equal(len(res.json['data']), 2)
        assert_items_equal([res.json['data'][0]['attributes']['bibliographic'], res.json['data'][1]['attributes']['bibliographic']],
                           [True, False])
        assert_items_equal([res.json['data'][0]['attributes']['permission'], res.json['data'][1]['attributes']['permission']],
                           ['admin', 'read'])
        assert_equal(res.content_type, 'application/vnd.api+json')
        res = self.app.get(self.private_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 3)
    def test_node_contributor_bulk_create_logged_in_non_contrib_private_project(self):
        res = self.app.post_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
                                     auth=self.user_two.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 403)
        # NOTE(review): the follow-up GET checks public_url although the POST
        # targeted private_url — verify this is intentional.
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 1)
    def test_node_contributor_bulk_create_logged_in_read_only_contrib_private_project(self):
        self.private_project.add_contributor(self.user_two, permissions=[permissions.READ], save=True)
        res = self.app.post_json_api(self.private_url, {'data': [self.payload_two]},
                                     auth=self.user_two.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 403)
        # NOTE(review): checks public_url after a private_url POST — confirm.
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 1)
    def test_node_contributor_bulk_create_all_or_nothing(self):
        # One invalid entry must abort the entire bulk request.
        invalid_id_payload = {
            'type': 'contributors',
            'relationships': {
                'users': {
                    'data': {
                        'type': 'users',
                        'id': '12345'
                    }
                }
            }
        }
        res = self.app.post_json_api(self.public_url, {'data': [self.payload_one, invalid_id_payload]},
                                     auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 404)
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 1)
    def test_node_contributor_bulk_create_limits(self):
        # 101 entries exceeds the bulk limit of 100.
        # NOTE(review): no status-code assertion here — consider adding one.
        node_contrib_create_list = {'data': [self.payload_one] * 101}
        res = self.app.post_json_api(self.public_url, node_contrib_create_list,
                                     auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
    def test_node_contributor_bulk_create_no_type(self):
        payload = {'data': [{'relationships': {'users': {'data': {'type': 'users', 'id': self.user_two._id}}}}]}
        res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth,
                                     expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/0/type')
    def test_node_contributor_bulk_create_incorrect_type(self):
        # Wrong JSON-API resource type -> 409 Conflict.
        payload = {
            'data': [{
                'type': 'contributors',
                'relationships': {
                    'users': {
                        'data': {
                            'type': 'Wrong type.',
                            'id': self.user_two._id
                        }
                    }
                }
            }]
        }
        res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 409)
    def test_node_contributor_bulk_create_no_relationships(self):
        payload = {
            'data': [{
                'type': 'contributors',
                'id': self.user_two._id
            }]
        }
        res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
    def test_node_contributor_ugly_payload(self):
        # Non-JSON body -> malformed request error.
        payload = 'sdf;jlasfd'
        res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Malformed request.')
    def test_node_contributor_bulk_create_invalid_permissions_all_or_nothing(self):
        # An invalid permission value in one entry aborts the whole request.
        payload = {
            'type': 'contributors',
            'attributes': {
                'permission': 'super-user',
                'bibliographic': True
            },
            'relationships': {
                'users': {
                    'data': {
                        'type': 'users',
                        'id': self.user_two._id
                    }
                }
            }
        }
        payload = {'data': [self.payload_two, payload]}
        res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(len(res.json['data']), 1)
class TestNodeContributorBulkUpdate(NodeCRUDTestCase):
    """Bulk PUT (full update) of node contributors.

    Fixtures: user_two and user_three are added as read-only, visible
    contributors to both the public and the private project; payload_one and
    payload_two are update documents that would promote them to admin/write.
    The failure tests verify the API is all-or-nothing: after a failed bulk
    request a GET still shows ['admin', 'read', 'read'] (the creator plus the
    two untouched read-only contributors).
    """
    def setUp(self):
        super(TestNodeContributorBulkUpdate, self).setUp()
        self.user_three = AuthUserFactory()
        self.user_four = AuthUserFactory()
        self.public_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
        self.public_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
        self.private_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
        self.private_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
        self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
        self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
        # Update document: promote user_two to admin, bibliographic.
        self.payload_one = {
            'id': self.user_two._id,
            'type': 'contributors',
            'attributes': {
                'bibliographic': True,
                'permission': "admin"
            }
        }
        # Update document: promote user_three to write, non-bibliographic.
        self.payload_two = {
            'id': self.user_three._id,
            'type': 'contributors',
            'attributes': {
                'bibliographic': False,
                'permission': "write"
            }
        }
    def test_bulk_update_contributors_blank_request(self):
        """A bulk PATCH with no body is a 400."""
        res = self.app.patch_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
    def test_bulk_update_contributors_dict_instead_of_list(self):
        """'data' must be a list in bulk mode; a bare dict is a 400."""
        res = self.app.put_json_api(self.public_url, {'data': self.payload_one},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
    def test_bulk_update_contributors_public_project_one_not_found(self):
        """One unknown id fails the whole bulk update; permissions unchanged."""
        invalid_id = {
            'id': '12345',
            'type': 'contributors',
            'attributes': {}
        }
        empty_payload = {'data': [invalid_id, self.payload_one]}
        res = self.app.put_json_api(self.public_url, empty_payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to update.')
        res = self.app.get(self.public_url)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
                           ['admin', 'read', 'read'] )
    def test_bulk_update_contributors_public_projects_logged_out(self):
        """Anonymous bulk update of a public project is a 401; nothing changes."""
        res = self.app.put_json_api(self.public_url, {'data': [self.payload_one, self.payload_two]},
                                    expect_errors=True, bulk=True)
        assert_equal(res.status_code, 401)
        res = self.app.get(self.public_url, auth=self.user.auth)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
                           ['admin', 'read', 'read'])
    def test_bulk_update_contributors_public_projects_logged_in(self):
        """Admin can bulk-update both contributors on a public project."""
        res = self.app.put_json_api(self.public_url, {'data': [self.payload_one, self.payload_two]},
                                    auth=self.user.auth, bulk=True)
        assert_equal(res.status_code, 200)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission']],
                           ['admin', 'write'])
    def test_bulk_update_contributors_private_projects_logged_out(self):
        """Anonymous bulk update of a private project is a 401; nothing changes."""
        res = self.app.put_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
                                    expect_errors=True, bulk=True)
        assert_equal(res.status_code, 401)
        res = self.app.get(self.private_url, auth=self.user.auth)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
                           ['admin', 'read', 'read'])
    def test_bulk_update_contributors_private_projects_logged_in_contrib(self):
        """Admin contributor can bulk-update on a private project."""
        res = self.app.put_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
                                    auth=self.user.auth, bulk=True)
        assert_equal(res.status_code, 200)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission']],
                           ['admin', 'write'])
    def test_bulk_update_contributors_private_projects_logged_in_non_contrib(self):
        """A non-contributor gets 403; permissions unchanged."""
        res = self.app.put_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
                                    auth=self.user_four.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 403)
        res = self.app.get(self.private_url, auth=self.user.auth)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
                           ['admin', 'read', 'read'])
    def test_bulk_update_contributors_private_projects_logged_in_read_only_contrib(self):
        """A read-only contributor gets 403; permissions unchanged."""
        res = self.app.put_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
                                    auth=self.user_two.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 403)
        res = self.app.get(self.private_url, auth=self.user.auth)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
                           ['admin', 'read', 'read'])
    def test_bulk_update_contributors_projects_send_dictionary_not_list(self):
        """A dict payload in bulk mode reports the expected serializer error."""
        res = self.app.put_json_api(self.public_url, {'data': self.payload_one},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
    def test_bulk_update_contributors_id_not_supplied(self):
        """A bulk update item without 'id' is a 400 pointing at /data/0/id."""
        res = self.app.put_json_api(self.public_url, {'data': [{'type': 'contributors', 'attributes': {}}]},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(len(res.json['errors']), 1)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/0/id')
        assert_equal(res.json['errors'][0]['detail'], "This field may not be null.")
    def test_bulk_update_contributors_type_not_supplied(self):
        """A bulk update item without 'type' is a 400 pointing at /data/0/type."""
        res = self.app.put_json_api(self.public_url, {'data': [{'id': self.user_two._id, 'attributes': {}}]},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(len(res.json['errors']), 1)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/0/type')
        assert_equal(res.json['errors'][0]['detail'], "This field may not be null.")
    def test_bulk_update_contributors_wrong_type(self):
        """A mismatched 'type' is a 409 conflict."""
        invalid_type = {
            'id': self.user_two._id,
            'type': 'Wrong type.',
            'attributes': {}
        }
        res = self.app.put_json_api(self.public_url, {'data': [invalid_type]},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 409)
    def test_bulk_update_contributors_wrong_id(self):
        """An unknown contributor id is a 400 with 'Could not find' detail."""
        invalid_id = {
            'id': '12345',
            'type': 'contributors',
            'attributes': {}
        }
        res = self.app.put_json_api(self.public_url, {'data': [invalid_id]},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to update.')
    def test_bulk_update_contributors_limits(self):
        """More than 100 items in one bulk update hits the bulk limit."""
        contrib_update_list = {'data': [self.payload_one] * 101}
        res = self.app.put_json_api(self.public_url, contrib_update_list, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
    def test_bulk_update_contributors_invalid_permissions(self):
        """An invalid permission value fails the whole batch; nothing changes."""
        res = self.app.put_json_api(self.public_url, {'data': [self.payload_two, {'id': self.user_two._id, 'type': 'contributors', 'attributes': {'permission': 'super-user'}}]},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], '"super-user" is not a valid choice.')
        res = self.app.get(self.public_url, auth=self.user.auth)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
                           ['admin', 'read', 'read'])
    def test_bulk_update_contributors_invalid_bibliographic(self):
        """A non-boolean 'bibliographic' fails the whole batch; nothing changes."""
        res = self.app.put_json_api(self.public_url, {'data': [self.payload_two, {'id': self.user_two._id, 'type': 'contributors', 'attributes': {'bibliographic': 'true and false'}}]},
                                    auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], '"true and false" is not a valid boolean.')
        res = self.app.get(self.public_url, auth=self.user.auth)
        data = res.json['data']
        assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
                           ['admin', 'read', 'read'])
class TestNodeContributorBulkPartialUpdate(NodeCRUDTestCase):
    def setUp(self):
        """Add two read-only contributors to both projects and build the
        bulk PATCH payloads that would promote them to admin/write."""
        super(TestNodeContributorBulkPartialUpdate, self).setUp()
        self.user_three = AuthUserFactory()
        self.user_four = AuthUserFactory()
        self.public_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
        self.public_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
        self.private_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
        self.private_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
        self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
        self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
        # Partial-update document: promote user_two to admin, bibliographic.
        self.payload_one = {
            'id': self.user_two._id,
            'type': 'contributors',
            'attributes': {
                'bibliographic': True,
                'permission': "admin"
            }
        }
        # Partial-update document: promote user_three to write, non-bibliographic.
        self.payload_two = {
            'id': self.user_three._id,
            'type': 'contributors',
            'attributes': {
                'bibliographic': False,
                'permission': "write"
            }
        }
def test_bulk_partial_update_contributors_blank_request(self):
res = self.app.patch_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_partial_update_contributors_public_project_one_not_found(self):
invalid_id = {
'id': '12345',
'type': 'contributors',
'attributes': {}
}
empty_payload = {'data': [invalid_id, self.payload_one]}
res = self.app.patch_json_api(self.public_url, empty_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to update.')
res = self.app.get(self.public_url)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'] )
def test_bulk_partial_update_contributors_public_projects_logged_out(self):
res = self.app.patch_json_api(self.public_url,
{'data': [self.payload_one, self.payload_two]}, bulk=True, expect_errors=True)
assert_equal(res.status_code, 401)
res | |
# <gh_stars>0
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
import opt_dash
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()

# Load the job trace and give each job a cumulative Poisson(30) arrival time.
with open('../job_trace/job_queue_sc_50.json', 'r') as fp: #TODO
    queue = json.load(fp)
queue_dict = {}
arrival_time = 0
for item in queue:
    arrival_time += np.random.poisson(30)
    queue_dict[item] = arrival_time
queue_timer = time.time()
queue_delay = {str(item): 0 for item in queue}

# Predicted (simulated) per-batch times for each GPU type.
with open('batch_times/K80_batch_time_sc.json', 'r') as fp: #TODO
    K80_batch_pred = json.load(fp)
with open('batch_times/V100_batch_time_sc.json', 'r') as fp:
    V100_batch_pred = json.load(fp)
# Perturb each prediction by a fixed relative error (3.5% on K80, 2.3% on
# V100) with a random sign, to simulate predictor inaccuracy.
# NOTE(review): despite the original comments this is NOT Gaussian noise —
# the magnitude is fixed and only the sign is random.
for key, value in K80_batch_pred.items():
    pred_error = value * 0.035
    direction = 1 if random.random() < 0.5 else -1
    K80_batch_pred[key] = round(value + direction*pred_error, 3)
for key, value in V100_batch_pred.items():
    pred_error = value * 0.023
    direction = 1 if random.random() < 0.5 else -1
    V100_batch_pred[key] = round(value + direction*pred_error, 3)

# Jobs that require two GPUs on the same node.
multigpu_list = []#['1', '2', '3']#, '4', '5', '6', '7'] #TODO

# Per-job bookkeeping, all keyed by the job id as a string.
job_start = {} #{'49': time1, '15': time2...}
JCT = {str(item): 0 for item in queue}                 # job completion times
completion = {str(item): 0 for item in queue}          # fraction completed
overhead = {str(item): 0 for item in queue}            # total overhead seconds
ovhd_start = {str(item): 0 for item in queue}          # overhead phase timestamps
b_start = {str(item): 0 for item in queue}
c_start = {str(item): 0 for item in queue}
d_start = {str(item): 0 for item in queue}
# Per-phase overhead samples, e.g. {1: [10, 12, ...], 2: [xx]}
ovhd_a = {str(item): [] for item in queue}
ovhd_b = {str(item): [] for item in queue}
ovhd_c = {str(item): [] for item in queue}
ovhd_d = {str(item): [] for item in queue}
ovhd_total = {str(item): [] for item in queue}
k80_1st = {str(item): [] for item in queue}            # first-batch times on K80
v100_1st = {str(item): [] for item in queue}           # first-batch times on V100
num_mig = {str(item): 0 for item in queue}             # migration count
queue_start = {str(item): 0 for item in queue}
queue_time = {str(item): 0 for item in queue}
V100_batch_time = {str(item): 0 for item in queue}     # measured batch times
K80_batch_time = {str(item): 0 for item in queue}
V100_1st_ovhd = {str(item): 0 for item in queue}
K80_1st_ovhd = {str(item): 0 for item in queue}
K80_start_time = {str(item): 0 for item in queue}
V100_start_time = {str(item): 0 for item in queue}
promote_start_time = {str(item): 0 for item in queue}
demote_list = []
job_remaining_batch = {str(item): 0 for item in queue}
K80_time = {str(item): 0 for item in queue}            # accumulated time per GPU type
V100_time = {str(item): 0 for item in queue}
gpu_usage_time = [] # don't initialize this
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {str(item): 0 for item in queue}        # measured V100/K80 speedup
birthplace = {str(item): 'none' for item in queue}     # GPU type job first ran on

# Cluster shape and GPU slot state ('idle' or the job id running there).
index = 0
K80_cap = 8 #TODO
V100_cap = 4
K80_used = 0
V100_used = 0
K80_per_node = 8
V100_per_node = 4
K80_job = {str(i): 'idle' for i in range(K80_cap)}
V100_job = {str(i): 'idle' for i in range(V100_cap)}
step1_job = []
step2_job = []
pc_job = []
K80_node = ['c2178']#, 'c2182']
V100_node = ['d1003']#, 'd1015']
host_node = 'c0145'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
run_log = open('run.log','w')
# function to detect if there are two free or reserved GPUs in a node
# returns an empty list if there is none, otherwise returns list with gpu id in V100/K80_jobs
def detect_2_gpus(gpu_dict, gpu_per_node, status='idle'):
    """Find two GPUs with the given status on the same physical node.

    :param gpu_dict: mapping of gpu id (str) -> job id or status string
        (K80_job / V100_job style; insertion order is the gpu index order)
    :param gpu_per_node: number of GPUs per physical node
    :param status: slot state to search for (default 'idle')
    :return: list with the ids of the first two matching GPUs on the first
        node that has at least two, or an empty list if no node does.
    """
    job_list = list(gpu_dict.values())
    num_nodes = int(len(job_list) / gpu_per_node)
    for node in range(num_nodes):
        start = node * gpu_per_node
        sliced_list = job_list[start:start + gpu_per_node]
        if sliced_list.count(status) >= 2:
            indexs = [j for j, e in enumerate(sliced_list) if e == status]
            # Bug fix: the original returned ALL matching ids on the node even
            # though the function contract (and the inline comment) promise
            # "only the first two".
            return [str(j + start) for j in indexs[:2]]
    return []
def K80_LUT(gpu):
    """Map a global K80 gpu index to (node hostname, local gpu id).

    Uses the module-level K80_per_node constant instead of the hard-coded 8
    the original duplicated, so the node size is defined in one place.
    """
    node_idx, local_idx = divmod(int(gpu), K80_per_node)
    real_node = K80_node[node_idx]
    real_gpu = str(local_idx)
    return real_node, real_gpu
def V100_LUT(gpu):
    """Map a global V100 gpu index to (node hostname, local gpu id).

    Mirrors K80_LUT, using the module-level V100_per_node constant instead of
    the hard-coded 4.
    """
    node_idx, local_idx = divmod(int(gpu), V100_per_node)
    real_node = V100_node[node_idx]
    real_gpu = str(local_idx)
    return real_node, real_gpu
def send_signal(node, cmd):
    """Send *cmd* to the daemon on *node* over TCP and wait for a 'success' ack.

    Blocks until the remote side replies with a message containing 'success'.

    :raises ConnectionError: if the peer closes the connection before
        acknowledging. (The original looped forever in that case, because
        recv() returns b'' on a closed socket and the loop only slept.)
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    port = 10000
    server_address = (node, int(port))
    print('connecting to {} port {}'.format(*server_address), file=run_log, flush=True)
    sock.connect(server_address)
    try:
        message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
        print('sending {!r}'.format(message), file=run_log, flush=True)
        sock.sendall(message)
        while True:
            data = sock.recv(32)
            if not data:
                # Peer closed the socket without acknowledging; fail fast
                # instead of spinning on empty reads forever.
                raise ConnectionError('connection closed before success ack')
            if 'success' in data.decode('utf-8'):
                break
            print('waiting for success signal', file=run_log, flush=True)
            time.sleep(1)
    finally:
        sock.close()
def get_avail_id(gpu_dict):
    """Return the ids (keys) of all GPUs currently marked 'idle'.

    Input is a K80_job / V100_job style dict mapping gpu id -> job id or
    'idle'; insertion order of the dict is preserved in the result.
    """
    return [gpu_id for gpu_id, job in gpu_dict.items() if job == 'idle']
# 2-gpu jobs in new_pool have duplicated items
# returns mapping of jobs in "new_pool" to GPUs
def GPU_placement(GPU_avail, new_pool, gpu_type='K80', raise_error=True):
    """Map jobs in *new_pool* onto the free GPUs in *GPU_avail*.

    :param GPU_avail: list of free gpu ids (strings, global index order)
    :param new_pool: jobs to place; a 2-GPU job appears TWICE in this list,
        and is recognised by membership in the module-level ``multigpu_list``
    :param gpu_type: 'K80' or 'V100'; selects the GPUs-per-node constant used
        for the same-node (locality) pairing
    :param raise_error: raise ValueError when the jobs cannot fit; otherwise
        return the (possibly empty) mapping built so far
    :return: dict {job: 'gpu'} for 1-GPU jobs, {job: 'gpu1,gpu2'} for 2-GPU jobs
    """
    mapping = {}
    skip = False
    res_group = [] # group reserved GPU together
    # Walk the sorted free-GPU list and group adjacent ids that live on the
    # same node into pairs; everything else stays a singleton group.
    for i in range(len(GPU_avail)):
        if skip:
            # second element of a pair consumed on the previous iteration
            skip = False
            continue
        else:
            # two gpus from the same node
            if gpu_type == 'K80':
                GPU_per_node = K80_per_node
            elif gpu_type == 'V100':
                GPU_per_node = V100_per_node
            if i!=len(GPU_avail)-1 and int(GPU_avail[i])//GPU_per_node==int(GPU_avail[i+1])//GPU_per_node:
                skip = True
                res_group.append([GPU_avail[i], GPU_avail[i+1]])
            else:
                res_group.append([GPU_avail[i]])
    group_1gpu = [i for i in res_group if len(i) == 1] # 1gpu id
    group_2gpu = [i for i in res_group if len(i) == 2] # 2gpu id [['1','2'],['4','7']]
    pool_1gpu = [i for i in new_pool if i not in multigpu_list] # 1gpu job
    pool_2gpu = [i for i in new_pool if i in multigpu_list] # 2gpu job
    # Feasibility: enough free GPUs overall, and enough same-node pairs to
    # host every 2-GPU job (each pair holds one such job).
    if len(GPU_avail) < len(new_pool) or 2*len(group_2gpu) < len(pool_2gpu):
        if raise_error:
            if gpu_type == 'K80':
                raise ValueError('Bug with K80 placement for new jobs, more jobs than free gpus')
            elif gpu_type == 'V100':
                raise ValueError('Bug with V100 placement for new jobs, more jobs than free gpus')
        else:
            return mapping
    # if there is no 2-gpu job
    if set(new_pool).isdisjoint(multigpu_list):
        for i in range(len(new_pool)):
            mapping[new_pool[i]] = GPU_avail[i]
    else:
        # first, fill in all 1gpu slots with 1-gpu jobs as much as possible
        for i in group_1gpu[:]:
            if len(pool_1gpu) > 0:
                mapping[pool_1gpu[0]] = i[0]
                pool_1gpu.pop(0)
        # then give same-node pairs to 2-GPU jobs; leftover pairs host up to
        # two remaining 1-GPU jobs.
        for i in group_2gpu[:]:
            if len(pool_2gpu) > 1:
                mapping[pool_2gpu[0]] = ','.join(i)
                # remove BOTH duplicated entries of the placed 2-GPU job
                pool_2gpu = [i for i in pool_2gpu if i != pool_2gpu[0]]
            elif len(pool_1gpu) > 0:
                mapping[pool_1gpu[0]] = i[0]
                if len(pool_1gpu) > 1:
                    mapping[pool_1gpu[1]] = i[1]
                    pool_1gpu.pop(1)
                pool_1gpu.pop(0)
    return mapping
#aa = K80_placement(['0','1','2','3','4'], ['3','3','1','1','50'])
# checks if 2-GPU jobs can be promoted/demoted without locality issue
# if cannot, remove 2-GPU job and corresponding 1-GPU job until all jobs can fit
# then returns new_K80_avail, new_V100_avail, new_promoted, new_demoted
def locality_check(K80_avail, V100_avail, promoted, demoted):
'''
K80/V100_avail: ['1', '2', '5']
promoted/demoted: ['7','7','50','70']
'''
for item in range(2):#[K80_avail, V100_avail]:
skip = False
res_group = [] # group reserved GPU together
GPU_avail = [K80_avail,V100_avail][item]
for i in range(len(GPU_avail)):
if skip:
skip = False
continue
else:
# two gpus from the same node
if item == 0:
GPU_per_node = K80_per_node
elif item == 1:
GPU_per_node = V100_per_node
if i!=len(GPU_avail)-1 and int(GPU_avail[i])//GPU_per_node==int(GPU_avail[i+1])//GPU_per_node:
skip = True
res_group.append([GPU_avail[i], GPU_avail[i+1]])
else:
res_group.append([GPU_avail[i]])
if item == 0:
K80_1gpu = [i for i in res_group if len(i) == 1] # 1gpu id
K80_2gpu = [i for i in res_group if len(i) == 2] # 2gpu id [['1','2'],['4','7']]
elif item == 1:
V100_1gpu = [i for i in res_group if len(i) == 1] # 1gpu id
V100_2gpu = [i for i in res_group if len(i) == 2] # 2gpu id
promoted_1gpu = [i for i in promoted if i not in multigpu_list] # 1gpu job
promoted_2gpu = [i for i in | |
import pytest
import sys
import time
from appdirs import user_config_dir
import os
import attr
import tempfile
import json
from functools import partial
dn = os.path.dirname
import devapps
# sys.path.insert(0, dn(dn(os.path.abspath(__file__))))
# import setup_method as devapps
# default providers:
CLI, Env, File = devapps.CLI, devapps.Env, devapps.File
Exc = devapps.Exc
# Captured log events (storelog below appends into this) for test assertions.
log = []
# Convenience aliases for the devapps tree-navigation helpers.
root = devapps.root
parent = devapps.parent
inner = devapps.inner
def storelog(_, __, ev, store=log):
    """Structlog-style processor: keep a copy of every event dict in *store*."""
    captured = dict(ev)
    store.append(captured)
    return ev
from devapps.common import set_log, breakpoint
# Route all devapps log events through storelog so tests can inspect them
# via the module-level `log` list.
logger = set_log()
logger._processors.insert(2, storelog)
configure = partial(devapps.configure, log=logger)
# Throw-away config file path, unique per test run; removed in teardown.
fn_test = tempfile.mkstemp()[1] + '.test_mdv'
def clear(l):
    """Empty the list *l* in place (py2-compatible; list.clear is py3-only)."""
    del l[:]
def write_file(d, fn=fn_test):
    """Serialize the dict *d* as JSON into *fn* (defaults to the test file)."""
    with open(fn, 'w') as fd:
        json.dump(d, fd)
def test_to_shorts():
    """Short-form expansion: unique prefixes resolve, ambiguous ones raise."""
    ctx = {}
    shorts = devapps.to_shorts(['foo', 'foo_e'])
    ctx['shorts'] = shorts
    shorts.pop('_orig_shorts_', 0)
    assert shorts == {'f': 'foo', 'fe': 'foo_e', 'foo': 'foo', 'foo_e': 'foo_e'}
    # 'fo' matches both foo and foo_e -> non_unique error
    with pytest.raises(Exception):
        devapps.short_to_long(0, {'fo': 1}, 0, ctx)
    assert log[-1]['event'] == Exc.non_unique
    # exact name, unique prefix and underscore-prefix all resolve
    for given, expected in (
        ({'foo': 1}, {'foo': 1}),
        ({'f': 1}, {'foo': 1}),
        ({'foo_': 1}, {'foo_e': 1}),
    ):
        resolved = devapps.short_to_long(0, given, 0, ctx)
        resolved.pop('_orig_shorts_', 0)
        assert resolved == expected
def userdir(fn):
    """Return *fn* joined under the platform user-config directory.

    (PEP 8 E731: a named ``def`` instead of a lambda assignment.)
    """
    return os.path.join(user_config_dir(), fn)
class TestFile(object):
    """Behaviour of the File configuration provider."""

    def test_file_provider_none(self):
        """File(None) is an inert provider with no filename."""
        provider = devapps.File(None)
        assert provider.filename == None

    def test_file_non_exists(self):
        """An explicitly named, missing config file raises file_not_found."""
        missing = 'xxxxxf%s' % time.time()
        with pytest.raises(Exception) as einfo:
            devapps.File(missing)
        assert einfo.value.args[0] == Exc.file_not_found
        assert einfo.value.args[1]['fn'] == userdir(missing)

    def test_file_from_App(self):
        """No Error if config file not present"""
        class FooApp:
            pass
        provider = devapps.File(FooApp)
        assert provider.filename == userdir('FooApp.json')
        # ...unless the caller opts out of ignoring missing files.
        with pytest.raises(Exception) as einfo:
            devapps.File(FooApp, ignore_missing=False)
        assert einfo.value.args[0] == Exc.file_not_found
class TestCliPreParser(object):
    """CLI pre-parser: raw argv tokens -> nested dict, plus switch mapping."""

    def setup_method(self):
        self.c = CLI.pre_parse_cli

    def test_nested(self):
        """the preparser just builds a deep dict, no knowledge of anything else"""
        parsed = self.c('foo=b=ar -cd baz -d a.b.c=d i=1 f=1.1 a.B=false'.split())
        expected = {
            '-cd': 'baz',
            '-d': 'is_set',
            'a': {'B': 'false', 'b': {'c': 'd'}},
            'f': '1.1',
            'foo': 'b=ar',
            'i': '1',
        }
        assert parsed == expected

    def test_switches(self):
        """Known switches expand to long names; unknown switches raise."""
        argv = ['foo=bar', '-c', 'fn', 'x', '-C']
        switches = {'-c': 'config_file', '-C': 'Cfg'}
        cli = devapps.CLI(argv, switches)
        assert cli.argvd == {
            'Cfg': 'is_set',
            'config_file': 'fn',
            'foo': 'bar',
            'x': 'is_set',
        }
        with pytest.raises(Exception):
            argv.append('-d')
            devapps.CLI(argv, switches)
        assert log[-1]['event'] == Exc.not_a_switch
        assert '-d' in str(log[-1])

    def test_switches2(self):
        """Bare tokens without '=' become 'is_set' flags."""
        argv = ['foo=bar', 'a', 'b']
        parsed = devapps.CLI(argv).argvd
        assert parsed == {'a': 'is_set', 'b': 'is_set', 'foo': 'bar'}
class TestConfigure(object):
def teardown_method(self):
os.unlink(fn_test) if os.path.exists(fn_test) else 0
    def setup_method(self):
        """Build the MyApp fixture tree used by every TestConfigure test.

        MyApp carries one attribute of each supported kind (bool/str/int/
        float/dict/list, with and without defaults, plus attr.ib fields with
        converter/metadata), a nested Inner class with a deeper ``deep``
        class, and do_* runner functions whose defaults can be preset from
        config files.
        """
        # Long description used in metadata; shows up in generated -hh help.
        ld = """
        some very loooooooooooooooooooong long
        description
        """
        class Inner:
            b_i_dflt = True
            s_i_dflt = 'inner_str'
            def do_inner(self, argument, foo=int):
                # Delegates to the root app's do_func1 (exercises root()).
                return {'inner.do_inner': root(self).do_func1((argument, foo))}
            class deep:
                b_d_dflt = True
                s_d_dflt = 'deep_str'
                f_d_float = attr.ib(
                    1.0, metadata={'long_descr': ld, 'descr': 'descr'}
                )
                def do_deep(self, argument, foo1=44):
                    """
                    Does deep stuff
                    """
                    # Exercises root() and parent() navigation from 2 levels deep.
                    return {
                        'deep_deep': (
                            argument,
                            foo1,
                            {
                                'root config': (
                                    root(self).b_no_dflt,
                                    root(self).i_no_dflt,
                                    root(self).i_dflt,
                                )
                            },
                            parent(self).do_inner((argument, foo1)),
                        )
                    }
                def do_deep_func1(cls, foo, bar=float, baz=bool):
                    """does deep func1 business"""
                    # func1
                    return {
                        'deep_func1': (foo, bar, baz, cls.do_deep(foo * 2))
                    }
        class MyApp:
            # fmt: off
            attr_inst = attr.ib(1.1, converter=lambda x: x+x)
            b_no_dflt = attr.ib(type=bool, metadata={'descr': 'a bool', 'long_descr': ld})
            b_dflt_True = True
            b_dflt_False = False
            s_no_dflt = str
            s_dflt = 'foo'
            i_no_dflt = int
            i_dflt = 42
            f_no_dflt = float
            f_dflt = 42.0
            d_no_dflt = dict
            l_no_dflt = list
            inner = Inner
            # fmt: on
            def do_run(cls, func_param):
                """return the complete setup of the app as one string"""
                cls = attr.asdict(cls)
                return dict(locals())
            def another_func(cls, foo):
                return cls.f_dflt
            def do_func1(cls, foo, bar1=float, baz=bool, *args, **kw):
                """does foo"""
                d = dict(locals())
                d.pop('cls')
                assert cls.another_func('foo') == cls.f_dflt
                return {'app.do_func1': d}
        # for -h tests:
        MyApp.__doc__ = """
        Test App
        Does stuff
        """
        # test perf:
        # s = 'bb_a'
        # for i in range(1, 1000):
        # s += '_a'
        # setattr(MyApp, s, i)
        # setattr(MyApp.inner, s, i)
        self.App = MyApp
        self.configure = partial(configure, MyApp)
        clear(log)
    def test_ok(self, get_argv=False):
        """Happy path: every *_no_dflt option supplied via CLI short forms.

        With get_argv=True only returns the argv list, so the Env/File
        variants can reuse the same baseline arguments.
        """
        # Short forms: lnd=l_no_dflt, dnd=d_no_dflt, bnd=b_no_dflt, ...,
        # i.bid=inner.b_i_dflt, i.d.sdd=inner.deep.s_d_dflt, ai=attr_inst.
        argv = 'lnd=1,dnd=a:b:2,bnd=True,snd=sndfoo,ind=23,fnd=2.3'
        argv += ',i.bid=0,i.sid=inner_cust_str'
        argv += ',i.d.bdd=0,i.d.sdd=my_deep_str'
        argv += ',ai=1.2'
        argv = argv.split(',')
        if get_argv:
            return argv
        res = self.configure(CLI(argv, set_runner_func=False))[0]().do_run(
            'myp'
        )
        # attr_inst converter doubles 1.2 -> 2.4; 'a:b:2' parses to
        # {'a': 'b:2'}; list value '1' becomes ['1'].
        assert res == {
            'cls': {
                'attr_inst': 2.4,
                'b_dflt_False': False,
                'b_dflt_True': True,
                'b_no_dflt': True,
                'd_no_dflt': {'a': 'b:2'},
                'f_dflt': 42.0,
                'f_no_dflt': 2.3,
                'i_dflt': 42,
                'i_no_dflt': 23,
                'inner': {
                    'b_i_dflt': False,
                    'deep': {
                        'f_d_float': 1.0,
                        'b_d_dflt': False,
                        's_d_dflt': 'my_deep_str',
                    },
                    's_i_dflt': 'inner_cust_str',
                },
                'l_no_dflt': ['1'],
                's_dflt': 'foo',
                's_no_dflt': 'sndfoo',
            },
            'func_param': 'myp',
        }
def test_dict_wrong_fmt(self):
argv = 'lnd=1,b_no_dflt=True,d_no_dflt=2222223'.split(',')
with pytest.raises(Exception) as einfo:
res = self.configure(CLI(argv))
assert log[-1]['event'] == Exc.cannot_cast_to_dict
assert '2222223' in str(log[-1])
def test_dict_lit_eval(self):
sargv = (
'lnd=1,b_no_dflt=True,d_no_dflt={"a": "b"},f_no_dflt=1,ind=1,snd=a'
)
argv = sargv.split(',')
# with pytest.raises(Exception) as einfo:
res = self.configure(CLI(argv))
print(res[0]())
assert attr.asdict(res[0]()) == {
'attr_inst': 2.2,
'b_dflt_False': False,
'b_dflt_True': True,
'b_no_dflt': True,
'd_no_dflt': {'a': 'b'},
'f_dflt': 42.0,
'f_no_dflt': 1.0,
'i_dflt': 42,
'i_no_dflt': 1,
'inner': {
'b_i_dflt': True,
'deep': {
'b_d_dflt': True,
'f_d_float': 1.0,
's_d_dflt': 'deep_str',
},
's_i_dflt': 'inner_str',
},
'l_no_dflt': ['1'],
's_dflt': 'foo',
's_no_dflt': 'a',
}
def test_sh_help(self):
sargv = 'b_dflt_False=True,b_no_dflt=True,d_no_dflt={"a": "b"}'
sargv += ',f_no_dflt=1,ind=1,snd=a,-hh'
# with pytest.raises(Exception) as einfo:
# with self.assertRaises(SystemExit):
app, func = self.configure(CLI(sargv.split(',')))[:2]
res = func(app())
print(res)
assert '# MyApp\n' in res
assert '# deep_func1\n' in res
assert 'ooooong' in res
    def ok_with_env(self, pref='', e=os.environ):
        """Like test_ok but layers an Env provider over the CLI args.

        Env values override class defaults (b_dflt_True, f_d_float) while CLI
        arguments win over Env (s_d_dflt keeps the CLI value). *pref* tests a
        custom environment-variable prefix.
        """
        argv = self.test_ok(get_argv=True)
        m = {}
        m[pref + 'MyApp_b_dflt_True'] = '0'
        m[pref + 'MyApp_inner_deep_f_d_float'] = '1.2'
        m[pref + 'MyApp_inner_deep_s_d_dflt'] = 'not_taken_is_in_cli_args'
        e.update(m)
        exp = {
            'cls': {
                'attr_inst': 2.4,
                'b_dflt_False': False,
                'b_dflt_True': False,
                'b_no_dflt': True,
                'd_no_dflt': {'a': 'b:2'},
                'f_dflt': 42.0,
                'f_no_dflt': 2.3,
                'i_dflt': 42,
                'i_no_dflt': 23,
                'inner': {
                    'b_i_dflt': False,
                    'deep': {
                        'b_d_dflt': False,
                        'f_d_float': 1.2,
                        's_d_dflt': 'my_deep_str',
                    },
                    's_i_dflt': 'inner_cust_str',
                },
                'l_no_dflt': ['1'],
                's_dflt': 'foo',
                's_no_dflt': 'sndfoo',
            },
            'func_param': 'myenvp',
        }
        env = 'MyApp' if not pref else pref + 'MyApp'
        res = self.configure((CLI(argv, set_runner_func=False), Env(env)))[
            0
        ]().do_run('myenvp')
        assert res == exp
        # Clean the environment so other tests are unaffected.
        for k in m:
            del os.environ[k]
    def test_ok_with_env(self):
        """Env provider with the default 'MyApp' variable prefix."""
        self.ok_with_env()
    def test_ok_with_env_cust_prefix(self):
        """Env provider with a custom 'fooMyApp' variable prefix."""
        self.ok_with_env(pref='foo')
def test_unmatched_outer(self):
# fmt:off
class App:
foo_bar_baz = 1
class Inner: abd = 'd'
# fmt:on
with pytest.raises(Exception) as einfo:
configure(
App, CLI(['fbb=2', 'out_unmatched'], set_runner_func=False)
)
assert log[-1]['event'] == Exc.unmatched
assert 'out_unmatched' in str(log[-1])
def test_unmatched_inner(self):
# fmt:off
class App:
foo_bar_baz = 1
class Inner: abd = 'd'
# fmt:on
with pytest.raises(Exception) as einfo:
configure(App, CLI(['fbb=2', 'I.inner_unmatched=4']))
assert log[-1]['event'] == Exc.unmatched
assert 'inner_unmatched' in str(log[-1])
def test_insufficient(self):
with pytest.raises(Exception) as einfo:
self.configure(CLI(['bnd=0'], set_runner_func=False))
l = log[-1]
assert l['event'] == Exc.require_value
assert 'no_dflt' in str(l)
def test_short_collision(self):
class App:
foo_bar_baz = 1
foo_baz_baz = 2
with pytest.raises(Exception) as einfo:
# needs an argument, otherwise the cli is not even checked for
# short collisions:
configure(App, CLI(['x'], set_runner_func=False))
assert log[-1]['event'] == Exc.cannot_build_unique_short_form
def test_file_struct(self):
class App:
foo = 1
class Inner:
ifoo = 2
write_file({'foo': 2, 'Inner': {'ifoo': 3}})
app = configure(App, File(fn_test))[0]
app = app()
assert app.foo == 2
assert app.Inner.ifoo == 3
def test_file_preset_function_params(self):
"""
This is a bit of a crazy feature:
We mutate the defaults(!) of functions in the tree according to
what is given in the config file:
"""
argv = self.test_ok(get_argv=True)
# argv = [
# 'lnd=1',
# 'dnd=a:b:2',
# 'bnd=True',
# 'snd=sndfoo',
# 'ind=23',
# 'fnd=2.3',
# 'i.bid=0',
# 'i.sid=inner_cust_str',
# 'i.d.bdd=0',
# 'i.d.sdd=my_deep_str',
# 'ai=1.2',
# ]
write_file(
{
'foo': 2, # non existent, no error for File
'i_no_dflt': 100, # overwritten by cli
'i_dflt': 101, # not overwritten by cli
'func1': {'bar1': 123.2, 'baz': True},
'inner': {
'ifoo': 3,
'inner': {'foo': 42},
'deep': {
'b_d_dflt': False,
'deep': {'foo1': 43},
'deep_func1': {'bar': 1.2, 'baz': False},
},
},
}
)
app = self.configure([CLI(argv), File(fn_test)])[0]
app = app()
# if we do only app.inner.deep then deep (and app)
# would not have the ._root set, so parent() and root() calls would not
# work:
deep = inner(app, 'inner', 'deep')
res = deep.do_deep_func1(foo='myfoo')
# check the functions called to understand the result.
# Note: their defaults have been changed in the configure run!
assert res == {
'deep_func1': (
'myfoo',
1.2,
False,
{
'deep_deep': (
'myfoomyfoo',
43,
{'root config': (True, 23, 101)},
{
'inner.do_inner': {
'app.do_func1': {
'args': (),
'bar1': 123.2,
'baz': True,
'foo': (('myfoomyfoo', 43), 42),
'kw': {},
}
}
},
)
},
)
}
self.setup_method()
argv.append('-hhc')
app, func = self.configure([CLI(argv), File(fn_test)])[:2]
res = | |
c) are not uuid.
for col in [fld for fld in df.columns.difference(["uuid"]) if fld.endswith("id") and dtypes[fld] == "str"]:
# Subset dataframe to required column with non-default and non-"None" values.
series = df.loc[~df[col].isin([defaults[col], "None"]), col]
if len(series):
# Validation 1: ensure ids are 32 digits.
# Compile uuids of flagged records.
flag_uuids = series.loc[series.map(len) != 32].index.values
for uid in flag_uuids:
errors[1].append(f"uuid: '{uid}', based on attribute field: {col}.")
# Validation 2: ensure ids are hexadecimal.
# Compile uuids of flagged records.
hexdigits = set(string.hexdigits)
flag_uuids = series.loc[series.map(lambda uid: not set(uid).issubset(hexdigits))].index.values
for uid in flag_uuids:
errors[2].append(f"uuid: '{uid}', based on attribute field: {col}.")
# Iterate unique id fields.
unique_fields = {"ferrysegid", "roadsegid"}
for col in unique_fields.intersection(set(df.columns)):
# Filter dataframe to required column.
series = df[col]
# Validation 3: ensure unique id fields are unique within their column.
# Compile uuids of flagged records.
flag_uuids = series.loc[series.duplicated(keep=False)].index.values
for uid in flag_uuids:
errors[3].append(f"uuid: '{uid}', based on attribute field: {col}.")
# Validation 4: ensure unique id fields are not "None" nor the default field value.
# Compile uuids of flagged records.
flag_uuids = series.loc[series.isin([defaults[col], "None"])].index.values
for uid in flag_uuids:
errors[4].append(f"uuid: '{uid}', based on attribute field: {col}.")
return errors
def isolated_lines(self, name: str, junction: str = "junction") -> Dict[int, list]:
"""
Identifies the uuids of isolated line segments.
:param str name: NRN dataset name.
:param str junction: NRN dataset name for NRN junction.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
# Filter dataframes to only required fields.
df = self.dframes[name][["uuid", "geometry"]]
junction = self.dframes[junction][["junctype", "geometry"]]
# Validation 1: ensure line segments are connected to at least one other line segment.
# Compile junctions for 'Dead End'.
pts = set(chain([geom.coords[0] for geom in
junction.loc[junction["junctype"] == "Dead End", "geometry"].values]))
# Identify isolated segments.
# Flag records where both endpoints are 'Dead End'.
mask = df["geometry"].map(lambda g: all(map(lambda pt: pt in pts, itemgetter(0, -1)(g.coords))))
# Compile uuids of flagged records, compile error properties.
if sum(mask):
errors[1] = list(map(lambda val: f"uuid: '{val}'", df.loc[mask].index.values))
# Validation 2: identify line segments which connect to another line segment at intermediate / non-endpoint
# vertices.
# Compile all coordinates and their count from across the entire dataset.
df_nodes_all = df["geometry"].map(attrgetter("coords")).map(tuple)
nodes_count = Counter(chain.from_iterable(df_nodes_all.map(set)))
# Filter analysis records to those with > 2 constituent points.
df_nodes = df_nodes_all.loc[df_nodes_all.map(len) > 2]
# Configure duplicated non-endpoints for analysis records relative to the full dataframe.
def non_endpoint_dups(nodes: Tuple[tuple, ...]) -> Union[None, Tuple[Tuple[tuple, ...], Tuple[int, ...]]]:
"""
Returns intermediate / non-endpoint nodes and their dataframe counts if they are duplicated.
:param Tuple[tuple, ...] nodes: tuple of coordinate tuples.
:return Union[None, Tuple[Tuple[tuple, ...], Tuple[int, ...]]]: None or a nested tuple containing a tuple of
all non-endpoint coordinate tuples and a tuple of the frequency of each node within the entire dataset.
"""
counts = itemgetter(*nodes[1:-1])(nodes_count)
if not isinstance(counts, tuple):
counts = (counts,)
counts_valid = tuple(map(lambda count: count > 1, counts))
if any(counts_valid):
return tuple(compress(nodes[1:-1], counts_valid)), tuple(compress(counts, counts_valid))
else:
return None
dups = df_nodes.map(non_endpoint_dups)
dups = dups.loc[~dups.isna()]
# Nest nodes with counts and explode records.
dups = dups.map(lambda vals: tuple(zip(*vals))).explode()
# Compile uuids of flagged records, compile error properties.
for index, data in dups.iteritems():
errors[2].append(f"uuid: '{index}' intersects {data[1] - 1} other line segment(s) at non-endpoint vertex: "
f"{data[0]}.")
return errors
def line_internal_clustering(self, name: str) -> Dict[int, list]:
"""
Validates the distance between adjacent coordinates of line segments.
Validation: line segments must have >= 1x10^(-2) (0.01) meters distance between adjacent coordinates.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
min_distance = 0.01
series = self.dframes_m[name]["geometry"]
# Extract coordinates from geometries.
series_coords = series.map(attrgetter("coords")).map(tuple)
# Filter out records with only 2 constituent points.
series_coords = series_coords.loc[series_coords.map(len) > 2]
if len(series_coords):
# Create ordered coordinate pairs, sorted.
coord_pairs = series_coords.map(ordered_pairs).explode()
# Remove invalid pairs (duplicated adjacent coordinates).
coord_pairs = coord_pairs.loc[coord_pairs.map(lambda pair: pair[0] != pair[1])]
# Calculate distance between coordinate pairs.
coord_dist = coord_pairs.map(lambda pair: euclidean(pair[0], pair[-1]))
# Flag invalid distances and create dataframe with invalid pairs and distances.
flag = coord_dist < min_distance
invalid_df = pd.DataFrame({"pair": coord_pairs.loc[flag], "distance": coord_dist.loc[flag]},
index=coord_dist.loc[flag].index)
if len(invalid_df):
# Compile error properties.
for record in invalid_df.sort_values(by=["uuid", "distance"], ascending=True).itertuples(index=True):
index, coords, distance = attrgetter("Index", "pair", "distance")(record)
# Reproject coordinates back to NRN CRS.
coords = [shapely.ops.transform(self.prj_3348_to_4617, Point(pt)).coords[0] for pt in coords]
errors[1].append(f"uuid: '{index}', coordinates: {coords[0]} and {coords[1]}, are too close: "
f"{distance} meters.")
return errors
def line_length(self, name: str) -> Dict[int, list]:
"""
Validates the minimum feature length of line geometries.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
min_length = 5
series = self.dframes_m[name]["geometry"]
# Validation: ensure line segments are >= 5 meters in length.
flag = series.length < min_length
# Compile error properties.
if sum(flag):
for index, val in series.loc[flag].length.round(2).sort_values(ascending=True).iteritems():
errors[1].append(f"uuid: '{index}' is too short: {val} meters.")
return errors
def line_merging_angle(self, name: str) -> Dict[int, list]:
"""
Validates the merging angle of line segments.
Validation: ensure line segments merge at angles >= 5 degrees.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
# Define function to calculate the angular degrees between two intersecting lines.
def get_angle(ref_pt: tuple, pt1: tuple, pt2: tuple) -> bool:
"""
Validates the angle formed by the 2 points and reference point.
:param tuple ref_pt: coordinate tuple of the reference point.
:param tuple pt1: coordinate tuple
:param tuple pt2: coordinate tuple
:return bool: boolean validation of the angle formed by the 2 points and 1 reference point.
"""
angle_1 = np.angle(complex(*(np.array(pt1) - np.array(ref_pt))), deg=True)
angle_2 = np.angle(complex(*(np.array(pt2) - np.array(ref_pt))), deg=True)
return round(abs(angle_1 - angle_2), 2)
errors = defaultdict(list)
merging_angle = 5
series = self.dframes_m[name]["geometry"]
# Compile line endpoints and their neighbours, convert to uuid-neighbour lookup dict.
endpts_nbrs = series.map(
lambda g: tuple(map(itemgetter(0, 1), itemgetter(0, 1, -2, -1)(attrgetter("coords")(g)))))
uuid_nbr_lookup = endpts_nbrs.to_dict()
# Compile only endpoints.
endpts = endpts_nbrs.map(itemgetter(0, -1))
# Explode point groups, filter to only duplicates, and construct a dataframe of the uuids and coordinates.
pts_exploded = endpts.explode()
pts_dups = pts_exploded.loc[pts_exploded.duplicated(keep=False)]
pts_df = pd.DataFrame({"coords": pts_dups, "uuid": pts_dups.index})
# Proceed only if duplicated points exist.
if len(pts_df):
# Group uuids according to coordinates. Explode and convert to DataFrame, keeping index as column.
grouped_pt_uuid = helpers.groupby_to_list(pts_df, "coords", "uuid")
uuid_pt_df = grouped_pt_uuid.explode().reset_index(drop=False).rename(columns={"index": "pt", 0: "uuid"})
# Compile endpoint-neighbouring points.
# Process: Flag uuids according to duplication status within their group. For unique uuids, configure the
# neighbouring point based on whichever endpoint matches the common group point. For duplicated uuids
# (which represent self-loops), the first duplicate takes the second point, the second duplicate takes the
# second-last point - thereby avoiding the same neighbour being taken twice for self-loop intersections.
dup_flags = {
"dup_none": uuid_pt_df.loc[~uuid_pt_df.duplicated(keep=False), ["uuid", "pt"]],
"dup_first": uuid_pt_df.loc[uuid_pt_df.duplicated(keep="first"), "uuid"],
"dup_last": uuid_pt_df.loc[uuid_pt_df.duplicated(keep="last"), "uuid"]
}
dup_results = {
"dup_none": np.vectorize(
lambda uid, pt:
itemgetter({True: 1, False: -2}[uuid_nbr_lookup[uid][0] == pt])(uuid_nbr_lookup[uid]),
otypes=[tuple])(dup_flags["dup_none"]["uuid"], dup_flags["dup_none"]["pt"]),
"dup_first": dup_flags["dup_first"].map(lambda uid: uuid_nbr_lookup[uid][1]).values,
"dup_last": dup_flags["dup_last"].map(lambda uid: uuid_nbr_lookup[uid][-2]).values
}
uuid_pt_df["pt_nbr"] = None
uuid_pt_df.loc[dup_flags["dup_none"].index, "pt_nbr"] = dup_results["dup_none"]
uuid_pt_df.loc[dup_flags["dup_first"].index, "pt_nbr"] = dup_results["dup_first"]
uuid_pt_df.loc[dup_flags["dup_last"].index, "pt_nbr"] = dup_results["dup_last"]
# Aggregate groups of points and associated neighbours.
grouped_pt_nbrs = helpers.groupby_to_list(uuid_pt_df, "pt", "pt_nbr")
# Configure all point-neighbour and point-uuid combinations.
combos_pt_nbrs = grouped_pt_nbrs.map(lambda vals: combinations(vals, r=2)).map(tuple).explode()
combos_pt_uuid = grouped_pt_uuid.map(lambda vals: combinations(vals, r=2)).map(tuple).explode()
# Prepend reference point to neighbour point tuples, add uuid combinations as index.
combos = pd.Series(
np.vectorize(lambda pt, nbrs: (pt, *nbrs), otypes=[tuple])(combos_pt_nbrs.index, combos_pt_nbrs),
index=combos_pt_uuid.values)
# Calculate the merging angle (in degrees) between each set of points. Filter to invalid records.
angles = combos.map(lambda pts: get_angle(*pts))
results = angles.loc[angles < merging_angle]
# Compile error properties.
if len(results):
for uuids, angle in results.drop_duplicates().sort_values(ascending=True).iteritems():
line1, line2 = itemgetter(0, 1)(uuids)
errors[1].append(f"uuids: '{line1}', '{line2}' merge at too small an angle: {angle} degrees.")
return errors
def line_proximity(self, name: | |
the charm.
kubelet_extra_config = hookenv.config('kubelet-extra-config')
kubelet_extra_config = yaml.safe_load(kubelet_extra_config)
merge_kubelet_extra_config(kubelet_config, kubelet_extra_config)
# Render the file and configure Kubelet to use it
os.makedirs('/root/cdk/kubelet', exist_ok=True)
with open('/root/cdk/kubelet/config.yaml', 'w') as f:
f.write('# Generated by kubernetes-worker charm, do not edit\n')
yaml.dump(kubelet_config, f)
kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml'
else:
# NOTE: This is for 1.9. Once we've dropped 1.9 support, we can remove
# this whole block and the parent if statement.
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = str(ca_crt_path)
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['port'] = '10250'
kubelet_opts['tls-cert-file'] = str(server_crt_path)
kubelet_opts['tls-private-key-file'] = str(server_key_path)
if dns['enable-kube-dns']:
kubelet_opts['cluster-dns'] = dns['sdn-ip']
if is_state('kubernetes-worker.gpu.enabled'):
kubelet_opts['feature-gates'] = 'DevicePlugins=true'
# Workaround for DNS on bionic, for k8s 1.9
# https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/655
resolv_path = os.path.realpath('/etc/resolv.conf')
if resolv_path == '/run/systemd/resolve/stub-resolv.conf':
kubelet_opts['resolv-conf'] = '/run/systemd/resolve/resolv.conf'
if get_version('kubelet') >= (1, 11):
kubelet_opts['dynamic-config-dir'] = '/root/cdk/kubelet/dynamic-config'
# If present, ensure kubelet gets the pause container from the configured
# registry. When not present, kubelet uses a default image location
# (currently k8s.gcr.io/pause:3.4.1).
registry_location = get_registry_location()
if registry_location:
kubelet_opts['pod-infra-container-image'] = \
'{}/pause:3.4.1'.format(registry_location)
configure_kubernetes_service(configure_prefix, 'kubelet', kubelet_opts,
'kubelet-extra-args')
@when('config.changed.ingress')
def toggle_ingress_state():
    '''Mirror the `ingress` config option into the
    kubernetes-worker.ingress.enabled reactive state.'''
    # Pick the state transition matching the current config value.
    transition = set_state if hookenv.config('ingress') else remove_state
    transition('kubernetes-worker.ingress.enabled')
@when_any('config.changed.default-backend-image',
          'config.changed.ingress-ssl-chain-completion',
          'config.changed.nginx-image',
          'config.changed.ingress-ssl-passthrough',
          'config.changed.ingress-default-ssl-certificate',
          'config.changed.ingress-default-ssl-key')
def reconfigure_ingress():
    """Force a re-render of the ingress manifests after a config change.

    Dropping kubernetes-worker.ingress.available makes
    render_and_launch_ingress run again with the new configuration.
    """
    remove_state('kubernetes-worker.ingress.available')
@when('kubernetes-worker.config.created', 'kubernetes-worker.ingress.enabled')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    ''' Launch the Kubernetes ingress controller & default backend (404).

    Renders and applies two manifests: the ingress daemon set and the
    default HTTP backend. On success, sets
    kubernetes-worker.ingress.available and opens ports 80/443; on any
    kubectl failure it backs out (closing the ports) and retries on the
    next update.
    '''
    config = hookenv.config()
    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'
    context['juju_application'] = hookenv.service_name()
    # If present, workers will get the ingress containers from the configured
    # registry. Otherwise, we'll set an appropriate upstream image registry.
    registry_location = get_registry_location()
    # Resolve the default-backend image: "" / "auto" means pick per-arch
    # from the configured registry (or k8s.gcr.io upstream).
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
            context['defaultbackend_image'] == "auto"):
        if registry_location:
            backend_registry = registry_location
        else:
            backend_registry = 'k8s.gcr.io'
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "{}/defaultbackend-s390x:1.4".format(backend_registry)
        elif context['arch'] == 'ppc64el':
            context['defaultbackend_image'] = \
                "{}/defaultbackend-ppc64le:1.5".format(backend_registry)
        else:
            context['defaultbackend_image'] = \
                "{}/defaultbackend-{}:1.5".format(backend_registry, context['arch'])
    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    context['enable_ssl_passthrough'] = config.get(
        'ingress-ssl-passthrough')
    # Optional default TLS certificate: only configured when BOTH the cert
    # and the key are supplied; they are embedded base64-encoded.
    context['default_ssl_certificate_option'] = None
    if config.get('ingress-default-ssl-certificate') and config.get(
            'ingress-default-ssl-key'):
        context['default_ssl_certificate'] = b64encode(
            config.get('ingress-default-ssl-certificate').encode(
                'utf-8')).decode('utf-8')
        context['default_ssl_key'] = b64encode(
            config.get('ingress-default-ssl-key').encode('utf-8')).decode(
                'utf-8')
        default_certificate_option = (
            '- --default-ssl-certificate='
            '$(POD_NAMESPACE)/default-ssl-certificate')
        context['default_ssl_certificate_option'] = default_certificate_option
    # Resolve the nginx ingress controller image; "" / "auto" picks per-arch.
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if context['arch'] == 'ppc64el':
            # multi-arch image doesn't include ppc64le, have to use an older version
            context['ingress_uid'] = '33'
            context['ingress_image'] = '/'.join([
                registry_location or 'quay.io',
                'kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.20.0',
            ])
        else:
            context['ingress_uid'] = '101'
            context['ingress_image'] = '/'.join([
                registry_location or 'us.gcr.io',
                'k8s-artifacts-prod/ingress-nginx/controller:v0.45.0',
            ])
    # Choose manifest API versions matching the running kubelet.
    kubelet_version = get_version('kubelet')
    if kubelet_version < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
        context['deployment_api_version'] = 'extensions/v1beta1'
    elif kubelet_version < (1, 16):
        context['daemonset_api_version'] = 'apps/v1beta2'
        context['deployment_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1'
        context['deployment_api_version'] = 'apps/v1'
    context['use_forwarded_headers'] = "true" if config.get(
        "ingress-use-forwarded-headers") else "false"
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    # Render the default http backend (404) deployment manifest
    # needs to happen after ingress-daemon-set since that sets up the namespace
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
@when('kubernetes-worker.config.created',
      'kubernetes-worker.ingress.available')
@when_not('kubernetes-worker.ingress.enabled')
def disable_ingress():
    """Tear down the default HTTP backend and the ingress controller."""
    hookenv.log('Deleting the http backend and ingress.')
    hookenv.close_port(80)
    hookenv.close_port(443)
    try:
        # Delete the backend first, then the daemon set (mirrors creation order).
        for manifest in ('/root/cdk/addons/default-http-backend.yaml',
                         '/root/cdk/addons/ingress-daemon-set.yaml'):
            kubectl('delete', '--ignore-not-found', '-f', manifest)
    except CalledProcessError:
        traceback.print_exc()
        hookenv.log('Failed to disable ingress, waiting to retry')
        return
    remove_state('kubernetes-worker.ingress.available')
def restart_unit_services():
    '''Restart the worker daemons (kube-proxy and kubelet) via their snaps.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    for daemon in ('kube-proxy', 'kubelet'):
        service_restart('snap.{}.daemon'.format(daemon))
def get_kube_api_servers():
    '''Return the list of kubernetes API endpoint URLs.'''
    kube_control = endpoint_from_name("kube-control")
    kube_api = endpoint_from_name("kube-api-endpoint")

    # The kube-api-endpoint relation takes precedence when it has services.
    if kube_api.services():
        urls = []
        for service in kube_api.services():
            for unit in service['hosts']:
                urls.append('https://{0}:{1}'.format(unit['hostname'],
                                                     unit['port']))
        return urls

    # Fall back to endpoints published over the kube-control relation.
    if hasattr(kube_control, "get_api_endpoints"):
        return kube_control.get_api_endpoints()

    hookenv.log("Unable to determine API server URLs from either kube-control "
                "or kube-api-endpoint relation", hookenv.ERROR)
    return []
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when('kube-control.auth.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups',
          'nrpe-external-master.reconfigure')
@when_any('kube-control.api_endpoints.available',
          'kube-api-endpoint.available')
def update_nrpe_config():
    """(Re)write the NRPE checks for the worker daemons and node registration.

    Installs the node-check plugin, registers service and node checks with
    NRPE, and (when credentials and API servers are known) writes a nagios
    kubeconfig so the plugin can query the API server.
    """
    services = ['snap.{}.daemon'.format(s) for s in worker_services]
    # Render the plugin source and install it as the NRPE node check.
    data = render('nagios_plugin.py', context={'node_name': get_node_name()})
    plugin_path = install_nagios_plugin_from_text(data,
                                                  'check_k8s_worker.py')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check("node",
                         "Node registered with API Server",
                         str(plugin_path))
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
    creds = db.get('credentials')
    servers = get_kube_api_servers()
    if creds and servers:
        # Spread units across API servers deterministically by unit number.
        server = servers[get_unit_number() % len(servers)]
        create_kubeconfig(nrpe_kubeconfig_path, server, ca_crt_path,
                          token=creds['client_token'], user='nagios')
    # Make sure Nagios dirs are the correct permissions.
    cmd = ['chown', '-R', 'nagios:nagios']
    for p in ['/var/lib/nagios/', os.path.dirname(nrpe_kubeconfig_path)]:
        if os.path.exists(p):
            check_call(cmd + [p])
    remove_state('nrpe-external-master.reconfigure')
    set_state('nrpe-external-master.initial-config')
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config():
    """Remove all NRPE checks once the nrpe relation goes away."""
    remove_state('nrpe-external-master.initial-config')
    remove_nagios_plugin('check_k8s_worker.py')

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    nrpe_setup = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())

    for svc in worker_services:
        nrpe_setup.remove_check(shortname=svc)
    nrpe_setup.remove_check(shortname='node')
@when('nvidia.ready')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node: label it and turn on device plugins."""
    # Device plugins require kubelet >= 1.9.
    if get_version('kubelet') < (1, 9):
        hookenv.status_set(
            'active',
            'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as err:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(err)
        return

    for label in ('gpu', 'cuda'):
        set_label(label, 'true')

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia.ready')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
    """Cuda departed.

    The nvidia runtime went away while GPU mode was enabled: disable GPU
    support and schedule a service restart to pick up the change.
    """
    disable_gpu()
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node by removing the GPU node labels."""
    hookenv.log('Disabling gpu mode')
    for label in ('gpu', 'cuda'):
        remove_label(label)
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.

    :param kube_control: the connected kube-control relation endpoint.
    """
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.

    :param kube_control: the connected kube-control relation endpoint.
    """
    kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    creds = kube_control.get_auth_credentials(nodeuser)

    # Nothing to do until the master publishes credentials for our node user.
    if not creds or creds['user'] != nodeuser:
        return

    # We need to cache the credentials here because if the
    # master changes (master leader dies and replaced by a new one)
    # the new master will have no recollection of our certs.
    db.set('credentials', creds)
    set_state('worker.auth.bootstrapped')
    if data_changed('kube-control.creds', creds):
        set_state('kubernetes-worker.restart-needed')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading a
    charm in a deployment that pre-dates the kube-control relation, it'll be
    missing.

    Called from charm_status.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        goal_state = {}

    relation_declared = 'kube-control' in goal_state.get('relations', {})

    if relation_declared and is_flag_set("kube-control.connected"):
        # Relation exists and is joined: nothing to report.
        return False

    if relation_declared:
        # Declared but not yet connected: the master is still coming up.
        hookenv.status_set(
            'waiting',
            'Waiting for kubernetes-master to become ready')
    else:
        hookenv.status_set(
            'blocked',
            'Relate {}:kube-control kubernetes-master:kube-control'.format(
                hookenv.service_name()))
    return True
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
class ApplyNodeLabelFailed(Exception):
    """Raised when a kubectl node-label apply/remove still fails after retries."""
    pass
def persistent_call(cmd, retry_message):
    """Run *cmd* until it exits 0, retrying every second for up to 3 minutes.

    Returns True on the first successful exit, False once the deadline passes.
    """
    give_up_at = time.time() + 180
    while time.time() < give_up_at:
        if subprocess.call(cmd) == 0:
            return True
        hookenv.log(retry_message)
        time.sleep(1)
    return False
def set_label(label, value):
    """Apply ``label=value`` to this node via kubectl, retrying until it sticks.

    Raises ApplyNodeLabelFailed if the label cannot be applied in time.
    """
    node = get_node_name()
    cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'.format(
        kubeconfig_path, node, label, value).split()
    failure_msg = 'Failed to apply label %s=%s. Will retry.' % (label, value)
    if not persistent_call(cmd, failure_msg):
        raise ApplyNodeLabelFailed(failure_msg)
def remove_label(label):
    """Remove ``label`` from this node via kubectl, retrying until it sticks.

    Raises ApplyNodeLabelFailed if the label cannot be removed in time.
    """
    node = get_node_name()
    cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'.format(
        kubeconfig_path, node, label).split()
    failure_msg = 'Failed to remove label {0}. Will retry.'.format(label)
    if not persistent_call(cmd, failure_msg):
        raise ApplyNodeLabelFailed(failure_msg)
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined',
          'endpoint.openstack.joined',
          'endpoint.vsphere.joined',
          'endpoint.azure.joined')
@when_not('kubernetes-worker.cloud.ready')
def set_cloud_pending():
    """Mark cloud integration pending, or blocked on too-old Kubernetes.

    vSphere integration needs k8s >= 1.12 and Azure needs >= 1.11.
    """
    version = get_version('kubelet')
    blocked = ((is_state('endpoint.vsphere.joined') and version < (1, 12)) or
               (is_state('endpoint.azure.joined') and version < (1, 11)))
    if blocked:
        set_state('kubernetes-worker.cloud.blocked')
    else:
        remove_state('kubernetes-worker.cloud.blocked')
    set_state('kubernetes-worker.cloud.pending')
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.azure.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud.request-sent')
def request_integration():
hookenv.status_set('maintenance', 'requesting cloud integration')
kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
cluster_tag = kube_control.get_cluster_tag()
if is_state('endpoint.aws.joined'):
cloud = | |
#@title
import numpy as np
import re
import string
import jax
import cmath
from typing import List
import tensorflow as tf
import tensornetwork as tn
from colorama import Fore
from colorama import Style
from itertools import product
class QCircuit:
"""Implementation of a QCircuit."""
def __init__(self, num_qubits, backend='jax'):
self.num_qubits = num_qubits
self.backend = backend
# Final list that would contain all the nodes needed for network
self.network = [self.get_initial_state()]
# List that would contain info about what gate is being applied for a particular qubit
self.gate_patch = []
# List that would contain a pair of list of control qubits and target qubit
self.control_gates_patch = []
# List that contains the angle for rotation gates
self.arguments = [None] * self.num_qubits
self.graphics_terminal = []
for index in range(self.num_qubits):
self.gate_patch.append('I')
self.graphics_terminal.append(" | ")
self.graphics_terminal.append("q%2s |──" % str(index))
self.graphics_terminal.append(" | ")
# Define one qubits gate dictionary
gates = {
"I" : np.eye(2, dtype=np.complex128),
"X" : np.array([[0.0, 1.0],
[1.0, 0.0]], dtype=np.complex128),
"Y" : np.array([[0.0, 0.0-1.j],
[0.+1.j, 0.0]], dtype=np.complex128),
"Z" : np.array([[1.0, 0.0],
[0.0, -1.0]], dtype=np.complex128),
"H" : np.array([[1, 1],
[1, -1]], dtype=np.complex128) / np.sqrt(2),
"T" : np.array([[1.0, 0.0],
[0.0, np.exp(1.j * np.pi / 4)]], dtype=np.complex128),
"R" : np.array([[1.0, 0.0],
[0.0, 1.0]], dtype=np.complex128),
"RX" : np.array([[1.0, 1.0],
[1.0, 1.0]], dtype=np.complex128),
"RY" : np.array([[1.0, 1.0],
[1.0, 1.0]], dtype=np.complex128),
"RZ" : np.array([[1.0, 0.0],
[0.0, 1.0]], dtype=np.complex128)
}
########################################## GRAPHICS ############################################
# Graphic functions to make it user friendly at the end
colors = {
0 : "\u001b[0m",
1 : "\u001b[31m",
2 : "\u001b[32m",
3 : "\u001b[33m",
4 : "\u001b[34m",
5 : "\u001b[35m",
6 : "\u001b[36m",
7 : "\u001b[37m",
8 : "\u001b[31m",
9 : "\u001b[32m",
10 : "\u001b[33m",
11 : "\u001b[34m",
}
    def apply_graphics_to_patch(self):
        """
        Visualize the circuit with all gates applying on it.

        Appends one column of box-drawing characters (three text rows per
        qubit) to self.graphics_terminal for the currently staged moment:
        first the controlled gates (coloured), then the idle wires and
        plain one-qubit gates.
        """
        color_iterator = 1
        for control_gate in self.control_gates_patch:
            # All qubits participating in this controlled gate: controls + target.
            full_list = control_gate[0] + [control_gate[1]]
            for qubit in full_list:
                # Draw a ╩ connector on the top edge when the qubit above participates.
                if any(qubit - it_qubit == 1 for it_qubit in full_list):
                    self.graphics_terminal[qubit * 3] += "%s╔═╩═╗%s " % (self.colors[color_iterator], self.colors[0])
                else:
                    self.graphics_terminal[qubit * 3] += "%s╔══╗%s  " % (self.colors[color_iterator], self.colors[0])
                # Target qubit shows the gate letter uppercase; controls lowercase.
                if (qubit == control_gate[1]):
                    self.graphics_terminal[qubit * 3 + 1] += "%s║%s║%s───" % (self.colors[color_iterator], self.gate_patch[qubit][1], self.colors[0])
                else:
                    self.graphics_terminal[qubit * 3 + 1] += "%s║%s║%s───" % (self.colors[color_iterator], self.gate_patch[qubit][1].lower(), self.colors[0])
                # Draw a ╦ connector on the bottom edge when the qubit below participates.
                if any(qubit - it_qubit == -1 for it_qubit in full_list):
                    self.graphics_terminal[qubit * 3 + 2] += "%s╚═╦═╝%s " % (self.colors[color_iterator], self.colors[0])
                else:
                    self.graphics_terminal[qubit * 3 + 2] += "%s╚══╝%s  " % (self.colors[color_iterator], self.colors[0])
            # Each controlled gate gets its own colour.
            color_iterator = color_iterator + 1
        for qubit in range(self.num_qubits):
            # Idle qubit: draw a plain wire segment.
            if (self.gate_patch[qubit] == 'I'):
                self.graphics_terminal[qubit * 3] += "      "
                self.graphics_terminal[qubit * 3 + 1] += "──────"
                self.graphics_terminal[qubit * 3 + 2] += "      "
            # Plain one-qubit gate (controlled-gate members were drawn above).
            elif ("Target" not in self.gate_patch[qubit] and "Control" not in self.gate_patch[qubit]):
                if len(self.gate_patch[qubit]) > 1:
                    self.graphics_terminal[qubit * 3] += "╔═══╗ "
                    self.graphics_terminal[qubit * 3 + 1] += "║%s ║───" % self.gate_patch[qubit]
                    self.graphics_terminal[qubit * 3 + 2] += "╚═══╝ "
                else:
                    self.graphics_terminal[qubit * 3] += "╔═══╗ "
                    self.graphics_terminal[qubit * 3 + 1] += "║ %s ║───" % self.gate_patch[qubit]
                    self.graphics_terminal[qubit * 3 + 2] += "╚═══╝ "
########################################## GRAPHICS ############################################
# Methods of the QCircuit class
def get_initial_state(self):
""""
Generate and returns the node of the initial state of the quantum circuit.
"""
if self.num_qubits <= 0 or not isinstance(self.num_qubits, int):
raise ValueError("Amount of qubits should be not-negative integer.")
# create initial state vector
initial_state = np.zeros(2 ** self.num_qubits, dtype=np.complex128)
initial_state[0] = 1.0 + 0.j
initial_state = np.transpose(initial_state)
# wrap the tensor of the initial state in to a node
initial_state_node = tn.Node(initial_state, backend=self.backend)
return initial_state_node
    # Generate controlled (multi-control) gates
    def generate_control_gate(self, control: List, target: int, gate:str):
        """
        Generate and return the tensor of the control gate for any system
        of different number of qubits with the consideration of the given
        control and target qubits.

        Args:
            control: list of indices of the control qubits
            target: The index of the target qubit
            gate: a type of gate to be generated (X, Y, etc.)

        Returns:
            A tensor (2**num_qubits x 2**num_qubits matrix) of the control gate
        """
        control_gate = np.eye(2**self.num_qubits, dtype=np.complex128)
        tuples = []  # NOTE(review): unused local, kept for byte-compatibility.
        # Searches for all the numbers up to 2**self.num_qubits such that in
        # binary representation they have '1' in control-place and '0' in
        # target place.
        for i in range(2**self.num_qubits):
            if not (i & (1 << target)) and all(i & (1 << control_qubit) for control_qubit in control):
                swap = i + 2**target
                # Embeds the 2x2 gate over the (i, swap) subspace of the matrix.
                control_gate[i][i] = self.gates[gate][0][0]
                control_gate[i][swap] = self.gates[gate][0][1]
                control_gate[swap][i] = self.gates[gate][1][0]
                control_gate[swap][swap] = self.gates[gate][1][1]
        # If control gate applies Hadamard gate, puts the whole system into
        # superposition.
        # NOTE(review): scaling the ENTIRE matrix (identity rows included) by
        # (1+1j)/sqrt(2) does not look unitary — confirm this is intended.
        if gate == 'H':
            control_gate = control_gate * (1. + 1.j) / np.sqrt(2)
        return control_gate
    def apply_arguments(self, gate):
        """
        Applies R, RX, RY, RZ gates on quantum state.

        Writes the rotation angle stored in self.arguments[gate] into the
        corresponding gate matrix in self.gates.

        Args:
            gate: number of a qubit to apply gate on

        Returns:
            None.
        """
        # NOTE(review): self.gates is a class-level dict, so these in-place
        # writes are shared across all QCircuit instances — verify usage.
        if self.gate_patch[gate] == 'R':
            # Phase gate: the stored argument is written directly as the [1][1] entry.
            self.gates['R'][1][1] = self.arguments[gate]
        if self.gate_patch[gate] == 'RX':
            # NOTE(review): uses cos/sin of the full angle, not the conventional
            # theta/2 of rotation gates — confirm the intended convention.
            self.gates['RX'][0][0] = np.cos(self.arguments[gate])
            self.gates['RX'][1][1] = np.cos(self.arguments[gate])
            self.gates['RX'][1][0] = -1.j*np.sin(self.arguments[gate])
            self.gates['RX'][0][1] = -1.j*np.sin(self.arguments[gate])
        if self.gate_patch[gate] == 'RY':
            self.gates['RY'][0][0] = np.cos(self.arguments[gate])
            self.gates['RY'][1][1] = np.cos(self.arguments[gate])
            self.gates['RY'][1][0] = np.sin(self.arguments[gate])
            self.gates['RY'][0][1] = -np.sin(self.arguments[gate])
        if self.gate_patch[gate] == 'RZ':
            self.gates['RZ'][0][0] = np.exp(-1.j*self.arguments[gate])
            self.gates['RZ'][1][1] = np.exp(1.j*self.arguments[gate])
    def evaluate_patch(self):
        """
        Evaluate the gates applied to the circuit at the current moment.

        The tensor of the corresponding gates is stored in a tensornetwork
        node appended to ``self.network``; afterwards the pending patch
        (``gate_patch``, ``arguments``, ``control_gates_patch``) is reset.
        """
        # Nothing pending: every qubit holds the identity placeholder.
        if all(self.gate_patch[i] == 'I' for i in range(self.num_qubits)):
            return
        # Call graphic function
        self.apply_graphics_to_patch()
        # Create matrix for all control gates in the current patch.
        # Each control-gate node is appended separately, then its qubits
        # are reset to 'I' so the single-qubit sweep below skips them.
        for control_gate_info in self.control_gates_patch:
            target_qubit = control_gate_info[1]
            # NOTE(review): gate_patch entries for controlled targets are
            # indexed with [1] here, unlike the plain-string entries used
            # elsewhere -- presumably a (marker, gate-name) pair; confirm.
            if self.gate_patch[target_qubit][1] == 'R':
                self.gates['R'][1][1] = self.arguments[target_qubit]
            control_gate = self.generate_control_gate(control_gate_info[0], target_qubit, self.gate_patch[target_qubit][1])
            control_gate = control_gate.transpose()
            self.network.append(tn.Node(control_gate, backend=self.backend))
            self.gate_patch[target_qubit] = 'I'
            for qubit in control_gate_info[0]:
                self.gate_patch[qubit] = 'I'
        self.control_gates_patch = []
        # Build the full-width single-qubit moment, starting from the
        # highest-index qubit and tensoring downwards.
        self.apply_arguments(self.num_qubits - 1)
        result_matrix = self.gates[self.gate_patch[self.num_qubits - 1]]
        # expand space using tensor product
        shape = 4
        for gate in reversed(range(self.num_qubits - 1)):
            self.apply_arguments(gate)
            result_matrix = np.tensordot(result_matrix, self.gates[self.gate_patch[gate]], axes=0)
            # reorder tensordot's (2,2,2,2) output into a square matrix
            result_matrix = result_matrix.transpose((0, 2, 1, 3)).reshape((shape, shape))
            shape = len(result_matrix) * 2
        result_matrix = result_matrix.transpose()
        # store the moment in a node and append it to the circuit
        self.network.append(tn.Node(result_matrix, backend=self.backend))
        # clear the patch so the next moment starts fresh
        for index in range(self.num_qubits):
            self.gate_patch[index] = 'I'
            self.arguments[index] = None
    def get_state_vector(self):
        """
        Return the resulting state vector as a tensor of rank 1,
        rounded to 3 decimal points.

        NOTE(review): when the network holds at most one node the guarded
        block is skipped and the method implicitly returns None -- confirm
        callers handle an empty circuit.
        """
        # connect all nodes and evaluate all tensors stored in it
        self.evaluate_patch()
        if len(self.network) > 1:
            # chain the gate nodes together, back to front
            for index in reversed(range(1, len(self.network) - 1)):
                self.network[index + 1][0] ^ self.network[index][1]
            # attach the first gate to node 0 (presumably the initial state)
            self.network[1][0] ^ self.network[0][0]
            nodes = tn.reachable(self.network[1])
            result = tn.contractors.greedy(nodes, ignore_edge_order=True)
            # round the result to three decimals
            state_vecor = np.round(result.tensor, 3)
            return state_vecor
# Get amplitude
def get_amplitude(self):
"""
Print amplitudes of the final state vector of the circuit.
Amplitudes defined as the length of the state vector on Bloch sphere.
Round values to 3 decimal points.
"""
state_vector = self.get_state_vector()
# amplitude = sqrt( (real_part)^2 + (complex_part)^2)
for index in range(2 ** self.num_qubits):
amplitude = np.absolute(state_vector[index])
# decimal to binary
b = np.binary_repr(index, width=self.num_qubits)
print("|" + b + "> amplitude " + str(amplitude))
# Get bitstring
def get_bitstring(self):
"""
Print bitstring for the final state vector of the circuit.
Probability calculated as a value times value_conjugate.
Returns:
Probability of each bit.
Binary reprsentation of the most probabal bitstring.
"""
state_vector = self.get_state_vector()
sample = {}
# probability = complex_magnitude * complex_magnitude_conjugate
for index in range(2 ** self.num_qubits):
probability = state_vector[index] * np.conjugate(state_vector[index])
probability = np.round(np.real(probability), 3)
b = np.binary_repr(index, width=self.num_qubits)
sample[index] = probability
# print("|" + b + "> probability " + str(probability))
return sample, np.binary_repr(max(sample, key=sample.get), width=self.num_qubits)
# Get visualization
def visualize(self):
"""
Visualize the quantum circuit.
"""
self.evaluate_patch()
for string in self.graphics_terminal:
print (string)
# checks for the correct input
def check_input_one_gate(self, target:int):
""""
Check for the basics inputs of the one-qubit gates.
Args:
target: a target qubit.
Return: None.
Raise:
Value Errors.
"""
if target > self.num_qubits - 1:
raise ValueError("Qubit's index exceed the specified size of the cirquit.")
| |
minimum
if callable(V):
Vrel = lambda r : V(r) - Emin
else:
Vrel = V - Emin
# compute low-lying levels E(v, J)
EvJ = np.zeros((3, 3))
for Nrot in range(3):
J = Nrot + omega
if callable(Vrel):
centrifug = lambda r : Vrel(r) + (J*(J+1)-omega*omega)/(2*mass*r*r)
else:
centrifug = Vrel + (J*(J+1)-omega*omega)/(2*mass*R*R)
cvals, cvecs, ratio, xwfn, ywfn = FGH(R, centrifug, mass, silent=silent,
npt=npt, padwidth=padwidth, interp=interp)
if np.any(ratio[:3] > psitol):
# wavefunction is not good enough for lowest 3 states
print(ratio[:3])
print_err('', 'sloppy wfn for J = {:d}'.format(J))
EvJ[:,J] = cvals[:3]
# convert energy levels to cm**-1
EvJ *= AU2CM
# vibrational constants
constants['w0'] = EvJ[1,0] - EvJ[0,0]
constants['E0'] = EvJ[0,0]
a, b, c = parabfit([0.5, 1.5, 2.5], EvJ[:,0])
constants['we'] = b
constants['wexe'] = -a
# rotational constants
B = []
D = []
jj = [J*(J+1) for J in range(3)]
for v in range(3):
a, b, c = parabfit(jj, EvJ[v,:])
B.append(b)
D.append(-a)
constants['B0'] = B[0]
constants['D0'] = D[0]
a, b, c = parabfit([0.5, 1.5, 2.5], B)
constants['alpha'] = -b
constants['Be'] = c
a, b, c = parabfit([0.5, 1.5, 2.5], D)
constants['De'] = c
return constants
##
def rovib_levels(R, V, mass, omega=0, vmax=2, Nmax=2,
          psitol=1.e-6, silent=False, npt=51, padwidth=0, interp='cubic'):
    # given a potential, return some diatomic rovibrational energies
    #   input units are a.u.
    #   output units are cm**-1 relative to the energy minimum
    # return values:
    #   array of energies E(v, N)/cm-1, where J = N + omega
    #   Emin/hartree
    # 'psitol' is tolerance for FGH periodicity artifacts
    # 'padwidth' is how much to extend the left and right, using
    #    the endpoint V-value of the real data.  This is to keep
    #    the cyclic copies from interacting.  The default padwidth=1
    #    makes the range 3x wider.
    # 'V' may be a function instead of a list/array
    if not silent:
        print('Wavefunction tail convergence criterion = {:.1e}'.format(psitol))
    # compute low-lying levels E(v, J)
    nv = vmax + 1
    nN = Nmax + 1
    fitorder = 4
    (xlow, ylow) = lowest_points(R, V, fitorder+1)
    if callable(V):
        # find the minimum located by continuous means
        res = optimize.minimize_scalar(V, bracket=[xlow[0], xlow[-1]])
        Emin = res.fun
    else:
        # quartic fit to lowest five points
        (xmin, ymin) = polymin(xlow, ylow, order=fitorder)
        if len(ymin) > 1:
            print_err('', 'potential has more than one minimum')
        Emin = ymin[0]
    # make energies relative to minimum
    if callable(V):
        Vrel = lambda r : V(r) - Emin
    else:
        Vrel = V - Emin
    EvJ = np.zeros((nv, nN))
    for Nrot in range(nN):
        J = Nrot + omega
        if callable(Vrel):
            # bind J as a default so the closure is safe even if FGH
            # defers evaluation
            centrifug = lambda r, J=J: Vrel(r) + (J*(J+1)-omega*omega)/(2*mass*r*r)
        else:
            centrifug = Vrel + (J*(J+1)-omega*omega)/(2*mass*R*R)
        cvals, cvecs, ratio, xwfn, ywfn = FGH(R, centrifug, mass, silent=silent,
                        npt=npt, padwidth=padwidth, interp=interp)
        if np.any(ratio[:nv] > psitol):
            # wavefunction is not good enough
            print(ratio[:nv])
            print_err('', 'sloppy wfn for J = {:d}'.format(J))
        # BUG FIX: the column index must be the rotational counter Nrot,
        # not J = Nrot + omega; for omega != 0 the old 'EvJ[:,J]' shifted
        # the columns and overran the (nv, nN) array.
        EvJ[:,Nrot] = cvals[:nv]
    # convert energy levels to cm**-1
    EvJ *= AU2CM
    return EvJ, Emin
##
def turnover_limits_potential(R, V, Rmin, interp='cubic', tol=0.001):
# given a diatomic potential V(R), and approx minimum Rmin,
# check for large-R (and small-R) death plunge
# return the range, not to exceed the range of R,
# over which the potential is single-minimum
if callable(V):
fn = V
else:
# use a spline to interpolate discrete data
fn = spline_fit(R, V, kind=interp)
fminz = lambda x : -fn(x)
xbot = np.min(R)
xtop = np.max(R)
# check at large R
res = optimize.minimize_scalar(fminz, bounds=(Rmin, xtop), method='bounded')
if res.success:
if xtop - res.x > tol:
# new high end
xtop = res.x
# check at small R
res = optimize.minimize_scalar(fminz, bounds=(xbot, Rmin), method='bounded')
if res.success:
if res.x - xbot > tol:
# new low end
xbot = res.x
return xbot, xtop
##
def flat_truncate_function(xlo, xhi, f):
    # Return a vectorized function equal to f(x) on [xlo, xhi] and held
    # constant at the nearer endpoint's value outside that interval.
    def clamped(x):
        if x < xlo:
            return f(xlo)
        if x > xhi:
            return f(xhi)
        return f(x)
    return np.vectorize(clamped)
##
def classical_turning_points(R, V, mass, omega=0, vmax=1, npt=51,
                             psitol=1.e-6, padwidth=0, interp='cubic'):
    # Given a potential V(R) and a mass, return the classical turning
    # points for the vibrational levels up to v = vmax, together with
    # the location of the potential minimum (R_e).
    vlev, emin = rovib_levels(R, V, mass, vmax=vmax, Nmax=0, npt=npt,
                              psitol=psitol, interp=interp, padwidth=padwidth,
                              silent=True)
    # convert the levels back to absolute energies in hartree
    vlev = vlev.flatten() / AU2CM + emin
    # make sure we have a continuous potential to solve against
    fn = V if callable(V) else interpolate.Akima1DInterpolator(R, V)
    res = optimize.minimize_scalar(fn, bounds=(R[0], R[-1]), method='bounded')
    xe = res.x
    xturn = []                      # pairs of turning points, one per level
    xprev = np.array([xe, xe])      # seed the root search at the minimum
    dx = np.array([-0.1, 0.1])      # nudge the two guesses apart
    for ivib, evib in enumerate(vlev[:vmax+1]):
        # turning points are where V(x) crosses the level energy
        root = optimize.fsolve(lambda x, e=evib: fn(x) - e, xprev + dx)
        xturn.append(root)
        xprev = root.copy()
    return xturn, xe
##
def diatomic_Dunham(R, V, mass, omega=0, lmax=2, jmax=2, psitol=1.e-6,
                    silent=False, npt=51, padwidth=0, interp='cubic',
                    conventional=True):
    # given a potential and mass, return some diatomic Dunham constants
    #   input units are a.u.
    #   output units are cm**-1 relative to the energy minimum
    # return values:
    #   array of Dunham constants Y(l,j), where l is constant along column
    #   (if 'conventional'==True) and dict of spectr. constants and values
    # 'psitol' is tolerance for FGH periodicity artifacts
    # 'padwidth' is how much to extend the left and right, using
    #    the endpoint V-value of the real data. This is to keep
    #    the cyclic copies from interacting. The default padwidth=1
    #    makes the range 3x wider.
    # 'V' may be a function instead of a list/array
    # By fitting energy levels, not from Dunham's equations
    if omega != 0:
        print_err('', 'Omega not zero--results may be meaningless', halt=False)
    # compute required levels E(v, J)
    EvJ, Emin = rovib_levels(R, V, mass, omega=omega, vmax=lmax,
            Nmax=jmax+omega, psitol=psitol, silent=silent, npt=npt,
            padwidth=padwidth, interp=interp)
    vvals = np.arange(lmax + 1) + 0.5
    Nvals = np.arange(jmax + 1)  # does not include omega
    Jvals = Nvals + omega
    JJvals = Jvals * (Jvals + 1)
    Y = np.zeros((jmax+1, lmax+1))
    # Nrot = 0 fitting
    pfit = np.polynomial.polynomial.Polynomial.fit(vvals, EvJ[:,0], deg=lmax)
    coef = pfit.convert().coef
    Y[0,1:] = coef[1:]
    for l in range(lmax+1):
        # v = constant fitting (gives B0, etc., not Be)
        pfit = np.polynomial.polynomial.Polynomial.fit(JJvals, EvJ[l,:], deg=jmax)
        coef = pfit.convert().coef
        Y[1:,l] = coef[1:]
    # fit the rotational constants
    C = Y.copy()
    for j in range(1, jmax+1):
        pfit = np.polynomial.polynomial.Polynomial.fit(vvals, C[j,:], deg=lmax)
        coef = pfit.convert().coef
        # FIX: write only row j.  The old 'Y[j:,:-1]' broadcast the
        # coefficients into every row >= j; the ascending loop happened
        # to leave the same final values, but the slice was fragile.
        Y[j,:-1] = coef[:-1]
        Y[j,-1] = np.nan
    if conventional:
        # create dict with traditional constants
        labels = [ [None, 'we', '-wexe', 'weye', 'weze'],
                   ['Be', '-alpha', 'gamma'],
                   ['-De', '-beta', 'delta'],
                   ['Fe'],
                   ['He']
                 ]
        constants = {}
        for j in range(jmax+1):
            for l in range(lmax+1):
                try:
                    lbl = labels[j][l]
                    if np.isnan(Y[j,l]):
                        continue
                    # a leading '-' on the label flips the sign
                    if '-' in lbl:
                        constants[lbl[1:]] = -Y[j,l]
                    else:
                        constants[lbl] = Y[j,l]
                except (IndexError, TypeError):
                    # IndexError: no label defined for this (j, l);
                    # TypeError: label is None (e.g. Y00).  Skip either way;
                    # the old bare 'except:' also hid real errors.
                    pass
        return Y.T, constants
    else:
        return Y.T
##
def Brot_to_R(B, mu, reverse=False):
    # convert diatomic rotational constant 'B' to its equivalent bond length
    # do the opposite if 'reverse'==True
    # expected units:  B in cm-1, R in Angstrom, mu in amu
    # BUG FIX: use out-of-place arithmetic throughout.  The old in-place
    # '/=' operated on the caller's object, silently mutating a numpy
    # array passed as the first argument.
    mu = mu * AMU2AU  # convert from amu to atomic unit of mass
    if reverse:
        # the first argument is a bond length R/Angstrom
        R = B / BOHR  # convert from angstrom to bohr
        B = 1 / (2 * mu * R * R)
        return B * AU2CM  # convert to cm-1
    else:
        B = B / AU2CM  # convert from cm-1 to hartree
        R = np.sqrt(1 / (2 * mu * B))
        return R * BOHR  # convert to angstrom
##
def discrep_BR_VZ(spectr, mass):
# Given the output from diatomic_spectr(), return the discrepancy
# between Re and Be (in angstrom) and the discrepancy between
# E0 and (we/2 - wexe/4) (in cm-1)
# 'mass' in atomic units
# Return values are (R_B - R_e) and (ZPE_vib - E0)
zdiff = (spectr['we']/2 - spectr['wexe']/4) - spectr['E0']
br_const = PLANCK / (8 * PI**2 * CLIGHT) # | |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
import shutil
import unittest
import skbio
import tempfile
from qiime2.plugin.testing import TestPluginBase
from qiime2.util import redirected_stdio
from q2_types.feature_data import AlignedDNAFASTAFormat
from q2_phylogeny import raxml, raxml_rapid_bootstrap
from q2_phylogeny._raxml import (run_command, _build_rapid_bootstrap_command,
_set_raxml_version)
class RaxmlTests(TestPluginBase):
package = 'q2_phylogeny.tests'
    @classmethod
    def setUpClass(cls):
        # Copy the package test data into a fresh temporary directory so
        # tests (and RAxML's output files) never touch the installed data.
        # NOTE(review): super(TestPluginBase, cls) deliberately skips
        # TestPluginBase in the MRO -- confirm that is intended.
        super(TestPluginBase, cls).setUpClass()
        tmpdir = tempfile.mkdtemp()
        src = pkg_resources.resource_filename(cls.package, 'data')
        dst = os.path.join(tmpdir, 'data')
        shutil.copytree(src, dst)
        cls.data_dir = dst
@classmethod
def tearDownClass(cls):
super(TestPluginBase, cls).setUpClass()
shutil.rmtree(cls.data_dir)
def get_data_path(self, filename):
# Override TestPluginBase.get_data_path so that it returns paths to
# temporary copies of test data.
return os.path.join(self.data_dir, filename)
    def test_raxml(self):
        """Run raxml() on a small alignment and check the resulting tips."""
        # Test that output tree is made.
        # Reads tree output and compares tip labels to expected labels.
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml(input_sequences)
        obs_tree = skbio.TreeNode.read(str(obs))
        # load the resulting tree and test that it has the right number of
        # tips and the right tip ids
        tips = list(obs_tree.tips())
        tip_names = [t.name for t in tips]
        self.assertEqual(set(tip_names),
                         set(['GCA001510755', 'GCA001045515', 'GCA000454205',
                              'GCA000473545', 'GCA000196255', 'GCA000686145',
                              'GCA001950115', 'GCA001971985', 'GCA900007555']))
    def test_raxml_underscore_ids(self):
        """Verify underscores in sequence IDs survive the raxml() run."""
        # Test that output tree is made with underscores in tip IDs.
        # Some programs and python wrappers may strip underscores.
        # Reads tree output and compares tip labels to expected labels.
        input_fp = self.get_data_path('aligned-dna-sequences-4.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml(input_sequences)
        obs_tree = skbio.TreeNode.read(str(obs), convert_underscores=False)
        # load the resulting tree and test that it has the right number of
        # tips and the right tip ids
        tips = list(obs_tree.tips())
        tip_names = [t.name for t in tips]
        self.assertEqual(set(tip_names),
                         set(['GCA_001510755_1', 'GCA_001045515_1',
                              'GCA_000454205_1', 'GCA_000473545_1',
                              'GCA_000196255_1', 'GCA_002142615_1',
                              'GCA_000686145_1', 'GCA_001950115_1',
                              'GCA_001971985_1', 'GCA_900007555_1']))
    def test_set_raxml_version(self):
        """Check binary name / thread-flag selection in _set_raxml_version.

        Single-threaded runs pick the plain binaries; n_threads > 1 picks
        the PTHREADS variants and appends the thread count.
        """
        obs_stand_1 = _set_raxml_version(raxml_version='Standard',
                                         n_threads=1)
        self.assertTrue('raxmlHPC' in str(obs_stand_1[0]))
        self.assertTrue(len(obs_stand_1) == 1)
        obs_sse3_1 = _set_raxml_version(raxml_version='SSE3', n_threads=1)
        self.assertTrue('raxmlHPC-SSE3' in str(obs_sse3_1[0]))
        self.assertTrue(len(obs_sse3_1) == 1)
        obs_avx2_1 = _set_raxml_version(raxml_version='AVX2', n_threads=1)
        self.assertTrue('raxmlHPC-AVX2' in str(obs_avx2_1[0]))
        self.assertTrue(len(obs_avx2_1) == 1)
        obs_stand_4 = _set_raxml_version(raxml_version='Standard',
                                         n_threads=4)
        self.assertTrue('raxmlHPC-PTHREADS' in str(obs_stand_4[0]))
        self.assertTrue('4' in str(obs_stand_4[1]))
        self.assertTrue(len(obs_stand_4) == 2)
        obs_sse3_4 = _set_raxml_version(raxml_version='SSE3', n_threads=4)
        self.assertTrue('raxmlHPC-PTHREADS-SSE3' in str(obs_sse3_4[0]))
        self.assertTrue('4' in str(obs_sse3_4[1]))
        self.assertTrue(len(obs_sse3_4) == 2)
        obs_avx2_4 = _set_raxml_version(raxml_version='AVX2', n_threads=4)
        self.assertTrue('raxmlHPC-PTHREADS-AVX2' in str(obs_avx2_4[0]))
        self.assertTrue('4' in str(obs_avx2_4[1]))
        self.assertTrue(len(obs_avx2_4) == 2)
    def test_raxml_version(self):
        """Run raxml() with an explicit binary version (SSE3)."""
        # Test that an output tree is made when selecting a RAxML build.
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml(input_sequences, raxml_version='SSE3')
        obs_tree = skbio.TreeNode.read(str(obs), convert_underscores=False)
        # load the resulting tree and test that it has the right number of
        # tips and the right tip ids
        tips = list(obs_tree.tips())
        tip_names = [t.name for t in tips]
        self.assertEqual(set(tip_names),
                         set(['GCA001510755', 'GCA001045515', 'GCA000454205',
                              'GCA000473545', 'GCA000196255', 'GCA000686145',
                              'GCA001950115', 'GCA001971985', 'GCA900007555']))
    def test_raxml_n_threads(self):
        """Run raxml() multi-threaded and check the resulting tips."""
        # Test that an output tree is made when invoking threads.
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml(input_sequences, n_threads=2)
        obs_tree = skbio.TreeNode.read(str(obs), convert_underscores=False)
        # load the resulting tree and test that it has the right number of
        # tips and the right tip ids
        tips = list(obs_tree.tips())
        tip_names = [t.name for t in tips]
        self.assertEqual(set(tip_names),
                         set(['GCA001510755', 'GCA001045515', 'GCA000454205',
                              'GCA000473545', 'GCA000196255', 'GCA000686145',
                              'GCA001950115', 'GCA001971985', 'GCA900007555']))
    def test_raxml_with_seed(self):
        """Seeded raxml() run must reproduce a reference tree's distances."""
        # Test tip-to-tip dists are identical to manually run RAxML output.
        # This test is comparing an ordered series of tip-to-tip distances
        # to a tree output from a manual run of the default command:
        #     raxmlHPC -m GTRGAMMA -p 1723 -s aligned-dna-sequences-3.fasta -n q2
        # NOTE: I cleanly rounded the tip-to-tip dists (i.e. `%.4f`) as RAxML
        #       may return slightly different rounding errors on different
        #       systems.
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml(input_sequences, seed=1723)
        obs_tree = skbio.TreeNode.read(str(obs), convert_underscores=False)
        obs_tl = list(obs_tree.tip_tip_distances().to_series())
        obs_series = set(['%.4f' % e for e in obs_tl])
        exp_tree = skbio.TreeNode.read(self.get_data_path('test.tre'))
        exp_tl = list(exp_tree.tip_tip_distances().to_series())
        exp_series = set(['%.4f' % e for e in exp_tl])
        self.assertEqual(obs_series, exp_series)
    def test_raxml_model_choice(self):
        """Different substitution models must yield different trees."""
        # Tip to tip dists should NOT be identical under different models.
        # Default is GTRGAMMA; we'll compare output to GTRGAMMAI & GTRCAT.
        # This test is comparing an ordered series of tip-to-tip distances.
        # Take note, that for this comparison to work, all must have the same
        # seed value set.
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        # default GTRGAMMA
        with redirected_stdio(stderr=os.devnull):
            gtrg = raxml(input_sequences, seed=1723)
            gtrg_tree = skbio.TreeNode.read(
                str(gtrg), convert_underscores=False)
            gtrg_td = set(gtrg_tree.tip_tip_distances().to_series())
        # set GTRGAMMAI
        with redirected_stdio(stderr=os.devnull):
            gtrgi = raxml(input_sequences, seed=1723,
                          substitution_model='GTRGAMMAI')
            gtrgi_tree = skbio.TreeNode.read(
                str(gtrgi), convert_underscores=False)
            gtrgi_td = set(gtrgi_tree.tip_tip_distances().to_series())
        # set GTRCAT
        with redirected_stdio(stderr=os.devnull):
            gtrcat = raxml(input_sequences, seed=1723,
                           substitution_model='GTRCAT')
            gtrcat_tree = skbio.TreeNode.read(
                str(gtrcat), convert_underscores=False)
            gtrcat_td = set(gtrcat_tree.tip_tip_distances().to_series())
        # test pairs are not equivalent
        self.assertNotEqual(gtrg_td, gtrgi_td)
        self.assertNotEqual(gtrg_td, gtrcat_td)
        self.assertNotEqual(gtrgi_td, gtrcat_td)
    def test_raxml_num_searches(self):
        """Seeded multi-start search must match the reference tree."""
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml(input_sequences, seed=1723, n_searches=5)
        obs_tree = skbio.TreeNode.read(str(obs), convert_underscores=False)
        obs_tl = list(obs_tree.tip_tip_distances().to_series())
        obs_series = set(['%.4f' % e for e in obs_tl])
        exp_tree = skbio.TreeNode.read(self.get_data_path('test3.tre'))
        exp_tl = list(exp_tree.tip_tip_distances().to_series())
        exp_series = set(['%.4f' % e for e in exp_tl])
        self.assertEqual(obs_series, exp_series)
    def test_rapid_bootstrap_command(self):
        """Check argument placement in the assembled bootstrap command.

        The indices below assume the flag/value layout produced by
        _build_rapid_bootstrap_command (flag at even index, value after).
        """
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with tempfile.TemporaryDirectory() as temp_dir:
            with redirected_stdio(stderr=os.devnull):
                obs = _build_rapid_bootstrap_command(input_sequences, 1723,
                                                     8752, 15, 'GTRGAMMA',
                                                     temp_dir, 'bs')
        self.assertTrue(str(input_sequences) in str(obs[11]))
        self.assertTrue('1723' in obs[5])
        self.assertTrue('8752' in obs[7])
        self.assertTrue('15' in obs[9])
        self.assertTrue('GTRGAMMA' in obs[3])
        self.assertTrue(str(temp_dir) in obs[13])
        self.assertTrue('bs' in obs[15])
    def test_raxml_rapid_bootstrap(self):
        """Run raxml_rapid_bootstrap() and check the resulting tips."""
        # Test that output tree is made.
        # Reads tree output and compares tip labels to expected labels.
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml_rapid_bootstrap(input_sequences)
        obs_tree = skbio.TreeNode.read(str(obs))
        # load the resulting tree and test that it has the right number of
        # tips and the right tip ids
        tips = list(obs_tree.tips())
        tip_names = [t.name for t in tips]
        self.assertEqual(set(tip_names),
                         set(['GCA001510755', 'GCA001045515', 'GCA000454205',
                              'GCA000473545', 'GCA000196255', 'GCA000686145',
                              'GCA001950115', 'GCA001971985', 'GCA900007555']))
    def test_raxml_rapid_bootstrap_n_threads(self):
        """Run raxml_rapid_bootstrap() multi-threaded and check the tips."""
        # Test that an output tree is made when invoking threads.
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        with redirected_stdio(stderr=os.devnull):
            obs = raxml_rapid_bootstrap(input_sequences, n_threads=2)
        obs_tree = skbio.TreeNode.read(str(obs), convert_underscores=False)
        # load the resulting tree and test that it has the right number of
        # tips and the right tip ids
        tips = list(obs_tree.tips())
        tip_names = [t.name for t in tips]
        self.assertEqual(set(tip_names),
                         set(['GCA001510755', 'GCA001045515', 'GCA000454205',
                              'GCA000473545', 'GCA000196255', 'GCA000686145',
                              'GCA001950115', 'GCA001971985', 'GCA900007555']))
def test_raxml_rapid_bootstrap_with_seed(self):
# Test tip-to-tip dists are identical to manually run RAxML output.
# This test is comparing an ordered series of tip-to-tip distances
# to a tree output from a manual run of the default command:
# raxmlHPC -f a -m GTRGAMMA -p 1723 -x 3871 -N 10
# -s aligned-dna-sequences-3.fasta -n q2
# NOTE: I cleanly rounded the tip-to-tip dists (i.e. `%.4f`) as RAxML
# may return slightly different rounding errors on different
# systems (and at times, between conda environments).
input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
# test that branchlengths are identical
with redirected_stdio(stderr=os.devnull):
obs = raxml_rapid_bootstrap(input_sequences, seed=1723,
rapid_bootstrap_seed=3871,
bootstrap_replicates=10)
obs_tree = skbio.TreeNode.read(str(obs), convert_underscores=False)
# sometimes we lose the last set of numbers on long floats
obs_tl = list(obs_tree.tip_tip_distances().to_series())
obs_series = set(['%.4f' % e for e in obs_tl])
exp_tree = skbio.TreeNode.read(self.get_data_path('test2.tre'),
convert_underscores=True)
exp_tl = list(exp_tree.tip_tip_distances().to_series())
exp_series = set(['%.4f' % e for e in exp_tl])
self.assertEqual(obs_series, exp_series)
# test that bootstrap supports are identical
obs_bs = [node.name for node in obs_tree.non_tips()].sort()
exp_bs = [node.name for node in exp_tree.non_tips()].sort()
self.assertEqual(obs_bs, exp_bs)
    def test_run_not_verbose(self):
        """Run the raw RAxML command via run_command(verbose=False)."""
        input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
        input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
        aligned_fp = str(input_sequences)
        with tempfile.TemporaryDirectory() as temp_dir:
            cmd = ['raxmlHPC',
                   '-m', 'GTRGAMMA',
                   '-p', '1723',
                   '-s', aligned_fp,
                   '-w', temp_dir,
                   '-n', 'q2']
            with redirected_stdio(stderr=os.devnull):
                run_command(cmd, verbose=False)
            obs_tree_fp = os.path.join(temp_dir, 'RAxML_bestTree.q2')
            obs_tree = skbio.TreeNode.read(str(obs_tree_fp),
                                           convert_underscores=False)
        # load the resulting tree and test that it has the right number of
        # tips and the right tip ids
        tips = list(obs_tree.tips())
        tip_names = [t.name for t in tips]
        self.assertEqual(set(tip_names),
                         set(['GCA001510755', 'GCA001045515',
                              'GCA000454205', 'GCA000473545',
                              'GCA000196255', 'GCA000686145',
                              'GCA001950115', 'GCA001971985',
                              'GCA900007555']))
def test_run_rapid_bs_not_verbose(self):
input_fp = self.get_data_path('aligned-dna-sequences-3.fasta')
input_sequences = AlignedDNAFASTAFormat(input_fp, mode='r')
aligned_fp = str(input_sequences)
with tempfile.TemporaryDirectory() as temp_dir:
cmd = ['raxmlHPC',
'-m', 'GTRGAMMA',
'-p', '1723',
'-s', aligned_fp,
'-w', temp_dir,
'-n', 'q2',
'-f', 'a',
'-x', '9834',
'-N', '10']
with redirected_stdio(stderr=os.devnull):
run_command(cmd, verbose=False)
obs_tree_fp = os.path.join(temp_dir, 'RAxML_bipartitions.q2')
obs_tree = skbio.TreeNode.read(str(obs_tree_fp),
convert_underscores=False)
# load the resulting tree and test | |
# coding: utf-8
import pprint
import six
from enum import Enum
class SubscriptionProductVersion:
swagger_types = {
'activated_on': 'datetime',
'billing_cycle': 'str',
'comment': 'str',
'created_on': 'datetime',
'default_currency': 'str',
'enabled_currencies': 'list[str]',
'id': 'int',
'increment_number': 'int',
'linked_space_id': 'int',
'minimal_number_of_periods': 'int',
'name': 'DatabaseTranslatedString',
'number_of_notice_periods': 'int',
'obsoleted_on': 'datetime',
'planned_purge_date': 'datetime',
'product': 'SubscriptionProduct',
'reference': 'str',
'retiring_finished_on': 'datetime',
'retiring_started_on': 'datetime',
'state': 'SubscriptionProductVersionState',
'tax_calculation': 'TaxCalculation',
'version': 'int',
}
attribute_map = {
'activated_on': 'activatedOn','billing_cycle': 'billingCycle','comment': 'comment','created_on': 'createdOn','default_currency': 'defaultCurrency','enabled_currencies': 'enabledCurrencies','id': 'id','increment_number': 'incrementNumber','linked_space_id': 'linkedSpaceId','minimal_number_of_periods': 'minimalNumberOfPeriods','name': 'name','number_of_notice_periods': 'numberOfNoticePeriods','obsoleted_on': 'obsoletedOn','planned_purge_date': 'plannedPurgeDate','product': 'product','reference': 'reference','retiring_finished_on': 'retiringFinishedOn','retiring_started_on': 'retiringStartedOn','state': 'state','tax_calculation': 'taxCalculation','version': 'version',
}
_activated_on = None
_billing_cycle = None
_comment = None
_created_on = None
_default_currency = None
_enabled_currencies = None
_id = None
_increment_number = None
_linked_space_id = None
_minimal_number_of_periods = None
_name = None
_number_of_notice_periods = None
_obsoleted_on = None
_planned_purge_date = None
_product = None
_reference = None
_retiring_finished_on = None
_retiring_started_on = None
_state = None
_tax_calculation = None
_version = None
    def __init__(self, **kwargs):
        """Initialize the model, taking every field from keyword arguments.

        Unknown keywords are ignored; missing fields default to None.
        """
        self.discriminator = None

        self.activated_on = kwargs.get('activated_on', None)
        self.billing_cycle = kwargs.get('billing_cycle', None)
        self.comment = kwargs.get('comment', None)
        self.created_on = kwargs.get('created_on', None)
        self.default_currency = kwargs.get('default_currency', None)
        self.enabled_currencies = kwargs.get('enabled_currencies', None)
        self.id = kwargs.get('id', None)
        self.increment_number = kwargs.get('increment_number', None)
        self.linked_space_id = kwargs.get('linked_space_id', None)
        self.minimal_number_of_periods = kwargs.get('minimal_number_of_periods', None)
        self.name = kwargs.get('name', None)
        self.number_of_notice_periods = kwargs.get('number_of_notice_periods', None)
        self.obsoleted_on = kwargs.get('obsoleted_on', None)
        self.planned_purge_date = kwargs.get('planned_purge_date', None)
        self.product = kwargs.get('product', None)
        self.reference = kwargs.get('reference', None)
        self.retiring_finished_on = kwargs.get('retiring_finished_on', None)
        self.retiring_started_on = kwargs.get('retiring_started_on', None)
        self.state = kwargs.get('state', None)
        self.tax_calculation = kwargs.get('tax_calculation', None)
        self.version = kwargs.get('version', None)
    @property
    def activated_on(self):
        """Gets the activated_on of this SubscriptionProductVersion.

        :return: The activated_on of this SubscriptionProductVersion.
        :rtype: datetime
        """
        return self._activated_on

    @activated_on.setter
    def activated_on(self, activated_on):
        """Sets the activated_on of this SubscriptionProductVersion.

        :param activated_on: The activated_on of this SubscriptionProductVersion.
        :type: datetime
        """
        self._activated_on = activated_on
    @property
    def billing_cycle(self):
        """Gets the billing_cycle of this SubscriptionProductVersion.

        The billing cycle determines the rhythm with which the subscriber is billed. The charging may have a different rhythm.

        :return: The billing_cycle of this SubscriptionProductVersion.
        :rtype: str
        """
        return self._billing_cycle

    @billing_cycle.setter
    def billing_cycle(self, billing_cycle):
        """Sets the billing_cycle of this SubscriptionProductVersion.

        The billing cycle determines the rhythm with which the subscriber is billed. The charging may have a different rhythm.

        :param billing_cycle: The billing_cycle of this SubscriptionProductVersion.
        :type: str
        """
        self._billing_cycle = billing_cycle
    @property
    def comment(self):
        """Gets the comment of this SubscriptionProductVersion.

        The comment allows providing an internal note for the version. It helps to document why a product was changed. The comment is not disclosed to the subscriber.

        :return: The comment of this SubscriptionProductVersion.
        :rtype: str
        """
        return self._comment

    @comment.setter
    def comment(self, comment):
        """Sets the comment of this SubscriptionProductVersion.

        The comment allows providing an internal note for the version. It helps to document why a product was changed. The comment is not disclosed to the subscriber.

        :param comment: The comment of this SubscriptionProductVersion.
        :type: str
        """
        self._comment = comment
    @property
    def created_on(self):
        """Gets the created_on of this SubscriptionProductVersion.

        :return: The created_on of this SubscriptionProductVersion.
        :rtype: datetime
        """
        return self._created_on

    @created_on.setter
    def created_on(self, created_on):
        """Sets the created_on of this SubscriptionProductVersion.

        :param created_on: The created_on of this SubscriptionProductVersion.
        :type: datetime
        """
        self._created_on = created_on
    @property
    def default_currency(self):
        """Gets the default_currency of this SubscriptionProductVersion.

        The default currency has to be used in all fees.

        :return: The default_currency of this SubscriptionProductVersion.
        :rtype: str
        """
        return self._default_currency

    @default_currency.setter
    def default_currency(self, default_currency):
        """Sets the default_currency of this SubscriptionProductVersion.

        The default currency has to be used in all fees.

        :param default_currency: The default_currency of this SubscriptionProductVersion.
        :type: str
        """
        self._default_currency = default_currency
    @property
    def enabled_currencies(self):
        """Gets the enabled_currencies of this SubscriptionProductVersion.

        Only currencies which are enabled can be selected to define component fees. Currencies which are not enabled cannot be used to define fees.

        :return: The enabled_currencies of this SubscriptionProductVersion.
        :rtype: list[str]
        """
        return self._enabled_currencies

    @enabled_currencies.setter
    def enabled_currencies(self, enabled_currencies):
        """Sets the enabled_currencies of this SubscriptionProductVersion.

        Only currencies which are enabled can be selected to define component fees. Currencies which are not enabled cannot be used to define fees.

        :param enabled_currencies: The enabled_currencies of this SubscriptionProductVersion.
        :type: list[str]
        """
        self._enabled_currencies = enabled_currencies
@property
def id(self):
    """Primary key of this SubscriptionProductVersion (unique entity ID).

    :rtype: int
    """
    return self._id


@id.setter
def id(self, id):
    """Set the unique entity ID.

    :type id: int
    """
    self._id = id
@property
def increment_number(self):
    """Version number, incremented whenever a new version is activated.

    :rtype: int
    """
    return self._increment_number


@increment_number.setter
def increment_number(self, increment_number):
    """Set the version increment number.

    :type increment_number: int
    """
    self._increment_number = increment_number
@property
def linked_space_id(self):
    """ID of the space this entity belongs to.

    :rtype: int
    """
    return self._linked_space_id


@linked_space_id.setter
def linked_space_id(self, linked_space_id):
    """Set the owning space ID.

    :type linked_space_id: int
    """
    self._linked_space_id = linked_space_id
@property
def minimal_number_of_periods(self):
    """Minimum number of periods the subscription must run before it can be terminated.

    :rtype: int
    """
    return self._minimal_number_of_periods


@minimal_number_of_periods.setter
def minimal_number_of_periods(self, minimal_number_of_periods):
    """Set the minimal number of periods before termination is allowed.

    :type minimal_number_of_periods: int
    """
    self._minimal_number_of_periods = minimal_number_of_periods
@property
def name(self):
    """User-visible product name for this version.

    Changing the visible product name requires creating a new version
    carrying the new name.

    :rtype: DatabaseTranslatedString
    """
    return self._name


@name.setter
def name(self, name):
    """Set the user-visible product name for this version.

    :type name: DatabaseTranslatedString
    """
    self._name = name
@property
def number_of_notice_periods(self):
    """Number of periods that must still be paid between a termination request and the final period.

    :rtype: int
    """
    return self._number_of_notice_periods


@number_of_notice_periods.setter
def number_of_notice_periods(self, number_of_notice_periods):
    """Set the number of notice periods.

    :type number_of_notice_periods: int
    """
    self._number_of_notice_periods = number_of_notice_periods
@property
def obsoleted_on(self):
    """Timestamp when this SubscriptionProductVersion was obsoleted.

    :rtype: datetime
    """
    return self._obsoleted_on


@obsoleted_on.setter
def obsoleted_on(self, obsoleted_on):
    """Set the obsoleted timestamp.

    :type obsoleted_on: datetime
    """
    self._obsoleted_on = obsoleted_on
@property
def planned_purge_date(self):
"""Gets the planned_purge_date of this SubscriptionProductVersion.
The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
| |
# Repository: anna-hope/python-firestore
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
def _make_document_reference(*args, **kwargs):
    """Construct a ``DocumentReference``; the import is deferred so the module loads cheaply."""
    from google.cloud.firestore_v1.document import DocumentReference

    ref = DocumentReference(*args, **kwargs)
    return ref
def test_constructor():
    """Alternating collection/document ids are joined with '/' into the path."""
    coll_a, doc_a = "users", "alovelace"
    coll_b, doc_b = "platform", "*nix"

    client = mock.MagicMock()
    client.__hash__.return_value = 1234

    document = _make_document_reference(coll_a, doc_a, coll_b, doc_b, client=client)

    assert document._client is client
    assert document.path == "/".join((coll_a, doc_a, coll_b, doc_b))
def _make_commit_repsonse(write_results=None):
    """Return an autospec'd ``CommitResponse`` stub.

    NOTE(review): the name's "repsonse" typo is kept on purpose — every
    caller in this module uses the misspelled name.
    """
    from google.cloud.firestore_v1.types import firestore

    response = mock.create_autospec(firestore.CommitResponse)
    response.commit_time = mock.sentinel.commit_time
    response.write_results = write_results if write_results else [mock.sentinel.write_result]
    return response
def _write_pb_for_create(document_path, document_data):
    """Build the ``Write`` proto that ``DocumentReference.create`` should commit."""
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import write

    update_doc = document.Document(
        name=document_path, fields=_helpers.encode_dict(document_data)
    )
    # create() requires the document to not exist yet.
    return write.Write(
        update=update_doc,
        current_document=common.Precondition(exists=False),
    )
def _create_helper(retry=None, timeout=None):
    """Exercise ``DocumentReference.create`` against a stubbed GAPIC commit."""
    from google.cloud.firestore_v1 import _helpers

    # Stub GAPIC that returns a canned commit response.
    api = mock.Mock()
    api.commit.mock_add_spec(spec=["commit"])
    api.commit.return_value = _make_commit_repsonse()

    # Attach the stub to a real client.
    client = _make_client("dignity")
    client._firestore_api_internal = api

    # Make a document and call create().
    document = _make_document_reference("foo", "twelve", client=client)
    data = {"hello": "goodbye", "count": 99}
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    result = document.create(data, **kwargs)

    # Check the returned write result and the RPC that was issued.
    assert result is mock.sentinel.write_result
    expected_write = _write_pb_for_create(document._document_path, data)
    api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [expected_write],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_create():
    """Plain create() with default retry/timeout."""
    _create_helper()


def test_documentreference_create_w_retry_timeout():
    """create() forwards an explicit retry policy and timeout."""
    from google.api_core.retry import Retry

    _create_helper(retry=Retry(predicate=object()), timeout=123.0)
def test_documentreference_create_empty():
    """create({}) still commits and the resulting snapshot reports existence."""
    from google.cloud.firestore_v1.document import DocumentReference
    from google.cloud.firestore_v1.document import DocumentSnapshot

    # Stub GAPIC whose commit "writes" a reference that resolves to an
    # existing snapshot.
    api = mock.Mock(spec=["commit"])
    doc_ref = mock.create_autospec(DocumentReference)
    snapshot = mock.create_autospec(DocumentSnapshot)
    snapshot.exists = True
    doc_ref.get.return_value = snapshot
    api.commit.return_value = _make_commit_repsonse(write_results=[doc_ref])

    client = _make_client("dignity")
    client._firestore_api_internal = api
    client.get_all = mock.MagicMock()
    client.get_all.exists.return_value = True

    document = _make_document_reference("foo", "twelve", client=client)
    write_result = document.create({})
    assert write_result.get().exists
def _write_pb_for_set(document_path, document_data, merge):
    """Build the expected ``Write`` proto for ``DocumentReference.set``.

    When ``merge`` is true, an update mask listing every leaf field path
    (sorted by its API string representation) is attached.

    Fix: the original sorted twice — once over ``FieldPath`` objects and
    again over their string reprs — and unpacked an unused ``value``; the
    final mask is simply the sorted API reprs, computed once here.
    """
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import write

    write_pbs = write.Write(
        update=document.Document(
            name=document_path, fields=_helpers.encode_dict(document_data)
        )
    )
    if merge:
        # extract_fields yields (field_path, value) pairs; only the paths
        # matter for the mask.
        field_paths = [
            field_path.to_api_repr()
            for field_path, _ in _helpers.extract_fields(
                document_data, _helpers.FieldPath()
            )
        ]
        mask = common.DocumentMask(field_paths=sorted(field_paths))
        write_pbs._pb.update_mask.CopyFrom(mask._pb)
    return write_pbs
def _set_helper(merge=False, retry=None, timeout=None, **option_kwargs):
    """Exercise ``DocumentReference.set`` against a stubbed GAPIC commit."""
    from google.cloud.firestore_v1 import _helpers

    # Stub GAPIC returning a canned commit response.
    api = mock.Mock(spec=["commit"])
    api.commit.return_value = _make_commit_repsonse()

    client = _make_client("db-dee-bee")
    client._firestore_api_internal = api

    # Make a document and call set().
    document = _make_document_reference("User", "Interface", client=client)
    data = {"And": 500, "Now": b"\xba\xaa\xaa \xba\xaa\xaa"}
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    write_result = document.set(data, merge, **kwargs)

    # Check the returned write result and the RPC that was issued.
    assert write_result is mock.sentinel.write_result
    expected_write = _write_pb_for_set(document._document_path, data, merge)
    api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [expected_write],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_set():
    """Default set() — no merge, no retry/timeout."""
    _set_helper()


def test_documentreference_set_w_retry_timeout():
    """set() forwards an explicit retry policy and timeout."""
    from google.api_core.retry import Retry

    _set_helper(retry=Retry(predicate=object()), timeout=123.0)


def test_documentreference_set_merge():
    """set(..., merge=True) attaches an update mask."""
    _set_helper(merge=True)
def _write_pb_for_update(document_path, update_values, field_paths):
    """Build the expected ``Write`` proto for ``DocumentReference.update``."""
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import write

    updated_doc = document.Document(
        name=document_path, fields=_helpers.encode_dict(update_values)
    )
    # update() requires the document to already exist.
    precondition = common.Precondition(exists=True)
    return write.Write(
        update=updated_doc,
        update_mask=common.DocumentMask(field_paths=field_paths),
        current_document=precondition,
    )
def _update_helper(retry=None, timeout=None, **option_kwargs):
    """Exercise ``DocumentReference.update`` against a stubbed GAPIC commit."""
    from collections import OrderedDict

    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.transforms import DELETE_FIELD

    # Stub GAPIC returning a canned commit response.
    api = mock.Mock(spec=["commit"])
    api.commit.return_value = _make_commit_repsonse()

    client = _make_client("potato-chip")
    client._firestore_api_internal = api

    document = _make_document_reference("baked", "Alaska", client=client)

    # OrderedDict keeps iteration deterministic for the assertions below.
    field_updates = OrderedDict(
        (("hello", 1), ("then.do", False), ("goodbye", DELETE_FIELD))
    )
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)

    option = client.write_option(**option_kwargs) if option_kwargs else None
    if option is None:
        write_result = document.update(field_updates, **kwargs)
    else:
        write_result = document.update(field_updates, option=option, **kwargs)

    # Check the returned write result and the RPC that was issued.
    assert write_result is mock.sentinel.write_result
    update_values = {
        "hello": field_updates["hello"],
        "then": {"do": field_updates["then.do"]},
    }
    expected_write = _write_pb_for_update(
        document._document_path, update_values, sorted(field_updates.keys())
    )
    if option is not None:
        option.modify_write(expected_write)
    api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [expected_write],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_update_with_exists():
    """``exists`` is not a valid write option for update()."""
    with pytest.raises(ValueError):
        _update_helper(exists=True)


def test_documentreference_update():
    """Default update()."""
    _update_helper()


def test_documentreference_update_w_retry_timeout():
    """update() forwards an explicit retry policy and timeout."""
    from google.api_core.retry import Retry

    _update_helper(retry=Retry(predicate=object()), timeout=123.0)


def test_documentreference_update_with_precondition():
    """update() with a last-update-time precondition."""
    from google.protobuf import timestamp_pb2

    _update_helper(
        last_update_time=timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244)
    )
def test_documentreference_empty_update():
    """update({}) is rejected with ValueError before any RPC happens."""
    api = mock.Mock(spec=["commit"])
    api.commit.return_value = _make_commit_repsonse()

    client = _make_client("potato-chip")
    client._firestore_api_internal = api

    document = _make_document_reference("baked", "Alaska", client=client)
    with pytest.raises(ValueError):
        document.update({})
def _delete_helper(retry=None, timeout=None, **option_kwargs):
    """Exercise ``DocumentReference.delete`` against a stubbed GAPIC commit."""
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.types import write

    # Stub GAPIC returning a canned commit response.
    api = mock.Mock(spec=["commit"])
    api.commit.return_value = _make_commit_repsonse()

    client = _make_client("donut-base")
    client._firestore_api_internal = api
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)

    # Make a document and call delete().
    document = _make_document_reference("where", "we-are", client=client)
    option = client.write_option(**option_kwargs) if option_kwargs else None
    if option is None:
        delete_time = document.delete(**kwargs)
    else:
        delete_time = document.delete(option=option, **kwargs)

    # Check the returned commit time and the RPC that was issued.
    assert delete_time is mock.sentinel.commit_time
    expected_write = write.Write(delete=document._document_path)
    if option is not None:
        option.modify_write(expected_write)
    api.commit.assert_called_once_with(
        request={
            "database": client._database_string,
            "writes": [expected_write],
            "transaction": None,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_delete():
    """Default delete()."""
    _delete_helper()


def test_documentreference_delete_with_option():
    """delete() with a last-update-time precondition."""
    from google.protobuf import timestamp_pb2

    _delete_helper(
        last_update_time=timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244)
    )


def test_documentreference_delete_w_retry_timeout():
    """delete() forwards an explicit retry policy and timeout."""
    from google.api_core.retry import Retry

    _delete_helper(retry=Retry(predicate=object()), timeout=123.0)
def _get_helper(
    field_paths=None,
    use_transaction=False,
    not_found=False,
    # This should be an impossible case, but we test against it for
    # completeness
    return_empty=False,
    retry=None,
    timeout=None,
):
    """Exercise ``DocumentReference.get`` against a stubbed ``batch_get_documents``.

    Covers the found, not-found, and empty-stream cases, optional field
    masks, transactions, and retry/timeout forwarding.
    """
    from google.cloud.firestore_v1 import _helpers
    from google.cloud.firestore_v1.types import common
    from google.cloud.firestore_v1.types import document
    from google.cloud.firestore_v1.types import firestore
    from google.cloud.firestore_v1.transaction import Transaction

    # Create a minimal fake GAPIC with a dummy response.
    create_time = 123
    update_time = 234
    read_time = 345
    firestore_api = mock.Mock(spec=["batch_get_documents"])
    response = mock.create_autospec(firestore.BatchGetDocumentsResponse)
    response.read_time = read_time
    response.found = mock.create_autospec(document.Document)
    response.found.fields = {}
    response.found.create_time = create_time
    response.found.update_time = update_time
    client = _make_client("donut-base")
    client._firestore_api_internal = firestore_api
    document_reference = _make_document_reference("where", "we-are", client=client)
    # Exactly one of found/missing is populated, mirroring the proto oneof.
    response.found.name = None if not_found else document_reference._document_path
    response.missing = document_reference._document_path if not_found else None

    def WhichOneof(val):
        # Mimic protobuf's oneof resolution for the found/missing fields.
        return "missing" if not_found else "found"

    # The snapshot code inspects response._pb; point it back at the mock
    # itself so WhichOneof resolves against our stub.
    response._pb = response
    response._pb.WhichOneof = WhichOneof
    firestore_api.batch_get_documents.return_value = iter(
        [response] if not return_empty else []
    )
    if use_transaction:
        transaction = Transaction(client)
        transaction_id = transaction._id = b"asking-me-2"
    else:
        transaction = None
    kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
    snapshot = document_reference.get(
        field_paths=field_paths, transaction=transaction, **kwargs
    )
    assert snapshot.reference is document_reference
    if not_found or return_empty:
        # Missing documents yield an empty, non-existent snapshot.
        assert snapshot._data is None
        assert not snapshot.exists
        assert snapshot.read_time is not None
        assert snapshot.create_time is None
        assert snapshot.update_time is None
    else:
        assert snapshot.to_dict() == {}
        assert snapshot.exists
        assert snapshot.read_time is read_time
        assert snapshot.create_time is create_time
        assert snapshot.update_time is update_time
    # Verify the request made to the API
    if field_paths is not None:
        mask = common.DocumentMask(field_paths=sorted(field_paths))
    else:
        mask = None
    if use_transaction:
        expected_transaction_id = transaction_id
    else:
        expected_transaction_id = None
    firestore_api.batch_get_documents.assert_called_once_with(
        request={
            "database": client._database_string,
            "documents": [document_reference._document_path],
            "mask": mask,
            "transaction": expected_transaction_id,
        },
        metadata=client._rpc_metadata,
        **kwargs,
    )
def test_documentreference_get_not_found():
    """A missing document yields a non-existent snapshot."""
    _get_helper(not_found=True)


def test_documentreference_get_default():
    """Plain get() with defaults."""
    _get_helper()


def test_documentreference_get_return_empty():
    """An empty response stream also yields a non-existent snapshot."""
    _get_helper(return_empty=True)


def test_documentreference_get_w_retry_timeout():
    """get() forwards an explicit retry policy and timeout."""
    from google.api_core.retry import Retry

    _get_helper(retry=Retry(predicate=object()), timeout=123.0)


def test_documentreference_get_w_string_field_path():
    """A bare string field path (instead of a list) is rejected."""
    with pytest.raises(ValueError):
        _get_helper(field_paths="foo")


def test_documentreference_get_with_field_path():
    """get() with a single-entry field mask."""
    _get_helper(field_paths=["foo"])


def test_documentreference_get_with_multiple_field_paths():
    """get() with a multi-entry field mask."""
    _get_helper(field_paths=["foo", "bar.baz"])


def test_documentreference_get_with_transaction():
    """get() inside a transaction passes the transaction id."""
    _get_helper(use_transaction=True)
def _collections_helper(page_size=None, retry=None, timeout=None):
from google.cloud.firestore_v1.collection import CollectionReference
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.services.firestore.client import FirestoreClient
collection_ids = ["coll-1", "coll-2"]
class Pager(object):
def __iter__(self):
yield from collection_ids
api_client = mock.create_autospec(FirestoreClient)
api_client.list_collection_ids.return_value = Pager()
client = _make_client()
client._firestore_api_internal = api_client
kwargs = _helpers.make_retry_timeout_kwargs(retry, timeout)
# Actually make a document and call delete().
document = _make_document_reference("where", "we-are", client=client)
if page_size is not None:
collections = list(document.collections(page_size=page_size, **kwargs))
# ---- end of file fragment ----
# type: ignore
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import copy
from requests import Response
class AzureFirewallClient:
def __init__(self, subscription_id: str,
             resource_group: str,
             client_id: str,
             api_version: str,
             verify: bool,
             proxy: bool,
             client_secret: str = None,
             tenant_id: str = None,
             certificate_thumbprint: str = None,
             private_key: str = None):
    """Initialize the Azure Firewall API client.

    Args:
        subscription_id (str): Azure subscription ID used to build the base URL.
        resource_group (str): Resource group scoping most requests.
        client_id (str): Azure AD application (client) ID.
        api_version (str): API version sent as the ``api-version`` query param.
        verify (bool): Whether to verify TLS certificates.
        proxy (bool): Whether to route requests through the system proxy.
        client_secret (str): App secret for the client-credentials flow (optional).
        tenant_id (str): Azure AD tenant ID for the client-credentials flow (optional).
        certificate_thumbprint (str): Certificate thumbprint for cert-based
            client-credentials auth (optional).
        private_key (str): Private key matching the certificate (optional).
    """
    self.resource_group = resource_group
    self.subscription_id = subscription_id
    self.api_version = api_version
    self.default_params = {"api-version": api_version}
    # Client-credentials (app) auth requires either secret+tenant or
    # certificate+private key; otherwise fall back to the interactive
    # device-code flow.
    is_credentials = (client_secret and tenant_id) or (certificate_thumbprint and private_key)
    scope = Scopes.management_azure if is_credentials else \
        'https://management.azure.com/user_impersonation offline_access user.read'
    grant_type = CLIENT_CREDENTIALS if is_credentials else DEVICE_CODE
    # Device-code flow has no tenant; use the shared "organizations" endpoint.
    token_retrieval_url = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token' if tenant_id \
        else 'https://login.microsoftonline.com/organizations/oauth2/v2.0/token'
    if not is_credentials:
        # Drop partial credential inputs so MicrosoftClient cleanly uses
        # the device-code flow.
        client_secret = None
        tenant_id = None
        certificate_thumbprint = None
        private_key = None
    self.ms_client = MicrosoftClient(
        self_deployed=True,
        tenant_id=tenant_id,
        token_retrieval_url=token_retrieval_url,
        auth_id=client_id,
        enc_key=client_secret,
        grant_type=grant_type,
        base_url=f'https://management.azure.com/subscriptions/{subscription_id}'
                 f'/resourceGroups/{resource_group}/providers/Microsoft.Network',
        scope=scope,
        verify=verify,
        proxy=proxy,
        certificate_thumbprint=certificate_thumbprint,
        private_key=private_key
    )
def azure_firewall_list_request(self, resource: str, next_link: str = None) -> dict:
    """
    List azure firewalls in resource group or subscription.
    Args:
        resource (str): "resource_group" scopes the listing to the configured
            group; any other value lists across the subscription.
        next_link (str): URL to retrieve the next set of results.
    Returns:
        dict: API response from Azure.
    """
    if next_link:
        # Pagination links are absolute and already carry the api-version.
        return self.ms_client.http_request('GET', full_url=next_link, resp_type="json", timeout=100)

    base = f'https://management.azure.com/subscriptions/{self.subscription_id}'
    if resource == "resource_group":
        base += f'/resourceGroups/{self.resource_group}'
    full_url = f'{base}/providers/Microsoft.Network/azureFirewalls'
    return self.ms_client.http_request('GET', full_url=full_url, params=self.default_params,
                                       resp_type="json", timeout=100)
def azure_firewall_get_request(self, firewall_name: str) -> dict:
    """
    Retrieve azure firewall information.
    Args:
        firewall_name (str): The name of the azure firewall to retrieve.
    Returns:
        dict: API response from Azure.
    """
    return self.ms_client.http_request(
        'GET',
        url_suffix=f'azureFirewalls/{firewall_name}',
        params=self.default_params,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_update_request(self, firewall_name: str, firewall_data: dict) -> dict:
    """
    Update firewall resource.
    Args:
        firewall_name (str): The name of the firewall to update.
        firewall_data (dict): Firewall resource JSON information.
    Returns:
        dict: API response from Azure.
    """
    return self.ms_client.http_request(
        'PUT',
        url_suffix=f'azureFirewalls/{firewall_name}',
        params=self.default_params,
        json_data=firewall_data,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_policy_create_request(self, policy_name: str, threat_intelligence_mode: str, ip_address: list,
                                         domain_address: list, location: str, tier: str, base_policy_id: str,
                                         enable_proxy: bool, dns_servers: list) -> dict:
    """
    Create firewall policy.
    Args:
        policy_name (str): The name of the azure policy to create.
        threat_intelligence_mode (str): The operation mode for Threat Intelligence.
        ip_address (list): IP addresses for the threat intelligence whitelist.
        domain_address (list): Fully qualified domain name for the threat intelligence whitelist.
        location (str): Policy resource region location.
        tier (str): Tier of an Azure Policy.
        base_policy_id (str): The ID of the parent firewall policy from which rules are inherited.
        enable_proxy (bool): Enable DNS Proxy on Firewalls attached to the Firewall Policy.
        dns_servers (list): Custom DNS Servers.
    Returns:
        dict: API response from Azure.
    """
    properties = {
        "threatIntelMode": threat_intelligence_mode,
        "threatIntelWhitelist": {
            "ipAddresses": ip_address,
            "fqdns": domain_address,
        },
        # privateRanges is deliberately unset; remove_empty_elements
        # strips it (and any other empty branch) from the payload.
        "snat": {"privateRanges": None},
        "dnsSettings": {
            "servers": dns_servers,
            "enableProxy": enable_proxy,
        },
        "basePolicy": {"id": base_policy_id},
        "sku": {"tier": tier},
    }
    payload = remove_empty_elements({"location": location, "properties": properties})
    return self.ms_client.http_request(
        'PUT',
        url_suffix=f'firewallPolicies/{policy_name}',
        params=self.default_params,
        json_data=payload,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_policy_update_request(self, policy_name: str, policy_data: dict) -> dict:
    """
    Update policy resource.
    Args:
        policy_name (str): The name of the policy resource to update.
        policy_data (dict): Policy resource JSON information.
    Returns:
        dict: API response from Azure.
    """
    return self.ms_client.http_request(
        'PUT',
        url_suffix=f'firewallPolicies/{policy_name}',
        params=self.default_params,
        json_data=policy_data,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_policy_get_request(self, policy_name: str) -> dict:
    """
    Retrieve policy information.
    Args:
        policy_name (str): The name of the policy to retrieve.
    Returns:
        dict: API response from Azure.
    """
    return self.ms_client.http_request(
        'GET',
        url_suffix=f'firewallPolicies/{policy_name}',
        params=self.default_params,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_policy_delete_request(self, policy_name: str) -> Response:
"""
Delete policy resource.
Args:
policy_name (str): The name of the policy to delete.
Returns:
Response: API response from Azure.
"""
url_suffix = f'firewallPolicies/{policy_name}'
response = self.ms_client.http_request('DELETE', url_suffix=url_suffix, params=self.default_params,
resp_type="response", timeout=100)
return response
def azure_firewall_policy_list_request(self, resource: str, next_link: str = None) -> dict:
    """
    List policies in resource group or subscription.
    Args:
        resource (str): "resource_group" scopes the listing to the configured
            group; any other value lists across the subscription.
        next_link (str): URL to retrieve the next set of results.
    Returns:
        dict: API response from Azure.
    """
    if next_link:
        # Pagination links are absolute and already carry the api-version.
        return self.ms_client.http_request('GET', full_url=next_link, resp_type="json", timeout=100)

    base = f'https://management.azure.com/subscriptions/{self.subscription_id}'
    if resource == "resource_group":
        base += f'/resourceGroups/{self.resource_group}'
    full_url = f'{base}/providers/Microsoft.Network/firewallPolicies'
    return self.ms_client.http_request('GET', full_url=full_url, params=self.default_params,
                                       resp_type="json", timeout=100)
def azure_firewall_policy_rule_collection_create_or_update_request(self, policy_name: str, collection_name: str,
                                                                   collection_data: dict) -> dict:
    """
    Create or update policy rule collection.
    Args:
        policy_name (str): The name of the policy which contains the collection.
        collection_name (str): The name of the rule collection to create or update.
        collection_data (dict): Rule collection information.
    Returns:
        dict: API response from Azure.
    """
    return self.ms_client.http_request(
        'PUT',
        url_suffix=f'firewallPolicies/{policy_name}/ruleCollectionGroups/{collection_name}',
        params=self.default_params,
        json_data=collection_data,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_policy_rule_collection_list_request(self, policy_name: str, next_link: str = None) -> dict:
    """
    List collection rules in policy.
    Args:
        policy_name (str): The resource which contains the policy to list.
        next_link (str): URL to retrieve the next set of results.
    Returns:
        dict: API response from Azure.
    """
    if next_link:
        # Pagination links are absolute and already carry the api-version.
        return self.ms_client.http_request('GET', full_url=next_link, resp_type="json", timeout=100)

    return self.ms_client.http_request(
        'GET',
        url_suffix=f'firewallPolicies/{policy_name}/ruleCollectionGroups',
        params=self.default_params,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_policy_rule_collection_get_request(self, policy_name: str,
                                                      collection_name: str) -> dict:
    """
    Retrieve policy collection group information.
    Args:
        policy_name (str): The name of the policy which contains the collection.
        collection_name (str): The name of the policy rule collection to retrieve.
    Returns:
        dict: API response from Azure.
    """
    return self.ms_client.http_request(
        'GET',
        url_suffix=f'firewallPolicies/{policy_name}/ruleCollectionGroups/{collection_name}',
        params=self.default_params,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_policy_rule_collection_delete_request(self, policy_name: str, collection_name: str) -> Response:
"""
Delete policy collection group information.
Args:
policy_name (str): The name of the policy which contains the collection.
collection_name (str): The name of the policy rule collection to delete.
Returns:
Response: API response from Azure.
"""
url_suffix = f'firewallPolicies/{policy_name}/ruleCollectionGroups/{collection_name}'
response = self.ms_client.http_request('DELETE', url_suffix=url_suffix, params=self.default_params,
resp_type="response", timeout=100)
return response
def azure_firewall_policy_network_rule_collection_create_request(self, policy_name: str, collection_priority: int,
                                                                 collection_name: str, action: str,
                                                                 rule_information: dict) -> dict:
    """
    Create network rule collection in firewall or policy.
    Args:
        policy_name (str): The name of the policy which contains the collection.
        collection_priority (int): The priority of the rule collection resource
            (used for both the group and the collection).
        collection_name (str): The name of the rule collection which contains the rule.
        action (str): The action type of a rule collection.
        rule_information (dict): Rule information.
    Returns:
        dict: API response from Azure.
    """
    collection = {
        "ruleCollectionType": "FirewallPolicyFilterRuleCollection",
        "name": collection_name,
        "priority": collection_priority,
        "action": {"type": action},
        "rules": [rule_information],
    }
    payload = remove_empty_elements({
        "properties": {
            "priority": collection_priority,
            "ruleCollections": [collection],
        }
    })
    return self.ms_client.http_request(
        'PUT',
        url_suffix=f'firewallPolicies/{policy_name}/ruleCollectionGroups/{collection_name}',
        params=self.default_params,
        json_data=payload,
        resp_type="json",
        timeout=100,
    )
def azure_firewall_service_tag_list_request(self, location: str, next_link: str = None) -> dict:
"""
Retrieve service tag information resources.
Args:
location (str): The location that will be used as a reference for version
next_link (str): URL to retrieve the next set of results.
Returns:
dict: API response from Azure.
"""
if next_link:
full_url = next_link
response = self.ms_client.http_request('GET', full_url=full_url, resp_type="json", timeout=100)
return response
full_url = f'https://management.azure.com/subscriptions/{self.subscription_id}' \
f'/providers/Microsoft.Network/locations/{location}/serviceTagDetails'
response = self.ms_client.http_request('GET', full_url=full_url, resp_type="json", params=self.default_params)
return response
def azure_firewall_ip_group_create_request(self, ip_group_name: str, location: str,
ip_address: list = None) -> dict:
"""
Create IP group resource.
Args:
ip_group_name (str): The name of the IP group resource to create.
location (str): The location of the IP group resource.
ip_address (list): IP addresses or IP address prefixes in the IP group resource.
Returns:
dict: API response from Azure.
"""
payload = remove_empty_elements({
"location": location,
"properties": {
"ipAddresses": ip_address
}
})
url_suffix = f'ipGroups/{ip_group_name}'
response = self.ms_client.http_request('PUT', url_suffix=url_suffix, params=self.default_params,
json_data=payload, resp_type="json", timeout=100)
return response
def azure_firewall_ip_group_list_request(self, resource: str, next_link: str = None) -> dict:
"""
List IP Groups in resource group or subscription.
Args:
resource (str): The resource which contains the IP Groups to list.
next_link (str): URL to retrieve the next set of results.
Returns:
dict: API response from Azure.
"""
if next_link:
full_url = next_link
response = self.ms_client.http_request('GET', full_url=full_url, resp_type="json", timeout=100)
return response
if resource == "resource_group":
full_url = f'https://management.azure.com/subscriptions/{self.subscription_id}' \
f'/resourceGroups/{self.resource_group}/providers/Microsoft.Network/ipGroups'
else:
full_url = f'https://management.azure.com/subscriptions/{self.subscription_id}/providers/Microsoft.Network/ipGroups'
response = self.ms_client.http_request('GET', full_url=full_url, params=self.default_params, | |
<filename>tests/test_distribution.py
"""Test cases for the distribution module."""
from decimal import Decimal
from pytest_mock import MockFixture
from rdflib import Graph
from rdflib.compare import graph_diff, isomorphic
from skolemizer.testutils import skolemization
from datacatalogtordf import DataService
from datacatalogtordf import Distribution
def test_to_graph_should_return_identifier_set_at_constructor() -> None:
    """It returns an identifier graph isomorphic to spec."""
    distribution = Distribution("http://example.com/distributions/1")
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the diff to aid debugging before the assertion fails.
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_skolemization(mocker: MockFixture) -> None:
    """It returns a skolemized-identifier graph isomorphic to spec."""
    distribution = Distribution()
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://wwww.digdir.no/.well-known/skolem/284db4d2-80c2-11eb-82c3-83e80baa2f94>
        a dcat:Distribution ;
    .
    """
    # Pin the generated skolem IRI so the output is deterministic.
    mocker.patch(
        "skolemizer.Skolemizer.add_skolemization",
        return_value=skolemization,
    )
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_title_as_graph() -> None:
    """It returns a title graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.title = {"nb": "API-distribusjon", "en": "API-distribution"}
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dct:title "API-distribution"@en, "API-distribusjon"@nb
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_description() -> None:
    """It returns a description graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.description = {"nb": "Beskrivelse", "en": "Description"}
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dct:description "Description"@en, "Beskrivelse"@nb ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_release_date() -> None:
    """It returns a release date graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.release_date = "2019-12-31"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dct:issued "2019-12-31"^^xsd:date ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_modification_date() -> None:
    """It returns a modification date graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.modification_date = "2019-12-31"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dct:modified "2019-12-31"^^xsd:date ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_license() -> None:
    """It returns a license graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.license = "http://example.com/licenses/1"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dct:license <http://example.com/licenses/1>
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_access_rights() -> None:
    """It returns a access rights graph isomorphic to spec."""
    access_rights = ["PUBLIC", "RESTRICTED", "NON-PUBLIC"]
    for _r in access_rights:
        distribution = Distribution()
        distribution.identifier = "http://example.com/distributions/1"
        distribution.access_rights = (
            f"http://publications.europa.eu/distribution/authority/access-right/{_r}"
        )
        src = (
            "@prefix dct: <http://purl.org/dc/terms/> ."
            "@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> ."
            "@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> ."
            "@prefix dcat: <http://www.w3.org/ns/dcat#> .\n"
            "<http://example.com/distributions/1> a dcat:Distribution ;"
            "\tdct:accessRights\t"
            "<http://publications.europa.eu/distribution/authority/access-right/"
            f"{_r}> ."
        )
        g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
        g2 = Graph().parse(data=src, format="turtle")
        _isomorphic = isomorphic(g1, g2)
        if not _isomorphic:
            _dump_diff(g1, g2)
        assert _isomorphic
def test_to_graph_should_return_rights() -> None:
    """It returns a rights graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.rights = "http://example.com/rights/1"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dct:rights <http://example.com/rights/1> ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_has_policy() -> None:
    """It returns a has_policy graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.has_policy = "http://example.com/policies/1"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix odrl: <http://www.w3.org/ns/odrl/2/> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        odrl:hasPolicy <http://example.com/policies/1> ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_access_URL() -> None:
    """It returns a access URL graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.access_URL = "http://example.com/someendpoint"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dcat:accessURL <http://example.com/someendpoint> ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_access_service() -> None:
    """It returns a access service graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    service = DataService()
    service.identifier = "http://example.com/dataservices/1"
    distribution.access_service = service
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dcat:accessService <http://example.com/dataservices/1> ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_access_service_skolemized(mocker: MockFixture) -> None:
    """It returns a access service graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    service = DataService()
    distribution.access_service = service
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dcat:accessService
        <http://wwww.digdir.no/.well-known/skolem/284db4d2-80c2-11eb-82c3-83e80baa2f94>
    .
    """
    # Pin the generated skolem IRI so the output is deterministic.
    mocker.patch(
        "skolemizer.Skolemizer.add_skolemization",
        return_value=skolemization,
    )
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_download_URL() -> None:
    """It returns a download URL graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.download_URL = "http://example.com/download"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dcat:downloadURL <http://example.com/download> ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_byte_size() -> None:
    """It returns a byte size graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    # byte_size is an xsd:decimal:
    distribution.byte_size = Decimal(5120.0)
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dcat:byteSize "5120.0"^^xsd:decimal ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_spatial_resolution() -> None:
    """It returns a spatial resolution graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    # spatial resolution is an xsd:decimal:
    distribution.spatial_resolution = Decimal(30.0)
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dcat:spatialResolutionInMeters "30.0"^^xsd:decimal
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_temporal_resolution() -> None:
    """It returns a temporal resolution graph isomorphic to spec."""
    distribution = Distribution()
    distribution.identifier = "http://example.com/distributions/1"
    distribution.temporal_resolution = "PT15M"
    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

    <http://example.com/distributions/1> a dcat:Distribution ;
        dcat:temporalResolution "PT15M"^^xsd:duration ;
    .
    """
    g1 = Graph().parse(data=distribution.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")
    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_conforms_to() -> None:
"""It returns a conforms to graph isomorphic to spec."""
distribution = Distribution()
distribution.identifier = "http://example.com/distributions/1"
distribution.conforms_to.append("http://example.com/standards/1")
distribution.conforms_to.append("http://example.com/standards/2")
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
| |
neg = pos = 0
event = self.event
for e in events:
event.update(e)
voltage = self.get_voltage(event)
if voltage is not None:
neg += voltage["neg"]
pos += voltage["pos"]
self._state = int(neg / len(events) + pos / len(events))
self.attrs = dict(neg=round(float(neg / len(events)), 2), pos=round(float(pos / len(events)), 2))
class BatteryFerroampSensor(FloatValFerroampSensor):
    """Battery level sensor reported as a percentage."""
    def __init__(self, name, key, device_id, device_name, interval, precision, config_id):
        """Initialize the sensor with PERCENTAGE unit and a placeholder battery icon."""
        super().__init__(
            name, key, PERCENTAGE, "mdi:battery-low", device_id, device_name, interval, precision, config_id
        )
    @property
    def icon(self):
        """Return an mdi battery icon matching the current charge level."""
        # No reading yet: keep whatever icon was chosen last (initially battery-low).
        if self.state is None:
            return self._icon
        # Truncate to the nearest lower multiple of 10 to select an icon tier.
        pct = int(int(self.state) / 10) * 10
        # NOTE(review): pct can be 0 here, producing "mdi:battery-0"; mdi's
        # lowest tiered icon is battery-10 — confirm this renders as intended.
        if pct <= 90:
            self._icon = f"mdi:battery-{pct}"
        else:
            self._icon = "mdi:battery"
        return self._icon
    def handle_options_update(self, options):
        """Apply updated config options; battery precision overrides the base value."""
        super().handle_options_update(options)
        self._precision = options.get(CONF_PRECISION_BATTERY)
class TemperatureFerroampSensor(FloatValFerroampSensor):
    """Temperature sensor reporting in degrees Celsius."""
    def __init__(self, name, key, device_id, device_name, interval, precision, config_id):
        """Initialize with a Celsius unit and thermometer icon."""
        super().__init__(name, key, TEMP_CELSIUS, "mdi:thermometer", device_id,
                         device_name, interval, precision, config_id)
    def handle_options_update(self, options):
        """Apply updated options; precision comes from the temperature setting."""
        super().handle_options_update(options)
        self._precision = options.get(CONF_PRECISION_TEMPERATURE)
class CurrentFerroampSensor(FloatValFerroampSensor):
    """Electrical current sensor reporting in amperes."""
    def __init__(self, name, key, icon, device_id, device_name, interval, precision, config_id):
        """Initialize with the ampere unit and a caller-supplied icon."""
        super().__init__(name, key, ELECTRICAL_CURRENT_AMPERE, icon, device_id,
                         device_name, interval, precision, config_id)
    def handle_options_update(self, options):
        """Apply updated options; precision comes from the current setting."""
        super().handle_options_update(options)
        self._precision = options.get(CONF_PRECISION_CURRENT)
class VoltageFerroampSensor(FloatValFerroampSensor):
    """Voltage sensor reporting in volts."""
    def __init__(self, name, key, icon, device_id, device_name, interval, precision, config_id):
        """Initialize with the volt unit and a caller-supplied icon."""
        super().__init__(name, key, VOLT, icon, device_id,
                         device_name, interval, precision, config_id)
    def handle_options_update(self, options):
        """Apply updated options; precision comes from the voltage setting."""
        super().handle_options_update(options)
        self._precision = options.get(CONF_PRECISION_VOLTAGE)
class EnergyFerroampSensor(FloatValFerroampSensor):
    """Representation of a Ferroamp energy in kWh value Sensor."""
    def __init__(self, name, key, icon, device_id, device_name, interval, precision, config_id):
        """Initialize the sensor with the kWh unit."""
        super().__init__(name, key, ENERGY_KILO_WATT_HOUR, icon, device_id, device_name, interval, precision, config_id)
    def update_state_from_events(self, events):
        """Average the key's value over the event batch and convert to kWh."""
        temp = 0
        # Merge each event into the running event dict; the last occurrence of a
        # key wins, and the merged value for this key is summed per event.
        event = self.event
        for e in events:
            event.update(e)
            v = event.get(self._state_key, None)
            if v is not None:
                temp += float(v["val"])
        # Average over the batch, then scale by 1/3 600 000 000 to get kWh
        # (presumably the raw counter is reported in millijoules — TODO confirm).
        # NOTE(review): raises ZeroDivisionError for an empty batch — callers
        # apparently guarantee at least one event.
        self._state = round(temp / len(events) / 3600000000, self._precision)
        if self._precision == 0:
            self._state = int(self._state)
    def handle_options_update(self, options):
        """Apply updated config options; energy precision overrides the base value."""
        super().handle_options_update(options)
        self._precision = options.get(CONF_PRECISION_ENERGY)
class RelayStatusFerroampSensor(FerroampSensor):
    """Sensor translating a numeric relay status code into a readable label."""
    def __init__(self, name, key, device_id, device_name, interval, config_id):
        """Initialize the sensor with no unit and no icon."""
        super().__init__(name, key, "", "", device_id, device_name, interval, config_id)
    def update_state_from_events(self, events):
        """Merge the event batch and map the latest status code to a label."""
        label = None
        merged = self.event
        for evt in events:
            merged.update(evt)
            raw = merged.get(self._state_key)
            if raw is not None:
                # Unknown codes keep the previously seen label.
                label = {0: "closed", 1: "open/disconnected", 2: "precharge"}.get(int(raw["val"]), label)
        # Only overwrite the state when the batch produced a recognized status.
        if label is not None:
            self._state = label
class PowerFerroampSensor(FloatValFerroampSensor):
    """Power sensor reporting whole watts (precision fixed at 0)."""
    def __init__(self, name, key, icon, device_id, device_name, interval, config_id):
        """Initialize with the watt unit and zero decimal places."""
        super().__init__(name, key, POWER_WATT, icon, device_id,
                         device_name, interval, 0, config_id)
class CalculatedPowerFerroampSensor(FerroampSensor):
    """Representation of a Ferroamp Power Sensor based on V and A."""
    def __init__(self, name, voltage_key, current_key, icon, device_id, device_name, interval, config_id):
        """Initialize the sensor; state is derived from two event keys (V and A)."""
        super().__init__(
            name,
            voltage_key,
            POWER_WATT,
            icon,
            device_id,
            device_name,
            interval,
            config_id
        )
        # Keep both source keys; the base class only tracks voltage_key.
        self._voltage_key = voltage_key
        self._current_key = current_key
    @property
    def unique_id(self):
        """Return unique ID of entity."""
        # Both keys are included so a V/A pair cannot collide with a plain sensor.
        return f"{self.device_id}-{self._voltage_key}-{self._current_key}"
    def update_state_from_events(self, events):
        """Set state to avg(voltage) * avg(current) over the event batch, in watts."""
        temp_voltage = temp_current = 0
        event = self.event
        for e in events:
            event.update(e)
            voltage = event.get(self._voltage_key, None)
            current = event.get(self._current_key, None)
            # Both readings must be present in the merged event to contribute.
            if current is not None and voltage is not None:
                temp_voltage += float(voltage["val"])
                temp_current += float(current["val"])
        # NOTE(review): divides by len(events) — assumes a non-empty batch.
        self._state = int(round(temp_voltage / len(events) * temp_current / len(events), 0))
class ThreePhaseFerroampSensor(FerroampSensor):
    """Representation of a Ferroamp ThreePhase Sensor."""
    def __init__(self, name, key, unit, icon, device_id, device_name, interval, precision, config_id):
        """Initialize the sensor."""
        super().__init__(name, key, unit, icon, device_id, device_name, interval, config_id)
        # Decimal places used when rounding the combined three-phase total.
        self._precision = precision
    def get_phases(self, event):
        """Extract per-phase float values for this sensor's key, or None if absent."""
        phases = event.get(self._state_key, None)
        _LOGGER.debug(phases)
        if phases is not None:
            phases = dict(
                L1=float(phases["L1"]), L2=float(phases["L2"]), L3=float(phases["L3"])
            )
        return phases
    def update_state_from_events(self, events):
        """Average each phase over the event batch; state is the sum of the averages."""
        l1 = l2 = l3 = 0
        event = self.event
        for e in events:
            event.update(e)
            phases = self.get_phases(event)
            if phases is not None:
                l1 += phases["L1"]
                l2 += phases["L2"]
                l3 += phases["L3"]
        # NOTE(review): divides by len(events) — assumes a non-empty batch.
        self._state = round(l1 / len(events) + l2 / len(events) + l3 / len(events), self._precision)
        if self._precision == 0:
            self._state = int(self._state)
        # Per-phase averages are exposed as extra attributes, rounded to 2 places.
        self.attrs = dict(
            L1=round(float(l1 / len(events)), 2),
            L2=round(float(l2 / len(events)), 2),
            L3=round(float(l3 / len(events)), 2),
        )
class ThreePhaseEnergyFerroampSensor(ThreePhaseFerroampSensor):
    """Three-phase energy sensor reporting per-phase values in kWh."""
    def __init__(self, name, key, icon, device_id, device_name, interval, precision, config_id):
        """Initialize the sensor with the kWh unit."""
        super().__init__(name, key, ENERGY_KILO_WATT_HOUR, icon, device_id, device_name, interval, precision, config_id)
    def get_phases(self, event):
        """Convert the parent's raw per-phase readings to kWh (2 decimals)."""
        raw = super().get_phases(event)
        if raw is None:
            return None
        return {phase: round(raw[phase] / 3600000000, 2) for phase in ("L1", "L2", "L3")}
    def handle_options_update(self, options):
        """Apply updated options; precision comes from the energy setting."""
        super().handle_options_update(options)
        self._precision = options.get(CONF_PRECISION_ENERGY)
class ThreePhasePowerFerroampSensor(ThreePhaseFerroampSensor):
    """Three-phase power sensor reporting whole watts (precision fixed at 0)."""
    def __init__(self, name, key, icon, device_id, device_name, interval, config_id):
        """Initialize with the watt unit and zero decimal places."""
        super().__init__(name, key, POWER_WATT, icon, device_id,
                         device_name, interval, 0, config_id)
class CommandFerroampSensor(RestoreEntity):
    """Entity tracking the last control command sent to the device and its response."""

    def __init__(self, name, device_id, device_name, config_id):
        """Initialize the sensor with no state and empty attributes."""
        self._state = None
        self._name = name
        self._icon = "mdi:cog-transfer-outline"
        self._device_id = device_id
        self._device_name = device_name
        self.config_id = config_id
        self.updated = datetime.min
        self.attrs = {}

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return unique ID of entity."""
        return f"{self.device_id}_last_cmd"

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return self._icon

    @property
    def device_id(self):
        """Return the id of the device this entity belongs to."""
        return self._device_id

    @property
    def device_info(self):
        """Return the device_info of the device."""
        device_info = {
            "identifiers": {(DOMAIN, self._device_id)},
            "name": self._device_name,
            "manufacturer": MANUFACTURER,
        }
        return device_info

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement (none for a command log)."""
        return None

    @property
    def should_poll(self) -> bool:
        """Entity is push-based; never polled."""
        return False

    @property
    def state_attributes(self):
        """Return transId/status/message of the last command exchange."""
        return self.attrs

    async def async_added_to_hass(self) -> None:
        """Handle entity which will be added; restore the last known state."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if not state:
            return
        self._state = state.state
        self.hass.data[DOMAIN][DATA_DEVICES][self.config_id][self.device_id][self.unique_id] = self

    def add_request(self, trans_id, cmd, arg):
        """Record an outgoing command; response fields are cleared until it arrives."""
        if arg is not None:
            self._state = f"{cmd} ({arg})"
        else:
            self._state = cmd
        self.attrs["transId"] = trans_id
        self.attrs["status"] = None
        self.attrs["message"] = None
        self.updated = datetime.now()
        if self.entity_id is not None:
            self.async_write_ha_state()

    def add_response(self, trans_id, status, message):
        """Attach a response to the matching request, ignoring stale transaction ids.

        Uses .get() so a response arriving before any request (attrs still empty)
        is ignored instead of raising KeyError.
        """
        if self.attrs.get("transId") == trans_id:
            self.attrs["status"] = status
            self.attrs["message"] = message
            self.updated = datetime.now()
            if self.entity_id is not None:
                self.async_write_ha_state()
def ehub_sensors(slug, name, interval, precision_battery, precision_energy, precision_frequency, config_id):
return [
FloatValFerroampSensor(
f"{name} Estimated Grid Frequency",
"gridfreq",
FREQUENCY_HERTZ,
"mdi:sine-wave",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_frequency,
config_id,
),
ThreePhaseFerroampSensor(
f"{name} External Voltage",
"ul",
VOLT,
"mdi:current-ac",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
0,
config_id,
),
ThreePhaseFerroampSensor(
f"{name} Inverter RMS current",
"il",
ELECTRICAL_CURRENT_AMPERE,
"mdi:current-dc",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
0,
config_id,
),
ThreePhaseFerroampSensor(
f"{name} Inverter reactive current",
"ild",
ELECTRICAL_CURRENT_AMPERE,
"mdi:current-dc",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
0,
config_id,
),
ThreePhaseFerroampSensor(
f"{name} Grid Current",
"iext",
ELECTRICAL_CURRENT_AMPERE,
"mdi:current-ac",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
0,
config_id,
),
ThreePhaseFerroampSensor(
f"{name} Grid Reactive Current",
"iextd",
ELECTRICAL_CURRENT_AMPERE,
"mdi:current-ac",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
0,
config_id,
),
ThreePhaseFerroampSensor(
f"{name} External Active Current",
"iextq",
ELECTRICAL_CURRENT_AMPERE,
"mdi:current-ac",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
0,
config_id,
),
ThreePhaseFerroampSensor(
f"{name} Adaptive Current Equalization",
"iace",
ELECTRICAL_CURRENT_AMPERE,
"mdi:current-ac",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
0,
config_id,
),
ThreePhasePowerFerroampSensor(
f"{name} Grid Power",
"pext",
"mdi:transmission-tower",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
ThreePhasePowerFerroampSensor(
f"{name} Grid Power Reactive",
"pextreactive",
"mdi:transmission-tower",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
ThreePhasePowerFerroampSensor(
f"{name} Inverter Power, active",
"pinv",
"mdi:solar-power",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
ThreePhasePowerFerroampSensor(
f"{name} Inverter Power, reactive",
"pinvreactive",
"mdi:solar-power",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
ThreePhasePowerFerroampSensor(
f"{name} Consumption Power",
"pload",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
ThreePhasePowerFerroampSensor(
f"{name} Consumption Power Reactive",
"ploadreactive",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
ThreePhaseEnergyFerroampSensor(
f"{name} External Energy Produced",
"wextprodq",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
ThreePhaseEnergyFerroampSensor(
f"{name} External Energy Consumed",
"wextconsq",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
ThreePhaseEnergyFerroampSensor(
f"{name} Inverter Energy Produced",
"winvprodq",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
ThreePhaseEnergyFerroampSensor(
f"{name} Inverter Energy Consumed",
"winvconsq",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
ThreePhaseEnergyFerroampSensor(
f"{name} Load Energy Produced",
"wloadprodq",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
ThreePhaseEnergyFerroampSensor(
f"{name} Load Energy Consumed",
"wloadconsq",
"mdi:power-plug",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
EnergyFerroampSensor(
f"{name} Total Solar Energy",
"wpv",
"mdi:solar-power",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
EnergyFerroampSensor(
f"{name} Battery Energy Produced",
"wbatprod",
"mdi:solar-power",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
EnergyFerroampSensor(
f"{name} Battery Energy Consumed",
"wbatcons",
"mdi:solar-power",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_energy,
config_id,
),
IntValFerroampSensor(
f"{name} System State",
"state",
"",
"mdi:traffic-light",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
DcLinkFerroampSensor(
f"{name} DC Link Voltage",
"udc",
"mdi:current-ac",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
BatteryFerroampSensor(
f"{name} System State of Charge",
"soc",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_battery,
config_id,
),
BatteryFerroampSensor(
f"{name} System State of Health",
"soh",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
precision_battery,
config_id,
),
IntValFerroampSensor(
f"{name} Apparent power",
"sext",
"VA",
"mdi:transmission-tower",
f"{slug}_{EHUB}",
f"{name} {EHUB_NAME}",
interval,
config_id,
),
PowerFerroampSensor(
f"{name} Solar | |
'APIGetVmMonitorNumberMsg',
'APIGetVmMonitorNumberReply',
'APIGetVmQgaMsg',
'APIGetVmQgaReply',
'APIGetVmRDPMsg',
'APIGetVmRDPReply',
'APIGetVmSshKeyMsg',
'APIGetVmSshKeyReply',
'APIGetVmStartingCandidateClustersHostsMsg',
'APIGetVmStartingCandidateClustersHostsReply',
'APIGetVmUsbRedirectMsg',
'APIGetVmUsbRedirectReply',
'APIGetVolumeCapabilitiesMsg',
'APIGetVolumeCapabilitiesReply',
'APIGetVolumeFormatMsg',
'APIGetVolumeFormatReply',
'APIGetVolumeQosMsg',
'APIGetVolumeQosReply',
'APIGetVolumeReply',
'APIGetVolumeSnapshotTreeMsg',
'APIGetVolumeSnapshotTreeReply',
'APIGetVpcVRouterDistributedRoutingConnectionsMsg',
'APIGetVpcVRouterDistributedRoutingConnectionsReply',
'APIGetVpcVRouterDistributedRoutingEnabledMsg',
'APIGetVpcVRouterDistributedRoutingEnabledReply',
'APIGetVpcVpnConfigurationFromRemoteMsg',
'APIGetVpcVpnConfigurationFromRemoteReply',
'APIGetZoneMsg',
'APIGetZoneReply',
'APIIsOpensourceVersionMsg',
'APIIsOpensourceVersionReply',
'APIIsReadyToGoMsg',
'APIIsReadyToGoReply',
'APIKvmRunShellMsg',
'APIListAccountReply',
'APIListApplianceVmReply',
'APIListBackupStorageReply',
'APIListClusterReply',
'APIListDiskOfferingReply',
'APIListGlobalConfigReply',
'APIListHostReply',
'APIListImageReply',
'APIListInstanceOfferingReply',
'APIListIpRangeReply',
'APIListL2NetworkReply',
'APIListL2VlanNetworkReply',
'APIListL3NetworkReply',
'APIListManagementNodeReply',
'APIListNetworkServiceProviderReply',
'APIListPolicyReply',
'APIListPortForwardingRuleReply',
'APIListPrimaryStorageReply',
'APIListSecurityGroupReply',
'APIListUserReply',
'APIListVmInstanceReply',
'APIListVmNicInSecurityGroupReply',
'APIListVmNicReply',
'APIListVolumeReply',
'APIListZonesReply',
'APILocalStorageGetVolumeMigratableHostsMsg',
'APILocalStorageGetVolumeMigratableReply',
'APILocalStorageMigrateVolumeMsg',
'APILogInByAccountMsg',
'APILogInByLdapMsg',
'APILogInByLdapReply',
'APILogInByUserMsg',
'APILogInReply',
'APILogOutMsg',
'APILogOutReply',
'APIMigrateVmMsg',
'APIPauseVmInstanceMsg',
'APIPowerOffBaremetalHostMsg',
'APIPowerOnBaremetalHostMsg',
'APIPowerResetBaremetalHostMsg',
'APIPowerStatusBaremetalHostMsg',
'APIPrimaryStorageMigrateVolumeMsg',
'APIPrometheusQueryLabelValuesMsg',
'APIPrometheusQueryLabelValuesReply',
'APIPrometheusQueryMetadataMsg',
'APIPrometheusQueryMetadataReply',
'APIPrometheusQueryPassThroughMsg',
'APIPrometheusQueryVmMonitoringDataMsg',
'APIPrometheusQueryVmMonitoringDataReply',
'APIProvisionBaremetalHostMsg',
'APIPutMetricDataMsg',
'APIQueryAccountMsg',
'APIQueryAccountReply',
'APIQueryAccountResourceRefMsg',
'APIQueryAccountResourceRefReply',
'APIQueryAffinityGroupMsg',
'APIQueryAffinityGroupReply',
'APIQueryAlarmMsg',
'APIQueryAlarmReply',
'APIQueryAlertMsg',
'APIQueryAlertReply',
'APIQueryAliyunDiskFromLocalMsg',
'APIQueryAliyunDiskFromLocalReply',
'APIQueryAliyunKeySecretMsg',
'APIQueryAliyunKeySecretReply',
'APIQueryAliyunRouteEntryFromLocalMsg',
'APIQueryAliyunRouteEntryFromLocalReply',
'APIQueryAliyunSnapshotFromLocalMsg',
'APIQueryAliyunSnapshotFromLocalReply',
'APIQueryAliyunVirtualRouterFromLocalMsg',
'APIQueryAliyunVirtualRouterFromLocalReply',
'APIQueryApplianceVmMsg',
'APIQueryApplianceVmReply',
'APIQueryBackupStorageMsg',
'APIQueryBackupStorageReply',
'APIQueryBaremetalChassisMsg',
'APIQueryBaremetalChassisReply',
'APIQueryBaremetalHardwareInfoMsg',
'APIQueryBaremetalHardwareInfoReply',
'APIQueryBaremetalHostCfgMsg',
'APIQueryBaremetalHostCfgReply',
'APIQueryBaremetalPxeServerMsg',
'APIQueryBaremetalPxeServerReply',
'APIQueryCephBackupStorageMsg',
'APIQueryCephPrimaryStorageMsg',
'APIQueryCephPrimaryStoragePoolMsg',
'APIQueryCephPrimaryStoragePoolReply',
'APIQueryClusterMsg',
'APIQueryClusterReply',
'APIQueryConnectionAccessPointFromLocalMsg',
'APIQueryConnectionAccessPointFromLocalReply',
'APIQueryConnectionBetweenL3NetworkAndAliyunVSwitchMsg',
'APIQueryConnectionBetweenL3NetworkAndAliyunVSwitchReply',
'APIQueryConsoleProxyAgentMsg',
'APIQueryConsoleProxyAgentReply',
'APIQueryDataCenterFromLocalMsg',
'APIQueryDataCenterFromLocalReply',
'APIQueryDiskOfferingMsg',
'APIQueryDiskOfferingReply',
'APIQueryEcsImageFromLocalMsg',
'APIQueryEcsImageFromLocalReply',
'APIQueryEcsInstanceFromLocalMsg',
'APIQueryEcsInstanceFromLocalReply',
'APIQueryEcsSecurityGroupFromLocalMsg',
'APIQueryEcsSecurityGroupFromLocalReply',
'APIQueryEcsSecurityGroupRuleFromLocalMsg',
'APIQueryEcsSecurityGroupRuleFromLocalReply',
'APIQueryEcsVSwitchFromLocalMsg',
'APIQueryEcsVSwitchFromLocalReply',
'APIQueryEcsVpcFromLocalMsg',
'APIQueryEcsVpcFromLocalReply',
'APIQueryEipMsg',
'APIQueryEipReply',
'APIQueryEmailMediaMsg',
'APIQueryEmailTriggerActionMsg',
'APIQueryEventSubscriptionMsg',
'APIQueryEventSubscriptionReply',
'APIQueryFusionstorBackupStorageMsg',
'APIQueryFusionstorPrimaryStorageMsg',
'APIQueryGCJobMsg',
'APIQueryGCJobReply',
'APIQueryGlobalConfigMsg',
'APIQueryGlobalConfigReply',
'APIQueryHostMsg',
'APIQueryHostReply',
'APIQueryHybridEipFromLocalMsg',
'APIQueryHybridEipFromLocalReply',
'APIQueryIPSecConnectionMsg',
'APIQueryIPSecConnectionReply',
'APIQueryIdentityZoneFromLocalMsg',
'APIQueryIdentityZoneFromLocalReply',
'APIQueryImageMsg',
'APIQueryImageReply',
'APIQueryImageStoreBackupStorageMsg',
'APIQueryImageStoreBackupStorageReply',
'APIQueryInstanceOfferingMsg',
'APIQueryInstanceOfferingReply',
'APIQueryIpRangeMsg',
'APIQueryIpRangeReply',
'APIQueryL2NetworkMsg',
'APIQueryL2NetworkReply',
'APIQueryL2VlanNetworkMsg',
'APIQueryL2VlanNetworkReply',
'APIQueryL2VxlanNetworkMsg',
'APIQueryL2VxlanNetworkPoolMsg',
'APIQueryL2VxlanNetworkPoolReply',
'APIQueryL2VxlanNetworkReply',
'APIQueryL3NetworkMsg',
'APIQueryL3NetworkReply',
'APIQueryLdapBindingMsg',
'APIQueryLdapBindingReply',
'APIQueryLdapServerMsg',
'APIQueryLdapServerReply',
'APIQueryLoadBalancerListenerMsg',
'APIQueryLoadBalancerListenerReply',
'APIQueryLoadBalancerMsg',
'APIQueryLoadBalancerReply',
'APIQueryLocalStorageResourceRefMsg',
'APIQueryLocalStorageResourceRefReply',
'APIQueryLongJobMsg',
'APIQueryLongJobReply',
'APIQueryManagementNodeMsg',
'APIQueryManagementNodeReply',
'APIQueryMediaMsg',
'APIQueryMediaReply',
'APIQueryMonitorTriggerActionMsg',
'APIQueryMonitorTriggerActionReply',
'APIQueryMonitorTriggerMsg',
'APIQueryMonitorTriggerReply',
'APIQueryNetworkServiceL3NetworkRefMsg',
'APIQueryNetworkServiceL3NetworkRefReply',
'APIQueryNetworkServiceProviderMsg',
'APIQueryNetworkServiceProviderReply',
'APIQueryNotificationMsg',
'APIQueryNotificationReply',
'APIQueryNotificationSubscriptionMsg',
'APIQueryNotificationSubscriptionReply',
'APIQueryOssBucketFileNameMsg',
'APIQueryOssBucketFileNameReply',
'APIQueryPciDeviceMsg',
'APIQueryPciDeviceOfferingMsg',
'APIQueryPciDeviceOfferingReply',
'APIQueryPciDevicePciDeviceOfferingMsg',
'APIQueryPciDevicePciDeviceOfferingReply',
'APIQueryPciDeviceReply',
'APIQueryPolicyMsg',
'APIQueryPolicyReply',
'APIQueryPortForwardingRuleMsg',
'APIQueryPortForwardingRuleReply',
'APIQueryPrimaryStorageMsg',
'APIQueryPrimaryStorageReply',
'APIQueryQuotaMsg',
'APIQueryQuotaReply',
'APIQueryReply',
'APIQueryResourcePriceMsg',
'APIQueryResourcePriceReply',
'APIQueryRouterInterfaceFromLocalMsg',
'APIQueryRouterInterfaceFromLocalReply',
'APIQuerySNSApplicationEndpointMsg',
'APIQuerySNSApplicationEndpointReply',
'APIQuerySNSApplicationPlatformMsg',
'APIQuerySNSApplicationPlatformReply',
'APIQuerySNSDingTalkEndpointMsg',
'APIQuerySNSDingTalkEndpointReply',
'APIQuerySNSEmailEndpointMsg',
'APIQuerySNSEmailEndpointReply',
'APIQuerySNSEmailPlatformMsg',
'APIQuerySNSEmailPlatformReply',
'APIQuerySNSHttpEndpointMsg',
'APIQuerySNSHttpEndpointReply',
'APIQuerySNSTextTemplateMsg',
'APIQuerySNSTextTemplateReply',
'APIQuerySNSTopicMsg',
'APIQuerySNSTopicReply',
'APIQuerySNSTopicSubscriberMsg',
'APIQuerySNSTopicSubscriberReply',
'APIQuerySchedulerJobMsg',
'APIQuerySchedulerJobReply',
'APIQuerySchedulerTriggerMsg',
'APIQuerySchedulerTriggerReply',
'APIQuerySecurityGroupMsg',
'APIQuerySecurityGroupReply',
'APIQuerySecurityGroupRuleMsg',
'APIQuerySecurityGroupRuleReply',
'APIQuerySftpBackupStorageMsg',
'APIQuerySftpBackupStorageReply',
'APIQueryShareableVolumeVmInstanceRefMsg',
'APIQueryShareableVolumeVmInstanceRefReply',
'APIQuerySharedResourceMsg',
'APIQuerySharedResourceReply',
'APIQuerySystemTagMsg',
'APIQuerySystemTagReply',
'APIQueryTagMsg',
'APIQueryTagReply',
'APIQueryUsbDeviceMsg',
'APIQueryUsbDeviceReply',
'APIQueryUserGroupMsg',
'APIQueryUserGroupReply',
'APIQueryUserMsg',
'APIQueryUserReply',
'APIQueryUserTagMsg',
'APIQueryUserTagReply',
'APIQueryVCenterBackupStorageMsg',
'APIQueryVCenterBackupStorageReply',
'APIQueryVCenterClusterMsg',
'APIQueryVCenterClusterReply',
'APIQueryVCenterDatacenterMsg',
'APIQueryVCenterDatacenterReply',
'APIQueryVCenterMsg',
'APIQueryVCenterPrimaryStorageMsg',
'APIQueryVCenterPrimaryStorageReply',
'APIQueryVCenterReply',
'APIQueryVRouterRouteEntryMsg',
'APIQueryVRouterRouteEntryReply',
'APIQueryVRouterRouteTableMsg',
'APIQueryVRouterRouteTableReply',
'APIQueryVipMsg',
'APIQueryVipReply',
'APIQueryVirtualBorderRouterFromLocalMsg',
'APIQueryVirtualBorderRouterFromLocalReply',
'APIQueryVirtualRouterOfferingMsg',
'APIQueryVirtualRouterOfferingReply',
'APIQueryVirtualRouterVRouterRouteTableRefMsg',
'APIQueryVirtualRouterVRouterRouteTableRefReply',
'APIQueryVirtualRouterVmMsg',
'APIQueryVirtualRouterVmReply',
'APIQueryVmInstanceMsg',
'APIQueryVmInstanceReply',
'APIQueryVmNicInSecurityGroupMsg',
'APIQueryVmNicInSecurityGroupReply',
'APIQueryVmNicMsg',
'APIQueryVmNicReply',
'APIQueryVniRangeMsg',
'APIQueryVniRangeReply',
'APIQueryVolumeMsg',
'APIQueryVolumeReply',
'APIQueryVolumeSnapshotMsg',
'APIQueryVolumeSnapshotReply',
'APIQueryVolumeSnapshotTreeMsg',
'APIQueryVolumeSnapshotTreeReply',
'APIQueryVpcIkeConfigFromLocalMsg',
'APIQueryVpcIkeConfigFromLocalReply',
'APIQueryVpcIpSecConfigFromLocalMsg',
'APIQueryVpcIpSecConfigFromLocalReply',
'APIQueryVpcUserVpnGatewayFromLocalMsg',
'APIQueryVpcUserVpnGatewayFromLocalReply',
'APIQueryVpcVpnConnectionFromLocalMsg',
'APIQueryVpcVpnConnectionFromLocalReply',
'APIQueryVpcVpnGatewayFromLocalMsg',
'APIQueryVpcVpnGatewayFromLocalReply',
'APIQueryVtepMsg',
'APIQueryVtepReply',
'APIQueryWebhookMsg',
'APIQueryWebhookReply',
'APIQueryZoneMsg',
'APIQueryZoneReply',
'APIRebootEcsInstanceMsg',
'APIRebootVmInstanceMsg',
'APIReclaimSpaceFromImageStoreMsg',
'APIReconnectBackupStorageMsg',
'APIReconnectConsoleProxyAgentMsg',
'APIReconnectHostMsg',
'APIReconnectImageStoreBackupStorageMsg',
'APIReconnectPrimaryStorageMsg',
'APIReconnectSftpBackupStorageMsg',
'APIReconnectVirtualRouterMsg',
'APIRecoverDataVolumeMsg',
'APIRecoverImageMsg',
'APIRecoverVmInstanceMsg',
'APIRecoveryImageFromImageStoreBackupStorageMsg',
'APIRecoveryVirtualBorderRouterRemoteMsg',
'APIRefreshLoadBalancerMsg',
'APIReimageVmInstanceMsg',
'APIReloadLicenseMsg',
'APIReloadLicenseReply',
'APIRemoveActionFromAlarmMsg',
'APIRemoveActionFromEventSubscriptionMsg',
'APIRemoveDnsFromL3NetworkMsg',
'APIRemoveLabelFromAlarmMsg',
'APIRemoveLabelFromEventSubscriptionMsg',
'APIRemoveMonFromCephBackupStorageMsg',
'APIRemoveMonFromCephPrimaryStorageMsg',
'APIRemoveMonFromFusionstorBackupStorageMsg',
'APIRemoveMonFromFusionstorPrimaryStorageMsg',
'APIRemoveRemoteCidrsFromIPsecConnectionMsg',
'APIRemoveSNSDingTalkAtPersonMsg',
'APIRemoveSchedulerJobFromSchedulerTriggerMsg',
'APIRemoveUserFromGroupMsg',
'APIRemoveVmFromAffinityGroupMsg',
'APIRemoveVmNicFromLoadBalancerMsg',
'APIReply',
'APIRequestBaremetalConsoleAccessMsg',
'APIRequestConsoleAccessMsg',
'APIResizeDataVolumeMsg',
'APIResizeRootVolumeMsg',
'APIResumeVmInstanceMsg',
'APIRevertVolumeFromSnapshotMsg',
'APIRevokeResourceSharingMsg',
'APIScanBackupStorageMsg',
'APISearchAccountReply',
'APISearchBackupStorageReply',
'APISearchClusterReply',
'APISearchDiskOfferingReply',
'APISearchDnsReply',
'APISearchGenerateSqlTriggerMsg',
'APISearchHostReply',
'APISearchImageReply',
'APISearchInstanceOfferingReply',
'APISearchL2NetworkReply',
'APISearchL2VlanNetworkReply',
'APISearchL3NetworkReply',
'APISearchNetworkServiceProviderReply',
'APISearchPolicyReply',
'APISearchPrimaryStorageReply',
'APISearchReply',
'APISearchSftpBackupStorageReply',
'APISearchUserGroupReply',
'APISearchUserReply',
'APISearchVirtualRouterOffingReply',
'APISearchVirtualRouterVmReply',
'APISearchVmInstanceReply',
'APISearchVolumeReply',
'APISearchZoneReply',
'APISessionMessage',
'APISetImageQgaMsg',
'APISetL3NetworkMtuMsg',
'APISetL3NetworkRouterInterfaceIpMsg',
'APISetNicQosMsg',
'APISetVipQosMsg',
'APISetVmBootOrderMsg',
'APISetVmConsolePasswordMsg',
'APISetVmHostnameMsg',
'APISetVmInstanceHaLevelMsg',
'APISetVmMonitorNumberMsg',
'APISetVmQgaMsg',
'APISetVmRDPMsg',
'APISetVmSshKeyMsg',
'APISetVmStaticIpMsg',
'APISetVmUsbRedirectMsg',
'APISetVolumeQosMsg',
'APISetVpcVRouterDistributedRoutingEnabledMsg',
'APIShareResourceMsg',
'APIStartBaremetalPxeServerMsg',
'APIStartEcsInstanceMsg',
'APIStartVmInstanceMsg',
'APIStopBaremetalPxeServerMsg',
'APIStopEcsInstanceMsg',
'APIStopVmInstanceMsg',
'APISubmitLongJobMsg',
'APISubscribeEventMsg',
'APISubscribeSNSTopicMsg',
'APISyncAliyunRouteEntryFromRemoteMsg',
'APISyncAliyunSnapshotRemoteMsg',
'APISyncAliyunVirtualRouterFromRemoteMsg',
'APISyncConnectionAccessPointFromRemoteMsg',
'APISyncDataCenterFromRemoteMsg',
'APISyncDiskFromAliyunFromRemoteMsg',
'APISyncEcsImageFromRemoteMsg',
'APISyncEcsInstanceFromRemoteMsg',
'APISyncEcsSecurityGroupFromRemoteMsg',
'APISyncEcsSecurityGroupRuleFromRemoteMsg',
'APISyncEcsVSwitchFromRemoteMsg',
'APISyncEcsVpcFromRemoteMsg',
'APISyncHybridEipFromRemoteMsg',
'APISyncIdentityFromRemoteMsg',
'APISyncImageFromImageStoreBackupStorageMsg',
'APISyncImageSizeMsg',
'APISyncPrimaryStorageCapacityMsg',
'APISyncRouterInterfaceFromRemoteMsg',
'APISyncVCenterMsg',
'APISyncVirtualBorderRouterFromRemoteMsg',
'APISyncVolumeSizeMsg',
'APISyncVpcUserVpnGatewayFromRemoteMsg',
'APISyncVpcVpnConnectionFromRemoteMsg',
'APISyncVpcVpnGatewayFromRemoteMsg',
'APITerminateVirtualBorderRouterRemoteMsg',
'APITriggerGCJobMsg',
'APIUnsubscribeEventMsg',
'APIUnsubscribeSNSTopicMsg',
'APIUpdateAccountMsg',
'APIUpdateAffinityGroupMsg',
'APIUpdateAlarmMsg',
'APIUpdateAliyunDiskMsg',
'APIUpdateAliyunKeySecretMsg',
'APIUpdateAliyunSnapshotMsg',
'APIUpdateAliyunVirtualRouterMsg',
'APIUpdateBackupStorageMsg',
'APIUpdateBaremetalChassisMsg',
'APIUpdateBaremetalPxeServerMsg',
'APIUpdateCephBackupStorageMonMsg',
'APIUpdateCephPrimaryStorageMonMsg',
'APIUpdateCephPrimaryStoragePoolMsg',
'APIUpdateClusterMsg',
'APIUpdateClusterOSMsg',
'APIUpdateConnectionBetweenL3NetWorkAndAliyunVSwitchMsg',
'APIUpdateDiskOfferingMsg',
'APIUpdateEcsImageMsg',
'APIUpdateEcsInstanceMsg',
'APIUpdateEcsInstanceVncPasswordMsg',
'APIUpdateEcsSecurityGroupMsg',
'APIUpdateEcsVSwitchMsg',
'APIUpdateEcsVpcMsg',
'APIUpdateEipMsg',
'APIUpdateEmailMediaMsg',
'APIUpdateEmailMonitorTriggerActionMsg',
'APIUpdateEncryptKeyMsg',
'APIUpdateFusionstorBackupStorageMonMsg',
'APIUpdateFusionstorPrimaryStorageMonMsg',
'APIUpdateGlobalConfigMsg',
'APIUpdateHostIommuStateMsg',
'APIUpdateHostMsg',
'APIUpdateHybridEipMsg',
'APIUpdateIPsecConnectionMsg',
'APIUpdateImageMsg',
'APIUpdateImageStoreBackupStorageMsg',
'APIUpdateInstanceOfferingMsg',
'APIUpdateIpRangeMsg',
'APIUpdateKVMHostMsg',
'APIUpdateL2NetworkMsg',
'APIUpdateL3NetworkMsg',
'APIUpdateLdapServerMsg',
'APIUpdateLicenseMsg',
'APIUpdateLicenseReply',
'APIUpdateLoadBalancerListenerMsg',
'APIUpdateLoadBalancerMsg',
'APIUpdateMonitorTriggerMsg',
'APIUpdateNotificationsStatusMsg',
'APIUpdateOssBucketMsg',
'APIUpdatePciDeviceMsg',
'APIUpdatePortForwardingRuleMsg',
'APIUpdatePrimaryStorageMsg',
'APIUpdateQuotaMsg',
'APIUpdateRouteInterfaceRemoteMsg',
'APIUpdateSNSApplicationEndpointMsg',
'APIUpdateSNSApplicationPlatformMsg',
'APIUpdateSNSTextTemplateMsg',
'APIUpdateSNSTopicMsg',
'APIUpdateSchedulerJobMsg',
'APIUpdateSchedulerTriggerMsg',
'APIUpdateSecurityGroupMsg',
'APIUpdateSftpBackupStorageMsg',
'APIUpdateSystemTagMsg',
'APIUpdateUsbDeviceMsg',
'APIUpdateUserGroupMsg',
'APIUpdateUserMsg',
'APIUpdateVCenterMsg',
'APIUpdateVRouterRouteTableMsg',
'APIUpdateVipMsg',
'APIUpdateVirtualBorderRouterRemoteMsg',
'APIUpdateVirtualRouterOfferingMsg',
'APIUpdateVmInstanceMsg',
'APIUpdateVmNicMacMsg',
'APIUpdateVolumeMsg',
'APIUpdateVolumeSnapshotMsg',
'APIUpdateVpcUserVpnGatewayMsg',
'APIUpdateVpcVpnConnectionRemoteMsg',
'APIUpdateVpcVpnGatewayMsg',
'APIUpdateWebhookMsg',
'APIUpdateZoneMsg',
'APIValidateSNSEmailPlatformMsg',
'APIValidateSessionMsg',
'APIValidateSessionReply',
'CreateTemplateFromVolumeOnPrimaryStorageReply',
]
class VmInstanceInventory(object):
    """Plain data holder mirroring a VM instance inventory from an API response.

    Every attribute is copied verbatim from the response object by
    :meth:`evaluate`; attributes missing on the source object are left as None.
    """

    # Attribute names shared by __init__ (initialisation) and evaluate (copy).
    _ATTRS = (
        'uuid', 'name', 'description', 'zoneUuid', 'clusterUuid',
        'imageUuid', 'hostUuid', 'lastHostUuid', 'instanceOfferingUuid',
        'rootVolumeUuid', 'platform', 'defaultL3NetworkUuid', 'type',
        'hypervisorType', 'memorySize', 'cpuNum', 'cpuSpeed',
        'allocatorStrategy', 'createDate', 'lastOpDate', 'state',
        'internalId', 'vmNics', 'allVolumes',
    )

    def __init__(self):
        # Start every known attribute out as None.
        for attr in self._ATTRS:
            setattr(self, attr, None)

    def evaluate(self, inv):
        """Copy every known attribute from *inv*; absent ones become None."""
        for attr in self._ATTRS:
            setattr(self, attr, getattr(inv, attr, None))
class ApplianceVmInventory(VmInstanceInventory):
    """VmInstanceInventory specialised with appliance-VM attributes."""

    # Attributes specific to appliance VMs, on top of the base-class set.
    _APPLIANCE_ATTRS = (
        'applianceVmType', 'managementNetworkUuid',
        'defaultRouteL3NetworkUuid', 'status', 'agentPort',
    )

    def __init__(self):
        super(ApplianceVmInventory, self).__init__()
        for attr in self._APPLIANCE_ATTRS:
            setattr(self, attr, None)

    def evaluate(self, inv):
        """Copy base-class attributes, then the appliance-specific ones."""
        super(ApplianceVmInventory, self).evaluate(inv)
        for attr in self._APPLIANCE_ATTRS:
            setattr(self, attr, getattr(inv, attr, None))
class GlobalConfigInventory(object):
    """Data holder for a single global-configuration entry."""

    # Attribute names shared by __init__ (initialisation) and evaluate (copy).
    _ATTRS = ('name', 'category', 'description', 'defaultValue', 'value')

    def __init__(self):
        for attr in self._ATTRS:
            setattr(self, attr, None)

    def evaluate(self, inv):
        """Copy every known attribute from *inv*; absent ones become None."""
        for attr in self._ATTRS:
            setattr(self, attr, getattr(inv, attr, None))
class EcsInstanceInventory(object):
def __init__(self):
self.uuid = None
self.localVmInstanceUuid = None
self.ecsInstanceId = None
self.name = None
self.ecsStatus = None
self.ecsInstanceRootPassword = None
self.cpuCores = None
self.memorySize = None
self.ecsInstanceType = None
self.ecsBandWidth = None
self.ecsRootVolumeId = None
self.ecsRootVolumeCategory = None
self.ecsRootVolumeSize = None
self.privateIpAddress = None
self.publicIpAddress = None
self.ecsVSwitchUuid = None
self.ecsImageUuid = None
self.ecsSecurityGroupUuid = None
self.identityZoneUuid = None
self.chargeType = None
self.expireDate = None
self.createDate = None
self.lastOpDate = None
self.description = None
def evaluate(self, inv):
if hasattr(inv, 'uuid'):
self.uuid = inv.uuid
else:
self.uuid = None
if hasattr(inv, 'localVmInstanceUuid'):
self.localVmInstanceUuid = inv.localVmInstanceUuid
else:
self.localVmInstanceUuid = None
if hasattr(inv, 'ecsInstanceId'):
self.ecsInstanceId = inv.ecsInstanceId
else:
self.ecsInstanceId = None
if hasattr(inv, 'name'):
self.name = inv.name
else:
self.name = None
if hasattr(inv, 'ecsStatus'):
self.ecsStatus = inv.ecsStatus
else:
self.ecsStatus = None
if hasattr(inv, 'ecsInstanceRootPassword'):
self.ecsInstanceRootPassword = inv.ecsInstanceRootPassword
else:
self.ecsInstanceRootPassword = None
if hasattr(inv, 'cpuCores'):
self.cpuCores = inv.cpuCores
else:
self.cpuCores = None
if hasattr(inv, 'memorySize'):
self.memorySize = inv.memorySize
else:
self.memorySize = None
if hasattr(inv, 'ecsInstanceType'):
self.ecsInstanceType = inv.ecsInstanceType
else:
self.ecsInstanceType = None
if hasattr(inv, 'ecsBandWidth'):
self.ecsBandWidth = inv.ecsBandWidth
else:
self.ecsBandWidth = None
if hasattr(inv, 'ecsRootVolumeId'):
self.ecsRootVolumeId = inv.ecsRootVolumeId
else:
self.ecsRootVolumeId = None
if hasattr(inv, 'ecsRootVolumeCategory'):
self.ecsRootVolumeCategory = inv.ecsRootVolumeCategory
else:
self.ecsRootVolumeCategory = None
if hasattr(inv, 'ecsRootVolumeSize'):
self.ecsRootVolumeSize = inv.ecsRootVolumeSize
else:
self.ecsRootVolumeSize = None
if hasattr(inv, 'privateIpAddress'):
self.privateIpAddress = inv.privateIpAddress
else:
self.privateIpAddress = None
if hasattr(inv, 'publicIpAddress'):
self.publicIpAddress = inv.publicIpAddress
else:
self.publicIpAddress = None
if hasattr(inv, 'ecsVSwitchUuid'):
self.ecsVSwitchUuid = inv.ecsVSwitchUuid
else:
self.ecsVSwitchUuid = None
if hasattr(inv, 'ecsImageUuid'):
self.ecsImageUuid = inv.ecsImageUuid
else:
self.ecsImageUuid = None
if hasattr(inv, 'ecsSecurityGroupUuid'):
self.ecsSecurityGroupUuid = inv.ecsSecurityGroupUuid
else:
self.ecsSecurityGroupUuid = None
if hasattr(inv, 'identityZoneUuid'):
self.identityZoneUuid = inv.identityZoneUuid
else:
self.identityZoneUuid = None
if hasattr(inv, 'chargeType'):
self.chargeType = inv.chargeType
else:
self.chargeType = None
if | |
is not None:
oprot.writeFieldBegin('tid', TType.STRUCT, 1)
self.tid.write(oprot)
oprot.writeFieldEnd()
if self.auth is not None:
oprot.writeFieldBegin('auth', TType.STRING, 2)
oprot.writeString(self.auth)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
    def validate(self):
        # Generated stub: this struct declares no required fields, so there
        # is nothing to check before serialization.
        return
    def __hash__(self):
        # Fold each field into a 31-multiplier/XOR accumulator, the hash
        # scheme used throughout this Thrift-generated module.
        value = 17
        value = (value * 31) ^ hash(self.tid)
        value = (value * 31) ^ hash(self.auth)
        return value
    def __repr__(self):
        # Render every instance attribute as name=value pairs.
        # NOTE(review): dict.iteritems() is Python 2 only; this module would
        # need .items() to run under Python 3.
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        # Structural equality: same class and identical attribute dicts.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Explicit because Python 2 does not derive __ne__ from __eq__.
        return not (self == other)
class timRoser_args:
    """
    Thrift argument struct for the timRoser call.

    Attributes:
     - roster: the TimRoster payload (field id 1).
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'roster', (TimRoster, TimRoster.thrift_spec), None, ),  # 1
    )

    def __init__(self, roster=None,):
        self.roster = roster

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        accelerated = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport)
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.roster = TimRoster()
                self.roster.read(iprot)
            else:
                # Unknown field id or unexpected wire type: skip the payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*, using the C fast path when available."""
        accelerated = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('timRoser_args')
        if self.roster is not None:
            oprot.writeFieldBegin('roster', TType.STRUCT, 1)
            self.roster.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        h = 17
        for field in (self.roster,):
            h = (h * 31) ^ hash(field)
        return h

    def __repr__(self):
        pairs = ['%s=%r' % (key, value)
                 for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class timMessageList_args:
    """
    Thrift argument struct for the timMessageList call.

    Attributes:
     - mbeanList: the TimMBeanList payload (field id 1).
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'mbeanList', (TimMBeanList, TimMBeanList.thrift_spec), None, ),  # 1
    )

    def __init__(self, mbeanList=None,):
        self.mbeanList = mbeanList

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        accelerated = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport)
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.mbeanList = TimMBeanList()
                self.mbeanList.read(iprot)
            else:
                # Unknown field id or unexpected wire type: skip the payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*, using the C fast path when available."""
        accelerated = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('timMessageList_args')
        if self.mbeanList is not None:
            oprot.writeFieldBegin('mbeanList', TType.STRUCT, 1)
            self.mbeanList.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        h = 17
        for field in (self.mbeanList,):
            h = (h * 31) ^ hash(field)
        return h

    def __repr__(self):
        pairs = ['%s=%r' % (key, value)
                 for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class timPresenceList_args:
    """
    Thrift argument struct for the timPresenceList call.

    Attributes:
     - pbeanList: the TimPBeanList payload (field id 1).
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'pbeanList', (TimPBeanList, TimPBeanList.thrift_spec), None, ),  # 1
    )

    def __init__(self, pbeanList=None,):
        self.pbeanList = pbeanList

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        accelerated = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport)
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.pbeanList = TimPBeanList()
                self.pbeanList.read(iprot)
            else:
                # Unknown field id or unexpected wire type: skip the payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*, using the C fast path when available."""
        accelerated = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('timPresenceList_args')
        if self.pbeanList is not None:
            oprot.writeFieldBegin('pbeanList', TType.STRUCT, 1)
            self.pbeanList.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        h = 17
        for field in (self.pbeanList,):
            h = (h * 31) ^ hash(field)
        return h

    def __repr__(self):
        pairs = ['%s=%r' % (key, value)
                 for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class timMessageIq_args:
    """
    Thrift argument struct for the timMessageIq call.

    Attributes:
     - timMsgIq: the TimMessageIq payload (field id 1).
     - iqType: IQ type discriminator string (field id 2).
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'timMsgIq', (TimMessageIq, TimMessageIq.thrift_spec), None, ),  # 1
        (2, TType.STRING, 'iqType', None, None, ),  # 2
    )

    def __init__(self, timMsgIq=None, iqType=None,):
        self.timMsgIq = timMsgIq
        self.iqType = iqType

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        accelerated = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport)
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.timMsgIq = TimMessageIq()
                self.timMsgIq.read(iprot)
            elif fid == 2 and ftype == TType.STRING:
                self.iqType = iprot.readString()
            else:
                # Unknown field id or unexpected wire type: skip the payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*, using the C fast path when available."""
        accelerated = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('timMessageIq_args')
        if self.timMsgIq is not None:
            oprot.writeFieldBegin('timMsgIq', TType.STRUCT, 1)
            self.timMsgIq.write(oprot)
            oprot.writeFieldEnd()
        if self.iqType is not None:
            oprot.writeFieldBegin('iqType', TType.STRING, 2)
            oprot.writeString(self.iqType)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        h = 17
        for field in (self.timMsgIq, self.iqType):
            h = (h * 31) ^ hash(field)
        return h

    def __repr__(self):
        pairs = ['%s=%r' % (key, value)
                 for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class timMessageResult_args:
    """
    Thrift argument struct for the timMessageResult call.

    Attributes:
     - mbean: the TimMBean payload (field id 1).
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'mbean', (TimMBean, TimMBean.thrift_spec), None, ),  # 1
    )

    def __init__(self, mbean=None,):
        self.mbean = mbean

    def read(self, iprot):
        """Deserialize this struct from *iprot*, using the C fast path when available."""
        accelerated = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and isinstance(iprot.trans, TTransport.CReadableTransport)
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1 and ftype == TType.STRUCT:
                self.mbean = TimMBean()
                self.mbean.read(iprot)
            else:
                # Unknown field id or unexpected wire type: skip the payload.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*, using the C fast path when available."""
        accelerated = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                       and self.thrift_spec is not None
                       and fastbinary is not None)
        if accelerated:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('timMessageResult_args')
        if self.mbean is not None:
            oprot.writeFieldBegin('mbean', TType.STRUCT, 1)
            self.mbean.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        h = 17
        for field in (self.mbean,):
            h = (h * 31) ^ hash(field)
        return h

    def __repr__(self):
        pairs = ['%s=%r' % (key, value)
                 for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class timProperty_args:
    """Thrift argument struct for the ``timProperty`` service call.

    Attributes:
     - tpb
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'tpb', (TimPropertyBean, TimPropertyBean.thrift_spec), None, ),  # 1
    )

    def __init__(self, tpb=None,):
        self.tpb = tpb

    def read(self, iprot):
        """Deserialize this struct from *iprot* (C accelerator when available)."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.tpb = TimPropertyBean()
                    self.tpb.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot* (C accelerator when available)."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('timProperty_args')
        if self.tpb is not None:
            oprot.writeFieldBegin('tpb', TType.STRUCT, 1)
            self.tpb.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.tpb)
        return value

    def __repr__(self):
        # items() instead of Python-2-only iteritems() (valid on both 2 and 3).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class timRemoteUserAuth_args:
"""
Attributes:
- tid
- pwd
- auth
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'tid', (Tid, Tid.thrift_spec), None, ), # 1
(2, TType.STRING, 'pwd', None, None, ), # 2
(3, TType.STRUCT, 'auth', (TimAuth, TimAuth.thrift_spec), None, ), # 3
)
def __init__(self, tid=None, pwd=None, auth=None,):
    self.tid = tid
    # Restored field assignment: the source had been mangled to
    # "self.pwd = <PASSWORD>" (a redaction placeholder), which is not valid
    # Python. The Thrift generator emits a plain `self.pwd = pwd` here,
    # mirroring the other fields.
    self.pwd = pwd
    self.auth = auth
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.tid = Tid()
self.tid.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.pwd = iprot.readString()
else:
iprot.skip(ftype)
elif | |
currency_type: the type of the currency
:type currency_type: ``osid.type.Type``
:return: the list of supported currency format types
:rtype: ``osid.type.TypeList``
:raise: ``NullArgument`` -- ``currency_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
@abc.abstractmethod
def supports_coordinate_types_for_formatting(self, coordinate_type, coordinate_format_type):
    """Tests if a given coordinate formatting is supported.

    :param coordinate_type: the type of the coordinate
    :type coordinate_type: ``osid.type.Type``
    :param coordinate_format_type: the type of the output coordinate format
    :type coordinate_format_type: ``osid.type.Type``
    :return: ``true`` if formatting with the given types is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``coordinate_type`` or ``coordinate_format_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean

@abc.abstractmethod
def get_coordinate_types_for_formatting(self):
    """Gets all the coordinate types for which formatting is available.

    :return: the list of coordinate types
    :rtype: ``osid.type.TypeList``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

coordinate_types_for_formatting = property(fget=get_coordinate_types_for_formatting)

@abc.abstractmethod
def get_coordinate_format_types_for_coordinate_type(self, coordinate_type):
    """Gets the list of coordinate format types for a given coordinate type.

    :param coordinate_type: the type of the coordinate
    :type coordinate_type: ``osid.type.Type``
    :return: the list of supported coordinate format types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``coordinate_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList
@abc.abstractmethod
def supports_unit_types_for_conversion(self, source_unit_type, target_unit_type):
    """Tests if a given measure conversion is supported.

    :param source_unit_type: the type of the source measure
    :type source_unit_type: ``osid.type.Type``
    :param target_unit_type: the type of the target measure
    :type target_unit_type: ``osid.type.Type``
    :return: ``true`` if the given source and target conversion is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``source_unit_type`` or ``target_unit_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean

@abc.abstractmethod
def get_unit_types_for_source(self, source_unit_type):
    """Gets the list of target measure types for a given source measure type.

    :param source_unit_type: the type of the source measure
    :type source_unit_type: ``osid.type.Type``
    :return: the list of supported target measure types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``source_unit_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

@abc.abstractmethod
def get_source_unit_types(self):
    """Gets all the source unit types supported.

    :return: the list of supported source unit types
    :rtype: ``osid.type.TypeList``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

source_unit_types = property(fget=get_source_unit_types)
@abc.abstractmethod
def supports_currency_types_for_conversion(self, source_currency_type, target_currency_type):
    """Tests if a given currency conversion is supported.

    :param source_currency_type: the type of the source currency
    :type source_currency_type: ``osid.type.Type``
    :param target_currency_type: the type of the target currency
    :type target_currency_type: ``osid.type.Type``
    :return: ``true`` if the given source and target conversion is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``source_currency_type`` or ``target_currency_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean

@abc.abstractmethod
def get_currency_types_for_source(self, source_currency_type):
    """Gets the list of target currency types for a given source currency type.

    :param source_currency_type: the type of the source currency
    :type source_currency_type: ``osid.type.Type``
    :return: the list of supported currency types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``source_currency_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

@abc.abstractmethod
def get_source_currency_types(self):
    """Gets the list of source currency types.

    :return: the list of supported source currency types
    :rtype: ``osid.type.TypeList``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

source_currency_types = property(fget=get_source_currency_types)
@abc.abstractmethod
def supports_calendar_types_for_conversion(self, source_calendar_type, target_calendar_type):
    """Tests if a given calendar conversion is supported.

    :param source_calendar_type: the type of the source calendar
    :type source_calendar_type: ``osid.type.Type``
    :param target_calendar_type: the type of the target calendar
    :type target_calendar_type: ``osid.type.Type``
    :return: ``true`` if the given source and target conversion is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``source_calendar_type`` or ``target_calendar_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean

@abc.abstractmethod
def get_calendar_types_for_source(self, source_calendar_type):
    """Gets the list of target calendar types for a given source calendar type.

    :param source_calendar_type: the type of the source calendar
    :type source_calendar_type: ``osid.type.Type``
    :return: the list of supported calendar types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``source_calendar_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

@abc.abstractmethod
def get_source_calendar_types(self):
    """Gets the list of source calendar types.

    :return: the list of supported source calendar types
    :rtype: ``osid.type.TypeList``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

source_calendar_types = property(fget=get_source_calendar_types)
@abc.abstractmethod
def supports_time_types_for_conversion(self, source_time_type, target_time_type):
    """Tests if a given time conversion is supported.

    :param source_time_type: the type of the source time
    :type source_time_type: ``osid.type.Type``
    :param target_time_type: the type of the target time
    :type target_time_type: ``osid.type.Type``
    :return: ``true`` if the given source and target conversion is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``source_time_type`` or ``target_time_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean

@abc.abstractmethod
def get_time_types_for_source(self, source_time_type):
    """Gets the list of target time types for a given source time type.

    :param source_time_type: the type of the source time
    :type source_time_type: ``osid.type.Type``
    :return: the list of supported time types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``source_time_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

@abc.abstractmethod
def get_source_time_types(self):
    """Gets the list of source time types.

    :return: the list of supported source time types
    :rtype: ``osid.type.TypeList``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

source_time_types = property(fget=get_source_time_types)
@abc.abstractmethod
def get_time_types_for_calendar_type(self, calendar_type):
    """Gets the list of time types supported for a given calendar type where they are both used in a ``DateTime``.

    :param calendar_type: the type of the calendar
    :type calendar_type: ``osid.type.Type``
    :return: the list of supported time types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``calendar_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

@abc.abstractmethod
def get_calendar_types_for_time_type(self, time_type):
    """Gets the list of calendar types supported for a given time type where they are both used in a ``DateTime``.

    :param time_type: the type of the time system
    :type time_type: ``osid.type.Type``
    :return: the list of supported calendar types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``time_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

@abc.abstractmethod
def supports_calendar_time_types(self, calendar_type, time_type):
    """Tests if a given calendar and time type are used together in a ``DateTime``.

    :param calendar_type: the type of the calendar
    :type calendar_type: ``osid.type.Type``
    :param time_type: the type of the time system
    :type time_type: ``osid.type.Type``
    :return: ``true`` if the given calendar and time types are supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``calendar_type`` or ``time_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean
@abc.abstractmethod
def supports_coordinate_types_for_conversion(self, source_coordinate_type, target_coordinate_type):
    """Tests if a given coordinate type for conversion is supported.

    :param source_coordinate_type: the type of the source coordinate
    :type source_coordinate_type: ``osid.type.Type``
    :param target_coordinate_type: the type of the target coordinate
    :type target_coordinate_type: ``osid.type.Type``
    :return: ``true`` if the given source and target conversion is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``source_coordinate_type`` or ``target_coordinate_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean

@abc.abstractmethod
def get_coordinate_types_for_source(self, source_coordinate_type):
    """Gets the list of target coordinate types for a given source coordinate type.

    :param source_coordinate_type: the type of the source coordinate
    :type source_coordinate_type: ``osid.type.Type``
    :return: the list of supported target coordinate types
    :rtype: ``osid.type.TypeList``
    :raise: ``NullArgument`` -- ``source_coordinate_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

@abc.abstractmethod
def get_source_coordinate_types(self):
    """Gets the list of source coordinate types.

    :return: the list of supported source coordinate types
    :rtype: ``osid.type.TypeList``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.type.TypeList

source_coordinate_types = property(fget=get_source_coordinate_types)
@abc.abstractmethod
def supports_spatial_unit_record_types_for_conversion(self, source_spatial_unit_record_type, target_spatial_unit_record_type):
    """Tests if a given spatial unit conversion is supported.

    :param source_spatial_unit_record_type: the type of the source spatial unit record
    :type source_spatial_unit_record_type: ``osid.type.Type``
    :param target_spatial_unit_record_type: the type of the target spatial unit record
    :type target_spatial_unit_record_type: ``osid.type.Type``
    :return: ``true`` if the given source and target conversion is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``source_spatial_unit_record_type`` or ``target_spatial_unit_record_type`` is ``null``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean
@abc.abstractmethod
def get_spatial_unit_record_types_for_source(self, source_spatial_unit_record_type):
"""Gets the list | |
<filename>src/cool_to_cil.py
from semantic import Scope, VariableInfo
import visitor
import ast_nodes as COOL_AST
import cil_ast_nodes as CIL_AST
class BaseCOOLToCILVisitor:
def __init__(self, context):
self.dottypes = {}
self.dotdata = {}
self.dotcode = []
self.current_type = None
self.current_method = None
self.current_function = None
self.context = context
self.label_count = 0
self.context.set_type_tags()
self.context.set_type_max_tags()
@property
def params(self):
    # Parameter list of the CIL function currently being emitted.
    return self.current_function.params

@property
def localvars(self):
    # Local declarations of the CIL function currently being emitted.
    return self.current_function.localvars

@property
def instructions(self):
    # Instruction list of the CIL function currently being emitted.
    return self.current_function.instructions
def get_label(self):
self.label_count += 1
return f'label_{self.label_count}'
def register_param(self, vinfo):
    """Declare *vinfo* as a parameter of the current CIL function; return its name."""
    self.params.append(CIL_AST.ParamDec(vinfo.name))
    return vinfo.name
def is_defined_param(self, name):
for p in self.params:
if p.name == name:
return True
return False
def register_local(self, var_name):
    """Declare *var_name* as a local of the current CIL function; return it."""
    self.localvars.append(CIL_AST.LocalDec(var_name))
    return var_name
def define_internal_local(self, scope, name = "internal", cool_var_name = None, class_type = None):
if class_type != None:
cilname = f'{class_type}.{name}'
scope.define_cil_local(cool_var_name, cilname, None)
self.register_local(cilname)
else :
cilname = f'{name}_{len(self.localvars)}'
scope.define_cil_local(cool_var_name, cilname, None)
self.register_local(cilname)
return cilname
def register_instruction(self, instruction):
self.instructions.append(instruction)
return instruction
def to_function_name(self, method_name, type_name):
return f'{type_name}.{method_name}'
def register_function(self, function_name):
    """Create an empty CIL function node, add it to .CODE, and return it."""
    node = CIL_AST.Function(function_name, [], [], [])
    self.dotcode.append(node)
    return node
def register_type(self, name):
    """Create a CIL type node for *name*, record it in .TYPES, and return it."""
    node = CIL_AST.Type(name)
    self.dottypes[name] = node
    return node
def is_in_data(self, name):
return name in self.dotdata.keys
def register_data(self, value):
vname = f's_{len(self.dotdata)}'
self.dotdata[vname] = value
return vname
def register_builtin_types(self, scope):
    """Emit .TYPES entries and CIL code for COOL's built-in classes.

    Covers Object, Int, String, Bool and IO: each gets its ``<Type>_init``
    constructor plus its built-in methods (abort, type_name, copy,
    out_string, out_int, in_string, in_int, length, concat, substr).
    Int/String/Bool additionally carry a hidden 'value' attribute holding
    the unboxed payload.
    """
    for t in ['Object', 'Int', 'String', 'Bool', 'IO']:
        builtin_type = self.context.get_type(t)
        cil_type = self.register_type(t)
        cil_type.attributes = [f'{attr.name}' for attr in builtin_type.attributes]
        cil_type.methods = {f'{m}':f'{c}.{m}' for c, m in builtin_type.get_all_methods()}
        if t in ['Int', 'String', 'Bool']:
            # Boxed primitives store their payload in a synthetic attribute.
            cil_type.attributes.append('value')

    #----------------Object---------------------
    #init
    self.current_function = self.register_function('Object_init')
    self.register_param(VariableInfo('self', None))
    self.register_instruction(CIL_AST.Return(None))
    #abort
    self.current_function = self.register_function(self.to_function_name('abort', 'Object'))
    self.register_param(VariableInfo('self',None))
    msg = self.define_internal_local(scope=scope, name="msg")
    key_msg = ''
    # Look up the .DATA label holding the abort message text
    # (registered beforehand by the Program visitor).
    for s in self.dotdata.keys():
        if self.dotdata[s] == 'Abort called from class ':
            key_msg = s
    self.register_instruction(CIL_AST.LoadStr(key_msg, msg))
    self.register_instruction(CIL_AST.PrintString(msg))
    type_name = self.define_internal_local(scope=scope, name = "type_name" )
    self.register_instruction(CIL_AST.TypeOf('self', type_name))
    self.register_instruction(CIL_AST.PrintString(type_name))
    eol_local = self.define_internal_local(scope=scope, name="eol")
    # NOTE(review): `eol` is only bound when '\n' was already registered in
    # .DATA; otherwise LoadStr below would hit an unbound name — confirm the
    # Program visitor always registers '\n' before this runs.
    for s in self.dotdata.keys():
        if self.dotdata[s] == '\n':
            eol = s
    self.register_instruction(CIL_AST.LoadStr(eol, eol_local))
    self.register_instruction(CIL_AST.PrintString(eol_local))
    self.register_instruction(CIL_AST.Halt())
    #type_name
    self.current_function = self.register_function(self.to_function_name('type_name', 'Object'))
    self.register_param(VariableInfo('self', None))
    type_name = self.define_internal_local(scope=scope, name = "type_name" )
    self.register_instruction(CIL_AST.TypeOf('self', type_name))
    # Box the raw name into a String instance before returning it.
    instance = self.define_internal_local(scope=scope, name="instance")
    self.register_instruction(CIL_AST.Allocate('String',self.context.get_type('String').tag ,instance))
    result_init = self.define_internal_local(scope=scope, name="result_init")
    self.register_instruction(CIL_AST.Call(result_init, 'String_init', [CIL_AST.Arg(type_name),CIL_AST.Arg(instance)],"String"))
    self.register_instruction(CIL_AST.Return(instance))
    #copy
    self.current_function = self.register_function(self.to_function_name('copy', 'Object'))
    self.register_param(VariableInfo('self',None))
    copy = self.define_internal_local(scope=scope, name= "copy")
    self.register_instruction(CIL_AST.Copy('self', copy))
    self.register_instruction(CIL_AST.Return(copy))

    #----------------IO---------------------
    #init
    self.current_function = self.register_function('IO_init')
    self.register_param(VariableInfo('self', None))
    self.register_instruction(CIL_AST.Return(None))
    #out_string
    self.current_function = self.register_function(self.to_function_name('out_string', 'IO'))
    self.register_param(VariableInfo('self', None))
    self.register_param(VariableInfo('x', None))
    # Unbox the String argument, print it, return the receiver (IO chains).
    v = self.define_internal_local(scope=scope, name="v")
    self.register_instruction(CIL_AST.GetAttr(v, 'x','value','String'))
    self.register_instruction(CIL_AST.PrintString(v))
    self.register_instruction(CIL_AST.Return('self'))
    #out_int
    self.current_function = self.register_function(self.to_function_name('out_int', 'IO'))
    self.register_param(VariableInfo('self', None))
    self.register_param(VariableInfo('x', None))
    v = self.define_internal_local(scope=scope, name="v")
    self.register_instruction(CIL_AST.GetAttr(v, 'x','value','Int'))
    self.register_instruction(CIL_AST.PrintInteger(v))
    self.register_instruction(CIL_AST.Return('self'))
    #in_string
    self.current_function = self.register_function(self.to_function_name('in_string', 'IO'))
    self.register_param(VariableInfo('self', None))
    msg = self.define_internal_local(scope=scope, name="read_str")
    self.register_instruction(CIL_AST.ReadString(msg))
    # Box the raw input into a fresh String instance.
    instance = self.define_internal_local(scope=scope, name="instance")
    self.register_instruction(CIL_AST.Allocate('String',self.context.get_type('String').tag ,instance))
    result_init = self.define_internal_local(scope=scope, name="result_init")
    self.register_instruction(CIL_AST.Call(result_init, 'String_init', [CIL_AST.Arg(msg),CIL_AST.Arg(instance)],"String"))
    self.register_instruction(CIL_AST.Return(instance))
    #in_int
    self.current_function = self.register_function(self.to_function_name('in_int', 'IO'))
    self.register_param(VariableInfo('self', None))
    number = self.define_internal_local(scope=scope, name ="read_int")
    self.register_instruction(CIL_AST.ReadInteger(number))
    instance = self.define_internal_local(scope=scope, name="instance")
    self.register_instruction(CIL_AST.Allocate('Int', self.context.get_type('Int').tag,instance))
    result_init = self.define_internal_local(scope=scope, name="result_init")
    self.register_instruction(CIL_AST.Call(result_init, 'Int_init', [ CIL_AST.Arg(number), CIL_AST.Arg(instance)], "Int"))
    self.register_instruction(CIL_AST.Return(instance))

    # ----------------String---------------------
    #init
    self.current_function=self.register_function('String_init')
    self.register_param(VariableInfo('self', None))
    self.register_param(VariableInfo('v', None))
    self.register_instruction(CIL_AST.SetAttr('self', 'value', 'v', 'String'))
    self.register_instruction(CIL_AST.Return(None))
    #length
    self.current_function = self.register_function(self.to_function_name('length', 'String'))
    self.register_param(VariableInfo('self', None))
    length_result = self.define_internal_local(scope=scope, name="length")
    self.register_instruction(CIL_AST.Length('self', length_result))
    # Box the raw length into an Int instance.
    instance = self.define_internal_local(scope=scope, name="instance")
    self.register_instruction(CIL_AST.Allocate('Int', self.context.get_type('Int').tag,instance))
    result_init = self.define_internal_local(scope=scope, name="result_init")
    self.register_instruction(CIL_AST.Call(result_init,'Int_init', [CIL_AST.Arg(length_result),CIL_AST.Arg(instance)], "Int"))
    self.register_instruction(CIL_AST.Return(instance))
    #concat
    self.current_function = self.register_function(self.to_function_name('concat', 'String'))
    self.register_param(VariableInfo('self', None))
    self.register_param(VariableInfo('s', None))
    # Unbox both operands and their lengths, then concatenate and re-box.
    str1 = self.define_internal_local(scope=scope, name="str1")
    self.register_instruction(CIL_AST.GetAttr(str1, 'self','value','String'))
    len1 = self.define_internal_local(scope=scope, name="len1")
    self.register_instruction(CIL_AST.Call(len1, 'String.length', [CIL_AST.Arg('self')], 'String'))
    str2 = self.define_internal_local(scope=scope, name="str2")
    self.register_instruction(CIL_AST.GetAttr(str2, 's', 'value', 'String'))
    len2 = self.define_internal_local(scope=scope, name="len2")
    self.register_instruction(CIL_AST.Call(len2, 'String.length', [CIL_AST.Arg('s')], 'String'))
    local_len1 = self.define_internal_local(scope=scope, name="local_len1")
    self.register_instruction(CIL_AST.GetAttr(local_len1, len1, 'value', 'Int'))
    local_len2 = self.define_internal_local(scope=scope, name="local_len2")
    self.register_instruction(CIL_AST.GetAttr(local_len2, len2, 'value', 'Int'))
    concat_result = self.define_internal_local(scope=scope, name="concat")
    self.register_instruction(CIL_AST.Concat(str1, local_len1, str2, local_len2, concat_result))
    instance = self.define_internal_local(scope=scope, name="instance")
    self.register_instruction(CIL_AST.Allocate('String',self.context.get_type('String').tag ,instance))
    result_init = self.define_internal_local(scope=scope, name="result_init")
    self.register_instruction(CIL_AST.Call(result_init, 'String_init', [CIL_AST.Arg(concat_result), CIL_AST.Arg(instance)],"String"))
    self.register_instruction(CIL_AST.Return(instance))
    #substr
    self.current_function = self.register_function(self.to_function_name('substr', 'String'))
    self.register_param(VariableInfo('self', None))
    self.register_param(VariableInfo('i', None))
    self.register_param(VariableInfo('l', None))
    i_value=self.define_internal_local(scope=scope, name="i_value")
    self.register_instruction(CIL_AST.GetAttr(i_value, 'i','value','Int'))
    l_value = self.define_internal_local(scope=scope, name="l_value")
    self.register_instruction(CIL_AST.GetAttr(l_value, 'l','value','Int'))
    subs_result=self.define_internal_local(scope=scope, name="subs_result")
    self.register_instruction(CIL_AST.SubStr(i_value, l_value, 'self', subs_result))
    instance = self.define_internal_local(scope=scope, name="instance")
    self.register_instruction(CIL_AST.Allocate('String', self.context.get_type('String').tag,instance))
    result_init = self.define_internal_local(scope=scope, name="result_init")
    self.register_instruction(CIL_AST.Call(result_init, 'String_init', [CIL_AST.Arg(subs_result),CIL_AST.Arg(instance)],"String"))
    self.register_instruction(CIL_AST.Return(instance))

    #----------------Bool---------------------
    #init
    self.current_function=self.register_function('Bool_init')
    self.register_param(VariableInfo('self', None))
    self.register_param(VariableInfo('v', None))
    self.register_instruction(CIL_AST.SetAttr('self', 'value', 'v', 'Bool'))
    self.register_instruction(CIL_AST.Return(None))

    #----------------Int---------------------
    #init
    self.current_function=self.register_function('Int_init')
    self.register_param(VariableInfo('self', None))
    self.register_param(VariableInfo('v', None))
    self.register_instruction(CIL_AST.SetAttr('self', 'value', 'v', 'Int'))
    self.register_instruction(CIL_AST.Return(None))
def build_string_equals_function(self, scope):
    """Emit the global ``String_equals(str1, str2)`` helper.

    Unboxes the 'value' payload of both String instances and compares the
    raw contents with a StringEquals instruction, returning the result.
    """
    self.current_function = self.register_function('String_equals')
    self.register_param(VariableInfo('str1', None))
    self.register_param(VariableInfo('str2', None))
    str1 = self.define_internal_local(scope=scope, name="str1")
    self.register_instruction(CIL_AST.GetAttr(str1, 'str1', 'value','String'))
    str2 = self.define_internal_local(scope=scope, name="str2")
    self.register_instruction(CIL_AST.GetAttr(str2, 'str2', 'value', 'String'))
    result = self.define_internal_local(scope=scope, name="comparison_result")
    self.register_instruction(CIL_AST.StringEquals(str1, str2, result))
    self.register_instruction(CIL_AST.Return(result))
class COOLToCILVisitor(BaseCOOLToCILVisitor):
@visitor.on('node')
def visit(self, node, scope):
    """Generic dispatch entry; the @visitor.when overloads below are
    selected by the runtime type of *node*."""
    pass
@visitor.when(COOL_AST.Program)
def visit(self, node, scope = None):
    """Translate a whole COOL program and return the complete CIL program.

    Emits, in order: the entry function `main` (allocate Main, call
    Main_init, call Main.main), the string constants required by the
    built-ins, the built-in types/functions, the String_equals helper,
    and finally every user class.
    """
    scope = Scope()
    self.current_function = self.register_function('main')
    instance = self.define_internal_local(scope = scope, name = "instance")
    result = self.define_internal_local(scope = scope, name = "result")
    self.register_instruction(CIL_AST.Allocate('Main',self.context.get_type('Main').tag, instance))
    self.register_instruction(CIL_AST.Call(result, 'Main_init', [CIL_AST.Arg(instance)],"Main"))
    self.register_instruction(CIL_AST.Call(result, self.to_function_name('main', 'Main'), [CIL_AST.Arg(instance)],"Main"))
    self.register_instruction(CIL_AST.Return(None))
    self.current_function = None
    # These .DATA constants must exist before register_builtin_types runs:
    # Object.abort scans .DATA for exactly these strings.
    self.register_data('Abort called from class ')
    self.register_data('\n')
    self.dotdata['empty_str'] = ''
    #Add built-in types in .TYPES section
    self.register_builtin_types(scope)
    #Add string equals function
    self.build_string_equals_function(scope)
    for klass in node.classes:
        self.visit(klass, scope.create_child())
    return CIL_AST.Program(self.dottypes, self.dotdata, self.dotcode)
@visitor.when(COOL_AST.Class)
def visit(self, node, scope):
    """Translate a COOL class: its .TYPES entry, its ``<Name>_init``
    constructor (which chains to the parent's init and then runs the
    attribute initializers), and each of its methods."""
    self.current_type = self.context.get_type(node.name)
    #Handle all the .TYPE section
    cil_type = self.register_type(self.current_type.name)
    cil_type.attributes = [f'{attr.name}' for c, attr in self.current_type.get_all_attributes()]
    cil_type.methods = {f'{m}':f'{c}.{m}' for c, m in self.current_type.get_all_methods()}
    scope.define_cil_local("self", self.current_type.name, self.current_type)
    func_declarations = [f for f in node.features if isinstance(f, COOL_AST.ClassMethod)]
    attr_declarations = [a for a in node.features if not isinstance(a, COOL_AST.ClassMethod)]
    for attr in attr_declarations:
        scope.define_cil_local(attr.name, attr.name, node.name)
    #-------------------------Init---------------------------------
    self.current_function = self.register_function(f'{node.name}_init')
    self.register_param(VariableInfo('self', None))
    #Init parents recursively
    result = self.define_internal_local(scope=scope, name = "result")
    self.register_instruction(CIL_AST.Call(result, f'{node.parent}_init',[CIL_AST.Arg('self')], node.parent))
    # NOTE(review): Return(None) is registered *before* the attribute
    # initializers are visited below, so their instructions land after the
    # return — confirm the backend tolerates/reorders this.
    self.register_instruction(CIL_AST.Return(None))
    for attr in attr_declarations:
        self.visit(attr, scope)
    #---------------------------------------------------------------
    self.current_function = None
    for feature in func_declarations:
        self.visit(feature, scope.create_child())
    self.current_type = None
@visitor.when(COOL_AST.ClassMethod)
def visit(self, node, scope):
    """Translate one COOL method into the CIL function ``<Type>.<method>``."""
    self.current_method = self.current_type.get_method(node.name)
    # Record the concrete implementation in the type's method table.
    self.dottypes[self.current_type.name].methods[node.name] = f'{self.current_type.name}.{node.name}'
    cil_method_name = self.to_function_name(node.name, self.current_type.name)
    self.current_function = self.register_function(cil_method_name)
    # 'self' is always the implicit first parameter.
    self.register_param(VariableInfo('self', self.current_type))
    for p in node.params:
        self.register_param(VariableInfo(p.name, p.param_type))
    value = self.visit(node.expr, scope)
    self.register_instruction(CIL_AST.Return(value))
    self.current_method = None
@visitor.when(COOL_AST.AttributeDef)
def visit(self, node, scope):
    """Default-initialize an attribute declared without an initializer.

    Int/Bool default to a boxed 0, String to a boxed empty string; any
    other type gets no allocation (the attribute is set to the initial
    ``instance = None``).
    """
    instance = None
    if node.type in ['Int', 'Bool']:
        instance = self.define_internal_local(scope=scope, name="instance")
        self.register_instruction(CIL_AST.Allocate(node.type,self.context.get_type(node.type).tag, instance))
        value = self.define_internal_local(scope=scope, name="value")
        self.register_instruction(CIL_AST.LoadInt(0,value))
        result_init = self.define_internal_local(scope=scope, name="result_init")
        self.register_instruction(CIL_AST.Call(result_init, f'{node.type}_init', [ CIL_AST.Arg(value), CIL_AST.Arg(instance)], node.type))
    elif node.type == 'String':
        instance = self.define_internal_local(scope=scope, name="instance")
        self.register_instruction(CIL_AST.Allocate(node.type,self.context.get_type(node.type).tag ,instance))
        value = self.define_internal_local(scope=scope, name="value")
        # 'empty_str' is the .DATA label seeded by the Program visitor.
        self.register_instruction(CIL_AST.LoadStr('empty_str',value))
        result_init = self.define_internal_local(scope=scope, name="result_init")
        self.register_instruction(CIL_AST.Call(result_init, f'{node.type}_init', [CIL_AST.Arg(value),CIL_AST.Arg(instance)], node.type))
    self.register_instruction(CIL_AST.SetAttr('self', node.name,instance, self.current_type.name))
@visitor.when(COOL_AST.AttributeInit)
def visit(self, node, scope):
    """Attribute declared with an initializer: evaluate it, store into self."""
    expr = self.visit(node.expr, scope)
    self.register_instruction(CIL_AST.SetAttr('self', node.name, expr, self.current_type.name))
@visitor.when(COOL_AST.AssignExpr)
def visit(self, node, scope):
    """Translate an assignment; the target may be a parameter, an attribute
    of the current type, or a local — checked in that order. The
    assignment expression's value is the assigned value itself."""
    expr_local = self.visit(node.expr, scope)
    # NOTE(review): result_local is registered but never referenced; it is
    # kept because removing it would change the emitted locals — confirm
    # it is dead before deleting.
    result_local = self.define_internal_local(scope=scope, name = "result" )
    cil_node_name = scope.find_cil_local(node.name)
    if self.is_defined_param(node.name):
        self.register_instruction(CIL_AST.Assign(node.name, expr_local))
    elif self.current_type.has_attr(node.name):
        cil_type_name = 'self'
        self.register_instruction(CIL_AST.SetAttr(cil_type_name, node.name, expr_local, self.current_type.name ))
    else:
        self.register_instruction(CIL_AST.Assign(cil_node_name, expr_local))
    return expr_local
@visitor.when(COOL_AST.Block)
def visit(self, node, scope):
    """Evaluate every expression in the block; its value is the last one.

    NOTE(review): assumes node.exprs is non-empty (COOL's grammar requires
    at least one expression in a block); an empty list would leave
    result_local unbound.
    """
    for e in node.exprs:
        result_local = self.visit(e, scope)
    return result_local
@visitor.when(COOL_AST.If)
def visit(self, node, scope):
    """Translate if/then/else with conditional jumps.

    Layout: predicate; IfGoto then-label; (fall-through) else branch;
    Goto end; then-label: then branch; end-label. Both branches assign
    into a shared result local, which is the expression's value.
    """
    result_local = self.define_internal_local(scope=scope, name = "result")
    cond_value = self.visit(node.predicate, scope)
    if_then_label = self.get_label()
    self.register_instruction(CIL_AST.IfGoto(cond_value, if_then_label))
    # Fall-through path: predicate was false.
    else_value = self.visit(node.else_body, scope)
    self.register_instruction(CIL_AST.Assign(result_local, else_value))
    end_if_label = self.get_label()
    self.register_instruction(CIL_AST.Goto(end_if_label))
    self.register_instruction(CIL_AST.Label(if_then_label))
    then_value = self.visit(node.then_body, scope)
    self.register_instruction(CIL_AST.Assign(result_local, then_value))
    self.register_instruction(CIL_AST.Label(end_if_label))
    return result_local
@visitor.when(COOL_AST.While)
def visit(self, node, scope):
    """Lower a COOL while-loop to CIL labels and jumps.

    Emitted shape:
        init: <predicate>; if pred goto body; goto end
        body: <body>; goto init
        end:  result := void
    A COOL while expression evaluates to void, so the result local is
    loaded with void regardless of how many iterations ran.
    """
    result_local = self.define_internal_local(scope = scope, name = "result")
    loop_init_label = self.get_label()
    loop_body_label = self.get_label()
    loop_end_label = self.get_label()
    # Top of the loop: the predicate is re-evaluated on every iteration.
    self.register_instruction(CIL_AST.Label(loop_init_label))
    pred_value = self.visit(node.predicate, scope)
    self.register_instruction(CIL_AST.IfGoto(pred_value, loop_body_label))
    # Predicate false: jump past the body.
    self.register_instruction(CIL_AST.Goto(loop_end_label))
    self.register_instruction(CIL_AST.Label(loop_body_label))
    # The body's value is intentionally discarded (while is void-typed).
    body_value = self.visit(node.body, scope)
    self.register_instruction(CIL_AST.Goto(loop_init_label))
    self.register_instruction(CIL_AST.Label(loop_end_label))
    self.register_instruction(CIL_AST.LoadVoid(result_local))
    return result_local
@visitor.when(COOL_AST.DynamicCall)
def visit(self, node, scope):
    """Lower a dynamic dispatch (expr.method(args)).

    The receiver is evaluated first, then each actual argument in
    reverse source order; the receiver itself is pushed as the final
    argument. Dispatch uses the receiver's statically computed type.
    """
    call_result = self.define_internal_local(scope=scope, name="result")
    receiver = self.visit(node.instance, scope)
    pushed_args = [CIL_AST.Arg(self.visit(actual, scope))
                   for actual in reversed(node.args)]
    pushed_args.append(CIL_AST.Arg(receiver))
    receiver_type = node.instance.computed_type.name
    self.register_instruction(
        CIL_AST.VCall(call_result, node.method, pushed_args, receiver_type, receiver))
    return call_result
@visitor.when(COOL_AST.StaticCall)
def visit(self, node, scope):
    """Lower a static dispatch (expr@Type.method(args)).

    Arguments are pushed in reverse source order followed by the
    receiver; an instance of the explicitly named static type is
    allocated and handed to VCall so dispatch resolves against that
    type rather than the receiver's dynamic type.
    """
    call_result = self.define_internal_local(scope=scope, name="result")
    receiver = self.visit(node.instance, scope)
    pushed_args = []
    for actual in reversed(node.args):
        pushed_args.append(CIL_AST.Arg(self.visit(actual, scope)))
    pushed_args.append(CIL_AST.Arg(receiver))
    static_instance = self.define_internal_local(scope=scope, name='static_instance')
    self.register_instruction(
        CIL_AST.Allocate(node.static_type,
                         self.context.get_type(node.static_type).tag,
                         static_instance))
    self.register_instruction(
        CIL_AST.VCall(call_result, node.method, pushed_args,
                      node.static_type, static_instance))
    return call_result
@visitor.when(COOL_AST.Let)
def visit(self, node, scope):
    """Lower a let-expression: the bindings live in a child scope, the
    body is evaluated there, and its value is copied into a result
    local belonging to the enclosing scope."""
    inner_scope = scope.create_child()
    for declaration in node.var_list:
        self.visit(declaration, inner_scope)
    body_value = self.visit(node.body, inner_scope)
    let_result = self.define_internal_local(scope=scope, name="let_result")
    self.register_instruction(CIL_AST.Assign(let_result, body_value))
    return let_result
@visitor.when(COOL_AST.LetVarInit)
def visit(self, node, scope):
    """Lower a let-binding with an initializer: evaluate the expression,
    then create a CIL local for the COOL variable and assign into it."""
    init_value = self.visit(node.expr, scope)
    binding = self.define_internal_local(scope=scope, name=node.name,
                                         cool_var_name=node.name)
    self.register_instruction(CIL_AST.Assign(binding, init_value))
    return binding
@visitor.when(COOL_AST.LetVarDef)
def visit(self, node, scope):
instance = None
if node.type in ['Int', 'Bool']:
instance = self.define_internal_local(scope=scope, name="instance")
self.register_instruction(CIL_AST.Allocate(node.type,self.context.get_type(node.type).tag, instance))
value = self.define_internal_local(scope=scope, name="value")
self.register_instruction(CIL_AST.LoadInt(0,value))
result_init = self.define_internal_local(scope=scope, name="result_init")
self.register_instruction(CIL_AST.Call(result_init, f'{node.type}_init', [ CIL_AST.Arg(value), | |
the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["GUID__"] = UUID('df1e83d1-32db-4056-ac38-2741b5898582')
self.vs[5]["MT_subtypeMatching__"] = False
self.vs[5]["MT_label__"] = """24"""
self.vs[5]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[5]["mm__"] = """MT_pre__hasAttribute_S"""
self.vs[5]["MT_dirty__"] = False
self.vs[5]["GUID__"] = UUID('8a054c15-e3fa-49b6-b451-0b72ab6a2584')
self.vs[6]["MT_subtypeMatching__"] = False
self.vs[6]["MT_label__"] = """28"""
self.vs[6]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[6]["mm__"] = """MT_pre__hasAttribute_T"""
self.vs[6]["MT_dirty__"] = False
self.vs[6]["GUID__"] = UUID('8a1e71e5-372c-4e51-928c-78a4af5fd4ea')
self.vs[7]["MT_subtypeMatching__"] = False
self.vs[7]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[7]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[7]["MT_label__"] = """10"""
self.vs[7]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[7]["mm__"] = """MT_pre__Inst"""
self.vs[7]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[7]["MT_dirty__"] = False
self.vs[7]["GUID__"] = UUID('40520d48-84cf-4a4c-8c62-e2a2fbadb9e9')
self.vs[8]["MT_subtypeMatching__"] = False
self.vs[8]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[8]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[8]["MT_label__"] = """15"""
self.vs[8]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__PythonRef'
p2
a.""")
self.vs[8]["mm__"] = """MT_pre__Name"""
self.vs[8]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[8]["MT_dirty__"] = False
self.vs[8]["GUID__"] = UUID('9f164020-f954-4b0f-8d01-9152416dfe73')
self.vs[9]["MT_pivotOut__"] = """element4"""
self.vs[9]["MT_subtypeMatching__"] = False
self.vs[9]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[9]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[9]["MT_pivotIn__"] = """element4"""
self.vs[9]["MT_label__"] = """4"""
self.vs[9]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[9]["mm__"] = """MT_pre__OUT2"""
self.vs[9]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[9]["MT_dirty__"] = False
self.vs[9]["GUID__"] = UUID('48145f24-2dd2-4428-b68d-cb52aa333a09')
self.vs[10]["MT_subtypeMatching__"] = False
self.vs[10]["MT_pre__Type"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[10]["MT_label__"] = """19"""
self.vs[10]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[10]["mm__"] = """MT_pre__Attribute"""
self.vs[10]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[10]["MT_dirty__"] = False
self.vs[10]["GUID__"] = UUID('1b23a4f9-8e30-4509-be19-5b827290f33f')
self.vs[11]["MT_subtypeMatching__"] = False
self.vs[11]["MT_pre__Type"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[11]["MT_label__"] = """25"""
self.vs[11]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[11]["mm__"] = """MT_pre__Attribute"""
self.vs[11]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[11]["MT_dirty__"] = False
self.vs[11]["GUID__"] = UUID('8aebfcf3-9e7b-4f6f-8e48-bcb0f096b1ba')
self.vs[12]["MT_subtypeMatching__"] = False
self.vs[12]["MT_label__"] = """21"""
self.vs[12]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[12]["mm__"] = """MT_pre__leftExpr"""
self.vs[12]["MT_dirty__"] = False
self.vs[12]["GUID__"] = UUID('4416fc7e-9ab9-4b93-bef2-dbe271d49a34')
self.vs[13]["MT_subtypeMatching__"] = False
self.vs[13]["MT_label__"] = """29"""
self.vs[13]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[13]["mm__"] = """MT_pre__leftExpr"""
self.vs[13]["MT_dirty__"] = False
self.vs[13]["GUID__"] = UUID('990e486f-db6b-41da-aa75-a99035d876be')
self.vs[14]["MT_subtypeMatching__"] = False
self.vs[14]["MT_label__"] = """23"""
self.vs[14]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[14]["mm__"] = """MT_pre__rightExpr"""
self.vs[14]["MT_dirty__"] = False
self.vs[14]["GUID__"] = UUID('ecaaf206-0f7a-41da-9907-e7c23040450f')
self.vs[15]["MT_subtypeMatching__"] = False
self.vs[15]["MT_label__"] = """30"""
self.vs[15]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[15]["mm__"] = """MT_pre__rightExpr"""
self.vs[15]["MT_dirty__"] = False
self.vs[15]["GUID__"] = UUID('361f9501-5606-43ef-ad3a-b1a182678ed0')
self.vs[16]["MT_subtypeMatching__"] = False
self.vs[16]["MT_pre__Type"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[16]["MT_label__"] = """22"""
self.vs[16]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[16]["mm__"] = """MT_pre__Constant"""
self.vs[16]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[16]["MT_dirty__"] = False
self.vs[16]["GUID__"] = UUID('ff03edde-10aa-40b5-ba3e-c8cc70e2813f')
self.vs[17]["MT_subtypeMatching__"] = False
self.vs[17]["MT_pre__Type"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can | |
short_name = process_swagger_name(ref)
fullname = full_swagger_name(ref)
mod_def = get_module_def(version)
item_ref = mod_def.get_class_desc(short_name)
if item_ref is None:
item_ref = ClassDescriptor(fullname, {}) # make a placeholder
mod_def.save_class_desc(item_ref)
else:
itype = items.get("type")
if itype:
if itype == "object":
item_ref = "object"
else:
item_ref = itype
else:
raise RuntimeError(f"Don't know how to process type of "
f"{name} in {self.containing_class.full_name}")
self.item_type = item_ref
elif self.item_type in types_map:
self.item_type = types_map[self.item_type]
else:
raise TypeError(f"Unknown type: {self.item_type} in property {self.name}")
elif ctype == "object":
# This can be a couple of things, depending on the release
# of the spec. Sometimes it's just used to allow any content,
# and sometimes it's for a untyped key/value object
# We'll look for the presence of 'additionalProperties' and if
# found, we'll treat it as a dict, otherwise an object
if 'additionalProperties' in d:
self.container_type = dict
else:
self.container_type = object
else:
self.container_type = None
if self.container_type is None:
if ctype in types_map:
self.prop_type = types_map[ctype]
else:
group, version, short_name = process_swagger_name(d["$ref"])
fullname = full_swagger_name(d["$ref"])
mod_def = get_module_def(version)
ref_class = mod_def.get_class_desc(short_name)
if ref_class is None:
ref_class = ClassDescriptor(fullname, {})
mod_def.save_class_desc(ref_class)
self.prop_type = ref_class
@staticmethod
def as_required(anno: str, as_required: bool) -> str:
return anno if as_required else f"Optional[{anno}]"
def depends_on(self) -> Union[ClassDescriptor, NoneType]:
    """Return the ClassDescriptor this property depends on, if any.

    The list item type is checked first, then the scalar property type;
    None is returned when neither resolves to a model class.
    """
    for candidate in (self.item_type, self.prop_type):
        if isinstance(candidate, ClassDescriptor):
            return candidate
    return None
def change_dep(self, new_dep):
    """Replace this property's ClassDescriptor dependency with *new_dep*.

    Mirrors depends_on(): the list item type takes precedence over the
    scalar property type; at most one of them is rewritten.
    """
    if isinstance(self.item_type, ClassDescriptor):
        self.item_type = new_dep
        return
    if isinstance(self.prop_type, ClassDescriptor):
        self.prop_type = new_dep
def as_python_typeanno(self, as_required: bool) -> str:
    """Render this property as one indented dataclass field line.

    Produces '  <name>: <annotation>' where the annotation depends on
    the container type (None => scalar/object, list, dict, or bare
    object); optional fields additionally get a default (a field
    factory for containers, else self.default_value) so callers need
    not supply them.
    """
    parts = [" ", self.name, ": "]
    if self.container_type is None:
        # then a straight-up type, either scalar or another object
        if isinstance(self.prop_type, str):
            parts.append(self.as_required(self.prop_type, as_required))
        elif isinstance(self.prop_type, ClassDescriptor):
            # forward-reference the generated class by quoted name
            parts.append(self.as_required(f"'{self.prop_type.short_name}'",
                                          as_required))
        # NOTE(review): if prop_type is neither str nor ClassDescriptor,
        # this silently emits no annotation -- presumably unreachable;
        # confirm against __init__'s type resolution.
    elif self.container_type is list:
        if isinstance(self.item_type, ClassDescriptor):
            parts.append(self.as_required(f"List['{self.item_type.short_name}']",
                                          as_required))
        else:
            parts.append(self.as_required(f"List[{self.item_type}]",
                                          as_required))
    elif self.container_type is dict:
        # untyped key/value object (had 'additionalProperties')
        parts.append(self.as_required("Dict[str, str]", as_required))
    elif self.container_type is object:
        # 'object' schema with no additionalProperties: accept anything
        parts.append(self.as_required("object", as_required))
    else:
        raise TypeError(f"Unknown attribute {self.name} in "
                        f"{self.containing_class.short_name}, "
                        f"prop_type:{self.prop_type}, "
                        f"container:{self.container_type}")
    # now check if we should add a field default
    if not as_required:
        # then we need to add a default value so we don't have to
        # supply this argument when creating it programmatically
        if self.container_type is not None:
            # then we need a field
            factory = "list" if self.container_type is list else "dict"
            parts.append(f" = field(default_factory={factory})")
        else:
            # just default it to None
            parts.append(f" = {self.default_value}")
    return "".join(parts)
class OpParameter(object):
    """A single parameter of a swagger API operation.

    Captures the parameter's name, type (a builtin type, a type-name
    string, or a ClassDescriptor for model classes), description, and
    whether it is required. ``is_bodyany`` flags the special
    ``body: Any`` parameter, which receives dedicated handling elsewhere.
    """

    def __init__(self, name: str, ptype: Any, description: str, required: bool):
        self.name = name
        self.ptype = ptype
        self.description = description
        self.required = required
        # Plain boolean expression replaces the redundant
        # 'True if ... else False' ternary; result is identical.
        self.is_bodyany: bool = name == 'body' and ptype == 'Any'

    def docstring(self, prefix="", hanging_indent=" ", linelen=70):
        """Render this parameter as a wrapped reST ':param name:' line.

        The name is converted to pep8 style and suffixed with '_' when
        it collides with a Python reserved word.
        """
        name = camel_to_pep8(self.name)
        name = f"{name}_" if name in python_reserved else name
        line = f':param {name}: {self.description}'
        final_lines = ClassDescriptor.split_line(line, prefix=prefix,
                                                 hanging_indent=hanging_indent,
                                                 linelen=linelen)
        return "\n".join(final_lines)

    def as_python(self) -> str:
        """Render this parameter as a Python 'name: type' signature fragment.

        Optional parameters are wrapped in Optional[...] with a None
        default; reserved-word names get a trailing underscore.
        """
        # isinstance is the correct class check here: the original
        # 'type(self.ptype) == type' comparison missed classes with a
        # custom metaclass.
        if isinstance(self.ptype, type):
            ptype = self.ptype.__name__
        elif isinstance(self.ptype, ClassDescriptor):
            # forward-reference generated classes by quoted name
            ptype = f"'{self.ptype.short_name}'"
        else:
            ptype = self.ptype
        ptype = (ptype
                 if self.required
                 else f"Optional[{ptype}] = None")
        name = camel_to_pep8(self.name)
        name = f"{name}_" if name in python_reserved else name
        return f"{name}: {ptype}"
class OpResponse(object):
    """Describes one documented response (status code) of an API operation."""

    def __init__(self, code: int, description: str, ref: Optional[str] = None):
        # Status code, human-readable description, and an optional
        # reference to the payload type (may later be a ClassDescriptor).
        self.code = code
        self.description = description
        self.ref = ref

    def is_object(self) -> bool:
        """True when the response payload resolves to a generated model class."""
        payload_is_model = isinstance(self.ref, ClassDescriptor)
        return payload_is_model
class QueryDomainOperations(object):
    """Container for query-style operations that share one path domain.

    Some swagger operations take no Hikaru model object as input (though
    they may produce one on output), so there is no generated class to
    host them as methods. They are grouped instead by the path element
    that immediately follows the version element: every operation whose
    path looks like /mumble/v1/watch/moremumble lands in the 'watch'
    domain instance.
    """

    def __init__(self, domain):
        self.domain = domain
        # Maps operation id -> Operation, in registration order.
        self.operations: Dict[str, Operation] = {}

    def add_operation(self, op_id: str, operation: Operation):
        """Register *operation* under its unique swagger operation id."""
        self.operations[op_id] = operation
def get_path_version(path: str) -> str:
    """Return the first path component naming an API version, or None.

    Components are matched by prefix ('v1...' or 'v2...'), so variants
    such as 'v1beta1' and 'v2alpha1' qualify.
    """
    for segment in path.split('/'):
        if segment.startswith(('v1', 'v2')):
            return segment
    return None
def get_path_domain(path: str):
    """Return the path element immediately after the version element.

    For '/mumble/v1/watch/moremumble' this yields 'watch'. Returns None
    when the path has no version element or nothing follows it.
    """
    version = get_path_version(path)
    after_version = path.split(f"/{version}/")
    if len(after_version) <= 1:
        return None
    return after_version[1].split("/")[0]
def _best_guess(op: Operation) -> Optional[ClassDescriptor]:
    """Heuristically pick the model class an operation most likely acts on.

    Works backwards from the operation's derived method name: first
    tries each name part alone (keeping only the first hit), then
    progressively longer suffix concatenations (preferring longer
    class-name matches), and finally permutations of the collected
    parts. Returns None when nothing matches the module's class table.
    """
    # look for a good class for this op based on the op name
    md = get_module_def(op.version)
    meth_name = make_method_name(op)
    parts = meth_name.split('_')
    parts.reverse()
    try:
        parts.remove("namespaced")  # this never shows up in a class name
    except ValueError:
        pass
    # 'api' appears upper-cased in class names (e.g. APIService)
    if 'api' in parts:
        parts[parts.index('api')] = 'API'
    new_parts = []
    guess: Optional[ClassDescriptor] = None
    # first, look for just each part of the name as a class
    # only take the first match
    for part in parts:
        test_name = part.capitalize() if part != 'API' else part
        if guess is None and test_name in md.all_classes:
            guess = md.all_classes[test_name]
    # next, look for a longer name by concat'ing from the end forward, taking
    # any longer matches
    for part in parts:
        new_parts.insert(0,
                         part.capitalize() if part != 'API' else part)
        test_name = "".join(new_parts)
        if test_name in md.all_classes:
            # NOTE(review): 'not guess' relies on ClassDescriptor
            # truthiness; presumably instances are always truthy --
            # confirm no __bool__/__len__ override.
            if not guess or len(guess.short_name) < len(test_name):
                guess = md.all_classes[test_name]
    # final check: if a longer name is possible, then look for a permutation
    if not guess or len(guess.short_name) < len(''.join(new_parts)):
        # then a better guess might exist via some permutation of the name parts
        for perm in permutations(new_parts):
            test_name = "".join(perm)
            if test_name in md.all_classes:
                guess = md.all_classes[test_name]
                break
    return guess
def stop_when_true(test_expr, result_expr, seq):
    """Return result_expr(e) for the first element e of *seq* satisfying
    test_expr, or None when no element does.

    :param test_expr: callable of 1 arg that returns True/False
    :param result_expr: callable of 1 arg; maps the element found by
        test_expr to the value to return
    :param seq: iterable of elements that can be passed to test_expr;
        iteration stops at the first match
    :return: an element mapped through result_expr, or None
    """
    return next((result_expr(elem) for elem in seq if test_expr(elem)), None)
def process_params_and_responses(path: str, verb: str, op_id: str,
params: list, responses: dict, description: str,
gvk_dict: dict,
reuse_op: Operation = None) -> Operation:
version = get_path_version(path)
if reuse_op is None:
new_op = Operation(verb, path, op_id, description, gvk_dict)
else:
new_op = reuse_op
for param in params:
has_mismatch = False
name = param["name"]
required = True if ('required' in param) and param['required'] else False
description = param.get('description', '')
if "type" in param:
ptype = param["type"]
elif "schema" in param:
schema = param["schema"]
pref = schema.get("$ref")
if pref:
k8s_name = full_swagger_name(pref)
_, sver, ptype = process_swagger_name(k8s_name)
if version and sver != version:
has_mismatch = True
mod_def = get_module_def(sver)
ptype = mod_def.get_class_desc(ptype) # you need cd later...
if ptype is None:
raise RuntimeError(f"Couldn't find a ClassDescriptor for "
f"parameter {k8s_name} in {op_id}")
else:
ptype = schema.get("type")
if not ptype:
raise RuntimeError(f"Can't determine type of param '{name}' "
f"in path {path}, verb {verb}")
elif ptype == "object":
ptype = "Any"
# otherwise, leave it alone
else:
raise RuntimeError(f"Don't know what to do with param"
f" {path}.{verb}.{name}")
new_op.add_parameter(name, ptype, description, required)
if has_mismatch:
objop_param_mismatches[f"{name}:{new_op.op_id}"] = new_op
cd_in_params: Optional[ClassDescriptor] = \
stop_when_true(lambda x: x is not None and
(isinstance(x.ptype, ClassDescriptor) or
x.is_bodyany),
lambda x: x.ptype,
[new_op.self_param] + new_op.parameters)
for code, response in responses.items():
has_mismatch = False
description = response.get('description', '')
if 'schema' in response:
if '$ref' in response['schema']:
ref = full_swagger_name(response['schema']['$ref'])
_, sver, ptype = process_swagger_name(ref)
if version and sver != version:
has_mismatch = True
mod_def = get_module_def(sver)
ptype = mod_def.get_class_desc(ptype)
if ptype is None:
raise RuntimeError(f"Couldn't find a ClassDescriptor for "
f"response {code} in {op_id}")
elif 'type' in response['schema']:
ptype = response['schema']['type']
ptype = types_map.get(ptype, ptype)
else:
raise RuntimeError(f"Don't know how to deal with this"
f" schema: {response['schema']}")
else:
ptype = None
new_op.add_return(code, ptype, description)
if has_mismatch:
response_mismatches[f"{code}:{new_op.op_id}"] = new_op
cd_in_responses: Optional[ClassDescriptor] = \
stop_when_true(lambda x:
isinstance(x.ref,
ClassDescriptor),
lambda x: x.ref,
new_op.returns.values())
whose_method: | |
1",
" two" : " 2",
" three" : " 3",
" four" : " 4",
" five" : " 5",
" six" : " 6",
" seven" : " 7",
" eight" : " 8",
" nine" : " 9",
" zero" : " 0"
}
for idx, turn in self.data.items():
slots_str = ", ".join([" ".join(slot) for slot in turn["slots"]])
if domain in turn["domains"]:
if domain+" "+slot_type in slots_str:
wi_type_set[domain].add(turn["dial_id"])
else:
wo_type_set[domain].add(turn["dial_id"])
tmp_union = wo_type_set[domain] - wi_type_set[domain]
tmp_union = sorted(list(tmp_union))
domain_related = sorted(list(wo_type_set[domain].union(wi_type_set[domain])))
for dial_id in domain_related:
for turn_num in range(30):
idx = dial_id + "-" + str(turn_num)
if idx in self.data:
turn = self.data[idx]
else:
continue
# already labeled
dom_type_list = [slot[0]+"--"+slot[1] for slot in turn["slots"]]
if domain+"--"+slot_type in dom_type_list:
break
# not labeled
last_turn = turn["context"].split("<system>")[-1]
sys_utt, user_utt = last_turn.split("<user>")
slots_string = ", ".join([" ".join(slot) for slot in turn["slots"]])
# in total for 100
other_domain, name_val, other_name_val = "", "", ""
for slot in turn["slots"]:
if slot[1] == slot_type and slot[2] == value:
other_domain = slot[0]
if slot[0] == domain and slot[1] == "name":
name_val = slot[2]
if slot[0] != domain and slot[1] == "name":
other_name_val = slot[2]
if re.search(r"star[s ]", sys_utt):
# work for 0
for num_word, num in num_map.items():
for suff in [" ", "-"]:
sys_utt = re.sub(num_word+suff, num+suff, sys_utt)
if re.search(r"[0-9][- ]star[s ]", sys_utt):
tokens = re.findall(r"\d+[- ]star[s ]", sys_utt)
label_value = []
for token in tokens:
if "-" in token:
label_value.append(token.split("-")[0])
else:
label_value.append(token.split()[0])
label_value = list(set(label_value))
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, " | ".join(label_value))
pass
elif re.search(r"star rating of [0-9]", sys_utt):
tokens = re.findall(r"star rating of \d+", sys_utt)
label_value = []
for token in tokens:
label_value.append(token.split()[3])
label_value = list(set(label_value))
self.update_tmp_log(idx, turn, last_turn, domain, slot_type, " | ".join(label_value))
pass
self._save_tmp_log()
def Count_Name_Info(self):
    """Heuristically label turns that mention a venue *name* without a
    corresponding ``name`` slot annotation, for the restaurant, attraction
    and hotel domains.

    For each domain this method:
      1. splits dialogs into those that already carry a ``<domain> name``
         slot (``wi_type_set``) and those that do not (``wo_type_set``),
      2. re-scans every turn of all domain-related dialogs, and
      3. matches ontology name values against the user and system
         utterances using hand-tuned allow/deny lists (and, for
         restaurants, fuzzy matching), queueing candidates in
         ``self.tmp_log`` via ``update_tmp_log`` and saving one JSON log
         per domain.
    """
    # dialog ids that DO contain a "<domain> name" slot somewhere
    wi_type_set = {
        "attraction": set(),
        "hotel": set(),
        "restaurant": set(),
    }
    # dialog ids in the domain WITHOUT such a slot (candidates for missed labels)
    wo_type_set = {
        "attraction": set(),
        "hotel": set(),
        "restaurant": set(),
    }
    # NOTE(review): `tmp` is never used below — kept for parity with the original
    tmp = {
        "attraction": set(),
        "hotel": set(),
        "restaurant": set(),
    }
    for domain in ["restaurant", "attraction", "hotel"]:#tmp:
        slot_type = "name"
        self.tmp_log = {}
        # one review log per (domain, slot_type), written next to the data file
        self.tmp_log_path = ".".join(self.data_path.split(".")[:-1]) + f"_{domain}_{slot_type}.json"
        self._load_otgy()
        # all known name values for this domain from the ontology
        poss_value = self.otgy[f"{domain}--{slot_type}"]
        # pass 1: partition dialogs by whether they already have the slot
        for idx, turn in self.data.items():
            slots_str = ", ".join([" ".join(slot) for slot in turn["slots"]])
            if domain in turn["domains"]:
                if domain+" "+slot_type in slots_str:
                    wi_type_set[domain].add(turn["dial_id"])
                else:
                    wo_type_set[domain].add(turn["dial_id"])
        tmp_union = wo_type_set[domain] - wi_type_set[domain]
        tmp_union = sorted(list(tmp_union))  # NOTE(review): computed but unused below
        domain_related = sorted(list(wo_type_set[domain].union(wi_type_set[domain])))
        # pass 2: scan every turn of every domain-related dialog
        for dial_id in tqdm(domain_related):
            for turn_num in range(30):  # 30 — assumed upper bound on turns per dialog; TODO confirm
                idx = dial_id + "-" + str(turn_num)
                if idx in self.data:
                    turn = self.data[idx]
                else:
                    continue
                # already labeled
                dom_type_list = [slot[0]+"--"+slot[1] for slot in turn["slots"]]
                if domain+"--"+slot_type in dom_type_list:
                    continue
                # not labeled — inspect only the last system/user exchange
                last_turn = turn["context"].split("<system>")[-1]
                sys_utt, user_utt = last_turn.split("<user>")
                slots_string = ", ".join([" ".join(slot) for slot in turn["slots"]])
                flag = 0
                # name_val: this domain's name (if any); other_name_val: a name from another domain
                # NOTE(review): other_domain and name_val are set up but never read below
                other_domain, name_val, other_name_val = "", "", ""
                for slot in turn["slots"]:
                    if slot[0] == domain and slot[1] == "name":
                        name_val = slot[2]
                    if slot[0] != domain and slot[1] == "name":
                        other_name_val = slot[2]
                if domain == "restaurant":
                    # in total for 137
                    # adding
                    # NOTE(review): update() re-runs every turn; harmless since keys repeat
                    poss_value.update({
                        "caffe uno":0,
                        "travelers rest":0,
                    })
                    # removing
                    weird_rest_name = ["one", "ali", "bridge", "ask", "indian",
                                       "south", "city", "italian restaurant",
                                       "other restaurant", "ashley hotel",
                                       "pizza", "funky","scudamores punt","scudamores punting",
                                       "molecular gastronomy", "broughton house gallery",
                                       "cambridge punter", "el shaddai", "el shaddia guesthouse",
                                       "indian", "indiana restaurants", "south", ]
                    # first, look for a name in the user utterance
                    for value in poss_value:
                        label_value = value
                        # if " "+value in user_utt:
                        if re.search(r"[^a-z]"+value+r"[^a-z]", user_utt):
                            if value not in weird_rest_name\
                                and value not in other_name_val \
                                and "taxi" not in user_utt:
                                # # work for 18
                                flag = 1
                                self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
                                pass
                    # otherwise, fall back to the system utterance (exact, then fuzzy)
                    if flag == 0:
                        for value in poss_value:
                            label_value = value
                            if value not in weird_rest_name \
                                and value not in other_name_val:
                                # used to use weird list ["one", "ali", "bridge", "ask", "indian", "south", "city", "italian restaurant", "other restaurant"]
                                if re.search(r"[^a-z]"+value+r"[^a-z]", sys_utt) and value not in user_utt:
                                    # work for 125
                                    self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
                                    pass
                                elif value != "j restaurant" and fuzz.partial_ratio(value, sys_utt) >= 90:
                                    self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
                                    pass
                if domain == "hotel":
                    # in total for 150
                    # adding
                    poss_value.update({
                        "rosa's bed and breakfast": 0,
                        "alexander b & b": 0,
                        "marriott": 0,
                    })
                    # removing
                    weird_rest_name = ["yes", "hotel", "sou", "north", "bridge","doubletree by hilton cambridge"]
                    for value in poss_value:
                        label_value = value
                        if re.search(r"[^a-z]"+value+r"[^a-z]", user_utt):
                            if value not in weird_rest_name \
                                and value not in other_name_val \
                                and "taxi" not in user_utt:
                                # # work for 11
                                pass
                                flag = 1
                                self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
                    if flag == 0:
                        for value in poss_value:
                            label_value = value
                            if re.search(r"[^a-z]"+value+r"[^a-z]", sys_utt) and value not in user_utt \
                                and value not in weird_rest_name \
                                and "taxi" not in sys_utt \
                                and value not in other_name_val:
                                # work for 146
                                self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
                                pass
                if domain == "attraction":
                    # in total for 147
                    # adding
                    poss_value.update({
                        "all saints":0,
                    })
                    # removing
                    weird_val_list = ["pizza", "country park", "university arms hotel",
                                      "art", "cambridge", "street", "places", "bridge",
                                      "place", "museums", "museum", "fun", "college", "trinity",
                                      "free", "boat", "gallery", "aylesbray lodge guest house",
                                      "church's", "milton", "funky", "nusha"]
                    for value in poss_value:
                        label_value = value
                        if re.search(r"[^a-z]"+value+r"[^a-z]", user_utt):
                            if value not in weird_val_list \
                                and value not in other_name_val \
                                and "taxi" not in user_utt:
                                # # work for 8
                                pass
                                flag = 1
                                self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
                    if flag == 0:
                        for value in poss_value:
                            label_value = value
                            if re.search(r"[^a-z]"+value+r"[^a-z]", sys_utt):
                                if value not in user_utt \
                                    and value not in weird_val_list \
                                    and "taxi" not in sys_utt \
                                    and value not in other_name_val:
                                    # work for 141
                                    self.update_tmp_log(idx, turn, last_turn, domain, slot_type, label_value)
                                    pass
        # persist this domain's candidates to self.tmp_log_path
        self._save_tmp_log()
def token_distance(self, s, w1, w2):
if w1 == w2 :
return 0
# get individual words in a list
s = re.sub("n't ", " not ", s)
s = re.sub(r"\.|,|\?", " ", s)
words = s.split(" ")
# assume total length of the string as
# minimum distance
min_dist = len(words)+1
# traverse through the entire string
for index in range(len(words)):
if words[index] == w1:
for search in range(len(words)):
if words[search] == w2:
# the distance between the words is
# the index of the first word - the
# current word index
curr = abs(index - search) - 1;
# comparing current distance with
# the previously assumed distance
if curr < min_dist:
min_dist = curr
# w1 and w2 are same and adjacent
return min_dist
def _load_otgy(self):
    """Load the ontology (slot "domain--type" -> possible values) from self.otgy_path."""
    self.otgy = self._load_json(self.otgy_path)
def update_tmp_log(self, idx, turn, last_turn, domain, slot_type, label_value):
    """Record (or merge) a candidate slot label for turn *idx* in self.tmp_log.

    The entry's "add_slot" field has the form
    ``"<domain> <slot_type> v1 | v2 | ..."``.  On a repeated call for the
    same turn, the new value is merged in: existing values that are strict
    substrings of the new one are dropped, duplicates (ignoring a leading
    "the ") are skipped, and multiple values are re-ordered by their first
    appearance in *last_turn*.
    """
    if idx not in self.tmp_log:
        # first candidate for this turn: store a trimmed copy of the turn
        self.tmp_log[idx] = turn.copy()
        del self.tmp_log[idx]["domains"]
        self.tmp_log[idx]["context"] = last_turn
        # flatten the slot triples into one readable string
        self.tmp_log[idx]["slots"] = ", ".join([" ".join(slot) for slot in self.tmp_log[idx]["slots"]])
        self.tmp_log[idx]["add_slot"] = f"{domain} {slot_type} {label_value}"
    else:
        exist_val_list = self.tmp_log[idx]["add_slot"].split(" | ")
        # strip the "<domain> <slot_type> " prefix from the first entry
        exist_val_list[0] = " ".join(exist_val_list[0].split(" ")[2:])
        remove_set = set()
        # compare names ignoring a leading "the "
        label_value_tmp = label_value
        if label_value.startswith("the "):
            label_value_tmp = "".join(label_value[4:])
        for exist_val in exist_val_list:
            exist_val_tmp = exist_val
            if exist_val.startswith("the "):
                exist_val_tmp = "".join(exist_val[4:])
            # drop existing values that are strict substrings of the new value
            if exist_val_tmp in label_value \
                and exist_val_tmp != label_value \
                and exist_val_tmp != label_value_tmp:
                remove_set.add(exist_val)
        exist_val_list = list(set(exist_val_list) - remove_set)
        # only append if not already present (modulo "the " prefix)
        if label_value_tmp not in self.tmp_log[idx]["add_slot"]:
            exist_val_list.append(label_value)
        # order as in context
        if len(exist_val_list) > 1:
            val_idx_dict = {}
            for value in exist_val_list:
                val_idx_dict[value] = last_turn.find(value)
            val_idx_dict = OrderedDict(
                sorted(val_idx_dict.items(), key=lambda t: t[1])
            )
            exist_val_list = list(val_idx_dict.keys())
        # pdb.set_trace()
        self.tmp_log[idx]["add_slot"] = f"{domain} {slot_type} "+ " | ".join(exist_val_list)
def analyze(self):
    """
    analyze results

    Driver for the full analysis pipeline: loads the data, stores the
    basic/refer counts via update_results, builds the ontology on first
    run, then runs the per-slot heuristic labeling passes (each pass
    writes its own tmp-log file).
    """
    self._load_data()
    # basic info
    count_basic_info = self.Count_Basic_Info()
    self.update_results(key_="count_basic_info", value_=count_basic_info)
    # refer info
    count_refer_info = self.Count_Refer_Info()
    self.update_results(key_="count_refer_info", value_=count_refer_info)
    # # name info
    # count_name_info = self.Count_Name_Info_old()
    # self.update_results(key_="count_name_info", value_=count_name_info)
    # build the ontology once; the Count_* passes read it via _load_otgy()
    if not os.path.exists(self.otgy_path):
        self.Create_OTGY_M22()
    # self.Analyze_Bias()
    # self.Search_Name_Entity()
    # save tmp log
    # self._save_tmp_log()
    self.Count_Type_Info()
    self.Count_Dest_Depa_Info()
    self.Count_Area_Info()
    self.Count_Price_Info()
    self.Count_Food_Info()
    self.Count_Parking_Info()
| |
<filename>ACME-oneM2M-CSE-master/tests/testMgmtObj.py
#
# testMgmtObj.py
#
# (c) 2020 by <NAME>
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Unit tests for all kind of MgmtObj specialisations
#
import unittest, sys
import requests
sys.path.append('../acme')
from typing import Tuple
from Constants import Constants as C
from Types import ResourceTypes as T, ResponseCode as RC
from init import *
nodeID = 'urn:sn:1234'
nod2RN = 'test2NOD'
nod2URL = f'{cseURL}/{nod2RN}'
class TestMgmtObj(unittest.TestCase):
nod = None
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def setUpClass(cls) -> None:
    """ Create the <node> resource that every MgmtObj specialisation test attaches to. """
    payload = {'m2m:nod': {
        'rn': nodRN,
        'ni': nodeID,
    }}
    cls.nod, result = CREATE(cseURL, ORIGINATOR, T.NOD, payload)
    assert result == RC.created, 'cannot create <node>'
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def tearDownClass(cls) -> None:
    """ Remove the test <node> created in setUpClass (children go with it). """
    DELETE(nodURL, ORIGINATOR)  # Just delete the Node and everything below it. Ignore whether it exists or not
#
#	FWR
#
fwrRN = 'fwr'  # resource name of the [firmware] test resource
fwrURL = f'{nodURL}/{fwrRN}'  # its full URL below the test <node>
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createFWR(self) -> None:
    """ Create [Firmware].

        Note: the 'url' attribute must be 'example.com' — test_attributesFWR
        asserts exactly that value on retrieval, so the corrupted
        'example.<EMAIL>' placeholder would make the suite fail.
    """
    dct = { 'm2m:fwr' : {
                'mgd' : T.FWR,
                'rn' : self.fwrRN,
                'dc' : 'aFwr',
                'vr' : '1234',
                'fwn': 'myFwr',
                'url': 'example.com',  # fixed: was 'example.<EMAIL>' (anonymization artifact)
                'ud' : False
            }}
    r, rsc = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, dct)
    self.assertEqual(rsc, RC.created)
    self.assertIsNotNone(findXPath(r, 'm2m:fwr/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveFWR(self) -> None:
    """ Retrieve [Firmware] and check its management definition type. """
    resource, status = RETRIEVE(self.fwrURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:fwr/mgd'), T.FWR)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesFWR(self) -> None:
    """ Verify the universal and [Firmware]-specific attributes created earlier. """
    resource, status = RETRIEVE(self.fwrURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:fwr/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:fwr/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:fwr/rn'), self.fwrRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/et'))
    # [firmware]-specific attributes, as set in test_createFWR
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/dc'))
    self.assertEqual(findXPath(resource, 'm2m:fwr/dc'), 'aFwr')
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/vr'))
    self.assertEqual(findXPath(resource, 'm2m:fwr/vr'), '1234')
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/fwn'))
    self.assertEqual(findXPath(resource, 'm2m:fwr/fwn'), 'myFwr')
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/url'))
    self.assertEqual(findXPath(resource, 'm2m:fwr/url'), 'example.com')
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/ud'))
    self.assertEqual(findXPath(resource, 'm2m:fwr/ud'), False)
    self.assertIsNotNone(findXPath(resource, 'm2m:fwr/uds'))
    self.assertIsInstance(findXPath(resource, 'm2m:fwr/uds'), dict)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteFWR(self) -> None:
    """ Delete [Firmware] and expect DELETED. """
    status = DELETE(self.fwrURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
#	SWR
#
swrRN = 'swr'  # resource name of the [software] test resource
swrURL = f'{nodURL}/{swrRN}'  # its full URL below the test <node>
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createSWR(self) -> None:
    """ Create [Software] under the test <node> and expect CREATED. """
    payload = {'m2m:swr': {
        'mgd': T.SWR,
        'rn': self.swrRN,
        'dc': 'aSwr',
        'vr': '1234',
        'swn': 'mySwr',
        'url': 'example.com',
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveSWR(self) -> None:
    """ Retrieve [Software] and check its management definition type. """
    resource, status = RETRIEVE(self.swrURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:swr/mgd'), T.SWR)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesSWR(self) -> None:
    """ Verify the universal and [Software]-specific attributes created earlier. """
    resource, status = RETRIEVE(self.swrURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:swr/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:swr/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:swr/rn'), self.swrRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/et'))
    # [software]-specific attributes, as set in test_createSWR
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/dc'))
    self.assertEqual(findXPath(resource, 'm2m:swr/dc'), 'aSwr')
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/vr'))
    self.assertEqual(findXPath(resource, 'm2m:swr/vr'), '1234')
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/swn'))
    self.assertEqual(findXPath(resource, 'm2m:swr/swn'), 'mySwr')
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/url'))
    self.assertEqual(findXPath(resource, 'm2m:swr/url'), 'example.com')
    # server-assigned [software] attributes
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/in'))
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/un'))
    self.assertIsNotNone(findXPath(resource, 'm2m:swr/ins'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteSWR(self) -> None:
    """ Delete [Software] and expect DELETED. """
    status = DELETE(self.swrURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
# MEM
#
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createMEM(self) -> None:
    """ Create [Memory] under the test <node> and expect CREATED. """
    payload = {'m2m:mem': {
        'mgd': T.MEM,
        'rn': memRN,
        'dc': 'aMem',
        'mma': 1234,
        'mmt': 4321,
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:mem/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveMEM(self) -> None:
    """ Retrieve [Memory] and check its management definition type. """
    resource, status = RETRIEVE(memURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:mem/mgd'), T.MEM)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesMEM(self) -> None:
    """ Verify the universal and [Memory]-specific attributes created earlier. """
    resource, status = RETRIEVE(memURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:mem/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:mem/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:mem/rn'), memRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:mem/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:mem/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:mem/et'))
    # [memory]-specific attributes, as set in test_createMEM
    self.assertIsNotNone(findXPath(resource, 'm2m:mem/dc'))
    self.assertEqual(findXPath(resource, 'm2m:mem/dc'), 'aMem')
    self.assertIsNotNone(findXPath(resource, 'm2m:mem/mma'))
    self.assertEqual(findXPath(resource, 'm2m:mem/mma'), 1234)
    self.assertIsNotNone(findXPath(resource, 'm2m:mem/mmt'))
    self.assertEqual(findXPath(resource, 'm2m:mem/mmt'), 4321)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteMEM(self) -> None:
    """ Delete [Memory] and expect DELETED. """
    status = DELETE(memURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
#	ANI
#
aniRN = 'ANI'  # resource name of the [areaNwkInfo] test resource
aniURL = f'{nodURL}/{aniRN}'  # its full URL below the test <node>
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createANI(self) -> None:
    """ Create [areaNwkInfo] under the test <node> and expect CREATED. """
    payload = {'m2m:ani': {
        'mgd': T.ANI,
        'rn': self.aniRN,
        'dc': 'aAni',
        'ant': 'aniType',
        'ldv': ['dev1', 'dev2'],
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:ani/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveANI(self) -> None:
    """ Retrieve [areaNwkInfo] and check its management definition type. """
    resource, status = RETRIEVE(self.aniURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:ani/mgd'), T.ANI)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesANI(self) -> None:
    """ Verify the universal and [areaNwkInfo]-specific attributes created earlier. """
    resource, status = RETRIEVE(self.aniURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:ani/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:ani/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:ani/rn'), self.aniRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:ani/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:ani/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:ani/et'))
    # [areaNwkInfo]-specific attributes, as set in test_createANI
    self.assertIsNotNone(findXPath(resource, 'm2m:ani/dc'))
    self.assertEqual(findXPath(resource, 'm2m:ani/dc'), 'aAni')
    self.assertIsNotNone(findXPath(resource, 'm2m:ani/ant'))
    self.assertEqual(findXPath(resource, 'm2m:ani/ant'), 'aniType')
    self.assertIsNotNone(findXPath(resource, 'm2m:ani/ldv'))
    self.assertIsInstance(findXPath(resource, 'm2m:ani/ldv'), list)
    self.assertEqual(len(findXPath(resource, 'm2m:ani/ldv')), 2)
    self.assertIn('dev1', findXPath(resource, 'm2m:ani/ldv'))
    self.assertIn('dev2', findXPath(resource, 'm2m:ani/ldv'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteANI(self) -> None:
    """ Delete [areaNwkInfo] and expect DELETED. """
    status = DELETE(self.aniURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
#	ANDI
#
andiRN = 'ANDI'  # resource name of the [areaNwkDeviceInfo] test resource
andiURL = f'{nodURL}/{andiRN}'  # its full URL below the test <node>
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createANDI(self) -> None:
    """ Create [areaNwkDeviceInfo] under the test <node> and expect CREATED. """
    payload = {'m2m:andi': {
        'mgd': T.ANDI,
        'rn': self.andiRN,
        'dc': 'aAndi',
        'dvd': 'aDeviceID',
        'dvt': 'aDeviceType',
        'awi': 'aNetworkID',
        'sli': 5,
        'sld': 23,
        'lnh': ['dev1', 'dev2'],
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveANDI(self) -> None:
    """ Retrieve [areaNwkDeviceInfo] and check its management definition type. """
    resource, status = RETRIEVE(self.andiURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:andi/mgd'), T.ANDI)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesANDI(self) -> None:
    """ Verify the universal and [areaNwkDeviceInfo]-specific attributes created earlier. """
    resource, status = RETRIEVE(self.andiURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:andi/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:andi/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:andi/rn'), self.andiRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/et'))
    # [areaNwkDeviceInfo]-specific attributes, as set in test_createANDI
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/dc'))
    self.assertEqual(findXPath(resource, 'm2m:andi/dc'), 'aAndi')
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/dvd'))
    self.assertEqual(findXPath(resource, 'm2m:andi/dvd'), 'aDeviceID')
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/dvt'))
    self.assertEqual(findXPath(resource, 'm2m:andi/dvt'), 'aDeviceType')
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/awi'))
    self.assertEqual(findXPath(resource, 'm2m:andi/awi'), 'aNetworkID')
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/sli'))
    self.assertEqual(findXPath(resource, 'm2m:andi/sli'), 5)
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/sld'))
    self.assertEqual(findXPath(resource, 'm2m:andi/sld'), 23)
    self.assertIsNotNone(findXPath(resource, 'm2m:andi/lnh'))
    self.assertIsInstance(findXPath(resource, 'm2m:andi/lnh'), list)
    self.assertEqual(len(findXPath(resource, 'm2m:andi/lnh')), 2)
    self.assertIn('dev1', findXPath(resource, 'm2m:andi/lnh'))
    self.assertIn('dev2', findXPath(resource, 'm2m:andi/lnh'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteANDI(self) -> None:
    """ Delete [areaNwkDeviceInfo] and expect DELETED. """
    status = DELETE(self.andiURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
# BAT
#
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createBAT(self) -> None:
    """ Create [battery] under the test <node> and expect CREATED. """
    payload = {'m2m:bat': {
        'mgd': T.BAT,
        'rn': batRN,
        'dc': 'aBat',
        'btl': 23,
        'bts': 5,
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:bat/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveBAT(self) -> None:
    """ Retrieve [battery] and check its management definition type. """
    resource, status = RETRIEVE(batURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:bat/mgd'), T.BAT)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesBAT(self) -> None:
    """ Verify the universal and [battery]-specific attributes created earlier. """
    resource, status = RETRIEVE(batURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:bat/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:bat/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:bat/rn'), batRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:bat/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:bat/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:bat/et'))
    # [battery]-specific attributes, as set in test_createBAT
    self.assertIsNotNone(findXPath(resource, 'm2m:bat/dc'))
    self.assertEqual(findXPath(resource, 'm2m:bat/dc'), 'aBat')
    self.assertIsNotNone(findXPath(resource, 'm2m:bat/btl'))
    self.assertEqual(findXPath(resource, 'm2m:bat/btl'), 23)
    self.assertIsNotNone(findXPath(resource, 'm2m:bat/bts'))
    self.assertEqual(findXPath(resource, 'm2m:bat/bts'), 5)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteBAT(self) -> None:
    """ Delete [battery] and expect DELETED. """
    status = DELETE(batURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
#	DVI
#
dviRN = 'DVI'  # resource name of the [deviceInfo] test resource
dviURL = f'{nodURL}/{dviRN}'  # its full URL below the test <node>
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createDVI(self) -> None:
    """ Create [deviceInfo] under the test <node> and expect CREATED. """
    payload = {'m2m:dvi': {
        'mgd': T.DVI,
        'rn': self.dviRN,
        'dc': 'aDvi',
        'dlb': '|label:value anotherLabel:value',
        'man': 'a Manufacturer',
        'mfdl': 'https://link.to.manufacturer.com/details',
        'mfd': '20010511T214200',
        'mod': 'Heart of Gold',
        'smod': 'No.1',
        'dty': 'Starship',
        'dvnm': 'a Device Name',
        'fwv': '1.0',
        'swv': '1.1',
        'hwv': '1.2',
        'osv': '1.3',
        'cnty': 'Earth',
        'loc': 'Sol',
        'syst': '20010511T214200',
        'spur': 'http://example.com',
        'purl': 'http://example.com/ui',
        'ptl': ['http'],
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveDVI(self) -> None:
    """ Retrieve [deviceInfo] and check its management definition type. """
    resource, status = RETRIEVE(self.dviURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:dvi/mgd'), T.DVI)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesDVI(self) -> None:
    """ Verify the universal and [deviceInfo]-specific attributes created earlier. """
    resource, status = RETRIEVE(self.dviURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:dvi/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:dvi/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/rn'), self.dviRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/et'))
    # [deviceInfo]-specific attributes, as set in test_createDVI
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/dc'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/dc'), 'aDvi')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/dlb'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/dlb'), '|label:value anotherLabel:value')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/man'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/man'), 'a Manufacturer')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/mfdl'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/mfdl'), 'https://link.to.manufacturer.com/details')
    # NOTE: man is checked twice in the original; kept for identical behavior
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/man'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/man'), 'a Manufacturer')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/mfd'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/mfd'), '20010511T214200')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/mod'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/mod'), 'Heart of Gold')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/smod'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/smod'), 'No.1')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/dty'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/dty'), 'Starship')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/dvnm'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/dvnm'), 'a Device Name')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/fwv'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/fwv'), '1.0')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/swv'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/swv'), '1.1')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/hwv'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/hwv'), '1.2')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/osv'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/osv'), '1.3')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/cnty'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/cnty'), 'Earth')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/loc'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/loc'), 'Sol')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/syst'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/syst'), '20010511T214200')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/spur'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/spur'), 'http://example.com')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/purl'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/purl'), 'http://example.com/ui')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvi/ptl'))
    self.assertEqual(findXPath(resource, 'm2m:dvi/ptl'), ['http'])
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteDVI(self) -> None:
    """ Delete [deviceInfo] and expect DELETED. """
    status = DELETE(self.dviURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
#	DVC
#
dvcRN = 'DVC'  # resource name of the [deviceCapability] test resource
dvcURL = f'{nodURL}/{dvcRN}'  # its full URL below the test <node>
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createDVC(self) -> None:
    """ Create [deviceCapability] under the test <node> and expect CREATED. """
    payload = {'m2m:dvc': {
        'mgd': T.DVC,
        'rn': self.dvcRN,
        'dc': 'aDvc',
        'can': 'aCapabilityName',
        'att': True,
        'cas': {
            'acn': 'anAction',
            'sus': 1,
        },
        'cus': True,
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveDVC(self) -> None:
    """ Retrieve [deviceCapability] and check its management definition type. """
    resource, status = RETRIEVE(self.dvcURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:dvc/mgd'), T.DVC)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesDVC(self) -> None:
    """ Verify the universal and [deviceCapability]-specific attributes created earlier. """
    resource, status = RETRIEVE(self.dvcURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:dvc/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:dvc/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:dvc/rn'), self.dvcRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/et'))
    # [deviceCapability]-specific attributes, as set in test_createDVC
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/dc'))
    self.assertEqual(findXPath(resource, 'm2m:dvc/dc'), 'aDvc')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/can'))
    self.assertEqual(findXPath(resource, 'm2m:dvc/can'), 'aCapabilityName')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/att'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/att'))
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/cas/acn'))
    self.assertEqual(findXPath(resource, 'm2m:dvc/cas/acn'), 'anAction')
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/cas/sus'))
    self.assertEqual(findXPath(resource, 'm2m:dvc/cas/sus'), 1)
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/cus'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/cus'))
    # ena/dis are server-managed and expected to default to True
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/ena'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/ena'))
    self.assertIsNotNone(findXPath(resource, 'm2m:dvc/dis'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/dis'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateDVCEnaTrue(self) -> None:
    """ Update [deviceCapability] ENA=True """
    # docstring fixed: it previously said "ENA=False" although this test sets ena=True
    dct = { 'm2m:dvc' : {
                'ena' : True,
            }}
    r, rsc = UPDATE(self.dvcURL, ORIGINATOR, dct)
    self.assertEqual(rsc, RC.updated)
    # both flags are expected to read back True after the update
    self.assertTrue(findXPath(r, 'm2m:dvc/ena'))
    self.assertTrue(findXPath(r, 'm2m:dvc/dis'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateDVCEnaFalse(self) -> None:
    """ Update [deviceCapability] ENA=False """
    payload = {'m2m:dvc': {
        'ena': False,
    }}
    resource, status = UPDATE(self.dvcURL, ORIGINATOR, payload)
    self.assertEqual(status, RC.updated)
    # both flags are expected to read back True after the update
    self.assertTrue(findXPath(resource, 'm2m:dvc/ena'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/dis'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateDVCDisTrue(self) -> None:
    """ Update [deviceCapability] DIS=True """
    payload = {'m2m:dvc': {
        'dis': True,
    }}
    resource, status = UPDATE(self.dvcURL, ORIGINATOR, payload)
    self.assertEqual(status, RC.updated)
    # both flags are expected to read back True after the update
    self.assertTrue(findXPath(resource, 'm2m:dvc/ena'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/dis'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateDVCDisFalse(self) -> None:
    """ Update [deviceCapability] DIS=False """
    payload = {'m2m:dvc': {
        'dis': False,
    }}
    resource, status = UPDATE(self.dvcURL, ORIGINATOR, payload)
    self.assertEqual(status, RC.updated)
    # both flags are expected to read back True after the update
    self.assertTrue(findXPath(resource, 'm2m:dvc/ena'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/dis'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateDVCEnaDisTrue(self) -> None:
    """ Test [deviceCapability] ENA=True & DIS = True -> Fail """
    payload = {'m2m:dvc': {
        'ena': True,
        'dis': True,
    }}
    resource, status = UPDATE(self.dvcURL, ORIGINATOR, payload)
    # setting both flags at once is invalid and must be rejected
    self.assertEqual(status, RC.badRequest)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateDVCEnaDisFalse(self) -> None:
    """ Update [deviceCapability] ENA=False & DIS=False -> ENA=True & DIS=True """
    payload = {'m2m:dvc': {
        'ena': False,
        'dis': False,
    }}
    resource, status = UPDATE(self.dvcURL, ORIGINATOR, payload)
    self.assertEqual(status, RC.updated)
    # both flags are expected to read back True after the update
    self.assertTrue(findXPath(resource, 'm2m:dvc/ena'))
    self.assertTrue(findXPath(resource, 'm2m:dvc/dis'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteDVC(self) -> None:
    """ Delete [deviceCapability] and expect DELETED. """
    status = DELETE(self.dvcURL, ORIGINATOR)[1]
    self.assertEqual(status, RC.deleted)
#
#	RBO
#
rboRN = 'RBO'  # resource name of the [reboot] test resource
rboURL = f'{nodURL}/{rboRN}'  # its full URL below the test <node>
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createRBO(self) -> None:
    """ Create [reboot] under the test <node> and expect CREATED. """
    payload = {'m2m:rbo': {
        'mgd': T.RBO,
        'rn': self.rboRN,
        'dc': 'aRbo',
        'rbo': False,
        'far': False,
    }}
    resource, status = CREATE(nodURL, ORIGINATOR, T.MGMTOBJ, payload)
    self.assertEqual(status, RC.created)
    self.assertIsNotNone(findXPath(resource, 'm2m:rbo/ri'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_retrieveRBO(self) -> None:
    """ Retrieve [reboot] and check its management definition type. """
    resource, status = RETRIEVE(self.rboURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    self.assertEqual(findXPath(resource, 'm2m:rbo/mgd'), T.RBO)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_attributesRBO(self) -> None:
    """ Verify the universal and [reboot]-specific attributes created earlier. """
    resource, status = RETRIEVE(self.rboURL, ORIGINATOR)
    self.assertEqual(status, RC.OK)
    # universal resource attributes
    self.assertEqual(findXPath(resource, 'm2m:rbo/ty'), T.MGMTOBJ)
    self.assertEqual(findXPath(resource, 'm2m:rbo/pi'), findXPath(TestMgmtObj.nod, 'm2m:nod/ri'))
    self.assertEqual(findXPath(resource, 'm2m:rbo/rn'), self.rboRN)
    self.assertIsNotNone(findXPath(resource, 'm2m:rbo/ct'))
    self.assertIsNotNone(findXPath(resource, 'm2m:rbo/lt'))
    self.assertIsNotNone(findXPath(resource, 'm2m:rbo/et'))
    # [reboot]-specific attributes, as set in test_createRBO
    self.assertIsNotNone(findXPath(resource, 'm2m:rbo/dc'))
    self.assertEqual(findXPath(resource, 'm2m:rbo/dc'), 'aRbo')
    self.assertIsNotNone(findXPath(resource, 'm2m:rbo/rbo'))
    self.assertFalse(findXPath(resource, 'm2m:rbo/rbo'))
    self.assertIsNotNone(findXPath(resource, 'm2m:rbo/far'))
    self.assertFalse(findXPath(resource, 'm2m:rbo/far'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateRBORboTrue(self) -> None:
    """ Update [reboot] with RBO=True -> RBO=False """
    payload = {'m2m:rbo': {
        'rbo': True,
    }}
    resource, status = UPDATE(self.rboURL, ORIGINATOR, payload)
    self.assertEqual(status, RC.updated)
    # both flags are expected to read back False after the update
    self.assertFalse(findXPath(resource, 'm2m:rbo/rbo'))
    self.assertFalse(findXPath(resource, 'm2m:rbo/far'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateRBORboFalse(self) -> None:
    """ Update [reboot] with RBO=False -> RBO=False """
    payload = {'m2m:rbo': {
        'rbo': False,
    }}
    resource, status = UPDATE(self.rboURL, ORIGINATOR, payload)
    self.assertEqual(status, RC.updated)
    # both flags are expected to read back False after the update
    self.assertFalse(findXPath(resource, 'm2m:rbo/rbo'))
    self.assertFalse(findXPath(resource, 'm2m:rbo/far'))
@unittest.skipIf(noCSE, 'No CSEBase')
def test_updateRBOFarTrue(self) -> | |
newDirDialog = QtGui.QInputDialog()
newDirDialog.setFocus()
dirName, ok = newDirDialog.getText(self, 'Add Directory', 'Enter directory name:')
if ok:
if dirName == '' or '/' in dirName:
warn_str = "add directory failed: the new directory name is an empty string or contains /: " + str(dirName) + "; directory not added"
warnings.warn(warn_str, RuntimeWarning)
self.ui.treeWidgetActors.blockSignals(False)
return
else:
new_actor_tree_widget_ID = actor_tree_widget_ID + str(dirName) + '/'
# we should check if this directory already exists before adding it
actor_tree_widget_list = self.ui.treeWidgetActors.findItems(new_actor_tree_widget_ID, QtCore.Qt.MatchExactly|QtCore.Qt.MatchRecursive, 1)
if len(actor_tree_widget_list) > 0:
warn_str = "add directory failed: a directory of that ID: " + str(dirName) + " already exists; directory not added"
warnings.warn(warn_str, RuntimeWarning)
self.ui.treeWidgetActors.blockSignals(False)
return
new_tree_widget_item = QtGui.QTreeWidgetItem(treeWidgetItem)
new_tree_widget_item.setText(0, dirName)
new_tree_widget_item.setText(1, new_actor_tree_widget_ID)
new_tree_widget_item.setIcon(2, QtGui.QIcon('icons/folder.png'))
new_tree_widget_item.setText(2, DIR_TYPE)
new_tree_widget_item.setFlags(new_tree_widget_item.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)
new_tree_widget_item.setExpanded(True)
new_tree_widget_item.setCheckState(0, QtCore.Qt.Checked)
# add a reference to this directory in our dict
new_tree_object = TreeObject()
new_tree_object.object_type = DIR_TYPE
new_tree_object.transform = vtk.vtkTransform()
parent_tree_widget_item = new_tree_widget_item.parent()
if parent_tree_widget_item is not None:
parent_tree_object = self.tree_widget_items_to_objects[parent_tree_widget_item]
new_tree_object.transform.PostMultiply()
new_tree_object.transform.Concatenate(parent_tree_object.transform)
self.tree_widget_items_to_objects[new_tree_widget_item] = new_tree_object
self.ui.treeWidgetActors.blockSignals(False)
def treeItemRename(self, treeWidgetItem):
    """Prompt the user for a new name for treeWidgetItem and update both its
    display name (column 0) and its '/'-separated path ID (column 1).

    Actors/billboards/axes rename only the last path component; directories
    rename their own component and recurse so every descendant's ID is updated.
    """
    # column 1 holds the item's full '/'-separated path ID
    actor_tree_widget_ID = str(treeWidgetItem.text(1))
    tree_object = self.tree_widget_items_to_objects[treeWidgetItem]
    tree_object_type = tree_object.object_type
    if tree_object_type == ACTOR_TYPE or tree_object_type == BILLBOARD_TYPE or tree_object_type == AXES_TYPE:
        # if the tree object is an actor, startup the rename actor widget
        renameDialog = QtGui.QInputDialog()
        renameDialog.setFocus()
        newName, ok = renameDialog.getText(self, 'Rename Actor', 'Enter new name:')
        if ok:
            # '/' is the path separator in IDs, so names may not contain it
            if newName == '' or '/' in newName:
                warn_str = "rename failed: the new name is an empty string or contains /: " + str(newName) + "; actor not renamed"
                warnings.warn(warn_str, RuntimeWarning)
                return
            else:
                # an actor's own name is the last path component
                level_list = actor_tree_widget_ID.split('/')
                level_list[-1] = str(newName)
                new_actor_tree_widget_ID = '/'.join(level_list)
                # refuse the rename if an item with the new ID already exists
                actor_tree_widget_list = self.ui.treeWidgetActors.findItems(new_actor_tree_widget_ID, QtCore.Qt.MatchExactly|QtCore.Qt.MatchRecursive, 1)
                if len(actor_tree_widget_list) > 0:
                    warn_str = "rename failed: an actor of that ID: " + str(level_list) + " already exists; actor not renamed"
                    warnings.warn(warn_str, RuntimeWarning)
                    return
                treeWidgetItem.setText(0, newName)
                treeWidgetItem.setText(1, new_actor_tree_widget_ID)
    elif tree_object_type == DIR_TYPE:
        # if the tree object is a directory, startup the rename directory widget
        renameDialog = QtGui.QInputDialog()
        renameDialog.setFocus()
        newName, ok = renameDialog.getText(self, 'Rename Directory', 'Enter new name:')
        if ok:
            if newName == '' or '/' in newName:
                warn_str = "rename failed: the new name is an empty string or contains /: " + str(newName) + "; directory not renamed"
                warnings.warn(warn_str, RuntimeWarning)
                return
            else:
                # a directory's own component sits at its tree depth in the path
                depth = self.getTreeItemDepth(treeWidgetItem)
                level_list = actor_tree_widget_ID.split('/')
                level_list[depth] = str(newName)
                new_actor_tree_widget_ID = '/'.join(level_list)
                # we should check if this directory already exists before recursing through structure and renaming all children
                actor_tree_widget_list = self.ui.treeWidgetActors.findItems(new_actor_tree_widget_ID, QtCore.Qt.MatchExactly|QtCore.Qt.MatchRecursive, 1)
                if len(actor_tree_widget_list) > 0:
                    warn_str = "rename failed: a directory of that ID: " + str(level_list) + " already exists; directory not renamed"
                    warnings.warn(warn_str, RuntimeWarning)
                    return
                treeWidgetItem.setText(0, newName)
                # recurse through children of this tree widget, renaming the corresponding directory in its ID
                self.treeItemRecurseRenameID(treeWidgetItem, newName, depth)
def treeItemRecurseRenameID(self, treeWidgetItem, newName, depth):
    """Rewrite the path component at `depth` to `newName` in this item's ID
    (column 1), then apply the same rewrite to every descendant."""
    path_parts = str(treeWidgetItem.text(1)).split('/')
    path_parts[depth] = str(newName)
    treeWidgetItem.setText(1, '/'.join(path_parts))
    # every descendant shares the renamed directory component at the same depth
    for child_index in range(treeWidgetItem.childCount()):
        self.treeItemRecurseRenameID(treeWidgetItem.child(child_index), newName, depth)
def getTreeItemDepth(self, treeWidgetItem):
    """Return the number of ancestors of treeWidgetItem (0 for a top-level item)."""
    depth = -1
    node = treeWidgetItem
    # walk parent links until we fall off the top of the tree
    while node is not None:
        node = node.parent()
        depth += 1
    return depth
def treeItemRemove(self, treeWidgetItem):
    """Remove treeWidgetItem — and, for directories, its whole subtree — from
    the lookup dict, the VTK canvas, and finally the Qt tree widget itself."""
    tree_object = self.tree_widget_items_to_objects[treeWidgetItem]
    tree_object_type = tree_object.object_type
    if tree_object_type == ACTOR_TYPE or tree_object_type == BILLBOARD_TYPE or tree_object_type == AXES_TYPE:
        # drop the actor and its frame axes from the render canvas first
        actor = tree_object.actor
        self.vtk_main_canvas.removeActor(actor)
        axes = tree_object.axes
        self.vtk_main_canvas.removeActorFrameAxes(axes)
        del self.tree_widget_items_to_objects[treeWidgetItem]
    elif tree_object_type == DIR_TYPE:
        # recurse through children of this tree widget, removing them;
        # always take child(0) because each removal re-indexes the children
        while treeWidgetItem.childCount() != 0:
            child_tree_widget = treeWidgetItem.child(0)
            self.treeItemRemove(child_tree_widget)
        del self.tree_widget_items_to_objects[treeWidgetItem]
    # detach the widget itself from its parent (or from the invisible root)
    parent_tree_widget = treeWidgetItem.parent()
    if parent_tree_widget is None:
        # this tree widget is seated at the top level
        self.ui.treeWidgetActors.invisibleRootItem().removeChild(treeWidgetItem)
    else:
        parent_tree_widget.removeChild(treeWidgetItem)
def setBackgroundCheckMarks(self, value):
    """Sync the background menu check marks: light when value is truthy, dark otherwise."""
    light_selected = bool(value)
    self.ui.actionBGLight.setChecked(light_selected)
    self.ui.actionBGDark.setChecked(not light_selected)
def setCameraSelectionCheckMarks(self, value):
    """Sync the camera menu check marks: perspective when value is truthy, top-down otherwise."""
    perspective_selected = bool(value)
    self.ui.actionCameraPerspective.setChecked(perspective_selected)
    self.ui.actionCameraTopDown.setChecked(not perspective_selected)
##########################################################
### convenience functions for add primitive dialog box ###
##########################################################
# Each helper constructs one Primitives shape and registers it in the actor
# tree / VTK scene via addActor under the given '/'-separated level_list path.
def addGrid(self, level_list, full_length, cell_length):
    """Add a grid of side full_length with cells of side cell_length."""
    actor = Primitives.Grid(full_length, cell_length)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_GRID)
def addAxes(self, level_list):
    """Add a coordinate-axes primitive."""
    actor = Primitives.Axes()
    self.addActor(level_list, actor, Primitives.PRIMITIVE_AXES)
def addArrow(self, level_list, res):
    """Add an arrow with tessellation resolution res."""
    actor = Primitives.Arrow(res)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_ARROW)
def addBox(self, level_list, x, y, z):
    """Add a box with side lengths x, y, z."""
    actor = Primitives.Box(x, y, z)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_BOX)
def addSphere(self, level_list, r, t_res, p_res):
    """Add a sphere of radius r with theta/phi resolutions t_res, p_res."""
    actor = Primitives.Sphere(r, t_res, p_res)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_SPHERE)
def addCylinder(self, level_list, r, h, res):
    """Add a cylinder of radius r, height h, resolution res."""
    actor = Primitives.Cylinder(r, h, res)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_CYLINDER)
def addEllipsoid(self, level_list, xr, yr, zr):
    """Add an ellipsoid with radii xr, yr, zr."""
    actor = Primitives.Ellipsoid(xr, yr, zr)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_ELLIPSOID)
def addCone(self, level_list, r, h, res):
    """Add a cone of base radius r, height h, resolution res."""
    actor = Primitives.Cone(r, h, res)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_CONE)
def addTorus(self, level_list, ring_r, cross_section_r):
    """Add a torus with ring radius ring_r and tube radius cross_section_r."""
    actor = Primitives.Torus(ring_r, cross_section_r)
    self.addActor(level_list, actor, Primitives.PRIMITIVE_TORUS)
###################################################################
### convenience function for VTKCanvas special orientation axes ###
###################################################################
def addOrientationAxes(self):
    """Create the corner orientation-axes marker and register it with both the
    VTK interactor and the actor tree."""
    axes = Primitives.Axes()
    self.orientation_axes = vtk.vtkOrientationMarkerWidget()
    # NOTE(review): SetOrientationMarker expects a vtkProp; assumes
    # Primitives.Axes is (or subclasses) one — confirm against Primitives.
    self.orientation_axes.SetOrientationMarker(axes)
    self.orientation_axes.SetInteractor(self.vtk_main_canvas.vtk_interactor)
    self.orientation_axes.EnabledOn()
    # the marker tracks the camera but is not draggable by the user
    self.orientation_axes.InteractiveOff()
    # final False is addActor's add_bool flag — presumably skips re-adding the
    # actor to the main canvas since the marker widget already renders it; verify
    self.addActor(['orientation axes'], axes, Primitives.PRIMITIVE_AXES, False)
############################################
### trigger functions for LightField API ###
############################################
def _defaultActorObject(self, actor, actor_type, object_type):
    """Build and return a TreeObject carrying the standard default display
    properties for a newly added actor."""
    tree_obj = TreeObject()
    tree_obj.actor = actor
    tree_obj.actor_type = actor_type
    tree_obj.object_type = object_type
    # default visual properties shared by every freshly created actor
    display_defaults = {
        'actor_visible': True,
        'alpha': 1.0,
        'point_size': 1.0,
        'line_width': 1.0,
        'scale': 1.0,
        'mode': 'Surface',
        'color': [1.0, 1.0, 1.0],
        'offset': [0.0, 0.0, 0.0],
        'orientation': [0.0, 0.0, 0.0],
    }
    for attr_name, attr_value in display_defaults.items():
        setattr(tree_obj, attr_name, attr_value)
    tree_obj.transform = vtk.vtkTransform()
    return tree_obj
def _getActorTreeObjectFromLevelList(self, level_list):
    """Return the actor-like TreeObject addressed by level_list, or None.

    Resolves the tree widget item for level_list and returns its TreeObject
    only when it is an actor, billboard, or axes object; returns None when
    the item does not exist or is a directory.
    """
    # fix: dropped unused local actor_tree_widget_ID (the helper joins the
    # level list itself); made the fall-through None return explicit
    treeWidgetItem = self._getActorTreeWidgetItemFromLevelList(level_list)
    if treeWidgetItem is None:
        return None
    tree_object = self.tree_widget_items_to_objects[treeWidgetItem]
    if tree_object.object_type in (ACTOR_TYPE, BILLBOARD_TYPE, AXES_TYPE):
        return tree_object
    # not an actor-type object (e.g. a directory)
    return None
def _getActorTreeWidgetItemFromLevelList(self, level_list):
    """Return the tree widget item whose column-1 ID equals '/'.join(level_list),
    or warn and return None when no such actor item exists."""
    actor_tree_widget_ID = '/'.join(level_list)
    matches = self.ui.treeWidgetActors.findItems(
        actor_tree_widget_ID, QtCore.Qt.MatchExactly | QtCore.Qt.MatchRecursive, 1)
    if not matches:
        warn_str = "failed: an actor tree widget with that level list ID: " + str(level_list) + " does not exist"
        warnings.warn(warn_str, RuntimeWarning)
        return None
    return matches[0]
def _getDirectoryTreeObjectFromLevelList(self, level_list):
    """Return the directory TreeObject addressed by level_list, or None.

    Resolves the tree widget item for level_list (directory IDs carry a
    trailing '/') and returns its TreeObject only when it is a directory;
    returns None when the item does not exist or is not a directory.
    """
    # fix: dropped unused local actor_tree_widget_ID (the helper joins the
    # level list itself); made the fall-through None return explicit
    treeWidgetItem = self._getDirectoryTreeWidgetItemFromLevelList(level_list)
    if treeWidgetItem is None:
        return None
    tree_object = self.tree_widget_items_to_objects[treeWidgetItem]
    if tree_object.object_type == DIR_TYPE:
        return tree_object
    # the resolved item is not a directory
    return None
def _getDirectoryTreeWidgetItemFromLevelList(self, level_list):
    """Return the tree widget item for a directory level list (directory IDs
    end with '/'), or warn and return None when it does not exist."""
    actor_tree_widget_ID = '/'.join(level_list) + '/'
    matches = self.ui.treeWidgetActors.findItems(
        actor_tree_widget_ID, QtCore.Qt.MatchExactly | QtCore.Qt.MatchRecursive, 1)
    if not matches:
        warn_str = "failed: a directory tree widget with that level list ID: " + str(level_list) + " does not exist"
        warnings.warn(warn_str, RuntimeWarning)
        return None
    return matches[0]
def addActor(self, level_list, actor, actor_type, add_bool=True):
# given a level list, actor, and actor type, add the associated tree widget to the GUI and add the actor to the VTK scene
self.ui.treeWidgetActors.blockSignals(True)
# an empty tree, or a tree which contains an empty string, or a non-string type, or string with / char, is malformed
if len(level_list) == 0:
warn_str = "addActor failed: the actor tree is malformed, it might be empty: " + str(level_list) + "; actor not added to the scene"
warnings.warn(warn_str, RuntimeWarning)
self.ui.treeWidgetActors.blockSignals(False)
self.emit(QtCore.SIGNAL('addActorStatus'), Status.MALFORMED_PATH)
return
for i in level_list:
if i == '' or type(i) is not str or '/' in i:
warn_str = "addActor failed: the actor tree is malformed, it might contain unsupported characters: " + str(i) + "; actor not added to the scene"
warnings.warn(warn_str, RuntimeWarning)
self.ui.treeWidgetActors.blockSignals(False)
self.emit(QtCore.SIGNAL('addActorStatus'), Status.MALFORMED_PATH)
return
# the name of the actor is the last element of the tree
actor_name = level_list[-1]
# get the actor tree widget ID by concatenating all level_list elements
actor_tree_widget_ID = '/'.join(level_list)
# if the actor tree widget already exists, then it is invalid
actor_tree_widget_list = self.ui.treeWidgetActors.findItems(actor_tree_widget_ID, QtCore.Qt.MatchExactly|QtCore.Qt.MatchRecursive, 1)
if len(actor_tree_widget_list) > 0:
warn_str = "addActor failed: the actor tree is malformed because an actor at that level list already exists: " + str(level_list) + "; actor not added to the scene"
warnings.warn(warn_str, RuntimeWarning)
self.ui.treeWidgetActors.blockSignals(False)
self.emit(QtCore.SIGNAL('addActorStatus'), Status.EXISTING_PATH)
return
# recurse through | |
# <gh_stars>1-10  (stray scrape artifact; kept as a comment so the module parses)
import unittest
import datetime
import pytz
from forthic.interpreter import Interpreter
from forthic.tokenizer import DLE
from forthic.global_module import GlobalModuleError
class TestGlobalModule(unittest.TestCase):
def test_literal(self):
    """Literal tokens push typed values: bools, ints, floats, dates, 12/24h times."""
    interp = Interpreter()
    interp.run("TRUE FALSE 2 3.14 2020-06-05 9:00 11:30 PM 22:15 AM")
    self.assertEqual(interp.stack[0], True)
    self.assertEqual(interp.stack[1], False)
    self.assertEqual(interp.stack[2], 2)
    self.assertEqual(interp.stack[3], 3.14)
    self.assertEqual(interp.stack[4], datetime.date(2020, 6, 5))
    self.assertEqual(interp.stack[5], datetime.time(9, 0))
    # PM/AM adjust the preceding time: 11:30 PM -> 23:30, 22:15 AM -> 10:15
    self.assertEqual(interp.stack[6], datetime.time(23, 30))
    self.assertEqual(interp.stack[7], datetime.time(10, 15))

def test_variables(self):
    """VARIABLES declares names in the app module."""
    interp = Interpreter()
    interp.run("['x' 'y'] VARIABLES")
    variables = interp.app_module.variables
    self.assertIsNotNone(variables.get('x'))
    self.assertIsNotNone(variables.get('y'))

def test_set_get_variables(self):
    """'!' stores a value into a variable; '@' pushes it back onto the stack."""
    interp = Interpreter()
    interp.run("['x'] VARIABLES")
    interp.run("24 x !")
    x_var = interp.app_module.variables['x']
    self.assertEqual(x_var.get_value(), 24)
    interp.run("x @")
    self.assertEqual(interp.stack[-1], 24)

def test_bang_at(self):
    """'!@' stores a value and also leaves it on the stack."""
    interp = Interpreter()
    interp.run("['x'] VARIABLES")
    interp.run("24 x !@")
    x_var = interp.app_module.variables['x']
    self.assertEqual(x_var.get_value(), 24)
    self.assertEqual(interp.stack[-1], 24)

def test_interpret(self):
    """INTERPRET runs a Forthic string, including module definitions."""
    interp = Interpreter()
    interp.run("'24' INTERPRET")
    self.assertEqual(interp.stack[-1], 24)
    interp.run("""'{module-A : MESSAGE "Hi" ;}' INTERPRET""")
    interp.run("{module-A MESSAGE}")
    self.assertEqual(interp.stack[-1], 'Hi')

def test_memo(self):
    """MEMO caches a word's result; the '!' variants force a refresh."""
    interp = Interpreter()
    interp.run("""
    ['count'] VARIABLES
    0 count !
    'COUNT' 'count @ 1 + count ! count @' MEMO
    """)
    interp.run("COUNT")
    self.assertEqual(interp.stack[-1], 1)
    # second call hits the cache: the counter is not incremented again
    interp.run("COUNT")
    self.assertEqual(interp.stack[-1], 1)
    # COUNT! refreshes without pushing; the following COUNT pushes 2
    interp.run("COUNT! COUNT")
    self.assertEqual(interp.stack[-1] , 2)
    self.assertEqual(len(interp.stack), 3)
    # COUNT!@ refreshes and pushes the new value
    interp.run("COUNT!@")
    self.assertEqual(interp.stack[-1] , 3)
def test_rec(self):
    """REC builds a record (dict) from [key value] pairs."""
    interp = Interpreter()
    interp.run("""
    [ ["alpha" 2] ["beta" 3] ["gamma" 4] ] REC
    """)
    self.assertEqual(len(interp.stack), 1)
    rec = interp.stack[-1]
    self.assertEqual(rec["alpha"], 2)
    self.assertEqual(rec["gamma"], 4)

def test_rec_at(self):
    """REC@ looks a value up by key in a record."""
    interp = Interpreter()
    interp.run("""
    [ ["alpha" 2] ["beta" 3] ["gamma" 4] ] REC
    'beta' REC@
    """)
    self.assertEqual(len(interp.stack), 1)
    self.assertEqual(interp.stack[0] , 3)

def test_l_rec_bang(self):
    """<REC! sets a (possibly nested) value in a record, creating it from NULL."""
    # Case: Set value on a record
    interp = Interpreter()
    interp.run("""
    [ ["alpha" 2] ["beta" 3] ["gamma" 4] ] REC
    700 'beta' <REC! 'beta' REC@
    """)
    self.assertEqual(len(interp.stack), 1)
    self.assertEqual(interp.stack[0], 700)
    # Case: Set a nested value
    interp = Interpreter()
    interp.run("""
    [] REC "Green" ["2021-03-22" "TEST-1234"] <REC! ["2021-03-22" "TEST-1234"] REC@
    """)
    self.assertEqual(len(interp.stack), 1)
    self.assertEqual(interp.stack[0], "Green")
    # Case: Set value on a NULL
    interp = Interpreter()
    interp.run("""
    NULL 700 'beta' <REC! 'beta' REC@
    """)
    self.assertEqual(len(interp.stack), 1)
    self.assertEqual(interp.stack[0], 700)

def test_SCREEN_bang(self):
    """SCREEN! stores named screen content in the app module."""
    interp = Interpreter()
    interp.run("""
    'Screen content' 'my-screen' SCREEN!
    """)
    self.assertEqual('Screen content', interp.app_module.get_screen('my-screen'))

def test_SCREEN(self):
    """SCREEN pushes the content of a stored screen."""
    interp = Interpreter()
    interp.run("""
    'Screen content' 'my-screen' SCREEN!
    'my-screen' SCREEN
    """)
    self.assertEqual(interp.stack[0], 'Screen content')

def test_LOAD_SCREEN(self):
    """LOAD-SCREEN interprets a screen's content; recursive loads must raise."""
    # Test normal load
    interp = Interpreter()
    interp.run("""
    ': MESSAGE "Howdy!";' 'message' SCREEN!
    'message' LOAD-SCREEN
    MESSAGE
    """)
    self.assertEqual(interp.stack[0], 'Howdy!')
    # Test that recursive loads are prevented
    def load_recursive_screen():
        interp = Interpreter()
        interp.run("""
        ': MESSAGE "Howdy!"; "message" LOAD-SCREEN' 'message' SCREEN!
        'message' LOAD-SCREEN
        """)
    self.assertRaises(GlobalModuleError, load_recursive_screen)
def test_append(self):
    """APPEND extends an array, or adds a [key value] pair to a record."""
    # Test append to array
    interp = Interpreter()
    interp.run("""
    [ 1 2 3 ] 4 APPEND
    """)
    self.assertEqual(len(interp.stack), 1)
    array = interp.stack[-1]
    self.assertEqual(array, [1, 2, 3, 4])
    # Test append to record
    interp = Interpreter()
    interp.run("""
    [["a" 1] ["b" 2]] REC ["c" 3] APPEND
    """)
    self.assertEqual(len(interp.stack), 1)
    rec = interp.stack[-1]
    values = [rec[k] for k in ["a", "b", "c"]]
    self.assertEqual(values, [1, 2, 3])

def test_reverse(self):
    """REVERSE reverses an array; for records it reverses key order."""
    interp = Interpreter()
    interp.run("""
    [ 1 2 3 ] REVERSE
    """)
    self.assertEqual(len(interp.stack), 1)
    array = interp.stack[-1]
    self.assertEqual(array, [3, 2, 1])
    # Reverse record (no-op for records)
    interp = Interpreter()
    interp.run("""
    [["a" 1] ["b" 2]] REC REVERSE
    """)
    self.assertEqual(len(interp.stack), 1)
    self.assertEqual(list(interp.stack[-1].keys()), ["b", "a"])

def test_unique(self):
    """UNIQUE de-duplicates array elements / record values."""
    interp = Interpreter()
    interp.run("""
    [ 1 2 3 3 2 ] UNIQUE
    """)
    array = interp.stack[-1]
    self.assertEqual(array, [1, 2, 3])
    interp = Interpreter()
    interp.run("""
    [["a" 1] ["b" 2] ["c" 2] ["d" 1]] REC UNIQUE
    """)
    rec = interp.stack[-1]
    self.assertEqual(sorted(rec.values()), [1, 2])

def test_del(self):
    """<DEL removes by index from an array / by key from a record; missing keys are a no-op."""
    interp = Interpreter()
    interp.run("""
    [ "a" "b" "c" ] 1 <DEL
    """)
    array = interp.stack[-1]
    self.assertEqual(array, ["a", "c"])
    interp = Interpreter()
    interp.run("""
    [["a" 1] ["b" 2] ["c" 3]] REC "b" <DEL
    """)
    rec = interp.stack[-1]
    self.assertEqual(sorted(rec.keys()), ["a", "c"])
    # deleting an absent key leaves the record unchanged
    interp = Interpreter()
    interp.run("""
    [["a" 1] ["b" 2] ["c" 3]] REC "d" <DEL
    """)
    rec = interp.stack[-1]
    self.assertEqual(sorted(rec.keys()), ["a", "b", "c"])

def test_relabel(self):
    """RELABEL selects and renames indices (arrays) or keys (records)."""
    interp = Interpreter()
    interp.run("""
    [ "a" "b" "c" ] [0 2] [25 23] RELABEL
    """)
    self.assertEqual(len(interp.stack), 1)
    array = interp.stack[0]
    # new labels are sorted, so element 2 (label 23) precedes element 0 (label 25)
    self.assertEqual(array, ["c", "a"])
    interp = Interpreter()
    interp.run("""
    [["a" 1] ["b" 2] ["c" 3]] REC ["a" "c"] ["alpha" "gamma"] RELABEL
    """)
    self.assertEqual(len(interp.stack), 1)
    rec = interp.stack[0]
    self.assertEqual(sorted(rec.keys()), ["alpha", "gamma"])
    self.assertEqual([rec[k] for k in ["alpha", "gamma"]], [1, 3])
def make_records(self):
    """Return a fixed list of seven ticket-like records (key/assignee/status)
    used as a fixture by the grouping and mapping tests."""
    rows = [
        (100, "user1", "OPEN"),
        (101, "user1", "OPEN"),
        (102, "user1", "IN PROGRESS"),
        (103, "user1", "CLOSED"),
        (104, "user2", "IN PROGRESS"),
        (105, "user2", "OPEN"),
        (106, "user2", "CLOSED"),
    ]
    return [{"key": key, "assignee": assignee, "status": status}
            for key, assignee, status in rows]
def test_by_field(self):
    """BY-FIELD indexes records by the value of the given field."""
    interp = Interpreter()
    interp.stack_push(self.make_records())
    interp.run("'key' BY-FIELD")
    grouped = interp.stack[0]
    self.assertEqual(grouped[104]['status'], "IN PROGRESS")

def test_group_by_field(self):
    """GROUP-BY-FIELD groups records by a field; arrays and records group alike."""
    interp = Interpreter()
    interp.stack_push(self.make_records())
    interp.run("'assignee' GROUP-BY-FIELD")
    grouped = interp.stack[0]
    self.assertEqual(sorted(grouped.keys()), ["user1", "user2"])
    self.assertEqual(len(grouped["user1"]), 4)
    self.assertEqual(len(grouped["user2"]), 3)
    # Test grouping a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    # Now group a record
    interp.run("'assignee' GROUP-BY-FIELD")
    grouped_rec = interp.stack[0]
    self.assertEqual(sorted(grouped_rec.keys()), ["user1", "user2"])
    self.assertEqual(len(grouped_rec["user1"]), 4)
    self.assertEqual(len(grouped_rec["user2"]), 3)
    # array and record inputs must produce the same grouping
    self.assertEqual(grouped, grouped_rec)

def test_group_by(self):
    """GROUP-BY groups using a Forthic expression to compute each group key."""
    interp = Interpreter()
    interp.stack_push(self.make_records())
    interp.run("""
    "'assignee' REC@" GROUP-BY
    """)
    grouped = interp.stack[0]
    self.assertEqual(sorted(grouped.keys()), ["user1", "user2"])
    self.assertEqual(len(grouped["user1"]), 4)
    self.assertEqual(len(grouped["user2"]), 3)
    # Test grouping a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    # Now group a record
    interp.run("""
    "'assignee' REC@" GROUP-BY
    """)
    grouped_rec = interp.stack[0]
    self.assertEqual(sorted(grouped_rec.keys()), ["user1", "user2"])
    self.assertEqual(len(grouped_rec["user1"]), 4)
    self.assertEqual(len(grouped_rec["user2"]), 3)
    self.assertEqual(grouped, grouped_rec)

def test_group_by_w_key(self):
    """GROUP-BY-w/KEY passes both key and value to the grouping expression."""
    interp = Interpreter()
    interp.stack_push(self.make_records())
    interp.run("""
    ['key' 'val'] VARIABLES
    "val ! key ! key @ 3 MOD" GROUP-BY-w/KEY
    """)
    grouped = interp.stack[0]
    # array input: the key is the element index (0..6), grouped modulo 3
    self.assertEqual(sorted(grouped.keys()), [0, 1, 2])
    self.assertEqual(len(grouped[0]), 3)
    self.assertEqual(len(grouped[1]), 2)
    self.assertEqual(len(grouped[2]), 2)
    # Test grouping a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    # Now group a record
    interp.run("""
    ['key' 'val'] VARIABLES
    "val ! key ! key @ 2 *" GROUP-BY-w/KEY
    """)
    grouped_rec = interp.stack[0]
    # record input: the key is the record key (100..106), doubled here
    self.assertEqual(sorted(list(grouped_rec.keys())), [200, 202, 204, 206, 208, 210, 212])

def test_groups_of(self):
    """GROUPS-OF chunks an array (or a record's values) into fixed-size groups."""
    interp = Interpreter()
    interp.run("""
    [1 2 3 4 5 6 7 8] 3 GROUPS-OF
    """)
    groups = interp.stack[0]
    self.assertEqual(groups[0], [1, 2, 3])
    self.assertEqual(groups[1], [4, 5, 6])
    self.assertEqual(groups[2], [7, 8])
    # Test grouping a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    # Now group a record
    interp.run("""
    3 GROUPS-OF
    """)
    recs = interp.stack[0]
    self.assertEqual(len(recs[0]), 3)
    self.assertEqual(len(recs[1]), 3)
    self.assertEqual(len(recs[2]), 1)
def test_map(self):
    """MAP applies a Forthic expression to each element / record value."""
    interp = Interpreter()
    interp.run("""
    [1 2 3 4 5] '2 *' MAP
    """)
    array = interp.stack[0]
    self.assertEqual(array, [2, 4, 6, 8, 10])
    # Test mapping over a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    interp.run("""
    "'status' REC@" MAP
    """)
    record = interp.stack[0]
    self.assertEqual(record[100], "OPEN")
    self.assertEqual(record[102], "IN PROGRESS")
    self.assertEqual(record[106], "CLOSED")

def test_map_w_key(self):
    """MAP-w/KEY also pushes the index/key before each value."""
    interp = Interpreter()
    interp.run("""
    [1 2 3 4 5] '+ 2 *' MAP-w/KEY
    """)
    array = interp.stack[0]
    # each result is (index + value) * 2
    self.assertEqual(array, [2, 6, 10, 14, 18])
    # Test mapping over a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    interp.run("""
    ["k" "v"] VARIABLES
    "v ! k ! k @ >STR v @ 'status' REC@ CONCAT" MAP-w/KEY
    """)
    record = interp.stack[0]
    self.assertEqual(record[100], "100OPEN")
    self.assertEqual(record[102], "102IN PROGRESS")
    self.assertEqual(record[106], "106CLOSED")

def test_foreach(self):
    """FOREACH folds each element through an expression, accumulating on the stack."""
    interp = Interpreter()
    interp.run("""
    0 [1 2 3 4 5] '+' FOREACH
    """)
    sum = interp.stack[0]
    self.assertEqual(sum, 15)
    # Test iterating over a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    interp.run("""
    "" SWAP "'status' REC@ CONCAT" FOREACH
    """)
    string = interp.stack[0]
    self.assertEqual(string, "OPENOPENIN PROGRESSCLOSEDIN PROGRESSOPENCLOSED")

def test_foreach_w_key(self):
    """FOREACH-w/KEY also pushes the index/key on each iteration."""
    interp = Interpreter()
    interp.run("""
    0 [1 2 3 4 5] '+ +' FOREACH-w/KEY
    """)
    sum = interp.stack[0]
    # accumulates value + index for each element: 15 + (0+1+2+3+4) = 25
    self.assertEqual(sum, 25)
    # Test iterating over a record
    interp = Interpreter()
    # First, set up the record
    records = self.make_records()
    by_key = {}
    for rec in records:
        by_key[rec["key"]] = rec
    interp.stack_push(by_key)
    interp.run("""
    "" SWAP "'status' REC@ CONCAT CONCAT" FOREACH-w/KEY
    """)
    string = interp.stack[0]
    self.assertEqual(string, "100OPEN101OPEN102IN PROGRESS103CLOSED104IN PROGRESS105OPEN106CLOSED")
def test_foreach_to_errors(self):
interp = Interpreter()
interp.run("""
['2' '3' 'GARBAGE' | |
_find_only_threshold(self):
with torch.no_grad():
last_layer_fraction = self.nap_params.pop("last_layer_fraction", None)
self._get_layers_shapes(self.nap_params)
self.monitor = FullNetMonitor(self.class_count, self.nap_device,
layers_shapes=self.monitored_layers_shapes)
if not last_layer_fraction:
self.omit = False
else:
self.omit = True
neurons_to_monitor = self._choose_neurons_to_monitor(
int(self.monitored_layers_shapes[0] * last_layer_fraction))
self.monitor.set_neurons_to_monitor(neurons_to_monitor)
self._draw_train_vs_valid_heatmaps(self.known_loader, self.unknown_loader, self.nap_params)
# self._draw_train_heatmaps(self.train_loader, self.nap_params)
return 0
self._add_class_patterns_to_monitor(self.train_loader, nap_params=self.nap_params)
self._generate_test_distances(loader=self.known_loader, train=True)
self._generate_test_distances(loader=self.unknown_loader, train=False)
return 0
# self._check_duplicates_count()
df_known = self._process_dataset(self.known_loader, nap_params=self.nap_params)
df_unknown = self._process_dataset(self.unknown_loader, nap_params=self.nap_params)
self.threshold, acc = self._find_threshold(df_known, df_unknown, integers=True, cut_tail=True)
print(f"threshold: {self.threshold}, accuracy: {acc}")
self.accuracies = acc
return acc
def _find_thresolds_for_every_layer(self, n_steps=5):
    """Sweep quantile values and pooling types per layer, recording per-layer
    OOD-detection thresholds and accuracies on the validation pair.

    NOTE(review): the method name misspells "thresholds"; kept because callers
    may depend on it. The method currently returns 0 immediately after storing
    self.threshold / self.scaled_threshold — everything after that `return 0`
    is unreachable experiment code retained for reference.
    """
    with torch.no_grad():
        # last_layer_fraction (if set) restricts monitoring to a fraction of
        # the last layer's neurons; popping it removes it from the sweep params
        last_layer_fraction = self.nap_params.pop("last_layer_fraction", None)
        self._get_layers_shapes(self.nap_params)
        if not last_layer_fraction:
            self.omit = False
        else:
            self.omit = True
            neurons_to_monitor = self._choose_neurons_to_monitor(
                int(self.monitored_layers_shapes[0] * last_layer_fraction))
            self.monitor.set_neurons_to_monitor(neurons_to_monitor)
        # axes: (pool type {max, avg}) x (layer) x (quantile step)
        thresholds = np.zeros((2, len(self.monitored_layers_shapes), n_steps))
        accuracies = np.zeros(thresholds.shape)
        scores = np.zeros(accuracies.shape)
        linspace = np.linspace(0.1, 0.9, num=n_steps)
        # larger factor the closer a quantile is to 0.5 (distance from 0/1)
        quantile_factors = np.sqrt(1. / np.abs(linspace - np.rint(linspace)))
        self.add_factor = np.zeros((len(self.shape_factors), n_steps))
        self.multiplier = np.zeros((len(self.shape_factors), n_steps))
        for pool_type_id, pool_type in enumerate(["max", "avg"]):
            for i, q in enumerate(linspace):
                # apply this (quantile, pool type) combination to every layer
                for k in self.nap_params:
                    self.nap_params[k]["quantile"] = q
                    self.nap_params[k]["pool_type"] = pool_type
                # fresh monitor per combination: patterns depend on nap_params
                self.monitor = FullNetMonitor(self.class_count, self.nap_device,
                                              layers_shapes=self.monitored_layers_shapes)
                self._add_class_patterns_to_monitor(self.train_loader, nap_params=self.nap_params)
                df_known = self._process_dataset(self.known_loader, nap_params=self.nap_params)
                df_unknown = self._process_dataset(self.unknown_loader, nap_params=self.nap_params)
                thresholds[pool_type_id, :, i], accuracies[pool_type_id, :, i] = self._find_threshold(df_known,
                                                                                                     df_unknown,
                                                                                                     integers=True,
                                                                                                     cut_tail=True)
        for k in self.nap_params:
            # nap_params keys index layers back-to-front; convert to layer id
            layer_id = len(self.monitored_layers_shapes) - int(k) - 1
            self.add_factor[layer_id] = quantile_factors + self.shape_factors[layer_id]
            self.multiplier[layer_id] = quantile_factors * (self.max_factor / self.shape_factors[layer_id])
        self.valid_accuracies = accuracies
        self.threshold = thresholds
        self.scaled_threshold = (self.threshold + self.add_factor) * self.multiplier
        return 0
        # --- UNREACHABLE from here on (early return above); kept for reference ---
        quantile_factors = np.sqrt(1. / np.abs(linspace - np.rint(linspace)))
        max_threshold = np.max((thresholds + quantile_factors) * quantile_factors, axis=2)[:, :, np.newaxis]
        tf = 0.1
        scores = (accuracies - 0.5) * (tf + np.abs(
            ((thresholds + quantile_factors) * quantile_factors - max_threshold) / max_threshold))
        max_acc_ids = np.argmax(scores, axis=2)[:, :, np.newaxis]
        self.threshold = np.take_along_axis(thresholds, max_acc_ids, axis=2).squeeze()
        self.accuracies = np.take_along_axis(accuracies, max_acc_ids, axis=2).squeeze()
        new_th = np.zeros(len(self.monitored_layers_shapes))
        new_acc = np.zeros(len(self.monitored_layers_shapes))
        self.multiplier = np.zeros(len(self.monitored_layers_shapes))
        self.add_factor = np.zeros(len(self.monitored_layers_shapes))
        # for k in self.nap_params:
        #     layer_id = len(self.monitored_layers_shapes) - int(k) - 1
        #
        #     max_threshold_pools = np.min(max_threshold[:, layer_id, :])
        #
        #     scores = (self.accuracies[:, layer_id] - 0.5) * (tf + np.abs(
        #         ((self.threshold[:, layer_id] + quantile_factors[max_acc_ids[:, layer_id, :]]) * quantile_factors[max_acc_ids[:, layer_id, :]] - max_threshold_pools) / max_threshold_pools))
        #     # if (scores[:, 0] >= scores[:, 1]).all():
        #     if self.accuracies[0, layer_id] >= self.accuracies[1, layer_id]:
        #         self.nap_params[k]["quantile"] = linspace[max_acc_ids[0, layer_id, :]].item()
        #         self.nap_params[k]["pool_type"] = "max"
        #         self.add_factor[layer_id] = quantile_factors[max_acc_ids[0, layer_id, :]] + self.shape_factors[layer_id]
        #         self.multiplier[layer_id] = quantile_factors[max_acc_ids[0, layer_id, :]] * (self.max_factor / self.shape_factors[layer_id])
        #         new_th[layer_id] = self.threshold[0, layer_id]
        #         new_acc[layer_id] = self.accuracies[0, layer_id]
        #     else:
        #         self.nap_params[k]["quantile"] = linspace[max_acc_ids[1, layer_id, :]].item()
        #         self.nap_params[k]["pool_type"] = "avg"
        #         self.add_factor[layer_id] = quantile_factors[max_acc_ids[1, layer_id, :]] + self.shape_factors[
        #             layer_id]
        #         self.multiplier[layer_id] = quantile_factors[max_acc_ids[1, layer_id, :]] * (
        #                 self.max_factor / self.shape_factors[layer_id])
        #         new_th[layer_id] = self.threshold[1, layer_id]
        #         new_acc[layer_id] = self.accuracies[1, layer_id]
        #
        # self.accuracies = new_acc
        # self.threshold = new_th
        # # self.votes = int(len(self.monitored_layers_shapes) / 3 + 1)
        # # if self.votes % 2 == 0:
        # #     self.votes += 1
        # # while (self.accuracies > 0.5).sum() < self.votes:
        # #     self.votes -= 2
        # self.scaled_threshold = (self.threshold + self.add_factor) * self.multiplier
        # self.votes = 9
        # self.chosen_layers = self.accuracies.argsort()[::-1][:self.votes]
        # print(self.chosen_layers)
        # print(f"threshold: {self.threshold}, accuracy: {self.accuracies} nap_params: {self.nap_params}")
        # np.savez("results/article_plots/full_nets/fixed/" + self.model_name + "_" + self.train_dataset_name + "_" +
        #          self.valid_dataset_name + "allth-acc.csv", thresholds=thresholds, accuracies=accuracies)
        # self.monitor = FullNetMonitor(self.class_count, self.nap_device,
        #                               layers_shapes=self.monitored_layers_shapes)
        # self._add_class_patterns_to_monitor(self.train_loader, nap_params=self.nap_params)
        # return self.accuracies
        # select the best max-pool configuration per layer
        for k in self.nap_params:
            layer_id = len(self.monitored_layers_shapes) - int(k) - 1
            self.nap_params[k]["quantile"] = linspace[max_acc_ids[0, layer_id, :]].item()
            self.nap_params[k]["pool_type"] = "max"
            new_th[layer_id] = self.threshold[0, layer_id]
            new_acc[layer_id] = self.accuracies[0, layer_id]
        self.accuracies_max = copy.deepcopy(new_acc)
        self.threshold_max = copy.deepcopy(new_th)
        self.nap_params_max = copy.deepcopy(self.nap_params)
        # odd vote count, shrunk until enough layers beat chance accuracy
        self.votes = int(len(self.monitored_layers_shapes) / 3 + 1)
        if self.votes % 2 == 0:
            self.votes += 1
        while (self.accuracies > 0.5).sum() < self.votes:
            self.votes -= 2
        self.chosen_layers_max = self.accuracies.argsort()[::-1][:self.votes]
        # select the best avg-pool configuration per layer
        for k in self.nap_params:
            layer_id = len(self.monitored_layers_shapes) - int(k) - 1
            self.nap_params[k]["quantile"] = linspace[max_acc_ids[1, layer_id, :]].item()
            self.nap_params[k]["pool_type"] = "avg"
            new_th[layer_id] = self.threshold[1, layer_id]
            new_acc[layer_id] = self.accuracies[1, layer_id]
        self.accuracies_avg = new_acc
        self.threshold_avg = new_th
        self.nap_params_avg = copy.deepcopy(self.nap_params)
        self.votes = int(len(self.monitored_layers_shapes) / 3 + 1)
        if self.votes % 2 == 0:
            self.votes += 1
        while (self.accuracies > 0.5).sum() < self.votes:
            self.votes -= 2
        self.chosen_layers_avg = self.accuracies.argsort()[::-1][:self.votes]
        print(f"threshold: {self.threshold}, accuracy: {self.accuracies} nap_params: {self.nap_params}")
        np.savez(
            "results/article_plots/full_nets/fixed/hamming/" + self.model_name + "_" + self.train_dataset_name + "_" +
            self.valid_dataset_name, thresholds=thresholds, accuracies=accuracies)
        # build one monitor per pooling flavor with its selected parameters
        self.monitor_max = FullNetMonitor(self.class_count, self.nap_device,
                                          layers_shapes=self.monitored_layers_shapes)
        self._add_class_patterns_to_monitor(self.train_loader, nap_params=self.nap_params_max,
                                            monitor=self.monitor_max)
        self.monitor_avg = FullNetMonitor(self.class_count, self.nap_device,
                                          layers_shapes=self.monitored_layers_shapes)
        self._add_class_patterns_to_monitor(self.train_loader, nap_params=self.nap_params_avg,
                                            monitor=self.monitor_avg)
        return self.accuracies
    def _find_best_layer_to_monitor(self):
        """Grid-search NAP hyper-parameters (layer index, pooling mode, quantile q0)
        for the single-layer monitor and keep the combination with the best
        known-vs-unknown separation accuracy.

        Side effects: overwrites self.monitor for every candidate and records the
        winning self.threshold, self.best_monitored_count and self.nap_params.
        Returns the best accuracy found.

        NOTE(review): q1..q4 loops each have num=1, so they contribute a single
        fixed value; they appear to be left over from a wider search.
        """
        best_acc = 0
        results = []  # one [q0, q1, q2, q3, q4, i, threshold, acc] row per candidate
        self.omit = False
        with torch.no_grad():
            # First pass: convolutional layers (even indices 0..12) with pooling 1..3.
            for layer in range(0, 13, 2):
                for pool in range(1, 4):
                    # for q0 in np.concatenate((np.linspace(0.3, 0.5, num=3), np.linspace(0.81, 0.99, num=7))):
                    for q0 in np.linspace(0.1, 0.9, num=5):
                        for q1 in np.linspace(0.2, 0.2, num=1):
                            for q2 in np.linspace(0.2, 0.3, num=1):
                                for q3 in np.linspace(0.95, 0.97, num=1):
                                    for q4 in np.linspace(0.85, 0.9, num=1):
                                        # nap_params = [layer, pool, q0, q1, q2]
                                        # resnet_nap_params = [q0, q1, q2, q3, q4]
                                        nap_params = [layer, pool, q0]
                                        self._get_layers_shapes(nap_params)
                                        self.monitor = Monitor(self.class_count, self.nap_device,
                                                               layers_shapes=self.monitored_layers_shapes)
                                        self._add_class_patterns_to_monitor(self.train_loader, nap_params=nap_params)
                                        # num=1: only the full layer width is tried here.
                                        for i in tqdm.tqdm(np.linspace(int(self.monitored_layers_shapes[0]),
                                                                       self.monitored_layers_shapes[0] -
                                                                       self.monitored_layers_shapes[0] / 4, num=1)):
                                            # print(
                                            # f" quantile0: {q0} quantile1: {q1} quantile2: {q2} quantile3: {q3} quantile4: {q4} lastlayer: {i}")
                                            print(f" quantile0: {q0} layer: {layer} pool: {pool} lastlayer: {i}")
                                            i = int(i)
                                            if self.omit:
                                                neurons_to_monitor = self._choose_neurons_to_monitor(i)
                                                self.monitor.set_neurons_to_monitor(neurons_to_monitor)
                                            df_known = self._process_dataset(self.known_loader, nap_params=nap_params)
                                            df_unknown = self._process_dataset(self.unknown_loader,
                                                                               nap_params=nap_params)
                                            threshold, acc = self._find_threshold(df_known, df_unknown, integers=True)
                                            results.append([q0, q1, q2, q3, q4, i, threshold, acc])
                                            # Require a 1-point improvement before accepting a new optimum.
                                            if acc > best_acc + 0.01:
                                                self.threshold = threshold
                                                best_acc = acc
                                                self.best_monitored_count = i
                                                self.nap_params = nap_params
            # Second pass: layers 13/14 use no pooling (pool = 0) and two extra quantiles.
            for layer in [13, 14]:
                for q0 in np.linspace(0.1, 0.9, num=5):
                    for q1 in np.linspace(0.2, 0.2, num=1):
                        for q2 in np.linspace(0.2, 0.3, num=1):
                            for q3 in np.linspace(0.95, 0.97, num=1):
                                for q4 in np.linspace(0.85, 0.9, num=1):
                                    pool = 0
                                    nap_params = [layer, pool, q0, q1, q2]
                                    # nap_params = [q0, q1, q2, q3, q4]
                                    self._get_layers_shapes(nap_params)
                                    self.monitor = Monitor(self.class_count, self.nap_device,
                                                           layers_shapes=self.monitored_layers_shapes)
                                    self._add_class_patterns_to_monitor(self.train_loader, nap_params=nap_params)
                                    for i in tqdm.tqdm(np.linspace(int(self.monitored_layers_shapes[0]),
                                                                   self.monitored_layers_shapes[0] -
                                                                   self.monitored_layers_shapes[0] / 4, num=1)):
                                        # print(
                                        # f" quantile0: {q0} quantile1: {q1} quantile2: {q2} quantile3: {q3} quantile4: {q4} lastlayer: {i}")
                                        print(f" quantile0: {q0} layer: {layer} pool: {pool} lastlayer: {i}")
                                        i = int(i)
                                        if self.omit:
                                            neurons_to_monitor = self._choose_neurons_to_monitor(i)
                                            self.monitor.set_neurons_to_monitor(neurons_to_monitor)
                                        df_known = self._process_dataset(self.known_loader, nap_params=nap_params)
                                        df_unknown = self._process_dataset(self.unknown_loader, nap_params=nap_params)
                                        threshold, acc = self._find_threshold(df_known, df_unknown, integers=True)
                                        results.append([q0, q1, q2, q3, q4, i, threshold, acc])
                                        if acc > best_acc + 0.01:
                                            self.threshold = threshold
                                            best_acc = acc
                                            self.best_monitored_count = i
                                            self.nap_params = nap_params
        for i in results:
            print(i)
        return best_acc
def _process_dataset(self, testloader, nap_params=None):
hamming_distance = np.array([])
labels = np.array([])
testiter = iter(testloader)
for imgs, label in testiter:
label = label.to(self.args.device)
imgs = imgs.to(self.args.device)
outputs, intermediate_values, _ = self.base_model.forward_nap(imgs, nap_params=nap_params)
_, predicted = torch.max(outputs.data, 1)
distance = self.monitor.compute_hamming_distance(intermediate_values,
predicted.cpu().detach().numpy(), omit=self.omit)
# stacked = np.hstack((label.unsqueeze(1).cpu().numpy(), distance))
if hamming_distance.size:
hamming_distance = np.concatenate((hamming_distance, distance))
labels = np.concatenate((labels, label.unsqueeze(1).cpu().numpy()))
else:
hamming_distance = distance
labels = label.unsqueeze(1).cpu().numpy()
frames = []
for i in range(hamming_distance.shape[1]):
df = pd.DataFrame({"class": labels.flatten(), "hamming_distance": hamming_distance[:, i]})
frames.append(df)
return frames
def _find_threshold(self, dfs_known, dfs_unknown, integers=True, steps=1000, cut_tail=True):
thresholds = []
accuracies = []
for j, (df_known, df_unknown) in enumerate(zip(dfs_known, dfs_unknown)):
min = df_unknown["hamming_distance"].min() if df_unknown["hamming_distance"].min() > df_known[
"hamming_distance"].min() else \
df_known["hamming_distance"].min()
max = df_unknown["hamming_distance"].max() if df_unknown["hamming_distance"].max() > df_known[
"hamming_distance"].max() else \
df_known["hamming_distance"].max()
if cut_tail:
cut_threshold = int(df_known["hamming_distance"].quantile(.95))
cut_correct_count = (df_unknown["hamming_distance"] > cut_threshold).sum()
cut_correct_count += (df_known["hamming_distance"] <= cut_threshold).sum()
best_correct_count = 0
best_threshold = 0
for i in range(int(min) - 1, int(max) + 1) if integers else np.linspace(min, max, num=steps):
correct_count = 0
correct_count += (df_unknown["hamming_distance"] > i).sum()
correct_count += (df_known["hamming_distance"] <= i).sum()
if best_correct_count < correct_count:
best_correct_count = correct_count
best_threshold = i
if cut_tail:
if best_threshold > cut_threshold:
best_correct_count = cut_correct_count
best_threshold = cut_threshold
acc = best_correct_count / (len(df_unknown.index) + len(df_known.index))
thresholds.append(best_threshold)
accuracies.append(acc)
return np.array(thresholds), accuracies
    def _get_layers_shapes(self, nap_params):
        """Record the monitored layers' shapes by pushing one training sample
        through forward_nap, then derive per-layer scaling factors.

        Sets self.monitored_layers_shapes, self.shape_factors (each layer's size
        relative to the smallest monitored layer) and self.max_factor.
        """
        trainiter = iter(self.train_loader)
        with torch.no_grad():
            # forward_nap returns (outputs, intermediate_values, shapes);
            # only the shapes (index 2) are needed here.
            self.monitored_layers_shapes = \
                self.base_model.forward_nap(trainiter.__next__()[0][0].unsqueeze(0).to(self.args.device),
                                            nap_params=nap_params)[2]
        shapes = np.array(self.monitored_layers_shapes)
        self.shape_factors = shapes / shapes.min()
        self.max_factor = self.shape_factors.max()
def _count_classes(self, loader):
dataiter = iter(loader)
count_class = dict()
for _, label in tqdm.tqdm(dataiter):
for i in range(label.shape[0]):
if count_class.get(label[i].item()):
count_class[label[i].item()] += 1
else:
count_class[label[i].item()] = 1
return count_class
def _count_classes_valid(self, loader, nap_params):
| |
import inspect
import re
import pkg_resources
from PyQt5 import uic, QtGui, QtCore, QtWidgets
import numpy as np
import collections
import time as ttime
from isstools.elements import elements
from isstools.trajectory.trajectory import trajectory_manager
from isstools.batch.batch import BatchManager
# Qt Designer file that defines the batch-mode tab layout.
ui_path = pkg_resources.resource_filename('isstools', 'ui/ui_batch_mode.ui')
# NOTE(review): units unknown and constant unused in the visible code — confirm.
TIMEOUT = 120
class UIBatchMode(*uic.loadUiType(ui_path)):
    def __init__(self,
                 plan_funcs,
                 motors_dict,
                 hhm,
                 RE,
                 db,
                 adc_list,
                 enc_list,
                 xia,
                 run_prep_traj,
                 create_log_scan,
                 sample_stages,
                 parent_gui,
                 job_submitter,
                 *args, **kwargs):
        """Build the batch-mode tab: tree views for samples/scans/loops/batch,
        parameter widgets for the selected plan, and all signal wiring.

        plan_funcs: callable scan plans offered in comboBox_scans.
        motors_dict: name -> {'name', 'object', ...} mapping of available motors.
        hhm: mono device handed to trajectory_manager.
        RE / db: bluesky RunEngine and databroker instance.
        adc_list / enc_list / xia: acquisition hardware handles.
        sample_stages: candidate {'x', 'y'} stage-motor name pairs.
        parent_gui: owning GUI (run_mode is set on it during a batch).
        job_submitter: accepted for interface compatibility; not stored here.
        """
        super().__init__(*args, **kwargs)
        self.setupUi(self)
        self.plan_funcs = plan_funcs
        self.plan_funcs_names = [plan.__name__ for plan in plan_funcs]
        self.motors_dict = motors_dict
        self.mot_list = self.motors_dict.keys()
        self.mot_sorted_list = list(self.mot_list)
        self.mot_sorted_list.sort()
        self.traj_manager = trajectory_manager(hhm)
        self.create_log_scan = create_log_scan
        self.RE = RE
        self.db = db
        self.run_prep_traj = run_prep_traj
        self.sample_stages = sample_stages
        self.parent_gui = parent_gui
        self.batch_mode_uids = []
        # Tree views: 'all' accepts any item type; the loop trees accept only
        # samples or only scans respectively.
        self.treeView_batch = elements.TreeView(self, 'all')
        self.treeView_samples_loop = elements.TreeView(self, 'sample')
        self.treeView_samples_loop_scans = elements.TreeView(self, 'scan', unique_elements=False)
        self.treeView_samples = elements.TreeView(self, 'sample')
        self.treeView_scans = elements.TreeView(self, 'scan')
        self.push_batch_delete_all.clicked.connect(self.delete_all_batch)
        self.gridLayout_22.addWidget(self.treeView_samples_loop, 1, 0)
        self.gridLayout_22.addWidget(self.treeView_samples_loop_scans, 1, 1)
        self.gridLayout_23.addWidget(self.treeView_samples, 0, 0)
        self.gridLayout_24.addWidget(self.treeView_batch, 0, 0)
        self.gridLayout_26.addWidget(self.treeView_scans, 0, 0)
        # Drag/drop: batch steps can be reordered; sample/scan lists are drag
        # sources; the loop trees are drop targets.
        self.treeView_batch.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        # self.treeView_samples.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
        self.treeView_samples.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
        self.treeView_scans.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
        self.treeView_samples_loop.setDragDropMode(QtWidgets.QAbstractItemView.DropOnly)
        self.treeView_samples_loop_scans.setDragDropMode(QtWidgets.QAbstractItemView.DropOnly)
        # Batch execution state flags (polled by check_pause_abort_batch).
        self.batch_running = False
        self.batch_pause = False
        self.batch_abort = False
        self.batch_results = {}
        self.push_batch_pause.clicked.connect(self.pause_unpause_batch)
        self.push_batch_abort.clicked.connect(self.abort_batch)
        self.analog_samp_time = '1'
        self.enc_samp_time = '1'
        self.adc_list = adc_list
        self.enc_list = enc_list
        self.xia = xia
        self.treeView_batch.header().hide()
        self.treeView_samples.header().hide()
        self.treeView_scans.header().hide()
        self.treeView_samples_loop.header().hide()
        self.treeView_samples_loop_scans.header().hide()
        self.push_create_sample.clicked.connect(self.create_new_sample_func)
        self.push_get_sample.clicked.connect(self.get_sample_pos)
        self.model_samples = QtGui.QStandardItemModel(self)
        self.treeView_samples.setModel(self.model_samples)
        self.push_add_sample.clicked.connect(self.add_new_sample_func)
        self.push_delete_sample.clicked.connect(self.delete_current_sample)
        self.model_batch = QtGui.QStandardItemModel(self)
        self.treeView_batch.setModel(self.model_batch)
        self.push_add_sample_loop.clicked.connect(self.add_new_sample_loop_func)
        self.push_delete_sample_loop.clicked.connect(self.delete_current_samples_loop)
        self.model_samples_loop = QtGui.QStandardItemModel(self)
        self.treeView_samples_loop.setModel(self.model_samples_loop)
        self.push_delete_sample_loop_scan.clicked.connect(self.delete_current_samples_loop_scans)
        self.model_samples_loop_scans = QtGui.QStandardItemModel(self)
        self.treeView_samples_loop_scans.setModel(self.model_samples_loop_scans)
        self.push_create_scan.clicked.connect(self.create_new_scan_func)
        self.push_delete_scan.clicked.connect(self.delete_current_scan)
        self.push_add_scan.clicked.connect(self.add_new_scan_func)
        self.model_scans = QtGui.QStandardItemModel(self)
        self.treeView_scans.setModel(self.model_scans)
        self.push_batch_run.clicked.connect(self.start_batch)
        self.push_batch_print_steps.clicked.connect(self.print_batch)
        self.push_batch_delete.clicked.connect(self.delete_current_batch)
        self.comboBox_scans.addItems(self.plan_funcs_names)
        self.comboBox_scans.currentIndexChanged.connect(self.populateParams_batch)
        self.push_create_scan_update.clicked.connect(self.update_batch_traj)
        try:
            self.update_batch_traj()
        except OSError as err:
            print('Error loading:', err)
        # Parameter widget triples (index label, editor, signature text) for the
        # currently selected plan; populated by populateParams_batch.
        self.params1_batch = []
        self.params2_batch = []
        self.params3_batch = []
        if len(self.plan_funcs) != 0:
            self.populateParams_batch(0)
        self.comboBox_sample_loop_motor.addItems(self.mot_sorted_list)
        self.comboBox_sample_loop_motor.currentTextChanged.connect(self.update_loop_values)
        self.spinBox_sample_loop_rep.valueChanged.connect(self.restore_add_loop)
        # A nonzero repetition count disables the motor-range controls.
        self.spinBox_sample_loop_rep.valueChanged.connect(self.comboBox_sample_loop_motor.setDisabled)
        self.spinBox_sample_loop_rep.valueChanged.connect(self.doubleSpinBox_motor_range_start.setDisabled)
        self.spinBox_sample_loop_rep.valueChanged.connect(self.doubleSpinBox_motor_range_stop.setDisabled)
        self.spinBox_sample_loop_rep.valueChanged.connect(self.doubleSpinBox_motor_range_step.setDisabled)
        self.spinBox_sample_loop_rep.valueChanged.connect(self.radioButton_sample_rel.setDisabled)
        self.spinBox_sample_loop_rep.valueChanged.connect(self.radioButton_sample_abs.setDisabled)
        self.radioButton_sample_rel.toggled.connect(self.set_loop_values)
        self.last_lut = 0
        self.push_load_csv.clicked.connect(self.load_csv)
        self.push_save_csv.clicked.connect(self.save_csv)
        #checking which xystage to use:
        self.stage_x = ''
        self.stage_y = ''
        # Pick the first stage whose X and Y motors are both known and connected.
        for stage in self.sample_stages:
            if stage['x'] in self.motors_dict and stage['y'] in self.motors_dict:
                if self.motors_dict[stage['x']]['object'].connected and\
                   self.motors_dict[stage['y']]['object'].connected:
                    self.stage_x = stage['x']
                    self.stage_y = stage['y']
                    break
        if self.stage_x == '' or self.stage_y == '':
            print('No stage set! Batch mode will not work!')
def pause_unpause_batch(self):
if self.batch_running == True:
self.batch_pause = not self.batch_pause
if self.batch_pause:
print('Pausing batch run... It will pause in the next step.')
self.push_batch_pause.setText('Unpause')
else:
print('Unpausing batch run...')
self.push_batch_pause.setText('Pause')
self.label_batch_step.setText(self.label_batch_step.text()[9:])
def abort_batch(self):
if self.batch_running == True:
self.batch_abort = True
self.re_abort()
def create_new_sample_func(self):
self.create_new_sample(self.lineEdit_sample_name.text(), self.doubleSpinBox_sample_x.value(),
self.doubleSpinBox_sample_y.value())
def get_sample_pos(self):
if self.stage_x not in self.mot_list:
raise Exception('Stage X was not passed to the GUI')
if self.stage_y not in self.mot_list:
raise Exception('Stage Y was not passed to the GUI')
if not self.motors_dict[self.stage_x]['object'].connected or \
not self.motors_dict[self.stage_y]['object'].connected:
raise Exception('Stage IOC not connected')
x_value = self.motors_dict[self.stage_x]['object'].position
y_value = self.motors_dict[self.stage_y]['object'].position
self.doubleSpinBox_sample_x.setValue(x_value)
self.doubleSpinBox_sample_y.setValue(y_value)
def add_new_sample_func(self):
indexes = self.treeView_samples.selectedIndexes()
for index in indexes:
item = index.model().itemFromIndex(index)
self.add_new_sample(item)
def delete_current_sample(self):
view = self.treeView_samples
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def add_new_sample_loop_func(self):
model_samples = self.treeView_samples_loop.model()
data_samples = []
for row in range(model_samples.rowCount()):
index = model_samples.index(row, 0)
data_samples.append(str(model_samples.data(index)))
model_scans = self.treeView_samples_loop_scans.model()
data_scans = []
for row in range(model_scans.rowCount()):
index = model_scans.index(row, 0)
data_scans.append(str(model_scans.data(index)))
self.add_new_sample_loop(data_samples, data_scans)
def delete_current_samples_loop(self):
view = self.treeView_samples_loop
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def delete_current_samples_loop_scans(self):
view = self.treeView_samples_loop_scans
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def delete_current_scan(self):
view = self.treeView_scans
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def create_new_scan_func(self):
self.create_new_scan(self.comboBox_scans.currentText(), self.comboBox_lut.currentText())
def add_new_scan_func(self):
indexes = self.treeView_scans.selectedIndexes()
for index in indexes:
item = index.model().itemFromIndex(index)
self.add_new_scan(item)
def start_batch(self):
print('[Launching Threads]')
self.run_batch()
def print_batch(self):
print('\n***** Printing Batch Steps *****')
self.run_batch(print_only=True)
print('***** Finished Batch Steps *****')
def delete_current_batch(self):
view = self.treeView_batch
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def delete_all_batch(self):
view = self.treeView_samples
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_scans
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_samples_loop
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_samples_loop_scans
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_batch
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
def create_new_sample(self, name, x, y):
parent = self.model_samples.invisibleRootItem()
item = QtGui.QStandardItem('{} X:{} Y:{}'.format(name, x, y))
item.setDropEnabled(False)
item.item_type = 'sample'
item.x = x
item.y = y
# subitem = QtGui.QStandardItem('X: {}'.format(x))
# subitem.setEnabled(False)
# item.appendRow(subitem)
# subitem = QtGui.QStandardItem('Y: {}'.format(y))
# subitem.setEnabled(False)
# item.appendRow(subitem)
parent.appendRow(item)
self.treeView_samples.expand(self.model_samples.indexFromItem(item))
def add_new_sample(self, item):
parent = self.model_batch.invisibleRootItem()
new_item = item.clone()
new_item.item_type = 'sample'
new_item.x = item.x
new_item.y = item.y
new_item.setEditable(False)
new_item.setDropEnabled(False)
name = new_item.text()[:new_item.text().find(' X:')] # .split()[0]
new_item.setText('Move to "{}" X:{} Y:{}'.format(name, item.x, item.y))
for index in range(item.rowCount()):
subitem = QtGui.QStandardItem(item.child(index))
subitem.setEnabled(False)
subitem.setDropEnabled(False)
new_item.appendRow(subitem)
parent.appendRow(new_item)
def select_all_samples(self):
if len(self.treeView_samples.selectedIndexes()) < self.model_samples.rowCount():
self.treeView_samples.selectAll()
else:
self.treeView_samples.clearSelection()
def create_new_scan(self, curr_type, traj):
run_params = {}
for i in range(len(self.params1_batch)):
if (self.param_types_batch[i] == int):
run_params[self.params3_batch[i].text().split('=')[0]] = self.params2_batch[i].value()
elif (self.param_types_batch[i] == float):
run_params[self.params3_batch[i].text().split('=')[0]] = self.params2_batch[i].value()
elif (self.param_types_batch[i] == bool):
run_params[self.params3_batch[i].text().split('=')[0]] = bool(self.params2_batch[i].checkState())
elif (self.param_types_batch[i] == str):
run_params[self.params3_batch[i].text().split('=')[0]] = self.params2_batch[i].text()
params = str(run_params)[1:-1].replace(': ', ':').replace(',', '').replace("'", "")
parent = self.model_scans.invisibleRootItem()
if self.comboBox_lut.isEnabled():
item = QtGui.QStandardItem('{} Traj:{} {}'.format(curr_type, traj, params))
else:
item = QtGui.QStandardItem('{} {}'.format(curr_type, params))
item.setDropEnabled(False)
item.item_type = 'sample'
parent.appendRow(item)
self.treeView_samples.expand(self.model_samples.indexFromItem(item))
def add_new_scan(self, item):
parent = self.model_batch.invisibleRootItem()
new_item = item.clone()
new_item.item_type = 'scan'
new_item.setEditable(False)
new_item.setDropEnabled(False)
name = new_item.text().split()[0]
new_item.setText('Run {}'.format(new_item.text()))
for index in range(item.rowCount()):
subitem = QtGui.QStandardItem(item.child(index))
subitem.setEnabled(False)
subitem.setDropEnabled(False)
new_item.appendRow(subitem)
parent.appendRow(new_item)
def update_loop_values(self, text):
for motor in self.motors_dict:
if self.comboBox_sample_loop_motor.currentText() == self.motors_dict[motor]['name']:
curr_mot = self.motors_dict[motor]['object']
break
if self.radioButton_sample_rel.isChecked():
if curr_mot.connected == True:
self.push_add_sample_loop.setEnabled(True)
self.doubleSpinBox_motor_range_start.setValue(-0.5)
self.doubleSpinBox_motor_range_stop.setValue(0.5)
self.doubleSpinBox_motor_range_step.setValue(0.25)
self.push_add_sample_loop.setEnabled(True)
else:
self.push_add_sample_loop.setEnabled(False)
self.doubleSpinBox_motor_range_start.setValue(0)
self.doubleSpinBox_motor_range_stop.setValue(0)
self.doubleSpinBox_motor_range_step.setValue(0.025)
else:
if curr_mot.connected == True:
self.push_add_sample_loop.setEnabled(True)
curr_pos = curr_mot.read()[curr_mot.name]['value']
self.doubleSpinBox_motor_range_start.setValue(curr_pos - 0.1)
self.doubleSpinBox_motor_range_stop.setValue(curr_pos + 0.1)
self.doubleSpinBox_motor_range_step.setValue(0.025)
else:
self.push_add_sample_loop.setEnabled(False)
self.doubleSpinBox_motor_range_start.setValue(0)
self.doubleSpinBox_motor_range_stop.setValue(0)
self.doubleSpinBox_motor_range_step.setValue(0.025)
def restore_add_loop(self, value):
if value:
self.push_add_sample_loop.setEnabled(True)
def set_loop_values(self, checked):
if checked:
self.doubleSpinBox_motor_range_start.setValue(-0.5)
self.doubleSpinBox_motor_range_stop.setValue(0.5)
self.doubleSpinBox_motor_range_step.setValue(0.25)
self.push_add_sample_loop.setEnabled(True)
else:
motor_text = self.comboBox_sample_loop_motor.currentText()
self.update_loop_values(motor_text)
    def add_new_sample_loop(self, samples, scans):
        """Append a 'Sample Loop' step to the batch tree.

        samples / scans: display strings of the items to loop over.
        The loop header encodes either a repetition count (when the spin box is
        nonzero) or a motor sweep (motor, start, stop, step), plus which of
        samples/scans is the primary (outer) iteration.
        """
        parent = self.model_batch.invisibleRootItem()
        new_item = QtGui.QStandardItem('Sample Loop')
        new_item.setEditable(False)
        # Nonzero repetition count wins over a motor sweep.
        if self.spinBox_sample_loop_rep.value():
            repetitions_item = QtGui.QStandardItem('Repetitions:{}'.format(self.spinBox_sample_loop_rep.value()))
        else:
            repetitions_item = QtGui.QStandardItem(
                'Motor:{} Start:{} Stop:{} Step:{}'.format(self.comboBox_sample_loop_motor.currentText(),
                                                           self.doubleSpinBox_motor_range_start.value(),
                                                           self.doubleSpinBox_motor_range_stop.value(),
                                                           self.doubleSpinBox_motor_range_step.value()))
        new_item.appendRow(repetitions_item)
        if self.radioButton_sample_loop.isChecked():
            primary = 'Samples'
        else:
            primary = 'Scans'
        primary_item = QtGui.QStandardItem('Primary:{}'.format(primary))
        new_item.appendRow(primary_item)
        samples_item = QtGui.QStandardItem('Samples')
        samples_item.setDropEnabled(False)
        for index in range(len(samples)):
            subitem = QtGui.QStandardItem(samples[index])
            subitem.setDropEnabled(False)
            samples_item.appendRow(subitem)
        new_item.appendRow(samples_item)
        scans_item = QtGui.QStandardItem('Scans')
        scans_item.setDropEnabled(False)
        for index in range(len(scans)):
            subitem = QtGui.QStandardItem(scans[index])
            subitem.setDropEnabled(False)
            scans_item.appendRow(subitem)
        new_item.appendRow(scans_item)
        parent.appendRow(new_item)
        # Expand the loop and its child groups so the contents are visible.
        self.treeView_batch.expand(self.model_batch.indexFromItem(new_item))
        for index in range(new_item.rowCount()):
            self.treeView_batch.expand(new_item.child(index).index())
def populateParams_batch(self, index):
if self.comboBox_scans.currentText()[: 5] != 'tscan':
self.comboBox_lut.setEnabled(False)
else:
self.comboBox_lut.setEnabled(True)
for i in range(len(self.params1_batch)):
self.gridLayout_31.removeWidget(self.params1_batch[i])
self.gridLayout_31.removeWidget(self.params2_batch[i])
self.gridLayout_31.removeWidget(self.params3_batch[i])
self.params1_batch[i].deleteLater()
self.params2_batch[i].deleteLater()
self.params3_batch[i].deleteLater()
self.params1_batch = []
self.params2_batch = []
self.params3_batch = []
self.param_types_batch = []
plan_func = self.plan_funcs[index]
signature = inspect.signature(plan_func)
for i in range(0, len(signature.parameters)):
default = re.sub(r':.*?=', '=', str(signature.parameters[list(signature.parameters)[i]]))
if default == str(signature.parameters[list(signature.parameters)[i]]):
default = re.sub(r':.*', '', str(signature.parameters[list(signature.parameters)[i]]))
self.addParamControl(list(signature.parameters)[i], default,
signature.parameters[list(signature.parameters)[i]].annotation,
grid=self.gridLayout_31,
params=[self.params1_batch, self.params2_batch, self.params3_batch])
self.param_types_batch.append(signature.parameters[list(signature.parameters)[i]].annotation)
    def addParamControl(self, name, default, annotation, grid, params):
        """Append one row (index label, editor widget, signature text) to *grid*
        for a single plan parameter.

        name: parameter name (currently only shown via the signature text).
        default: string form of the parameter, e.g. 'n:int=3' or 'n=3'.
        annotation: parameter annotation; selects the editor widget type.
        grid: QGridLayout receiving the row.
        params: [labels, editors, descriptions] lists the widgets are appended to.

        NOTE(review): for annotations other than int/float/bool/str no widget is
        created and nothing is appended, while the caller still records the
        annotation in param_types_batch — confirm that mismatch is intended.
        """
        # Three widgets per row, so the next row index is count/3.
        rows = int((grid.count()) / 3)
        param1 = QtWidgets.QLabel(str(rows + 1))
        param2 = None
        def_val = ''
        # Text after '=' is the default value, if any.
        if default.find('=') != -1:
            def_val = re.sub(r'.*=', '', default)
        if annotation == int:
            param2 = QtWidgets.QSpinBox()
            param2.setMaximum(100000)
            param2.setMinimum(-100000)
            def_val = int(def_val)
            param2.setValue(def_val)
        elif annotation == float:
            param2 = QtWidgets.QDoubleSpinBox()
            param2.setMaximum(100000)
            param2.setMinimum(-100000)
            def_val = float(def_val)
            param2.setValue(def_val)
        elif annotation == bool:
            param2 = QtWidgets.QCheckBox()
            if def_val == 'True':
                def_val = True
            else:
                def_val = False
            param2.setCheckState(def_val)
            param2.setTristate(False)
        elif annotation == str:
            param2 = QtWidgets.QLineEdit()
            def_val = str(def_val)
            param2.setText(def_val)
        if param2 is not None:
            param3 = QtWidgets.QLabel(default)
            grid.addWidget(param1, rows, 0, QtCore.Qt.AlignTop)
            grid.addWidget(param2, rows, 1, QtCore.Qt.AlignTop)
            grid.addWidget(param3, rows, 2, QtCore.Qt.AlignTop)
            params[0].append(param1)
            params[1].append(param2)
            params[2].append(param3)
def update_batch_traj(self):
self.trajectories = self.traj_manager.read_info(silent=True)
self.comboBox_lut.clear()
self.comboBox_lut.addItems(
['{}-{}'.format(lut, self.trajectories[lut]['name']) for lut in self.trajectories if lut != '9'])
def load_csv(self):
user_filepath = '/GPFS/xf08id/User Data/{}.{}.{}/'.format(self.RE.md['year'],
self.RE.md['cycle'],
self.RE.md['PROPOSAL'])
filename = QtWidgets.QFileDialog.getOpenFileName(caption='Select file to load',
directory=user_filepath,
filter='*.csv',
parent=self)[0]
if filename:
batman = BatchManager(self)
batman.load_csv(filename)
def save_csv(self):
user_filepath = '/GPFS/xf08id/User Data/{}.{}.{}/'.format(self.RE.md['year'],
self.RE.md['cycle'],
self.RE.md['PROPOSAL'])
filename = QtWidgets.QFileDialog.getSaveFileName(caption='Select file to save',
directory=user_filepath,
filter='*.csv',
parent=self)[0]
if filename:
if filename[-4:] != '.csv':
filename += '.csv'
batman = BatchManager(self)
batman.save_csv(filename)
def check_pause_abort_batch(self):
if self.batch_abort:
print('**** Aborting Batch! ****')
raise Exception('Abort button pressed by user')
elif self.batch_pause:
self.label_batch_step.setText('[Paused] {}'.format(self.label_batch_step.text()))
while self.batch_pause:
QtCore.QCoreApplication.processEvents()
def run_batch(self, print_only=False):
try:
self.last_lut = 0
current_index = 0
self.current_uid_list = []
if not print_only:
self.parent_gui.run_mode = 'batch'
self.batch_running = True
self.batch_pause = False
self.batch_abort = False
# Send sampling time to the pizzaboxes:
value = int(
round(float(self.analog_samp_time) / self.adc_list[0].sample_rate.value * 100000))
for adc in self.adc_list:
adc.averaging_points.put(str(value))
for enc in self.enc_list:
enc.filter_dt.put(float(self.enc_samp_time) * 100000)
if self.xia is not None:
if self.xia.input_trigger is not None:
self.xia.input_trigger.unit_sel.put(1) # ms, not us
self.xia.input_trigger.period_sp.put(int(self.xia_samp_time))
self.batch_results = {}
for batch_index in range(self.model_batch.rowCount()):
index = self.model_batch.index(batch_index, 0)
text | |
def is_proper_descendant_of(self, node1, node2):
return (self.is_descendant_of(node1, node2) and not (node1 == node2))
# Returns true iff node1 is a descendant of node2.
def is_descendant_of(self, node1, node2):
i = self.node_map[node1]
j = self.node_map[node2]
return self.dpath[i, j] == 1
    # Returns the edge connecting node1 and node2, provided a unique such edge exists.
    def get_edge(self, node1, node2):
        """Return the Edge between node1 and node2, or None if they are not adjacent.

        The two endpoint codes are read from both orientations of the adjacency
        matrix; a pair of zeros means there is no edge.
        """
        i = self.node_map[node1]
        j = self.node_map[node2]
        end_1 = self.graph[i, j]
        end_2 = self.graph[j, i]
        if end_1 == 0 and end_2 == 0:
            return None
        edge = Edge(node1, node2, Endpoint(end_1), Endpoint(end_2))
        return edge
    # Returns the directed edge from node1 to node2, if there is one.
    def get_directed_edge(self, node1, node2):
        """Return the edge between node1 and node2.

        NOTE(review): despite the name this simply delegates to get_edge without
        checking orientation — presumably fine for a DAG where every edge is
        directed; confirm for other graph types.
        """
        return self.get_edge(node1, node2)
# Returns the list of edges connected to a particular node. No particular ordering of the edges in the list is guaranteed.
def get_node_edges(self, node):
i = self.node_map[node]
edges = []
for j in range(self.num_vars):
if self.graph[j, i] != 0:
node2 = self.nodes[j]
edges.append(self.get_edge(node, node2))
return edges
def get_graph_edges(self):
edges = []
for i in range(self.num_vars):
node = self.nodes[i]
for j in range(i + 1, self.num_vars):
if self.graph[j, i] != 0:
node2 = self.nodes[j]
edges.append(self.get_edge(node, node2))
return edges
    # Returns true if node2 is a definite noncollider between node1 and node3.
    def is_def_noncollider(self, node1, node2, node3):
        """True if some edge at node2 points outward toward node1 or node3, which
        rules node2 out as a collider on the path node1 - node2 - node3."""
        edges = self.get_node_edges(node2)
        for edge in edges:
            # The distal node is the far end of the edge as seen from node2.
            is_node1 = edge.get_distal_node(node2) == node1
            is_node3 = edge.get_distal_node(node2) == node3
            if is_node1 and edge.points_toward(node1):
                return True
            if is_node3 and edge.points_toward(node3):
                return True
        return False
# Returns true if node2 is a definite collider between node1 and node3.
def is_def_collider(self, node1, node2, node3):
edge1 = self.get_edge(node1, node2)
edge2 = self.get_edge(node2, node3)
if edge1 is None or edge2 is None:
return False
return str(edge1.get_proximal_endpoint(node2)) == "ARROW" and str(edge2.get_proximal_endpoint(node2)) == "ARROW"
    # Returns true if node1 and node2 are d-connected on the set of nodes z.
    def is_dconnected_to(self, node1, node2, z):
        """Delegate the d-connection test to GraphUtils, conditioning on node set z."""
        utils = GraphUtils()
        return utils.is_dconnected_to(node1, node2, z, self)
# Returns true if node1 and node2 are d-separated on the set of nodes z.
def is_dseparated_from(self, node1, node2, z):
return not self.is_dconnected_to(node1, node2, z)
# Returns true if the graph is a pattern.
def is_pattern(self):
return False
# Returns true if the graph is a PAG.
def is_pag(self):
return False
# Returns true iff there is a single directed edge from node1 to node2.
def is_directed_from_to(self, node1, node2):
i = self.node_map[node1]
j = self.node_map[node2]
return self.graph[j, i] == 1
# REturns true iff there is a single undirected edge between node1 and node2.
def is_undirected_from_to(self, node1, node2):
return False
# Returns true iff the given node is exogenous.
def is_exogenous(self, node):
return self.get_indegree(node) == 0
# Returns the nodes adjacent to the given node with the given proximal endpoint.
def get_nodes_into(self, node, endpoint):
if not (str(endpoint) == "ARROW" or str(endpoint) == "TAIL"):
return []
i = self.node_map[node]
nodes = []
if str(endpoint) == "ARROW":
for j in range(self.num_vars):
if self.graph[i, j] == 1:
node2 = self.nodes[j]
nodes.append(node2)
else:
for j in range(self.num_vars):
if self.graph[j, i] == 1:
node2 = self.nodes[j]
nodes.append(node2)
return nodes
# Returns the nodes adjacent to the given node with the given distal endpoint.
def get_nodes_out_of(self, node, endpoint):
if not (str(endpoint) == "ARROW" or str(endpoint) == "TAIL"):
return []
i = self.node_map[node]
nodes = []
if str(endpoint) == "ARROW":
for j in range(self.num_vars):
if self.graph[j, i] == 1:
node2 = self.nodes[j]
nodes.append(node2)
else:
for j in range(self.num_vars):
if self.graph[i, j] == 1:
node2 = self.nodes[j]
nodes.append(node2)
return nodes
# Removes the given edge from the graph.
def remove_edge(self, edge):
node1 = edge.get_node1()
node2 = edge.get_node2()
i = self.node_map[node1]
j = self.node_map[node2]
self.graph[j, i] = 0
self.graph[i, j] = 0
# Removes the edge connecting the given two nodes, provided there is exactly one such edge.
def remove_connecting_edge(self, node1, node2):
i = self.node_map[node1]
j = self.node_map[node2]
self.graph[j, i] = 0
self.graph[i, j] = 0
# Removes all edges connecting node A to node B. In most cases, this will
# remove at most one edge, but since multiple edges are permitted in some
# graph implementations, the number will in some cases be greater than
# one.
def remove_connecting_edges(self, node1, node2):
self.remove_connecting_edge(node1, node2)
# Iterates through the list and removes any permissible edges found. The
# order in which edges are removed is the order in which they are presented
# in the iterator.
def remove_edges(self, edges):
for edge in edges:
self.remove_edge(edge)
# Removes a node from the graph.
def remove_node(self, node):
i = self.node_map[node]
graph = self.graph
graph = np.delete(graph, (i), axis=0)
graph = np.delete(graph, (i), axis=1)
self.graph = graph
nodes = self.nodes
nodes.remove(node)
self.nodes = nodes
node_map = self.node_map
node_map.pop(node)
self.node_map = node_map
# Iterates through the list and removes any permissible nodes found. The
# order in which nodes are removed is the order in which they are presented
# in the iterator.
def remove_nodes(self, nodes):
for node in nodes:
self.remove_node(node)
# Constructs and returns a subgraph consisting of a given subset of the
# nodes of this graph together with the edges between them.
def subgraph(self, nodes):
subgraph = Dag(nodes)
graph = self.graph
for i in range(self.num_vars):
if not (self.nodes[i] in nodes):
graph = np.delete(graph, (i), axis=0)
for i in range(self.num_vars):
if not (self.nodes[i] in nodes):
graph = np.delete(graph, (i), axis=1)
subgraph.graph = graph
subgraph.reconstitute_dpath(subgraph.get_graph_edges())
return subgraph
# Returns a string representation of the graph.
def __str__(self):
utils = GraphUtils()
return utils.graph_string(self)
# Transfers nodes and edges from one graph to another. One way this is
# used is to change graph types. One constructs a new graph based on the
# old graph, and this method is called to transfer the nodes and edges of
# the old graph to the new graph.
def transfer_nodes_and_edges(self, graph):
for node in graph.nodes:
self.add_node(node)
for edge in graph.get_graph_edges():
self.add_edge(edge)
def transfer_attributes(self, graph):
graph.attributes = self.attributes
# Returns the list of ambiguous triples associated with this graph. Triples <x, y, z> that no longer
# lie along a path in the getModel graph are removed.
def get_ambiguous_triples(self):
return self.ambiguous_triples
# Returns the set of underlines associated with this graph.
def get_underlines(self):
return self.underline_triples
# Returns the set of dotted underlines associated with this graph.
def get_dotted_underlines(self):
return self.dotted_underline_triples
# Returns true iff the triple <node1, node2, node3> is set as ambiguous.
def is_ambiguous_triple(self, node1, node2, node3):
return (node1, node2, node3) in self.ambiguous_triples
# Returns true iff the triple <node1, node2, node3> is set as underlined.
def is_underline_triple(self, node1, node2, node3):
return (node1, node2, node3) in self.underline_triples
# Returns true iff the triple <node1, node2, node3> is set as dotted underlined.
def is_dotted_underline_triple(self, node1, node2, node3):
return (node1, node2, node3) in self.dotted_underline_triples
# Adds the triple <node1, node2, node3> as an ambiguous triple to the graph.
def add_ambiguous_triple(self, node1, node2, node3):
self.ambiguous_triples.append((node1, node2, node3))
# Adds the triple <node1, node2, node3> as an underlined triple to the graph.
def add_underline_triple(self, node1, node2, node3):
self.underline_triples.append((node1, node2, node3))
# Adds the triple <node1, node2, node3> as a dotted underlined triple to the graph.
def add_dotted_underline_triple(self, node1, node2, node3):
self.dotted_underline_triples.append((node1, node2, node3))
# Removes the triple <node1, node2, node3> from the set of ambiguous triples.
def remove_ambiguous_triple(self, node1, node2, node3):
self.ambiguous_triples.remove((node1, node2, node3))
# Removes the triple <node1, node2, node3> from the set of underlined triples.
def remove_underline_triple(self, node1, node2, node3):
self.underline_triples.remove((node1, node2, node3))
# Removes the triple <node1, node2, node3> from the set of dotted underlined triples.
def remove_dotted_underline_triple(self, node1, node2, node3):
self.dotted_underline_triples.remove((node1, node2, node3))
# Sets the list of ambiguous triples to the triples in the given set.
def set_ambiguous_triples(self, triples):
self.ambiguous_triples = triples
# Sets the list of underlined triples to the triples in the given set.
def set_underline_triples(self, triples):
self.underline_triples = triples
# Sets the list of dotted underlined triples to the triples in the given set.
def set_dotted_underline_triples(self, triples):
self.dotted_underline_triples = triples
# Returns a tier ordering for acyclic | |
using Base64.
:param str to_encode: The string to encode.*
:param int to_encode: The integer to encode.*
:param float to_encode: The decimal to encode.*
:param bool to_encode: The boolean to encode.*
:param bytes to_encode: The bytes to encode.*
:param bool return_bytes: If True, this method will return the result in `bytes`.
*The supported data types (except str and bytes) are converted to str.
*str data types are converted to bytes.
:returns str: The string version of the <encoded> (data type `bytes`) variable.
:returns bytes: The bytes version of the <encoded> variable.
"""
if type(to_encode) in (str, int, float, bool):
to_encode = str(to_encode).encode(self.encoding)
elif type(to_encode) is bytes:
pass
else:
raise ValueError("Unsupported data type.")
encoded = base64.b64encode(to_encode)
if return_bytes:
return encoded
else:
return str(encoded.decode(self.encoding))
def __b64decode(self, to_decode):
"""
Decode <to_decode> using Base64.
:param str to_decode: The string to decode.
:param bytes to_decode: The bytes to decode.
:returns bytes: The decoded form of <to_decode> in `bytes` data type.
"""
if type(to_decode) is str:
to_decode = to_decode.encode(self.encoding)
elif type(to_decode) is bytes:
pass
else:
raise ValueError("Unsupported data type.")
return base64.b64decode(to_decode)
def __readconfig(self):
"""
Read the configuration file.
:returns void:
"""
with open(self.configpath, 'r') as f:
self.__data = json.loads(self.__b64decode(f.read()))
def __writeconfig(self):
"""
Replace existing data from <self.configpath> with <self.__data>.
:returns void:
"""
# Check if `self.__data` values are valid.
self._validate_data()
# Write new data to `self.configpath`.
with open(self.configpath, 'w') as f:
f.write(self.__b64encode(json.dumps(self.__data, separators=(',', ':'))))
def _validate_data(self, configdata=None):
"""
Validate configuration data (data type `dict`).
Raises `ValueError` if there is an invalid value in the data.
Raises `TypeError` if `configdata` is invalid.
:param dict configdata: The dictionary form of configuration data.
:returns void:
"""
if configdata is None:
configdata = self.__data
else:
if type(configdata) is dict:
pass
else:
raise TypeError("configdata must be a dictionary.")
for key in self.keynames:
if type(configdata[key]) not in self.keynames[key]:
raise ValueError("The key does not have a valid value data type.")
else:
continue
    def __readdict(self):
        """
        Read the dictionary and store it in <self.__dictionary>.

        Unwraps the stored value in the reverse order of __writedict():
        base64 -> (optional) decompression -> base64 -> (optional)
        decryption -> JSON.
        :returns void:
        """
        # The persisted dictionary may be either str or bytes; normalize to bytes.
        if type(self.__data["dictionary"]) is str:
            dictionary = self.__data["dictionary"].encode(self.encoding)
        elif type(self.__data["dictionary"]) is bytes:
            dictionary = self.__data["dictionary"]
        else:
            raise TypeError("Invalid dictionary!")
        if dictionary == b'':
            # A lazy guess if the dictionary is empty.
            self.__dictionary = {}
            return None
        # Decompression
        dictionary = self.__b64decode(dictionary)
        if self.__data["compression"] == "None":
            decompressed = dictionary
        elif self.__data["compression"] == "zlib":
            # NOTE(review): zlib is presumably imported at module level -- confirm.
            decompressed = zlib.decompress(dictionary)
        else:
            raise ValueError("Invalid compression algorithm name")
        # Decryption
        # The payload carries an extra base64 layer inside the compressed data,
        # mirroring the inner __b64encode in __writedict().
        decompressed = self.__b64decode(decompressed)
        if self.__data["encryption"] == "None":
            decrypted = decompressed
        elif self.__data["encryption"] == "aes256":
            # The AES256 plaintext is itself a base64-encoded JSON string.
            decrypted = self.__b64decode(AES256(self.__epass).decrypt(decompressed))
        else:
            raise ValueError("Invalid encryption algorithm name")
        """
        if type(decrypted) is bytes:
            decrypted = decrypted.decode()
        """
        # NOTE(review): `decrypted` is bytes at this point, so comparing it to
        # the str "" can never be True; the b'' check above is the effective
        # empty-dictionary guard.
        if decrypted == "":
            # Another lazy check if plaintext is empty.
            self.__dictionary = {}
            return None
        # Entry layout (see add()):
        # <variable_name>|<datatype>|<value>
        # <variable_name>|<datatype>|<array_datatype>|<values>
        self.__dictionary = json.loads(decrypted)
    def __writedict(self):
        """
        Serialize `self.__dictionary` into self.__data["dictionary"].

        The payload is wrapped as JSON -> base64 -> (optional) encryption ->
        (optional) compression -> base64; __readdict() unwraps in reverse.
        (The previous docstring documented a nonexistent <newdict> parameter;
        this method takes no arguments.)
        :returns void:
        """
        # Encrypt the result
        if self.__data["encryption"] == "None":
            eresult = self.__b64encode(json.dumps(self.__dictionary, separators=(',', ':')), True)
        elif self.__data["encryption"] == "aes256":
            # base64 the JSON first, encrypt it, then base64 the ciphertext (as bytes)
            eresult = self.__b64encode(AES256(self.__epass).encrypt(self.__b64encode(json.dumps(self.__dictionary, separators=(',', ':')))), True)
        else:
            raise ValueError("Invalid encryption algorithm name")
        # Compress the result
        if self.__data["compression"] == "None":
            cresult = eresult
        elif self.__data["compression"] == "zlib":
            cresult = zlib.compress(eresult)
        else:
            raise ValueError("Invalid compression algorithm name")
        # Final base64 wrap so the stored value is JSON-safe when the config is written.
        self.__data["dictionary"] = self.__b64encode(cresult)
def load(self, load_dict=True):
"""
Get the current values of the configuration file.
This method must be called first to work with the configuration file.
:param bool load_dict: If True, this method will decrypt and decode the dictionary and store it to `self.__dictionary`.
:returns void:
"""
self.__readconfig()
if load_dict:
self.__readdict()
def info(self):
"""
Return information about the configuration file.
:returns dict:
"""
if self.__data is None:
raise ValueError("The configuration file is not yet loaded!")
result = self.__data
try:
result.pop("dictionary")
except KeyError:
pass
if type(result["version"]) is not list and result["version"] is not None:
newver = result["version"].split('.')
else:
newver = result["version"]
# Convert result["version"] to a list of integers
newver2 = []
for _ in newver:
newver2.append(int(_))
result["version"] = newver2
if self.__dictionary is None:
result["loaded_dictionary"] = False
else:
result["loaded_dictionary"] = True
return result
    def get(self, key):
        """
        Get the value of <key>.
        :param str key: The name/key of the value you are looking for.
        :returns str: Returns type(str) if the <key>'s datatype is `str`.
        :returns int: Returns type(int) if the <key>'s datatype is `int`.
        :returns float: Returns type(float) if the <key>'s datatype is `float`.
        :returns bool: Returns type(bool) if the <key>'s datatype is `bool`.
        :returns list: Returns type(list) if the <key>'s datatype is `arr`.
        :returns bytes: Returns type(bytes) if the <key>'s datatype is `bin`.
        :raises ValueError: If the configuration is not loaded or a stored
            datatype tag is unknown.
        """
        if self.__dictionary is None or self.__data is None:
            raise ValueError("The configuration file is not yet loaded!")
        elif type(self.__dictionary) is dict:
            # Entries are stored as [<datatype>, <value>] or, for arrays,
            # [<datatype>, <array_datatype>, <values>] -- see add().
            value = self.__dictionary[key]
            if value[0] == "str":
                value = str(value[1])
            elif value[0] == "int":
                value = int(value[1])
            elif value[0] == "float":
                value = float(value[1])
            elif value[0] == "bool":
                # booleans are persisted as 0/1 (see add())
                if int(value[1]) == 0:
                    value = False
                elif int(value[1]) == 1:
                    value = True
                else:
                    raise ValueError("Unknown boolean state")
            elif value[0] == "arr":
                # rebuild the list, converting each element back to the
                # declared array datatype
                newvalue = value[2]
                valuearrdatatype = value[1]
                value = []
                for _ in newvalue:
                    if valuearrdatatype == "str":
                        value.append(str(_))
                    elif valuearrdatatype == "int":
                        value.append(int(_))
                    elif valuearrdatatype == "float":
                        value.append(float(_))
                    elif valuearrdatatype == "bool":
                        if int(_) == 0:
                            value.append(False)
                        elif int(_) == 1:
                            value.append(True)
                        else:
                            raise ValueError("Unknown boolean state")
                    elif valuearrdatatype == "bin":
                        # binary elements are stored base64-encoded
                        value.append(self.__b64decode(_))
                    else:
                        raise ValueError("Invalid data type")
            elif value[0] == "bin":
                # binary values are stored base64-encoded
                value = self.__b64decode(value[1])
            else:
                raise ValueError("Invalid data type")
            return value
        else:
            raise ValueError("Invalid dictionary")
def add(self, key, valuetype, value, array_datatype=None):
"""
Add a new variable.
:param str key: The variable name/key.
:param str valuetype: The data type of the variable.
Available valuetypes:
- str
- int
- float
- bool
- arr (list)
- bin (binary)
:param str value: The value of <key>. (If <valuetype> is `str`)
:param int value: The value of <key>. (If <valuetype> is `int`)
:param float value: The value of <key>. (If <valuetype> is `float`)
:param bool value: The value of <key>. (If <valuetype> is `bool`)
:param tuple value: The value of <key>. (If <valuetype> is `arr`)
:param list value: The value of <key>. (If <valuetype> is `arr`)
:param bytes value: The value of <key>. (If <valuetype> is `bin`)
:param str array_datatype: [OPTIONAL; If your valuetype is `arr`, this is required] The data type of the array objects (`arr` not supported)
:returns void:
"""
# <variable_name>|<datatype>|<value>
# <variable_name>|<datatype>|<array_datatype>|<values>
if type(key) is not str:
raise TypeError("key is not a string")
if self.__dictionary is None or self.__data is None:
raise ValueError("The configuration file is not yet loaded!")
if self.__dictionary.get(key, None) is None:
# Add to the dictionary
if valuetype in self.datatypes:
if valuetype == "arr":
if array_datatype is None:
raise ValueError("array_datatype is required to create an array!")
keyvalue = [valuetype, array_datatype, []]
# Check the list
if type(value) in self.datatypes_conversion["arr"]:
for _ in value:
if keyvalue[1] == "str":
keyvalue[2].append(str(_))
elif keyvalue[1] == "int":
keyvalue[2].append(int(_))
elif keyvalue[1] == "float":
keyvalue[2].append(float(_))
elif keyvalue[1] == "bool":
if _ == True:
keyvalue[2].append(1)
elif _ == False:
keyvalue[2].append(0)
else:
raise ValueError("Unknown boolean state")
elif keyvalue[1] == "bin":
if type(_) in self.datatypes_conversion["bin"]:
keyvalue[2].append(self.__b64encode(_, True).decode(self.encoding))
else:
raise("array object is not in bytes data type")
else:
raise ValueError("Unsupported array datatype")
self.__dictionary[key] = keyvalue
else:
raise TypeError("value must be a tuple or list when creating an array.")
else:
if valuetype == "str":
self.__dictionary[key] = [valuetype, str(value)]
elif valuetype == "int":
self.__dictionary[key] = [valuetype, int(value)]
elif valuetype == "float":
self.__dictionary[key] = [valuetype, float(value)]
elif valuetype == "bool":
if value == True:
self.__dictionary[key] = [valuetype, 1]
elif value == False:
self.__dictionary[key] = [valuetype, 0]
else:
raise ValueError("Unknown boolean state")
elif valuetype == "bin":
if type(value) in self.datatypes_conversion["bin"]:
self.__dictionary[key] = [valuetype, self.__b64encode(value, True).decode(self.encoding)]
else:
raise ValueError("value is not in bytes data type")
else:
raise ValueError("Unsupported data type")
else:
raise ValueError("Unsupported data type")
else:
ValueError("A value is already assigned to the key. Use update() instead.")
def update(self, key, value):
"""
Update an existing variable.
:param str key: The | |
"""
lakeFS API
lakeFS HTTP API # noqa: E501
The version of the OpenAPI document: 0.1.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from lakefs_client.api_client import ApiClient, Endpoint as _Endpoint
from lakefs_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from lakefs_client.model.branch_creation import BranchCreation
from lakefs_client.model.diff_list import DiffList
from lakefs_client.model.error import Error
from lakefs_client.model.ref import Ref
from lakefs_client.model.ref_list import RefList
from lakefs_client.model.reset_creation import ResetCreation
from lakefs_client.model.revert_creation import RevertCreation
class BranchesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_branch(
self,
repository,
branch_creation,
**kwargs
):
"""create branch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_branch(repository, branch_creation, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch_creation (BranchCreation):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch_creation'] = \
branch_creation
return self.call_with_http_info(**kwargs)
self.create_branch = _Endpoint(
settings={
'response_type': (str,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/branches',
'operation_id': 'create_branch',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'repository',
'branch_creation',
],
'required': [
'repository',
'branch_creation',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'branch_creation':
(BranchCreation,),
},
'attribute_map': {
'repository': 'repository',
},
'location_map': {
'repository': 'path',
'branch_creation': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/html',
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_branch
)
def __delete_branch(
self,
repository,
branch,
**kwargs
):
"""delete branch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_branch(repository, branch, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch'] = \
branch
return self.call_with_http_info(**kwargs)
self.delete_branch = _Endpoint(
settings={
'response_type': None,
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/branches/{branch}',
'operation_id': 'delete_branch',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'repository',
'branch',
],
'required': [
'repository',
'branch',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'branch':
(str,),
},
'attribute_map': {
'repository': 'repository',
'branch': 'branch',
},
'location_map': {
'repository': 'path',
'branch': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__delete_branch
)
def __diff_branch(
self,
repository,
branch,
**kwargs
):
"""diff branch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.diff_branch(repository, branch, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch (str):
Keyword Args:
after (str): return items after this value. [optional]
amount (int): how many items to return. [optional] if omitted the server will use the default value of 100
prefix (str): return items prefixed with this value. [optional]
delimiter (str): delimiter used to group common prefixes by. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DiffList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch'] = \
branch
return self.call_with_http_info(**kwargs)
self.diff_branch = _Endpoint(
settings={
'response_type': (DiffList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/branches/{branch}/diff',
'operation_id': 'diff_branch',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'repository',
'branch',
'after',
'amount',
'prefix',
'delimiter',
],
'required': [
'repository',
'branch',
],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'branch':
(str,),
'after':
(str,),
'amount':
(int,),
'prefix':
(str,),
'delimiter':
(str,),
},
'attribute_map': {
'repository': 'repository',
'branch': 'branch',
'after': 'after',
'amount': 'amount',
'prefix': 'prefix',
'delimiter': 'delimiter',
},
'location_map': {
'repository': 'path',
'branch': 'path',
'after': 'query',
'amount': 'query',
'prefix': 'query',
'delimiter': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__diff_branch
)
def __get_branch(
self,
repository,
branch,
**kwargs
):
"""get branch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_branch(repository, branch, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is | |
# source repository: eberharf/cfl
import pickle
import json
import os
import numpy as np
from cfl.dataset import Dataset
from cfl.block import Block
import cfl.density_estimation_methods as cdem
import cfl.cluster_methods as ccm
from cfl import intervention_rec
'''
Methods in Experiment Class:
train()
predict()
__save_results()
__save_params()
__load_params()
add_dataset()
get_dataset()
load_dataset_results()
__build_block()
__make_exp_dir()
__propagate_verbosity()
'''
# TODO: this is a placeholder until we have a block registration system.
# NOTE: the keys of this dictionary are passed as part of the 'block_names'
# list. They are different than the names of the attributes in each block's
# self.name attribute
# NOTE(review): the 'CDE' entry was corrupted in the source ("<KEY>");
# restored to the conditional-density-estimation entry point exported by
# cfl.density_estimation_methods -- TODO confirm the exact class name
# against the package before relying on this mapping.
BLOCK_KEY = {'CDE': cdem.CondDensityEstimator,
             'CauseClusterer': ccm.CauseClusterer,
             'EffectClusterer': ccm.EffectClusterer}
# TODO: maybe change this so that instead of calling clusterer, 'Kmeans',
# 'DBSCAN' and 'SNN' are registered as cluster methods
class Experiment():
'''The Experiment class:
- Creates a pipeline to pass data through the different Blocks of CFL
- Save parameters, models, results of the pipeline for reuse
'''
def __init__(self, data_info, X_train, Y_train, X_train_raw=None,
Y_train_raw=None, past_exp_path=None, block_names=None,
block_params=None, blocks=None, verbose=1, results_path=''):
'''
Sets up and trains an Experiment.
Arguments:
X_train (np.array) : an (n_samples, n_x_features) 2D array.
Y_train (np.array) : an (n_samples, n_y_features) 2D array.
data_info (dict) : a dictionary of information about this Experiment's
associated data. Refer to
cfl.block.validate_data_info() for
more information.
past_exp_path (str) : path to directory associated with a previously
trained Experiment.
block_names (list of strs) : list of block names to use (i.e. `['CondExpVB', 'KMeans']`).
Full list of names can be found here: <TODO>.
block_params (list of dicts) : list of dicts specifying parameters for each block specified
in block_names. Default is None.
blocks (list of Blocks): list of block objects. Default is None.
verbose (int): Amount of output to print. Possible values are 0, 1, 2. Default is 1.
results_path (str) : path to directory to save this experiment to.
Default is `''`.
Note: There are three ways to specify Blocks:
1) specify `past_exp_path`
2) specify both `block_names` and `block_params`
3) specify `blocks`.
Do not specify all four of these parameters.
'''
self.verbose = verbose
# OPTION 1 for Experiment initialization: load from path.
# if loading from past experiment, make sure no other block
# specifications are provided
if past_exp_path is not None:
assert (block_names is None), 'block_names should not be specified.'
assert (block_params is None), 'block_params should not be specified.'
assert (blocks is None), 'blocks should not be specified.'
# load in block names and params
print(f'Loading in Experiment from {past_exp_path}')
block_names, block_params = self.__load_params(os.path.join(past_exp_path, 'params'))
# OPTION 2 for Experiment initialization: create blocks from strings.
# make sure block names and params are both provided, and that
# blocks is left unpopulated
elif (block_names is not None) or (block_params is not None):
assert (block_names is not None), 'block_names should be specified.'
assert (block_params is not None), 'block_params should be specified.'
assert (blocks is None), 'blocks should not be specified.'
self.is_trained = False
# add verbosity to params that don't specify
# removed because was causing problems with some sklearn models that don't have a verbose param
# block_params = self.__propagate_verbosity(self.verbose, block_params)
#OPTION 3 for Experiment initialization: blocks pre-created.
# make sure that only blocks is provided.
elif blocks is not None:
assert (block_names is None), 'block_names should not be specified.'
assert (block_params is None), 'block_params should not be specified.'
for block in self.blocks:
assert isinstance(block, Block), \
'A specified block is not of type Block.'
self.is_trained = False
# make sure one of the three Experiment definitions is supplied
assert (past_exp_path is not None) or \
((block_names is not None) and (block_params is not None)) or \
(blocks is not None), 'Must provide one of the Experiment definitions.'
# build and track training dataset
# Note: explicitly stating one dataset for training as an Experiment
# attribute enforces the definition that an Experiment is a unique
# configuration of a trained CFL.
self.data_info = data_info
self.datasets = {}
self.dataset_train = self.add_dataset(X=X_train, Y=Y_train, \
dataset_name='dataset_train', \
Xraw=X_train_raw, \
Yraw=Y_train_raw)
self.datasets[self.dataset_train.get_name()] = self.dataset_train
# build experiment directory
self.save_path = self.__make_exp_dir(results_path)
# build blocks from names and params if blocks not provided (ie in Options 1
# or 2)
if blocks is None:
blocks = []
for bn,bp in zip(block_names, block_params): # data_info
blocks.append(self.__build_block(bn,bp))
# load in trained block info if past experiment provided
if past_exp_path is not None:
for block in blocks:
fn = os.path.join(past_exp_path, 'trained_blocks', block.get_name())
block.load_block(fn)
self.is_trained = True
# TODO: check that interfaces match
# TODO: assert in the function itself so we can give more info
# about what exactly is incompatible
# assert self.check_blocks_compatibility(), 'Specified blocks are incompatible'
# save configuration parameters for each block
self.blocks = blocks
self.block_names = block_names
self.block_params = block_params
self.__save_params()
def train(self, dataset=None, prev_results=None):
    """Train the CFL pipeline.

    Blocks are trained in order; each Block's results dict is handed to
    the next Block as its `prev_results`.

    Arguments:
        dataset : dataset name or object, or None to use the
            'dataset_train' Dataset registered at initialization.
            (str, Dataset, or None)
        prev_results : dict of results to pass to first Block to be
            trained, if needed. (dict or None)
    Returns:
        all_results : dict of results dicts from all Blocks. (dict dict)
    Raises:
        Exception : if this Experiment has already been trained.
    """
    # Guard clause: a trained Experiment must not be retrained.
    if self.is_trained:
        raise Exception('This Experiment has already been trained. ' + \
            'If you would like to use a new Dataset for training, ' + \
            'please create a new Experiment.')
    if self.verbose > 0:
        print(20*'#' + ' Beginning CFL Experiment training. ' + 20*'#')
    # validate argument types
    assert isinstance(dataset, (type(None), Dataset, str)), \
        'dataset should be None, or of type Dataset or str.'
    assert isinstance(prev_results, (type(None), dict)), \
        'prev_results should be None or a dict'
    # resolve which dataset to train on
    if dataset is None:
        # default: the Dataset registered at initialization
        dataset = self.get_dataset('dataset_train')
    elif isinstance(dataset, str):
        # a name was given; warn when it is not the canonical training set
        if dataset != 'dataset_train':
            if self.verbose > 0:
                print('Warning: you are not using the dataset_train ' + \
                    'Dataset specified in Experiment initialization for ' + \
                    'training the CFL pipeline.')
        dataset = self.get_dataset(dataset)
    else:
        # a Dataset object was passed in directly
        if self.verbose > 0:
            print('Warning: by specifying your own Dataset for ' + \
                'training, you may not be using the same data as ' + \
                'specified for training in Experiment initialization.')
    all_results = {}
    # main logic: train each block in sequence, chaining results forward
    for block in self.blocks:
        if self.verbose > 0:
            print(f'Beginning {block.get_name()} training...')
        results = block.train(dataset, prev_results)
        all_results[block.get_name()] = results
        # persist this Block's results and its trained state
        self.__save_results(results, dataset, block)
        fn = os.path.join(self.save_path, 'trained_blocks', block.get_name())
        block.save_block(fn)
        # hand this Block's results to the next Block
        prev_results = results
        if self.verbose > 0:
            print(f'{block.get_name()} training complete.')
    self.is_trained = True
    dataset.set_cfl_results(all_results)
    if self.verbose > 0:
        print('Experiment training complete.')
    return all_results
def predict(self, dataset, prev_results=None):
    """Predict using the trained CFL pipeline.

    Arguments:
        dataset (str or Dataset) : dataset name or object.
        prev_results (dict) : dict of results to pass to first Block to
            predict with, if needed.
    Returns:
        (dict of dicts) : dict of results dictionaries from all Blocks.
    """
    if self.verbose > 0:
        print('Beginning Experiment prediction.')
    # validate argument types
    assert isinstance(dataset, (type(None), Dataset, str)), \
        'dataset should be None, or of type Dataset or str.'
    assert isinstance(prev_results, (type(None), dict)), \
        'prev_results should be None or a dict'
    # a name resolves to a registered Dataset; an object is used as-is
    if isinstance(dataset, str):
        dataset = self.get_dataset(dataset)
    # every Block must be trained before prediction can start
    for idx, blk in enumerate(self.blocks):
        assert blk.is_trained, 'Block {} has not been trained yet.'.format(idx)
    pipeline_results = {}
    for blk in self.blocks:
        if self.verbose > 0:
            print(f'Beginning {blk.get_name()} prediction...')
        block_results = blk.predict(dataset, prev_results)
        pipeline_results[blk.get_name()] = block_results
        # persist this Block's prediction results
        self.__save_results(block_results, dataset, blk)
        # chain results into the next Block
        prev_results = block_results
        if self.verbose > 0:
            print(f'{blk.get_name()} prediction complete.')
    dataset.set_cfl_results(pipeline_results)
    if self.verbose > 0:
        print('Prediction complete.')
    return pipeline_results
def add_dataset(self, X, | |
the type returned by ctx.Lock()?
self.data_blocks: typing.List[SharedMemory] = []
block_id: int
for block_id in range(maxsize):
self.data_blocks.append(SharedMemory(create=True, size=self.__class__.META_BLOCK_SIZE + self.chunk_size))
self.add_free_block(block_id)
def __getstate__(self):
    """Capture the queue's picklable state for transfer to a forked/spawned process.

    Members that pickle cannot handle directly (the serializer, the list
    heads, and the shared-memory data blocks) are pre-encoded with dill;
    __setstate__ decodes them on the other side.
    """
    return (self.qid,
        self.verbose,
        self.chunk_size,
        self.maxsize,
        dill.dumps(self.serializer),
        self.integrity_check,
        self.deadlock_check,
        self.deadlock_immanent_check,
        self.watermark_check,
        self.chunk_watermark,
        self.mid_counter,
        self.producer_lock,
        self.free_list_lock,
        self.msg_list_lock,
        self.use_semaphores,
        self.free_list_semaphore,
        self.msg_list_semaphore,
        dill.dumps(self.list_heads),
        self.block_locks,
        dill.dumps(self.data_blocks))
def __setstate__(self, state):
    """Restore the queue's state in a newly forked/spawned process.

    Counterpart of __getstate__: the tuple is unpacked field-by-field,
    then the dill-encoded members are decoded back into live objects.
    (The previous docstring said this "saves" queue information; it
    actually restores it.)
    """
    (self.qid,
        self.verbose,
        self.chunk_size,
        self.maxsize,
        self.serializer,
        self.integrity_check,
        self.deadlock_check,
        self.deadlock_immanent_check,
        self.watermark_check,
        self.chunk_watermark,
        self.mid_counter,
        self.producer_lock,
        self.free_list_lock,
        self.msg_list_lock,
        self.use_semaphores,
        self.free_list_semaphore,
        self.msg_list_semaphore,
        self.list_heads,
        self.block_locks,
        self.data_blocks) = state
    # These three fields arrive as dill-encoded bytes; decode them in place.
    self.list_heads = dill.loads(self.list_heads)
    self.data_blocks = dill.loads(self.data_blocks)
    self.serializer = dill.loads(self.serializer)
def get_list_head_field(self, lh: int, type_: str)->int:
    """int: Read one field of a list head stored in shared memory.

    Args:
        lh (int): The index of the list head in the list head shared memory.
        type_ (str): The name of the list head field.
    Raises:
        ValueError: If type_ is not a known list-head field name.
    """
    entry = self.__class__.LIST_HEAD_STRUCT.get(type_, (None, None, None))
    addr_s, addr_e, ctype = entry
    if addr_s is None or addr_e is None or ctype is None:
        raise ValueError("get_list_head_field: unrecognized %s" % repr(type_))
    # Offset of this list head within the shared list-head area.
    base = self.__class__.LIST_HEAD_SIZE * lh
    raw = self.list_heads.buf[base + addr_s : base + addr_e]
    return struct.unpack(ctype, raw)[0]
def set_list_head_field(self, lh: int, data: int, type_: str):
    """Write one field of a list head stored in shared memory.

    Args:
        lh (int): The index of the list head in the list head shared memory.
        data (int): The value to store in the field.
        type_ (str): The name of the list head field.
    Raises:
        ValueError: If type_ is not a known list-head field name.
    """
    addr_s: typing.Optional[int]
    addr_e: typing.Optional[int]
    ctype: typing.Optional[str]
    addr_s, addr_e, ctype = self.__class__.LIST_HEAD_STRUCT.get(type_, (None, None, None))
    if addr_s is None or addr_e is None or ctype is None:
        # Bug fix: this error previously reported "get_list_head_field".
        raise ValueError("set_list_head_field: unrecognized %s" % repr(type_))
    # TODO: find a better way to calm mypy's annoyance at the following:
    self.list_heads.buf[(self.__class__.LIST_HEAD_SIZE * lh) + addr_s : (self.__class__.LIST_HEAD_SIZE * lh) + addr_e] = struct.pack(ctype, data) #type: ignore
def get_meta(self, block: SharedMemory, type_: str)->typing.Union[bytes, int]:
    """typing.Union[bytes, int]: Read one metadata field of a data block in shared memory.

    Args:
        block (SharedMemory): The shared memory for the data block.
        type_ (str): The name of the metadata field to extract.
    Raises:
        ValueError: If type_ is not a known metadata field name.
    """
    spec = self.__class__.META_STRUCT.get(type_, (None, None, None))
    start, end, fmt = spec
    if start is None or end is None or fmt is None:
        raise ValueError("get_meta: unrecognized %s" % repr(type_))
    return struct.unpack(fmt, block.buf[start : end])[0]
def set_meta(self, block: SharedMemory, data, type_: str):
    """Write one metadata field of a data block in shared memory.

    Args:
        block (SharedMemory): The shared memory for the data block.
        data: The value to pack into the field.
        type_ (str): The name of the metadata field to write.
    Raises:
        ValueError: If type_ is not a known metadata field name.
    """
    spec = self.__class__.META_STRUCT.get(type_, (None, None, None))
    start, end, fmt = spec
    if start is None or end is None or fmt is None:
        raise ValueError("set_meta: unrecognized %s" % repr(type_))
    # mypy cannot type the memoryview slice assignment; silenced as elsewhere.
    block.buf[start : end] = struct.pack(fmt, data)  # type: ignore
def get_data(self, block: SharedMemory, data_size: int)->memoryview:
    """memoryview: Get a zero-copy memoryview slice of a shared memory data block.

    The slice starts just past the metadata header (META_BLOCK_SIZE) and
    covers data_size bytes of the payload area.

    Args:
        block (SharedMemory): The shared memory block.
        data_size (int): The number of bytes in the returned memoryview slice."""
    return block.buf[self.__class__.META_BLOCK_SIZE:self.__class__.META_BLOCK_SIZE+data_size]
def set_data(self, block: SharedMemory, data: bytes, data_size: int):
    """Copy payload bytes into a data block, just past the metadata header.

    Args:
        block (SharedMemory): The shared memory block to write into.
        data (bytes): The payload; must be exactly data_size bytes (memoryview
            slice assignment requires matching lengths).
        data_size (int): Number of bytes to write."""
    # TODO: find a better way to calm mypy's annoyance at the following:
    block.buf[self.__class__.META_BLOCK_SIZE:self.__class__.META_BLOCK_SIZE+data_size] = data # type: ignore
def init_list_head(self, lh: int):
    """Reset a block list to the empty state.

    Clears the block count and points first_block/last_block at the
    reserved value that marks them as void pointers.

    Args:
        lh (int): The index of the list head in the list head shared memory area."""
    void_ptr = self.__class__.RESERVED_BLOCK_ID
    self.set_list_head_field(lh, 0, 'block_count')
    self.set_list_head_field(lh, void_ptr, 'first_block')
    self.set_list_head_field(lh, void_ptr, 'last_block')
def get_block_count(self, lh: int)->int:
    """int: Get the count of blocks queued in a block list.

    Args:
        lh (int): The index of the list head in the list head shared memory area.
    """
    count: int = self.get_list_head_field(lh, 'block_count')
    return count
def get_first_block(self, lh: int)->typing.Optional[int]:
    """Get the first block on a block list, updating the list head fields.

    NOTE(review): callers appear to hold the corresponding list lock
    (free_list_lock / msg_list_lock) around this call — confirm before
    adding new call sites.

    Args:
        lh (int): The index of the list head in the list head shared memory area.
    Returns:
        None: No block is available
        int: The block_id of the first available block.
    """
    block_count: int = self.get_block_count(lh)
    if block_count == 0:
        return None
    block_id: int = self.get_list_head_field(lh, 'first_block')
    block_count -= 1
    if block_count == 0:
        # List became empty: reset head/tail to the reserved void value.
        self.init_list_head(lh)
    else:
        # Follow the popped block's link to find the new first block.
        with self.block_locks[block_id]:
            maybe_next_block_id: typing.Union[bytes, int] = self.get_meta(self.data_blocks[block_id], 'next_block_id')
            if isinstance(maybe_next_block_id, int):
                next_block_id: int = maybe_next_block_id
            else:
                raise ValueError("get_first_block internal error: next_block_id is not int.")
            self.set_list_head_field(lh, next_block_id, 'first_block')
            self.set_list_head_field(lh, block_count, 'block_count')
    return block_id
def add_block(self, lh: int, block_id: int):
    """Add a block to the tail of a block list.

    Args:
        lh (int): The index of the list head in the list head shared memory area.
        block_id (int): The identifier of the block to append.
    """
    block_count: int = self.get_list_head_field(lh, 'block_count')
    if block_count == 0:
        # Empty list: the new block becomes both head and tail.
        self.set_list_head_field(lh, block_id, 'first_block')
        self.set_list_head_field(lh, block_id, 'last_block')
        self.set_list_head_field(lh, 1, 'block_count')
    else:
        # Link the current tail to the new block, then advance the tail.
        last_block: int = self.get_list_head_field(lh, 'last_block')
        with self.block_locks[last_block]:
            self.set_meta(self.data_blocks[last_block], block_id, 'next_block_id')
        self.set_list_head_field(lh, block_id, 'last_block')
        self.set_list_head_field(lh, block_count + 1, 'block_count')
def get_free_block_count(self)->int:
    """int: Get the number of blocks currently on the free list."""
    with self.free_list_lock:
        count = self.get_block_count(self.__class__.FREE_LIST_HEAD)
    return count
def get_first_free_block(self, block: bool, timeout: typing.Optional[float])->typing.Optional[int]:
    """Get the first free block.

    When using semaphores, optionally block with an optional timeout. If
    you choose to block without a timeout, the method will not return until
    a free block is available.

    Args:
        block (bool): When True, and when using semaphores, wait until a
            free block is available or a timeout occurs.
        timeout (typing.Optional[float]): When block is True and timeout is
            positive, block for at most timeout seconds attempting to acquire
            the free block.
    Returns:
        None: No block is available
        int: The block_id of the first available block.
    """
    if self.free_list_semaphore is not None:
        # NOTE(review): the boolean result of acquire() is ignored; on a
        # timed-out acquire we still fall through, and get_first_block
        # simply returns None if the list is empty — confirm intended.
        self.free_list_semaphore.acquire(block=block, timeout=timeout)
    with self.free_list_lock:
        return self.get_first_block(self.__class__.FREE_LIST_HEAD)
def add_free_block(self, block_id: int):
    """Return a block to the free block list.

    Args:
        block_id (int): The identifier of the block being returned.
    """
    with self.free_list_lock:
        self.add_block(self.__class__.FREE_LIST_HEAD, block_id)
    # Signal waiters only after the block is actually linked in.
    sem = self.free_list_semaphore
    if sem is not None:
        sem.release()
def get_msg_count(self)->int:
    """int: Get the number of messages currently on the message list."""
    with self.msg_list_lock:
        count = self.get_block_count(self.__class__.MSG_LIST_HEAD)
    return count
def get_first_msg(self, block: bool, timeout: typing.Optional[float])->typing.Optional[int]:
    """Take the first available message, if any, from the available message list.

    When using semaphores, optionally block with an optional timeout. If
    you choose to block without a timeout, the method will not return until
    a message is available.

    Args:
        block (bool): When True, and when using semaphores, wait until a
            message is available or a timeout occurs.
        timeout (typing.Optional[float]): When block is True and timeout is
            positive, block for at most timeout seconds attempting to acquire
            the message.
    Returns:
        None: No message is available
        int: The block_id of the first chunk of the first available message.
    """
    if self.msg_list_semaphore is not None:
        # NOTE(review): acquire()'s result is ignored here as well; a
        # timed-out acquire falls through to an empty-list None — confirm.
        self.msg_list_semaphore.acquire(block=block, timeout=timeout)
    with self.msg_list_lock:
        return self.get_first_block(self.__class__.MSG_LIST_HEAD)
def add_msg(self, block_id: int):
    """Add a message to the available message list.

    Args:
        block_id (int): The block identifier of the first chunk of the message.
    """
    with self.msg_list_lock:
        self.add_block(self.__class__.MSG_LIST_HEAD, block_id)
    # Wake a waiting consumer only after the message is linked in.
    sem = self.msg_list_semaphore
    if sem is not None:
        sem.release()
def generate_msg_id(self)->bytes:
    """bytes: Peek at the next message identifier without consuming it.

    Note:
        Message IDs are assigned independently by each process using the queue.
        They need to be paired with the source process ID to be used to identify
        a message for debugging.
    """
    return format(self.mid_counter + 1, '012x').encode('utf-8')
def consume_msg_id(self):
    """Advance the message-identifier counter.

    Note:
        Message identifiers are consumed when we are certain that we can
        process the message. They will not be consumed if we start to
        process a message but fail due to a condition such as
        insufficient free buffers.
    """
    self.mid_counter = self.mid_counter + 1
def next_writable_block_id(self, block: bool, timeout: typing.Optional[float], msg_id: bytes, src_pid: int)->int:
"""int: Get the block ID of the first free block.
Get the block ID of the first free block, supporting
blocking/nonblocking modes and timeouts when blocking, even when
semaphores are not being used. Store int he block's metadata area the
message ID for the message we are building and the pid of the process
acquiring the block.
Args:
block (bool): When True, and when using semaphores, wait until an
free block is available or a timeout occurs.
timeout (typing.Optional[float]): When block is True and timeout is
positive, block for at most timeout seconds attempting to acquire
the free block.
msg_id (bytes): The message ID assigned to the message being built.
src_pid: The process ID (pid) of the process that is acquiring the block.
Raises:
queue.Full: No block is available. Full is raised immediately in nonblocking
mode, | |
4], dtype=np.int32)
png_bytes = raster.to_png(transparent=True, thumbnail_size=512)
w = recwarn.pop(GeoRaster2Warning)
assert str(w.message) == "Limiting %d bands raster to first three bands to generate png" % raster.num_bands
w = recwarn.pop(GeoRaster2Warning)
assert str(w.message) == "downscaling dtype to 'uint8' to convert to png"
img = Image.frombytes('RGBA', (raster.width, raster.height), png_bytes)
expected_image_size = raster.limit_to_bands([1, 2]).astype(np.uint8).to_pillow_image().size
assert img.size == expected_image_size
def test_to_raster_to_world():
    # Round-trip: image point -> world coordinates -> back to image point.
    pt = Point(1, 1)
    in_world = some_raster.to_world(pt)
    assert pytest.approx(in_world.get_shape(in_world.crs).x) == raster_origin.x + pt.x
    assert pytest.approx(in_world.get_shape(in_world.crs).y) == raster_origin.y + pt.y
    back_in_image = some_raster.to_raster(in_world)
    assert back_in_image.equals_exact(pt, tolerance=5e-07)
def test_corner_invalid():
    # An unknown corner name must raise GeoRaster2Error with a helpful message.
    with pytest.raises(GeoRaster2Error) as error:
        some_raster.corner('foo')
    assert '%s' % error.value == "corner foo invalid, expected: ['ul', 'ur', 'br', 'bl']"
def test_corner():
    # Each corner in image coordinates maps to the origin-offset world point.
    expected_image_corners = {
        'ul': Point(0, 0),
        'ur': Point(some_raster.width, 0),
        'bl': Point(0, some_raster.height),
        'br': Point(some_raster.width, some_raster.height),
    }
    expected_corners = {
        'ul': Point(raster_origin.x + 0, raster_origin.y + 0),
        'ur': Point(raster_origin.x + some_raster.width, raster_origin.y + 0),
        'bl': Point(raster_origin.x + 0, raster_origin.y + some_raster.height),
        'br': Point(raster_origin.x + some_raster.width, raster_origin.y + some_raster.height),
    }
    for corner in GeoRaster2.corner_types():
        assert some_raster.corner(corner).almost_equals(GeoVector(expected_corners[corner], some_raster.crs))
        assert some_raster.image_corner(corner).equals_exact(expected_image_corners[corner], tolerance=5e-07)
def test_center():
    # The raster center is the midpoint of the ul and br corners.
    ul = some_raster.corner('ul').get_shape(some_raster.crs)
    br = some_raster.corner('br').get_shape(some_raster.crs)
    expected_center = Point((ul.x + br.x) / 2, (ul.y + br.y) / 2)
    expected_center_vector = GeoVector(expected_center, some_raster.crs)
    assert expected_center_vector.equals_exact(some_raster.center(), tolerance=5e-07)
def test_bounds():
    # bounds() is the full raster rectangle in image (pixel) coordinates.
    expected = Polygon([[0, 0],
        [some_raster.width, 0],
        [some_raster.width, some_raster.height],
        [0, some_raster.height]])
    assert some_raster.bounds().equals_exact(expected, tolerance=5e-07)
def test_footprint():
    # footprint() is the bounds rectangle translated to world coordinates.
    expected_shp = Polygon([[raster_origin.x + 0, raster_origin.y + 0],
        [raster_origin.x + some_raster.width, raster_origin.y + 0],
        [raster_origin.x + some_raster.width, raster_origin.y + some_raster.height],
        [raster_origin.x + 0, raster_origin.y + some_raster.height]])
    expected = GeoVector(expected_shp, some_raster.crs)
    assert some_raster.footprint().almost_equals(expected)
def test_area():
    # Scaling the affine by 2 scales the area by 2**2.
    scale = 2
    raster = some_raster.copy_with(affine=some_raster.affine * Affine.scale(scale))
    expected = raster.width * raster.height * (scale ** 2)
    assert pytest.approx(expected, .01) == raster.area()
def test_reduce():
    # Each raster reduction must agree with the numpy reduction of its values.
    for op in ['min', 'max', 'sum', 'mean', 'var', 'std']:
        expected = getattr(np, op)([0, 1, 2, 3, 4])
        actual = getattr(some_raster, op)()[0]
        assert expected == actual
def test_histogram():
    # The raster holds 5 distinct values, so exactly 5 non-zero bins.
    hist = some_raster.histogram()
    assert np.count_nonzero(hist['r']) == 5
    # NOTE(review): `hist.length` implies histogram() returns a custom type with
    # a `length` attribute rather than a plain dict — confirm against the impl.
    assert hist.length == 256
def test_invert():
    # ~ flips the mask everywhere, and double inversion is the identity.
    assert ((~some_raster).image.mask != some_raster.image.mask).all()
    assert ~~some_raster == some_raster
def test_mask():
    # Masking by the left-quadrant vector keeps that quadrant and masks the rest.
    image = np.ma.masked_array(np.random.uniform(size=(3, 4, 5)).astype(np.uint8), np.full((3, 4, 5), False))
    raster = some_raster.deepcopy_with(image=image, band_names=['r', 'g', 'b'])
    w, h = raster.width // 2, raster.height // 2
    left_quadrant = Polygon([[0, 0], [0, h], [w, h], [w, 0]])
    vector = raster.to_world(left_quadrant, dst_crs=raster.crs)
    masked = raster.mask(vector)
    assert not masked.image[:, :w, :h].mask.any()
    assert masked.image[:, w:, h:].mask.all()
    # masking by GeoFeature
    feature = GeoFeature(vector, properties={'prop1': 1, 'prop2': 2})
    masked = raster.mask(feature)
    assert not masked.image[:, :w, :h].mask.any()
    assert masked.image[:, w:, h:].mask.all()
    # raster masked "inside shape" should be inverse to raster masked "outside shape":
    assert raster.mask(vector, mask_shape_nodata=True) == ~raster.mask(vector)
def test_mask_by_value():
    # Only pixels equal to the given value (1) get masked, across all bands.
    expected_mask = np.array([
        [[False, True, False], [False, False, False]],
        [[False, True, False], [False, False, False]],
        [[False, True, False], [False, False, False]]], dtype=bool)
    assert np.array_equal(expected_mask, some_raster_multiband.mask_by_value(1).image.mask)
def test_get_item():
    # 2-D slicing crops the raster; a 1-D slice is rejected.
    raster = some_raster.resize(2)
    assert raster[:, :] == raster
    assert (raster[1:-1, :].width, raster[1:-1, :].height) == (raster.width - 2, raster.height)
    assert (raster[:, 1:-1].width, raster[:, 1:-1].height) == (raster.width, raster.height - 2)
    assert raster[1:, 1:].corner('br').almost_equals(raster.corner('br'))
    with pytest.raises(GeoRaster2Error):
        raster[1:-1]
def test_slicing_negative_bounds_raises_warning_and_cutsoff_by_zero(recwarn):
    # Negative slice bounds are clamped to zero with a warning, not honored.
    negative_slice = some_raster[-1:, -1:]
    w = recwarn.pop(GeoRaster2Warning)
    assert str(w.message) == "Negative indices are not supported and were rounded to zero"
    assert negative_slice == some_raster
def test_resolution():
    # Resolution is the geometric mean of the x and y pixel scales.
    raster = some_raster.deepcopy_with(affine=Affine.scale(2, 3))
    assert raster.resolution() == np.sqrt(2 * 3)
def test_empty_from_roi():
    # An empty raster built from an ROI inherits its footprint, resolution,
    # CRS, band names, and dtype.
    roi = GeoVector(
        Polygon.from_bounds(12.36, 42.05, 12.43, 42.10),
        WGS84_CRS
    ).reproject(WEB_MERCATOR_CRS)
    resolution = 20.0
    band_names = ["a", "b", "c"]
    some_dtype = np.uint16
    empty = GeoRaster2.empty_from_roi(roi, resolution, band_names, some_dtype)
    # Cannot compare approximate equality of polygons because the
    # topology might be different, see https://github.com/Toblerity/Shapely/issues/535
    # (Getting the envelope seems to fix the problem)
    # Also, reprojecting the empty footprint to WGS84 permits using
    # a positive number of decimals (the relative error is of course
    # the same)
    assert empty.footprint().reproject(WGS84_CRS).envelope.almost_equals(roi.envelope, decimal=3)
    assert empty.resolution() == resolution
    assert empty.crs == roi.crs
    assert empty.band_names == band_names
    assert empty.dtype == some_dtype
    # Negative determinant: the y axis points down in raster space.
    assert empty.affine.determinant == -1 * resolution * resolution
def test_georaster_contains_geometry():
    # `in` holds for the ROI itself and a shrunken ROI, not an enlarged one.
    roi = GeoVector(
        Polygon.from_bounds(12.36, 42.05, 12.43, 42.10),
        WGS84_CRS
    ).reproject(WEB_MERCATOR_CRS)
    resolution = 20.0
    empty = GeoRaster2.empty_from_roi(roi, resolution)
    assert roi in empty
    assert roi.buffer(-1) in empty
    assert roi.buffer(1) not in empty
def test_empty_from_roi_respects_footprint():
    # See https://github.com/satellogic/telluric/issues/39
    # Both construction styles (shape+ul_corner vs roi) must reproduce the
    # source raster's footprint.
    raster = GeoRaster2.open("tests/data/raster/overlap1.tif")
    empty = GeoRaster2.empty_from_roi(shape=raster.shape[1:][::-1], ul_corner=(v[0] for v in raster.corner('ul').xy),
        resolution=raster.res_xy(), crs=raster.crs,
        band_names=raster.band_names, dtype=raster.dtype)
    empty_simple = GeoRaster2.empty_from_roi(roi=raster.footprint(),
        resolution=raster.res_xy(),
        band_names=raster.band_names, dtype=raster.dtype)
    assert raster.footprint().almost_equals(empty.footprint())
    assert raster.footprint().almost_equals(empty_simple.footprint())
def test_astype_uint8_to_uint8_conversion():
    # astype to the same dtype must be an identity operation.
    raster_uint8 = make_test_raster(value=42, band_names=[1, 2], dtype=np.uint8)
    raster_uint8_copy = raster_uint8.astype(np.uint8)
    assert raster_uint8 == raster_uint8_copy
def test_astype_uint8_to_uint16_conversion():
    # Dtype-range stretching maps uint8 onto uint16: factor 257 == 65535 / 255.
    raster_uint8 = make_test_raster(value=42, band_names=[1, 2], dtype=np.uint8)
    raster_uint16 = raster_uint8.astype(np.uint16)
    expected_raster_uint16 = make_test_raster(257 * 42, band_names=[1, 2], dtype=np.uint16)
    assert raster_uint16 == expected_raster_uint16
def test_astype_uint16_to_uint8_conversion():
    # Downscaling: 128 * 42 == 5376 in uint16 stretches down to 20 in uint8.
    expected_raster_uint8 = make_test_raster(value=20, band_names=[1, 2], dtype=np.uint8)
    raster_uint16 = make_test_raster(128 * 42, band_names=[1, 2], dtype=np.uint16)
    raster_uint8 = raster_uint16.astype(np.uint8)
    assert raster_uint8 == expected_raster_uint8
def test_astype_uint8_to_uint8_roundtrip():
    # Up-converting then down-converting must restore the original raster.
    raster_uint8 = make_test_raster(value=20, band_names=[1, 2], dtype=np.uint8)
    assert raster_uint8 == raster_uint8.astype(np.uint16).astype(np.uint8)
def test_astype_uint8_to_int32_conversion():
    # int32 is signed, so the stretch re-centers the uint8 midpoint (127.5) at zero.
    raster_uint8 = make_test_raster(42, band_names=[1, 2], dtype=np.uint8)
    raster_int32 = raster_uint8.astype(np.int32)
    expected_value = (2**32 - 1) / (2**8 - 1) * (42 - 127.5) - 1
    expected_raster_int32 = make_test_raster(expected_value, band_names=[1, 2], dtype=np.int32)
    assert raster_int32 == expected_raster_int32
def test_astype_raises_error_for_missing_ranges():
    # in_range and out_range must be given together or both omitted.
    raster = make_test_raster(value=20, band_names=[1, 2], dtype=np.uint8)
    with pytest.raises(GeoRaster2Error) as excinfo:
        raster.astype(np.uint8, in_range=None, out_range='dtype')
    assert "Both ranges should be specified or none of them." in excinfo.exconly()
    with pytest.raises(GeoRaster2Error) as excinfo:
        raster.astype(np.uint8, in_range='dtype', out_range=None)
    assert "Both ranges should be specified or none of them." in excinfo.exconly()
def test_astype_raises_error_for_dtype_out_range_for_non_integer():
    # out_range='dtype' is meaningless for float targets and must be rejected.
    raster = make_test_raster(value=20, band_names=[1, 2], dtype=np.uint8)
    with pytest.raises(GeoRaster2Error) as excinfo:
        raster.astype(np.float32, out_range='dtype')
    assert "Value 'dtype' of out_range parameter is supported only for integer type." in excinfo.exconly()
def test_astype_float32_to_uint8_conversion():
    # float32 -> uint8 stretches to the full [0, 255] range (with a warning).
    with pytest.warns(GeoRaster2Warning):
        raster_uint8 = some_float32_raster.astype(np.uint8)
    expected_raster_uint8 = GeoRaster2(image=np.array([
        [[0, 51], [102, 153]],
        [[178, 204], [229, 255]]], dtype=np.uint8),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_uint8 == expected_raster_uint8
def test_astype_float32_to_uint8_conversion_with_out_range():
    # A custom out_range rescales the output into [100, 200] instead of [0, 255].
    with pytest.warns(GeoRaster2Warning):
        raster_uint8 = some_float32_raster.astype(np.uint8, out_range=(100, 200))
    expected_raster_uint8 = GeoRaster2(image=np.array([
        [[100, 120], [140, 160]],
        [[169, 180], [189, 200]]], dtype=np.uint8),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_uint8 == expected_raster_uint8
def test_astype_float32_to_uint8_conversion_with_in_range():
    # Values below in_range's lower bound clip to 0; the rest stretch linearly.
    raster_uint8 = some_float32_raster.astype(np.uint8, in_range=(0.5, 1.0))
    expected_raster_uint8 = GeoRaster2(image=np.array([
        [[0, 0], [0, 51]],
        [[101, 153], [203, 255]]], dtype=np.uint8),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_uint8 == expected_raster_uint8
def test_astype_float32_to_uint8_conversion_without_stretching():
    # With both ranges None, astype is a plain cast without value stretching.
    raster_float32 = make_test_raster(10.0, band_names=[1, 2], dtype=np.float32)
    raster_uint8 = raster_float32.astype(np.uint8, in_range=None, out_range=None)
    expected_raster_uint8 = make_test_raster(10, band_names=[1, 2], dtype=np.uint8)
    assert raster_uint8 == expected_raster_uint8
def test_astype_float32_to_int8_conversion():
    # float32 -> int8 stretches into the signed range [-128, 127].
    with pytest.warns(GeoRaster2Warning):
        raster_uint8 = some_float32_raster.astype(np.int8)
    expected_raster_uint8 = GeoRaster2(image=np.array([
        [[-128, -76], [-25, 25]],
        [[50, 76], [101, 127]]], dtype=np.int8),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_uint8 == expected_raster_uint8
def test_astype_float32_to_int8_conversion_with_clip_negative():
    # clip_negative=True restricts the stretched output to [0, 127].
    with pytest.warns(GeoRaster2Warning):
        raster_uint8 = some_float32_raster.astype(np.int8, clip_negative=True)
    expected_raster_uint8 = GeoRaster2(image=np.array([
        [[0, 25], [50, 76]],
        [[88, 101], [114, 127]]], dtype=np.int8),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_uint8 == expected_raster_uint8
def test_astype_float32_to_float16_conversion():
    # An explicit float out_range stretches float32 values into [0.0, 10.0].
    with pytest.warns(GeoRaster2Warning):
        raster_float16 = some_float32_raster.astype(np.float16, out_range=(0.0, 10.0))
    expected_raster_float16 = GeoRaster2(image=np.array([
        [[0.0, 2.0], [4.0, 6.0]],
        [[7.0, 8.0], [9.0, 10.0]]], dtype=np.float16),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_float16 == expected_raster_float16
def test_astype_float32_to_float16_conversion_without_stretching():
    # Ranges disabled: a direct float32 -> float16 cast of the source array.
    raster_float16 = some_float32_raster.astype(np.float16, in_range=None, out_range=None)
    expected_raster_float16 = GeoRaster2(
        some_float32_array.astype(np.float16),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_float16 == expected_raster_float16
def test_astype_uint8_to_float32_conversion():
    # Default in_range (dtype) maps uint8 values onto [0, 1] floats.
    raster_float32 = some_raster.astype(np.float32, out_range=(0, 1))
    expected_raster_float32 = GeoRaster2(image=np.ma.array(
        np.array([
            [0.0, 0.003921568859368563, 0.007843137718737125],
            [0.0117647061124444, 0.01568627543747425, 1.0]
        ], dtype=np.float32), mask=some_raster.image.mask),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_float32 == expected_raster_float32
def test_astype_uint8_to_float32_conversion_with_image_in_range():
    # in_range='image' stretches using the image's actual min/max instead of the dtype range.
    raster_float32 = some_raster.astype(np.float32, in_range='image', out_range=(0, 1))
    expected_raster_float32 = GeoRaster2(image=np.ma.array(
        np.array([
            [0.0, 0.25, 0.5],
            [0.75, 1.0, 1.0]
        ], dtype=np.float32), mask=some_raster.image.mask),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_float32 == expected_raster_float32
def test_astype_uint8_to_float32_conversion_with_custom_in_range():
    # ('min', 'max') behaves like in_range='image'; an explicit (2, 4) window
    # clips values below 2 to 0 and above 4 to 1.
    raster_float32 = some_raster.astype(np.float32, in_range=('min', 'max'), out_range=(0, 1))
    expected_raster_float32 = GeoRaster2(image=np.ma.array(
        np.array([
            [0.0, 0.25, 0.5],
            [0.75, 1.0, 1.0]
        ], dtype=np.float32), mask=some_raster.image.mask),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_float32 == expected_raster_float32
    raster_float32 = some_raster.astype(np.float32, in_range=(2, 4), out_range=(0, 1))
    expected_raster_float32 = GeoRaster2(image=np.ma.array(
        np.array([
            [0.0, 0.0, 0.0],
            [0.5, 1.0, 1.0]
        ], dtype=np.float32), mask=some_raster.image.mask),
        affine=some_float32_raster.affine, crs=some_float32_raster.crs, nodata=None)
    assert raster_float32 == expected_raster_float32
def test_astype_uint8_to_float32_conversion_without_stretching():
    # Ranges disabled: values are cast to float32 unchanged.
    raster_uint8 = make_test_raster(value=20, band_names=[1, 2], dtype=np.uint8)
    raster_float32 = raster_uint8.astype(np.float32, in_range=None, out_range=None)
    expected_raster_float32 = make_test_raster(value=20, band_names=[1, 2], dtype=np.float32)
    assert raster_float32 == expected_raster_float32
def test_png_thumbnail_has_expected_properties():
    # A PNG thumbnail decoded back into a raster must equal a plain resize.
    raster = GeoRaster2.open("tests/data/raster/rgb.tif")
    expected_thumbnail = raster.resize(dest_width=512, resampling=Resampling.nearest)
    result_thumbnail = GeoRaster2.from_bytes(
        raster.to_png(transparent=True, thumbnail_size=512, resampling=Resampling.nearest, in_range='image'),
        affine=expected_thumbnail.affine, crs=expected_thumbnail.crs, band_names=expected_thumbnail.band_names
    )
    assert result_thumbnail == expected_thumbnail
def test_destructor():
    # Rebinding `raster` drops the last reference to the temporary raster,
    # which should delete its backing file on finalization.
    # NOTE(review): relies on CPython's immediate refcount-based finalization — confirm.
    with NamedTemporaryFile(suffix='.tif', delete=False) as src:
        raster = GeoRaster2(filename=src.name, temporary=True)
        raster = some_raster
        assert not os.path.isfile(src.name)
def test_save_temporary():
    # Saving a temporary raster promotes it to non-temporary and removes the
    # original temporary file.
    with NamedTemporaryFile(suffix='.tif', delete=False) as src, NamedTemporaryFile(suffix='.tif') as dst:
        # create valid raster file
        some_raster.save(src.name)
        raster = GeoRaster2(filename=src.name, temporary=True)
        assert raster._filename == src.name
        assert raster._temporary
        raster.save(dst.name)
        assert not raster._temporary
        assert raster._filename is None
        # temporary file is removed
        assert not os.path.isfile(src.name)
def test_save_uses_copy():
    # save() must return a raster carrying the band names and overview factors.
    band_names = ["b1", "b2", "b3"]
    raster = GeoRaster2.open("tests/data/raster/rgb.tif", band_names=band_names)
    with NamedTemporaryFile(suffix='.tif') as target:
        factors = [2, 4]
        result = raster.save(target.name, overviews=True, factors=factors)
        assert result.band_names == band_names
        assert result.overviews_factors == factors
def test_reproject_lazy():
raster = GeoRaster2.open("tests/data/raster/rgb.tif")
reprojected = raster.reproject(dst_crs=WGS84_CRS)
assert reprojected._image is None
assert reprojected._filename is not None
assert | |
CMap()
>>> m[5] = 1
>>> m[8] = 4
>>> i = m.__iter__()
>>> print int(i.next())
5
>>> print int(i.next())
8
>>> print int(i.prev())
5
We are still left with the odd behavior that an
iterator cannot be dereferenced until after the first next().
Ex edge cases:
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.__iter__()
>>> try:
... i.prev()
... except StopIteration:
... print 'StopIteration'
...
StopIteration
>>> m[5]='a'
>>> i = m.iterkeys()
>>> int(i.next())
5
>>> try: i.next()
... except StopIteration: print 'StopIteration'
...
StopIteration
>>> int(i.prev())
5
>>> try: int(i.prev())
... except StopIteration: print 'StopIteration'
...
StopIteration
>>> int(i.next())
5
"""
self._next()
return self.key()
def prev(self):
    """Returns the previous key in the map.
    See next() for more detail and examples.
    """
    # Step the cursor back one position, then report the key there.
    self._prev()
    return self.key()
class ValueIterator(_AbstractIterator):
    """Iterates over the map's values in key order.

    Uses the legacy (Python 2-style) iterator protocol: explicit next()
    and prev() calls; the doctests below reflect that era.
    """
    def next(self):
        """@return: next value in the map.
        >>> from CMap import *
        >>> m = CMap()
        >>> m[5] = 10
        >>> m[6] = 3
        >>> i = m.itervalues()
        >>> int(i.next())
        10
        >>> int(i.next())
        3
        """
        self._next()
        return self.value()
    def prev(self):
        """@return: previous value in the map (see next())."""
        self._prev()
        return self.value()
class ItemIterator(_AbstractIterator):
    """Iterates over the map's (key, value) pairs in key order.

    Uses the legacy (Python 2-style) iterator protocol: explicit next()
    and prev() calls; the doctests below reflect that era.
    """
    def next(self):
        """@return: next item in the map's key ordering.
        >>> from CMap import CMap
        >>> m = CMap()
        >>> m[5] = 10
        >>> m[6] = 3
        >>> i = m.iteritems()
        >>> k,v = i.next()
        >>> int(k)
        5
        >>> int(v)
        10
        >>> k,v = i.next()
        >>> int(k)
        6
        >>> int(v)
        3
        """
        self._next()
        return self.key(), self.value()
    def prev(self):
        """@return: previous (key, value) item (see next())."""
        self._prev()
        return self.key(), self.value()
def __init__(self, d=None):
    """Instantiate RBTree containing values from passed dict and
    ordered based on cmp.

    Arguments:
        d : optional dict of initial key-value pairs. Defaults to empty.
            (Bug fix: the default used to be a shared mutable {}.)

    >>> m = CMap()
    >>> len(m)
    0
    >>> m[5]=2
    >>> len(m)
    1
    >>> print m[5]
    2
    """
    # Avoid the mutable-default-argument pitfall: build a fresh dict per call.
    if d is None:
        d = {}
    #self._index = {} # to speed up searches.
    self._smap = map_constructor() # C++ map wrapped by swig.
    for key, value in d.items():
        self[key]=value
    self._iterators = WeakKeyDictionary()
    # whenever node is deleted. search iterators
    # for any iterator that becomes invalid.
def __contains__(self, x):
    """Return True if key x is present in the map.

    Bug fix: the previous implementation returned `self.get(x) != None`,
    which wrongly reported False for a key whose stored value is None.
    __getitem__ (map_find) raises KeyError for a missing key, so rely on
    that instead.
    """
    try:
        self[x]
    except KeyError:
        return False
    return True
def __iter__(self):
    """@return: KeyIterator positioned one before the beginning of the
    key ordering so that the first next() returns the first key."""
    # Same semantics as iterkeys(); provided so `for k in m` works.
    return CMap.KeyIterator(self)
def begin(self):
    """Returns an iterator pointing at first key-value pair. This
    differs from iterkeys, itervalues, and iteritems which return an
    iterator pointing one before the first key-value pair.
    @return: key iterator to first key-value.
    >>> from CMap import *
    >>> m = CMap()
    >>> m[5.0] = 'a'
    >>> i = m.begin()
    >>> int(i.key()) # raises no IndexError.
    5
    >>> i = m.iterkeys()
    >>> try:
    ...     i.key()
    ... except IndexError:
    ...     print 'IndexError raised'
    ...
    IndexError raised
    """
    # map_begin positions the iterator directly on the first pair.
    i = CMap.KeyIterator(self, map_begin(self._smap) )
    return i
def end(self):
"""Returns an iterator pointing after end of key ordering.
The iterator's prev method will move to the last
key-value pair in the ordering. This in keeping with
the notion that a range is specified as [i,j) where
j is not in the range, and the range [i,j) where i==j
is an empty range.
This operation takes O(1) time.
@return: key iterator one after end.
"""
i = CMap.KeyIterator(self,None) # None means one after last node.
return i
def iterkeys(self):
return CMap.KeyIterator(self)
def itervalues(self):
return CMap.ValueIterator(self)
def iteritems(self):
return CMap.ItemIterator(self)
def __len__(self):
return map_size(self._smap)
def __str__(self):
s = "{"
first = True
for k,v in self.items():
if first:
first = False
else:
s += ", "
if type(v) == str:
s += "%s: '%s'" % (k,v)
else:
s += "%s: %s" % (k,v)
s += "}"
return s
def __repr__(self):
return self.__str__()
def __getitem__(self, key):
# IMPL 1: without _index
return map_find(self._smap,key) # raises KeyError if key not found
# IMPL 2: with _index.
#return iter_value(self._index[key])
def __setitem__(self, key, value):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>>
"""
assert type(key) == int or type(key) == float
# IMPL 1. without _index.
map_set(self._smap,key,value)
## IMPL 2. with _index
## If using indices following allows us to perform only one search.
#i = map_insert_iter(self._smap,key,value)
#if iter_value(i) != value:
# iter_set(i,value)
#else: self._index[key] = i
## END IMPL2
    def __delitem__(self, key):
        """Deletes the item with matching key from the map.
        This takes O(log n + k) where n is the number of elements
        in the map and k is the number of iterators pointing into the map.
        Before deleting the item it linearly searches through
        all iterators pointing into the map and invalidates any that
        are pointing at the item about to be deleted.

        >>> from CMap import CMap
        >>> m = CMap()
        >>> m[12] = 'foo'
        >>> m[13] = 'bar'
        >>> m[14] = 'boo'
        >>> del m[12]
        >>> try:
        ...     m[12]
        ... except KeyError:
        ...     print 'ok'
        ...
        ok
        >>> j = m.begin()
        >>> int(j.next())
        14
        >>> i = m.begin()
        >>> i.value()
        'bar'
        >>> del m[13] # delete object referenced by an iterator
        >>> try:
        ...     i.value()
        ... except RuntimeError:
        ...     print 'ok'
        ok
        >>> j.value() # deletion should not invalidate other iterators.
        'boo'
        """
        #map_erase( self._smap, key ) # map_erase is dangerous. It could
                                      # delete the node causing an iterator
                                      # to become invalid. --Dave
        si = map_find_iter( self._smap, key ) # si = swig'd iterator.
        # A find that lands at end() means the key is absent; free the
        # swig iterator before raising so it is not leaked.
        if map_iter_at_end(self._smap, si):
            iter_delete(si)
            raise KeyError(key)
        # Invalidate, BEFORE erasing, every live Python iterator that
        # points at the node being removed.
        for i in list(self._iterators):
            if iter_cmp( self._smap, i._si, si ) == 0:
                i._invalidate()
        map_iter_erase( self._smap, si )
        iter_delete(si)
        #iter_delete( self._index[key] ) # IMPL 2. with _index.
        #del self._index[key]            # IMPL 2. with _index.
def erase(self, iter):
"""Remove item pointed to by the iterator. All iterators that
point at the erased item including the passed iterator
are immediately invalidated after the deletion completes.
>>> from CMap import CMap
>>> m = CMap()
>>> m[12] = 'foo'
>>> i = m.find(12)
>>> m.erase(i)
>>> len(m) == 0
True
"""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair" ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different CMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot erase end() iterator.") )
# invalidate iterators.
for i in list(self._iterators):
if iter._si is not i._si and iiter_cmp( self._smmap, iter._si, i._si ) == 0:
i._invalidate()
# remove item from the map.
map_iter_erase( self._smap, iter._si )
# invalidate last iterator pointing to the deleted location in the map.
iter._invalidate()
def __del__(self):
# invalidate all iterators.
for i in list(self._iterators):
i._invalidate()
map_delete(self._smap)
def get(self, key, default=None):
"""@return value corresponding to specified key or return 'default'
if the key is not found.
"""
try:
return map_find(self._smap,key) # IMPL 1. without _index.
#return iter_value(self._index[key]) # IMPL 2. with _index.
except KeyError:
return default
def keys(self):
"""
>>> from CMap import *
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [int(x) for x in m.keys()] # m.keys() but guaranteed integers.
[4, 6]
"""
k = []
for key in self:
k.append(key)
return k
def values(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> m.values()
[7, 3]
"""
i = self.itervalues()
v = []
try:
while True:
v.append(i.next())
except StopIteration:
pass
return v
def items(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [(int(x[0]),int(x[1])) for x in m.items()]
[(4, 7), (6, 3)]
"""
i = self.iteritems()
itms = []
try:
while True:
itms.append(i.next())
except StopIteration:
pass
return itms
def has_key(self, key):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> if m.has_key(4): print 'ok'
...
ok
>>> if not m.has_key(7): print 'ok'
...
ok
"""
try:
self[key]
except KeyError:
return False
return True
def clear(self):
"""delete all entries
>>> from CMap import CMap
| |
# generated by 'clang2py'
# flags '-c -d -l ftd2xx64.dll ftd2xx.h -vvv -o _ftd2xx64.py'
# -*- coding: utf-8 -*-
#
# TARGET arch is: []
# WORD_SIZE is: 4
# POINTER_SIZE is: 8
# LONGDOUBLE_SIZE is: 8
#
# NOTE(review): auto-generated ctypes binding for the FTDI D2XX driver;
# prefer regenerating with clang2py over hand-editing this file.
import ctypes

# if local wordsize is same as target, keep ctypes pointer function.
if ctypes.sizeof(ctypes.c_void_p) == 8:
    POINTER_T = ctypes.POINTER
else:
    # required to access _ctypes
    import _ctypes
    # Emulate a pointer class using the approriate c_int32/c_int64 type
    # The new class should have :
    # ['__module__', 'from_param', '_type_', '__dict__', '__weakref__', '__doc__']
    # but the class should be submitted to a unique instance for each base type
    # to that if A == B, POINTER_T(A) == POINTER_T(B)
    ctypes._pointer_t_type_cache = {}
    def POINTER_T(pointee):
        # a pointer should have the same length as LONG
        fake_ptr_base_type = ctypes.c_uint32
        # specific case for c_void_p
        if pointee is None: # VOID pointer type. c_void_p.
            pointee = type(None) # ctypes.c_void_p # ctypes.c_ulong
            clsname = 'c_void'
        else:
            clsname = pointee.__name__
        # One emulated pointer class per pointee type, cached so that
        # POINTER_T(A) is POINTER_T(A) holds across calls.
        if clsname in ctypes._pointer_t_type_cache:
            return ctypes._pointer_t_type_cache[clsname]
        # make template
        class _T(_ctypes._SimpleCData,):
            _type_ = 'L'
            _subtype_ = pointee
            def _sub_addr_(self):
                return self.value
            def __repr__(self):
                return '%s(%d)'%(clsname, self.value)
            def contents(self):
                raise TypeError('This is not a ctypes pointer.')
            def __init__(self, **args):
                raise TypeError('This is not a ctypes pointer. It is not instanciable.')
        _class = type('LP_%d_%s'%(8, clsname), (_T,),{})
        ctypes._pointer_t_type_cache[clsname] = _class
        return _class
# Fixed-width/compat aliases emitted by clang2py for the target ABI.
c_int128 = ctypes.c_ubyte*16
c_uint128 = c_int128
void = None
if ctypes.sizeof(ctypes.c_longdouble) == 8:
    c_long_double_t = ctypes.c_longdouble
else:
    c_long_double_t = ctypes.c_ubyte*8

# Load the vendor DLL; every FT_* binding below resolves against it.
_libraries = {}
_libraries['ftd2xx64.dll'] = ctypes.CDLL('ftd2xx64.dll')

# Windows-SDK style typedefs used in the ftd2xx prototypes.
PULONG = POINTER_T(ctypes.c_uint32)
PUCHAR = POINTER_T(ctypes.c_ubyte)
DWORD = ctypes.c_uint32
BOOL = ctypes.c_int32
WORD = ctypes.c_uint16
LPWORD = POINTER_T(ctypes.c_uint16)
LPLONG = POINTER_T(ctypes.c_int32)
LPDWORD = POINTER_T(ctypes.c_uint32)
LPVOID = POINTER_T(None)
ULONG = ctypes.c_uint32
UCHAR = ctypes.c_ubyte
USHORT = ctypes.c_uint16
class struct__SECURITY_ATTRIBUTES(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('nLength', ctypes.c_uint32),
    ('PADDING_0', ctypes.c_ubyte * 4),
    ('lpSecurityDescriptor', POINTER_T(None)),
    ('bInheritHandle', ctypes.c_int32),
    ('PADDING_1', ctypes.c_ubyte * 4),
     ]

LPSECURITY_ATTRIBUTES = POINTER_T(struct__SECURITY_ATTRIBUTES)
# _OVERLAPPED is declared first and filled in after its nested union,
# because the union member references struct__OVERLAPPED_0_0.
class struct__OVERLAPPED(ctypes.Structure):
    pass

class union__OVERLAPPED_0(ctypes.Union):
    pass

class struct__OVERLAPPED_0_0(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('Offset', ctypes.c_uint32),
    ('OffsetHigh', ctypes.c_uint32),
     ]

union__OVERLAPPED_0._pack_ = True # source:False
union__OVERLAPPED_0._fields_ = [
    ('_0', struct__OVERLAPPED_0_0),
    ('Pointer', POINTER_T(None)),
]
struct__OVERLAPPED._pack_ = True # source:False
struct__OVERLAPPED._fields_ = [
    ('Internal', ctypes.c_uint64),
    ('InternalHigh', ctypes.c_uint64),
    ('_2', union__OVERLAPPED_0),
    ('hEvent', POINTER_T(None)),
]
LPOVERLAPPED = POINTER_T(struct__OVERLAPPED)
PVOID = POINTER_T(None)
PCHAR = POINTER_T(ctypes.c_char)
LPCTSTR = POINTER_T(ctypes.c_char)
HANDLE = POINTER_T(None)
FT_HANDLE = POINTER_T(None)
FT_STATUS = ctypes.c_uint32

# values for enumeration 'c__Ea_FT_OK'
# (return codes of every FT_* call below; FT_OK == 0 is success)
FT_OK = 0
FT_INVALID_HANDLE = 1
FT_DEVICE_NOT_FOUND = 2
FT_DEVICE_NOT_OPENED = 3
FT_IO_ERROR = 4
FT_INSUFFICIENT_RESOURCES = 5
FT_INVALID_PARAMETER = 6
FT_INVALID_BAUD_RATE = 7
FT_DEVICE_NOT_OPENED_FOR_ERASE = 8
FT_DEVICE_NOT_OPENED_FOR_WRITE = 9
FT_FAILED_TO_WRITE_DEVICE = 10
FT_EEPROM_READ_FAILED = 11
FT_EEPROM_WRITE_FAILED = 12
FT_EEPROM_ERASE_FAILED = 13
FT_EEPROM_NOT_PRESENT = 14
FT_EEPROM_NOT_PROGRAMMED = 15
FT_INVALID_ARGS = 16
FT_NOT_SUPPORTED = 17
FT_OTHER_ERROR = 18
FT_DEVICE_LIST_NOT_READY = 19
c__Ea_FT_OK = ctypes.c_int # enum
PFT_EVENT_HANDLER = POINTER_T(ctypes.CFUNCTYPE(None, ctypes.c_uint32, ctypes.c_uint32))
FT_DEVICE = ctypes.c_uint32

# values for enumeration 'c__Ea_FT_DEVICE_BM'
# (FTDI chip family identifiers)
FT_DEVICE_BM = 0
FT_DEVICE_AM = 1
FT_DEVICE_100AX = 2
FT_DEVICE_UNKNOWN = 3
FT_DEVICE_2232C = 4
FT_DEVICE_232R = 5
FT_DEVICE_2232H = 6
FT_DEVICE_4232H = 7
FT_DEVICE_232H = 8
FT_DEVICE_X_SERIES = 9
FT_DEVICE_4222H_0 = 10
FT_DEVICE_4222H_1_2 = 11
FT_DEVICE_4222H_3 = 12
FT_DEVICE_4222_PROG = 13
FT_DEVICE_900 = 14
FT_DEVICE_930 = 15
FT_DEVICE_UMFTPD3A = 16
c__Ea_FT_DEVICE_BM = ctypes.c_int # enum
# ---------------------------------------------------------------------------
# D2XX API entry points.  Each block binds one export of ftd2xx64.dll:
# restype is always FT_STATUS (see c__Ea_FT_OK values above), argtypes
# mirror the C prototype quoted in __doc__ (generated from ftd2xx.h).
# ---------------------------------------------------------------------------
FT_Open = _libraries['ftd2xx64.dll'].FT_Open
FT_Open.restype = FT_STATUS
# FT_Open(deviceNumber, pHandle)
FT_Open.argtypes = [ctypes.c_int32, POINTER_T(POINTER_T(None))]
FT_Open.__doc__ = \
    """FT_STATUS FT_Open(c_int32 deviceNumber, LP_LP_None pHandle)
ftd2xx.h:334"""
FT_OpenEx = _libraries['ftd2xx64.dll'].FT_OpenEx
FT_OpenEx.restype = FT_STATUS
# FT_OpenEx(pArg1, Flags, pHandle)
FT_OpenEx.argtypes = [PVOID, DWORD, POINTER_T(POINTER_T(None))]
FT_OpenEx.__doc__ = \
    """FT_STATUS FT_OpenEx(PVOID pArg1, DWORD Flags, LP_LP_None pHandle)
ftd2xx.h:340"""
FT_ListDevices = _libraries['ftd2xx64.dll'].FT_ListDevices
FT_ListDevices.restype = FT_STATUS
# FT_ListDevices(pArg1, pArg2, Flags)
FT_ListDevices.argtypes = [PVOID, PVOID, DWORD]
FT_ListDevices.__doc__ = \
    """FT_STATUS FT_ListDevices(PVOID pArg1, PVOID pArg2, DWORD Flags)
ftd2xx.h:347"""
FT_Close = _libraries['ftd2xx64.dll'].FT_Close
FT_Close.restype = FT_STATUS
# FT_Close(ftHandle)
FT_Close.argtypes = [FT_HANDLE]
FT_Close.__doc__ = \
    """FT_STATUS FT_Close(FT_HANDLE ftHandle)
ftd2xx.h:354"""
FT_Read = _libraries['ftd2xx64.dll'].FT_Read
FT_Read.restype = FT_STATUS
# FT_Read(ftHandle, lpBuffer, dwBytesToRead, lpBytesReturned)
FT_Read.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD]
FT_Read.__doc__ = \
    """FT_STATUS FT_Read(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD dwBytesToRead, LPDWORD lpBytesReturned)
ftd2xx.h:359"""
FT_Write = _libraries['ftd2xx64.dll'].FT_Write
FT_Write.restype = FT_STATUS
# FT_Write(ftHandle, lpBuffer, dwBytesToWrite, lpBytesWritten)
FT_Write.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD]
FT_Write.__doc__ = \
    """FT_STATUS FT_Write(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD dwBytesToWrite, LPDWORD lpBytesWritten)
ftd2xx.h:367"""
FT_IoCtl = _libraries['ftd2xx64.dll'].FT_IoCtl
FT_IoCtl.restype = FT_STATUS
# FT_IoCtl(ftHandle, dwIoControlCode, lpInBuf, nInBufSize, lpOutBuf, nOutBufSize, lpBytesReturned, lpOverlapped)
FT_IoCtl.argtypes = [FT_HANDLE, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_IoCtl.__doc__ = \
    """FT_STATUS FT_IoCtl(FT_HANDLE ftHandle, DWORD dwIoControlCode, LPVOID lpInBuf, DWORD nInBufSize, LPVOID lpOutBuf, DWORD nOutBufSize, LPDWORD lpBytesReturned, LPOVERLAPPED lpOverlapped)
ftd2xx.h:375"""
FT_SetBaudRate = _libraries['ftd2xx64.dll'].FT_SetBaudRate
FT_SetBaudRate.restype = FT_STATUS
# FT_SetBaudRate(ftHandle, BaudRate)
FT_SetBaudRate.argtypes = [FT_HANDLE, ULONG]
FT_SetBaudRate.__doc__ = \
    """FT_STATUS FT_SetBaudRate(FT_HANDLE ftHandle, ULONG BaudRate)
ftd2xx.h:387"""
FT_SetDivisor = _libraries['ftd2xx64.dll'].FT_SetDivisor
FT_SetDivisor.restype = FT_STATUS
# FT_SetDivisor(ftHandle, Divisor)
FT_SetDivisor.argtypes = [FT_HANDLE, USHORT]
FT_SetDivisor.__doc__ = \
    """FT_STATUS FT_SetDivisor(FT_HANDLE ftHandle, USHORT Divisor)
ftd2xx.h:393"""
FT_SetDataCharacteristics = _libraries['ftd2xx64.dll'].FT_SetDataCharacteristics
FT_SetDataCharacteristics.restype = FT_STATUS
# FT_SetDataCharacteristics(ftHandle, WordLength, StopBits, Parity)
FT_SetDataCharacteristics.argtypes = [FT_HANDLE, UCHAR, UCHAR, UCHAR]
FT_SetDataCharacteristics.__doc__ = \
    """FT_STATUS FT_SetDataCharacteristics(FT_HANDLE ftHandle, UCHAR WordLength, UCHAR StopBits, UCHAR Parity)
ftd2xx.h:399"""
FT_SetFlowControl = _libraries['ftd2xx64.dll'].FT_SetFlowControl
FT_SetFlowControl.restype = FT_STATUS
# FT_SetFlowControl(ftHandle, FlowControl, XonChar, XoffChar)
FT_SetFlowControl.argtypes = [FT_HANDLE, USHORT, UCHAR, UCHAR]
FT_SetFlowControl.__doc__ = \
    """FT_STATUS FT_SetFlowControl(FT_HANDLE ftHandle, USHORT FlowControl, UCHAR XonChar, UCHAR XoffChar)
ftd2xx.h:407"""
FT_ResetDevice = _libraries['ftd2xx64.dll'].FT_ResetDevice
FT_ResetDevice.restype = FT_STATUS
# FT_ResetDevice(ftHandle)
FT_ResetDevice.argtypes = [FT_HANDLE]
FT_ResetDevice.__doc__ = \
    """FT_STATUS FT_ResetDevice(FT_HANDLE ftHandle)
ftd2xx.h:415"""
FT_SetDtr = _libraries['ftd2xx64.dll'].FT_SetDtr
FT_SetDtr.restype = FT_STATUS
# FT_SetDtr(ftHandle)
FT_SetDtr.argtypes = [FT_HANDLE]
FT_SetDtr.__doc__ = \
    """FT_STATUS FT_SetDtr(FT_HANDLE ftHandle)
ftd2xx.h:420"""
FT_ClrDtr = _libraries['ftd2xx64.dll'].FT_ClrDtr
FT_ClrDtr.restype = FT_STATUS
# FT_ClrDtr(ftHandle)
FT_ClrDtr.argtypes = [FT_HANDLE]
FT_ClrDtr.__doc__ = \
    """FT_STATUS FT_ClrDtr(FT_HANDLE ftHandle)
ftd2xx.h:425"""
FT_SetRts = _libraries['ftd2xx64.dll'].FT_SetRts
FT_SetRts.restype = FT_STATUS
# FT_SetRts(ftHandle)
FT_SetRts.argtypes = [FT_HANDLE]
FT_SetRts.__doc__ = \
    """FT_STATUS FT_SetRts(FT_HANDLE ftHandle)
ftd2xx.h:430"""
FT_ClrRts = _libraries['ftd2xx64.dll'].FT_ClrRts
FT_ClrRts.restype = FT_STATUS
# FT_ClrRts(ftHandle)
FT_ClrRts.argtypes = [FT_HANDLE]
FT_ClrRts.__doc__ = \
    """FT_STATUS FT_ClrRts(FT_HANDLE ftHandle)
ftd2xx.h:435"""
FT_GetModemStatus = _libraries['ftd2xx64.dll'].FT_GetModemStatus
FT_GetModemStatus.restype = FT_STATUS
# FT_GetModemStatus(ftHandle, pModemStatus)
FT_GetModemStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetModemStatus.__doc__ = \
    """FT_STATUS FT_GetModemStatus(FT_HANDLE ftHandle, LP_c_uint32 pModemStatus)
ftd2xx.h:440"""
FT_SetChars = _libraries['ftd2xx64.dll'].FT_SetChars
FT_SetChars.restype = FT_STATUS
# FT_SetChars(ftHandle, EventChar, EventCharEnabled, ErrorChar, ErrorCharEnabled)
FT_SetChars.argtypes = [FT_HANDLE, UCHAR, UCHAR, UCHAR, UCHAR]
FT_SetChars.__doc__ = \
    """FT_STATUS FT_SetChars(FT_HANDLE ftHandle, UCHAR EventChar, UCHAR EventCharEnabled, UCHAR ErrorChar, UCHAR ErrorCharEnabled)
ftd2xx.h:446"""
FT_Purge = _libraries['ftd2xx64.dll'].FT_Purge
FT_Purge.restype = FT_STATUS
# FT_Purge(ftHandle, Mask)
FT_Purge.argtypes = [FT_HANDLE, ULONG]
FT_Purge.__doc__ = \
    """FT_STATUS FT_Purge(FT_HANDLE ftHandle, ULONG Mask)
ftd2xx.h:455"""
FT_SetTimeouts = _libraries['ftd2xx64.dll'].FT_SetTimeouts
FT_SetTimeouts.restype = FT_STATUS
# FT_SetTimeouts(ftHandle, ReadTimeout, WriteTimeout)
FT_SetTimeouts.argtypes = [FT_HANDLE, ULONG, ULONG]
FT_SetTimeouts.__doc__ = \
    """FT_STATUS FT_SetTimeouts(FT_HANDLE ftHandle, ULONG ReadTimeout, ULONG WriteTimeout)
ftd2xx.h:461"""
FT_GetQueueStatus = _libraries['ftd2xx64.dll'].FT_GetQueueStatus
FT_GetQueueStatus.restype = FT_STATUS
# FT_GetQueueStatus(ftHandle, dwRxBytes)
FT_GetQueueStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetQueueStatus.__doc__ = \
    """FT_STATUS FT_GetQueueStatus(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes)
ftd2xx.h:468"""
FT_SetEventNotification = _libraries['ftd2xx64.dll'].FT_SetEventNotification
FT_SetEventNotification.restype = FT_STATUS
# FT_SetEventNotification(ftHandle, Mask, Param)
FT_SetEventNotification.argtypes = [FT_HANDLE, DWORD, PVOID]
FT_SetEventNotification.__doc__ = \
    """FT_STATUS FT_SetEventNotification(FT_HANDLE ftHandle, DWORD Mask, PVOID Param)
ftd2xx.h:474"""
FT_GetStatus = _libraries['ftd2xx64.dll'].FT_GetStatus
FT_GetStatus.restype = FT_STATUS
# FT_GetStatus(ftHandle, dwRxBytes, dwTxBytes, dwEventDWord)
FT_GetStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32), POINTER_T(ctypes.c_uint32), POINTER_T(ctypes.c_uint32)]
FT_GetStatus.__doc__ = \
    """FT_STATUS FT_GetStatus(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes, LP_c_uint32 dwTxBytes, LP_c_uint32 dwEventDWord)
ftd2xx.h:481"""
FT_SetBreakOn = _libraries['ftd2xx64.dll'].FT_SetBreakOn
FT_SetBreakOn.restype = FT_STATUS
# FT_SetBreakOn(ftHandle)
FT_SetBreakOn.argtypes = [FT_HANDLE]
FT_SetBreakOn.__doc__ = \
    """FT_STATUS FT_SetBreakOn(FT_HANDLE ftHandle)
ftd2xx.h:489"""
FT_SetBreakOff = _libraries['ftd2xx64.dll'].FT_SetBreakOff
FT_SetBreakOff.restype = FT_STATUS
# FT_SetBreakOff(ftHandle)
FT_SetBreakOff.argtypes = [FT_HANDLE]
FT_SetBreakOff.__doc__ = \
    """FT_STATUS FT_SetBreakOff(FT_HANDLE ftHandle)
ftd2xx.h:494"""
FT_SetWaitMask = _libraries['ftd2xx64.dll'].FT_SetWaitMask
FT_SetWaitMask.restype = FT_STATUS
# FT_SetWaitMask(ftHandle, Mask)
FT_SetWaitMask.argtypes = [FT_HANDLE, DWORD]
FT_SetWaitMask.__doc__ = \
    """FT_STATUS FT_SetWaitMask(FT_HANDLE ftHandle, DWORD Mask)
ftd2xx.h:499"""
FT_WaitOnMask = _libraries['ftd2xx64.dll'].FT_WaitOnMask
FT_WaitOnMask.restype = FT_STATUS
# FT_WaitOnMask(ftHandle, Mask)
FT_WaitOnMask.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_WaitOnMask.__doc__ = \
    """FT_STATUS FT_WaitOnMask(FT_HANDLE ftHandle, LP_c_uint32 Mask)
ftd2xx.h:505"""
FT_GetEventStatus = _libraries['ftd2xx64.dll'].FT_GetEventStatus
FT_GetEventStatus.restype = FT_STATUS
# FT_GetEventStatus(ftHandle, dwEventDWord)
FT_GetEventStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetEventStatus.__doc__ = \
    """FT_STATUS FT_GetEventStatus(FT_HANDLE ftHandle, LP_c_uint32 dwEventDWord)
ftd2xx.h:511"""
FT_ReadEE = _libraries['ftd2xx64.dll'].FT_ReadEE
FT_ReadEE.restype = FT_STATUS
# FT_ReadEE(ftHandle, dwWordOffset, lpwValue)
FT_ReadEE.argtypes = [FT_HANDLE, DWORD, LPWORD]
FT_ReadEE.__doc__ = \
    """FT_STATUS FT_ReadEE(FT_HANDLE ftHandle, DWORD dwWordOffset, LPWORD lpwValue)
ftd2xx.h:517"""
FT_WriteEE = _libraries['ftd2xx64.dll'].FT_WriteEE
FT_WriteEE.restype = FT_STATUS
# FT_WriteEE(ftHandle, dwWordOffset, wValue)
FT_WriteEE.argtypes = [FT_HANDLE, DWORD, WORD]
FT_WriteEE.__doc__ = \
    """FT_STATUS FT_WriteEE(FT_HANDLE ftHandle, DWORD dwWordOffset, WORD wValue)
ftd2xx.h:524"""
FT_EraseEE = _libraries['ftd2xx64.dll'].FT_EraseEE
FT_EraseEE.restype = FT_STATUS
# FT_EraseEE(ftHandle)
FT_EraseEE.argtypes = [FT_HANDLE]
FT_EraseEE.__doc__ = \
    """FT_STATUS FT_EraseEE(FT_HANDLE ftHandle)
ftd2xx.h:531"""
class struct_ft_program_data(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Signature1', ctypes.c_uint32),
('Signature2', ctypes.c_uint32),
('Version', ctypes.c_uint32),
('VendorId', ctypes.c_uint16),
('ProductId', ctypes.c_uint16),
('Manufacturer', POINTER_T(ctypes.c_char)),
('ManufacturerId', POINTER_T(ctypes.c_char)),
('Description', POINTER_T(ctypes.c_char)),
('SerialNumber', POINTER_T(ctypes.c_char)),
('MaxPower', ctypes.c_uint16),
('PnP', ctypes.c_uint16),
('SelfPowered', ctypes.c_uint16),
('RemoteWakeup', ctypes.c_uint16),
('Rev4', ctypes.c_ubyte),
('IsoIn', ctypes.c_ubyte),
('IsoOut', ctypes.c_ubyte),
('PullDownEnable', ctypes.c_ubyte),
('SerNumEnable', ctypes.c_ubyte),
('USBVersionEnable', ctypes.c_ubyte),
('USBVersion', ctypes.c_uint16),
('Rev5', ctypes.c_ubyte),
('IsoInA', ctypes.c_ubyte),
('IsoInB', ctypes.c_ubyte),
('IsoOutA', ctypes.c_ubyte),
('IsoOutB', ctypes.c_ubyte),
('PullDownEnable5', ctypes.c_ubyte),
('SerNumEnable5', ctypes.c_ubyte),
('USBVersionEnable5', ctypes.c_ubyte),
('USBVersion5', ctypes.c_uint16),
('AIsHighCurrent', ctypes.c_ubyte),
('BIsHighCurrent', ctypes.c_ubyte),
('IFAIsFifo', ctypes.c_ubyte),
('IFAIsFifoTar', ctypes.c_ubyte),
('IFAIsFastSer', ctypes.c_ubyte),
('AIsVCP', ctypes.c_ubyte),
('IFBIsFifo', ctypes.c_ubyte),
('IFBIsFifoTar', ctypes.c_ubyte),
('IFBIsFastSer', ctypes.c_ubyte),
('BIsVCP', ctypes.c_ubyte),
('UseExtOsc', ctypes.c_ubyte),
('HighDriveIOs', ctypes.c_ubyte),
('EndpointSize', ctypes.c_ubyte),
('PullDownEnableR', ctypes.c_ubyte),
('SerNumEnableR', ctypes.c_ubyte),
('InvertTXD', ctypes.c_ubyte),
('InvertRXD', ctypes.c_ubyte),
('InvertRTS', ctypes.c_ubyte),
('InvertCTS', ctypes.c_ubyte),
('InvertDTR', ctypes.c_ubyte),
('InvertDSR', ctypes.c_ubyte),
('InvertDCD', ctypes.c_ubyte),
('InvertRI', ctypes.c_ubyte),
('Cbus0', ctypes.c_ubyte),
('Cbus1', ctypes.c_ubyte),
('Cbus2', ctypes.c_ubyte),
('Cbus3', ctypes.c_ubyte),
('Cbus4', ctypes.c_ubyte),
('RIsD2XX', ctypes.c_ubyte),
('PullDownEnable7', ctypes.c_ubyte),
('SerNumEnable7', ctypes.c_ubyte),
('ALSlowSlew', ctypes.c_ubyte),
('ALSchmittInput', ctypes.c_ubyte),
('ALDriveCurrent', ctypes.c_ubyte),
('AHSlowSlew', ctypes.c_ubyte),
('AHSchmittInput', ctypes.c_ubyte),
('AHDriveCurrent', ctypes.c_ubyte),
('BLSlowSlew', ctypes.c_ubyte),
('BLSchmittInput', ctypes.c_ubyte),
('BLDriveCurrent', ctypes.c_ubyte),
('BHSlowSlew', ctypes.c_ubyte),
('BHSchmittInput', ctypes.c_ubyte),
('BHDriveCurrent', ctypes.c_ubyte),
('IFAIsFifo7', ctypes.c_ubyte),
('IFAIsFifoTar7', ctypes.c_ubyte),
('IFAIsFastSer7', ctypes.c_ubyte),
('AIsVCP7', ctypes.c_ubyte),
('IFBIsFifo7', ctypes.c_ubyte),
('IFBIsFifoTar7', ctypes.c_ubyte),
('IFBIsFastSer7', ctypes.c_ubyte),
('BIsVCP7', ctypes.c_ubyte),
('PowerSaveEnable', ctypes.c_ubyte),
('PullDownEnable8', ctypes.c_ubyte),
('SerNumEnable8', ctypes.c_ubyte),
('ASlowSlew', ctypes.c_ubyte),
('ASchmittInput', ctypes.c_ubyte),
('ADriveCurrent', ctypes.c_ubyte),
('BSlowSlew', ctypes.c_ubyte),
('BSchmittInput', ctypes.c_ubyte),
('BDriveCurrent', ctypes.c_ubyte),
('CSlowSlew', ctypes.c_ubyte),
('CSchmittInput', ctypes.c_ubyte),
('CDriveCurrent', ctypes.c_ubyte),
('DSlowSlew', ctypes.c_ubyte),
('DSchmittInput', ctypes.c_ubyte),
('DDriveCurrent', ctypes.c_ubyte),
('ARIIsTXDEN', ctypes.c_ubyte),
('BRIIsTXDEN', ctypes.c_ubyte),
('CRIIsTXDEN', ctypes.c_ubyte),
('DRIIsTXDEN', ctypes.c_ubyte),
('AIsVCP8', ctypes.c_ubyte),
('BIsVCP8', ctypes.c_ubyte),
('CIsVCP8', ctypes.c_ubyte),
('DIsVCP8', ctypes.c_ubyte),
('PullDownEnableH', ctypes.c_ubyte),
('SerNumEnableH', ctypes.c_ubyte),
('ACSlowSlewH', ctypes.c_ubyte),
('ACSchmittInputH', ctypes.c_ubyte),
('ACDriveCurrentH', ctypes.c_ubyte),
('ADSlowSlewH', ctypes.c_ubyte),
('ADSchmittInputH', ctypes.c_ubyte),
('ADDriveCurrentH', ctypes.c_ubyte),
('Cbus0H', ctypes.c_ubyte),
('Cbus1H', ctypes.c_ubyte),
('Cbus2H', ctypes.c_ubyte),
('Cbus3H', ctypes.c_ubyte),
('Cbus4H', ctypes.c_ubyte),
('Cbus5H', ctypes.c_ubyte),
('Cbus6H', ctypes.c_ubyte),
('Cbus7H', ctypes.c_ubyte),
('Cbus8H', ctypes.c_ubyte),
('Cbus9H', ctypes.c_ubyte),
('IsFifoH', ctypes.c_ubyte),
('IsFifoTarH', ctypes.c_ubyte),
('IsFastSerH', ctypes.c_ubyte),
| |
If it is not specified, the numeric
value of the corresponding missing value in Stata is returned.
Returns
-------
List
A list of lists containing the values from the frame.
Each sublist contains values for one observation.
Raises
------
ValueError
This error can be raised if
- any of the variable indices or names specified in `var` is out of :ref:`range <ref-framerange>` or not found.
- any of the observation indices specified in `obs` is out of :ref:`range <ref-framerange>`.
- `selectvar` is out of :ref:`range <ref-framerange>` or not found.
"""
if valuelabel is True:
bvalabel = 1
elif valuelabel is False:
bvalabel = 0
else:
raise TypeError("valuelabel must be a boolean value")
bmissing = 0
if not isinstance(missingval, _DefaultMissing):
bmissing = 1
svar = -1
if selectvar is None or selectvar=="":
svar = -2
elif selectvar!=-1:
svar = _get_df_var_index_single(self.name, self.id, selectvar)
if var is None and obs is None:
if bmissing == 1:
return _stp._st_df_getdata(self.name, self.id, var, obs, svar, 1, bvalabel, missingval)
else:
return _stp._st_df_getdata(self.name, self.id, var, obs, svar, 1, bvalabel)
else:
if var is None:
vars = None
else:
vars = _get_df_var_index_all(self.name, self.id, var)
if obs is None:
obss = None
else:
obss = _get_df_obs_index(self.name, self.id, obs)
if bmissing == 1:
return _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 1, bvalabel, missingval)
else:
return _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 1, bvalabel)
def getAsDict(self, var=None, obs=None, selectvar=None, valuelabel=False, missingval=_DefaultMissing()):
"""
Read values from the frame and store them in a dictionary. The keys
are the variable names. The values are the data values for the
corresponding variables.
Parameters
----------
var : int, str, or list-like, optional
Variables to access. It can be specified as a single variable
index or name, or an iterable of variable indices or names.
If `var` is not specified, all the variables are specified.
obs : int or list-like, optional
Observations to access. It can be specified as a single
observation index or an iterable of observation indices.
If `obs` is not specified, all the observations are specified.
selectvar : int or str, optional
Observations for which `selectvar!=0` will be selected. If
`selectvar` is an integer, it is interpreted as a variable
index. If `selectvar` is a string, it should contain the name
of a Stata variable. Specifying `selectvar` as "" has the
same result as not specifying `selectvar`, which means no observations
are excluded. Specifying `selectvar` as -1 means that observations
with missing values for the variables specified in `var` are to be
excluded.
valuelabel : bool, optional
Use the value label when available. Default is False.
missingval : :ref:`_DefaultMissing<ref-defaultmissing>`, `optional`
If `missingval` is specified, all the missing values in the returned
dictionary are replaced by this value. If it is not specified, the numeric
value of the corresponding missing value in Stata is returned.
Returns
-------
dictionary
Return a dictionary containing the data values from the frame.
Raises
------
ValueError
This error can be raised if
- any of the variable indices or names specified in `var` is out of :ref:`range <ref-framerange>` or not found.
- any of the observation indices specified in `obs` is out of :ref:`range <ref-framerange>`.
- `selectvar` is out of :ref:`range <ref-framerange>` or not found.
"""
if valuelabel is True:
bvalabel = 1
elif valuelabel is False:
bvalabel = 0
else:
raise TypeError("valuelabel must be a boolean value")
bmissing = 0
if not isinstance(missingval, _DefaultMissing):
bmissing = 1
svar = -1
if selectvar is None or selectvar=="":
svar = -2
elif selectvar!=-1:
svar = _get_df_var_index_single(self.name, self.id, selectvar)
if var is None:
nvars = _stp._st_df_getvarcount(self.name, self.id)
vars = list(range(nvars))
else:
vars = _get_df_var_index_all(self.name, self.id, var)
vars_len = len(vars)
if obs is None:
nobs = _stp._st_df_getobstotal(self.name, self.id)
if nobs==1:
obss=[0]
obss_len = 1
else:
obss = None
obss_len = 0
else:
obss = _get_df_obs_index(self.name, self.id, obs)
obss_len = len(obss)
if vars_len==1:
lvarnames = _stp._st_df_getvarname(self.name, self.id, vars[0])
if bmissing == 1:
ldata = _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 2, bvalabel, missingval)
else:
ldata = _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 2, bvalabel)
zdata = {}
if len(ldata) > 0:
zdata[lvarnames] = ldata
return zdata
elif obss_len==1:
lvarnames = [_stp._st_df_getvarname(self.name, self.id, v) for v in vars]
if bmissing == 1:
ldata = _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 2, bvalabel, missingval)
else:
ldata = _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 2, bvalabel)
zdata = {}
if len(ldata) > 0:
zdata = zip(lvarnames, ldata)
return dict(zdata)
else:
lvarnames = [_stp._st_df_getvarname(self.name, self.id, v) for v in vars]
if bmissing == 1:
ldata = _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 2, bvalabel, missingval)
else:
ldata = _stp._st_df_getdata(self.name, self.id, vars, obss, svar, 2, bvalabel)
zdata = {}
if len(ldata) > 0:
zdata = zip(lvarnames, ldata)
return dict(zdata)
def getAt(self, var, obs):
"""
Read a value from the frame.
Parameters
----------
var : int or str
Variable to access. It can be specified as the variable
index or name.
obs : int
Observation to access.
Returns
-------
float or str
The value.
Raises
------
ValueError
This error can be raised if
- `var` is out of :ref:`range <ref-framerange>` or not found.
- `obs` is out of :ref:`range <ref-framerange>`.
"""
ovar = _get_df_var_index_single(self.name, self.id, var)
if _check_df_obs(self.name, self.id, obs):
raise ValueError("%d: obs out of range" % (obs))
return _stp._st_df_getdataat(self.name, self.id, ovar, obs)
def getFormattedValue(self, var, obs, bValueLabel):
"""
Read a value from the frame, applying its display format.
Parameters
----------
var : int or str
Variable to access. It can be specified as the variable
index or name.
obs : int
Observation to access.
bValueLabel : bool
Use the value label when available.
Returns
-------
str
The formatted value as a string.
Raises
------
ValueError
This error can be raised if
- `var` is out of :ref:`range <ref-framerange>` or not found.
- `obs` is out of :ref:`range <ref-framerange>`.
"""
if bValueLabel is True:
blbl = 1
elif bValueLabel is False:
blbl = 0
else:
raise TypeError("bValueLabel must be a boolean value")
oindex = _get_df_var_index_single(self.name, self.id, var)
if _check_df_obs(self.name, self.id, obs):
raise ValueError("%d: obs out of range" % (obs))
return _stp._st_df_getfmtvalue(self.name, self.id, oindex, obs, blbl)
@staticmethod
def getFrameAt(index):
"""
Utility method for getting the name of a Stata frame
at a given index.
Parameters
----------
index : int
The index for a frame.
Returns
-------
str
The name of the frame for the specified index.
"""
return _stp._st_df_getframeat(index)
@staticmethod
def getFrameCount():
"""
Utility method for getting the number of frames in Stata.
Returns
-------
int
The number of frames.
"""
return _stp._st_df_gettotalframecount()
def getObsTotal(self):
"""
Get the number of observations in the frame.
Returns
-------
int
The number of observations.
"""
return _stp._st_df_getobstotal(self.name, self.id)
def getStrVarWidth(self, var):
"""
Get the width of the variable of type **str**.
Parameters
----------
var : int or str
Variable to access. It can be specified as the
variable index or name.
Returns
-------
int
The width of the variable.
Raises
------
ValueError
If `var` is out of :ref:`range <ref-framerange>` or not found.
"""
oindex = _get_df_var_index_single(self.name, self.id, var)
return _stp._st_df_getstrvarwidth(self.name, self.id, oindex)
def getVarCount(self):
"""
Get the number of variables in the frame.
Returns
-------
int
The number of variables.
"""
return _stp._st_df_getvarcount(self.name, self.id)
def getVarFormat(self, var):
"""
Get the format for the variable in the frame.
Parameters
----------
var : int or str
Variable to access. It can be specified as the
variable index or name.
Returns
-------
str
The variable format.
Raises
------
ValueError
If `var` is out of :ref:`range <ref-framerange>` or not found.
"""
oindex = _get_df_var_index_single(self.name, self.id, var)
return _stp._st_df_getvarformat(self.name, self.id, oindex)
def getVarIndex(self, name):
"""
Look up the variable index for the specified name in the
frame.
Parameters
----------
name : str
Variable to access.
Returns
| |
= output_path + flood_objective["output"]['dret_index']
else:
gwater_index_cover = working_path + intermediate_files['index_cover']
gwater_index_rough = working_path + intermediate_files['index_rough']
gwater_slope_index = output_path + output_files['slope_index']
gwater_comb_weight_R = working_path + intermediate_files['comb_weight_ret']
gwater_dret_flowlen = working_path + intermediate_files['dret_flowlen']
gwater_dret_index = output_path + output_files['dret_index']
logger.info("Processing Groundwater Recharge/Baseflow objective...")
# Make Cover and Roughness Index rasters
if not os.path.exists(gwater_index_cover):
derive_raster_from_lulc(lulc_raster_uri, rios_fields["landuse"],
lulc_coeff_df, rios_fields["cover"],
gwater_index_cover)
if not os.path.exists(gwater_index_rough):
derive_raster_from_lulc(lulc_raster_uri, rios_fields["landuse"],
lulc_coeff_df, rios_fields["roughness"],
gwater_index_rough)
# Make other index rasters as necessary
if not os.path.exists(gwater_precip_annual_index): # Annual average precipitation index
normalize(precip_annual_raster_uri, gwater_precip_annual_index)
if not os.path.exists(gwater_aet_index): # Actual Evapotranspiration index
normalize(aet_raster_uri, gwater_aet_index)
# Slope Index
calculate_slope_index(slope_raster_uri, gwater_slope_index)
logger.info("\t... created Groundwater slope index: "
+ os.path.basename(gwater_slope_index))
# Downslope Retention Index
calculate_downslope_retention_index(gwater_index_rough, gwater_slope_index,
gwater_comb_weight_R, flow_dir_raster_uri, streams_raster_uri,
gwater_dret_flowlen, gwater_dret_index)
logger.info("\t... created Groundwater downslope retention index: "
+ os.path.basename(gwater_dret_index))
# Upslope Source
gw_weight = [gwater_precip_annual_index, soil_texture_raster_uri,
gwater_slope_index, soil_depth_index_uri]
gw_inverse_weight = [gwater_aet_index, gwater_index_cover,
gwater_index_rough]
calculate_upslope_source(gw_weight, gw_inverse_weight,
gwater_comb_weight_source, flow_dir_raster_uri,
dem_raster_uri, gwater_upslope_source)
logger.info("\t... created Groundwater upslope source: "
+ os.path.basename(gwater_upslope_source))
###############################################################################
###############################################################################
def main(working_path, output_path, hydro_path, rios_coeff_table,
lulc_raster_uri, dem_raster_uri,
erosivity_raster_uri=None, erodibility_raster_uri=None,
soil_depth_raster_uri=None, precip_month_raster_uri=None,
soil_texture_raster_uri=None, precip_annual_raster_uri=None,
aet_raster_uri=None,
river_buffer_dist=45., suffix="",
aoi_shape_uri=None, river_reference_shape_uri_list=None,
flow_dir_raster_uri=None, slope_raster_uri=None,
flow_acc_raster_uri=None, streams_raster_uri=None,
do_erosion=False, do_nutrient_p=False, do_nutrient_n=False,
do_flood=False, do_gw_bf=False,
clean_intermediate_files=False,
write_log=False):
"""
The main process that replaces the ArcGIS RIOS_Pre_Processing script.
It calculates the inputs for the RIOS IPA program such as
downslope retention index, upslope source, riparian index,
and slope index appropriately for Erosion Control, Phosphorus Retention,
Nitrogen Retention, Flood Mitigation, and Groundwater Retention/Baseflow.
Args :
working_path - path to directory for preprocessor intermediate files
output_path - path to directory for preprocessor outputs
hydro_path - path to directory for flow direction/path/
accumulation rasters
rios_coeff_table - path to csv table containing biophysical coefficients
lulc_raster_uri - path to raster of land use/land cover
dem_raster_uri - path to raster of digital elevation
Args [grouped by category] (optional):
[Rasters to input data]
erosivity_raster_uri - path to raster of rainfall erosivity
erodibility_raster_uri - path to raster of soil erodibility
soil_depth_raster_uri - path to raster of soil depth
precip_month_raster_uri - path to raster of peak monthly precipitation
soil_texture_raster_uri - path to raster of soil texture
precip_annual_raster_uri - path to raster of annual precipitaion
aet_raster_uri - path to raster of actual evapotranspiration
[Calculation-specific inputs/data]
river_buffer_dist - extent of riparian buffer (in raster
map units e.g. metres) Default = 45(m)
suffix - string to identify output files
aoi_shape_uri - path to shapefile of area of interest
[River data sources]
river_reference_shape_uri_list - path to list of shapefiles describing
rivers in the area
streams_raster_uri - path to raster describing DEM-
compatible stream (stream pixels = 1)
[Flags to trigger data preparation for RIOS objectives]
do_erosion - runs erosion control objective
do_nutrient_p - runs phosphorus fixing objective
do_nutrient_n - runs nitrogen fixing objective
do_flood - runs flood control objective
do_gw_bf - runs groundwater retention/baseflow objective
[Misc]
clean_intermediate_files - deletes intermediate files produced
write_log - stores extra numbers and bits that explain conversions & processes
"""
###############################################################################
# get basic setups for objectives
objective = get_objective_dictionary(
suffix=suffix, do_erosion=do_erosion, do_nutrient_p=do_nutrient_p,
do_nutrient_n=do_nutrient_n, do_flood=do_flood, do_gw_bf=do_gw_bf)
# make a bunch of lists to keep logs
parameter_log = [] # replacement for parameters file to be written
configuration_log = {} # where information on calculations will be saved
# With parameters log; later write input parameter values to an output file
parameter_log.append("Date and Time: " + time.strftime("%Y-%m-%d %H:%M"))
logger.info(parameter_log[-1])
logger.info("Validating arguments...")
###############################################################################
# Log whether we calculate inputs for the objectives
for obj in objective:
parameter_log.append("Calculate for %s: %s" %
(obj, str(objective[obj]['found']).lower()))
logger.info(parameter_log[-1])
###############################################################################
# Directory where output files will be written
working_path = os.path.normpath(working_path).rstrip(os.sep) + os.sep
parameter_log.append("Workspace: " + working_path)
logger.info(parameter_log[-1])
output_path = os.path.normpath(output_path).rstrip(os.sep) + os.sep
parameter_log.append("Output path: " + output_path)
logger.info(parameter_log[-1])
configuration_log["path"] = {"workspace": working_path,
"output": output_path,
"hydro": hydro_path}
# N.B. hydro_path is sorted previously
# get basic setups for datasets
input_data = get_input_data_param_dictionary()
# Describe what the data is
for indata in input_data:
this_param = locals()[input_data[indata]['param']]
parameter_log.append(("%s: %s" % (indata, this_param)))
logger.info(parameter_log[-1])
if this_param is None:
continue
if ("".join(this_param.split()) != "") and \
("".join(this_param.split()) != "#"):
input_data[indata]['found'] = True
# suffix to add to end of output filenames, as <filename>_<suffix>
parameter_log.append("Suffix: " + suffix)
logger.info(parameter_log[-1])
if ("".join(suffix.split()) == "") or (suffix == "#"):
suffix = ""
# note: add the underscore when needed, not before
configuration_log["suffix"] = {"preprocessor": suffix}
###############################################################################
# Make sure that required inputs are provided for each objective chosen
input_raster_dict = {}
missing_data = False
for obj in objective: # So for each objective...
if objective[obj]['found']: # ... that we have found
logger.info(obj + " selected, checking sources: ")
for dataset in objective[obj]['dataset']: # check each dataset
if dataset in input_raster_dict.keys():
continue # since we've already found this data
if input_data[dataset]['found']: # ... has also been found
logger.debug("\t" + dataset)
# .. and if it's a raster
if locals()[input_data[dataset]['param']].endswith('tif'):
# ... save it to our dictionary
infile = locals()[input_data[dataset]['param']]
input_raster_dict[dataset] = {'file': infile}
else:
logger.error("Missing Data: %s %s required for %s" %
(dataset, input_data[dataset]['type'], obj))
missing_data = True # if not found: log + flag problem
input_vector_list = {'area of interest': {'file': aoi_shape_uri},
'river reference':
{'file': river_reference_shape_uri_list}}
configuration_log['input'] = {'raster': input_raster_dict,
'vector': input_vector_list}
# Handle exceptions.
if missing_data:
raise IOError("Please identify all required data inputs.")
del missing_data # housekeeping
# Check and create intermediate/output folders
for folder in [output_path, working_path]:
if not os.path.exists(folder):
os.mkdir(folder)
###############################################################################
# Output files
# Intermediate files that are not objective specific
flow_dir_channels_raster_uri = get_intermediate_file(working_path,
"flowdir_channels", suffix=suffix)
slope_index_uri = get_intermediate_file(working_path,
"slope_index", suffix=suffix)
erosivity_index_uri = get_intermediate_file(working_path,
"erosivity_index", suffix=suffix)
erodibility_index_uri = get_intermediate_file(working_path,
"erodibility_index", suffix=suffix)
soil_depth_norm_raster_uri = get_intermediate_file(working_path,
"soil_depth_norm", suffix=suffix)
soil_depth_index_uri = get_intermediate_file(working_path,
"soil_depth_index", suffix=suffix)
# Record of files output -> configuration_log (eventually)
outConf = {} # for calculated rasters
indConf = {} # for indexed rasters
normConf = {} # for normalised rasters
hydroConf = {} # for hydro derived rasters
indConf["slope"] = {'index': slope_index_uri}
indConf["rainfall erosivity"] = {'index': erosivity_index_uri}
indConf["erodibility"] = {'index': erodibility_index_uri}
normConf["soil depth"] = {'file': soil_depth_norm_raster_uri}
indConf["soil depth"] = {'index': soil_depth_index_uri}
###
# Field names in RIOS coefficient table
rios_fields = get_rios_coefficient_fieldnames()
# Keep track of whether frequently-used layers have been created in this run
# Want to override previous runs, but re-use current versions
made_lulc_coeffs = False
lulc_coeff_df = None
made_flowdir_channels = False
made_slope_index = False
made_flood_slope_index = False
made_gwater_slope_index = False
made_soil_depth_index = False
made_erosivity_index = False
made_erodibility_index = False
made_flgw_index_cover = False
made_flgw_index_rough = False
###############################################################################
# Start using geoprocessor for stuff
input_raster_list = [a['file'] for a in input_raster_dict.values()]
prj_good = is_projection_consistent(input_raster_list, dem_raster_uri)
if not prj_good:
err_msg = "Input rasters must be in the same projection"
logger.error(err_msg)
raise AssertionError(err_msg)
###############################################################################
# Preprocess DEM derivatives for hydrological routing
logger.info("Creating hydrology layers...")
if flow_dir_raster_uri is None:
flow_dir_raster_uri = \
hydro_naming_convention(hydro_path, dem_raster_uri, "_flow_dir")
if slope_raster_uri is None:
slope_raster_uri = \
hydro_naming_convention(hydro_path, dem_raster_uri, "_slope")
# Create flow accumulation raster
if flow_acc_raster_uri is None:
flow_acc_raster_uri = \
hydro_naming_convention(hydro_path, dem_raster_uri, "_flow_acc")
hydroConf["flow direction"] = {"file": flow_dir_raster_uri}
hydroConf["slope"] = {"file": slope_raster_uri}
hydroConf["flow accumulation"] = {"file": flow_acc_raster_uri}
create_hydro_layers(hydro_path, dem_raster_uri,
flow_dir_raster_uri=flow_dir_raster_uri,
slope_raster_uri=slope_raster_uri,
flow_acc_raster_uri=flow_acc_raster_uri)
if streams_raster_uri is None:
streams_raster_uri = output_path + "streams_" + suffix + ".tif"
if not os.path.exists(streams_raster_uri):
check_streams_raster_sourcedata(streams_raster_uri,
river_reference_shape_uri_list)
# real purpose of this function is to create the stream raster;
# 'threshold flow accumulation' value is deprecated
logger.info("Creating streams raster from reference shapefiles")
thflac = optimize_threshold_flowacc(flow_acc_raster_uri,
river_reference_shape_uri_list,
workspace_path=working_path,
suffix=suffix, seedlen=1000,
aoi_shape_uri=aoi_shape_uri,
streams_raster_uri=streams_raster_uri)
else:
with rasterio.open(streams_raster_uri, 'r') as streams_raster:
stream_data = streams_raster.read(1)
stream_meta = streams_raster.meta
with rasterio.open(flow_acc_raster_uri, 'r') as flow_acc:
flo_data = flow_acc.read(1)
thflac = np.min(flo_data[np.where((stream_data > 0) &
(stream_data != stream_meta['nodata']))])
hydroConf['streams'] = \
{"file": streams_raster_uri,
"source": {"flow accumulation": hydroConf["flow accumulation"],
"river reference": {"file": river_reference_shape_uri_list}},
"factor": {"flow accumulation threshold": thflac}}
if 'Streams' not in hydroConf.keys():
hydroConf['streams'] = {"file": streams_raster_uri}
# Set flow direction raster to null where there are streams
if not made_flowdir_channels:
burn_stream_into_flowdir_channels(flow_dir_raster_uri, streams_raster_uri,
flow_dir_channels_raster_uri)
made_flowdir_channels = True
hydroConf["flow direction with channels"] = \
{'file': flow_dir_channels_raster_uri,
'source': {"flow direction": hydroConf["flow direction"],
"streams": hydroConf['streams']}}
################################################################################
# Make sure that at least one objective is chosen
if True not in [objective[obj]['found'] for obj in objective.keys()]:
logger.error("Error: No | |
and (len(self.params) == 0)):
r = to_python(self.name, context, params)
if (r.startswith(".")):
r = r[1:]
r = indent_str + r
return r
# Is this a VBA internal function? Or a call to an external function?
func_name = str(self.name)
if ("." in func_name):
func_name = func_name[func_name.index(".") + 1:]
import vba_library
is_internal = (func_name.lower() in vba_library.VBA_LIBRARY)
if (is_internal or is_external):
# Make the Python parameter list.
first = True
args = "["
for p in py_params:
if (not first):
args += ", "
first = False
args += p
# Execute() (dynamic VB execution) will be converted to Python and needs some
# special arguments so the exec() of the JIT generated code works.
if ((str(func_name) == "Execute") or
(str(func_name) == "ExecuteGlobal") or
(str(func_name) == "AddCode") or
(str(func_name) == "AddFromString")):
args += ", locals(), \"__JIT_EXEC__\""
args += "]"
# Internal function?
r = None
if is_internal:
r = indent_str + "core.vba_library.run_function(\"" + str(func_name) + "\", vm_context, " + args + ")"
else:
r = indent_str + "core.vba_library.run_external_function(\"" + str(func_name) + "\", vm_context, " + args + ",\"\")"
return r
# Generate the Python function call to a local function.
r = func_name + "("
first = True
for p in py_params:
if (not first):
r += ", "
first = False
r += p
r += ")"
if (r.startswith(".")):
r = r[1:]
r = indent_str + r
# Done.
return r
def _handle_as_member_access(self, context):
    """
    Rewrite certain object method calls as member access expressions.

    Due to parsing limitations some method calls (e.g. ``obj.AddItem x``)
    are parsed as regular function calls; rebuild those as member access
    expressions and evaluate them that way.

    Returns the evaluated result, or None when this call should not be
    handled as member access (or the rebuilt expression fails to parse).
    """
    # Only dotted method calls qualify; leading-dot names are With calls.
    name_str = str(self.name).strip()
    if ("." not in name_str) or name_str.startswith("."):
        return None

    # Only a small set of methods is handled as member access.
    method = name_str[name_str.rindex(".") + 1:]
    if method not in {"AddItem"}:
        return None

    # Rebuild the call as source text. String arguments are quoted
    # VB-style, with embedded '"' characters doubled.
    rendered_args = []
    for param in self.params:
        val = eval_arg(param, context=context)
        if isinstance(val, str):
            val = '"' + val.replace('"', '""') + '"'
        rendered_args.append(str(val))
    call_text = name_str + "(" + ", ".join(rendered_args) + ")"

    try:
        expr = member_access_expression.parseString(call_text, parseAll=True)[0]
        # Evaluate the call as a member access expression.
        return expr.eval(context)
    except ParseException:
        # Could not reparse; caller falls back to normal handling.
        return None
def _handle_with_calls(self, context):
    """
    Handle method calls made inside a With block (e.g. '.WriteText "foo"').

    First tries the member-access rewrite; failing that, if the call name
    starts with '.' and a With prefix is active, evaluates the call as a
    method of the With object. Returns the evaluated result, or None when
    this is not a With-style call.
    """
    # A member-access rewrite takes priority.
    member_result = self._handle_as_member_access(context)
    if member_result is not None:
        return member_result

    # Only names like '.WriteText' can be With-object method calls.
    name_str = str(self.name).strip()
    if not name_str.startswith("."):
        return None

    # Must actually be inside a With block to know the target object.
    if len(context.with_prefix) == 0:
        return None

    # Build a fully qualified member access expression representing the
    # method call on the With object, then evaluate it.
    inner_call = Function_Call(None, None, None, old_call=self)
    inner_call.name = name_str[1:]  # drop the leading '.'
    qualified = MemberAccessExpression(
        None, None, None,
        raw_fields=(context.with_prefix, [inner_call], []))
    return eval_arg(qualified, context)
def eval(self, context, params=None):
    """
    Emulate this VBA/VBScript function or sub call.

    Resolution order, as implemented below: (1) short-circuit if an Exit
    Function was already hit; (2) delegate to a MemberAccessExpression name;
    (3) evaluate arguments and report interesting calls; (4) With-block /
    member-access handling; (5) built-in cases (MsgBox, Object.Method
    reporting); (6) look up the procedure in `context` and run it, with
    fallbacks for dotted names and Application.Run-style indirection.

    Returns the called procedure's result, 1 for MsgBox, "NULL" for an
    indirect Run that resolved nothing, or None when nothing was run.
    """
    # Exit if an exit function statement was previously called.
    if (context.exit_func):
        log.info("Exit function previously called. Not evaluating '" + str(self) + "'")
        return
    # Save the unresolved argument values.
    # NOTE(review): this is a module-global side channel read elsewhere in
    # vba_library — confirm before refactoring.
    import vba_library
    vba_library.var_names = self.params
    # Reset the called function name if this is an alias for an imported external
    # DLL function.
    dll_func_name = context.get_true_name(self.name)
    is_external = False
    if (dll_func_name is not None):
        is_external = True
        self.name = dll_func_name
    # Are we calling a member access expression?
    if isinstance(self.name, MemberAccessExpression):

        # Just evaluate the expression as the call.
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug("Call of member access expression " + str(self.name))
        r = self.name.eval(context, self.params)
        return r

    # TODO: The following should share the same code as MemberAccessExpression and Function_Call?

    # Get argument values.
    if (log.getEffectiveLevel() == logging.DEBUG):
        log.debug("Call: eval params: " + str(self.params))
    call_params = eval_args(self.params, context=context)
    # Truncate the param repr used for logging only.
    str_params = repr(call_params)
    if (len(str_params) > 80):
        str_params = str_params[:80] + "..."

    # Would Visual Basic have thrown an error when evaluating the arguments?
    if (context.have_error()):
        log.warn('Short circuiting function call %s(%s) due to thrown VB error.' % (self.name, str_params))
        return None

    # Log functions of interest.
    if (not context.throttle_logging):
        log.info('Calling Procedure: %s(%r)' % (self.name, str_params))
    if (is_external):
        context.report_action("External Call", self.name + "(" + str(call_params) + ")", self.name, strip_null_bytes=True)
    if ((self.name.lower() in context._log_funcs) or
        (any(self.name.lower().endswith(func.lower()) for func in Function_Call.log_funcs))):
        context.report_action(self.name, call_params, 'Interesting Function Call', strip_null_bytes=True)

    # Handle method calls inside a With statement.
    r = self._handle_with_calls(context)
    if (r is not None):
        return r

    # Handle VBA functions:
    func_name = str(self.name).strip()
    if func_name.lower() == 'msgbox':
        # 6.1.2.8.1.13 MsgBox
        context.report_action('Display Message', call_params, 'MsgBox', strip_null_bytes=True)
        # vbOK = 1
        return 1
    elif '.' in func_name:
        # Strip null bytes from .Write payloads before reporting.
        tmp_call_params = call_params
        if (func_name.endswith(".Write")):
            tmp_call_params = []
            for p in call_params:
                if (isinstance(p, str)):
                    tmp_call_params.append(p.replace("\x00", ""))
                else:
                    tmp_call_params.append(p)
        # Skip reporting for noisy/uninteresting method names.
        if ((func_name != "Debug.Print") and
            (not func_name.endswith("Add")) and
            (not func_name.endswith("Write")) and
            (len(tmp_call_params) > 0)):
            context.report_action('Object.Method Call', tmp_call_params, func_name, strip_null_bytes=True)

    # Emulate the function body.
    try:

        # Pull out the function name if referenced via a module, etc.
        if ("." in func_name):
            func_name = func_name[func_name.index(".") + 1:]

        # Get the function.
        s = context.get(func_name)
        if (s is None):
            raise KeyError("func not found")
        if (hasattr(s, "eval")):
            ret = s.eval(context=context, params=call_params)

            # Set the values of the arguments passed as ByRef parameters.
            if (hasattr(s, "byref_params") and s.byref_params):
                for byref_param_info in s.byref_params.keys():
                    if (byref_param_info[1] < len(self.params)):
                        arg_var_name = str(self.params[byref_param_info[1]])
                        context.set(arg_var_name, s.byref_params[byref_param_info])

            # We are out of the called function, so if we exited the called function early
            # it does not apply to the current function.
            context.exit_func = False

            # Return function result.
            return ret

    except KeyError:

        # Fallback 1: normalize the name ($, VBA./Math. prefixes, brackets,
        # quotes) and retry the lookup.
        try:
            tmp_name = func_name.replace("$", "").replace("VBA.", "").replace("Math.", "").\
                replace("[", "").replace("]", "").replace("'", "").replace('"', '')
            if ("." in tmp_name):
                tmp_name = tmp_name[tmp_name.rindex(".") + 1:]
            if (log.getEffectiveLevel() == logging.DEBUG):
                log.debug("Looking for procedure %r" % tmp_name)
            s = context.get(tmp_name)
            if (log.getEffectiveLevel() == logging.DEBUG):
                log.debug("Found procedure " + tmp_name + " = " + str(s))
            if (s):
                if (log.getEffectiveLevel() == logging.DEBUG):
                    log.debug("Found procedure. Running procedure " + tmp_name)
                s.eval(context=context, params=call_params)
        except KeyError:

            # If something like Application.Run("foo", 12) is called, foo(12) will be run.
            # Try to handle that.
            if (log.getEffectiveLevel() == logging.DEBUG):
                log.debug("Did not find procedure.")
            if ((func_name == "Application.Run") or (func_name == "Run")):

                # Pull the name of what is being run from the 1st arg.
                new_func = call_params[0]

                # The remaining params are passed as arguments to the other function.
                new_params = call_params[1:]

                # See if we can run the other function.
                if (log.getEffectiveLevel() == logging.DEBUG):
                    log.debug("Try indirect run of function '" + new_func + "'")
                r = "NULL"
                try:

                    # Emulate the function, drilling down through layers of indirection to get the func name.
                    s = context.get(new_func)
                    while (isinstance(s, str)):
                        s = context.get(s)
                    if (isinstance(s, procedures.Function) or
                        isinstance(s, procedures.Sub) or
                        isinstance(s, VbaLibraryFunc)):
                        s = s.eval(context=context, params=new_params)
                        r = s

                    # We are out of the called function, so if we exited the called function early
                    # it does not apply to the current function.
                    context.exit_func = False

                    # Return the function result. This is "NULL" if we did not run a function.
                    return r

                except KeyError:

                    # Return the function result. This is "NULL" if we did not run a function.
                    context.increase_general_errors()
                    log.warning('Function %r not found' % func_name)
                    return r

            # Report that we could not find the function.
            context.increase_general_errors()
            log.warning('Function %r not found' % func_name)

    except Exception as e:
        # Unexpected emulation error: record the traceback but keep going so
        # one bad call does not abort the whole analysis.
        traceback.print_exc(file=sys.stdout)
        if (log.getEffectiveLevel() == logging.DEBUG):
            log.debug("General error: " + str(e))
    return
# 5.4.2.1 Call Statement
# a call statement is similar to a | |
vars(self)
for key in attributes:
print(key, ': \t', attributes[key])
class CentralCamera(Camera):
"""
A (central projection) camera class
"""
# list of attributes
_name = [] # camera name (string)
_camtype = [] # camera type (string)
_nu = [] # number of pixels horizontal
_nv = [] # number of pixels vertical
_u0 = [] # principal point horizontal
_v0 = [] # principal point vertical
_rhou = [] # pixel imagesize (single pixel) horizontal
_rhov = [] # pixel imagesize (single pixel) vertical
_fu = [] # focal length horizontal [units]
_fv = [] # focal length vertical [units]
_image = [] # image (TODO image class?), for now, just numpy array
_T = [] # camera pose (homogeneous transform, SE3 class)
_fig = [] # for plotting, figure handle/object reference
_ax = [] # for plotting, axes handle
def __init__(self,
             f=8*1e-3,
             pp=None,
             **kwargs):
    """
    Create a central-projection camera instance.

    :param f: focal length, scalar or 2-vector (horizontal, vertical);
        default 8 mm (8e-3, presumably metres — TODO confirm units)
    :param pp: principal point (u0, v0); if None it is placed at the
        centre of the image plane
    :param kwargs: remaining options forwarded to the ``Camera`` base class
    """
    super().__init__(**kwargs)
    # TODO some of this logic to f and pp setters
    self.f = f
    if pp is None:
        # Default the principal point to the image centre; relies on
        # _nu/_nv having been set by the base-class constructor.
        print('principal point not specified, \
setting it to centre of image plane')
        self.pp = (self._nu / 2, self._nv / 2)
    else:
        self.pp = pp
def __str__(self):
    """Return the base-class description extended with the principal
    point and focal length."""
    desc = super().__str__()
    for label, value in (('principal pt', self.pp),
                         ('focal length', self.f)):
        desc += self.fmt.format(label, value)
    return desc
def project(self, P, pose=None, objpose=None, visibility=False):
    """
    Project world points to image plane

    :param P: 3D points to project into camera image plane
    :type P: array_like(3), array_like(3,n)
    :param pose: camera pose with respect to the world frame, defaults to
        camera's ``pose`` attribute
    :type pose: SE3, optional
    :param objpose: 3D point reference frame, defaults to world frame
    :type objpose: SE3, optional
    :param visibility: test if points are visible, default False
    :type visibility: bool
    :return: image plane points; if ``visibility`` is True, also an
        ndarray(n) of booleans, True where the point projects inside
        the image bounds
    :rtype: ndarray(2,n) or (ndarray(2,n), ndarray(n))

    If ``pose`` is specified it is used for the camera pose instead of the
    attribute ``pose``. The object's attribute is not updated.

    The points ``P`` are by default with respect to the world frame, but
    they can be transformed by specifying ``objpose``.

    If points are behind the camera, the image plane points are set to
    NaN.

    If ``P`` is a Plucker object, then each value is projected into a
    2D line in homogeneous form :math:`p[0] u + p[1] v + p[2] = 0`.
    """
    P = base.getmatrix(P, (3, None))
    if pose is None:
        pose = self.pose

    C = self.getC(pose)

    if isinstance(P, np.ndarray):
        # Project 3D points.
        # NOTE(review): getmatrix likely always yields an ndarray here,
        # which would make the Plucker branch below unreachable — confirm
        # getmatrix's behavior for Plucker inputs.
        if objpose is not None:
            P = objpose * P
        x = C @ base.e2h(P)

        # Points behind the camera get NaN depth, which propagates to the
        # image coordinates through the homogeneous division below.
        x[2, x[2, :] < 0] = np.nan
        x = base.h2e(x)

        # if self._distortion is not None:
        #     x = self.distort(x)
        # if self._noise is not None:
        #     # add Gaussian noise with specified standard deviation
        #     x += np.diag(self._noise) * np.random.randn(x.shape)

        # Do visibility check if required.
        if visibility:
            visible = ~np.isnan(x[0, :]) \
                & (x[0, :] >= 0) \
                & (x[1, :] >= 0) \
                & (x[0, :] < self.nu) \
                & (x[1, :] < self.nv)

            # BUG FIX: previously returned the input flag ``visibility``
            # (always True) instead of the computed boolean array.
            return x, visible
        else:
            return x

    elif isinstance(P, Plucker):
        # Project Plucker lines to homogeneous 2D lines.
        x = np.empty(shape=(3, 0))
        for p in P:
            l = base.vex(C * p.skew * C.T)
            x = np.c_[x, l / np.max(np.abs(l))]  # normalize by largest element
        return x
@property
def u0(self):
    """
    Horizontal (u) coordinate of the principal point.

    :return: horizontal component of principal point
    :rtype: float

    :seealso: :func:`v0`, :func:`pp`
    """
    return self._u0
@property
def v0(self):
    """
    Vertical (v) coordinate of the principal point.

    :return: vertical component of principal point
    :rtype: float

    :seealso: :func:`u0`, :func:`pp`
    """
    return self._v0
@property
def pp(self):
    """
    Principal point as a (u0, v0) pair.

    :return: principal point coordinates
    :rtype: 2-tuple

    :seealso: :func:`u0`, :func:`v0`
    """
    return (self._u0, self._v0)
@pp.setter
def pp(self, pp):
    """
    Set the principal point.

    A scalar sets both coordinates to the same value; a 2-vector sets
    (u0, v0) separately.

    :param pp: principal point
    :type pp: array_like(2)
    :raises ValueError: if ``pp`` is not a 1- or 2-element vector

    :seealso: :func:`u0`, :func:`v0`
    """
    pp = base.getvector(pp)
    if len(pp) == 1:
        self._u0 = self._v0 = pp[0]
    elif len(pp) == 2:
        self._u0, self._v0 = pp
    else:
        raise ValueError(pp, 'pp must be a 1- or 2-element vector')
@property
def fu(self):
    """
    Focal length in the horizontal direction.

    :return: horizontal focal length
    :rtype: float

    :seealso: :func:`fv`, :func:`f`
    """
    return self._fu
@property
def fv(self):
    """
    Focal length in the vertical direction.

    :return: vertical focal length
    :rtype: float

    :seealso: :func:`fu`, :func:`f`
    """
    return self._fv
@property
def f(self):
    """
    Focal length as an (fu, fv) pair.

    :return: horizontal and vertical focal lengths
    :rtype: 2-tuple

    :seealso: :func:`fu`, :func:`fv`
    """
    return (self._fu, self._fv)
@f.setter
def f(self, f):
    """
    Set the focal length.

    A scalar sets both directions to the same value; a 2-vector sets
    (fu, fv) separately.

    :param f: focal length
    :type f: scalar or array_like(2)
    :raises ValueError: if ``f`` is not a 1- or 2-element vector
    """
    f = base.getvector(f)
    if len(f) == 1:
        self._fu = self._fv = f[0]
    elif len(f) == 2:
        self._fu, self._fv = f
    else:
        raise ValueError(f, 'f must be a 1- or 2-element vector')
@property
def K(self):
    """
    Intrinsic (calibration) matrix of the camera.

    :return: 3x3 intrinsic matrix built from focal lengths, pixel
        sizes and the principal point
    :rtype: ndarray(3,3)
    """
    # BUG FIX: dtype was np.float, an alias removed in NumPy 1.24+
    # (AttributeError at runtime); the builtin float is equivalent.
    K = np.array([[self._fu / self._rhou, 0, self._u0],
                  [0, self._fv / self._rhov, self._v0],
                  [0, 0, 1]], dtype=float)
    return K
@property
def C(self):
    """
    Camera projection (calibration) matrix for the camera's current pose.

    :return: 3x4 camera matrix K @ [I | 0] @ inv(pose)
    :rtype: ndarray(3,4)
    """
    # BUG FIX: dtype was np.float, an alias removed in NumPy 1.24+
    # (AttributeError at runtime); the builtin float is equivalent.
    P0 = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0]], dtype=float)
    return self.K @ P0 @ self.pose.inv().A
def getC(self, T=None):
    """
    Camera projection matrix for a given pose.

    :param T: camera pose; if None, the camera's own ``pose``
        attribute is used
    :type T: SE3, optional
    :return: 3x4 camera matrix K @ [I | 0] @ inv(T)
    :rtype: ndarray(3,4)
    """
    # BUG FIX: dtype was np.float, an alias removed in NumPy 1.24+
    # (AttributeError at runtime); the builtin float is equivalent.
    P0 = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0]], dtype=float)
    if T is None:
        C = self.K @ P0 @ self.pose.inv().A
    else:
        C = self.K @ P0 @ T.inv().A
    return C
def H(self, T, N, d):
    """
    Homography matrix

    ``H(T, N, d)`` is the (3, 3) homography matrix for the camera observing
    the plane with normal ``N`` and at distance ``d`` from two viewpoints.
    The first view is from the current camera pose (self.T), and the second
    is after a relative motion represented by the homogeneous
    transformation ``T``.

    :param T: relative camera motion between the two views
    :param N: plane normal, 3-vector with N[2] >= 0
    :param d: perpendicular distance to the plane, must be > 0
    :raises ValueError: if ``d`` is not positive or the normal points
        toward the camera
    :return: normalised 3x3 homography
    """
    # BUG FIX: the check was `d < 0`, which let d == 0 through to a
    # division by zero even though the message says d must be > 0.
    if d <= 0:
        raise ValueError(d, 'plane distance d must be > 0')

    N = base.getvector(N)
    if N[2] < 0:
        raise ValueError(N, 'normal must be away from camera (N[2] >= 0)')

    # T transforms view 1 to view 2
    T = SE3(T).inv()

    # BUG FIX: the plane-induced homography is R + (t / d) N^T, an outer
    # product giving a 3x3 matrix. The previous `T.t @ N` computed a
    # scalar dot product (the original comment itself noted the need for
    # "column then row = 3x3").
    HH = T.R + np.outer(T.t, N) / d

    # apply camera intrinsics
    HH = self.K @ HH @ np.linalg.inv(self.K)

    return HH / HH[2, 2]  # normalised
def invH(self, H, K=None, ):
"""
Decompose homography matrix
``self.invH(H)`` decomposes the homography ``H`` (3,3) into the camerea
motion and the normal to the plane. In practice, there are multiple
solutions and the return ``S`` is a named tuple with elements
``S.T``, the camera motion as a homogeneous transform matrix (4,4), and
translation not to scale, and ``S.N`` the normal vector to the plawne
(3,3). # TODO why is the normal vector a 3x3?
"""
if K is None:
K = np.identity(3)
# also have K = self.K
H = np.linalg.inv(K) @ H @ K
# normalise so that the second singular value is one
U, S, V = np.linalg.svd(H, compute_uv=True)
H = H / S[1, 1]
# compute the SVD of the symmetric matrix H'*H = VSV'
U, S, V = np.linalg.svd(np.transpose(H) @ H)
# ensure V is right-handed
if np.linalg.det(V) < 0:
print('det(V) was < 0')
V = -V
# get squared singular values
s0 = S[0, 0]
s2 = S[2, 2]
# v0 = V[0:, 0]
# v1 = V[0:, 1]
# v2 = V[0:, 2]
# pure rotation - where all singular values == 1
if np.abs(s0 - s2) < (100 * np.spacing(1)):
print('Warning: Homography due to pure rotation')
if np.linalg.det(H) < 0:
H = -H
# sol = namedtuple('T', T, ''
| |
a * l1 + entries * l2,
# if we suppose that n, m, i, j > 0 we would have 0 <= a <= n + i and 0 <= entries <= m + j
# If we don't restrict these to be positive, then we only know a has to be between
# the smallest and largest combination of n and i (and similarly for entries)
latt_vect1_mult = np.arange(np.min([0, n, i, n+i]), np.max([0, n, i, n+i]))
if latt_vect1_mult.size == 0:
# in the case where periodicity_vect1 = [[0], [0]] we want this to be non-empty
latt_vect1_mult = np.array([0.])
latt_vect2_mult = np.arange(np.min([0, m, j, m+j]), np.max([0, m, j, m+j]))
if latt_vect2_mult.size == 0:
latt_vect2_mult = np.array([0.])
# expanded list of all possible sums of lattice vectors
xx, yy = np.meshgrid(latt_vect1_mult, latt_vect2_mult)
xrav = xx.ravel()
yrav = yy.ravel()
# size of basis
nbasis = len(self.basis_vects)
vects = np.zeros((2, xx.size * nbasis))
for ii in range(0, xx.size):
for jj in range(0, nbasis):
vects[:, ii * nbasis + jj][:, None] = xrav[ii] * self.lattice_vect1 + yrav[ii] * self.lattice_vect2 + self.basis_vects[jj]
# reduce to sites with periodicity unit self
xlocs_red, ylocs_red, _, _ = reduce_vectors(self.periodicity_vect1, self.periodicity_vect2,
vects[0, :], vects[1, :], mode='positive')
xlocs_red = np.round(xlocs_red, self._round_decimals)
ylocs_red = np.round(ylocs_red, self._round_decimals)
# eliminate duplicates
locs = np.unique(np.concatenate([xlocs_red[None, :], ylocs_red[None, :]], 0), axis=1)
xlocs = locs[0, :]
ylocs = locs[1, :]
nsites = len(xlocs)
return nsites, xlocs, ylocs
def get_reduced_distance(self, xlocs, ylocs):
"""
Returns the distance between two sites taking into account the periodicity of our lattice.
:param xlocs: a list of the x-coordinates of the lattice sites
:param ylocs: a list of the y-coordinates of the lattice sites
:return: xdist_min, ydist_min, latt_vect1_dist, latt_vect2_dist
xdist_min: is an nsites x nsites matrix where M[ii, jj] is the x-distance between sites i and j
ydist_min:
latt_vect1_dist: is an nsites x nsites matrix where M[ii, jj] is the number latt_vect1's separating sites i and j
latt_vect2_dist:
"""
nsites = len(xlocs)
xdist_min = np.zeros([nsites, nsites])
ydist_min = np.zeros([nsites, nsites])
latt_vect1_dist = np.zeros([nsites, nsites])
latt_vect2_dist = np.zeros([nsites, nsites])
for ii in range(0, nsites):
for jj in range(0, ii):
xdist_min[ii, jj], ydist_min[ii, jj], _, _ = \
reduce_vectors(self.periodicity_vect1, self.periodicity_vect2,
xlocs[ii] - xlocs[jj], ylocs[ii] - ylocs[jj], mode='centered')
_, _, latt_vect1_dist[ii, jj], latt_vect2_dist[ii, jj] = \
reduce_vectors(self.lattice_vect1, self.lattice_vect2,
xdist_min[ii, jj], ydist_min[ii, jj], mode='centered')
xdist_min[jj, ii] = - xdist_min[ii, jj]
ydist_min[jj, ii] = - ydist_min[ii, jj]
latt_vect1_dist[jj, ii] = - latt_vect1_dist[ii, jj]
latt_vect2_dist[jj, ii] = - -latt_vect2_dist[ii, jj]
return xdist_min, ydist_min, latt_vect1_dist, latt_vect2_dist
def get_phase_mat(self, xdist_matrix, ydist_matrix):
"""
Create a matrix of phase factors that should be included on e.g. hoppings or interaction terms between sites i
and j, based on the phases given by the class.
:param xdist_matrix: matrix of size nsites x nsites, where M[i,j] is the minimum distance between sites i and j
:param ydist_matrix:
:return: phase_mat
phase_mat:
"""
nsites = xdist_matrix.shape[0]
# create phase factors
phase_mat = np.zeros([nsites, nsites], dtype=np.complex)
for ii in range(0, nsites):
for jj in range(0, nsites):
# phase_mat[ii, jj] = site_phases1[ii] * site_phases1[jj].conj() * site_phases2[ii] * site_phases2[
# jj].conj()
amp1 = np.exp(1j * self.phase1 * (
xdist_matrix[ii, jj] * self.reciprocal_periodicity_vect1[0] +
ydist_matrix[ii, jj] * self.reciprocal_periodicity_vect1[1]))
amp2 = np.exp(1j * self.phase2 * (
xdist_matrix[ii, jj] * self.reciprocal_periodicity_vect2[0] +
ydist_matrix[ii, jj] * self.reciprocal_periodicity_vect2[1]))
phase_mat[ii, jj] = amp1 * amp2
if not np.any(phase_mat.imag > 10 ** -self._round_decimals):
phase_mat = phase_mat.real
return phase_mat
def reduce_to_unit_cell(self, xlocs, ylocs, mode='positive'):
return reduce_vectors(self.periodicity_vect1, self.periodicity_vect2, xlocs, ylocs, mode=mode)
# #################################
# Validation functions
# #################################
def validate_instance(self):
"""
Validate if the lattice class instance is correctly formed
:return:
"""
if not self.validate_latt_vects():
return 0
if not self.validate_periodicity_vects():
return 0
# check compatibility or periodicity vectors with lattice vectors
xred_p1, yred_p1, _, _ = reduce_vectors(self.lattice_vect1, self.lattice_vect2, self.periodicity_vect1[0, 0],
self.periodicity_vect1[1, 0], mode='positive')
if not np.array_equiv(xred_p1, 0) and np.array_equiv(yred_p1, 0):
return 0
xred_p2, yred_p2, _, _ = reduce_vectors(self.lattice_vect1, self.lattice_vect2, self.periodicity_vect2[0, 0],
self.periodicity_vect2[1, 0], mode='positive')
if not np.array_equiv(xred_p2, 0) and np.array_equiv(yred_p2, 0):
return 0
return 1
def validate_latt_vects(self):
# validate lattice vectors
norm1 = np.sqrt(self.lattice_vect1.transpose().dot(self.lattice_vect1))
norm2 = np.sqrt(self.lattice_vect2.transpose().dot(self.lattice_vect2))
det = np.linalg.det(np.concatenate((self.lattice_vect1, self.lattice_vect2), 1))
if np.round(norm1, self._round_decimals) == 0 or \
np.round(norm2, self._round_decimals) == 0 or \
np.round(det, self._round_decimals) == 0:
return 0
return 1
def validate_periodicity_vects(self):
# ensure periodicity vectors exist
if self.periodicity_vect1 is None or self.periodicity_vect2 is None:
return 0
# ensure periodicity vectors are not linearly dependent, if they are non-zero
# TODO: want to allow periodicity vectors to be zero in some cases ... maybe don't want this test
norm1 = np.sqrt(self.periodicity_vect1.transpose().dot(self.periodicity_vect1))
norm2 = np.sqrt(self.periodicity_vect2.transpose().dot(self.periodicity_vect2))
det = self.periodicity_vect1[0] * self.periodicity_vect2[1] - self.periodicity_vect1[1] * \
self.periodicity_vect2[0]
if not np.round(norm1, self._round_decimals) == 0 and not \
np.round(norm2, self._round_decimals) == 0 and \
np.round(det, self._round_decimals) == 0:
# i.e. if our periodicity vectors are linearly dependent but non-zero
return 0
return 1
# #################################
# Comparison functions
# #################################
def __eq__(self, other):
if np.array_equal(self.lattice_vect1, other.lattice_vect1) and \
np.array_equal(self.lattice_vect2, other.lattice_vect2) and \
np.array_equal(self.periodicity_vect1, other.periodicity_vect1) and \
np.array_equal(self.periodicity_vect2, other.periodicity_vect2) and \
self.phase1 == other.phase1 and \
self.phase2 == other.phase2:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
# ##########################
# module functions
# ##########################
def get_reciprocal_vects(vect1, vect2):
    """
    Compute the reciprocal vectors. If we call the periodicity vectors a_i and the
    reciprocal vectors b_j, then these should be defined such that dot(a_i, b_j) = delta_{ij}.

    Zero vectors are handled specially: the reciprocal of a zero vector is the zero
    vector, and a single non-zero vector is inverted on its own span.

    :param vect1: 2-vector (any shape accepted by ensure_column_vect)
    :param vect2: 2-vector
    :return: reciprocal_vect1, reciprocal_vect2, each of shape (2, 1)
    :raises Exception: if vect1 and vect2 are both non-zero but linearly dependent
    """
    vect1 = ensure_column_vect(vect1)
    vect2 = ensure_column_vect(vect2)
    vect1_is_zero = np.array_equal(vect1, np.zeros((2, 1)))
    vect2_is_zero = np.array_equal(vect2, np.zeros((2, 1)))
    if not vect1_is_zero and not vect2_is_zero:
        # rows of A are the input vectors, so the columns of A^-1 are the reciprocal
        # vectors: A @ inv(A) = identity gives exactly dot(a_i, b_j) = delta_{ij}
        A_mat = np.concatenate([vect1.transpose(), vect2.transpose()], 0)
        try:
            inv_a = np.linalg.inv(A_mat)
        except np.linalg.LinAlgError as err:
            # BUG FIX: the original message said "linearly independent"; a singular
            # matrix here means the vectors are linearly DEPENDENT. Chain the cause
            # so the numpy error remains visible.
            raise Exception('vect1 and vect2 are linearly dependent, so their reciprocal '
                            'vectors could not be computed.') from err
        reciprocal_vect1 = inv_a[:, 0][:, None]
        reciprocal_vect2 = inv_a[:, 1][:, None]
    elif vect1_is_zero and not vect2_is_zero:
        reciprocal_vect1 = np.zeros((2, 1))
        norm2 = np.sqrt(vect2.transpose().dot(vect2))
        reciprocal_vect2 = vect2 / norm2 ** 2
    elif not vect1_is_zero and vect2_is_zero:
        reciprocal_vect2 = np.zeros((2, 1))
        norm1 = np.sqrt(vect1.transpose().dot(vect1))
        reciprocal_vect1 = vect1 / norm1 ** 2
    else:
        reciprocal_vect1 = np.zeros((2, 1))
        reciprocal_vect2 = np.zeros((2, 1))
    return reciprocal_vect1, reciprocal_vect2
def reduce_vectors(vect1, vect2, xlocs, ylocs, mode='positive'):
"""
Given an arbitrary vector and a pair of basis vectors specifying a periodicity (TODO: sharpend this defn),
reduce the arbitrary vector to its representative in the Brillouin zone (analog). (TODO: add support for
either using a symmetric BZ or an always positive BZ).
:param vect1: size 2 x 1, i.e. a column vector
:param vect2: size 2 x 1, i.e. a column vector
:param xlocs:
:param ylocs:
:param mode: "positive" or "centered"
:return: xs_red, xs reduced to lie within the symmetry region
:return: ys_red, ys reduced to lie within the symmetry region
:return: n1s number of bvect1's subtracted from vects to get vects_reduced
:return: n2s number of bvect2's subtracted from vects to get vects_reduced
"""
# ensure input vectors have the desired format
periodicity_vect1 = ensure_column_vect(vect1)
periodicity_vect2 = ensure_column_vect(vect2)
# norms and dot products of these vectors
norm1 = np.sqrt(np.sum(periodicity_vect1 * periodicity_vect1))
norm2 = np.sqrt(np.sum(periodicity_vect2 * periodicity_vect2))
det = float(periodicity_vect1[0, 0] * periodicity_vect2[1, 0] - periodicity_vect1[1, 0] * periodicity_vect2[0, 0])
# shape xlocs as desired
xlocs = ensure_row_vect(xlocs)
ylocs = ensure_row_vect(ylocs)
# create a row vector of column vectors. Left multiplying this by a matrix M applies M to each of our column vectors
vects = np.concatenate([xlocs, ylocs], 0)
# we want to write each vector v = a*P^a + entries*P^entries
# if P1 and P2 are orthogonal, this is easy. We simply take the dot product of v and P1, P2
# if P1 and P2 are not orthogonal, then we change coordinates so that they are.
# Suppose M is a basis change matrix such that M*P^a = e1 and M*P^entries = e2
# Then Mv = a*e1 + entries*e2
# It is easy to see that M^(-1) is a matrix where the | |
see cytopy.transform.Scaler
scale_kwargs: dict, optional
Additional keyword arguments passed to Scaler
kwargs:
Keyword arguments for initialising Scikit-learn class
Returns
-------
Pandas.DataFrame and None and None
Updated dataframe with a new column named 'meta_label' with the meta-clustering
associations
Raises
------
AssertionError
Invalid Scikit-Learn or equivalent class provided in method
"""
vprint_ = vprint(verbose)
assert method in globals().keys(), \
"Not a recognised method from the Scikit-Learn cluster/mixture modules or HDBSCAN"
model = globals()[method](**kwargs)
vprint_(f"------ {method} meta-clustering ------")
vprint_("...summarising clusters")
metadata = summarise_clusters(data, features, scale_method, scale_kwargs, summary_method)
vprint_("...clustering the clusters")
metadata["meta_label"] = model.fit_predict(metadata[features].values)
if print_performance_metrics:
clustering_performance(metadata[features], metadata["meta_label"].values)
vprint_("...assigning meta-labels")
data = _assign_metalabels(data, metadata)
vprint_("------ Complete ------")
return data, None, None
def phenograph_metaclustering(data: pd.DataFrame,
                              features: list,
                              verbose: bool = True,
                              summary_method: str = "median",
                              scale_method: str or None = None,
                              scale_kwargs: dict or None = None,
                              print_performance_metrics: bool = True,
                              **kwargs):
    """
    Meta-clustering with the PhenoGraph algorithm. This function
    will summarise the clusters in 'data' (where cluster IDs should be contained in a column
    named 'cluster_label') and then 'cluster the clusters' using PhenoGraph.

    Parameters
    ----------
    data: Pandas.DataFrame
        Clustered data with columns for sample_id and cluster_label
    features: list
        Columns clustering is performed on
    summary_method: str (default="median")
        How to summarise the clusters for meta-clustering
    print_performance_metrics: bool = True
        Print Calinski-Harabasz Index, Silhouette Coefficient, and Davies-Bouldin Index
        (see https://scikit-learn.org/stable/modules/clustering.html#clustering-performance-evaluation)
    verbose: bool (default=True)
        Whether to provide feedback to stdout
    scale_method: str, optional
        Perform scaling of centroids; see cytopy.transform.Scaler
    scale_kwargs: dict, optional
        Additional keyword arguments passed to Scaler
    kwargs:
        Keyword arguments passed to phenograph.cluster

    Returns
    -------
    Pandas.DataFrame, Graph, float
        Updated dataframe with a new column named 'meta_label' with the meta-clustering
        associations, the PhenoGraph graph object, and the modularity score q
    """
    vprint_ = vprint(verbose)
    vprint_("----- Phenograph meta-clustering ------")
    # BUG FIX: the "...summarising clusters" message was previously printed *after*
    # summarise_clusters had already run; report progress before the work, matching
    # the ordering used by the other meta-clustering functions in this module.
    vprint_("...summarising clusters")
    metadata = summarise_clusters(data, features, scale_method, scale_kwargs, summary_method)
    vprint_("...clustering the clusters")
    communities, graph, q = phenograph.cluster(metadata[features].values, **kwargs)
    metadata["meta_label"] = communities
    if print_performance_metrics:
        clustering_performance(metadata[features], metadata["meta_label"].values)
    vprint_("...assigning meta-labels")
    data = _assign_metalabels(data, metadata)
    vprint_("------ Complete ------")
    return data, graph, q
def consensus_metacluster(data: pd.DataFrame,
                          features: list,
                          cluster_class: object,
                          verbose: bool = True,
                          summary_method: str = "median",
                          scale_method: str or None = None,
                          scale_kwargs: dict or None = None,
                          smallest_cluster_n: int = 5,
                          largest_cluster_n: int = 15,
                          n_resamples: int = 10,
                          resample_proportion: float = 0.5,
                          print_performance_metrics: bool = True,
                          **kwargs):
    """
    Meta-clustering with the consensus clustering algorithm, as first described here:
    https://link.springer.com/content/pdf/10.1023%2FA%3A1023949509487.pdf. This function
    will summarise the clusters in 'data' (where cluster IDs should be contained in a column
    named 'cluster_label') and then 'cluster the clusters'. The optimal number of clusters is
    taken as a consensus amongst multiple rounds of clustering with random starts. The algorithm
    used for clustering should be given with 'cluster_class' and should have the Scikit-Learn
    signatures for clustering i.e. fit_predict method.

    Parameters
    ----------
    data: Pandas.DataFrame
        Clustered data with columns for sample_id and cluster_label
    features: list
        Columns clustering is performed on
    summary_method: str (default="median")
        How to summarise the clusters for meta-clustering
    cluster_class: object
        Scikit-learn (or alike) object with the method 'fit_predict'.
    verbose: bool (default=True)
        Whether to provide feedback to stdout
    smallest_cluster_n: int (default=5)
        Minimum number of clusters to search for in consensus clustering
    largest_cluster_n: int (default=15)
        Maximum number of clusters to search for in consensus clustering
    n_resamples: int (default=10)
        Number of resampling rounds in consensus clustering
    resample_proportion: float (default=0.5)
        Proportion of data to sample (with replacement) in each round of sampling
        in consensus clustering
    print_performance_metrics: bool = True
        Print Calinski-Harabasz Index, Silhouette Coefficient, and Davies-Bouldin Index
        (see https://scikit-learn.org/stable/modules/clustering.html#clustering-performance-evaluation)
    scale_method: str, optional
        Perform scaling of centroids; see cytopy.transform.Scaler
    scale_kwargs: dict, optional
        Additional keyword arguments passed to Scaler
    kwargs:
        Additional keyword arguments to pass to ConsensusCluster

    Returns
    -------
    Pandas.DataFrame and None and None
        Updated dataframe with a new column named 'meta_label' with the meta-clustering
        associations

    Raises
    ------
    AssertionError
        If maximum number of meta clusters exceeds the maximum number of clusters identified in any
        one sample
    """
    vprint_ = vprint(verbose)
    # CONSISTENCY FIX: print the banner and progress messages before the work starts
    # and report completion, matching the other meta-clustering functions in this module.
    vprint_("----- Consensus meta-clustering ------")
    vprint_("...summarising clusters")
    metadata = summarise_clusters(data, features, scale_method, scale_kwargs, summary_method)
    # consensus clustering resamples the summarised clusters, so each resample must
    # contain more clusters than the largest cluster count being searched for
    assert (metadata.shape[0] * resample_proportion) > largest_cluster_n, \
        f"Maximum number of meta clusters (largest_cluster_n) is currently set to {largest_cluster_n} but there are " \
        f"only {metadata.shape[0] * resample_proportion} clusters to cluster in each sample. Either decrease " \
        f"largest_cluster_n or increase resample_proportion."
    consensus_clust = ConsensusCluster(cluster=cluster_class,
                                       smallest_cluster_n=smallest_cluster_n,
                                       largest_cluster_n=largest_cluster_n,
                                       n_resamples=n_resamples,
                                       resample_proportion=resample_proportion,
                                       **kwargs)
    vprint_("...clustering the clusters")
    consensus_clust.fit(metadata[features].values)
    metadata["meta_label"] = consensus_clust.predict_data(metadata[features])
    if print_performance_metrics:
        clustering_performance(metadata[features], metadata["meta_label"].values)
    vprint_("...assigning meta-labels")
    data = _assign_metalabels(data, metadata)
    vprint_("------ Complete ------")
    return data, None, None
def _flowsom_clustering(data: pd.DataFrame,
                        features: list,
                        verbose: bool,
                        meta_cluster_class: object,
                        init_kwargs: dict or None = None,
                        training_kwargs: dict or None = None,
                        meta_cluster_kwargs: dict or None = None):
    """
    Wrapper of the FlowSOM method (see cytopy.flow.clustering.flowsom for local
    implementation). Takes a dataframe to cluster and returns a trained FlowSOM
    object, with meta-clustering of SOM nodes performed.

    Parameters
    ----------
    data: Pandas.DataFrame
        Feature space
    features: list
        Columns to perform clustering on
    verbose: bool
        Whether to print output to stdout
    meta_cluster_class: object
        Scikit-learn (or alike) object with the method 'fit_predict'; used for
        consensus clustering of SOM nodes
    init_kwargs: dict, optional
        Additional initialisation keyword parameters for FlowSOM (see cytopy.flow.clustering.flowsom.FlowSOM)
    training_kwargs: dict, optional
        Additional training keyword parameters for FlowSOM
        (see cytopy.flow.clustering.flowsom.FlowSOM.train)
    meta_cluster_kwargs: dict, optional
        Additional meta_cluster keyword parameters for FlowSOM
        (see cytopy.flow.clustering.flowsom.FlowSOM.meta_cluster)

    Returns
    -------
    FlowSOM
    """
    # treat missing kwargs dicts as empty; `or {}` also normalises an explicit None
    som = FlowSOM(data=data,
                  features=features,
                  verbose=verbose,
                  **(init_kwargs or {}))
    som.train(**(training_kwargs or {}))
    som.meta_cluster(cluster_class=meta_cluster_class,
                     **(meta_cluster_kwargs or {}))
    return som
def flowsom_clustering(data: pd.DataFrame,
                       features: list,
                       verbose: bool,
                       meta_cluster_class: callable,
                       global_clustering: bool = False,
                       init_kwargs: dict or None = None,
                       training_kwargs: dict or None = None,
                       meta_cluster_kwargs: dict or None = None,
                       print_performance_metrics: bool = True):
    """
    Perform high-dimensional clustering of single cell data using the popular
    FlowSOM algorithm (https://pubmed.ncbi.nlm.nih.gov/25573116/). For details
    on the cytopy implementation of FlowSOM see cytopy.flow.clustering.flowsom.FlowSOM
    Clustering is performed either on the entire dataframe (if global_clustering is True)
    or on each biological sample, in which case a column should be provided called 'sample_id'
    which this function will group on and perform clustering in turn. In both cases,
    the clustering labels are assigned to a new column named 'cluster_label'.
    Parameters
    ----------
    data: Pandas.DataFrame
        Clustered data with columns for sample_id and cluster_label
    features: list
        Columns clustering is performed on
    verbose: bool
        Whether to print output to stdout
    meta_cluster_class: object
        Scikit-learn (or alike) object with the method 'fit_predict'; used for
        consensus clustering of SOM nodes
    global_clustering: bool (default=False)
        Whether to cluster the whole dataframe or group on 'sample_id' and cluster
        groups
    init_kwargs: dict, optional
        Additional initialisation keyword parameters for FlowSOM (see cytopy.flow.clustering.flowsom.FlowSOM)
    training_kwargs: dict, optional
        Additional training keyword parameters for FlowSOM
        (see cytopy.flow.clustering.flowsom.FlowSOM.train)
    meta_cluster_kwargs: dict, optional
        Additional meta_cluster keyword parameters for FlowSOM
        (see cytopy.flow.clustering.flowsom.FlowSOM.meta_cluster)
    print_performance_metrics: bool = True
        Print Calinski-Harabasz Index, Silhouette Coefficient, and Davies-Bouldin Index
        (see https://scikit-learn.org/stable/modules/clustering.html#clustering-performance-evaluation)
    Returns
    -------
    Pandas.DataFrame and None and None
        Modified dataframe with clustering IDs assigned to the column 'cluster_label'
    """
    # Global mode: train one FlowSOM over the entire dataframe and label it in place.
    if global_clustering:
        cluster = _flowsom_clustering(data=data,
                                      features=features,
                                      verbose=verbose,
                                      meta_cluster_class=meta_cluster_class,
                                      init_kwargs=init_kwargs,
                                      training_kwargs=training_kwargs,
                                      meta_cluster_kwargs=meta_cluster_kwargs)
        data["cluster_label"] = cluster.predict()
        if print_performance_metrics:
            clustering_performance(data[features], data["cluster_label"].values)
        return data, None, None
    # Per-sample mode: fit a separate FlowSOM for each 'sample_id' group.
    vprint_ = vprint(verbose)
    for _id, df in data.groupby("sample_id"):
        vprint_(f"----- Clustering {_id} -----")
        cluster = _flowsom_clustering(data=df,
                                      features=features,
                                      verbose=verbose,
                                      meta_cluster_class=meta_cluster_class,
                                      init_kwargs=init_kwargs,
                                      training_kwargs=training_kwargs,
                                      meta_cluster_kwargs=meta_cluster_kwargs)
        # NOTE(review): df is a copy produced by groupby, so this assignment does not
        # touch 'data' directly; the labels are written back via .loc two lines below.
        df["cluster_label"] = cluster.predict()
        if print_performance_metrics:
            clustering_performance(df[features], df["cluster_label"].values)
        # write the group's labels back into the original dataframe by index
        data.loc[df.index, ["cluster_label"]] = df.cluster_label
        vprint_("\n")
    return data, None, None
class Clustering:
"""
High-dimensional clustering offers the advantage of an unbiased approach
to classification of single cells whilst also exploiting all available variables
in your data (all your fluorochromes/isotypes). In cytopy, the clustering is
performed on a Population of a FileGroup. The resulting clusters are saved
as new Populations. We can compare the clustering results of many FileGroup's
by 'clustering the clusters', to do this we summarise their clusters and perform meta-clustering.
The Clustering class provides all the apparatus to perform high-dimensional clustering
using any of the following functions from the cytopy.flow.clustering.main module:
* sklearn_clustering - access any of the Scikit-Learn cluster/mixture classes for unsupervised learning;
currently also provides access to HDBSCAN
* phenograph_clustering - access to the PhenoGraph clustering algorithm
* flowsom_clustering - access to the FlowSOM clustering algorithm
In addition, meta-clustering (clustering or clusters) can be performed with any of the following from
the | |
from natsort import natsorted
import os
sntrim = sn[0].lower() + sn.split(' ')[1][0]
# rnaorderedkeys = natsorted(drna.keys(), alg=ns.IGNORECASE)
# print(rnaorderedkeys[:10])
output = []
for k in drna.keys():
try:
ddna[k]
except KeyError:
pass
else:
# if the read maps in the RNA and DNA sam files
for rnapos in drna[k]: # the read could map to multiple locations 'equally' well (according to MQ cutoff)
# print(rnapos)
for dnapos in ddna[k]: # the read could map to multiple locations 'equally' well (according to MQ)
# print(dnapos)
if PCRDupRemoval is False:
##### THIS SPLIT STATEMENT IS NOT UNIVERSAL! ##### Used to isolate chromosome number
# rnapos[0].split('_')[1]
# dnapos[0].split('_')[1]
output.append([sntrim + '_'.join(rnapos[0].split('_')[1:]), rnapos[1], str(int(rnapos[1]) + 1),
sntrim + '_'.join(dnapos[0].split('_')[1:]), dnapos[1], str(int(dnapos[1]) + 1)])
# , 'thickness=' + str(len(ddna[k])) # str(math.log(len(drna[k]) + len(ddna[k]), 10))
elif PCRDupRemoval is True:
##### THIS SPLIT STATEMENT IS NOT UNIVERSAL! ##### Used to isolate chromosome number
# k.split(':')[-1]] == the NNN PCR duplicate barcode (I attached to ends of read names with
# modified char_bridge_trackall.py script
output.append([sntrim + '_'.join(rnapos[0].split('_')[1:]), rnapos[1], str(int(rnapos[1]) + 1),
sntrim + '_'.join(dnapos[0].split('_')[1:]), dnapos[1], str(int(dnapos[1]) + 1),
k.split(':')[-1]])
# , 'thickness=' + str(len(ddna[k])) # str(math.log(len(drna[k]) + len(ddna[k]), 10))
sortoutput = natsorted(output, key=lambda y: (y[0], y[1]))
path, f = os.path.split(fDNA)
outpath = os.path.join(path, 'RNA.DNA.Contacts.%s.raw.txt' % sntrim)
# use output != [] to prevent writing only '\n' when no contacts present
if (count == 0) and (output != []):
with open(outpath, 'w') as OUT:
OUT.write('\n'.join([' '.join(l) for l in sortoutput]) + '\n')
elif (count > 0) and (output != []):
with open(outpath, 'a') as OUT:
OUT.write('\n'.join([' '.join(l) for l in sortoutput]) + '\n')
return outpath
def get_rightmost_reference_based_alignment_coordinate(CIGAR, leftmost_coordinate):
    """
    Compute the rightmost (1-based) reference coordinate covered by an alignment.

    :param CIGAR: CIGAR string from a SAM record, e.g. '10M2D5M'; '*' marks an unmapped read
    :param leftmost_coordinate: int, 1-based leftmost mapping position (SAM POS field)
    :return: rightmost 1-based reference position, or 0 for an unmapped/unparsable CIGAR
    """
    import re
    # BUG FIX: the original pattern r'\d+[A-Z]' silently dropped '=' (sequence match)
    # operations because '=' is not an upper-case letter; the valid CIGAR operation
    # characters per the SAM specification are MIDNSHP=X.
    cigar = re.findall(r'\d+[MIDNSHP=X]', CIGAR)
    if not cigar:  # e.g. CIGAR == '*' for unmapped reads
        print(f'Provided CIGAR string: {CIGAR} does not match CIGAR pattern \\d+[MIDNSHP=X]')
        rightmost_position = 0  # assumes unmapped read
    else:  # then read should be mapped
        # subtract 1 because the leftmost base is 1-based and each reference-consuming
        # operation below adds its full length
        rightmost_position = leftmost_coordinate - 1
        for op in cigar:
            # M, N, D, X and '=' consume the reference; I, S, H and P do not
            if op[-1] in ('M', 'N', 'D', 'X', '='):
                rightmost_position += int(op[:-1])
    return rightmost_position
def record_read_positions(bamfile, MQ, dreads=None):
    """
    Record the 5' mapping position of every mapped read in a bam file that passes a
    mapping-quality threshold.

    :param bamfile: full path to the bam file
    :param MQ: int, minimum mapping quality threshold
    :param dreads: optional dict to accumulate into; maps read name ->
                   list of [reference_name, five_prime_position_str] entries
    :return: dreads
    """
    import pysam
    # BUG FIX: the original signature used a mutable default argument (dreads={}),
    # a dict shared across *all* calls relying on the default -- so e.g. an RNA bam
    # and a DNA bam processed in sequence would silently pool their reads.
    if dreads is None:
        dreads = {}
    with pysam.AlignmentFile(bamfile, 'rb') as FILE:
        for line in FILE:
            line = line.tostring()  # pysam record -> raw sam-formatted text
            if line[0] == '@':  # header line
                continue
            fields = line.strip().split('\t')  # hoist: split once per record
            if int(fields[4]) < MQ:  # below the mapping-quality cutoff
                continue
            flag = int(fields[1])  # sam flag
            flagbits = f'{flag:012b}'
            if flagbits[-3] != '1':  # 0x4 not set -> the read is mapped
                if flagbits[-5] == '1':  # 0x10 set -> read mapped on the reverse strand
                    # for a reverse-strand read, the 5' end is the RIGHTMOST reference base
                    fiveprimecoord = get_rightmost_reference_based_alignment_coordinate(
                        fields[5], int(fields[3]))
                    # key is read name; value collects [reference name, 5' position]
                    dreads.setdefault(fields[0], []).append(
                        [fields[2], str(fiveprimecoord)])
                else:
                    # forward strand: the 5' end is the leftmost (POS) coordinate
                    dreads.setdefault(fields[0], []).append(
                        [fields[2], fields[3]])
    return dreads
def read_fasta_as_dict(f):
    """Parse a fasta file into a {name: sequence} dict plus the ordered list of names.

    Header lines containing a space keep only the first token and additionally have a
    trailing '_with_IES' suffix stripped; multi-line (interleaved) records are joined.

    :param f: path to the fasta file
    :return: (dict of name -> sequence string, list of names in file order)
    """
    seqs = {}  # fasta names are keys; values collect sequence chunks
    order = []
    with open(f, 'r') as FILE:
        for line in FILE:
            if line.startswith('>'):
                if ' ' in line:
                    # first token only, dropping '>' and the '_with_IES' suffix
                    name = line.strip().split()[0][1:-len('_with_IES')]
                else:
                    name = line.strip()[1:]
                order.append(name)
                seqs[name] = []
            elif line.strip():  # skip blank junk (e.g. a trailing empty line)
                seqs[name].append(line.strip())
    # join partial sequences; useful if the fasta is interleaved
    for name in order:
        seqs[name] = ''.join(seqs[name])
    return seqs, order
def run_samtools(samfile):
    """Convert a sam file to a coordinate-sorted, indexed bam, removing intermediates.

    :param samfile: full path to the sam file (deleted after conversion)
    :return: full path to the sorted bam file
    """
    print('Starting samtools')
    import os
    import subprocess
    stem = samfile.split('.')[:-1]
    bamfile = '.'.join(stem + ['bam'])
    sortbamfile = '.'.join(stem + ['sort', 'bam'])
    # sam -> bam (samtools writes to stdout, which we redirect into the bam file)
    with open(bamfile, 'w') as OUT:
        cmd = 'samtools view -h -b %s' % (samfile)
        proc = subprocess.Popen(cmd.split(), stdout=OUT)
        proc.wait()
    os.remove(samfile)  # the sam file is no longer needed
    # bam -> coordinate-sorted bam
    with open(sortbamfile, 'w') as OUT:
        cmd = 'samtools sort %s' % (bamfile)
        proc = subprocess.Popen(cmd.split(), stdout=OUT)
        proc.wait()
    os.remove(bamfile)  # delete the unsorted intermediate
    # index the sorted bam (writes <sortbamfile>.bai alongside it)
    cmd = 'samtools index %s' % sortbamfile
    proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    proc.wait()
    print('Finished with Samtools\n')
    return sortbamfile
def run_hisat2(aligndatabase, fastqfile, alignargs=''):
    """Align a fastq file with hisat2, writing the sam into a sibling 'hisat2' directory.

    :param aligndatabase: path/basename of the hisat2 index
    :param fastqfile: path to the input fastq file (assumed to end in .fastq.gz)
    :param alignargs: extra command-line arguments passed through to hisat2
    :return: full path to the sam file produced
    """
    print('Starting Hisat2: aligning\n%s' % fastqfile)
    import os
    import subprocess
    fqdir, fqname = os.path.split(fastqfile)
    parent, _ = os.path.split(fqdir)
    make_directory(os.path.join(parent, 'hisat2'))
    # drop the final two extensions (assuming .fastq.gz) and append .sam
    outsamfile = os.path.join(parent, 'hisat2', '.'.join(fqname.split('.')[:-2] + ['sam']))
    cmd = 'hisat2 -q %s -x %s -U %s -S %s' % (alignargs, aligndatabase, fastqfile, outsamfile)
    subprocess.call(cmd.split())
    print('Finished with Hisat2\n')
    return outsamfile
def divide_number_of_lines_in_files_mult_100(f1, f2, expectedhuman=1.0):
    """Compute (lines(f1) / (lines(f2) / 2)) * 100 and scale it by the expected human %.

    :param f1: file whose line count forms the numerator
    :param f2: fasta file whose line count is halved (two lines per record)
    :param expectedhuman: expected percent of human contamination
    :return: (percent, expectedfreefloatingRNA)
    """
    with open(f1, 'r') as FILE:
        c1 = sum(1.0 for _ in FILE)
    with open(f2, 'r') as FILE:
        c2 = sum(1.0 for _ in FILE)
    # percent of human RNA from all reads (human + species of interest);
    # only includes RNA that pass char-bridge filters i.e. RNA min length 15 and DNA min length 15 etc.
    # c2 is divided by 2 because the rna fasta file has two lines per record
    percent = (c1 / (c2 / 2)) * 100.00
    # this is percent of human free-floating RNA that should represent percent free-floating RNA in Species of Interest
    expectedfreefloatingRNA = (percent / expectedhuman) * 100  # if 0.01 / 1.0 * 100 then = 1%
    return percent, expectedfreefloatingRNA
def run_blastn_match_db(fastafile, database, outformat=6, percentidentitiythreshold=98.00, bestbitscoreonly=True):
# fasta file is full path to .fasta file
# database is full path to blastn database
# calculate % of human RNA spike-in by with BLASTn # this represents expected % free-floating RNA in sample
print('Start BLASTn on file:\n%s\nTo Database:\n%s\n' % (fastafile, database))
import subprocess
import os
path, f = os.path.split(fastafile)
pathminusonedir, dir = os.path.split(path)
outpath = os.path.join(pathminusonedir, 'blastn')
make_directory(outpath)
if bestbitscoreonly == True:
# takes the best BLAST result by bit score
outfile = 'best_bit_score_per_query.blastn.RNA.tsv'
fulloutpath = os.path.join(outpath, outfile)
# full cmd = 'blastn -query %s -db %s -outfmt %d | sort -k1,1 -k12,12nr -k11,11n | sort -u -k1,1 --merge > %s' % (fastafile, database, outformat, fulloutpath)
cmdpipe = ['blastn -query %s -db %s -outfmt %d' % (fastafile, database, outformat), 'sort -k1,1 -k12,12nr -k11,11n %s' % (fulloutpath + '.blastn'), 'sort -u -k1,1 --merge %s' % (fulloutpath + '.sort1')]
for count, cmd in enumerate(cmdpipe):
if count == 0:
with open(fulloutpath + '.blastn', 'w') as OUT:
print('Pipe step 1.1')
print(cmd)
ps = subprocess.Popen(cmd.split(), bufsize=-1, stdout=OUT)
print('Pipe step 1.2')
ps.wait()
print('Pipe step 1.3')
elif count != len(cmdpipe) - 1: # if it is not the last command
with open(fulloutpath + '.sort1', 'w') as OUT:
print('Pipe step 2.1')
ps = subprocess.Popen(cmd.split(), bufsize=-1, stdout=OUT)
print('Pipe step 2.2')
ps.wait()
print('Pipe step 2.3')
else: # it | |
# yellowbrick.datasaurus
# Plots a Datasaurus Quartet as an illustration of the importance of visualization.
#
# Author: <NAME>
# Created: Wed Jun 20 15:17:35 2018 -0400
#
# Copyright (C) 2018 District Data Labs
# For license information, see LICENSE.txt
#
# ID: datasaurus.py [] <EMAIL> $
"""
Plots a Datasaurus Quartet as an illustration of the importance of visualization.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
from yellowbrick.bestfit import draw_best_fit
from yellowbrick.style import get_color_cycle
##########################################################################
## DATASAURUS Data Arrays
##########################################################################
DATASAURUS = [
np.array([[55.3846, 51.5385, 46.1538, 42.8205, 40.7692, 38.7179, 35.641 ,
33.0769, 28.9744, 26.1538, 23.0769, 22.3077, 22.3077, 23.3333,
25.8974, 29.4872, 32.8205, 35.3846, 40.2564, 44.1026, 46.6667,
50. , 53.0769, 56.6667, 59.2308, 61.2821, 61.5385, 61.7949,
57.4359, 54.8718, 52.5641, 48.2051, 49.4872, 51.0256, 45.3846,
42.8205, 38.7179, 35.1282, 32.5641, 30. , 33.5897, 36.6667,
38.2051, 29.7436, 29.7436, 30. , 32.0513, 35.8974, 41.0256,
44.1026, 47.1795, 49.4872, 51.5385, 53.5897, 55.1282, 56.6667,
59.2308, 62.3077, 64.8718, 67.9487, 70.5128, 71.5385, 71.5385,
69.4872, 46.9231, 48.2051, 50. , 53.0769, 55.3846, 56.6667,
56.1538, 53.8462, 51.2821, 50. , 47.9487, 29.7436, 29.7436,
31.2821, 57.9487, 61.7949, 64.8718, 68.4615, 70.7692, 72.0513,
73.8462, 75.1282, 76.6667, 77.6923, 79.7436, 81.7949, 83.3333,
85.1282, 86.4103, 87.9487, 89.4872, 93.3333, 95.3846, 98.2051,
56.6667, 59.2308, 60.7692, 63.0769, 64.1026, 64.359 , 74.359 ,
71.2821, 67.9487, 65.8974, 63.0769, 61.2821, 58.7179, 55.1282,
52.3077, 49.7436, 47.4359, 44.8718, 48.7179, 51.2821, 54.1026,
56.1538, 52.0513, 48.7179, 47.1795, 46.1538, 50.5128, 53.8462,
57.4359, 60. , 64.1026, 66.9231, 71.2821, 74.359 , 78.2051,
67.9487, 68.4615, 68.2051, 37.6923, 39.4872, 91.2821, 50. ,
47.9487, 44.1026],
[97.1795, 96.0256, 94.4872, 91.4103, 88.3333, 84.8718, 79.8718,
77.5641, 74.4872, 71.4103, 66.4103, 61.7949, 57.1795, 52.9487,
51.0256, 51.0256, 51.0256, 51.4103, 51.4103, 52.9487, 54.1026,
55.2564, 55.641 , 56.0256, 57.9487, 62.1795, 66.4103, 69.1026,
55.2564, 49.8718, 46.0256, 38.3333, 42.1795, 44.1026, 36.4103,
32.5641, 31.4103, 30.2564, 32.1795, 36.7949, 41.4103, 45.641 ,
49.1026, 36.0256, 32.1795, 29.1026, 26.7949, 25.2564, 25.2564,
25.641 , 28.718 , 31.4103, 34.8718, 37.5641, 40.641 , 42.1795,
44.4872, 46.0256, 46.7949, 47.9487, 53.718 , 60.641 , 64.4872,
69.4872, 79.8718, 84.1026, 85.2564, 85.2564, 86.0256, 86.0256,
82.9487, 80.641 , 78.718 , 78.718 , 77.5641, 59.8718, 62.1795,
62.5641, 99.4872, 99.1026, 97.5641, 94.1026, 91.0256, 86.4103,
83.3333, 79.1026, 75.2564, 71.4103, 66.7949, 60.2564, 55.2564,
51.4103, 47.5641, 46.0256, 42.5641, 39.8718, 36.7949, 33.718 ,
40.641 , 38.3333, 33.718 , 29.1026, 25.2564, 24.1026, 22.9487,
22.9487, 22.1795, 20.2564, 19.1026, 19.1026, 18.3333, 18.3333,
18.3333, 17.5641, 16.0256, 13.718 , 14.8718, 14.8718, 14.8718,
14.1026, 12.5641, 11.0256, 9.8718, 6.0256, 9.4872, 10.2564,
10.2564, 10.641 , 10.641 , 10.641 , 10.641 , 10.641 , 10.641 ,
8.718 , 5.2564, 2.9487, 25.7692, 25.3846, 41.5385, 95.7692,
95. , 92.6923]]),
np.array([[51.20389114, 58.9744699 , 51.87207267, 48.17993079, 41.6832004 ,
37.8904155 , 39.54897369, 39.64957388, 34.75059705, 27.56083529,
24.63553998, 20.95946481, 20.68914905, 19.28820474, 20.02450057,
35.469523 , 36.89432765, 39.05554978, 46.95708015, 37.31045274,
40.009672 , 48.01438668, 53.70377593, 63.06749989, 62.04803251,
59.83996671, 55.16094182, 61.27978658, 60.83491753, 61.52059065,
36.91654386, 38.50219967, 48.66437073, 50.2852524 , 42.27633267,
54.03177562, 37.32935526, 41.38952255, 40.07466666, 35.34968062,
34.76370042, 37.02662945, 36.45556953, 35.53766421, 20.40894789,
23.49571047, 29.55754336, 33.00823391, 53.98039918, 52.2343086 ,
59.50307661, 41.16378107, 48.99304012, 59.26928032, 45.469177 ,
62.69126654, 73.42867087, 70.84642611, 71.53901985, 67.62086589,
72.47095256, 64.81223756, 60.85367987, 67.78949616, 41.60955727,
53.00302532, 54.71417106, 44.29166872, 49.19172196, 53.10138178,
51.59984815, 54.37972195, 46.4807681 , 53.17465627, 45.27200294,
36.03340215, 28.27119417, 25.05480608, 64.758887 , 63.14452748,
50.42467869, 70.64499626, 63.14904908, 62.82402452, 70.23686951,
70.04273524, 72.57062345, 75.13071604, 83.29390573, 79.66426228,
88.43210253, 89.11555901, 89.09219763, 91.72600577, 91.73553876,
91.50788817, 88.2390019 , 88.5305192 , 55.36516034, 62.56025887,
58.00666912, 55.06711799, 61.61477596, 68.54314354, 77.70610965,
68.453046 , 68.25720644, 70.25547467, 65.04432528, 60.09224661,
52.99202897, 50.14462898, 46.50861419, 43.80703196, 57.81785469,
50.94049266, 63.49732308, 50.01648295, 58.63676508, 54.73028909,
65.8755478 , 57.06098271, 46.81990795, 38.35939487, 47.31541578,
55.05191654, 50.51596026, 49.67741465, 67.28065952, 66.17301826,
61.08854414, 66.05308577, 72.66998927, 61.5034725 , 68.99502863,
78.24991617, 36.48198057, 50.96774838, 91.19105361, 55.86376849,
49.2805948 , 43.36850154],
[83.33977661, 85.49981761, 85.82973763, 85.04511674, 84.0179406 ,
82.567493 , 80.81260177, 82.66453387, 80.01109099, 72.84782559,
71.61071483, 66.04149838, 62.72130521, 62.06305936, 61.34262387,
43.11588495, 47.70655597, 55.54697371, 65.24040739, 45.2587509 ,
60.98658251, 65.71281959, 66.38948204, 64.03500046, 63.84586325,
64.47676444, 65.23730817, 65.7664025 , 64.60376971, 64.79185504,
41.09524744, 41.56715562, 30.68066685, 30.33792211, 34.52763612,
29.67234831, 39.60204231, 37.29605623, 34.6236852 , 47.14107313,
47.62479992, 44.46229305, 40.79184303, 48.72938687, 32.20303042,
25.32246815, 21.36477746, 15.98507146, 29.35098671, 29.71167299,
30.66967394, 34.31575825, 32.03035884, 29.64070177, 33.83119273,
30.29037383, 48.57785513, 52.28225333, 45.52180616, 38.00655847,
51.12213482, 62.81091559, 65.49914703, 61.36370155, 83.84868656,
84.6747986 , 84.04312807, 82.90944121, 85.87622912, 84.54765869,
84.81982149, 84.24035555, 83.51821167, 84.26056799, 85.23707942,
53.37168776, 72.84023126, 71.54859792, 82.31522364, 85.23669633,
85.17474759, 82.43091876, 83.94685535, 84.96618595, 82.17115106,
80.38502135, 80.97121843, 79.98409314, 70.77843179, 73.93230972,
64.624247 , 64.00150664, 57.76819305, 52.62335326, 48.97021089,
53.31265209, 31.47743488, 30.47603101, 30.44585028, 30.44713567,
30.2537213 , 29.0115352 , 29.99439119, 35.65783217, 20.30426019,
13.03552859, 12.38463915, 13.25038497, 11.00084148, 11.87211171,
9.90666848, 12.21154309, 11.20713449, 11.31894489, 10.94514243,
9.69154713, 11.91406917, 11.93385209, 11.97472107, 11.41288267,
11.73243636, 9.92056085, 10.49465268, 13.43132262, 12.85345178,
11.94998862, 9.76559162, 10.38313251, 14.12865153, 12.03791702,
10.08453441, 13.38022601, 15.23422594, 10.82841448, 13.99431053,
17.88324091, 15.16276009, 29.67977429, 46.67434284, 85.33648676,
84.04882283, 84.3321772 ]]),
np.array([[58.21360826, 58.19605369, 58.71823072, 57.27837287, 58.08202049,
57.48944777, 28.08874132, 28.08546821, 28.08727305, 27.57802522,
27.77991911, 28.58899981, 28.7391415 , 27.02460324, 28.8013367 ,
27.18646384, 29.2851466 , 39.4029453 , 28.81132844, 34.30395791,
29.60276098, 49.11615686, 39.61754583, 43.23308466, 64.89278794,
62.49014932, 68.98808443, 62.10561863, 32.46184674, 41.32720065,
44.00714993, 44.07406069, 44.00131524, 45.00630045, 44.44384061,
42.1787134 , 44.04456562, 41.64045402, 41.93833001, 44.05392751,
39.20671933, 28.70444923, 31.7086629 , 42.81171147, 43.30061489,
40.39863291, 40.43569158, 40.93654667, 39.66157367, 40.89925917,
41.96861683, 40.38340582, 56.53812645, 52.97069128, 54.62095259,
65.09904439, 63.05599091, 70.96013623, 69.89581924, 70.59589286,
69.64702143, 77.39298249, 64.40078719, 63.86895983, 56.59442132,
56.53133729, 59.65215837, 56.6365087 , 58.672288 , 58.22161273,
57.91466448, 55.31550906, 54.57572859, 54.41309365, 55.0745059 ,
29.43296052, 29.42268607, 29.00561416, 58.46183859, 57.99780474,
57.54947408, 59.52992846, 58.24939106, 58.02451401, 58.38212449,
62.56675904, 72.17582431, 79.47276157, 80.35770088, 78.75723614,
82.54023959, 86.43589719, 79.48868442, 81.53042032, 79.18678857,
77.89905795, 75.13071421, 76.05801375, 57.61467439, 56.17139753,
66.2878906 , 67.88171962, 64.0280813 , 77.49665175, 77.63465176,
77.86372643, 77.33815817, 76.18041653, 77.25265109, 77.41337528,
76.7318494 , 49.47110541, 42.47653994, 43.59511586, 50.33996967,
40.74898026, 38.38652558, 38.40401521, 38.76427889, 41.47014233,
47.15540481, 39.58256675, 41.74024382, 39.31187189, 41.67984769,
39.08746445, 41.48150286, 77.60608655, 75.98266152, 76.94575724,
77.54372007, 77.58473984, 76.82230426, 77.34857166, 77.57315269,
77.97261068, 41.52891976, 43.7225508 , 79.32607818, 56.66397408,
57.82178923, 58.2431719 ],
[91.88189151, 92.21498865, 90.31053209, 89.90760672, 92.00814501,
88.08528556, 63.51079443, 63.59019695, 63.12328281, 62.82103866,
63.51814752, 63.02408057, 62.72086389, 62.90185886, 63.38904039,
63.55872965, 63.38360583, 51.1508572 , 61.35785406, 56.54212591,
60.15734672, 63.66000062, 62.92518796, 63.16521872, 65.81417676,
74.58428961, 63.2321473 , 75.99087076, 62.88190292, 49.07025127,
46.44967378, 34.55320389, 33.90420735, 38.29901955, 36.0190833 ,
26.49211948, 35.66223828, 27.09309542, 24.99152298, 33.55639249,
51.5337157 , 61.7775254 , 58.83775437, 30.02044842, 31.5264262 ,
16.34700838, 20.23267068, 16.91300484, 15.60935558, 20.79852895,
26.4970726 , 21.39122552, 32.44424547, 29.04019669, 30.34452445,
27.24155756, 29.70909567, 41.25950129, 43.45375927, 41.96474387,
44.04444502, 63.37145906, 67.44871845, 70.21373883, 86.92700622,
87.49981107, 87.80946159, 85.63749556, 90.07716031, 90.41101877,
89.95380277, 80.25186069, 77.53628847, 78.22908659, 79.81754642,
60.80177654, 63.06846482, 63.39075133, 90.26532639, 92.15990861,
90.74890656, 88.32727415, 92.12968148, 91.69442117, 90.55347607,
77.74393476, 63.12892942, 63.40868612, 63.29543754, 53.33262001,
56.54105229, 59.79276181, 53.65167426, 56.02536457, 53.23479185,
51.82245833, 23.37244197, 16.38374969, 33.82244765, 32.11798877,
26.11710975, 24.23601841, 27.67268551, 14.94852356, 14.46185393,
14.61067765, 15.89005466, 15.91257375, 15.15151702, 15.22192798,
16.21684614, 25.06301931, 18.33847356, 19.99420098, 26.47139661,
16.18214166, 14.58021515, 14.45194845, 14.36559047, 17.27803344,
22.37793253, 17.64845284, 17.82932431, 15.64071697, 17.74591901,
15.12230394, 18.04743744, 15.16287254, 16.30692238, 15.85847833,
15.25394915, 15.83003939, 15.59516532, 15.77452924, 14.78064583,
14.95569875, 24.91642519, 19.0773278 , 52.90039129, 87.94012501,
90.69316655, 92.10432787]]),
np.array([[51.14791671, 50.51712581, 50.2074802 , 50.06948192, 50.56284634,
50.2885278 , 25.58347508, 25.48358339, 25.4435257 , 25.56511342,
25.92884427, 27.55147826, 27.53046637, 27.09557036, 27.43924961,
27.87826426, 27.33886892, 27.67840297, 52.63565768, 52.02521411,
52.88116479, 52.95260731, 52.52055249, 52.34282206, 51.92759021,
52.71377449, 50.44380279, 50.21669503, 52.18418011, 52.79209735,
52.58971986, 52.02884867, 52.72924658, 52.88431329, 52.50930089,
50.86268433, 50.89149225, 25.8551276 , 26.02564455, 27.89317272,
27.63996794, 27.8926589 , 52.79773294, 27.58063881, 26.49139853,
25.98531782, 26.20141928, 25.85756947, 50.70468436, 50.81197535,
50.56484556, 50.93930391, 50.45885484, 52.90136407, 52.68495344,
52.50008894, 51.83563726, 76.9954121 , 77.31060048, 77.92604434,
77.25438834, 76.2431578 , 77.08448437, 75.2280532 , 50.65835477,
50.20336581, 50.9295477 , 50.17867185, 50.42269806, 50.46422483,
50.44927033, 49.92838028, 50.48801364, 49.96490538, 50.75210826,
27.42242921, 27.6740834 , 27.53739532, 52.26334738, 51.73728166,
75.87096369, 75.24432621, 75.19829529, 75.70104153, 75.47933966,
75.19456687, 74.82025396, 75.16434049, 75.26335555, 77.75641893,
77.95443505, 77.08333777, 76.06355025, 77.68201632, 76.87808198,
76.94850272, 77.86405471, 75.77145009, 52.33156913, 52.59281837,
50.47704772, 75.29647509, 75.57395413, 75.40052716, 75.87099084,
75.60588476, 75.89557705, 75.7465632 , 75.14234148, 50.66177956,
50.69985064, 50.91894087, 50.72525854, 51.26387123, 51.25091965,
50.78515721, 50.50139658, 50.73367454, 50.71137854, 50.8127449 ,
51.01423295, 50.35352141, 50.43552957, 50.63098196, 51.0668072 ,
50.79235473, 50.55127806, 50.55975806, 75.32597855, 75.04472578,
75.28708772, 75.23996998, 75.1524592 , 75.96184009, 75.44806251,
75.75938382, 50.3782623 , 50.53363501, 77.50090732, 50.69112419,
49.99039495, 50.12718203],
[90.86741233, 89.10239459, 85.4600474 , 83.05766953, 82.93782178,
82.97525357, 82.91489113, 82.92908498, 82.8742005 , 82.92409777,
82.82118411, 51.48738653, 51.41484656, 52.07679944, 51.71207905,
50.70890793, 51.65304675, 51.18198917, 51.41855226, 52.12301105,
50.62155476, 50.07473901, 51.5024421 , 51.86195209, 52.25779061,
51.19794432, 82.94182882, 83.75234297, 51.97525067, 51.07339565,
51.3380902 , 52.1768375 , 51.20176505, 50.44143545, 51.41620515,
17.14563109, 17.14132373, 17.08190869, 16.92501353, 50.66196341,
51.39909748, 50.79528152, 50.68603709, 51.52476126, 17.40539097,
17.20372213, 17.09382391, 17.11384266, 17.02374454, 17.11492526,
17.07777732, 16.98102188, 17.03857897, 50.69056272, 51.29446922,
51.59435617, 52.33576553, 52.04552865, 51.74673004, 50.31866042,
51.46182482, 52.12368985, 51.9671367 , 82.98566202, 83.11447934,
82.98265686, 82.84604113, 83.18462233, 82.90990147, 82.93532841,
83.96992038, 82.99366549, 83.09951912, 83.7083177 , 82.9019501 ,
51.43887623, 51.30411215, 51.59365408, 94.24932783, 92.97911753,
88.38644174, 83.90349738, 83.46230334, 82.91945886, 82.88405139,
82.93211578, 82.96238879, 83.03499717, 82.9452793 , 51.15177033,
50.47557897, 52.15779927, 52.10465206, 51.16563781, 51.8675623 ,
51.90751654, 49.66254553, 17.11125121, 51.87886035, 51.39159152,
17.04828941, 17.01565319, 17.06219214, 17.04110689, 17.13489391,
17.06772306, 17.16994971, 17.10571651, 16.75492389, 17.07814052,
17.08518438, 17.14760476, | |
# -*- coding: utf-8 -*-
import itertools
import math
import six
def __get_target_length(size):
    """
    Figures out the increased size of the string per
    IBM Globalization Design Guideline A3: UI Expansion.
    https://www-01.ibm.com/software/globalization/guidelines/a3.html

    :param size: Current size of the string.
    :returns: The desired increased size; 0 for non-positive sizes
        (the original implementation also fell through to 0 there).
    """
    # (exclusive upper bound, multiplier) pairs from guideline A3: shorter
    # strings expand proportionally more.  This replaces the original dict
    # keyed by range objects, which rebuilt five ranges and linearly scanned
    # all of them (without breaking on a match) on every call.
    expansion_factors = (
        (11, 3),
        (21, 2),
        (31, 1.8),
        (51, 1.6),
        (71, 1.4),
    )
    if size <= 0:
        # Outside every guideline bucket.
        return 0
    for upper_bound, factor in expansion_factors:
        if size < upper_bound:
            return int(math.ceil(size * factor))
    # Strings longer than 70 characters expand by 30%.
    return int(math.ceil(size * 1.3))
def transliterate_diacritic(s, fmt_spec):
    """
    Transliterates an input string by replacing each latin letter with the same
    letter with a diacritic added e.g. "Hello" -> "Ȟêĺĺø"

    Characters without a mapping (digits, punctuation, whitespace) pass
    through unchanged.

    :param s: String to perform transliteration upon.
    :param fmt_spec: Regex for placeholders (unused in this transliterator;
        kept so all transliterators share one signature).
    :returns: Transliterated string.
    """
    # ASCII letters A-Z then a-z, paired positionally with the codepoints of
    # their diacritic-bearing counterparts (e.g. A -> U+00C5 A WITH RING
    # ABOVE, z -> U+017A z WITH ACUTE).
    plain_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    accented_codepoints = [
        0x00C5, 0x0181, 0x010A, 0x0110, 0x0204, 0x1E1E, 0x0120, 0x021E,
        0x0130, 0x0134, 0x01E8, 0x0139, 0x1E40, 0x00D1, 0x00D2, 0x01A4,
        0xA756, 0x0212, 0x0218, 0x0164, 0x00DC, 0x1E7C, 0x1E82, 0x1E8C,
        0x1E8E, 0x017D,
        0x00E0, 0x0180, 0x010B, 0x0111, 0x00EA, 0x0192, 0x011F, 0x021F,
        0x0131, 0x01F0, 0x01E9, 0x013A, 0x0271, 0x00F1, 0x00F8, 0x01A5,
        0x02A0, 0x0213, 0x0161, 0x0165, 0x00FC, 0x1E7D, 0x1E81, 0x1E8B,
        0x00FF, 0x017A,
    ]
    mapping = {ord(letter): code
               for letter, code in zip(plain_letters, accented_codepoints)}
    return s.translate(mapping)
def transliterate_circled(s, fmt_spec):
"""
Transliterates an input string by replacing each latin letter or digit with
the circled version of the same letter or digit e.g. "Hello" -> "🅗ⓔⓛⓛⓞ"
:param s: String to perform transliteration upon.
:param fmt_spec: Regex for placeholders.
:returns: Transliterated string.
"""
table = {
0x0030: 0x24EA, # DIGIT ZERO -> CIRCLED DIGIT ZERO
0x0031: 0x2460, # DIGIT ONE -> CIRCLED DIGIT ONE
0x0032: 0x2461, # DIGIT TWO -> CIRCLED DIGIT TWO
0x0033: 0x2462, # DIGIT THREE -> CIRCLED DIGIT THREE
0x0034: 0x2463, # DIGIT FOUR -> CIRCLED DIGIT FOUR
0x0035: 0x2464, # DIGIT FIVE -> CIRCLED DIGIT FIVE
0x0036: 0x2465, # DIGIT SIX -> CIRCLED DIGIT SIX
0x0037: 0x2466, # DIGIT SEVEN -> CIRCLED DIGIT SEVEN
0x0038: 0x2467, # DIGIT EIGHT -> CIRCLED DIGIT EIGHT
0x0039: 0x2468, # DIGIT NINE -> CIRCLED DIGIT NINE
0x0041: 0x24B6, # LATIN CAPITAL LETTER A -> CIRCLED LATIN CAPITAL LETTER A
0x0042: 0x24B7, # LATIN CAPITAL LETTER B -> CIRCLED LATIN CAPITAL LETTER B
0x0043: 0x24B8, # LATIN CAPITAL LETTER C -> CIRCLED LATIN CAPITAL LETTER C
0x0044: 0x24B9, # LATIN CAPITAL LETTER D -> CIRCLED LATIN CAPITAL LETTER D
0x0045: 0x24BA, # LATIN CAPITAL LETTER E -> CIRCLED LATIN CAPITAL LETTER E
0x0046: 0x24BB, # LATIN CAPITAL LETTER F -> CIRCLED LATIN CAPITAL LETTER F
0x0047: 0x24BC, # LATIN CAPITAL LETTER G -> CIRCLED LATIN CAPITAL LETTER G
0x0048: 0x24BD, # LATIN CAPITAL LETTER H -> CIRCLED LATIN CAPITAL LETTER H
0x0049: 0x24BE, # LATIN CAPITAL LETTER I -> CIRCLED LATIN CAPITAL LETTER I
0x004A: 0x24BF, # LATIN CAPITAL LETTER J -> CIRCLED LATIN CAPITAL LETTER J
0x004B: 0x24C0, # LATIN CAPITAL LETTER K -> CIRCLED LATIN CAPITAL LETTER K
0x004C: 0x24C1, # LATIN CAPITAL LETTER L -> CIRCLED LATIN CAPITAL LETTER L
0x004D: 0x24C2, # LATIN CAPITAL LETTER M -> CIRCLED LATIN CAPITAL LETTER M
0x004E: 0x24C3, # LATIN CAPITAL LETTER N -> CIRCLED LATIN CAPITAL LETTER N
0x004F: 0x24C4, # LATIN CAPITAL LETTER O -> CIRCLED LATIN CAPITAL LETTER O
0x0050: 0x24C5, # LATIN CAPITAL LETTER P -> CIRCLED LATIN CAPITAL LETTER P
0x0051: 0x24C6, # LATIN CAPITAL LETTER Q -> CIRCLED LATIN CAPITAL LETTER Q
0x0052: 0x24C7, # LATIN CAPITAL LETTER R -> CIRCLED LATIN CAPITAL LETTER R
0x0053: 0x24C8, # LATIN CAPITAL LETTER S -> CIRCLED LATIN CAPITAL LETTER S
0x0054: 0x24C9, # LATIN CAPITAL LETTER T -> CIRCLED LATIN CAPITAL LETTER T
0x0055: 0x24CA, # LATIN CAPITAL LETTER U | |
import math
import rvo.math as rvo_math
from .line import Line
from .vector import Vector2
class Agent:
"""
Defines an agent in the simulation.
"""
def __init__(self, simulator):
    """
    Constructs an agent bound to the given simulator.

    All kinematic and behavioral parameters start at zero defaults; the
    simulator is expected to populate them before stepping.

    :param simulator: The owning simulator; used to reach the k-d tree for
        neighbor queries and the global time step.
    """
    self.simulator_ = simulator
    self.agent_neighbors_ = []  # (float, Agent) — presumably squared distance / neighbor pairs; filled by compute_neighbors.
    self.obstacle_neighbors_ = []  # (float, Obstacle) — same convention as agent_neighbors_.
    self.orca_lines_ = []  # Line — ORCA half-plane constraints built by compute_new_velocity.
    self.position_ = Vector2()  # Current position.
    self.pref_velocity_ = Vector2()  # Preferred (goal-directed) velocity.
    self.velocity_ = Vector2()  # Current velocity.
    self.id_ = 0  # Identifier of this agent.
    self.max_neighbors_ = 0  # Maximum number of agent neighbors considered.
    self.max_neighbors_ = 0 if False else self.max_neighbors_  # (no-op removed) -- see note below
    self.max_speed_ = 0.0  # Maximum speed allowed when solving for a new velocity.
    self.neighbor_dist_ = 0.0  # Range within which other agents count as neighbors.
    self.radius_ = 0.0  # Agent radius.
    self.time_horizon_ = 0.0  # Look-ahead time with respect to other agents.
    self.time_horizon_obst_ = 0.0  # Look-ahead time with respect to obstacles.
    self.new_velocity_ = Vector2()  # Output of compute_new_velocity.
def compute_neighbors(self):
    """
    Refreshes this agent's obstacle and agent neighbor lists by querying
    the simulator's k-d tree.
    """
    kd_tree = self.simulator_.kd_tree_

    # An obstacle is relevant if it lies within the distance the agent can
    # cover over the obstacle time horizon, padded by the agent's radius.
    obstacle_range_sq = rvo_math.square(
        self.time_horizon_obst_ * self.max_speed_ + self.radius_)
    self.obstacle_neighbors_ = []
    kd_tree.compute_obstacle_neighbors(self, obstacle_range_sq)

    self.agent_neighbors_ = []
    if self.max_neighbors_ > 0:
        # Only search for other agents when at least one neighbor may be kept.
        kd_tree.compute_agent_neighbors(self, rvo_math.square(self.neighbor_dist_))
def compute_new_velocity(self):
    """
    Computes the new velocity of this agent (ORCA).

    Two passes build half-plane constraints in orca_lines_: first one line
    per neighboring obstacle segment, then one per neighboring agent.  A
    linear program then picks the velocity closest to pref_velocity_ that
    satisfies every constraint (falling back to linear_program3 when the
    2D program is infeasible).  The result is stored in new_velocity_.

    NOTE(review): the '@' operator between vectors is used where scalar
    products are expected, so it is presumably Vector2's dot product —
    confirm against the vector module.
    """
    self.orca_lines_ = []

    invTimeHorizonObst = 1.0 / self.time_horizon_obst_

    # Create obstacle ORCA lines.
    for i in range(len(self.obstacle_neighbors_)):
        obstacle1 = self.obstacle_neighbors_[i][1]
        obstacle2 = obstacle1.next_

        relativePosition1 = obstacle1.point_ - self.position_
        relativePosition2 = obstacle2.point_ - self.position_

        # Check if velocity obstacle of obstacle is already taken care of by previously constructed obstacle ORCA lines.
        alreadyCovered = False
        for j in range(len(self.orca_lines_)):
            if (det1 - invTimeHorizonObst * self.radius_ >= -rvo_math.EPSILON) if False else True:
                pass
            det1 = rvo_math.det(invTimeHorizonObst * relativePosition1 - self.orca_lines_[j].point, self.orca_lines_[j].direction)
            det2 = rvo_math.det(invTimeHorizonObst * relativePosition2 - self.orca_lines_[j].point, self.orca_lines_[j].direction)
            if (det1 - invTimeHorizonObst * self.radius_ >= -rvo_math.EPSILON) and (det2 - invTimeHorizonObst * self.radius_ >= -rvo_math.EPSILON):
                alreadyCovered = True
                break
        if alreadyCovered:
            continue

        # Not yet covered. Check for collisions.
        distSq1 = rvo_math.abs_sq(relativePosition1)
        distSq2 = rvo_math.abs_sq(relativePosition2)
        radiusSq = rvo_math.square(self.radius_)

        obstacleVector = obstacle2.point_ - obstacle1.point_
        # s is the projection parameter of the agent position onto the
        # obstacle segment (0 at obstacle1, 1 at obstacle2).
        s = (-relativePosition1 @ obstacleVector) / rvo_math.abs_sq(obstacleVector)
        distSqLine = rvo_math.abs_sq(-relativePosition1 - s * obstacleVector)

        line = Line()
        if s < 0.0 and distSq1 <= radiusSq:
            # Collision with left vertex. Ignore if non-convex.
            if obstacle1.convex_:
                line.point = Vector2(0.0, 0.0)
                line.direction = rvo_math.normalize(Vector2(-relativePosition1.y, relativePosition1.x))
                self.orca_lines_.append(line)
            continue
        elif s > 1.0 and distSq2 <= radiusSq:
            # Collision with right vertex. Ignore if non-convex or if it will be taken care of by neighboring obstacle.
            if obstacle2.convex_ and rvo_math.det(relativePosition2, obstacle2.direction_) >= 0.0:
                line.point = Vector2(0.0, 0.0)
                line.direction = rvo_math.normalize(Vector2(-relativePosition2.y, relativePosition2.x))
                self.orca_lines_.append(line)
            continue
        elif s >= 0.0 and s < 1.0 and distSqLine <= radiusSq:
            # Collision with obstacle segment.
            line.point = Vector2(0.0, 0.0)
            line.direction = -obstacle1.direction_
            self.orca_lines_.append(line)
            continue

        # No collision. Compute legs. When obliquely viewed, both legs can come from a single vertex. Legs extend cut-off line when non-convex vertex.
        leftLegDirection = None
        rightLegDirection = None
        if s < 0.0 and distSqLine <= radiusSq:
            # Obstacle viewed obliquely so that left vertex defines velocity obstacle.
            if not obstacle1.convex_:
                # Ignore obstacle.
                continue
            obstacle2 = obstacle1
            leg1 = math.sqrt(distSq1 - radiusSq)
            leftLegDirection = Vector2(relativePosition1.x * leg1 - relativePosition1.y * self.radius_, relativePosition1.x * self.radius_ + relativePosition1.y * leg1) / distSq1
            rightLegDirection = Vector2(relativePosition1.x * leg1 + relativePosition1.y * self.radius_, -relativePosition1.x * self.radius_ + relativePosition1.y * leg1) / distSq1
        elif s > 1.0 and distSqLine <= radiusSq:
            # Obstacle viewed obliquely so that right vertex defines velocity obstacle.
            if not obstacle2.convex_:
                # Ignore obstacle.
                continue
            obstacle1 = obstacle2
            leg2 = math.sqrt(distSq2 - radiusSq)
            leftLegDirection = Vector2(relativePosition2.x * leg2 - relativePosition2.y * self.radius_, relativePosition2.x * self.radius_ + relativePosition2.y * leg2) / distSq2
            rightLegDirection = Vector2(relativePosition2.x * leg2 + relativePosition2.y * self.radius_, -relativePosition2.x * self.radius_ + relativePosition2.y * leg2) / distSq2
        else:
            # Usual situation.
            if obstacle1.convex_:
                leg1 = math.sqrt(distSq1 - radiusSq)
                leftLegDirection = Vector2(relativePosition1.x * leg1 - relativePosition1.y * self.radius_, relativePosition1.x * self.radius_ + relativePosition1.y * leg1) / distSq1
            else:
                # Left vertex non-convex left leg extends cut-off line.
                leftLegDirection = -obstacle1.direction_
            if obstacle2.convex_:
                leg2 = math.sqrt(distSq2 - radiusSq)
                rightLegDirection = Vector2(relativePosition2.x * leg2 + relativePosition2.y * self.radius_, -relativePosition2.x * self.radius_ + relativePosition2.y * leg2) / distSq2
            else:
                # Right vertex non-convex right leg extends cut-off line.
                rightLegDirection = obstacle1.direction_

        # Legs can never point into neighboring edge when convex vertex, take cutoff-line of neighboring edge instead. If velocity projected on "foreign" leg, no constraint is added.
        leftNeighbor = obstacle1.previous_
        isLeftLegForeign = False
        isRightLegForeign = False
        if obstacle1.convex_ and rvo_math.det(leftLegDirection, -leftNeighbor.direction_) >= 0.0:
            # Left leg points into obstacle.
            leftLegDirection = -leftNeighbor.direction_
            isLeftLegForeign = True
        if obstacle2.convex_ and rvo_math.det(rightLegDirection, obstacle2.direction_) <= 0.0:
            # Right leg points into obstacle.
            rightLegDirection = obstacle2.direction_
            isRightLegForeign = True

        # Compute cut-off centers.
        leftCutOff = invTimeHorizonObst * (obstacle1.point_ - self.position_)
        rightCutOff = invTimeHorizonObst * (obstacle2.point_ - self.position_)
        cutOffVector = rightCutOff - leftCutOff

        # Project current velocity on velocity obstacle.
        # Check if current velocity is projected on cutoff circles.
        t = 0.5 if obstacle1 == obstacle2 else ((self.velocity_ - leftCutOff) @ cutOffVector) / rvo_math.abs_sq(cutOffVector)
        tLeft = (self.velocity_ - leftCutOff) @ leftLegDirection
        tRight = (self.velocity_ - rightCutOff) @ rightLegDirection

        if (t < 0.0 and tLeft < 0.0) or (obstacle1 == obstacle2 and tLeft < 0.0 and tRight < 0.0):
            # Project on left cut-off circle.
            unitW = rvo_math.normalize(self.velocity_ - leftCutOff)
            line.direction = Vector2(unitW.y, -unitW.x)
            line.point = leftCutOff + self.radius_ * invTimeHorizonObst * unitW
            self.orca_lines_.append(line)
            continue
        elif t > 1.0 and tRight < 0.0:
            # Project on right cut-off circle.
            unitW = rvo_math.normalize(self.velocity_ - rightCutOff)
            line.direction = Vector2(unitW.y, -unitW.x)
            line.point = rightCutOff + self.radius_ * invTimeHorizonObst * unitW
            self.orca_lines_.append(line)
            continue

        # Project on left leg, right leg, or cut-off line, whichever is closest to velocity.
        distSqCutoff = math.inf if t < 0.0 or t > 1.0 or obstacle1 == obstacle2 else rvo_math.abs_sq(self.velocity_ - (leftCutOff + t * cutOffVector))
        distSqLeft = math.inf if tLeft < 0.0 else rvo_math.abs_sq(self.velocity_ - (leftCutOff + tLeft * leftLegDirection))
        distSqRight = math.inf if tRight < 0.0 else rvo_math.abs_sq(self.velocity_ - (rightCutOff + tRight * rightLegDirection))

        if distSqCutoff <= distSqLeft and distSqCutoff <= distSqRight:
            # Project on cut-off line.
            line.direction = -obstacle1.direction_
            line.point = leftCutOff + self.radius_ * invTimeHorizonObst * Vector2(-line.direction.y, line.direction.x)
            self.orca_lines_.append(line)
            continue

        if distSqLeft <= distSqRight:
            # Project on left leg.
            if isLeftLegForeign:
                continue
            line.direction = leftLegDirection
            line.point = leftCutOff + self.radius_ * invTimeHorizonObst * Vector2(-line.direction.y, line.direction.x)
            self.orca_lines_.append(line)
            continue

        # Project on right leg.
        if isRightLegForeign:
            continue
        line.direction = -rightLegDirection
        line.point = rightCutOff + self.radius_ * invTimeHorizonObst * Vector2(-line.direction.y, line.direction.x)
        self.orca_lines_.append(line)

    # Obstacle lines occupy the first numObstLines slots; linear_program3
    # treats them as hard constraints that may not be relaxed.
    numObstLines = len(self.orca_lines_)

    invTimeHorizon = 1.0 / self.time_horizon_

    # Create agent ORCA lines.
    for i in range(len(self.agent_neighbors_)):
        other = self.agent_neighbors_[i][1]

        relativePosition = other.position_ - self.position_
        relativeVelocity = self.velocity_ - other.velocity_
        distSq = rvo_math.abs_sq(relativePosition)
        combinedRadius = self.radius_ + other.radius_
        combinedRadiusSq = rvo_math.square(combinedRadius)

        line = Line()
        u = Vector2()

        if distSq > combinedRadiusSq:
            # No collision.
            w = relativeVelocity - invTimeHorizon * relativePosition

            # Vector from cutoff center to relative velocity.
            wLengthSq = rvo_math.abs_sq(w)
            dotProduct1 = w @ relativePosition

            if dotProduct1 < 0.0 and rvo_math.square(dotProduct1) > combinedRadiusSq * wLengthSq:
                # Project on cut-off circle.
                wLength = math.sqrt(wLengthSq)
                unitW = w / wLength
                line.direction = Vector2(unitW.y, -unitW.x)
                u = (combinedRadius * invTimeHorizon - wLength) * unitW
            else:
                # Project on legs.
                leg = math.sqrt(distSq - combinedRadiusSq)
                if rvo_math.det(relativePosition, w) > 0.0:
                    # Project on left leg.
                    line.direction = Vector2(relativePosition.x * leg - relativePosition.y * combinedRadius, relativePosition.x * combinedRadius + relativePosition.y * leg) / distSq
                else:
                    # Project on right leg.
                    line.direction = -Vector2(relativePosition.x * leg + relativePosition.y * combinedRadius, -relativePosition.x * combinedRadius + relativePosition.y * leg) / distSq
                dotProduct2 = relativeVelocity @ line.direction
                u = dotProduct2 * line.direction - relativeVelocity
        else:
            # Collision. Project on cut-off circle of time timeStep.
            invTimeStep = 1.0 / self.simulator_.time_step_

            # Vector from cutoff center to relative velocity.
            w = relativeVelocity - invTimeStep * relativePosition
            wLength = abs(w)
            unitW = w / wLength
            line.direction = Vector2(unitW.y, -unitW.x)
            u = (combinedRadius * invTimeStep - wLength) * unitW

        # Reciprocity: each agent takes half the responsibility (0.5 * u)
        # for avoiding the collision.
        line.point = self.velocity_ + 0.5 * u
        self.orca_lines_.append(line)

    lineFail, self.new_velocity_ = self.linear_program2(self.orca_lines_, self.max_speed_, self.pref_velocity_, False, self.new_velocity_)
    if lineFail < len(self.orca_lines_):
        self.new_velocity_ = self.linear_program3(self.orca_lines_, numObstLines, lineFail, self.max_speed_, self.new_velocity_)
def insert_agent_neighbor(self, agent, rangeSq):
"""
Inserts an agent neighbor into the set of neighbors of this agent.
Args:
agent (Agent): A pointer to the agent to be inserted.
rangeSq (float): The squared range around this agent.
"""
if self != agent:
distSq = | |
from typing import Dict, List, Optional
import onnx
import torch
from onnx import helper
from ppq.core import (PPQ_CONFIG, ChannelwiseTensorQuantizationConfig,
DataType, OperationMeta, QuantizationProperty,
QuantizationStates, TensorMeta, TensorQuantizationConfig,
convert_any_to_torch_tensor)
from ppq.IR import BaseGraph, Operation, QuantableOperation, QuantableVariable
from ppq.IR.base.command import GraphCommand, GraphCommandType
from ppq.IR.morph import GraphDeviceSwitcher, GraphFormatter
from ppq.quantization.qfunction.linear import PPQLinearQuant_toInt
from ppq.utils.round import ppq_tensor_round
from .onnx_exporter import OnnxExporter
class ONNXRUNTIMExporter(OnnxExporter):
"""ONNXRUNTIME int8 QDQ format exporter, no further actions should be
applied to the graph because we will modify the graph in-place, and the
modified graph can't be executed. We remove Clip and Relu ops(fuse into
computing op) here when asym quantization for activation is applied, and
following the official implementation, when an variable has multiple
outputs, we assume the same quantization scales and offset. For parameters,
we pre-quantize the value and only insert DequantizeLinear op, both per-
layer/per-channel and asym/sym quantizations are supported for export, the
exported onnx model is tested to align with PPQ monitor when
CUDAExecutionProvider is applied in onnxruntime-gpu >= 1.8.1, i.e., to run
the model correctly if you have gpu and onnxruntime-gpu version installed.
X W b X quant(W) quant(b)
\ | / \ | /
\ | / quant dequant dequant
Conv -> \ | /
| dequant | /
| \ | /
Conv
|
quant
|
dequant
|
```
import onnxruntime as ort
sess_options = ort.SessionOptions()
sess = ort.InferenceSession(file_path, sess_options, providers=['CUDAExecutionProvider'])
res = sess.run(None, {sess.get_inputs()[0].name : dummy_input.cpu().numpy()})
```
"""
def __init__(self, removed_activation_types: List[str] = ['Relu', 'Clip']) -> None:
super().__init__()
self.removed_activation_types = removed_activation_types
def infer_qtype(self, config: TensorQuantizationConfig):
offset_dtype, value_dtype = torch.int8, torch.int8
if config.policy.has_property(QuantizationProperty.ASYMMETRICAL):
offset_dtype = torch.uint8
value_dtype = torch.uint8
if config.num_of_bits > 16:
offset_dtype = torch.int32
value_dtype = torch.int32
return offset_dtype, value_dtype
    def insert_quant_on_variable(
        self, graph: BaseGraph, var: QuantableVariable,
        config: TensorQuantizationConfig, related_op: Operation,
        meta: TensorMeta = None) -> Operation:
        """Insert a QuantizeLinear op onto `var`, wiring scale and zero-point
        parameter variables into it.

        Args:
            graph: Graph being rewritten in place.
            var: Variable to quantize.
            config: Quantization config supplying scale / offset / policy.
            related_op: If given and `var` feeds it, the QuantizeLinear is
                inserted specifically between `var` and that op; otherwise it
                is inserted on the variable itself (before all consumers).
            meta: Tensor meta for `var`; defaults to `var.meta`.

        Returns:
            The created QuantizeLinear operation.
        """
        if meta is None: meta = var.meta
        offset_dtype, value_dtype = self.infer_qtype(config)
        scale = convert_any_to_torch_tensor(config.scale.clone(), dtype=torch.float32)
        offset = ppq_tensor_round(config.offset.clone()).type(offset_dtype)

        # Scale and zero-point become parameter variables feeding the new op.
        s_var = graph.create_variable(name=None, value=scale, is_parameter=True)
        z_var = graph.create_variable(name=None, value=offset, is_parameter=True)
        created = graph.create_operation(op_type='QuantizeLinear', attributes={})

        # Per-channel quantization requires an explicit 'axis' attribute (opset 13).
        if config.policy.has_property(QuantizationProperty.PER_CHANNEL):
            assert isinstance(config, ChannelwiseTensorQuantizationConfig)
            created.attributes['axis'] = config.channel_axis

        if related_op is not None and var in related_op.inputs:
            graph.insert_op_between_var_and_op(created, up_var=var, down_op=related_op)
        else: graph.insert_op_on_var(created, var=var.name)

        graph.create_link_with_op(variable=s_var, upstream_op=None, downstream_op=created)
        graph.create_link_with_op(variable=z_var, upstream_op=None, downstream_op=created)

        # NOTE: the right-hand side still reads the old `meta` (shape of the
        # quantized variable) before `meta` is rebound to the OperationMeta.
        meta = OperationMeta(
            input_metas = [TensorMeta(dtype=DataType.FP32, shape=meta.shape),
                           TensorMeta(dtype=DataType.FP32, shape=config.scale.shape),
                           TensorMeta(dtype=DataType.convert_from_torch(offset_dtype), shape=config.offset.shape)],
            output_metas = [TensorMeta(dtype=DataType.convert_from_torch(value_dtype), shape=meta.shape)],
            operation_name = created.name, operation_type=created.type, executing_order=-1)
        created.meta_data = meta
        return created
def insert_dequant_on_variable(
self, graph: BaseGraph, var: QuantableVariable,
config: TensorQuantizationConfig, related_op: Operation,
meta: TensorMeta = None) -> Operation:
if meta is None: meta = var.meta
offset_dtype, value_dtype = self.infer_qtype(config)
scale = convert_any_to_torch_tensor(config.scale.clone(), dtype=torch.float32)
offset = ppq_tensor_round(config.offset.clone()).type(offset_dtype)
s_var = graph.create_variable(name=None, value=scale.clone(), is_parameter=True)
z_var = graph.create_variable(name=None, value=offset.clone(), is_parameter=True)
created = graph.create_operation(op_type='DequantizeLinear', attributes={})
if config.policy.has_property(QuantizationProperty.PER_CHANNEL):
assert isinstance(config, ChannelwiseTensorQuantizationConfig)
created.attributes['axis'] = config.channel_axis
if var in related_op.inputs:
graph.insert_op_between_var_and_op(created, up_var=var, down_op=related_op)
else: graph.insert_op_on_var(created, var=var.name)
graph.create_link_with_op(variable=s_var, upstream_op=None, downstream_op=created)
graph.create_link_with_op(variable=z_var, upstream_op=None, downstream_op=created)
dq_meta = OperationMeta(
input_metas = [TensorMeta(dtype=DataType.convert_from_torch(value_dtype), shape=meta.shape),
TensorMeta(dtype=DataType.FP32, shape=config.scale.shape),
TensorMeta(dtype=DataType.convert_from_torch(offset_dtype), shape=config.offset.shape)],
output_metas = [TensorMeta(dtype=DataType.FP32, shape=meta.shape)],
operation_name = created.name, operation_type=created.type, executing_order=-1)
created.meta_data = dq_meta
return created
def remove_activation_ops(self, graph: BaseGraph) -> BaseGraph:
"""For Asymmetric Quantization Policy, Activations like Relu & Clip can
be removed from your network safely. Their function can be replaced by
quant & dequant operations.
So to say those activation is unnecessary for Asymmetric quantized network.
Args:
graph (BaseGraph): Processing Graph
activation_ops (List[Operation]): Removing activations.
"""
removed_activations = []
for op in graph.operations.values():
if not isinstance(op, QuantableOperation): continue
if op.type in {'Relu', 'Clip'}:
# Only ASYMMETRICAL quantized activations can be safely removed.
if op.config.input_quantization_config[0].policy.has_property(QuantizationProperty.ASYMMETRICAL):
removed_activations.append(op)
# Activation op can only be relu and clip,
# so it is safe to access op.inputs[0], op.outputs[0] as their input and output.
for op in removed_activations:
if not isinstance(op, QuantableOperation): continue
if len(graph.get_upstream_operations(op)) == 0: continue
quant_config = op.config.output_quantization_config[0]
upstream_op = graph.get_upstream_operations(op)[0]
if not isinstance(upstream_op, QuantableOperation): continue
if len(graph.get_downstream_operations(upstream_op)) != 1: continue
input_var, input_cfg = op.inputs[0], op.config.input_quantization_config[0]
if not input_cfg.policy.has_property(QuantizationProperty.ASYMMETRICAL): continue
# PATCH 20220304 Removing graph output op might cause error.
if op.outputs[0].name in graph.outputs:
graph.outputs.pop(op.outputs[0].name)
graph.outputs[input_var.name] = input_var
input_var, output_var = op.inputs[0], op.outputs[0]
graph.remove_operation(op)
graph.create_link_with_var(input_var, output_var)
# insert quant & dequant op on var
self.insert_dequant_on_variable(
graph=graph, var=input_var, config=quant_config,
related_op=upstream_op, meta=input_var.meta)
self.insert_quant_on_variable(
graph=graph, var=input_var, config=quant_config,
related_op=upstream_op, meta=input_var.meta)
formatter = GraphFormatter(graph)
formatter(GraphCommand(GraphCommandType.DELETE_ISOLATED))
return graph
def remove_duplicated_quant_op(self, graph: BaseGraph) -> BaseGraph:
"""Some time there will be more than 1 quant operation inserted with a
single variable. This function will remove duplicated quant operation
from variable if it is possible.
If inserted quant operations do not share a same zeropoint and scale,
Then there is no way to remove any one of them.
Args:
graph (BaseGraph): Processing Graph
Returns:
_type_: Processed Graph
"""
interested_pairs = []
for qt_op in graph.operations.values():
if qt_op.type == 'QuantizeLinear':
if len(graph.get_upstream_operations(qt_op)) != 1: continue
if graph.get_upstream_operations(qt_op)[0].type != 'DequantizeLinear': continue
interested_pairs.append((qt_op, graph.get_upstream_operations(qt_op)[0]))
mark_to_remove = set()
for qt_op, dq_op in interested_pairs:
assert isinstance(dq_op, Operation)
assert isinstance(qt_op, Operation)
scale_diff = torch.max(torch.abs(dq_op.inputs[1].value - qt_op.inputs[1].value)).item()
zeropoint_diff = torch.max(torch.abs(dq_op.inputs[2].value - qt_op.inputs[2].value)).item()
if scale_diff < 1e-5 and zeropoint_diff < 0.5: # zero point 是整数,所以只要误差小于1就行了。
# mark quant operation and its following operation(suppose to be another dequantization op)
mark_to_remove.add(qt_op)
assert len(graph.get_downstream_operations(qt_op)) == 1, 'Oops, that should never happen.'
mark_to_remove.add(graph.get_downstream_operations(qt_op)[0])
for op in mark_to_remove:
assert isinstance(op, Operation)
input_var, output_var = op.inputs[0], op.outputs[0]
graph.remove_operation(op)
graph.create_link_with_var(input_var, output_var)
return graph
@ property
def required_opsets(self) -> Dict[str, int]:
extra_domain_versions = [('ai.onnx', 13)]
return dict(extra_domain_versions)
def convert_operation_from_opset11_to_opset13(self, graph:BaseGraph) -> None:
"""Convert your network from opset 11 standard towards opset 13 With
Onnx definition, per-channel quant operation requires opset 13.
Args:
graph (BaseGraph): Processing graph.
"""
# this func transform representation of certain op from opset 11 to 13
for op in graph.operations.values():
if op.type == 'ReduceSum' or op.type == 'Squeeze' or op.type == 'Unsqueeze':
axes = convert_any_to_torch_tensor(op.attributes.pop('axes'), dtype=torch.int64)
var = graph.create_variable(name=None, value=axes, is_parameter=True)
graph.create_link_with_op(variable=var, upstream_op=None, downstream_op=op)
op.meta_data.input_metas.append(TensorMeta.parsing_from_torch_tensor(var.value, var.name))
elif op.type == 'Split':
if 'split' not in op.attributes: continue # split is already v13
split = convert_any_to_torch_tensor(op.attributes.pop('split'), dtype=torch.int64)
var = graph.create_variable(name=None, value=split, is_parameter=True)
graph.create_link_with_op(variable=var, upstream_op=None, downstream_op=op)
op.meta_data.input_metas.append(TensorMeta.parsing_from_torch_tensor(var.value, var.name))
    def convert_operation(self, graph: BaseGraph, op: QuantableOperation,
                          process_activation: bool, process_parameter: bool,
                          quant_param_to_int: bool):
        """Convert an operation to onnx quant & dequant format by inserting
        necessary quant & dequant op around it. There are 2 ways to represent
        quantized ONNX models:

        Operator Oriented. All the quantized operators have their own ONNX definitions,
            like QLinearConv, MatMulInteger and etc.

        Tensor Oriented, aka Quantize and DeQuantize (QDQ).
            This format uses DQ(Q(tensor)) to simulate the quantize and dequantize process,
            and QuantizeLinear and DeQuantizeLinear operators also carry the quantization parameters.

        Quantization-Aware training (QAT) models converted from Tensorflow or exported from PyTorch.

        Quantized models converted from tflite and other framework.

        Args:
            graph (BaseGraph): PPQ IR
            op (Operation): Converting op
            process_activation (bool): Converting op's activation
            process_parameter (bool): Converting op's parameter
            quant_param_to_int (bool): Quant op's parameter to int8
        """
        # collect quantable vars, where we need to insert quant and dequant op
        for config, var in op.config_with_variable:
            meta = var.meta
            if var.is_parameter:
                # we do not want to process clip value here.
                if op.type in {'Clip'}: continue

                assert len(var.dest_ops) == 1, (
                    f'Can not export variable {var.name}, cause it has more than 1 destination operations. '
                    'PPQ require all parameters to have only 1 destination operation.')

                if not process_parameter: continue
                # override quantization state, so that we can export parameter correctly.
                if config.state == QuantizationStates.BAKED:
                    config.state = QuantizationStates.ACTIVATED
                if config.state == QuantizationStates.PASSIVE_BAKED:
                    config.state = QuantizationStates.PASSIVE

                if QuantizationStates.can_export(config.state) and config.state not in {
                    QuantizationStates.FP32, QuantizationStates.SOI}:
                    # if not quant parameter to int, all parameter should export as fp32.
                    # needs insert both quant and dequant op for them
                    if not quant_param_to_int:
                        created = self.insert_quant_on_variable(
                            graph=graph, var=var, config=config, related_op=op, meta=meta)
                        # Continue working on the quant op's output so the
                        # dequant op lands after the quant op.
                        var = created.outputs[0]

                    self.insert_dequant_on_variable(
                        graph=graph, var=var, config=config, related_op=op, meta=meta)
                    if quant_param_to_int:
                        # Parameter is stored pre-quantized as integers; only the
                        # DequantizeLinear op remains in the exported graph.
                        var.value = PPQLinearQuant_toInt(tensor=var.value, config=config)
            else:
                # Activation path: insert a full Quantize -> Dequantize pair.
                if not process_activation: continue

                if QuantizationStates.can_export(config.state) and config.state not in {
                    QuantizationStates.FP32, QuantizationStates.SOI}:
                    created = self.insert_quant_on_variable(
                        graph=graph, var=var, config=config, related_op=op, meta=meta)
                    var = created.outputs[0]
                    self.insert_dequant_on_variable(
                        graph=graph, var=var, config=config, related_op=op, meta=meta)
def prepare_graph(
self, graph: BaseGraph,
process_activation: bool = True,
process_parameter: bool = True,
remove_activation_fn: bool = True,
quant_parameter_to_int: bool = True) | |
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Instalog datatypes.
Represents data that moves through Instalog (events, attachments) and ways of
iterating through it.
"""
import copy
import datetime
import filecmp
import logging
import time
from cros.factory.instalog import json_utils
from cros.factory.instalog import plugin_base
from cros.factory.instalog.utils import time_utils
class ProcessStage(json_utils.Serializable):
  """A single stage in an Event's processing history."""

  BUFFER = 'BUFFER'
  EXTERNAL = 'EXTERNAL'

  # pylint: disable=redefined-outer-name
  def __init__(self, node_id, time, plugin_id, plugin_type, target):
    self.node_id = node_id
    self.time = time
    self.plugin_id = plugin_id
    self.plugin_type = plugin_type
    self.target = target

  def ToDict(self):
    """Serializes this ProcessStage into a plain dictionary."""
    return {
        'node_id': self.node_id,
        'time': self.time,
        'plugin_id': self.plugin_id,
        'plugin_type': self.plugin_type,
        'target': self.target,
    }

  @classmethod
  def FromDict(cls, dct):
    """Builds a ProcessStage from its dictionary equivalent."""
    # Normalize datetime timestamps to Unix time (mutates dct in place,
    # matching the original behavior).
    if isinstance(dct['time'], datetime.datetime):
      dct['time'] = time_utils.DatetimeToUnixtime(dct['time'])
    return cls(
        dct['node_id'], dct['time'], dct['plugin_id'],
        dct['plugin_type'], dct['target'])

  def __repr__(self):
    """Debugging representation."""
    return (f'ProcessStage(node_id={self.node_id!r}, time={self.time!r}, '
            f'plugin_id={self.plugin_id!r}, plugin_type={self.plugin_type!r}, '
            f'target={self.target!r})')
class Event(json_utils.Serializable):
  """Represents an Instalog event.

  Properties:
    payload: A dictionary representing Event data. It can be accessed either
             through normal dictionary operators on the Event object itself
             (e.g. event['field']), or through the `payload` property
             (e.g. event.payload[0]).
    attachments: Dictionary of attachments for this event. Key is a string
                 identifying the file attachment; might match an ID within the
                 event payload itself. Value is where the file can be located
                 on the filesystem. Assumed to have read permissions.
    history: A list representing the processing history of this Event. A list
             of ProcessStage objects. The first ProcessStage object represents
             the InputPlugin from which the Event originates.
  """

  def __init__(self, payload, attachments=None, history=None):
    """Initializes the Event; raises TypeError on wrongly-typed arguments."""
    self.payload = payload
    self.attachments = {} if attachments is None else attachments
    self.history = [] if history is None else history
    if not isinstance(self.payload, dict):
      raise TypeError('Provided payload argument must be of type `dict`')
    if not isinstance(self.attachments, dict):
      raise TypeError('Provided attachments argument must be of type `dict`')
    if not isinstance(self.history, list):
      raise TypeError('Provided history argument must be of type `list`')

  def AppendStage(self, process_stage):
    """Records the next processing stage in this Event's history."""
    self.history.append(process_stage)

  @classmethod
  def Deserialize(cls, json_string):
    """Deserializes an Event object given as a JSON string.

    Supports three wire formats: the current tagged-object form, a legacy
    two-element list, and a bare payload dict.
    """
    if isinstance(json_string, bytes):
      json_string = json_string.decode('utf-8')
    obj = json_utils.decoder.decode(json_string)
    # json_string = '{"__type__": "Event", "payload": {payload}, '
    #               '"attachments": {attachments}, "history": {history}}'
    if isinstance(obj, Event):
      return obj
    # Legacy object serialization.
    # json_string = '[{payload}, {attachments}]'
    if isinstance(obj, list):
      if len(obj) != 2:
        raise ValueError('Given JSON string is a list, but the length is not 2')
      return cls(
          payload=obj[0],
          attachments=obj[1])
    # Case of only 'payload' being provided (run_plugin.py).
    # json_string = '{payload}'
    if isinstance(obj, dict):
      return cls(payload=obj)
    raise ValueError('Unable to deserialize the JSON string: %s' %
                     json_string)

  def ToDict(self):
    """Returns the dictionary equivalent of the Event object."""
    return {
        'payload': self.payload,
        'attachments': self.attachments,
        'history': self.history,
    }

  @classmethod
  def FromDict(cls, dct):
    """Returns an Event object from its dictionary equivalent."""
    return cls(
        payload=dct['payload'],
        attachments=dct['attachments'],
        history=dct['history'])

  def __repr__(self):
    """Implements repr function for debugging."""
    return ('Event(payload=%s, attachments=%s, history=%s)'
            % (self.payload, self.attachments, self.history))

  def __eq__(self, other):
    """Implements == operator.

    NOTE: `history` is not part of the equality check; only payload and
    attachments are compared.
    """
    if not self.payload == other.payload:
      return False
    if not len(self.attachments) == len(other.attachments):
      return False
    for att_id, att_path in self.attachments.items():
      if att_id not in other.attachments:
        return False
      other_path = other.attachments[att_id]
      # Differing paths still compare equal when the file contents match.
      if att_path != other_path and not filecmp.cmp(att_path, other_path):
        return False
    return True

  def __ne__(self, other):
    """Implements != operator."""
    return not self == other

  def __getitem__(self, key):
    """Implements dict [] get operator."""
    return self.payload[key]

  def get(self, key, default=None):
    """Implements dict get function."""
    return self.payload.get(key, default)

  def __setitem__(self, key, value):
    """Implements dict [] set operator."""
    self.payload[key] = value

  def __contains__(self, item):
    """Implements dict `in` operator."""
    return item in self.payload

  def keys(self):
    """Implements dict keys function."""
    return list(self.payload)

  def values(self):
    """Implements dict values function."""
    return list(self.payload.values())

  def iteritems(self):
    """Implements iteritems function (legacy Python 2 naming)."""
    return iter(self.payload.items())

  def setdefault(self, key, default):
    """Implements setdefault function."""
    return self.payload.setdefault(key, default)

  def __copy__(self):
    """Implements __copy__ function.

    The copy shares payload/attachments/history objects with the original.
    """
    return Event(self.payload, self.attachments, self.history)

  def __deepcopy__(self, memo):
    """Implements __deepcopy__ function."""
    result = self.__class__(copy.deepcopy(self.payload),
                            copy.deepcopy(self.attachments),
                            copy.deepcopy(self.history))
    # Avoid excess copying if the Event is referenced from within the Event.
    memo[id(self)] = result
    return result

  def Copy(self):
    """Uses __copy__ to return a shallow copy of this Event."""
    return self.__copy__()
class EventStream:
  """A stream of events handed to an output plugin for processing.

  Properties:
    _plugin: The plugin consuming this stream.
    _plugin_api: An object implementing PluginAPI (usually this plugin's
                 PluginSandbox) that backs the stream operations.
    _count: How many events have been handed out through this stream so far.
  """

  def __init__(self, plugin, plugin_api):
    self._plugin = plugin
    self._plugin_api = plugin_api
    self._count = 0

  def __iter__(self):
    return self.iter()

  def iter(self, *args, **kwargs):
    """Builds an iterator over this EventStream.

    See EventStreamIterator for the accepted arguments.

    Returns:
      An EventStreamIterator instance.
    """
    logging.debug('Creating a stream iterator...')
    return EventStreamIterator(self, *args, **kwargs)

  def GetCount(self):
    """The total number of events retrieved so far."""
    return self._count

  def Next(self, timeout=1):
    """Fetches the next available event from the buffer.

    Like a normal Python iterable, raises StopIteration when no more events
    are available; if the plugin has been paused, raises WaitException
    instead.

    Args:
      timeout: Seconds to wait for retrieving the next event.

    Returns:
      None on timeout or when no event is currently available.

    Raises:
      WaitException if the plugin has been paused.
    """
    event = self._plugin_api.EventStreamNext(self._plugin, self, timeout)
    if event is None:
      return None
    self._count += 1
    return event

  def Commit(self):
    """Marks the current batch of events as successfully processed.

    Raises:
      UnexpectedAccess if the plugin instance is in some unexpected state and
      is trying to access core functionality that it should not.
    """
    return self._plugin_api.EventStreamCommit(self._plugin, self)

  def Abort(self):
    """Marks the current batch of events as failed to process.

    Raises:
      UnexpectedAccess if the plugin instance is in some unexpected state and
      is trying to access core functionality that it should not.
    """
    return self._plugin_api.EventStreamAbort(self._plugin, self)
class EventStreamIterator:
"""Iterator to get events out of an EventStream.
Properties:
event_stream: The EventStream from which to pull events.
blocking: Whether or not to make a blocking call (wait when no events
are available).
timeout: If making a blocking call, the total time to wait for new events
before timing out. Timing starts when the iterator is created, and
includes any time taken by the plugin to do work on the event.
interval: Time to wait in between making next() calls.
count: Number of events to retrieve before stopping.
_current_count: Current number of events retrieved.
_start: Start time, when this iterator was created.
"""
# By default, whether or not to block on next() calls.
_DEFAULT_BLOCKING = True
# By default, how long to block on next() calls. We want to prevent plugins
# from accidentally blocking forever.
_DEFAULT_TIMEOUT = 30
# By default, how long to block in between failed next() call attempts.
_DEFAULT_INTERVAL = 0.5
# By default, how many events to pull until the iterator ends. Default is
# to pull events until no longer available.
_DEFAULT_COUNT = float('inf')
  def __init__(self, event_stream, blocking=_DEFAULT_BLOCKING,
               timeout=_DEFAULT_TIMEOUT, interval=_DEFAULT_INTERVAL,
               count=_DEFAULT_COUNT):
    """Initializes the iterator.

    Args:
      event_stream: The EventStream from which to pull events.
      blocking: Whether next() calls wait when no events are available.
      timeout: Total seconds to wait for new events before timing out,
        measured from iterator creation.
      interval: Seconds to wait in between next() attempts.
      count: Number of events to retrieve before stopping.
    """
    self.event_stream = event_stream
    self.blocking = blocking
    self.timeout = timeout
    self.interval = interval
    self.count = count
    self._current_count = 0
    # Monotonic clock: the timeout is immune to wall-clock adjustments.
    self._start = time_utils.MonotonicTime()
  def __iter__(self):
    """Returns self, as required by the iterator protocol."""
    return self
def __next__(self):
"""Returns next event from the EventStream.
Raises:
StopIteration if event count is reached, if timeout is reached,
or if a WaitException is encountered.
"""
while True:
# Check to see if we have enough events.
if self._current_count >= self.count:
logging.debug('Count up!')
raise StopIteration
# Check to see if we have timed out.
if (self.blocking and
(time_utils.MonotonicTime() - self._start) >= self.timeout):
logging.debug('Iterator timeout!')
raise StopIteration
# Try getting the next event. If the plugin is in a waiting state,
# stop iteration immediately.
try:
remaining_time = self._start + self.timeout - time_utils.MonotonicTime()
ret = self.event_stream.Next(timeout=remaining_time)
except plugin_base.WaitException:
raise StopIteration
# We have a value, exit the loop.
if ret | |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from ydb.public.api.protos import ydb_table_pb2 as ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2
class TableServiceStub(object):
    """gRPC client stub for Ydb.Table.V1.TableService.

    Auto-generated by the gRPC Python protocol compiler plugin; do not edit.
    Each attribute set in __init__ is a callable bound to one RPC method.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # --- Session lifecycle ---
        self.CreateSession = channel.unary_unary(
                '/Ydb.Table.V1.TableService/CreateSession',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateSessionRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateSessionResponse.FromString,
                )
        self.DeleteSession = channel.unary_unary(
                '/Ydb.Table.V1.TableService/DeleteSession',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DeleteSessionRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DeleteSessionResponse.FromString,
                )
        self.KeepAlive = channel.unary_unary(
                '/Ydb.Table.V1.TableService/KeepAlive',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.KeepAliveRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.KeepAliveResponse.FromString,
                )
        # --- Table DDL ---
        self.CreateTable = channel.unary_unary(
                '/Ydb.Table.V1.TableService/CreateTable',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateTableRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateTableResponse.FromString,
                )
        self.DropTable = channel.unary_unary(
                '/Ydb.Table.V1.TableService/DropTable',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DropTableRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DropTableResponse.FromString,
                )
        self.AlterTable = channel.unary_unary(
                '/Ydb.Table.V1.TableService/AlterTable',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.AlterTableRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.AlterTableResponse.FromString,
                )
        self.CopyTable = channel.unary_unary(
                '/Ydb.Table.V1.TableService/CopyTable',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTableRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTableResponse.FromString,
                )
        self.CopyTables = channel.unary_unary(
                '/Ydb.Table.V1.TableService/CopyTables',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTablesRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTablesResponse.FromString,
                )
        self.RenameTables = channel.unary_unary(
                '/Ydb.Table.V1.TableService/RenameTables',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RenameTablesRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RenameTablesResponse.FromString,
                )
        self.DescribeTable = channel.unary_unary(
                '/Ydb.Table.V1.TableService/DescribeTable',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DescribeTableRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DescribeTableResponse.FromString,
                )
        # --- Queries and transactions ---
        self.ExplainDataQuery = channel.unary_unary(
                '/Ydb.Table.V1.TableService/ExplainDataQuery',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExplainDataQueryRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExplainDataQueryResponse.FromString,
                )
        self.PrepareDataQuery = channel.unary_unary(
                '/Ydb.Table.V1.TableService/PrepareDataQuery',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.PrepareDataQueryRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.PrepareDataQueryResponse.FromString,
                )
        self.ExecuteDataQuery = channel.unary_unary(
                '/Ydb.Table.V1.TableService/ExecuteDataQuery',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteDataQueryRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteDataQueryResponse.FromString,
                )
        self.ExecuteSchemeQuery = channel.unary_unary(
                '/Ydb.Table.V1.TableService/ExecuteSchemeQuery',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteSchemeQueryRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteSchemeQueryResponse.FromString,
                )
        self.BeginTransaction = channel.unary_unary(
                '/Ydb.Table.V1.TableService/BeginTransaction',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.BeginTransactionRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.BeginTransactionResponse.FromString,
                )
        self.CommitTransaction = channel.unary_unary(
                '/Ydb.Table.V1.TableService/CommitTransaction',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CommitTransactionRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CommitTransactionResponse.FromString,
                )
        self.RollbackTransaction = channel.unary_unary(
                '/Ydb.Table.V1.TableService/RollbackTransaction',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RollbackTransactionRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RollbackTransactionResponse.FromString,
                )
        self.DescribeTableOptions = channel.unary_unary(
                '/Ydb.Table.V1.TableService/DescribeTableOptions',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DescribeTableOptionsRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DescribeTableOptionsResponse.FromString,
                )
        # --- Bulk and streaming operations (server-streaming use unary_stream) ---
        self.StreamReadTable = channel.unary_stream(
                '/Ydb.Table.V1.TableService/StreamReadTable',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ReadTableRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ReadTableResponse.FromString,
                )
        self.BulkUpsert = channel.unary_unary(
                '/Ydb.Table.V1.TableService/BulkUpsert',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.BulkUpsertRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.BulkUpsertResponse.FromString,
                )
        self.StreamExecuteScanQuery = channel.unary_stream(
                '/Ydb.Table.V1.TableService/StreamExecuteScanQuery',
                request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteScanQueryRequest.SerializeToString,
                response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteScanQueryPartialResponse.FromString,
                )
class TableServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
    def CreateSession(self, request, context):
        """Create new session. Implicit session creation is forbidden,
        so user must create new session before executing any query,
        otherwise BAD_SESSION status will be returned.
        Simultaneous execution of requests is forbidden.
        Sessions are volatile, can be invalidated by server, for example in case
        of fatal errors. All requests with this session will fail with BAD_SESSION status.
        So, client must be able to handle BAD_SESSION status.
        """
        # Generated placeholder: a concrete servicer must override this method.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeleteSession(self, request, context):
        """Ends a session, releasing server resources associated with it.
        """
        # Generated placeholder: a concrete servicer must override this method.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def KeepAlive(self, request, context):
"""Idle sessions can be kept alive by calling KeepAlive periodically.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateTable(self, request, context):
"""Creates new table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DropTable(self, request, context):
"""Drop table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AlterTable(self, request, context):
"""Modifies schema of given table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CopyTable(self, request, context):
"""Creates copy of given table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CopyTables(self, request, context):
"""Creates consistent copy of given tables.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RenameTables(self, request, context):
"""Creates consistent move of given tables.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DescribeTable(self, request, context):
"""Returns information about given table (metadata).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExplainDataQuery(self, request, context):
"""Explains data query.
SessionId of previously created session must be provided.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PrepareDataQuery(self, request, context):
"""Prepares data query, returns query id.
SessionId of previously created session must be provided.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExecuteDataQuery(self, request, context):
"""Executes data query.
SessionId of previously created session must be provided.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExecuteSchemeQuery(self, request, context):
"""Executes scheme query.
SessionId of previously created session must be provided.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BeginTransaction(self, request, context):
"""Begins new transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CommitTransaction(self, request, context):
"""Commits specified active transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RollbackTransaction(self, request, context):
"""Performs a rollback of the specified active transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DescribeTableOptions(self, request, context):
"""Describe supported table options.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamReadTable(self, request, context):
"""Streaming read table
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BulkUpsert(self, request, context):
"""Upserts a batch of rows non-transactionally.
Returns success only when all rows were successfully upserted. In case of an error some rows might
be upserted and some might not.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamExecuteScanQuery(self, request, context):
"""Executes scan query with streaming result.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TableServiceServicer_to_server(servicer, server):
    """Register all Ydb.Table.V1.TableService RPC handlers of *servicer*
    on *server*.

    The handler dictionary is built from a declarative table instead of
    21 near-identical literal entries; keys, handler kinds and message
    classes are identical to the generated original.
    """
    _pb2 = ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2
    _unary = grpc.unary_unary_rpc_method_handler
    _stream = grpc.unary_stream_rpc_method_handler
    # (RPC name, handler factory, request message, response message).
    # Note the irregular entries: StreamReadTable uses ReadTable* messages
    # and StreamExecuteScanQuery returns a *Partial* response.
    _methods = (
        ('CreateSession', _unary, 'CreateSessionRequest', 'CreateSessionResponse'),
        ('DeleteSession', _unary, 'DeleteSessionRequest', 'DeleteSessionResponse'),
        ('KeepAlive', _unary, 'KeepAliveRequest', 'KeepAliveResponse'),
        ('CreateTable', _unary, 'CreateTableRequest', 'CreateTableResponse'),
        ('DropTable', _unary, 'DropTableRequest', 'DropTableResponse'),
        ('AlterTable', _unary, 'AlterTableRequest', 'AlterTableResponse'),
        ('CopyTable', _unary, 'CopyTableRequest', 'CopyTableResponse'),
        ('CopyTables', _unary, 'CopyTablesRequest', 'CopyTablesResponse'),
        ('RenameTables', _unary, 'RenameTablesRequest', 'RenameTablesResponse'),
        ('DescribeTable', _unary, 'DescribeTableRequest', 'DescribeTableResponse'),
        ('ExplainDataQuery', _unary, 'ExplainDataQueryRequest', 'ExplainDataQueryResponse'),
        ('PrepareDataQuery', _unary, 'PrepareDataQueryRequest', 'PrepareDataQueryResponse'),
        ('ExecuteDataQuery', _unary, 'ExecuteDataQueryRequest', 'ExecuteDataQueryResponse'),
        ('ExecuteSchemeQuery', _unary, 'ExecuteSchemeQueryRequest', 'ExecuteSchemeQueryResponse'),
        ('BeginTransaction', _unary, 'BeginTransactionRequest', 'BeginTransactionResponse'),
        ('CommitTransaction', _unary, 'CommitTransactionRequest', 'CommitTransactionResponse'),
        ('RollbackTransaction', _unary, 'RollbackTransactionRequest', 'RollbackTransactionResponse'),
        ('DescribeTableOptions', _unary, 'DescribeTableOptionsRequest', 'DescribeTableOptionsResponse'),
        ('StreamReadTable', _stream, 'ReadTableRequest', 'ReadTableResponse'),
        ('BulkUpsert', _unary, 'BulkUpsertRequest', 'BulkUpsertResponse'),
        ('StreamExecuteScanQuery', _stream, 'ExecuteScanQueryRequest', 'ExecuteScanQueryPartialResponse'),
    )
    rpc_method_handlers = {
        name: factory(
            getattr(servicer, name),
            request_deserializer=getattr(_pb2, req).FromString,
            response_serializer=getattr(_pb2, resp).SerializeToString,
        )
        for name, factory, req, resp in _methods
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'Ydb.Table.V1.TableService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class TableService(object):
    """Connectionless invocation helpers for Ydb.Table.V1.TableService.

    Each static method performs a single RPC against *target* through
    ``grpc.experimental``; the keyword arguments mirror the
    grpc.experimental API and are forwarded positionally in the order
    that API expects (note: ``insecure`` precedes ``call_credentials``).
    """

    @staticmethod
    def CreateSession(request, target, options=(), channel_credentials=None,
                      call_credentials=None, insecure=False, compression=None,
                      wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/CreateSession',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateSessionRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateSessionResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteSession(request, target, options=(), channel_credentials=None,
                      call_credentials=None, insecure=False, compression=None,
                      wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/DeleteSession',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DeleteSessionRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DeleteSessionResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def KeepAlive(request, target, options=(), channel_credentials=None,
                  call_credentials=None, insecure=False, compression=None,
                  wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/KeepAlive',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.KeepAliveRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.KeepAliveResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CreateTable(request, target, options=(), channel_credentials=None,
                    call_credentials=None, insecure=False, compression=None,
                    wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/CreateTable',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateTableRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CreateTableResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DropTable(request, target, options=(), channel_credentials=None,
                  call_credentials=None, insecure=False, compression=None,
                  wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/DropTable',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DropTableRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DropTableResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def AlterTable(request, target, options=(), channel_credentials=None,
                   call_credentials=None, insecure=False, compression=None,
                   wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/AlterTable',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.AlterTableRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.AlterTableResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CopyTable(request, target, options=(), channel_credentials=None,
                  call_credentials=None, insecure=False, compression=None,
                  wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/CopyTable',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTableRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTableResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CopyTables(request, target, options=(), channel_credentials=None,
                   call_credentials=None, insecure=False, compression=None,
                   wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/CopyTables',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTablesRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CopyTablesResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RenameTables(request, target, options=(), channel_credentials=None,
                     call_credentials=None, insecure=False, compression=None,
                     wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/RenameTables',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RenameTablesRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RenameTablesResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DescribeTable(request, target, options=(), channel_credentials=None,
                      call_credentials=None, insecure=False, compression=None,
                      wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/DescribeTable',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DescribeTableRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.DescribeTableResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ExplainDataQuery(request, target, options=(), channel_credentials=None,
                         call_credentials=None, insecure=False, compression=None,
                         wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/ExplainDataQuery',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExplainDataQueryRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExplainDataQueryResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PrepareDataQuery(request, target, options=(), channel_credentials=None,
                         call_credentials=None, insecure=False, compression=None,
                         wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/PrepareDataQuery',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.PrepareDataQueryRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.PrepareDataQueryResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ExecuteDataQuery(request, target, options=(), channel_credentials=None,
                         call_credentials=None, insecure=False, compression=None,
                         wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/ExecuteDataQuery',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteDataQueryRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteDataQueryResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ExecuteSchemeQuery(request, target, options=(), channel_credentials=None,
                           call_credentials=None, insecure=False, compression=None,
                           wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/ExecuteSchemeQuery',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteSchemeQueryRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.ExecuteSchemeQueryResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def BeginTransaction(request, target, options=(), channel_credentials=None,
                         call_credentials=None, insecure=False, compression=None,
                         wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/BeginTransaction',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.BeginTransactionRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.BeginTransactionResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CommitTransaction(request, target, options=(), channel_credentials=None,
                          call_credentials=None, insecure=False, compression=None,
                          wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(
            request, target, '/Ydb.Table.V1.TableService/CommitTransaction',
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CommitTransactionRequest.SerializeToString,
            ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.CommitTransactionResponse.FromString,
            options, channel_credentials, insecure, call_credentials,
            compression, wait_for_ready, timeout, metadata)
@staticmethod
def RollbackTransaction(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Table.V1.TableService/RollbackTransaction',
ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RollbackTransactionRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__table__pb2.RollbackTransactionResponse.FromString,
options, | |
os.path.splitext(os.path.basename(selected_file))[0])
def accept(self):
selection_success = True
selected_path = self.lineEdit_fileName.text()
selected_filename = os.path.basename(selected_path)
timestamp = str(datetime.datetime.now())
# Remove some characters from timestap to get valid file name:
timestamp = timestamp[:19].translate({ord(c): None for c in ' :-.'})
target_path = (self.target_dir + '\\'
+ os.path.splitext(selected_filename)[0]
+ '_' + timestamp + '.png')
if os.path.isfile(selected_path):
# Copy file to data folder as png:
try:
imported_img = Image.open(selected_path)
imported_img.save(target_path)
except:
QMessageBox.warning(
self, 'Error',
'Could not load image file.',
QMessageBox.Ok)
selection_success = False
if selection_success:
new_img_number = self.ovm.get_number_imported()
self.ovm.add_imported_img()
self.cs.set_imported_img_centre_s(
new_img_number,
[self.doubleSpinBox_posX.value(),
self.doubleSpinBox_posY.value()])
self.ovm.set_imported_img_rotation(
new_img_number, self.spinBox_rotation.value())
self.ovm.set_imported_img_file(
new_img_number, target_path)
self.ovm.set_imported_img_name(new_img_number,
self.lineEdit_name.text())
width, height = imported_img.size
self.ovm.set_imported_img_size_px_py(
new_img_number, width, height)
self.ovm.set_imported_img_pixel_size(
new_img_number, self.doubleSpinBox_pixelSize.value())
self.ovm.set_imported_img_transparency(
new_img_number, self.spinBox_transparency.value())
else:
QMessageBox.warning(self, 'Error',
'Specified file not found.',
QMessageBox.Ok)
selection_success = False
if selection_success:
super().accept()
#------------------------------------------------------------------------------
class AdjustImageDlg(QDialog):
    """Dialog for adjusting an imported image (position, pixel size,
    rotation, transparency) and previewing the result in the viewport.
    """

    def __init__(self, ovm, cs, selected_img,
                 main_window_queue, main_window_trigger):
        self.ovm = ovm
        self.cs = cs
        self.main_window_queue = main_window_queue
        self.main_window_trigger = main_window_trigger
        self.selected_img = selected_img
        super().__init__()
        loadUi('..\\gui\\adjust_imported_image_dlg.ui', self)
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
        self.setFixedSize(self.size())
        self.show()
        # Populate the widgets with the image's current parameters:
        img = self.selected_img
        self.lineEdit_selectedImage.setText(
            self.ovm.get_imported_img_name(img))
        centre_x, centre_y = self.cs.get_imported_img_centre_s(img)
        self.doubleSpinBox_posX.setValue(centre_x)
        self.doubleSpinBox_posY.setValue(centre_y)
        self.doubleSpinBox_pixelSize.setValue(
            self.ovm.get_imported_img_pixel_size(img))
        self.spinBox_rotation.setValue(
            self.ovm.get_imported_img_rotation(img))
        self.spinBox_transparency.setValue(
            self.ovm.get_imported_img_transparency(img))
        # "Apply" previews the changes in the viewport without closing
        # the dialog; make it (not Cancel) the default button.
        apply_button = self.buttonBox.button(QDialogButtonBox.Apply)
        cancel_button = self.buttonBox.button(QDialogButtonBox.Cancel)
        cancel_button.setAutoDefault(False)
        cancel_button.setDefault(False)
        apply_button.setDefault(True)
        apply_button.setAutoDefault(True)
        apply_button.clicked.connect(self.apply_changes)

    def apply_changes(self):
        """Store the dialog values and redraw the image in the viewport."""
        img = self.selected_img
        self.cs.set_imported_img_centre_s(
            img,
            [self.doubleSpinBox_posX.value(),
             self.doubleSpinBox_posY.value()])
        self.ovm.set_imported_img_pixel_size(
            img, self.doubleSpinBox_pixelSize.value())
        self.ovm.set_imported_img_rotation(
            img, self.spinBox_rotation.value())
        self.ovm.set_imported_img_transparency(
            img, self.spinBox_transparency.value())
        # Signal the main window to reload and redraw this image:
        self.main_window_queue.put('RELOAD IMPORTED' + str(img))
        self.main_window_trigger.s.emit()
#------------------------------------------------------------------------------
class DeleteImageDlg(QDialog):
    """Delete an imported image from the viewport."""

    def __init__(self, ovm):
        self.ovm = ovm
        super().__init__()
        loadUi('..\\gui\\delete_image_dlg.ui', self)
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
        self.setFixedSize(self.size())
        self.show()
        # Populate the list widget with '<number> - <name>' entries for
        # all currently imported images:
        img_list = [str(i) + ' - ' + self.ovm.get_imported_img_name(i)
                    for i in range(self.ovm.get_number_imported())]
        self.listWidget_imagelist.addItems(img_list)

    def accept(self):
        """Delete the image selected in the list, then close the dialog."""
        selected_img = self.listWidget_imagelist.currentRow()
        # Bug fix: QListWidget.currentRow() returns -1 (never None) when
        # nothing is selected, so the previous 'is not None' check always
        # passed and delete_imported_img(-1) could be called. Guard on a
        # non-negative row index instead.
        if selected_img is not None and selected_img >= 0:
            self.ovm.delete_imported_img(selected_img)
        super().accept()
#------------------------------------------------------------------------------
class GridSettingsDlg(QDialog):
"""Let the user change all settings for each grid."""
    def __init__(self, grid_manager, sem, selected_grid,
                 config, main_window_queue, main_window_trigger):
        """Set up the grid settings dialog.

        Loads the .ui layout, populates the grid/colour/resolution/dwell
        selectors, wires all widget signals, and shows the settings of
        ``selected_grid``. Note the order is significant: widgets must be
        populated (with signals implicitly firing) before the final
        refresh calls at the bottom.
        """
        super().__init__()
        self.gm = grid_manager          # grid manager (grid parameters)
        self.sem = sem                  # SEM interface (STORE_RES, DWELL_TIME, beam current)
        self.current_grid = selected_grid
        self.cfg = config
        self.main_window_queue = main_window_queue
        self.main_window_trigger = main_window_trigger
        loadUi('..\\gui\\grid_settings_dlg.ui', self)
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
        self.setFixedSize(self.size())
        self.show()
        # Set up grid selector:
        self.comboBox_gridSelector.addItems(self.gm.get_grid_str_list())
        self.comboBox_gridSelector.setCurrentIndex(self.current_grid)
        self.comboBox_gridSelector.currentIndexChanged.connect(
            self.change_grid)
        # Set up colour selector: one small colour-swatch icon per entry.
        for i in range(len(utils.COLOUR_SELECTOR)):
            rgb = utils.COLOUR_SELECTOR[i]
            colour_icon = QPixmap(20, 10)
            colour_icon.fill(QColor(rgb[0], rgb[1], rgb[2]))
            self.comboBox_colourSelector.addItem(QIcon(colour_icon), '')
        # Frame (tile) resolutions offered by the SEM:
        store_res_list = [
            '%d × %d' % (res[0], res[1]) for res in self.sem.STORE_RES]
        self.comboBox_tileSize.addItems(store_res_list)
        self.comboBox_tileSize.currentIndexChanged.connect(
            self.show_tile_size_and_dose)
        self.comboBox_dwellTime.addItems(map(str, self.sem.DWELL_TIME))
        self.comboBox_dwellTime.currentIndexChanged.connect(
            self.show_tile_size_and_dose)
        self.doubleSpinBox_pixelSize.valueChanged.connect(
            self.show_tile_size_and_dose)
        # Adaptive focus tool button:
        self.toolButton_adaptiveFocus.clicked.connect(
            self.open_adaptive_focus_dlg)
        # Reset wd/stig parameters:
        self.pushButton_resetFocusParams.clicked.connect(
            self.reset_wd_stig_params)
        # Save, add and delete button:
        self.pushButton_save.clicked.connect(self.save_current_settings)
        self.pushButton_addGrid.clicked.connect(self.add_grid)
        self.pushButton_deleteGrid.clicked.connect(self.delete_grid)
        self.update_buttons()
        self.show_current_settings()
        self.show_tile_size_and_dose()
        # inactivating add grid in magc_mode (should be done in magc panel instead)
        if self.cfg['sys']['magc_mode'] == 'True':
            self.pushButton_addGrid.setEnabled(False)
def show_current_settings(self):
self.comboBox_colourSelector.setCurrentIndex(
self.gm.get_display_colour_index(self.current_grid))
# Adaptive focus:
self.checkBox_adaptiveFocus.setChecked(
self.gm.is_adaptive_focus_active(self.current_grid))
self.spinBox_rows.setValue(self.gm.get_number_rows(self.current_grid))
self.spinBox_cols.setValue(self.gm.get_number_cols(self.current_grid))
self.spinBox_overlap.setValue(self.gm.get_overlap(self.current_grid))
self.doubleSpinBox_rotation.setValue(
self.gm.get_rotation(self.current_grid))
self.spinBox_shift.setValue(self.gm.get_row_shift(self.current_grid))
self.doubleSpinBox_pixelSize.setValue(
self.gm.get_pixel_size(self.current_grid))
self.comboBox_tileSize.setCurrentIndex(
self.gm.get_tile_size_selector(self.current_grid))
self.comboBox_dwellTime.setCurrentIndex(
self.gm.get_dwell_time_selector(self.current_grid))
self.spinBox_acqInterval.setValue(
self.gm.get_acq_interval(self.current_grid))
self.spinBox_acqIntervalOffset.setValue(
self.gm.get_acq_interval_offset(self.current_grid))
def show_tile_size_and_dose(self):
"""Calculate and display the tile size and the dose for the current
settings. Updated in real-time as user changes dwell time, frame
resolution and pixel size.
"""
tile_size_selector = self.comboBox_tileSize.currentIndex()
pixel_size = self.doubleSpinBox_pixelSize.value()
width = self.sem.STORE_RES[tile_size_selector][0] * pixel_size / 1000
height = self.sem.STORE_RES[tile_size_selector][1] * pixel_size / 1000
self.label_tileSize.setText('{0:.1f} × '.format(width)
+ '{0:.1f}'.format(height))
current = self.sem.get_beam_current()
dwell_time = float(self.comboBox_dwellTime.currentText())
pixel_size = self.doubleSpinBox_pixelSize.value()
# Show electron dose in electrons per square nanometre.
self.label_dose.setText('{0:.1f}'.format(
utils.calculate_electron_dose(current, dwell_time, pixel_size)))
    def change_grid(self):
        """Slot for comboBox_gridSelector: switch the dialog to the newly
        selected grid and refresh buttons, settings and the size/dose
        display."""
        self.current_grid = self.comboBox_gridSelector.currentIndex()
        self.update_buttons()
        self.show_current_settings()
        self.show_tile_size_and_dose()
def update_buttons(self):
"""Update labels on buttons and disable/enable delete button
depending on which grid is selected. Grid 0 cannot be deleted.
Only the last grid can be deleted. Reason: preserve identities of
grids and tiles within grids.
"""
if self.current_grid == 0:
self.pushButton_deleteGrid.setEnabled(False)
else:
self.pushButton_deleteGrid.setEnabled(
self.current_grid == self.gm.get_number_grids() - 1)
self.pushButton_save.setText(
'Save settings for grid %d' % self.current_grid)
self.pushButton_deleteGrid.setText('Delete grid %d' % self.current_grid)
    def add_grid(self):
        """Add a new grid via the grid manager and select it.

        The grid selector is rebuilt with signals blocked so that
        change_grid() is not triggered mid-update; afterwards the dialog
        is refreshed and the main window is notified to redraw.
        """
        self.gm.add_new_grid()
        self.current_grid = self.gm.get_number_grids() - 1
        # Update grid selector:
        self.comboBox_gridSelector.blockSignals(True)
        self.comboBox_gridSelector.clear()
        self.comboBox_gridSelector.addItems(self.gm.get_grid_str_list())
        self.comboBox_gridSelector.setCurrentIndex(self.current_grid)
        self.comboBox_gridSelector.blockSignals(False)
        self.update_buttons()
        self.show_current_settings()
        self.show_tile_size_and_dose()
        self.main_window_queue.put('GRID SETTINGS CHANGED')
        self.main_window_trigger.s.emit()
    def delete_grid(self):
        """Delete the currently selected (last) grid after confirmation.

        Mirrors add_grid(): rebuilds the grid selector with signals
        blocked, refreshes the dialog, and notifies the main window.
        """
        user_reply = QMessageBox.question(
            self, 'Delete grid',
            'This will delete grid %d.\n\n'
            'Do you wish to proceed?' % self.current_grid,
            QMessageBox.Ok | QMessageBox.Cancel)
        if user_reply == QMessageBox.Ok:
            self.gm.delete_grid()
            self.current_grid = self.gm.get_number_grids() - 1
            # Update grid selector:
            self.comboBox_gridSelector.blockSignals(True)
            self.comboBox_gridSelector.clear()
            self.comboBox_gridSelector.addItems(self.gm.get_grid_str_list())
            self.comboBox_gridSelector.setCurrentIndex(self.current_grid)
            self.comboBox_gridSelector.blockSignals(False)
            self.update_buttons()
            self.show_current_settings()
            self.show_tile_size_and_dose()
            self.main_window_queue.put('GRID SETTINGS CHANGED')
            self.main_window_trigger.s.emit()
    def reset_wd_stig_params(self):
        """Reset focus/astigmatism parameters of all tiles in the current
        grid after user confirmation, then notify the main window.
        """
        user_reply = QMessageBox.question(
            self, 'Reset focus/astigmatism parameters',
            f'This will reset the focus and astigmatism parameters for '
            f'all tiles in grid {self.current_grid}.\n'
            f'Proceed?',
            QMessageBox.Ok | QMessageBox.Cancel)
        if user_reply == QMessageBox.Ok:
            self.gm.initialize_wd_stig_map(self.current_grid)
            self.main_window_queue.put('GRID SETTINGS CHANGED')
            self.main_window_trigger.s.emit()
def save_current_settings(self):
if self.cfg['sys']['magc_mode'] == 'True':
grid_center = self.gm.get_grid_center_s(self.current_grid)
error_msg = ''
self.gm.set_grid_size(self.current_grid,
(self.spinBox_rows.value(),
self.spinBox_cols.value()))
self.gm.set_tile_size_selector(self.current_grid,
self.comboBox_tileSize.currentIndex())
tile_width_p = self.gm.get_tile_width_p(self.current_grid)
input_overlap = self.spinBox_overlap.value()
input_shift = self.spinBox_shift.value()
if -0.3 * tile_width_p <= input_overlap < 0.3 * tile_width_p:
self.gm.set_overlap(self.current_grid, input_overlap)
else:
error_msg = ('Overlap outside of allowed '
'range (-30% .. 30% frame width).')
self.gm.set_rotation(
self.current_grid, self.doubleSpinBox_rotation.value())
if 0 <= input_shift <= tile_width_p:
self.gm.set_row_shift(self.current_grid, input_shift)
else:
error_msg = ('Row shift outside of allowed '
'range (0 .. frame width).')
self.gm.set_display_colour(
self.current_grid, self.comboBox_colourSelector.currentIndex())
self.gm.set_adaptive_focus_enabled(self.current_grid,
self.checkBox_adaptiveFocus.isChecked())
if self.checkBox_adaptiveFocus.isChecked():
self.gm.calculate_focus_gradient(self.current_grid)
# Acquisition parameters:
self.gm.set_pixel_size(self.current_grid,
self.doubleSpinBox_pixelSize.value())
self.gm.set_dwell_time_selector(self.current_grid,
self.comboBox_dwellTime.currentIndex())
self.gm.set_acq_interval(
self.current_grid, self.spinBox_acqInterval.value())
self.gm.set_acq_interval_offset(
self.current_grid, self.spinBox_acqIntervalOffset.value())
# Recalculate grid:
self.gm.calculate_grid_map(self.current_grid)
# Update wd/stig map:
self.gm.initialize_wd_stig_map(self.current_grid)
if self.cfg['sys']['magc_mode'] == 'True':
self.gm.set_grid_center_s(self.current_grid, grid_center)
self.gm.update_source_ROIs_from_grids()
if error_msg:
QMessageBox.warning(self, 'Error', error_msg, QMessageBox.Ok)
else:
self.main_window_queue.put('GRID SETTINGS CHANGED')
self.main_window_trigger.s.emit()
    def open_adaptive_focus_dlg(self):
        """Open the modal sub-dialog for choosing adaptive focus tiles."""
        sub_dialog = AdaptiveFocusSettingsDlg(self.gm, self.current_grid)
        sub_dialog.exec_()
#------------------------------------------------------------------------------
class AdaptiveFocusSettingsDlg(QDialog):
    """Select the tiles to calculate the gradient for the adaptive focus.

    Three reference tiles (upper left, upper right, lower left) are used
    by the grid manager to compute an origin working distance and a
    focus gradient. The selection is written to the grid manager as the
    user edits it; the previous selection is restored on reject.
    """
    def __init__(self, gm, current_grid):
        super().__init__()
        self.gm = gm                      # grid manager (model)
        self.current_grid = current_grid  # index of the grid being edited
        loadUi('..\\gui\\adaptive_focus_settings_dlg.ui', self)
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
        self.setFixedSize(self.size())
        self.show()
        self.lineEdit_currentGrid.setText('Grid ' + str(current_grid))
        self.grid_illustration.setPixmap(QPixmap('..\\img\\grid.png'))
        self.af_tiles = self.gm.get_adaptive_focus_tiles(self.current_grid)
        # Backup variable for currently selected adaptive focus tiles:
        # (restored in reject() if the user cancels)
        self.prev_af_tiles = self.af_tiles.copy()
        # Set up tile selectors for adaptive focus tiles:
        number_of_tiles = self.gm.get_number_tiles(self.current_grid)
        # '-' at combo index 0 means "no tile selected"; tile t maps to
        # combo index t + 1.
        tile_list_str = ['-']
        for tile in range(number_of_tiles):
            tile_list_str.append(str(tile))
        # Discard stored selections that no longer exist in this grid
        # (-1 encodes "unselected").
        for i in range(3):
            if self.af_tiles[i] >= number_of_tiles:
                self.af_tiles[i] = -1
        # Signals are blocked while each selector is populated so that
        # setCurrentIndex() does not fire update_settings() prematurely.
        self.comboBox_tileUpperLeft.blockSignals(True)
        self.comboBox_tileUpperLeft.addItems(tile_list_str)
        self.comboBox_tileUpperLeft.setCurrentIndex(self.af_tiles[0] + 1)
        self.comboBox_tileUpperLeft.currentIndexChanged.connect(
            self.update_settings)
        self.comboBox_tileUpperLeft.blockSignals(False)
        self.comboBox_tileUpperRight.blockSignals(True)
        self.comboBox_tileUpperRight.addItems(tile_list_str)
        self.comboBox_tileUpperRight.setCurrentIndex(self.af_tiles[1] + 1)
        self.comboBox_tileUpperRight.currentIndexChanged.connect(
            self.update_settings)
        self.comboBox_tileUpperRight.blockSignals(False)
        self.comboBox_tileLowerLeft.blockSignals(True)
        self.comboBox_tileLowerLeft.addItems(tile_list_str)
        self.comboBox_tileLowerLeft.setCurrentIndex(self.af_tiles[2] + 1)
        self.comboBox_tileLowerLeft.currentIndexChanged.connect(
            self.update_settings)
        self.comboBox_tileLowerLeft.blockSignals(False)
        # Compute and display the initial origin WD / gradient.
        self.update_settings()
    def update_settings(self):
        """Get selected working distances and calculate origin WD and
        gradient if possible.

        Runs on every selector change: updates the per-tile labels and
        spin boxes, stores the selection in the grid manager, and shows
        either the computed origin WD/gradient or an error string.
        Sets ``self.af_success``, which accept() checks.
        """
        # Combo index 0 is '-', so subtracting 1 yields the tile number
        # or -1 for "unselected".
        self.af_tiles[0] = self.comboBox_tileUpperLeft.currentIndex() - 1
        self.af_tiles[1] = self.comboBox_tileUpperRight.currentIndex() - 1
        self.af_tiles[2] = self.comboBox_tileLowerLeft.currentIndex() - 1
        if self.af_tiles[0] >= 0:
            self.label_t1.setText('Tile ' + str(self.af_tiles[0]) + ':')
            wd = self.gm.get_tile_wd(self.current_grid, self.af_tiles[0])
            # Factor 1000: displayed in mm (status string below uses 'mm').
            self.doubleSpinBox_t1.setValue(wd * 1000)
        else:
            self.label_t1.setText('Tile (-) :')
            self.doubleSpinBox_t1.setValue(0)
        if self.af_tiles[1] >= 0:
            self.label_t2.setText('Tile ' + str(self.af_tiles[1]) + ':')
            wd = self.gm.get_tile_wd(self.current_grid, self.af_tiles[1])
            self.doubleSpinBox_t2.setValue(wd * 1000)
        else:
            self.label_t2.setText('Tile (-) :')
            self.doubleSpinBox_t2.setValue(0)
        if self.af_tiles[2] >= 0:
            self.label_t3.setText('Tile ' + str(self.af_tiles[2]) + ':')
            wd = self.gm.get_tile_wd(self.current_grid, self.af_tiles[2])
            self.doubleSpinBox_t3.setValue(wd * 1000)
        else:
            self.label_t3.setText('Tile (-) :')
            self.doubleSpinBox_t3.setValue(0)
        self.gm.set_adaptive_focus_tiles(self.current_grid, self.af_tiles)
        # Try to calculate focus map:
        self.af_success = self.gm.calculate_focus_gradient(self.current_grid)
        if self.af_success:
            grad = self.gm.get_adaptive_focus_gradient(self.current_grid)
            wd = self.gm.get_tile_wd(self.current_grid, 0)
            # chr(8710) is the increment symbol (Greek capital delta).
            current_status_str = (
                'WD: ' + '{0:.6f}'.format(wd * 1000)
                + ' mm;\n' + chr(8710)
                + 'x: ' + '{0:.6f}'.format(grad[0] * 1000)
                + '; ' + chr(8710) + 'y: ' + '{0:.6f}'.format(grad[1] * 1000))
        else:
            current_status_str = 'Insufficient or incorrect tile selection'
        self.textEdit_originGradients.setText(current_status_str)
    def accept(self):
        """Close the dialog only if the focus gradient was computable."""
        if self.af_success:
            super().accept()
        else:
            QMessageBox.warning(
                self, 'Error',
                'Insufficient or incorrect tile selection. Cannot calculate '
                'origin working distance and focus gradient.',
                QMessageBox.Ok)
    def reject(self):
        """Cancel: restore the previous tile selection in the grid manager."""
        # Restore previous selection:
        self.gm.set_adaptive_focus_tiles(self.current_grid, self.prev_af_tiles)
        # Recalculate with previous setting:
        self.gm.calculate_focus_gradient(self.current_grid)
        super().reject()
#------------------------------------------------------------------------------
class AdaptiveFocusSelectionDlg(QDialog):
    """Let the user pick one of the three adaptive focus tile positions.

    The chosen position (0, 1 or 2) is stored in ``self.selected``;
    it stays None if the dialog is dismissed without a choice.
    """

    def __init__(self, current_af_tiles):
        super().__init__()
        self.selected = None
        loadUi('..\\gui\\adaptive_focus_selection_dlg.ui', self)
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
        self.setFixedSize(self.size())
        self.show()
        self.grid_illustration.setPixmap(QPixmap('..\\img\\grid.png'))
        # Show the currently assigned tile number on each position button,
        # or '-' when no tile is assigned (encoded as a negative value).
        position_buttons = (self.pushButton_pos0,
                            self.pushButton_pos1,
                            self.pushButton_pos2)
        for button, tile in zip(position_buttons, current_af_tiles):
            button.setText(str(tile) if tile >= 0 else '-')
        self.pushButton_pos0.clicked.connect(self.select_pos0)
        self.pushButton_pos1.clicked.connect(self.select_pos1)
        self.pushButton_pos2.clicked.connect(self.select_pos2)

    def select_pos0(self):
        """Accept the dialog with position 0 (upper left) selected."""
        self.selected = 0
        super().accept()

    def select_pos1(self):
        """Accept the dialog with position 1 (upper right) selected."""
        self.selected = 1
        super().accept()

    def select_pos2(self):
        """Accept the dialog with position 2 (lower left) selected."""
        self.selected = 2
        super().accept()
#------------------------------------------------------------------------------
class GridRotationDlg(QDialog):
"""Change the rotation angle of a selected grid."""
def __init__(self, selected_grid, gm, cfg, main_window_queue, main_window_trigger):
self.selected_grid = selected_grid
self.gm = gm
self.cfg = cfg
self.main_window_queue = main_window_queue
self.main_window_trigger = main_window_trigger
self.rotation_in_progress = False
super().__init__()
loadUi('..\\gui\\change_grid_rotation_dlg.ui', self)
self.setWindowModality(Qt.ApplicationModal)
self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
self.setFixedSize(self.size())
self.show()
self.label_description.setText(
f'Rotation of selected grid {self.selected_grid} in degrees:')
# Keep current angle and origin to enable undo option
self.previous_angle = self.gm.get_rotation(selected_grid)
self.previous_origin = self.gm.get_grid_origin_s(selected_grid)
# Set initial values:
self.doubleSpinBox_angle.setValue(self.previous_angle)
# Slider value 0..719 (twice the angle in degrees) for 0.5 | |
initialize the random number generator.
Can be None.
:param balance_experiences: If True, pattern of each class will be equally
spread across all experiences. If False, patterns will be assigned to
experiences in a complete random way. Defaults to False.
:param min_class_patterns_in_exp: The minimum amount of patterns of
every class that must be assigned to every experience. Compatible with
the ``balance_experiences`` parameter. An exception will be raised if
this constraint can't be satisfied. Defaults to 0.
:param fixed_exp_assignment: If not None, the pattern assignment
to use. It must be a list with an entry for each experience. Each entry
is a list that contains the indexes of patterns belonging to that
experience. Overrides the ``shuffle``, ``balance_experiences`` and
``min_class_patterns_in_exp`` parameters.
:param reproducibility_data: If not None, overrides all the other
scenario definition options, including ``fixed_exp_assignment``.
This is usually a dictionary containing data used to
reproduce a specific experiment. One can use the
``get_reproducibility_data`` method to get (and even distribute)
the experiment setup so that it can be loaded by passing it as this
parameter. In this way one can be sure that the same specific
experimental setup is being used (for reproducibility purposes).
Beware that, in order to reproduce an experiment, the same train and
test datasets must be used. Defaults to None.
:return: A properly initialized :class:`NIScenario` instance.
"""
seq_train_dataset, seq_test_dataset = train_dataset, test_dataset
if isinstance(train_dataset, list) or isinstance(train_dataset, tuple):
if len(train_dataset) != len(test_dataset):
raise ValueError('Train/test dataset lists must contain the '
'exact same number of datasets')
seq_train_dataset, seq_test_dataset, _ = \
concat_datasets_sequentially(train_dataset, test_dataset)
# Datasets should be instances of AvalancheDataset
seq_train_dataset = as_classification_dataset(seq_train_dataset).train()
seq_test_dataset = as_classification_dataset(seq_test_dataset).eval()
return NIScenario(
seq_train_dataset, seq_test_dataset,
n_experiences,
task_labels,
shuffle=shuffle, seed=seed,
balance_experiences=balance_experiences,
min_class_patterns_in_exp=min_class_patterns_in_exp,
fixed_exp_assignment=fixed_exp_assignment,
reproducibility_data=reproducibility_data)
def dataset_scenario(
        train_dataset_list: Sequence[SupportedDataset],
        test_dataset_list: Sequence[SupportedDataset],
        task_labels: Sequence[int],
        *,
        complete_test_set_only: bool = False,
        dataset_type: AvalancheDatasetType = AvalancheDatasetType.UNDEFINED) \
        -> GenericCLScenario:
    """
    Creates a generic scenario given a list of datasets and the respective task
    labels. Each training dataset will be considered as a separate training
    experience. Contents of the datasets will not be changed, including the
    targets.

    When loading the datasets from a set of fixed file lists, consider using
    the :func:`filelist_scenario` helper method instead. Also, loading from
    a list of paths is supported through the :func:`paths_scenario` helper.

    In its base form, this function accepts a list of test datasets that must
    contain the same amount of datasets of the training list.
    Those pairs are then used to create the "past", "cumulative"
    (a.k.a. growing) and "future" test sets. However, in certain Continual
    Learning scenarios only the concept of "complete" test set makes sense. In
    that case, the ``complete_test_set_only`` parameter should be set to True
    (see the parameter description for more info).

    Beware that pattern transformations must already be included in the
    datasets (when needed).

    :param train_dataset_list: A list of training datasets.
    :param test_dataset_list: A list of test datasets.
    :param task_labels: A list of task labels. Must contain the same amount of
        elements of the ``train_dataset_list`` parameter. For
        Single-Incremental-Task (a.k.a. Task-Free) scenarios, this is usually
        a list of zeros. For Multi Task scenario, this is usually a list of
        ascending task labels (starting from 0).
    :param complete_test_set_only: If True, only the complete test set will
        be returned by the scenario. This means that the ``test_dataset_list``
        parameter must be a list with a single element (the complete test
        set). Defaults to False, which means that ``train_dataset_list`` and
        ``test_dataset_list`` must contain the same amount of datasets.
    :param dataset_type: The type of the dataset. Defaults to UNDEFINED, in
        which case the type will be obtained from the input datasets. If input
        datasets are not instances of :class:`AvalancheDataset`, the type
        UNDEFINED will be used.

    :returns: A properly initialized :class:`GenericCLScenario` instance.
    """
    # Thin wrapper: all the work happens in the generic creator below.
    return create_multi_dataset_generic_scenario(
        train_dataset_list=train_dataset_list,
        test_dataset_list=test_dataset_list,
        task_labels=task_labels,
        complete_test_set_only=complete_test_set_only,
        dataset_type=dataset_type)
def filelist_scenario(
        root: Union[str, Path],
        train_file_lists: Sequence[Union[str, Path]],
        test_file_lists: Union[Union[str, Path], Sequence[Union[str, Path]]],
        task_labels: Sequence[int],
        *,
        complete_test_set_only: bool = False,
        train_transform=None, train_target_transform=None,
        eval_transform=None, eval_target_transform=None) -> GenericCLScenario:
    """
    Creates a generic scenario given a list of filelists and the respective task
    labels. A separate dataset will be created for each filelist and each of
    those training datasets will be considered a separate training experience.

    In its base form, this function accepts a list of filelists for the test
    datasets that must contain the same amount of elements of the training
    list. Those pairs of datasets are then used to create the "past",
    "cumulative" (a.k.a. growing) and "future" test sets. However, in certain
    Continual Learning scenarios only the concept of "complete" test set makes
    sense. In that case, the ``complete_test_set_only`` should be set to True
    (see the parameter description for more info).

    This helper function is the best choice when loading Caffe-style datasets
    based on filelists.

    The resulting benchmark instance and the intermediate datasets used to
    populate it will be of type CLASSIFICATION.

    :param root: The root path of the dataset.
    :param train_file_lists: A list of filelists describing the
        paths of the training patterns for each experience.
    :param test_file_lists: A list of filelists describing the
        paths of the test patterns for each experience. Alternatively, can be
        a plain string or :class:`Path` object (a single filelist describing
        the complete test set, see ``complete_test_set_only``).
    :param task_labels: A list of task labels. Must contain the same amount of
        elements of the ``train_file_lists`` parameter. For
        Single-Incremental-Task (a.k.a. Task-Free) scenarios, this is usually
        a list of zeros. For Multi Task scenario, this is usually a list of
        ascending task labels (starting from 0).
    :param complete_test_set_only: If True, only the complete test set will
        be returned by the scenario. This means that the ``test_file_lists``
        parameter must be a list with a single element (the complete test
        set). Defaults to False, which means that ``train_file_lists`` and
        ``test_file_lists`` must contain the same amount of filelists paths.
    :param train_transform: The transformation to apply to the training data,
        e.g. a random crop, a normalization or a concatenation of different
        transformations (see torchvision.transform documentation for a
        comprehensive list of possible transformations). Defaults to None.
    :param train_target_transform: The transformation to apply to training
        patterns targets. Defaults to None.
    :param eval_transform: The transformation to apply to the test data,
        e.g. a random crop, a normalization or a concatenation of different
        transformations (see torchvision.transform documentation for a
        comprehensive list of possible transformations). Defaults to None.
    :param eval_target_transform: The transformation to apply to test
        patterns targets. Defaults to None.

    :returns: A properly initialized :class:`GenericCLScenario` instance.
    """
    # Thin wrapper: all the work happens in the filelist-based creator below.
    return create_generic_scenario_from_filelists(
        root=root,
        train_file_lists=train_file_lists,
        test_file_lists=test_file_lists,
        task_labels=task_labels,
        complete_test_set_only=complete_test_set_only,
        train_transform=train_transform,
        train_target_transform=train_target_transform,
        eval_transform=eval_transform,
        eval_target_transform=eval_target_transform)
# A single pattern: its file path paired with its integer class label.
FileAndLabel = Tuple[Union[str, Path], int]
def paths_scenario(
train_list_of_files: Sequence[Sequence[FileAndLabel]],
test_list_of_files: Union[Sequence[FileAndLabel],
Sequence[Sequence[FileAndLabel]]],
task_labels: Sequence[int],
*,
complete_test_set_only: bool = False,
train_transform=None, train_target_transform=None,
eval_transform=None, eval_target_transform=None,
dataset_type: AvalancheDatasetType = AvalancheDatasetType.UNDEFINED) \
-> GenericCLScenario:
"""
Creates a generic scenario given a list of files and class labels.
A separate dataset will be created for each list and each of
those training datasets will be considered a separate training experience.
This is very similar to `filelist_scenario`, with the main difference being
that `filelist_scenario` accepts, for each experience, a file list formatted
in Caffe-style. On the contrary, this accepts a list of tuples where each
tuple contains two elements: the full path to the pattern and its label.
Optionally, the tuple may contain a third element describing the bounding
box of the element to crop. This last bounding box may be useful when trying
to extract the part of the image depicting the desired element.
In its base form, this function accepts a list of lists of tuples for the
test datsets that must contain the same amount of lists of the training
list. Those pairs of datasets are then used to create the "past",
"cumulative" (a.k.a. growing) and "future" test sets. However, in certain
Continual Learning scenarios only the concept of "complete" test set makes
sense. In that case, the ``complete_test_set_only`` should be set to True
(see the parameter description for more info).
The label of each pattern doesn't have to be an int.
:param train_list_of_files: A list of lists. Each | |
"""Tests for Samsung TV config flow."""
from unittest.mock import call, patch
from asynctest import mock
import pytest
from samsungctl.exceptions import AccessDenied, UnhandledResponse
from samsungtvws.exceptions import ConnectionFailure
from websocket import WebSocketProtocolException
from homeassistant.components.samsungtv.const import (
CONF_MANUFACTURER,
CONF_MODEL,
DOMAIN,
)
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_MANUFACTURER,
ATTR_UPNP_MODEL_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.const import CONF_HOST, CONF_ID, CONF_METHOD, CONF_NAME
# Minimal user-supplied input for starting a config flow manually.
MOCK_USER_DATA = {CONF_HOST: "fake_host", CONF_NAME: "fake_name"}
# SSDP discovery payload where the friendly name carries the "[TV]" prefix
# and the UDN carries the "uuid:" prefix.
MOCK_SSDP_DATA = {
    ATTR_SSDP_LOCATION: "https://fake_host:12345/test",
    ATTR_UPNP_FRIENDLY_NAME: "[TV]fake_name",
    ATTR_UPNP_MANUFACTURER: "fake_manufacturer",
    ATTR_UPNP_MODEL_NAME: "fake_model",
    ATTR_UPNP_UDN: "uuid:fake_uuid",
}
# SSDP discovery payload without the optional "[TV]"/"uuid:" prefixes.
MOCK_SSDP_DATA_NOPREFIX = {
    ATTR_SSDP_LOCATION: "http://fake2_host:12345/test",
    ATTR_UPNP_FRIENDLY_NAME: "fake2_name",
    ATTR_UPNP_MANUFACTURER: "fake2_manufacturer",
    ATTR_UPNP_MODEL_NAME: "fake2_model",
    ATTR_UPNP_UDN: "fake2_uuid",
}
# Expected arguments of the legacy samsungctl Remote autodetection call
# (compared against call args in test_autodetect_auth_missing below).
AUTODETECT_LEGACY = {
    "name": "HomeAssistant",
    "description": "HomeAssistant",
    "id": "ha.component.samsung",
    "method": "legacy",
    "port": None,
    "host": "fake_host",
    "timeout": 31,
}
@pytest.fixture(name="remote")
def remote_fixture():
    """Patch the samsungctl Remote."""
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote"
    ) as remote_class, patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ) as socket_class:
        # The bridge uses Remote as a context manager, so the fake needs
        # __enter__/__exit__ as well.
        fake_remote = mock.Mock()
        fake_remote.__enter__ = mock.Mock()
        fake_remote.__exit__ = mock.Mock()
        fake_socket = mock.Mock()
        remote_class.return_value = fake_remote
        socket_class.return_value = fake_socket
        yield fake_remote
@pytest.fixture(name="remotews")
def remotews_fixture():
    """Patch the samsungtvws SamsungTVWS."""
    with patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVWS"
    ) as remotews_class, patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ) as socket_class:
        # The bridge uses SamsungTVWS as a context manager, so the fake
        # needs __enter__/__exit__ as well.
        fake_remotews = mock.Mock()
        fake_remotews.__enter__ = mock.Mock()
        fake_remotews.__exit__ = mock.Mock()
        fake_socket = mock.Mock()
        remotews_class.return_value = fake_remotews
        socket_class.return_value = fake_socket
        yield fake_remotews
async def test_user_legacy(hass, remote):
    """Test a user-started flow that creates a legacy-method entry."""
    # show form
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # entry was added
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input=MOCK_USER_DATA
    )
    # legacy tv entry created
    assert result["type"] == "create_entry"
    assert result["title"] == "fake_name"
    assert result["data"][CONF_HOST] == "fake_host"
    assert result["data"][CONF_NAME] == "fake_name"
    assert result["data"][CONF_METHOD] == "legacy"
    # No SSDP discovery took place, so device info fields stay empty.
    assert result["data"][CONF_MANUFACTURER] is None
    assert result["data"][CONF_MODEL] is None
    assert result["data"][CONF_ID] is None
async def test_user_websocket(hass, remotews):
    """Test a user-started flow that falls back to the websocket method
    when the legacy Remote raises OSError.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom")
    ):
        # show form
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}
        )
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        # entry was added
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input=MOCK_USER_DATA
        )
        # websocket tv entry created
        assert result["type"] == "create_entry"
        assert result["title"] == "fake_name"
        assert result["data"][CONF_HOST] == "fake_host"
        assert result["data"][CONF_NAME] == "fake_name"
        assert result["data"][CONF_METHOD] == "websocket"
        # No SSDP discovery took place, so device info fields stay empty.
        assert result["data"][CONF_MANUFACTURER] is None
        assert result["data"][CONF_MODEL] is None
        assert result["data"][CONF_ID] is None
async def test_user_legacy_missing_auth(hass):
    """Test that AccessDenied from the legacy bridge aborts with auth_missing."""
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote",
        side_effect=AccessDenied("Boom"),
    ), patch("homeassistant.components.samsungtv.config_flow.socket"):
        # legacy device missing authentication
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
        )
        assert result["type"] == "abort"
        assert result["reason"] == "auth_missing"
async def test_user_legacy_not_supported(hass):
    """Test that UnhandledResponse from the legacy bridge aborts with
    not_supported.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote",
        side_effect=UnhandledResponse("Boom"),
    ), patch("homeassistant.components.samsungtv.config_flow.socket"):
        # legacy device not supported
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_supported"
async def test_user_websocket_not_supported(hass):
    """Test that WebSocketProtocolException from the websocket bridge
    (after the legacy bridge fails with OSError) aborts with not_supported.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVWS",
        side_effect=WebSocketProtocolException("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ):
        # websocket device not supported
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_supported"
async def test_user_not_successful(hass):
    """Test that OSError from both bridges aborts with not_successful."""
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVWS",
        side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_successful"
async def test_user_not_successful_2(hass):
    """Test that ConnectionFailure from the websocket bridge aborts with
    not_successful.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVWS",
        side_effect=ConnectionFailure("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_successful"
async def test_user_already_configured(hass, remote):
    """Test that a second user flow for the same data aborts as
    already_configured.
    """
    # entry was added
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert result["type"] == "create_entry"
    # failed as already configured
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_ssdp(hass, remote):
    """Test starting a flow from discovery."""
    # confirm to add the entry
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    # entry was added
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input="whatever"
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "fake_model"
    assert result["data"][CONF_HOST] == "fake_host"
    assert result["data"][CONF_NAME] == "Samsung fake_model"
    assert result["data"][CONF_MANUFACTURER] == "fake_manufacturer"
    assert result["data"][CONF_MODEL] == "fake_model"
    # "uuid:" prefix stripped from the UDN in MOCK_SSDP_DATA.
    assert result["data"][CONF_ID] == "fake_uuid"
async def test_ssdp_noprefix(hass, remote):
    """Test starting a flow from discovery without prefixes."""
    # confirm to add the entry
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA_NOPREFIX
    )
    assert result["type"] == "form"
    assert result["step_id"] == "confirm"
    # entry was added
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input="whatever"
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "fake2_model"
    assert result["data"][CONF_HOST] == "fake2_host"
    assert result["data"][CONF_NAME] == "Samsung fake2_model"
    assert result["data"][CONF_MANUFACTURER] == "fake2_manufacturer"
    assert result["data"][CONF_MODEL] == "fake2_model"
    assert result["data"][CONF_ID] == "fake2_uuid"
async def test_ssdp_legacy_missing_auth(hass):
    """Test that an SSDP flow aborts with auth_missing when the legacy
    bridge raises AccessDenied at confirm time.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote",
        side_effect=AccessDenied("Boom"),
    ), patch("homeassistant.components.samsungtv.config_flow.socket"):
        # confirm to add the entry
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
        )
        assert result["type"] == "form"
        assert result["step_id"] == "confirm"
        # missing authentication
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input="whatever"
        )
        assert result["type"] == "abort"
        assert result["reason"] == "auth_missing"
async def test_ssdp_legacy_not_supported(hass):
    """Test that an SSDP flow aborts with not_supported when the legacy
    bridge raises UnhandledResponse at confirm time.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote",
        side_effect=UnhandledResponse("Boom"),
    ), patch("homeassistant.components.samsungtv.config_flow.socket"):
        # confirm to add the entry
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
        )
        assert result["type"] == "form"
        assert result["step_id"] == "confirm"
        # device not supported
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input="whatever"
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_supported"
async def test_ssdp_websocket_not_supported(hass):
    """Test that an SSDP flow aborts with not_supported when the websocket
    bridge raises WebSocketProtocolException (legacy already failing).
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVWS",
        side_effect=WebSocketProtocolException("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ):
        # confirm to add the entry
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
        )
        assert result["type"] == "form"
        assert result["step_id"] == "confirm"
        # device not supported
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input="whatever"
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_supported"
async def test_ssdp_not_successful(hass):
    """Test that an SSDP flow aborts with not_successful when both bridges
    raise OSError.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVWS",
        side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ):
        # confirm to add the entry
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
        )
        assert result["type"] == "form"
        assert result["step_id"] == "confirm"
        # device not found
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input="whatever"
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_successful"
async def test_ssdp_not_successful_2(hass):
    """Test that an SSDP flow aborts with not_successful when the websocket
    bridge raises ConnectionFailure.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVWS",
        side_effect=ConnectionFailure("Boom"),
    ), patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ):
        # confirm to add the entry
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
        )
        assert result["type"] == "form"
        assert result["step_id"] == "confirm"
        # device not found
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input="whatever"
        )
        assert result["type"] == "abort"
        assert result["reason"] == "not_successful"
async def test_ssdp_already_in_progress(hass, remote):
"""Test starting a flow from discovery twice."""
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# failed as already in progress
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_ssdp_already_configured(hass, remote):
    """Test starting a flow from discovery when already configured."""
    # set up the entry via a user flow first; device info is not known yet
    user_flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert user_flow["type"] == "create_entry"
    entry = user_flow["result"]
    assert entry.data[CONF_MANUFACTURER] is None
    assert entry.data[CONF_MODEL] is None
    assert entry.data[CONF_ID] is None
    # discovering the same device aborts ...
    ssdp_flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert ssdp_flow["type"] == "abort"
    assert ssdp_flow["reason"] == "already_configured"
    # ... but still refreshes the stored device info on the existing entry
    assert entry.data[CONF_MANUFACTURER] == "fake_manufacturer"
    assert entry.data[CONF_MODEL] == "fake_model"
    assert entry.data[CONF_ID] == "fake_uuid"
async def test_autodetect_websocket(hass, remote, remotews):
    """Test for send key with autodetection of protocol.

    The legacy bridge fails with OSError, so autodetection must fall through
    to the websocket bridge and create the entry with method "websocket".
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom"),
    ), patch(
        # NOTE: bound to a fresh name so it no longer shadows the `remotews`
        # fixture parameter — the assertions below target this local mock.
        "homeassistant.components.samsungtv.bridge.SamsungTVWS"
    ) as remotews_mock:
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
        )
        assert result["type"] == "create_entry"
        assert result["data"][CONF_METHOD] == "websocket"
        # exactly one websocket connection attempt with the expected arguments
        assert remotews_mock.call_count == 1
        assert remotews_mock.call_args_list == [
            call(
                host="fake_host",
                name="HomeAssistant",
                port=8001,
                timeout=31,
                token=None,
            )
        ]
async def test_autodetect_auth_missing(hass, remote):
    """Test for send key with autodetection of protocol.

    AccessDenied from the legacy bridge must abort the flow with
    reason "auth_missing" after a single connection attempt.
    """
    with patch(
        "homeassistant.components.samsungtv.bridge.Remote",
        side_effect=[AccessDenied("Boom")],
        # NOTE: bound to a fresh name so it no longer shadows the `remote`
        # fixture parameter — the assertions below target this local mock.
    ) as remote_mock:
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
        )
        assert result["type"] == "abort"
        assert result["reason"] == "auth_missing"
        assert remote_mock.call_count == 1
        assert remote_mock.call_args_list == [call(AUTODETECT_LEGACY)]
async def test_autodetect_not_supported(hass, remote):
"""Test for send key with autodetection | |
portfolio_cfg['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = portfolio_cfg['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
close,
entries,
exits,
size,
price,
size_type,
direction,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
lock_cash,
allow_partial,
raise_reject,
log,
accumulate,
conflict_mode,
close_first,
val_price
)
keep_raw = [False] + [True] * (len(broadcastable_args) - 1)
broadcast_kwargs = merge_dicts(dict(require_kwargs=dict(requirements='W')), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs, keep_raw=keep_raw)
close = broadcasted_args[0]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_any_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform calculation
order_records, log_records = nb.simulate_from_signals_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
*broadcasted_args[1:],
auto_call_seq,
update_value,
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
@classmethod
def from_orders(cls: tp.Type[PortfolioT],
close: tp.ArrayLike,
size: tp.ArrayLike,
size_type: tp.Optional[tp.ArrayLike] = None,
direction: tp.Optional[tp.ArrayLike] = None,
price: tp.Optional[tp.ArrayLike] = None,
fees: tp.Optional[tp.ArrayLike] = None,
fixed_fees: tp.Optional[tp.ArrayLike] = None,
slippage: tp.Optional[tp.ArrayLike] = None,
min_size: tp.Optional[tp.ArrayLike] = None,
max_size: tp.Optional[tp.ArrayLike] = None,
reject_prob: tp.Optional[tp.ArrayLike] = None,
lock_cash: tp.Optional[tp.ArrayLike] = None,
allow_partial: tp.Optional[tp.ArrayLike] = None,
raise_reject: tp.Optional[tp.ArrayLike] = None,
log: tp.Optional[tp.ArrayLike] = None,
val_price: tp.Optional[tp.ArrayLike] = None,
init_cash: tp.Optional[tp.ArrayLike] = None,
cash_sharing: tp.Optional[bool] = None,
call_seq: tp.Optional[tp.ArrayLike] = None,
update_value: tp.Optional[tp.ArrayLike] = None,
max_orders: tp.Optional[int] = None,
max_logs: tp.Optional[int] = None,
seed: tp.Optional[int] = None,
group_by: tp.GroupByLike = None,
broadcast_kwargs: tp.KwargsLike = None,
wrapper_kwargs: tp.KwargsLike = None,
freq: tp.Optional[tp.FrequencyLike] = None,
**kwargs) -> PortfolioT:
"""Simulate portfolio from orders.
Starting with initial cash `init_cash`, orders `size` for `price`.
Args:
close (array_like): Reference price, such as close.
Will broadcast.
Will be used for calculating unrealized P&L and portfolio value.
size (float or array_like): Size to order.
Will broadcast.
Behavior depends upon `size_type` and `direction`.
For any fixed size:
* Set to any number to buy/sell some fixed amount or value.
Longs are limited by cash in the account, while shorts are only limited if `lock_cash`.
* Set to `np.inf` to buy for all cash, or `-np.inf` to sell for all free cash.
If `direction` is not `all`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
For any target size:
* Set to any number to buy/sell an amount relative to the current position or value.
* Set to 0 to close the current position.
* Set to `np.nan` to skip.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
Will broadcast.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single direction.
!!! warning
Be cautious using `SizeType.Percent` with `call_seq` set to 'auto'.
To execute sell orders before buy orders, the value of each order in the group
needs to be approximated in advance. But since `SizeType.Percent` depends
upon the cash balance, which cannot be calculated in advance since it may change
after each order, this can yield a non-optimal call sequence.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
Will broadcast.
price (array_like of float): Order price.
Defaults to `close`. Will broadcast.
fees (float or array_like): Fees in percentage of the order value.
Will broadcast. Note that 0.01 = 1%.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
lock_cash (bool or array_like): Whether to lock cash when shorting.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
val_price (array_like of float): Asset valuation price.
Defaults to `price`. Will broadcast.
Used at the time of decision making to calculate value of each asset in the group,
for example, to convert target value into target amount.
!!! note
Make sure to use timestamp for `val_price` that comes before timestamps of
all orders in the group with cash sharing (previous `close` for example),
otherwise you're cheating yourself.
init_cash (InitCashMode, float or array_like of float): Initial capital.
By default, will broadcast to the number of columns.
If cash sharing is enabled, will broadcast to the number of groups.
See `vectorbt.portfolio.enums.InitCashMode` to find optimal initial cash.
!!! note
Mode `InitCashMode.AutoAlign` is applied after the portfolio is initialized
to set the same initial cash for all columns/groups. Changing grouping
will change the initial cash, so be aware when indexing.
cash_sharing (bool): Whether to share cash within the same group.
!!! warning
Introduces cross-asset dependencies.
This method presumes that in a group of assets that share the same capital all
orders will be executed within the same tick and retain their price regardless
of their position in the queue, even though they depend upon each other and thus
cannot be executed in parallel.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
Each value in this sequence should indicate the position of column in the group to
call next. Processing of `call_seq` goes always from left to right.
For example, `[2, 0, 1]` would first call column 'c', then 'a', and finally 'b'.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
If `CallSeqType.Auto` selected, rearranges calls dynamically based on order value.
Calculates value of all orders per row and group, and sorts them by this value.
Sell orders will be executed first to release funds for buy orders.
!!! warning
`CallSeqType.Auto` should be used with caution:
* It not only presumes that order prices are known beforehand, but also that
orders can be executed in arbitrary order and still retain their price.
In reality, this is hardly the case: after processing one asset, some time
has passed and the price for other assets might have already changed.
* Even if you're able to specify a slippage large enough to compensate for
this behavior, slippage itself should depend upon execution order.
This method doesn't let you do that.
* If one order is rejected, it still may execute next orders and possibly
leave them without required funds.
For more control, use `Portfolio.from_order_func`.
update_value (bool): Whether to update group value after each filled order.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any of the `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency | |
double *);
/* BLAS Level 1 Fortran-interface prototypes (double / complex).
 * Fixes vs. previous revision:
 *  - removed duplicate dswap_/cswap_/zswap_ declarations (harmless but redundant);
 *  - i?amax_sub_ last parameter is an OUTPUT (the found index) and must not be
 *    const, matching the Netlib CBLAS f77 interface.
 */
void drotg_(double *,double *,double *,double *);
void drotm_( const int*, double *, const int*, double *, const int*, const double *);
void drotmg_(double *,double *,double *,const double *, double *);
void dswap_( const int*, double *, const int*, double *, const int*);
void dcopy_( const int*, const double *, const int*, double *, const int*);
void daxpy_( const int*, const double *, const double *, const int*, double *, const int*);
double ddot_(const int*, const double *, const int*, const double *, const int*);
void dsdot_sub_(const int*, const float *, const int*, const float *, const int*, double *);
void ddot_sub_( const int*, const double *, const int*, const double *, const int*, double *);
void dscal_( const int*, const double *, double *, const int*);
void dnrm2_sub_( const int*, const double *, const int*, double *);
void dasum_sub_( const int*, const double *, const int*, double *);
void idamax_sub_( const int*, const double * , const int*, int*);
/* Single Complex Precision */
void cswap_( const int*, void *, const int*, void *, const int*);
void ccopy_( const int*, const void *, const int*, void *, const int*);
void caxpy_( const int*, const void *, const void *, const int*, void *, const int*);
void cdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);
void cdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);
void cscal_( const int*, const void *, void *, const int*);
void icamax_sub_( const int*, const void *, const int*, int*);
void csscal_( const int*, const float *, void *, const int*);
void scnrm2_sub_( const int*, const void *, const int*, float *);
void scasum_sub_( const int*, const void *, const int*, float *);
/* Double Complex Precision */
void zswap_( const int*, void *, const int*, void *, const int*);
void zcopy_( const int*, const void *, const int*, void *, const int*);
void zaxpy_( const int*, const void *, const void *, const int*, void *, const int*);
void zdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);
void zdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);
void zdscal_( const int*, const double *, void *, const int*);
void zscal_( const int*, const void *, void *, const int*);
void dznrm2_sub_( const int*, const void *, const int*, double *);
void dzasum_sub_( const int*, const void *, const int*, double *);
void izamax_sub_( const int*, const void *, const int*, int*);
/***********/
/* Level 2 */
/***********/
/* Fortran-interface BLAS Level 2 (matrix-vector) prototypes.
 * Per the Netlib BLAS reference, the leading char* parameters are
 * single-character Fortran flags: TRANS ('N'/'T'/'C'), UPLO ('U'/'L'),
 * DIAG ('U'/'N'); the trailing const int* parameters are increments/dims. */
/* Single Precision */
void sgemv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);
void sgbmv_(char*, const int*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);
void ssymv_(char*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);
void ssbmv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);
void sspmv_(char*, const int*, const float *, const float *, const float *, const int*, const float *, float *, const int*);
void strmv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);
void stbmv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);
void strsv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);
void stbsv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);
void stpmv_( char*, char*, char*, const int*, const float *, float *, const int*);
void stpsv_( char*, char*, char*, const int*, const float *, float *, const int*);
void sger_( const int*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);
void ssyr_(char*, const int*, const float *, const float *, const int*, float *, const int*);
void sspr_(char*, const int*, const float *, const float *, const int*, float *);
void sspr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *);
void ssyr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);
/* Double Precision */
void dgemv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);
void dgbmv_(char*, const int*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);
void dsymv_(char*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);
void dsbmv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);
void dspmv_(char*, const int*, const double *, const double *, const double *, const int*, const double *, double *, const int*);
void dtrmv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);
void dtbmv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);
void dtrsv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);
void dtbsv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);
void dtpmv_( char*, char*, char*, const int*, const double *, double *, const int*);
void dtpsv_( char*, char*, char*, const int*, const double *, double *, const int*);
void dger_( const int*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);
void dsyr_(char*, const int*, const double *, const double *, const int*, double *, const int*);
void dspr_(char*, const int*, const double *, const double *, const int*, double *);
void dspr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *);
void dsyr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);
/* Single Complex Precision */
/* complex values are passed as opaque void* pairs of floats */
void cgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);
void cgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);
void chemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);
void chbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);
void chpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);
void ctrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);
void ctbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);
void | |
# coding: utf-8
"""
3Di API
3Di simulation API (latest stable version: v3) Framework release: 2.9.0 3Di core release: 2.2.2 deployed on: 11:01AM (UTC) on January 11, 2022 # noqa: E501
The version of the OpenAPI document: v3
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import logging
import pprint
import re # noqa: F401
import six
from threedi_api_client.openapi.configuration import Configuration
logger = logging.getLogger(__name__)
class SchematisationRevision(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'url': 'str',
'id': 'int',
'created': 'datetime',
'schematisation': 'str',
'schematisation_id': 'int',
'number': 'int',
'sqlite': 'Sqlite',
'rasters': 'list[RevisionRaster]',
'archived': 'datetime',
'commit_date': 'datetime',
'commit_user': 'str',
'commit_first_name': 'str',
'commit_last_name': 'str',
'commit_message': 'str',
'is_valid': 'bool'
}
attribute_map = {
'url': 'url',
'id': 'id',
'created': 'created',
'schematisation': 'schematisation',
'schematisation_id': 'schematisation_id',
'number': 'number',
'sqlite': 'sqlite',
'rasters': 'rasters',
'archived': 'archived',
'commit_date': 'commit_date',
'commit_user': 'commit_user',
'commit_first_name': 'commit_first_name',
'commit_last_name': 'commit_last_name',
'commit_message': 'commit_message',
'is_valid': 'is_valid'
}
def __init__(self, url=None, id=None, created=None, schematisation=None, schematisation_id=None, number=None, sqlite=None, rasters=None, archived=None, commit_date=None, commit_user=None, commit_first_name=None, commit_last_name=None, commit_message=None, is_valid=None, local_vars_configuration=None): # noqa: E501
"""SchematisationRevision - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._url = None
self._id = None
self._created = None
self._schematisation = None
self._schematisation_id = None
self._number = None
self._sqlite = None
self._rasters = None
self._archived = None
self._commit_date = None
self._commit_user = None
self._commit_first_name = None
self._commit_last_name = None
self._commit_message = None
self._is_valid = None
self.discriminator = None
if url is not None:
self.url = url
if id is not None:
self.id = id
if created is not None:
self.created = created
if schematisation is not None:
self.schematisation = schematisation
if schematisation_id is not None:
self.schematisation_id = schematisation_id
if number is not None:
self.number = number
if sqlite is not None:
self.sqlite = sqlite
if rasters is not None:
self.rasters = rasters
if archived is not None:
self.archived = archived
if commit_date is not None:
self.commit_date = commit_date
if commit_user is not None:
self.commit_user = commit_user
if commit_first_name is not None:
self.commit_first_name = commit_first_name
if commit_last_name is not None:
self.commit_last_name = commit_last_name
if commit_message is not None:
self.commit_message = commit_message
if is_valid is not None:
self.is_valid = is_valid
    @property
    def url(self) -> str:
        """Gets the url of this SchematisationRevision.  # noqa: E501

        :return: The url of this SchematisationRevision.  # noqa: E501
        :rtype: str
        """
        return self._url

    @url.setter
    def url(self, url: str):
        """Sets the url of this SchematisationRevision.

        :param url: The url of this SchematisationRevision.  # noqa: E501
        :type: str
        """
        self._url = url

    @property
    def id(self) -> int:
        """Gets the id of this SchematisationRevision.  # noqa: E501

        :return: The id of this SchematisationRevision.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id: int):
        """Sets the id of this SchematisationRevision.

        :param id: The id of this SchematisationRevision.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def created(self) -> "datetime":
        """Gets the created of this SchematisationRevision.  # noqa: E501

        :return: The created of this SchematisationRevision.  # noqa: E501
        :rtype: datetime
        """
        return self._created

    @created.setter
    def created(self, created: "datetime"):
        """Sets the created of this SchematisationRevision.

        :param created: The created of this SchematisationRevision.  # noqa: E501
        :type: datetime
        """
        self._created = created

    @property
    def schematisation(self) -> str:
        """Gets the schematisation of this SchematisationRevision.  # noqa: E501

        :return: The schematisation of this SchematisationRevision.  # noqa: E501
        :rtype: str
        """
        return self._schematisation

    @schematisation.setter
    def schematisation(self, schematisation: str):
        """Sets the schematisation of this SchematisationRevision.

        :param schematisation: The schematisation of this SchematisationRevision.  # noqa: E501
        :type: str
        """
        self._schematisation = schematisation

    @property
    def schematisation_id(self) -> int:
        """Gets the schematisation_id of this SchematisationRevision.  # noqa: E501

        :return: The schematisation_id of this SchematisationRevision.  # noqa: E501
        :rtype: int
        """
        return self._schematisation_id

    @schematisation_id.setter
    def schematisation_id(self, schematisation_id: int):
        """Sets the schematisation_id of this SchematisationRevision.

        :param schematisation_id: The schematisation_id of this SchematisationRevision.  # noqa: E501
        :type: int
        """
        self._schematisation_id = schematisation_id
    @property
    def number(self) -> int:
        """Gets the number of this SchematisationRevision.  # noqa: E501

        :return: The number of this SchematisationRevision.  # noqa: E501
        :rtype: int
        """
        return self._number

    @number.setter
    def number(self, number: int):
        """Sets the number of this SchematisationRevision.

        :param number: The number of this SchematisationRevision.  # noqa: E501
        :type: int
        """
        self._number = number

    @property
    def sqlite(self) -> "Sqlite":
        """Gets the sqlite of this SchematisationRevision.  # noqa: E501

        :return: The sqlite of this SchematisationRevision.  # noqa: E501
        :rtype: Sqlite
        """
        return self._sqlite

    @sqlite.setter
    def sqlite(self, sqlite: "Sqlite"):
        """Sets the sqlite of this SchematisationRevision.

        :param sqlite: The sqlite of this SchematisationRevision.  # noqa: E501
        :type: Sqlite
        """
        self._sqlite = sqlite

    @property
    def rasters(self) -> "list[RevisionRaster]":
        """Gets the rasters of this SchematisationRevision.  # noqa: E501

        :return: The rasters of this SchematisationRevision.  # noqa: E501
        :rtype: list[RevisionRaster]
        """
        return self._rasters

    @rasters.setter
    def rasters(self, rasters: "list[RevisionRaster]"):
        """Sets the rasters of this SchematisationRevision.

        :param rasters: The rasters of this SchematisationRevision.  # noqa: E501
        :type: list[RevisionRaster]
        """
        self._rasters = rasters

    @property
    def archived(self) -> "datetime":
        """Gets the archived of this SchematisationRevision.  # noqa: E501

        Timestamp type per ``openapi_types`` ('archived': 'datetime').

        :return: The archived of this SchematisationRevision.  # noqa: E501
        :rtype: datetime
        """
        return self._archived

    @archived.setter
    def archived(self, archived: "datetime"):
        """Sets the archived of this SchematisationRevision.

        :param archived: The archived of this SchematisationRevision.  # noqa: E501
        :type: datetime
        """
        self._archived = archived
    @property
    def commit_date(self) -> "datetime":
        """Gets the commit_date of this SchematisationRevision.  # noqa: E501

        :return: The commit_date of this SchematisationRevision.  # noqa: E501
        :rtype: datetime
        """
        return self._commit_date

    @commit_date.setter
    def commit_date(self, commit_date: "datetime"):
        """Sets the commit_date of this SchematisationRevision.

        :param commit_date: The commit_date of this SchematisationRevision.  # noqa: E501
        :type: datetime
        """
        self._commit_date = commit_date

    @property
    def commit_user(self) -> str:
        """Gets the commit_user of this SchematisationRevision.  # noqa: E501

        The username of a user  # noqa: E501

        :return: The commit_user of this SchematisationRevision.  # noqa: E501
        :rtype: str
        """
        return self._commit_user

    @commit_user.setter
    def commit_user(self, commit_user: str):
        """Sets the commit_user of this SchematisationRevision.

        The username of a user  # noqa: E501

        :param commit_user: The commit_user of this SchematisationRevision.  # noqa: E501
        :type: str
        :raises ValueError: if client-side validation is on and the value does
            not match ``^[\\w.@+-]+$``.
        """
        # Generated client-side check: word characters plus . @ + - only.
        if (self.local_vars_configuration.client_side_validation and
                commit_user is not None and not re.search(r'^[\w.@+-]+$', commit_user)):  # noqa: E501
            raise ValueError(r"Invalid value for `commit_user`, must be a follow pattern or equal to `/^[\w.@+-]+$/`")  # noqa: E501
        self._commit_user = commit_user

    @property
    def commit_first_name(self) -> str:
        """Gets the commit_first_name of this SchematisationRevision.  # noqa: E501

        :return: The commit_first_name of this SchematisationRevision.  # noqa: E501
        :rtype: str
        """
        return self._commit_first_name

    @commit_first_name.setter
    def commit_first_name(self, commit_first_name: str):
        """Sets the commit_first_name of this SchematisationRevision.

        :param commit_first_name: The commit_first_name of this SchematisationRevision.  # noqa: E501
        :type: str
        """
        self._commit_first_name = commit_first_name

    @property
    def commit_last_name(self) -> str:
        """Gets the commit_last_name of this SchematisationRevision.  # noqa: E501

        :return: The commit_last_name of this SchematisationRevision.  # noqa: E501
        :rtype: str
        """
        return self._commit_last_name

    @commit_last_name.setter
    def commit_last_name(self, commit_last_name: str):
        """Sets the commit_last_name of this SchematisationRevision.

        :param commit_last_name: The commit_last_name of this SchematisationRevision.  # noqa: E501
        :type: str
        """
        self._commit_last_name = commit_last_name

    @property
    def commit_message(self) -> str:
        """Gets the commit_message of this SchematisationRevision.  # noqa: E501

        :return: The commit_message of this SchematisationRevision.  # noqa: E501
        :rtype: str
        """
        return self._commit_message

    @commit_message.setter
    def commit_message(self, commit_message: str):
        """Sets the commit_message of this SchematisationRevision.

        :param commit_message: The commit_message of this SchematisationRevision.  # noqa: E501
        :type: str
        :raises ValueError: if client-side validation is on and the message
            is empty.
        """
        # Generated client-side check: a non-empty message is required.
        if (self.local_vars_configuration.client_side_validation and
                commit_message is not None and len(commit_message) < 1):
            raise ValueError("Invalid value for `commit_message`, length must be greater than or equal to `1`")  # noqa: E501
        self._commit_message = commit_message

    @property
    def is_valid(self) -> bool:
        """Gets the is_valid of this SchematisationRevision.  # noqa: E501

        :return: The is_valid of this SchematisationRevision.  # noqa: E501
        :rtype: bool
        """
        return self._is_valid

    @is_valid.setter
    def is_valid(self, is_valid: bool):
        """Sets the is_valid of this SchematisationRevision.

        :param is_valid: The is_valid of this SchematisationRevision.  # noqa: E501
        :type: bool
        """
        self._is_valid = is_valid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif | |
return pulumi.get(self, "role")
    @property
    @pulumi.getter
    def rules(self) -> Optional[Sequence['outputs.BucketV2ReplicationConfigurationRule']]:
        """
        The rules managing the replication.  Returns ``None`` when the
        provider did not report any rules.
        """
        return pulumi.get(self, "rules")
@pulumi.output_type
class BucketV2ReplicationConfigurationRule(dict):
    """A single rule within a bucket's replication configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case
        # property getter that this output type exposes.
        suggest = {
            "deleteMarkerReplicationStatus": "delete_marker_replication_status",
            "sourceSelectionCriterias": "source_selection_criterias",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketV2ReplicationConfigurationRule. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketV2ReplicationConfigurationRule.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BucketV2ReplicationConfigurationRule.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 delete_marker_replication_status: Optional[str] = None,
                 destinations: Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestination']] = None,
                 filters: Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleFilter']] = None,
                 id: Optional[str] = None,
                 prefix: Optional[str] = None,
                 priority: Optional[int] = None,
                 source_selection_criterias: Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleSourceSelectionCriteria']] = None,
                 status: Optional[str] = None):
        """
        :param str delete_marker_replication_status: Whether delete markers are replicated.
        :param Sequence['BucketV2ReplicationConfigurationRuleDestinationArgs'] destinations: The destination for the rule.
        :param Sequence['BucketV2ReplicationConfigurationRuleFilterArgs'] filters: Filter that identifies subset of objects to which the replication rule applies.
        :param str id: Unique identifier for the rule.
        :param str prefix: Object keyname prefix identifying one or more objects to which the rule applies
        :param int priority: The priority associated with the rule.
        :param Sequence['BucketV2ReplicationConfigurationRuleSourceSelectionCriteriaArgs'] source_selection_criterias: The special object selection criteria.
        :param str status: The status of the rule.
        """
        # Only populate keys the caller actually supplied.
        for name, value in (
                ("delete_marker_replication_status", delete_marker_replication_status),
                ("destinations", destinations),
                ("filters", filters),
                ("id", id),
                ("prefix", prefix),
                ("priority", priority),
                ("source_selection_criterias", source_selection_criterias),
                ("status", status)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="deleteMarkerReplicationStatus")
    def delete_marker_replication_status(self) -> Optional[str]:
        """
        Whether delete markers are replicated.
        """
        return pulumi.get(self, "delete_marker_replication_status")

    @property
    @pulumi.getter
    def destinations(self) -> Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestination']]:
        """
        The destination for the rule.
        """
        return pulumi.get(self, "destinations")

    @property
    @pulumi.getter
    def filters(self) -> Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleFilter']]:
        """
        Filter that identifies subset of objects to which the replication rule applies.
        """
        return pulumi.get(self, "filters")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Unique identifier for the rule.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def prefix(self) -> Optional[str]:
        """
        Object keyname prefix identifying one or more objects to which the rule applies
        """
        return pulumi.get(self, "prefix")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """
        The priority associated with the rule.
        """
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter(name="sourceSelectionCriterias")
    def source_selection_criterias(self) -> Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleSourceSelectionCriteria']]:
        """
        The special object selection criteria.
        """
        return pulumi.get(self, "source_selection_criterias")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        The status of the rule.
        """
        return pulumi.get(self, "status")
@pulumi.output_type
class BucketV2ReplicationConfigurationRuleDestination(dict):
    """The destination settings of a bucket replication rule."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case
        # property getter that this output type exposes.
        suggest = {
            "accessControlTranslations": "access_control_translations",
            "accountId": "account_id",
            "replicaKmsKeyId": "replica_kms_key_id",
            "replicationTimes": "replication_times",
            "storageClass": "storage_class",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketV2ReplicationConfigurationRuleDestination. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketV2ReplicationConfigurationRuleDestination.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BucketV2ReplicationConfigurationRuleDestination.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 access_control_translations: Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestinationAccessControlTranslation']] = None,
                 account_id: Optional[str] = None,
                 bucket: Optional[str] = None,
                 metrics: Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestinationMetric']] = None,
                 replica_kms_key_id: Optional[str] = None,
                 replication_times: Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestinationReplicationTime']] = None,
                 storage_class: Optional[str] = None):
        """
        :param Sequence['BucketV2ReplicationConfigurationRuleDestinationAccessControlTranslationArgs'] access_control_translations: The overrides to use for object owners on replication.
        :param str account_id: The Account ID to use for overriding the object owner on replication.
        :param str bucket: The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        :param Sequence['BucketV2ReplicationConfigurationRuleDestinationMetricArgs'] metrics: Replication metrics.
        :param str replica_kms_key_id: Destination KMS encryption key ARN for SSE-KMS replication.
        :param Sequence['BucketV2ReplicationConfigurationRuleDestinationReplicationTimeArgs'] replication_times: S3 Replication Time Control (S3 RTC).
        :param str storage_class: The [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Destination.html#AmazonS3-Type-Destination-StorageClass) used to store the object.
        """
        # Only populate keys the caller actually supplied.
        for name, value in (
                ("access_control_translations", access_control_translations),
                ("account_id", account_id),
                ("bucket", bucket),
                ("metrics", metrics),
                ("replica_kms_key_id", replica_kms_key_id),
                ("replication_times", replication_times),
                ("storage_class", storage_class)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="accessControlTranslations")
    def access_control_translations(self) -> Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestinationAccessControlTranslation']]:
        """
        The overrides to use for object owners on replication.
        """
        return pulumi.get(self, "access_control_translations")

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> Optional[str]:
        """
        The Account ID to use for overriding the object owner on replication.
        """
        return pulumi.get(self, "account_id")

    @property
    @pulumi.getter
    def bucket(self) -> Optional[str]:
        """
        The name of the bucket. If omitted, this provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
        """
        return pulumi.get(self, "bucket")

    @property
    @pulumi.getter
    def metrics(self) -> Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestinationMetric']]:
        """
        Replication metrics.
        """
        return pulumi.get(self, "metrics")

    @property
    @pulumi.getter(name="replicaKmsKeyId")
    def replica_kms_key_id(self) -> Optional[str]:
        """
        Destination KMS encryption key ARN for SSE-KMS replication.
        """
        return pulumi.get(self, "replica_kms_key_id")

    @property
    @pulumi.getter(name="replicationTimes")
    def replication_times(self) -> Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleDestinationReplicationTime']]:
        """
        S3 Replication Time Control (S3 RTC).
        """
        return pulumi.get(self, "replication_times")

    @property
    @pulumi.getter(name="storageClass")
    def storage_class(self) -> Optional[str]:
        """
        The [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Destination.html#AmazonS3-Type-Destination-StorageClass) used to store the object.
        """
        return pulumi.get(self, "storage_class")
@pulumi.output_type
class BucketV2ReplicationConfigurationRuleDestinationAccessControlTranslation(dict):
    """Object-owner override applied to replicated objects."""

    def __init__(__self__, *,
                 owner: Optional[str] = None):
        """
        :param str owner: The override value for the owner on replicated objects.
        """
        # Skip the key entirely when no override was supplied.
        if owner is None:
            return
        pulumi.set(__self__, "owner", owner)

    @property
    @pulumi.getter
    def owner(self) -> Optional[str]:
        """
        The override value for the owner on replicated objects.
        """
        return pulumi.get(self, "owner")
@pulumi.output_type
class BucketV2ReplicationConfigurationRuleDestinationMetric(dict):
    """Replication-metrics settings for a replication rule destination."""

    def __init__(__self__, *,
                 minutes: Optional[int] = None,
                 status: Optional[str] = None):
        """
        :param int minutes: Threshold within which objects are to be replicated.
        :param str status: The status of the rule.
        """
        # Only populate keys the caller actually supplied.
        for name, value in (("minutes", minutes), ("status", status)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def minutes(self) -> Optional[int]:
        """
        Threshold within which objects are to be replicated.
        """
        return pulumi.get(self, "minutes")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        The status of the rule.
        """
        return pulumi.get(self, "status")
@pulumi.output_type
class BucketV2ReplicationConfigurationRuleDestinationReplicationTime(dict):
    """S3 Replication Time Control settings for a rule destination."""

    def __init__(__self__, *,
                 minutes: Optional[int] = None,
                 status: Optional[str] = None):
        """
        :param int minutes: Threshold within which objects are to be replicated.
        :param str status: The status of the rule.
        """
        # Only populate keys the caller actually supplied.
        for name, value in (("minutes", minutes), ("status", status)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def minutes(self) -> Optional[int]:
        """
        Threshold within which objects are to be replicated.
        """
        return pulumi.get(self, "minutes")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        The status of the rule.
        """
        return pulumi.get(self, "status")
@pulumi.output_type
class BucketV2ReplicationConfigurationRuleFilter(dict):
    """Filter limiting which objects a replication rule applies to."""

    def __init__(__self__, *,
                 prefix: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        :param str prefix: Object keyname prefix identifying one or more objects to which the rule applies
        :param Mapping[str, str] tags: A map of tags to assign to the bucket. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        # Only populate keys the caller actually supplied.
        for name, value in (("prefix", prefix), ("tags", tags)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def prefix(self) -> Optional[str]:
        """
        Object keyname prefix identifying one or more objects to which the rule applies
        """
        return pulumi.get(self, "prefix")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        A map of tags to assign to the bucket. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
@pulumi.output_type
class BucketV2ReplicationConfigurationRuleSourceSelectionCriteria(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "sseKmsEncryptedObjects":
suggest = "sse_kms_encrypted_objects"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BucketV2ReplicationConfigurationRuleSourceSelectionCriteria. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BucketV2ReplicationConfigurationRuleSourceSelectionCriteria.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BucketV2ReplicationConfigurationRuleSourceSelectionCriteria.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
sse_kms_encrypted_objects: Optional[Sequence['outputs.BucketV2ReplicationConfigurationRuleSourceSelectionCriteriaSseKmsEncryptedObject']] = None):
"""
:param Sequence['BucketV2ReplicationConfigurationRuleSourceSelectionCriteriaSseKmsEncryptedObjectArgs'] sse_kms_encrypted_objects: Matched SSE-KMS encrypted objects.
"""
if sse_kms_encrypted_objects is not None:
| |
"#670763",
"#650762",
"#630660",
"#62065e",
"#60055d",
"#5e055b",
"#5d045a",
"#5b0458",
"#5a0357",
"#580355",
"#560254",
"#550252",
"#530151",
"#51014f",
"#50004e",
"#4e004c",
"#4d004b"
],
"CMRmap": [
"#000000",
"#010104",
"#020208",
"#03030c",
"#040410",
"#060614",
"#070718",
"#08081c",
"#090920",
"#0a0a24",
"#0c0c28",
"#0d0d2c",
"#0e0e30",
"#0f0f34",
"#101038",
"#12123c",
"#131340",
"#141444",
"#151548",
"#16164c",
"#181850",
"#191954",
"#1a1a58",
"#1b1b5c",
"#1c1c60",
"#1e1e64",
"#1f1f68",
"#20206c",
"#212170",
"#222274",
"#242478",
"#25257c",
"#26267f",
"#272681",
"#282683",
"#2a2685",
"#2b2687",
"#2c2689",
"#2d268b",
"#2e268d",
"#30268f",
"#312691",
"#322693",
"#332695",
"#342697",
"#352699",
"#37269b",
"#38269d",
"#39269f",
"#3a26a1",
"#3c26a3",
"#3d26a5",
"#3e26a7",
"#3f26a9",
"#4026ab",
"#4126ad",
"#4326af",
"#4426b1",
"#4526b3",
"#4626b5",
"#4826b7",
"#4926b9",
"#4a26bb",
"#4b26bd",
"#4d26be",
"#4f26bc",
"#5127ba",
"#5427b8",
"#5627b6",
"#5928b4",
"#5b28b2",
"#5d29b0",
"#6029ae",
"#6229ac",
"#652aaa",
"#672aa8",
"#692ba6",
"#6c2ba4",
"#6e2ba2",
"#712ca0",
"#732c9e",
"#752d9c",
"#782d9a",
"#7a2d98",
"#7d2e96",
"#7f2e94",
"#812f92",
"#842f90",
"#862f8e",
"#89308c",
"#8b308a",
"#8d3188",
"#903186",
"#923184",
"#953282",
"#973280",
"#9a337e",
"#9d337b",
"#a03378",
"#a33476",
"#a73473",
"#aa3570",
"#ad356d",
"#b0356a",
"#b33668",
"#b73665",
"#ba3762",
"#bd375f",
"#c0375c",
"#c3385a",
"#c73857",
"#ca3954",
"#cd3951",
"#d0394e",
"#d33a4c",
"#d73a49",
"#da3b46",
"#dd3b43",
"#e03b40",
"#e33c3e",
"#e73c3b",
"#ea3d38",
"#ed3d35",
"#f03d32",
"#f33e30",
"#f73e2d",
"#fa3f2a",
"#fd3f27",
"#fe4025",
"#fd4224",
"#fd4423",
"#fc4622",
"#fb4820",
"#fa4a1f",
"#f94c1e",
"#f94e1d",
"#f8501c",
"#f7521a",
"#f65419",
"#f55618",
"#f55817",
"#f45a16",
"#f35c14",
"#f25e13",
"#f16012",
"#f16211",
"#f06410",
"#ef660e",
"#ee680d",
"#ed6a0c",
"#ed6c0b",
"#ec6e0a",
"#eb7008",
"#ea7207",
"#e97406",
"#e97605",
"#e87804",
"#e77a02",
"#e67c01",
"#e57e00",
"#e58000",
"#e58201",
"#e58402",
"#e58602",
"#e58803",
"#e58a04",
"#e58c05",
"#e58e06",
"#e59006",
"#e59207",
"#e59408",
"#e59609",
"#e5980a",
"#e59a0a",
"#e59c0b",
"#e59e0c",
"#e5a00d",
"#e5a20e",
"#e5a40e",
"#e5a60f",
"#e5a810",
"#e5aa11",
"#e5ac12",
"#e5ae12",
"#e5b013",
"#e5b214",
"#e5b415",
"#e5b616",
"#e5b816",
"#e5ba17",
"#e5bc18",
"#e5be19",
"#e5c01b",
"#e5c11f",
"#e5c222",
"#e5c325",
"#e5c428",
"#e5c62b",
"#e5c72f",
"#e5c832",
"#e5c935",
"#e5ca38",
"#e5cc3b",
"#e5cd3f",
"#e5ce42",
"#e5cf45",
"#e5d048",
"#e5d24b",
"#e5d34f",
"#e5d452",
"#e5d555",
"#e5d658",
"#e5d85b",
"#e5d95f",
"#e5da62",
"#e5db65",
"#e5dc68",
"#e5de6b",
"#e5df6f",
"#e5e072",
"#e5e175",
"#e5e278",
"#e5e47b",
"#e5e57f",
"#e6e683",
"#e7e787",
"#e7e78b",
"#e8e88f",
"#e9e992",
"#eaea97",
"#ebeb9b",
"#ebeb9f",
"#ececa3",
"#ededa7",
"#eeeeab",
"#efefaf",
"#efefb3",
"#f0f0b7",
"#f1f1bb",
"#f2f2bf",
"#f3f3c3",
"#f3f3c7",
"#f4f4cb",
"#f5f5cf",
"#f6f6d2",
"#f7f7d7",
"#f7f7db",
"#f8f8df",
"#f9f9e3",
"#fafae7",
"#fbfbeb",
"#fbfbef",
"#fcfcf3",
"#fdfdf7",
"#fefefb",
"#ffffff"
],
"Dark2": [
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#1b9e77",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#d95f02",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#7570b3",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#e7298a",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#66a61e",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#e6ab02",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#a6761d",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666",
"#666666"
],
"GnBu": [
"#f7fcf0",
"#f6fbef",
"#f5fbee",
"#f4fbee",
"#f4faed",
"#f3faec",
"#f2faec",
"#f1faeb",
"#f1f9ea",
"#f0f9ea",
"#eff9e9",
"#eff8e8",
"#eef8e8",
"#edf8e7",
"#ecf8e6",
"#ecf7e6",
"#ebf7e5",
"#eaf7e4",
"#eaf6e4",
"#e9f6e3",
"#e8f6e2",
"#e7f6e2",
"#e7f5e1",
"#e6f5e0",
"#e5f5e0",
"#e4f4df",
"#e4f4de",
"#e3f4de",
"#e2f4dd",
"#e2f3dc",
"#e1f3dc",
"#e0f3db",
"#dff2da",
"#dff2da",
"#def2d9",
"#def2d8",
"#ddf1d8",
"#dcf1d7",
"#dcf1d6",
"#dbf1d6",
"#daf0d5",
"#daf0d4",
"#d9f0d4",
"#d9f0d3",
"#d8efd2",
"#d7efd1",
"#d7efd1",
"#d6efd0",
"#d5eecf",
"#d5eecf",
"#d4eece",
"#d4eecd",
"#d3edcd",
"#d2edcc",
"#d2edcb",
"#d1edcb",
"#d0ecca",
"#d0ecc9",
"#cfecc8",
"#ceecc8",
"#ceebc7",
"#cdebc6",
"#cdebc6",
"#ccebc5",
"#cbeac4",
"#caeac4",
"#c9eac3",
"#c8e9c3",
"#c7e9c2",
"#c6e8c2",
"#c4e8c1",
"#c3e7c1",
"#c2e7c0",
"#c1e6c0",
"#c0e6bf",
"#bfe6bf",
"#bee5be",
"#bde5be",
"#bbe4bd",
"#bae4bd",
"#b9e3bc",
"#b8e3bc",
"#b7e2bb",
"#b6e2bb",
"#b5e2ba",
"#b4e1ba",
"#b2e1b9",
"#b1e0b9",
"#b0e0b8",
"#afdfb8",
"#aedfb7",
"#addfb7",
"#acdeb6",
"#aadeb6",
"#a9ddb5",
"#a8ddb5",
"#a7dcb5",
"#a6dcb5",
"#a4dbb6",
"#a3dbb6",
"#a1dab7",
"#a0dab7",
"#9fd9b8",
"#9dd9b8",
"#9cd8b8",
"#9ad8b9",
"#99d7b9",
"#97d6ba",
"#96d6ba",
"#95d5bb",
"#93d5bb",
"#92d4bc",
"#90d4bc",
"#8fd3bd",
"#8ed3bd",
"#8cd2be",
"#8bd2be",
"#89d1bf",
"#88d1bf",
"#87d0c0",
"#85d0c0",
"#84cfc0",
"#82cec1",
"#81cec1",
"#7fcdc2",
"#7ecdc2",
"#7dccc3",
"#7bccc3",
"#7acbc4",
"#78cac4",
"#77cac5",
"#76c9c5",
"#74c8c6",
"#73c7c6",
"#71c6c7",
"#70c6c7",
"#6fc5c8",
"#6dc4c8",
"#6cc3c8",
"#6ac2c9",
"#69c2c9",
"#67c1ca",
"#66c0ca",
"#65bfcb",
"#63bfcb",
"#62becc",
"#60bdcc",
"#5fbccd",
"#5ebbcd",
"#5cbbce",
"#5bbace",
"#59b9cf",
"#58b8cf",
"#57b8d0",
"#55b7d0",
"#54b6d0",
"#52b5d1",
"#51b4d1",
"#4fb4d2",
"#4eb3d2",
"#4db2d2",
"#4cb1d1",
"#4bafd1",
"#4aaed0",
"#48adcf",
"#47accf",
"#46aace",
"#45a9cd",
"#44a8cd",
"#43a7cc",
"#42a6cb",
"#41a4cb",
"#40a3ca",
"#3fa2ca",
"#3da1c9",
"#3c9fc8",
"#3b9ec8",
"#3a9dc7",
"#399cc6",
"#389ac6",
"#3799c5",
"#3698c4",
"#3597c4",
"#3496c3",
"#3294c2",
"#3193c2",
"#3092c1",
"#2f91c0",
"#2e8fc0",
"#2d8ebf",
"#2c8dbe",
"#2b8cbe",
"#2a8bbd",
"#298abd",
"#2788bc",
"#2687bb",
"#2586bb",
"#2485ba",
"#2384ba",
"#2283b9",
"#2182b9",
"#2080b8",
"#1f7fb7",
"#1e7eb7",
"#1d7db6",
"#1b7cb6",
"#1a7bb5",
"#197ab5",
"#1879b4",
"#1777b3",
"#1676b3",
"#1575b2",
"#1474b2",
"#1373b1",
"#1272b1",
"#1071b0",
"#0f70b0",
"#0e6eaf",
"#0d6dae",
"#0c6cae",
"#0b6bad",
"#0a6aad",
"#0969ac",
"#0868ac",
"#0866aa",
"#0865a9",
"#0864a8",
"#0863a6",
"#0861a5",
"#0860a4",
"#085fa2",
"#085ea1",
"#085ca0",
"#085b9e",
"#085a9d",
"#08599b",
"#08579a",
"#085699",
"#085597",
"#085496",
"#085295",
"#085193",
"#085092",
"#084f91",
"#084d8f",
"#084c8e",
"#084b8d",
"#084a8b",
"#08488a",
"#084789",
"#084687",
"#084586",
"#084385",
"#084283",
"#084182",
"#084081"
],
"Greens": [
"#f7fcf5",
"#f6fbf4",
"#f5fbf3",
"#f5fbf3",
"#f4fbf2",
"#f4faf1",
"#f3faf1",
"#f3faf0",
"#f2faef",
"#f1faef",
"#f1f9ee",
"#f0f9ed",
"#f0f9ed",
"#eff9ec",
"#eff8eb",
"#eef8eb",
"#edf8ea",
"#edf8e9",
"#ecf8e9",
"#ecf7e8",
"#ebf7e7",
"#ebf7e7",
"#eaf7e6",
"#eaf6e5",
"#e9f6e5",
"#e8f6e4",
"#e8f6e3",
"#e7f6e3",
"#e7f5e2",
"#e6f5e1",
"#e6f5e1",
"#e5f5e0",
"#e4f4df",
"#e3f4de",
"#e3f4dd",
"#e2f3dc",
"#e1f3db",
"#e0f3da",
"#dff2d9",
"#def2d8",
"#ddf1d7",
"#dcf1d6",
"#dbf1d5",
"#daf0d4",
"#d9f0d3",
"#d8f0d2",
"#d7efd1",
"#d6efd0",
"#d5eecf",
"#d4eece",
"#d3eecd",
"#d3edcc",
"#d2edcb",
"#d1edca",
"#d0ecc9",
"#cfecc8",
"#ceebc7",
"#cdebc6",
"#ccebc5",
"#cbeac4",
"#caeac3",
"#c9eac2",
"#c8e9c1",
"#c7e9c0",
"#c6e8bf",
"#c5e8be",
"#c4e7bd",
"#c3e7bc",
"#c1e6bb",
"#c0e6b9",
"#bfe5b8",
"#bee5b7",
"#bde4b6",
"#bbe4b5",
"#bae3b4",
"#b9e3b2",
"#b8e2b1",
"#b7e2b0",
"#b6e1af",
"#b4e1ae",
"#b3e0ad",
"#b2e0ab",
"#b1dfaa",
"#b0dfa9",
"#aedea8",
"#addea7",
"#acdda6",
"#abdda5",
"#aadca3",
"#a8dca2",
"#a7dba1",
"#a6dba0",
"#a5da9f",
"#a4da9e",
"#a2d99c",
"#a1d99b",
"#a0d89a",
"#9fd899",
"#9dd798",
"#9cd697",
"#9ad695",
"#99d594",
"#98d493",
"#96d492",
"#95d391",
"#93d290",
"#92d28e",
"#90d18d",
"#8fd08c",
"#8ed08b",
"#8ccf8a",
"#8bce89",
"#89ce87",
"#88cd86",
"#87cc85",
"#85cc84",
"#84cb83",
"#82ca82",
"#81ca81",
"#80c97f",
"#7ec87e",
"#7dc87d",
"#7bc77c",
"#7ac67b",
"#78c67a",
"#77c578",
"#76c477",
"#74c476",
"#73c375",
"#71c274",
"#70c274",
"#6ec173",
"#6cc072",
"#6bbf71",
"#69be70",
"#68be70",
"#66bd6f",
"#64bc6e",
"#63bb6d",
"#61ba6c",
"#60ba6c",
"#5eb96b",
"#5cb86a",
"#5bb769",
"#59b769",
"#58b668",
"#56b567",
"#54b466",
"#53b365",
"#51b365",
"#50b264",
"#4eb163",
"#4cb062",
"#4bb061",
"#49af61",
"#48ae60",
"#46ad5f",
"#44ac5e",
"#43ac5e",
"#41ab5d",
"#40aa5c",
"#3fa95b",
"#3ea85b",
"#3da75a",
"#3ca659",
"#3ba558",
"#3aa458",
"#39a357",
"#38a256",
"#37a155",
"#37a055",
"#369f54",
"#359e53",
"#349d52",
"#339c51",
"#329b51",
"#319a50",
"#30994f",
"#2f984e",
"#2e974e",
"#2d964d",
"#2c954c",
"#2b944b",
"#2a934b",
"#29924a",
"#289149",
"#279048",
"#278f48",
"#268e47",
"#258d46",
"#248c45",
"#238b45",
"#228a44",
"#218943",
"#1f8842",
"#1e8742",
"#1d8641",
"#1c8540",
"#1b843f",
"#1a833e",
"#19823e",
"#18813d",
"#17803c",
"#167f3b",
"#157e3a",
"#137e3a",
"#127d39",
"#117c38",
"#107b37",
"#0f7a37",
"#0e7936",
"#0d7835",
"#0c7734",
"#0b7633",
"#0a7533",
"#087432",
"#077331",
"#067230",
"#057130",
"#04702f",
"#036f2e",
"#026f2d",
"#016e2c",
"#006d2c",
"#006b2b",
"#006a2b",
"#00692a",
"#006829",
"#006629",
"#006528",
"#006428",
"#006227",
"#006127",
"#006026",
"#005f26",
"#005d25",
"#005c25",
"#005b24",
"#005924",
"#005823",
"#005723",
"#005622",
"#005421",
"#005321",
"#005220",
"#005020",
"#004f1f",
"#004e1f",
"#004d1e",
"#004b1e",
"#004a1d",
"#00491d",
"#00471c",
"#00461c",
"#00451b",
"#00441b"
],
"Greys": [
"#ffffff",
"#fefefe",
"#fefefe",
"#fdfdfd",
"#fdfdfd",
"#fcfcfc",
"#fcfcfc",
"#fbfbfb",
"#fbfbfb",
"#fafafa",
"#fafafa",
"#f9f9f9",
"#f9f9f9",
"#f8f8f8",
"#f8f8f8",
"#f7f7f7",
"#f7f7f7",
"#f7f7f7",
"#f6f6f6",
"#f6f6f6",
"#f5f5f5",
"#f5f5f5",
"#f4f4f4",
"#f4f4f4",
"#f3f3f3",
"#f3f3f3",
"#f2f2f2",
"#f2f2f2",
"#f1f1f1",
"#f1f1f1",
"#f0f0f0",
"#f0f0f0",
"#efefef",
"#efefef",
"#eeeeee",
"#ededed",
"#ededed",
"#ececec",
"#ebebeb",
"#eaeaea",
"#eaeaea",
"#e9e9e9",
"#e8e8e8",
"#e7e7e7",
"#e7e7e7",
"#e6e6e6",
"#e5e5e5",
"#e5e5e5",
"#e4e4e4",
"#e3e3e3",
"#e2e2e2",
"#e2e2e2",
"#e1e1e1",
"#e0e0e0",
"#e0e0e0",
"#dfdfdf",
"#dedede",
"#dddddd",
"#dddddd",
"#dcdcdc",
"#dbdbdb",
"#dadada",
"#dadada",
"#d9d9d9",
"#d8d8d8",
"#d7d7d7",
"#d7d7d7",
"#d6d6d6",
"#d5d5d5",
"#d4d4d4",
"#d3d3d3",
"#d2d2d2",
"#d1d1d1",
"#d0d0d0",
"#cfcfcf",
"#cfcfcf",
"#cecece",
"#cdcdcd",
"#cccccc",
"#cbcbcb",
"#cacaca",
"#c9c9c9",
"#c8c8c8",
"#c8c8c8",
"#c7c7c7",
"#c6c6c6",
"#c5c5c5",
"#c4c4c4",
"#c3c3c3",
"#c2c2c2",
"#c1c1c1",
"#c1c1c1",
"#c0c0c0",
"#bfbfbf",
"#bebebe",
"#bdbdbd",
"#bcbcbc",
"#bbbbbb",
"#bababa",
"#b8b8b8",
"#b7b7b7",
"#b6b6b6",
"#b5b5b5",
"#b3b3b3",
"#b2b2b2",
"#b1b1b1",
"#b0b0b0",
"#afafaf",
"#adadad",
"#acacac",
"#ababab",
"#aaaaaa",
"#a8a8a8",
"#a7a7a7",
"#a6a6a6",
"#a5a5a5",
"#a4a4a4",
"#a2a2a2",
"#a1a1a1",
"#a0a0a0",
"#9f9f9f",
"#9d9d9d",
"#9c9c9c",
"#9b9b9b",
"#9a9a9a",
"#999999",
"#979797",
"#969696",
"#959595",
"#949494",
"#939393",
"#929292",
"#919191",
"#8f8f8f",
"#8e8e8e",
"#8d8d8d",
"#8c8c8c",
"#8b8b8b",
"#8a8a8a",
"#898989",
"#888888",
"#878787",
"#868686",
"#848484",
"#838383",
"#828282",
"#818181",
"#808080",
"#7f7f7f",
"#7e7e7e",
"#7d7d7d",
"#7c7c7c",
"#7b7b7b",
"#7a7a7a",
"#787878",
"#777777",
"#767676",
"#757575",
"#747474",
"#737373",
"#727272",
"#717171",
"#707070",
"#6f6f6f",
"#6e6e6e",
"#6d6d6d",
"#6c6c6c",
"#6b6b6b",
"#6a6a6a",
"#696969",
"#686868",
"#666666",
"#656565",
"#646464",
"#636363",
"#626262",
"#616161",
"#606060",
"#5f5f5f",
"#5e5e5e",
"#5d5d5d",
"#5c5c5c",
"#5b5b5b",
"#5a5a5a",
"#595959",
"#585858",
"#575757",
"#565656",
"#555555",
"#545454",
"#535353",
"#525252",
"#505050",
"#4f4f4f",
"#4e4e4e",
"#4c4c4c",
"#4b4b4b",
"#494949",
"#484848",
"#474747",
"#454545",
"#444444",
"#424242",
"#414141",
"#404040",
"#3e3e3e",
"#3d3d3d",
"#3b3b3b",
"#3a3a3a",
"#383838",
"#373737",
"#363636",
"#343434",
"#333333",
"#313131",
"#303030",
"#2f2f2f",
"#2d2d2d",
"#2c2c2c",
"#2a2a2a",
"#292929",
"#282828",
"#262626",
"#252525",
| |
= tmpTokens[1]
elif (len(tmpTokens) == 3):
if (tmpTokens[1] != "?"):
xName = tmpTokens[1] + "," + tmpTokens[2]
else:
xName = tmpTokens[2]
else:
print " FATAL ERROR ???? !!!! ", tmpTokens, cTokens
sys.exit(-1)
else:
if (len(tmpTokens) == 1):
print " FATAL ERROR ???? !!!! ", tmpTokens, cTokens
sys.exit(-1)
elif (len(tmpTokens) == 2):
geneName = tmpTokens[1]
xName = tmpTokens[1]
elif (len(tmpTokens) == 3):
geneName = tmpTokens[1]
if (tmpTokens[1] != "?"):
xName = tmpTokens[1] + "," + tmpTokens[2]
else:
xName = tmpTokens[2]
else:
print " FATAL ERROR ????? !!!!! ", tmpTokens, cTokens
sys.exit(-1)
# print " (f) "
tmpName = makeFeatureName(
dType, fType, geneName, '', -1, -1, '', xName)
elif (zPlat == "MDA_RPPA_Core"):
print cTokens
geneName = cTokens[0]
tmpName = makeFeatureName("N", "RPPA", geneName)
else:
# print " (g) "
tmpName = makeFeatureName(dType, fType, cTokens[0])
if (geneList[iGene] != tmpName):
print " ERROR ??? feature name not as expected ??? <%s> <%s> <%s> " % (geneList[iGene], tmpName, cTokens[0])
sys.exit(-1)
try:
# for most platforms, the data value we are interested in is in
# the last column, but sometimes not ... best to make this decision
# for each platform carefully
# IlluminaGA_miRNASeq --> [-2]
# IlluminaHiSeq_miRNASeq --> [-2]
# IlluminaGA_RNASeq --> [-1]
# IlluminaHiSeq_RNASeq --> [-1]
# IlluminaHiSeq_RNASeqV2 --> [-1]
# Genome_Wide_SNP_6 --> [-1]
# HT_HG-U133A --> [-1]
# MDA_RPPA_Core --> [-1]
# AgilentG4502A_07_1 --> [-1]
# AgilentG4502A_07_2 --> [-1]
# AgilentG4502A_07_3 --> [-1]
# H-miRNA_8x15K --> [-1]
# HumanMethylation27 --> [1]
# HumanMethylation450 --> [1]
if (zPlat == "IlluminaGA_RNASeq" or zPlat == "IlluminaGA_RNASeqV2" or
zPlat == "IlluminaHiSeq_RNASeq" or zPlat == "IlluminaHiSeq_RNASeqV2" or
zPlat == "Genome_Wide_SNP_6" or zPlat == "HT_HG-U133A" or
zPlat == "MDA_RPPA_Core" or zPlat == "H-miRNA_8x15K" or
zPlat == "AgilentG4502A_07_1" or zPlat == "AgilentG4502A_07_2" or zPlat == "AgilentG4502A_07_3"):
dataVec += [float(cTokens[-1])]
elif (zPlat == "IlluminaGA_miRNASeq" or zPlat == "IlluminaHiSeq_miRNASeq"):
dataVec += [float(cTokens[-2])]
elif (zPlat == "HumanMethylation27" or zPlat == "HumanMethylation450"):
dataVec += [float(cTokens[1])]
else:
print " double check platform data location ??? "
print zPlat, cTokens
sys.exit(-1)
dataVec += [float(cTokens[1])]
except:
if (len(cTokens) == 1):
dataVec += [NA_VALUE]
elif (cTokens[1] == 'null'):
dataVec += [NA_VALUE]
elif (cTokens[1] == 'NA'):
dataVec += [NA_VALUE]
else:
print ' ERROR converting token to float ??? ', zPlat
print cLine
print cTokens, len(cTokens)
sys.exit(-1)
iGene += 1
# print dataVec[:10]
print " --> returning ", len(geneList), len(dataVec)
print geneList[:5]
print dataVec[:5]
return (geneList, dataVec)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def mergeSegments(segList, dataVec):
    """Merge adjacent copy-number segments (NOT implemented).

    Intended to take a list of segment identifiers and the matching data
    vector and return a merged (segList, dataVec) pair, but the body is a
    stub: it logs the input sizes and aborts the whole program via
    sys.exit(-1).
    """
    print " in mergeSegments ... ", len(segList), len(dataVec)
    print " THIS HAS NOT BEEN IMPLEMENTED !!! "
    sys.exit(-1)
    # unreachable; kept to document the intended return contract
    return (segList, dataVec)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def lookAtSegs(segMatrix):
    """Print summary statistics of segment lengths and inter-segment gaps.

    segMatrix is indexed [segment][sample]; each non-empty cell is a
    colon-delimited string whose fields 3 and 4 are the segment start and
    end coordinates, with a '<chr>_' prefix identifying the chromosome.
    This is a diagnostic-only routine: after printing the distributions it
    unconditionally exits the program via sys.exit(-1).
    """
    maxNumSeg = len(segMatrix)
    numSamples = len(segMatrix[0])
    twnSeg = []
    segLen = []
    # assumes exactly 24 chromosomes (1-22, X, Y encoded as 23/24) -- see FIXME below
    numChr = 24
    for iChr in range(numChr):
        # FIXME !!!
        pref = '%d_' % (iChr + 1)
        for jS in range(numSamples):
            lastSegEnd = -1
            for iS in range(maxNumSeg):
                if (segMatrix[iS][jS] == ''):
                    continue
                if (not segMatrix[iS][jS].startswith(pref)):
                    continue
                tokenList = segMatrix[iS][jS].split(':')
                segStart = int(tokenList[3])
                segEnd = int(tokenList[4])
                segLength = (segEnd - segStart + 1)
                segLen += [segLength]
                # gap length is only defined once a previous segment has
                # been seen on this chromosome for this sample
                if (lastSegEnd >= 0):
                    delLength = (segStart - lastSegEnd)
                    twnSeg += [delLength]
                lastSegEnd = segEnd
    segLen.sort()
    twnSeg.sort()
    nSegLen = len(segLen)
    nTwnSeg = len(twnSeg)
    print " "
    print nSegLen, segLen[:10], segLen[-10:]
    print nTwnSeg, twnSeg[:10], twnSeg[-10:]
    print " "
    # Python 2 integer division picks the 10th/25th/50th/75th/90th
    # percentile indices of the sorted lists
    print segLen[nSegLen / 10], segLen[nSegLen / 4], segLen[nSegLen / 2], segLen[3 * nSegLen / 4], segLen[9 * nSegLen / 10]
    print twnSeg[nTwnSeg / 10], twnSeg[nTwnSeg / 4], twnSeg[nTwnSeg / 2], twnSeg[3 * nTwnSeg / 4], twnSeg[9 * nTwnSeg / 10]
    # based on a quick look at COAD data, the median segment length is 145kb,
    # (max = 151Mb), and the median between-segments gap is 1500bp (max = 20Mb)
    # 10th %ile of segment lengths is 472bp, 90th %ile is 13Mb
    # 10th %ile of segment gaps is 78bp, 90th %ile is 8Kb
    print " "
    print " "
    sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def prettyPrint(aVec, skipFlag=1):
    """Print a compact summary of a numeric vector.

    Always prints the first and last elements; an interior element is only
    printed when it differs from the previously printed value by more than
    0.1 (so long runs of near-constant values collapse to one line).  When
    skipFlag is false, lastVal is reset every iteration so that every
    interior element gets printed.  Note: a one-element vector prints its
    single value twice (first and last indices coincide).
    """
    if (len(aVec) < 1):
        print " WARNING: in prettyPrint ... empty vector ... "
        return
    ii = 0
    print " %6d %8.2f " % (ii, aVec[ii])
    lastVal = aVec[ii]
    for ii in range(1, len(aVec) - 1):
        if (abs(aVec[ii] - lastVal) > 0.1):
            print " %6d %8.3f " % (ii, aVec[ii])
            lastVal = aVec[ii]
        # skipFlag=0 disables run-collapsing: (NA_VALUE + NA_VALUE) is
        # presumably far outside the data range, so the next comparison
        # always triggers a print -- TODO confirm NA_VALUE magnitude
        if (not skipFlag):
            lastVal = (NA_VALUE + NA_VALUE)
    ii = len(aVec) - 1
    print " %6d %8.2f " % (ii, aVec[ii])
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def prettyPrint2(aDict):
    """Print a nested dict of the form {start: {stop: value}}.

    Each start key is required to map to exactly one stop key; any other
    shape is treated as fatal and aborts the program.  Entries are printed
    in sorted start-key order as "[ (start,stop) : value ]".  Python 2
    only: relies on dict.keys() returning a sortable list.
    """
    startKeys = aDict.keys()
    startKeys.sort()
    # print startKeys
    print " number of start keys : ", len(startKeys), startKeys[:3], startKeys[-3:]
    for iStart in startKeys:
        stopKeys = aDict[iStart].keys()
        if (len(stopKeys) != 1):
            # invariant: exactly one stop key per start key
            print " ERROR ??? in prettyPrint2 ... how can there by multiple stop keys ??? "
            print aDict
            sys.exit(-1)
        iStop = stopKeys[0]
        try:
            print " [ (%d,%d) : %.3f ] " % (int(iStart), int(iStop), aDict[iStart][iStop])
        except:
            # keys/values that cannot be coerced to int/float land here;
            # dump context before aborting
            print " ERROR in prettyPrint2 ??? "
            print " startKeys : ", startKeys
            print " contents for current startKey : ", iStart, aDict[iStart]
            print " stopKeys : ", stopKeys
            print " number of stopKeys : ", len(aDict[iStart])
            sys.exit(-1)
    # sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# this used to be done with a single numpy.subtract call but now we need
# to handle the NA_VALUEs ...
## diffvec[k] = cnvsum[k+1] - cnvsum[k]
## diffvec = numpy.subtract ( cnvsum[1::1], cnvsum[0:-1:1] )
def computeFirstDiffVec(cnvsum):
    # Compute the first-difference vector diffvec[k] = cnvsum[k+1] - cnvsum[k],
    # propagating the NA sentinel whenever either operand is NA (stored either
    # as NA_VALUE or as abs(NA_VALUE)).  Returns (diffvec, minDiff, maxDiff),
    # where minDiff/maxDiff are tracked only over the valid differences.
    diffvec = numpy.zeros(len(cnvsum) - 1)
    minDiff = 999999.
    maxDiff = -999999.
    for kk in range(len(diffvec)):
        # if either of the two values to be differenced are NA, then
        # we set the diffvec value to NA
        if (cnvsum[kk] == NA_VALUE):
            diffvec[kk] = NA_VALUE
        elif (cnvsum[kk] == abs(NA_VALUE)):
            diffvec[kk] = NA_VALUE
        elif (cnvsum[kk + 1] == NA_VALUE):
            diffvec[kk] = NA_VALUE
        elif (cnvsum[kk + 1] == abs(NA_VALUE)):
            diffvec[kk] = NA_VALUE
        # only if we have two valid values do we compute the difference
        else:
            diffvec[kk] = cnvsum[kk + 1] - cnvsum[kk]
            # track the running min/max of the valid differences
            if (minDiff > diffvec[kk]):
                minDiff = diffvec[kk]
            if (maxDiff < diffvec[kk]):
                maxDiff = diffvec[kk]
    # sanity check ... (disabled debugging block, kept for reference)
    if (0):
        if (maxDiff > 1000):
            print " ERROR ??? how did we get this value ??? "
            print " "
            for ii in range(len(diffvec)):
                print ii, cnvsum[ii + 1], cnvsum[ii], diffvec[ii]
            sys.exit(-1)
    # print " done computing diffvec "
    return (diffvec, minDiff, maxDiff)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def removeZerosNAs(diffvec):
    # Return a copy of diffvec with near-zero values (|x| < 0.0001) and NA
    # sentinels (|x| > |NA_VALUE| - 1) removed.  Two passes over the data:
    # first count how many values will be dropped, then copy the survivors
    # into a freshly allocated vector.  Aborts the program if fewer than 10
    # values survive, since downstream statistics would be meaningless.
    # print " "
    # print " in removeZerosNAs ... "
    # print diffvec[:10]
    # print diffvec[-10:]
    # print " "
    n1 = len(diffvec)
    if (n1 < 1):
        print " in removeZerosNAs ... zero length vector ??? "
        sys.exit(-1)
    # print n1, min(diffvec), max(diffvec)
    # first pass: count the zero / NA entries to be removed
    n0 = 0
    for kk in range(n1):
        if (abs(diffvec[kk]) < 0.0001):
            n0 += 1
        elif (abs(diffvec[kk]) > (abs(NA_VALUE) - 1)):
            n0 += 1
        else:
            doNothing = 1
        # print kk, diffvec[kk], abs(diffvec[kk]), n0
    # print " "
    # print " "
    n2 = n1 - n0
    # print " starting with %d values, removing %d values, left with %d values
    # " % ( n1, n0, n2 )
    if (n2 < 10):
        print " ERROR ??? "
        print " in removeZerosNAs ... ", len(diffvec), n1, n0, n2
        prettyPrint(diffvec)
        sys.exit(-1)
    # second pass: copy the surviving values into the output vector
    diffvec_nz = numpy.zeros(n2)
    nn = 0
    for kk in range(n1):
        if (abs(diffvec[kk]) < 0.0001):
            doNothing = 1
        elif (abs(diffvec[kk]) > (abs(NA_VALUE) - 1)):
            doNothing = 1
        else:
            diffvec_nz[nn] = diffvec[kk]
            nn += 1
    # print " returning diffvec_nz ... ", len(diffvec_nz), nn,
    # min(diffvec_nz), max(diffvec_nz)
    return (diffvec_nz)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def initializeVec(veclen, value=NA_VALUE):
    # Allocate a (veclen, 1) float vector with every entry preset to `value`
    # (defaults to the module-level NA sentinel).
    filled = numpy.zeros((veclen, 1))
    filled[:] = value
    return (filled)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# accumulate the current binned vector into the summed vector ...
# 25may12: changing this to operate on absolute values so that large positive
# values and large negative values don't cancel each other out!!!
def add2CNVsum(cnvsum, cnvvec):
cnvvec = abs(cnvvec)
# first any values in cnvsum that are NA should just be set
# equal to whatever is coming in (even if it is NA)
b1 = (cnvsum == abs(NA_VALUE))
cnvsum[b1] = cnvvec[b1]
# next, any values in cnvvec that are *not* NA should be
# added to any values in cnvsum that are *not* NA
# EXCEPT | |
[no_entity_id
] + [self.label_map[label]
for label in labels] + [no_entity_id]
record = self.Record_With_Label_Id(
token_ids=token_ids,
text_type_ids=text_type_ids,
position_ids=position_ids,
label_id=label_ids)
else:
tokens = self._reseg_token_label(
tokens=tokens, tokenizer=tokenizer, phase=phase)
if len(tokens) > max_seq_length - 2:
tokens = tokens[0:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens + ["[SEP]"]
token_ids = tokenizer.convert_tokens_to_ids(tokens)
position_ids = list(range(len(token_ids)))
text_type_ids = [0] * len(token_ids)
record = self.Record_Wo_Label_Id(
token_ids=token_ids,
text_type_ids=text_type_ids,
position_ids=position_ids,
)
return record
class MultiLabelClassifyReader(BaseNLPReader):
    """Reader for multi-label classification tasks.

    Each example carries a list of independent binary labels (one per
    class) instead of a single class id, so the padded label batch is
    reshaped to [-1, num_label].
    """

    def _pad_batch_records(self, batch_records, phase=None):
        """Pad a list of records into fixed-length batch arrays.

        Returns [token_ids, position_ids, text_type_ids, input_mask] and,
        for labelled phases, an int64 label matrix of shape [-1, num_label].
        """
        batch_token_ids = [record.token_ids for record in batch_records]
        batch_text_type_ids = [record.text_type_ids for record in batch_records]
        batch_position_ids = [record.position_ids for record in batch_records]
        # padding
        padded_token_ids, input_mask = pad_batch_data(
            batch_token_ids,
            pad_idx=self.pad_id,
            max_seq_len=self.max_seq_len,
            return_input_mask=True)
        padded_text_type_ids = pad_batch_data(
            batch_text_type_ids,
            max_seq_len=self.max_seq_len,
            pad_idx=self.pad_id)
        padded_position_ids = pad_batch_data(
            batch_position_ids,
            max_seq_len=self.max_seq_len,
            pad_idx=self.pad_id)
        return_list = [
            padded_token_ids, padded_position_ids, padded_text_type_ids,
            input_mask
        ]
        if phase != "predict":
            # one int64 row of length num_label per record
            batch_labels_ids = [record.label_id for record in batch_records]
            num_label = len(self.dataset.get_labels())
            batch_labels = np.array(batch_labels_ids).astype("int64").reshape(
                [-1, num_label])
            return_list += [batch_labels]
        return return_list

    def _convert_example_to_record(self,
                                   example,
                                   max_seq_length,
                                   tokenizer,
                                   phase=None):
        """Converts a single `Example` into a single `Record`."""
        text_a = tokenization.convert_to_unicode(example.text_a)
        tokens_a = tokenizer.tokenize(text_a)
        tokens_b = None
        if example.text_b is not None:
            #if "text_b" in example._fields:
            text_b = tokenization.convert_to_unicode(example.text_b)
            tokens_b = tokenizer.tokenize(text_b)
        if tokens_b:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[0:(max_seq_length - 2)]
        # Assemble the final sequence [CLS] A [SEP] (B [SEP]) with segment
        # id 0 for the first sentence and 1 for the second.
        tokens = []
        text_type_ids = []
        tokens.append("[CLS]")
        text_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            text_type_ids.append(0)
        tokens.append("[SEP]")
        text_type_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                text_type_ids.append(1)
            tokens.append("[SEP]")
            text_type_ids.append(1)
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        position_ids = list(range(len(token_ids)))
        label_ids = []
        if phase == "predict":
            # Dummy labels so the record has the expected shape at predict
            # time (labels are not consumed by _pad_batch_records then).
            # NOTE(review): this hard-codes 6 labels -- confirm it matches
            # len(self.dataset.get_labels()) for every supported dataset.
            label_ids = [0, 0, 0, 0, 0, 0]
        else:
            for label in example.label:
                label_ids.append(int(label))
        if phase != "predict":
            record = self.Record_With_Label_Id(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids,
                label_id=label_ids)
        else:
            record = self.Record_Wo_Label_Id(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids)
        return record
class RegressionReader(BaseNLPReader):
    """Reader for regression tasks.

    Identical to the classification reader except that labels are kept as
    float32 values with shape [-1, 1] instead of int64 class ids.
    """

    def _pad_batch_records(self, batch_records, phase=None):
        """Pad a list of records into fixed-length batch arrays; labelled
        phases also get a float32 label column."""
        batch_token_ids = [record.token_ids for record in batch_records]
        batch_text_type_ids = [record.text_type_ids for record in batch_records]
        batch_position_ids = [record.position_ids for record in batch_records]
        padded_token_ids, input_mask = pad_batch_data(
            batch_token_ids,
            max_seq_len=self.max_seq_len,
            pad_idx=self.pad_id,
            return_input_mask=True)
        padded_text_type_ids = pad_batch_data(
            batch_text_type_ids,
            max_seq_len=self.max_seq_len,
            pad_idx=self.pad_id)
        padded_position_ids = pad_batch_data(
            batch_position_ids,
            max_seq_len=self.max_seq_len,
            pad_idx=self.pad_id)
        return_list = [
            padded_token_ids, padded_position_ids, padded_text_type_ids,
            input_mask
        ]
        if phase != "predict":
            batch_labels = [record.label_id for record in batch_records]
            # the only diff with ClassifyReader: astype("float32")
            batch_labels = np.array(batch_labels).astype("float32").reshape(
                [-1, 1])
            return_list += [batch_labels]
        return return_list

    def data_generator(self,
                       batch_size=1,
                       phase='train',
                       shuffle=True,
                       data=None,
                       return_list=True):
        """Return a generator factory over padded batches for `phase`.

        For 'predict', `data` must be a list of 1- or 2-element text items;
        a dummy label of -1 is attached so the records have a uniform shape.
        Note the `shuffle` argument is overridden below: training always
        shuffles, every other phase never does.
        """
        if phase != 'predict' and not self.dataset:
            raise ValueError("The dataset is none and it's not allowed.")
        if phase == 'train':
            shuffle = True
            examples = self.get_train_examples()
            self.num_examples['train'] = len(examples)
        elif phase == 'val' or phase == 'dev':
            shuffle = False
            examples = self.get_dev_examples()
            self.num_examples['dev'] = len(examples)
        elif phase == 'test':
            shuffle = False
            examples = self.get_test_examples()
            self.num_examples['test'] = len(examples)
        elif phase == 'predict':
            shuffle = False
            examples = []
            seq_id = 0
            # BUGFIX: fail early with a clear message instead of an opaque
            # "TypeError: 'NoneType' is not iterable" when no prediction
            # data is supplied.
            if data is None:
                raise ValueError(
                    "The data is none and phase is 'predict', which is not allowed."
                )
            for item in data:
                # set label in order to run the program
                label = -1  # different from BaseNLPReader
                if len(item) == 1:
                    item_i = InputExample(
                        guid=seq_id, text_a=item[0], label=label)
                elif len(item) == 2:
                    item_i = InputExample(
                        guid=seq_id,
                        text_a=item[0],
                        text_b=item[1],
                        label=label)
                else:
                    raise ValueError(
                        "The length of input_text is out of handling, which must be 1 or 2!"
                    )
                examples.append(item_i)
                seq_id += 1
        else:
            raise ValueError(
                "Unknown phase, which should be in ['train', 'dev', 'test', 'predict']."
            )

        def wrapper():
            if shuffle:
                np.random.shuffle(examples)
            for batch_data in self._prepare_batch_data(
                    examples, batch_size, phase=phase):
                if return_list:
                    # for DataFeeder
                    yield [batch_data]
                else:
                    # for DataLoader
                    yield batch_data

        return wrapper
class Features(object):
    """A single set of features of squad_data."""

    def __init__(
            self,
            unique_id,
            example_index,
            doc_span_index,
            tokens,
            token_to_orig_map,
            token_is_max_context,
            token_ids,
            position_ids,
            text_type_ids,
            start_position=None,
            end_position=None,
            is_impossible=None,
    ):
        # bookkeeping ids
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        # token-level views of the document span
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        # model inputs
        self.token_ids = token_ids
        self.position_ids = position_ids
        self.text_type_ids = text_type_ids
        # answer span (left as None at predict time)
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible

    def __repr__(self):
        # tokens / token_to_orig_map are deliberately omitted: too verbose.
        parts = (
            "unique_id: %s " % self.unique_id,
            "example_index: %s " % self.example_index,
            "start_position: %s " % self.start_position,
            "end_position: %s " % self.end_position,
            "is_impossible: %s " % self.is_impossible,
        )
        return "".join(parts)
class ReadingComprehensionReader(BaseNLPReader):
def __init__(self,
dataset,
vocab_path,
do_lower_case=True,
max_seq_len=512,
doc_stride=128,
max_query_length=64,
random_seed=None,
use_task_id=False,
sp_model_path=None,
word_dict_path=None,
in_tokens=False):
super(ReadingComprehensionReader, self).__init__(
vocab_path=vocab_path,
dataset=dataset,
label_map_config=None,
max_seq_len=max_seq_len,
do_lower_case=do_lower_case,
random_seed=random_seed,
use_task_id=use_task_id,
sp_model_path=sp_model_path,
word_dict_path=word_dict_path,
in_tokens=in_tokens)
self.doc_stride = doc_stride
self.max_query_length = max_query_length
self._DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
# self.all_examples[phase] and self.all_features[phase] will be used
# in write_prediction in reading_comprehension_task
self.all_features = {"train": [], "dev": [], "test": [], "predict": []}
self.all_examples = {"train": [], "dev": [], "test": [], "predict": []}
    def _pad_batch_records(self, batch_records, phase):
        """Pad a list of records into fixed-length batch arrays.

        Returns [token_ids, position_ids, text_type_ids, input_mask,
        unique_ids] and, for labelled phases, the start/end answer
        positions as int64 columns of shape [-1, 1].
        """
        batch_token_ids = [record.token_ids for record in batch_records]
        batch_text_type_ids = [record.text_type_ids for record in batch_records]
        batch_position_ids = [record.position_ids for record in batch_records]
        # unique_id links each feature back to its example in
        # write_prediction
        batch_unique_ids = [record.unique_id for record in batch_records]
        batch_unique_ids = np.array(batch_unique_ids).astype("int64").reshape(
            [-1, 1])
        # padding
        padded_token_ids, input_mask = pad_batch_data(
            batch_token_ids,
            pad_idx=self.pad_id,
            return_input_mask=True,
            max_seq_len=self.max_seq_len)
        padded_text_type_ids = pad_batch_data(
            batch_text_type_ids,
            pad_idx=self.pad_id,
            max_seq_len=self.max_seq_len)
        padded_position_ids = pad_batch_data(
            batch_position_ids,
            pad_idx=self.pad_id,
            max_seq_len=self.max_seq_len)
        return_list = [
            padded_token_ids, padded_position_ids, padded_text_type_ids,
            input_mask, batch_unique_ids
        ]
        if phase != "predict":
            batch_start_position = [
                record.start_position for record in batch_records
            ]
            batch_end_position = [
                record.end_position for record in batch_records
            ]
            batch_start_position = np.array(batch_start_position).astype(
                "int64").reshape([-1, 1])
            batch_end_position = np.array(batch_end_position).astype(
                "int64").reshape([-1, 1])
            return_list += [batch_start_position, batch_end_position]
        return return_list
    def _prepare_batch_data(self, records, batch_size, phase=None):
        """generate batch records"""
        # When self.in_tokens is set, batch_size is a token budget: a batch
        # is full once (n_records * longest_record_len) would exceed it.
        # Otherwise batch_size simply counts records.
        batch_records, max_len = [], 0
        for index, record in enumerate(records):
            if phase == "train":
                # progress bookkeeping used elsewhere in the reader
                self.current_example = index
            max_len = max(max_len, len(record.token_ids))
            if self.in_tokens:
                # NOTE(review): if a single record alone exceeds the token
                # budget, an empty batch can be yielded here -- confirm
                # upstream guarantees records fit within batch_size tokens.
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                # current batch is full: emit it and start a new one with
                # the record that did not fit
                yield self._pad_batch_records(batch_records, phase)
                batch_records, max_len = [record], len(record.token_ids)
        # flush the final, possibly short, batch
        if batch_records:
            yield self._pad_batch_records(batch_records, phase)
    def data_generator(self,
                       batch_size=1,
                       phase='train',
                       shuffle=False,
                       data=None,
                       return_list=True):
        """Return a generator factory over padded feature batches for `phase`.

        Examples and their derived features are cached per phase so the
        long-running conversion only happens once; `data` supplies the raw
        examples for the 'predict' phase.
        """
        # we need all_examples and all_features in write_prediction in reading_comprehension_task
        # we can also use all_examples and all_features to avoid duplicate long-time preprocessing
        examples = None
        if self.all_examples[phase]:
            examples = self.all_examples[phase]
        else:
            if phase == 'train':
                examples = self.get_train_examples()
            elif phase == 'dev':
                examples = self.get_dev_examples()
            elif phase == 'test':
                examples = self.get_test_examples()
            elif phase == 'predict':
                examples = data
            else:
                raise ValueError(
                    "Unknown phase, which should be in ['train', 'dev', 'test', 'predict']."
                )
            self.all_examples[phase] = examples
        # the `shuffle` argument is effectively ignored: training always
        # shuffles, every other phase never does
        shuffle = True if phase == 'train' else False
        # As reading comprehension task will divide a long context into several doc_spans and then get multiple features
        # To get the real total steps, we need to know the features' length
        # So we use _convert_examples_to_records rather than _convert_example_to_record in this task
        if self.all_features[phase]:
            features = self.all_features[phase]
        else:
            features = self._convert_examples_to_records(
                examples, self.max_seq_len, self.tokenizer, phase)
            self.all_features[phase] = features
        # self.num_examples["train"] use in strategy.py to show the total steps,
        # we need to cover it with correct len(features)
        self.num_examples[phase] = len(features)

        def wrapper():
            if shuffle:
                np.random.shuffle(features)
            for batch_data in self._prepare_batch_data(
                    features, batch_size, phase=phase):
                if return_list:
                    # for DataFeeder
                    yield [batch_data]
                else:
                    # for DataLoader
                    yield batch_data

        return wrapper
def _convert_examples_to_records(self,
examples,
max_seq_length,
tokenizer,
phase=None):
"""Loads a data file into a list of `InputBatch`s."""
features = []
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > self.max_query_length:
query_tokens = query_tokens[0:self.max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
is_impossible = example.is_impossible if hasattr(
example, "is_impossible") else False
if phase != "predict" and is_impossible:
tok_start_position = -1
tok_end_position = -1
if phase != "predict" and not is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position +
1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position,
tok_end_position) = self.improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position,
tokenizer, example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the | |
index is not in the current frame.',
'Worm index is not in the current frame. Select a valid index.',
QMessageBox.Ok)
return
last_index = self.trajectories_data['worm_index_manual'].max()
new_ind1 = last_index + 1
new_ind2 = last_index + 2
good = self.trajectories_data['worm_index_manual'] == worm_ind
frames = self.trajectories_data.loc[good, 'frame_number']
frames = frames.sort_values(inplace=False)
good = frames < self.frame_number
index1 = frames[good].index
index2 = frames[~good].index
self.trajectories_data.ix[index1, 'worm_index_manual'] = new_ind1
self.trajectories_data.ix[index2, 'worm_index_manual'] = new_ind2
self.rois[0].index = new_ind1
self.rois[1].index = new_ind2
#this might be too slow. I might need to change it
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
class FeatureReaderBase(TrackerViewerAuxGUI):
    """Viewer base class that loads the per-frame timeseries feature table
    from a skeletons/featuresN HDF5 file into `self.timeseries_data`."""

    # columns that identify a row rather than carry a feature value
    index_cols = ['worm_index', 'timestamp', 'motion_modes', 'skeleton_id', 'well_name']
    # HDF5 nodes that may hold the timeseries table, in order of preference
    valid_fields = ['/timeseries_data', '/features_timeseries']

    def __init__(self, ui):
        self.timeseries_data = None
        self.feat_column = ''
        super().__init__(ui)

    def updateSkelFile(self, skeletons_file):
        """Load feature data for the new skeletons file; on any failure the
        viewer falls back to `timeseries_data = None` (no feature data)."""
        super().updateSkelFile(skeletons_file)
        try:
            self.traj_colors = {}
            with pd.HDFStore(self.skeletons_file, 'r') as ske_file_id:
                for field in self.valid_fields:
                    if field in ske_file_id:
                        self.timeseries_data = ske_file_id[field]
                        if field == '/timeseries_data':
                            # new-style files keep blob features in a
                            # separate row-aligned table; merge them in
                            # under a 'blob_' prefix
                            blob_features = ske_file_id['/blob_features']
                            blob_features.columns = ['blob_' + x for x in blob_features.columns]
                            self.timeseries_data = pd.concat((self.timeseries_data, blob_features), axis=1)
                        break
                else:
                    # none of the known nodes exist -> treated as "no
                    # feature data" by the except clause below
                    raise KeyError
            # BUGFIX: the original read `if not len(a) != len(b):` and built
            # a ValueError without raising it, so the mismatch check never
            # fired.  Raise when the tables are NOT row-aligned.
            if len(self.timeseries_data) != len(self.trajectories_data):
                raise ValueError('timeseries_data and trajectories_data does not match. You might be using an old version of featuresN.hdf5')
            self.valid_features = [x for x in self.timeseries_data.columns if x not in self.index_cols]
        except (TypeError, AttributeError, IOError, KeyError, tables.exceptions.HDF5ExtError):
            self.valid_features = None
            self.timeseries_data = None
class MarkersDrawer(FeatureReaderBase):
    """Draws per-worm overlays (index boxes, trajectories, skeletons) on the
    viewer image, optionally colouring them by a selected feature value."""

    def __init__(self, ui):
        super().__init__(ui)
        self.traj_colors = {}
        # trajectory drawing parameters
        self.n_points_traj = 250
        self.n_colors = 256
        # NOTE(review): matplotlib.cm.get_cmap is deprecated since
        # matplotlib 3.7 (removed in 3.9); switch to
        # matplotlib.colormaps["bwr"] once the minimum supported
        # matplotlib allows it.
        cmap = matplotlib.cm.get_cmap("bwr")
        palette = [cmap(x) for x in np.linspace(0, 1, self.n_colors)]
        #palette = sns.color_palette("RdBu_r", self.n_colors)
        # BUGFIX: np.int was removed in NumPy 1.20; use the builtin int
        # (same dtype as before, since np.int was an alias for it).
        palette = np.round(np.array(palette)*255).astype(int)
        self.palette = [QColor(*x) for x in palette]
        # map combo-box labels to their indices for quick comparisons
        self.drawT = {x: self.ui.comboBox_drawType.findText(x , flags=Qt.MatchContains)
                      for x in ['boxes', 'traj', 'skel']}
        self.showT = {x: self.ui.comboBox_showLabels.findText(x , flags=Qt.MatchContains)
                      for x in ['hide', 'all', 'filter']}
        self.ui.comboBox_showLabels.setCurrentIndex(self.showT['all'])
        # any change in the display options triggers a redraw
        self.ui.comboBox_showLabels.currentIndexChanged.connect(self.updateImage)
        self.ui.comboBox_drawType.currentIndexChanged.connect(self.updateImage)
        self.ui.feature_column.currentIndexChanged.connect(self.change_feature)
        self.ui.feat_max_value.valueChanged.connect(self.updateImage)
        self.ui.feat_min_value.valueChanged.connect(self.updateImage)
        self.ui.is_color_features.stateChanged.connect(self.updateImage)
        self.enable_color_feats(False)
        self.ui.spinBox_step.valueChanged.connect(self.updateImage)

    def updateSkelFile(self, skeletons_file):
        """Reload feature data for the new file and repopulate the
        feature-colouring widgets."""
        # reset the colour-by-feature state before loading the new file
        self.ui.is_color_features.setChecked(False)
        super().updateSkelFile(skeletons_file)
        self.ui.feature_column.clear()
        if self.timeseries_data is None:
            #no feature data
            self.enable_color_feats(False)
        else:
            self.enable_color_feats(True)
            self.ui.feature_column.addItems(self.valid_features)
        self._h_find_feat_limits()

    def change_feature(self):
        # recompute the value range for the newly selected feature column
        self._h_find_feat_limits()
        self.updateImage()

    def _h_find_feat_limits(self):
        """Set the min/max spinboxes to the 2%-98% quantiles of the current
        feature column (zeros when no feature data is loaded)."""
        self.feat_column = str(self.ui.feature_column.currentText())
        print(self.feat_column)
        if self.feat_column and self.timeseries_data is not None:
            f_max = self.timeseries_data[self.feat_column].max()
            f_min = self.timeseries_data[self.feat_column].min()
            q1, q2 = self.timeseries_data[self.feat_column].quantile([0.02, 0.98])
        else:
            f_min, f_max, q1, q2 = 0,0,0,0
        self.ui.feat_max_value.setRange(f_min, f_max)
        self.ui.feat_min_value.setRange(f_min, f_max)
        self.ui.feat_min_value.setValue(q1)
        self.ui.feat_max_value.setValue(q2)

    def enable_color_feats(self, value):
        # toggle all the feature-colouring widgets together
        self.ui.feature_column.setEnabled(value)
        self.ui.feat_min_value.setEnabled(value)
        self.ui.feat_max_value.setEnabled(value)
        self.ui.is_color_features.setEnabled(value)

    def _h_assign_feat_color(self, irow):
        """Map the feature value of table row `irow` to a palette colour;
        NaNs and inverted ranges fall back to black."""
        feat_val = self.timeseries_data.loc[irow, self.feat_column]
        # NaN check (NaN != NaN)
        if (feat_val != feat_val):
            return Qt.black
        #this function can and should be optimized
        f_min = self.ui.feat_min_value.value()
        f_max = self.ui.feat_max_value.value()
        if f_min == f_max: #dummy range in case all the values are the same
            f_min, f_max = -1, 1
        elif f_min > f_max:
            return Qt.black
        # normalise into [0, 1] and index into the palette
        nn = np.clip((feat_val - f_min)/(f_max - f_min), 0, 1)
        ind = int(np.round(nn*(self.n_colors-1)))
        col = self.palette[ind]
        return col

    def draw_worm_markers(self, image):
        '''
        Draw the overlay selected in the GUI (boxes, trajectories or
        skeletons) for every worm present in the current frame.
        '''
        if not self.worm_index_type in self.frame_data or \
           self.ui.comboBox_showLabels.currentIndex() == self.showT['hide']:
            return
        if hasattr(self, 'current_worm_index'):
            current_index = self.current_worm_index
        else:
            current_index = -1
        painter = QPainter()
        painter.begin(image)
        # scale font and pen width with the image size; pen width kept odd
        self.fontsize = max(1, max(image.height(), image.width()) // 120)
        penwidth = max(1, max(image.height(), image.width()) // 800)
        self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1
        if not self.label_type in self.frame_data:
            self.frame_data[self.label_type] = self.wlab['U']
        for row_id, row_data in self.frame_data.iterrows():
            # check if the coordinates are nan
            if np.isnan(row_data['coord_x']) or np.isnan(row_data['coord_y']):
                continue
            #if select between showing filtered index or not
            # NOTE(review): 'filter' currently skips every row -- confirm
            # whether a per-row filtering condition was intended here.
            if self.ui.comboBox_showLabels.currentIndex() == self.showT['filter']:
                continue
            is_current_index = current_index == int(row_data[self.worm_index_type])
            cb_ind = self.ui.comboBox_drawType.currentIndex()
            if cb_ind == self.drawT['boxes']:
                self.draw_boxes(painter, row_id, row_data, is_current_index)
            elif cb_ind == self.drawT['traj']:
                self.draw_trajectories(painter, row_data, is_current_index)
            elif cb_ind == self.drawT['skel']:
                self.draw_skeletons(painter, row_id, row_data, is_current_index)
        painter.end()

    def _h_get_trajectory(self, worm_index, current_frame):
        # last n_points_traj positions of this worm up to current_frame,
        # subsampled every frame_step frames
        worm_data = self.traj_worm_index_grouped.get_group(worm_index)
        valid_index = worm_data.index[worm_data['frame_number']<= current_frame]
        ini = max(0, valid_index.size - self.frame_step*self.n_points_traj)
        traj_ind = valid_index.values[ini::self.frame_step]
        traj_data = worm_data.loc[traj_ind]
        return traj_data

    def draw_trajectories(self, painter, row_data, is_current_index):
        """Draw the recent trajectory of one worm, either coloured segment
        by segment from the selected feature or with a fixed random
        per-worm colour."""
        if self.traj_worm_index_grouped is None:
            return
        worm_index = int(row_data[self.worm_index_type])
        current_frame = row_data['frame_number']
        traj_data = self._h_get_trajectory(worm_index, current_frame)
        traj_data = traj_data.dropna(subset=['coord_x', 'coord_y'])
        x_v = traj_data['coord_x'].round()
        y_v = traj_data['coord_y'].round()
        points = [QPointF(*map(int, c)) for c in zip(x_v, y_v)]
        if self.ui.is_color_features.isChecked():
            vec_color = [self._h_assign_feat_color(x) for x in traj_data.index]
            pen = QPen()
            pen.setWidth(self.penwidth)
            # draw segment by segment so each can carry its own colour
            for p1, p2, c in zip(points[1:], points[:-1], vec_color):
                pen.setColor(c)
                painter.setPen(pen)
                painter.drawLine(p1, p2)
        else:
            pol = QPolygonF()
            for p in points:
                pol.append(p)
            # lazily assign a stable random colour per worm index
            if not worm_index in self.traj_colors:
                self.traj_colors[worm_index] = QColor(*np.random.randint(50, 230, 3))
            col = self.traj_colors[worm_index]
            pen = QPen()
            pen.setWidth(self.penwidth)
            pen.setColor(col)
            painter.setPen(pen)
            painter.drawPolyline(pol)

    def draw_boxes(self, painter, row_id, row_data, is_current_index):
        '''
        Draw a labelled bounding box around one worm; the currently
        selected worm additionally gets a small filled corner marker.
        '''
        worm_index = int(row_data[self.worm_index_type])
        x = int(round(row_data['coord_x']))
        y = int(round(row_data['coord_y']))
        # colour by manual label, or by feature value when requested
        # (BUGFIX: the original computed the label colour twice; the first,
        # unconditional assignment was dead code and has been removed.)
        if not self.ui.is_color_features.isChecked():
            label_color = self.wlabC[int(row_data[self.label_type])]
        else:
            label_color = self._h_assign_feat_color(row_id)
        pen = QPen()
        pen.setColor(label_color)
        pen.setWidth(self.penwidth)
        painter.setPen(pen)
        painter.setFont(QFont('Decorative', self.fontsize))
        painter.drawText(x, y, str(worm_index))
        bb = row_data['roi_size']
        # NOTE(review): bb comes from a pandas row and may be a float;
        # confirm QPainter.drawRect accepts these argument types in the
        # PyQt version in use.
        painter.drawRect(x - bb / 2, y - bb / 2, bb, bb)
        if is_current_index:
            b_size = bb//5
            offset = bb/2 - b_size
            painter.fillRect(x + offset, y + offset, b_size, b_size, QBrush(label_color))

    def draw_skeletons(self, painter, roi_id, row_data, is_current_index):
        """Read this worm's skeleton/contour coordinates from the skeletons
        file and draw them, marking the head with a white circle."""
        if self.traj_worm_index_grouped is None:
            return
        if self.coordinates_fields is None:
            return
        worm_index = int(row_data[self.worm_index_type])
        skel_id = int(row_data['skeleton_id'])
        if self.coordinates_fields is None or skel_id < 0:
            return
        skel_dat = {}
        with tables.File(self.skeletons_file, 'r') as skel_file_id:
            # print(self.coordinates_group)
            # print(self.coordinates_fields)
            for ff, tt in self.coordinates_fields.items():
                field = self.coordinates_group + ff
                if field in skel_file_id:
                    dat = skel_file_id.get_node(field)[skel_id]
                    # convert coordinates into pixels
                    dat /= self.microns_per_pixel
                    if self.stage_position_pix is not None and self.stage_position_pix.size > 0:
                        #subtract stage motion if necessary
                        dat -= self.stage_position_pix[self.frame_number]
                    #dat[:, 0] = (dat[:, 0] - roi_corner[0] + 0.5) * c_ratio_x
                    #dat[:, 1] = (dat[:, 1] - roi_corner[1] + 0.5) * c_ratio_y
                else:
                    # missing node: placeholder that draws nothing
                    dat = np.full((1,2), np.nan)
                skel_dat[tt] = dat
        if 'is_good_skel' in row_data and row_data['is_good_skel'] == 0:
            skel_colors = BAD_SKEL_COLOURS
        else:
            skel_colors = GOOD_SKEL_COLOURS
        qPlg = {}
        for tt, dat in skel_dat.items():
            qPlg[tt] = QPolygonF()
            for p in dat:
                #do not add point if it is nan
                if p[0] == p[0]:
                    qPlg[tt].append(QPointF(*p))
        if not qPlg or len(qPlg['skeleton']) == 0:
            #all nan skeleton nothing to do here...
            return
        pen = QPen()
        pen.setWidth(0.5)
        # pen.setColor(QColor(col))
        # painter.setPen(pen)
        # painter.drawPolyline(pol_v)
        for k, pol_v in qPlg.items():
            color = skel_colors[k]
            pen.setColor(QColor(*color))
            painter.setPen(pen)
            painter.drawPolyline(pol_v)
        # mark the head: a white-filled circle at the first skeleton point
        pen.setColor(Qt.black)
        painter.setBrush(Qt.white)
        painter.setPen(pen)
        radius = 2
        painter.drawEllipse(qPlg['skeleton'][0], radius, radius)
        painter.drawEllipse(QPointF(0,0), radius, radius)
class PlotCommunicator(FeatureReaderBase, ROIManager):
    """Glue between the viewer and the feature-plotting window."""

    def __init__(self, ui=''):
        super().__init__(ui)
        self.plotter = None
        self.ui.pushButton_plot.setEnabled(False)
        self.ui.pushButton_plot.clicked.connect(self.show_plot)

    def closePrev(self):
        # Dispose of any previously opened plot window.
        if self.plotter is None:
            return
        self.plotter.close()
        self.plotter = None

    def updateSkelFile(self, skeletons_file):
        super().updateSkelFile(skeletons_file)
        self.closePrev()
        # plotting only makes sense once feature data has been loaded
        self.ui.pushButton_plot.setEnabled(self.timeseries_data is not None)

    def show_plot(self):
        """Open a fresh plot window on top of the viewer and draw the
        current worm/feature selection."""
        self.closePrev()
        self.plotter = PlotFeatures(self.skeletons_file,
                                    self.timeseries_data,
                                    self.traj_worm_index_grouped,
                                    self.time_units,
                                    self.xy_units,
                                    self.fps,
                                    parent=self)
        self.plotter.setWindowFlags(self.plotter.windowFlags() | Qt.WindowStaysOnTopHint)
        self.plotter.show()
        self.update_plot()

    def update_plot(self):
        # refresh only when a plot window is currently open
        if self.plotter:
            self.plotter.plot(self.current_worm_index, self.feat_column)
class MWTrackerViewer_GUI( MarkersDrawer, PlotCommunicator,
FoodContourDrawer, BlobLabeler, IntensityLabeler, TrajectoryEditor, WellsDrawer):
    def __init__(self, ui='', argv=''):
        # Build the default UI when none is injected (callers may pass
        # their own ui object).
        if not ui:
            super().__init__(Ui_MWTrackerViewer())
        else:
            super().__init__(ui)
        self.setWindowTitle("Multi-Worm Viewer")
        # first command-line argument (if any) is the video file to open
        self.vfilename = '' if len(argv) <= 1 else argv[1]
        self.videos_dir = r"/Volumes/behavgenom$/GeckoVideo/MaskedVideos/"
        self.results_dir = ''
        self.skeletons_file = ''
        # manual indexing is the default worm-identity column
        self.worm_index_type = 'worm_index_manual'
        self.frame_data = None
        # wire up the label-type selector and the save button
        self.ui.comboBox_labelType.currentIndexChanged.connect(self.selectWormIndexType)
        self.ui.pushButton_save.clicked.connect(self.saveData)
        # select worm ROI when doubleclick a worm
        self.mainImage._canvas.mouseDoubleClickEvent = self.selectWorm
        self.mainImage._canvas.mouseRightClickEvent = self.toggleWellStatus
        # replot when either ROI selector changes
        self.ui.comboBox_ROI1.activated.connect(self.update_plot)
        self.ui.comboBox_ROI2.activated.connect(self.update_plot)
    def saveData(self):
        '''save data from manual labelling. pytables saving format is more convenient than pandas'''
        if os.name == 'nt':
            # On Windows the paths returned by QFileDialog use / as the file
            # separation character. We need to correct it.
            for field_name in ['vfilename', 'skeletons_file']:
                setattr(
                    self, field_name, getattr(
                        self, field_name).replace(
                        '/', os.sep))
        has_skeletons_file = ((self.skeletons_file is not None)
                              and (self.skeletons_file != ''))
        if has_skeletons_file:
            save_modified_table(self.skeletons_file,
                                self.trajectories_data,
                                'trajectories_data')
        if self.is_fov_tosplit:
            if has_skeletons_file:
                self.fovsplitter.write_fov_wells_to_file(self.skeletons_file)
            else:
                warnings.warn('No skeletons file. Saving wells info in masked video')
                # close the masked video before writing into it, then reload
                self.fid.close()
                self.fovsplitter.write_fov_wells_to_file(self.vfilename)
                # self.fid = tables.File(self.vfilename, 'r')
                self.updateVideoFile(self.vfilename)
        if has_skeletons_file:
            # reload so the viewer reflects the file that was just written
            self.updateSkelFile(self.skeletons_file)
    def updateVideoFile(self, vfilename):
        # Reload the video and refresh the displayed frame.
        super().updateVideoFile(vfilename)
        self.updateImage()
    def updateSkelFile(self, skeletons_file):
        """Load the trajectories table and normalise its columns so manual
        labelling always has `worm_index_manual` / `has_skeleton` available."""
        super().updateSkelFile(skeletons_file)
        if self.trajectories_data is None:
            #empty file nothing to do here
            self.updateImage()
            return
        #correct the `worm_index_N` to the actual name `worm_index_manual`
        if 'worm_index_N' in self.trajectories_data:
            self.trajectories_data = self.trajectories_data.rename(
                columns={'worm_index_N': 'worm_index_manual'})
        #if this is really a trajectories_data not (_features.hdf5) add `worm_index_manual` if it does not exists
        if not 'worm_index_manual' in self.trajectories_data and not self.is_estimated_trajectories_data:
            self.trajectories_data['worm_label'] = self.wlab['U']
            self.trajectories_data['worm_index_manual'] = self.trajectories_data['worm_index_joined']
        #deactivate the save option if we are dealing with estimated data...
        self.ui.pushButton_save.setEnabled(not self.is_estimated_trajectories_data)
        #add this column if it does not exist
        if not 'has_skeleton' in self.trajectories_data:
            self.trajectories_data['has_skeleton'] = (
                self.trajectories_data['skeleton_id'] >= 0).astype(np.uint8)
        self.updateWormIndexTypeMenu()
        self.updateImage()
def updateWormIndexTypeMenu(self):
possible_indexes = [x.replace('worm_index_', '') for x in self.trajectories_data.columns if x.startswith('worm_index_')]
assert len(set(possible_indexes)) == len(possible_indexes) #all indexes ending must be different
menu_names = sorted([x + ' | |
<reponame>polivbr/pulumi-azure-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['ExtensionArgs', 'Extension']
@pulumi.input_type
class ExtensionArgs:
    def __init__(__self__, *,
                 arc_setting_name: pulumi.Input[str],
                 cluster_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
                 created_at: Optional[pulumi.Input[str]] = None,
                 created_by: Optional[pulumi.Input[str]] = None,
                 created_by_type: Optional[pulumi.Input[Union[str, 'CreatedByType']]] = None,
                 extension_name: Optional[pulumi.Input[str]] = None,
                 force_update_tag: Optional[pulumi.Input[str]] = None,
                 last_modified_at: Optional[pulumi.Input[str]] = None,
                 last_modified_by: Optional[pulumi.Input[str]] = None,
                 last_modified_by_type: Optional[pulumi.Input[Union[str, 'CreatedByType']]] = None,
                 protected_settings: Optional[Any] = None,
                 publisher: Optional[pulumi.Input[str]] = None,
                 settings: Optional[Any] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 type_handler_version: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Extension resource.
        :param pulumi.Input[str] arc_setting_name: The name of the proxy resource holding details of HCI ArcSetting information.
        :param pulumi.Input[str] cluster_name: The name of the cluster.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
        :param pulumi.Input[str] created_at: The timestamp of resource creation (UTC).
        :param pulumi.Input[str] created_by: The identity that created the resource.
        :param pulumi.Input[Union[str, 'CreatedByType']] created_by_type: The type of identity that created the resource.
        :param pulumi.Input[str] extension_name: The name of the machine extension.
        :param pulumi.Input[str] force_update_tag: How the extension handler should be forced to update even if the extension configuration has not changed.
        :param pulumi.Input[str] last_modified_at: The timestamp of resource last modification (UTC)
        :param pulumi.Input[str] last_modified_by: The identity that last modified the resource.
        :param pulumi.Input[Union[str, 'CreatedByType']] last_modified_by_type: The type of identity that last modified the resource.
        :param Any protected_settings: Protected settings (may contain secrets).
        :param pulumi.Input[str] publisher: The name of the extension handler publisher.
        :param Any settings: Json formatted public settings for the extension.
        :param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
        :param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
        """
        # Required identifiers of the parent ArcSetting resource.
        pulumi.set(__self__, "arc_setting_name", arc_setting_name)
        pulumi.set(__self__, "cluster_name", cluster_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly provided, so the
        # Pulumi engine can distinguish "unset" from an explicit value.
        if auto_upgrade_minor_version is not None:
            pulumi.set(__self__, "auto_upgrade_minor_version", auto_upgrade_minor_version)
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if created_by is not None:
            pulumi.set(__self__, "created_by", created_by)
        if created_by_type is not None:
            pulumi.set(__self__, "created_by_type", created_by_type)
        if extension_name is not None:
            pulumi.set(__self__, "extension_name", extension_name)
        if force_update_tag is not None:
            pulumi.set(__self__, "force_update_tag", force_update_tag)
        if last_modified_at is not None:
            pulumi.set(__self__, "last_modified_at", last_modified_at)
        if last_modified_by is not None:
            pulumi.set(__self__, "last_modified_by", last_modified_by)
        if last_modified_by_type is not None:
            pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
        if protected_settings is not None:
            pulumi.set(__self__, "protected_settings", protected_settings)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if settings is not None:
            pulumi.set(__self__, "settings", settings)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if type_handler_version is not None:
            pulumi.set(__self__, "type_handler_version", type_handler_version)
    # The accessors below mirror the constructor arguments one-to-one;
    # pulumi.get/pulumi.set read and write the storage managed by the
    # @pulumi.input_type decorator. Generated code — do not hand-edit lightly.
    @property
    @pulumi.getter(name="arcSettingName")
    def arc_setting_name(self) -> pulumi.Input[str]:
        """
        The name of the proxy resource holding details of HCI ArcSetting information.
        """
        return pulumi.get(self, "arc_setting_name")
    @arc_setting_name.setter
    def arc_setting_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "arc_setting_name", value)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> pulumi.Input[str]:
        """
        The name of the cluster.
        """
        return pulumi.get(self, "cluster_name")
    @cluster_name.setter
    def cluster_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="autoUpgradeMinorVersion")
    def auto_upgrade_minor_version(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
        """
        return pulumi.get(self, "auto_upgrade_minor_version")
    @auto_upgrade_minor_version.setter
    def auto_upgrade_minor_version(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_upgrade_minor_version", value)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[pulumi.Input[str]]:
        """
        The timestamp of resource creation (UTC).
        """
        return pulumi.get(self, "created_at")
    @created_at.setter
    def created_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_at", value)
    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[pulumi.Input[str]]:
        """
        The identity that created the resource.
        """
        return pulumi.get(self, "created_by")
    @created_by.setter
    def created_by(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created_by", value)
    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[pulumi.Input[Union[str, 'CreatedByType']]]:
        """
        The type of identity that created the resource.
        """
        return pulumi.get(self, "created_by_type")
    @created_by_type.setter
    def created_by_type(self, value: Optional[pulumi.Input[Union[str, 'CreatedByType']]]):
        pulumi.set(self, "created_by_type", value)
    @property
    @pulumi.getter(name="extensionName")
    def extension_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the machine extension.
        """
        return pulumi.get(self, "extension_name")
    @extension_name.setter
    def extension_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "extension_name", value)
    @property
    @pulumi.getter(name="forceUpdateTag")
    def force_update_tag(self) -> Optional[pulumi.Input[str]]:
        """
        How the extension handler should be forced to update even if the extension configuration has not changed.
        """
        return pulumi.get(self, "force_update_tag")
    @force_update_tag.setter
    def force_update_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "force_update_tag", value)
    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[pulumi.Input[str]]:
        """
        The timestamp of resource last modification (UTC)
        """
        return pulumi.get(self, "last_modified_at")
    @last_modified_at.setter
    def last_modified_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_modified_at", value)
    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[pulumi.Input[str]]:
        """
        The identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by")
    @last_modified_by.setter
    def last_modified_by(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_modified_by", value)
    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[pulumi.Input[Union[str, 'CreatedByType']]]:
        """
        The type of identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by_type")
    @last_modified_by_type.setter
    def last_modified_by_type(self, value: Optional[pulumi.Input[Union[str, 'CreatedByType']]]):
        pulumi.set(self, "last_modified_by_type", value)
    @property
    @pulumi.getter(name="protectedSettings")
    def protected_settings(self) -> Optional[Any]:
        """
        Protected settings (may contain secrets).
        """
        return pulumi.get(self, "protected_settings")
    @protected_settings.setter
    def protected_settings(self, value: Optional[Any]):
        pulumi.set(self, "protected_settings", value)
    @property
    @pulumi.getter
    def publisher(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the extension handler publisher.
        """
        return pulumi.get(self, "publisher")
    @publisher.setter
    def publisher(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "publisher", value)
    @property
    @pulumi.getter
    def settings(self) -> Optional[Any]:
        """
        Json formatted public settings for the extension.
        """
        return pulumi.get(self, "settings")
    @settings.setter
    def settings(self, value: Optional[Any]):
        pulumi.set(self, "settings", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the type of the extension; an example is "CustomScriptExtension".
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="typeHandlerVersion")
    def type_handler_version(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the version of the script handler.
        """
        return pulumi.get(self, "type_handler_version")
    @type_handler_version.setter
    def type_handler_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type_handler_version", value)
class Extension(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arc_setting_name: Optional[pulumi.Input[str]] = None,
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
created_at: Optional[pulumi.Input[str]] = None,
created_by: Optional[pulumi.Input[str]] = None,
created_by_type: Optional[pulumi.Input[Union[str, 'CreatedByType']]] = None,
extension_name: Optional[pulumi.Input[str]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
last_modified_at: Optional[pulumi.Input[str]] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
last_modified_by_type: Optional[pulumi.Input[Union[str, 'CreatedByType']]] = None,
protected_settings: Optional[Any] = None,
publisher: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Details of a particular extension in HCI Cluster.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arc_setting_name: The name of the proxy resource holding details of HCI ArcSetting information.
:param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param pulumi.Input[str] cluster_name: The name of the cluster.
:param pulumi.Input[str] created_at: The timestamp of resource creation (UTC).
:param pulumi.Input[str] created_by: The identity that created the resource.
:param pulumi.Input[Union[str, 'CreatedByType']] created_by_type: The type of identity that created the resource.
:param pulumi.Input[str] extension_name: The name of the machine extension.
:param pulumi.Input[str] force_update_tag: How the extension handler should be forced to update even if the extension configuration has not changed.
:param pulumi.Input[str] last_modified_at: The timestamp of resource last modification (UTC)
:param pulumi.Input[str] last_modified_by: The identity that last modified the resource.
:param pulumi.Input[Union[str, 'CreatedByType']] last_modified_by_type: The type of identity that last modified the resource.
:param Any protected_settings: Protected settings (may contain secrets).
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param Any settings: Json formatted public settings for the extension.
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param | |
<gh_stars>1-10
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import initializers, regularizers, constraints
from tensorflow.python.keras.layers import Layer
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.keras.utils import conv_utils
sys.path.append(os.path.abspath('./'))
sys.path.append(os.path.abspath('./gan'))
import utils
class ConditionalInstanceNormalization(Layer):
    """Conditional Instance normalization layer.
    Normalize the activations of the previous layer at each step,
    i.e. applies a transformation that maintains the mean activation
    close to 0 and the activation standard deviation close to 1.
    Each class has it own normalization parametes.
    # Arguments
        number_of_classes: Number of classes, 10 for cifar10.
        axis: Integer, the axis that should be normalized
            (typically the features axis).
            For instance, after a `Conv2D` layer with
            `data_format="channels_first"`,
            set `axis=1` in `InstanceNormalization`.
            Setting `axis=None` will normalize all values in each instance of the batch.
            Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors.
        epsilon: Small float added to variance to avoid dividing by zero.
        center: If True, add offset of `beta` to normalized tensor.
            If False, `beta` is ignored.
        scale: If True, multiply by `gamma`.
            If False, `gamma` is not used.
            When the next layer is linear (also e.g. `nn.relu`),
            this can be disabled since the scaling
            will be done by the next layer.
        beta_initializer: Initializer for the beta weight.
        gamma_initializer: Initializer for the gamma weight.
        beta_regularizer: Optional regularizer for the beta weight.
        gamma_regularizer: Optional regularizer for the gamma weight.
        beta_constraint: Optional constraint for the beta weight.
        gamma_constraint: Optional constraint for the gamma weight.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        Same shape as input.
    # References
        - [A Learned Representation For Artistic Style](https://arxiv.org/abs/1610.07629)
    """
    def __init__(self,
                 number_of_classes,
                 axis=None,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 **kwargs):
        super(ConditionalInstanceNormalization, self).__init__(**kwargs)
        self.number_of_classes = number_of_classes
        self.supports_masking = True
        self.axis = axis
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
    def build(self, input_shape):
        # The layer is called on a pair [features, class_labels], so
        # input_shape is a list of two shapes.
        ndim = len(input_shape[0])
        cls = input_shape[1]
        if len(cls) != 2:
            raise ValueError("Classes should be one dimensional")
        if self.axis == 0:
            raise ValueError('Axis cannot be zero')
        if (self.axis is not None) and (ndim == 2):
            raise ValueError('Cannot specify axis for rank 1 tensor')
        if self.axis is None:
            # One scalar gamma/beta per class.
            shape = (self.number_of_classes, 1)
        else:
            # One gamma/beta vector (per feature on `axis`) per class.
            shape = (self.number_of_classes, input_shape[0][self.axis])
        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        super(ConditionalInstanceNormalization, self).build(input_shape)
    def call(self, inputs, training=None):
        # inputs is [features, class_labels]; labels select per-class params.
        class_labels = K.squeeze(inputs[1], axis=1)
        inputs = inputs[0]
        input_shape = K.int_shape(inputs)
        # NOTE(review): when self.axis is None the batch axis (0) stays in
        # reduction_axes, so statistics are computed over the whole batch,
        # not per instance as the docstring suggests — confirm intent.
        reduction_axes = list(range(0, len(input_shape)))
        if self.axis is not None:
            del reduction_axes[self.axis]
            del reduction_axes[0]
        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev
        # Broadcast per-sample (gathered by class label) parameters over the
        # remaining axes.
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[0] = K.shape(inputs)[0]
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]
        if self.scale:
            broadcast_gamma = K.reshape(K.gather(self.gamma, class_labels), broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(K.gather(self.beta, class_labels), broadcast_shape)
            normed = normed + broadcast_beta
        return normed
    def compute_output_shape(self, input_shape):
        # Output has the shape of the features input (labels are consumed).
        return input_shape[0]
    def get_config(self):
        config = {
            'number_of_classes': self.number_of_classes,
            'axis': self.axis,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer': initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(ConditionalInstanceNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class ConditionalBatchNormalization(Layer):
    """Conditional batch normalization layer (per-class gamma/beta).
    Normalize the activations of the previous layer at each batch,
    i.e. applies a transformation that maintains the mean activation
    close to 0 and the activation standard deviation close to 1.
    The layer is called on a pair ``[features, class_labels]`` and selects
    a per-class scale/offset via the labels.
    # Arguments
        number_of_classes: Number of classes (rows of gamma/beta).
        axis: Integer, the axis that should be normalized
            (typically the features axis).
            For instance, after a `Conv2D` layer with
            `data_format="channels_first"`,
            set `axis=1` in `BatchNormalization`.
        momentum: Momentum for the moving average.
        epsilon: Small float added to variance to avoid dividing by zero.
        center: If True, add offset of `beta` to normalized tensor.
            If False, `beta` is ignored.
        scale: If True, multiply by `gamma`.
            If False, `gamma` is not used.
        beta_initializer: Initializer for the beta weight.
        gamma_initializer: Initializer for the gamma weight.
        moving_mean_initializer: Initializer for the moving mean.
        moving_variance_initializer: Initializer for the moving variance.
        beta_regularizer: Optional regularizer for the beta weight.
        gamma_regularizer: Optional regularizer for the gamma weight.
        beta_constraint: Optional constraint for the beta weight.
        gamma_constraint: Optional constraint for the gamma weight.
    # Input shape
        A pair `[x, labels]`; `x` is arbitrary, `labels` has shape (batch, 1).
    # Output shape
        Same shape as the features input.
    # References
        - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
    """
    def __init__(self,
                 number_of_classes,
                 axis=-1,
                 momentum=0.99,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 moving_mean_initializer='zeros',
                 moving_variance_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 **kwargs):
        super(ConditionalBatchNormalization, self).__init__(**kwargs)
        self.number_of_classes = number_of_classes
        self.supports_masking = True
        self.axis = axis
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(moving_variance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
    def build(self, input_shape):
        # Called on [features_shape, labels_shape]; only the features shape
        # determines the parameter sizes.
        input_shape = input_shape[0]
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        shape = (dim, )
        # gamma/beta carry one row per class; moving stats are shared.
        if self.scale:
            self.gamma = self.add_weight((self.number_of_classes, dim),
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight((self.number_of_classes, dim),
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.moving_mean = self.add_weight(
            shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer,
            trainable=False)
        self.moving_variance = self.add_weight(
            shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer,
            trainable=False)
        self.built = True
    def call(self, inputs, training=None):
        # inputs is [features, class_labels]; labels select per-class params.
        class_labels = K.squeeze(inputs[1], axis=1)
        inputs = inputs[0]
        input_shape = K.int_shape(inputs)
        # Prepare broadcasting shape.
        ndim = len(input_shape)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        # Determines whether broadcasting is needed.
        # BUG FIX: the original compared a list against a `range` object
        # (`sorted(...) != range(ndim)[:-1]`), which is *always* unequal on
        # Python 3, silently forcing the broadcast path.
        needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])
        param_broadcast = [1] * len(input_shape)
        param_broadcast[self.axis] = input_shape[self.axis]
        param_broadcast[0] = K.shape(inputs)[0]
        if self.scale:
            broadcast_gamma = K.reshape(K.gather(self.gamma, class_labels), param_broadcast)
        else:
            broadcast_gamma = None
        if self.center:
            broadcast_beta = K.reshape(K.gather(self.beta, class_labels), param_broadcast)
        else:
            broadcast_beta = None
        def _scale_and_shift(x):
            # Apply the per-class affine transform, skipping disabled terms.
            # BUG FIX: the original unconditionally computed
            # `out * broadcast_gamma + broadcast_beta`, which raised on None
            # whenever scale=False or center=False.
            if broadcast_gamma is not None:
                x = x * broadcast_gamma
            if broadcast_beta is not None:
                x = x + broadcast_beta
            return x
        normed, mean, variance = K.normalize_batch_in_training(
            inputs, gamma=None, beta=None,
            reduction_axes=reduction_axes, epsilon=self.epsilon)
        if training in {0, False}:
            # BUG FIX: the original returned `normed` here, dropping the
            # learned gamma/beta entirely when training was explicitly False.
            return _scale_and_shift(normed)
        self.add_update([K.moving_average_update(self.moving_mean,
                                                 mean,
                                                 self.momentum),
                         K.moving_average_update(self.moving_variance,
                                                 variance,
                                                 self.momentum)],
                        inputs)
        def normalize_inference():
            # Inference path: normalize with the moving statistics.
            if needs_broadcasting:
                # In this case we must explicitly broadcast all parameters.
                broadcast_moving_mean = K.reshape(self.moving_mean,
                                                  broadcast_shape)
                broadcast_moving_variance = K.reshape(self.moving_variance,
                                                      broadcast_shape)
                return K.batch_normalization(
                    inputs,
                    broadcast_moving_mean,
                    broadcast_moving_variance,
                    beta=None,
                    gamma=None,
                    epsilon=self.epsilon)
            else:
                return K.batch_normalization(
                    inputs,
                    self.moving_mean,
                    self.moving_variance,
                    beta=None,
                    gamma=None,
                    epsilon=self.epsilon)
        # Pick the normalized form corresponding to the training phase.
        out = K.in_train_phase(normed,
                               normalize_inference,
                               training=training)
        return _scale_and_shift(out)
    def compute_output_shape(self, input_shape):
        # Output has the shape of the features input (labels are consumed).
        return input_shape[0]
    def get_config(self):
        config = {
            'number_of_classes': self.number_of_classes,
            'axis': self.axis,
            'momentum': self.momentum,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer': initializers.serialize(self.gamma_initializer),
            'moving_mean_initializer': initializers.serialize(self.moving_mean_initializer),
            'moving_variance_initializer': initializers.serialize(self.moving_variance_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(ConditionalBatchNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class DecorelationNormalization(Layer):
    def __init__(self,
                 axis=-1,
                 momentum=0.99,
                 epsilon=1e-3,
                 m_per_group=0,
                 decomposition='cholesky',
                 iter_num=5,
                 instance_norm=0,
                 renorm=False,
                 data_format=None,
                 moving_mean_initializer='zeros',
                 moving_cov_initializer='identity',
                 device='cpu',
                 **kwargs):
        """Configure a decorrelation (whitening) normalization layer.

        decomposition selects the whitening scheme; the *_wm variants
        presumably additionally maintain a whitening matrix — confirm against
        the rest of the class. iter_num only applies to the iterative schemes.
        """
        assert decomposition in ['cholesky', 'zca', 'pca', 'iter_norm',
                                 'cholesky_wm', 'zca_wm', 'pca_wm', 'iter_norm_wm']
        super(DecorelationNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.momentum = momentum
        self.epsilon = epsilon
        self.m_per_group = m_per_group
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        # self.moving_cov_initializer = initializers.get(moving_cov_initializer)
        self.axis = axis
        self.renorm = renorm
        self.decomposition = decomposition
        self.iter_num = iter_num
        self.instance_norm = instance_norm
        self.device = device
        self.data_format = conv_utils.normalize_data_format(data_format)
def matrix_initializer(self, shape, dtype=tf.float32, partition_info=None):
moving_convs = []
for i in range(shape[0]):
moving_conv = tf.expand_dims(tf.eye(shape[1], dtype=dtype), 0)
moving_convs.append(moving_conv)
moving_convs = tf.concat(moving_convs, 0)
return moving_convs
def build(self, input_shape):
assert self.data_format == 'channels_last'
dim = input_shape.as_list()[self.axis]
if dim is None:
raise ValueError('Axis ' + str(self.axis) + ' of '
'input tensor should have a defined dimension '
'but the layer received an input with shape ' | |
<reponame>emre2038/wazimap-ng
import uuid
import pathlib
import os
from collections import OrderedDict, defaultdict, Mapping
import logging
# Use getLogger so this logger participates in the logging hierarchy and
# picks up application-level handler configuration; instantiating
# logging.Logger directly creates an unregistered, unconfigured logger.
logger = logging.getLogger(__name__)
def format_perc(n):
    """Render a fraction as a percentage with two decimals, e.g. 0.5 -> '50.00%'."""
    return "{:.2%}".format(n)
def format_float(n, decimals=2):
    """Round n to `decimals` places and render it with thousands separators."""
    rounded = round(n, decimals)
    return "{:,}".format(rounded)
def format_int(n):
    """Round n to the nearest integer and render it with thousands separators."""
    return "{:,}".format(round(n))
def get_random_filename(filename):
ext = pathlib.Path(filename).suffix
filename = os.path.join(str(uuid.uuid4()), os.path.extsep, ext)
return filename
def truthy(s):
    """Parse s as a boolean.

    Returns None for none-ish inputs, True for "true"/"1"/1 (case-insensitive),
    False otherwise.

    BUG FIX: the original compared `str(s) == 1` — a string can never equal
    the integer 1, so truthy(1) and truthy("1") were always False. Compare
    against the string "1" instead.
    """
    if noney(s):
        return None
    return str(s).lower() == "true" or str(s) == "1"

def noney(n):
    """Return True when n is None or the string "none" (any case)."""
    return n is None or str(n).lower() == "none"

def int_or_none(i):
    """Coerce i to int, mapping none-ish values ("none"/None) to None."""
    if noney(i):
        return None
    return int(i)
def sort_list_using_order(lst, order, key_func=lambda x: x):
    """Sort `lst` by the position of each element's key in `order`.

    Elements whose key is absent from `order` sort last (stable). An empty
    list yields []; a missing/empty order returns `lst` unchanged.
    """
    if not lst:
        return []
    if order is None or not len(order):
        return lst
    positions = {entry: rank for rank, entry in enumerate(order)}
    unknown = float("inf")
    return sorted(lst, key=lambda el: positions.get(key_func(el), unknown))
def mergedict(a, b, path=None, concatenate_arrays=True, update=True):
    """
    Derived from: http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge
    Recursively merges b into a (a is mutated) and returns a.

    concatenate_arrays - if True arrays are concatenated; otherwise they are
        merged element-wise and extra elements in b are appended
    update - if True values in b clobber values in a; otherwise a conflicting
        leaf raises an Exception

    BUG FIXES vs. the original:
    - mergedict never returned anything, so the element-wise list branch
      assigned None into a[key][idx]; it now returns `a`.
    - the recursive calls silently reset concatenate_arrays/update to their
      defaults; the flags are now propagated.
    - a longer list in b raised IndexError; extras are now appended.
    """
    if path is None:
        path = []
    for key in b:
        if key not in a:
            a[key] = b[key]
            continue
        if isinstance(a[key], dict) and isinstance(b[key], dict):
            mergedict(a[key], b[key], path + [str(key)], concatenate_arrays, update)
        elif a[key] == b[key]:
            pass  # same leaf value
        elif isinstance(a[key], list) and isinstance(b[key], list):
            if concatenate_arrays:
                a[key].extend(b[key])
            else:
                for idx, val in enumerate(b[key]):
                    if idx < len(a[key]):
                        a[key][idx] = mergedict(a[key][idx], val,
                                                path + [str(key), str(idx)],
                                                concatenate_arrays, update)
                    else:
                        a[key].append(val)
        elif update:
            a[key] = b[key]
        else:
            raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
    return a
def qsdict(qs, *args):
    """
    Convert a queryset (or any iterable of dict-like rows/objects) to a nested dict.

    :param qs: iterable of rows; each row contributes one leaf. Rows can be
        dicts or objects (fields resolve via attribute access).
    :param args: fields nested one inside the other; the last arg supplies the
        leaf value. A callable arg is invoked with the row. If the last arg is
        a tuple, all of its members' values form a list leaf. A list-valued
        field fans out into one subtree per element.
    :return: a nested dict
    e.g.
    qs = Queryset([{fa:1, fb:2, fc:3}, {fa:3, fb:4, fc:5}]),
    using fc as the value
    {
        1 : { 2 : 3 },
        3 : { 4 : 5 }
    }
    """
    if len(args) < 2:
        raise ValueError("Need at least two fields to nest dicts")
    args = list(args)

    def v(q, key):
        # Resolve `key` against row `q`: callable, attribute, or mapping key.
        if callable(key):
            return key(q)
        elif hasattr(q, key):
            return getattr(q, key)
        elif key in q:
            return q[key]
        else:
            return None

    def leaf(q):
        # Leaf value: a tuple as the last arg yields a list of field values.
        if isinstance(args[-1], tuple):
            return [v(q, el) for el in args[-1]]
        return v(q, args[-1])

    d = {}
    for q in qs:
        nested_dicts = [d]
        for idx, key in enumerate(args[:-2]):
            current_dict = nested_dicts[-1]
            value = v(q, key)
            if isinstance(value, list):
                # Fan out: every element of the list gets its own subtree
                # built from the remaining fields.
                for el in value:
                    current_dict[el] = qsdict([q], *args[idx + 1:])
                break
            if value not in current_dict:
                current_dict[value] = OrderedDict()
            nested_dicts.append(current_dict[value])
        else:
            # Reached the two innermost fields without fanning out on a list.
            current_dict = nested_dicts[-1]
            value = v(q, args[-2])
            if isinstance(value, list):
                # FIX (old TODO): tuples as the last arg are now honoured
                # when the second-to-last field is a list as well.
                for el in value:
                    current_dict[el] = leaf(q)
            else:
                current_dict[value] = leaf(q)
    return d
def expand_nested_list(lst, key):
    """
    Expand each row on the list stored under `key`, one copy per element:
    [{"a": "b", key: [1, 2, 3]}]
    becomes
    [
        {"a": "b", key: 1},
        {"a": "b", key: 2},
        {"a": "b", key: 3}
    ]
    (lazy: yields the expanded rows one at a time)
    """
    for entry in lst:
        for element in entry[key]:
            expanded = entry.copy()
            expanded[key] = element
            yield expanded
# Optional dependency: the in-module tests below are skipped when pytest is
# not installed.
try:
    import pytest
    pytest_available = True
except ImportError as error:
    pytest_available = False
    logger.warning("pytest not installed - some tests cannot be run.")
# Tests
def test_qdict_empty_input():
    # qsdict with no field arguments must raise, whatever the queryset is.
    if not pytest_available:
        return
    with pytest.raises(ValueError):
        qsdict([])
    with pytest.raises(ValueError):
        qsdict([[]])
def test_qdict_empty_row():
    # TODO: placeholder — no assertions implemented yet.
    if not pytest_available:
        return
def test_qdict_at_least_two_parameters():
    # A single field is not enough to build a nested dict.
    if not pytest_available:
        return
    with pytest.raises(ValueError):
        qsdict([{"a": "b"}], "a")
def test_qdict_basic_input():
    # One row, two fields: the first field keys the value of the second.
    result = qsdict([{"a": "b", "c": "d"}], "a", "c")
    assert result == {"b": "d"}
def test_qdict_two_rows():
    # Distinct first-field values produce separate top-level keys.
    rows = [{"a": "b", "c": "d"}, {"a": "c", "c": "e"}]
    assert qsdict(rows, "a", "c") == {"b": "d", "c": "e"}
def test_qdict_overwrites_value_with_two_parameters():
    # Later rows with the same key clobber earlier ones.
    rows = [{"a": "b", "c": "d"}, {"a": "b", "c": "f"}]
    assert qsdict(rows, "a", "c") == {"b": "f"}
def test_qdict_3_level_nesting():
    rows = [
        {"a": 1, "b": 2, "c": 3},
        {"a": 1, "b": 4, "c": 6},
    ]
    # Nest a -> b -> c ...
    assert qsdict(rows, "a", "b", "c") == {1: {2: 3, 4: 6}}
    # ... and the reverse nesting b -> a -> c.
    assert qsdict(rows, "b", "a", "c") == {2: {1: 3}, 4: {1: 6}}
def test_callable():
    rows = [
        {"a": 1, "b": 2, "c": 3},
        {"a": 1, "b": 4, "c": 6},
    ]
    # A callable argument is invoked with the row to produce the key.
    constant_key = qsdict(rows, "a", lambda x: "Hello World", "b", "c")
    assert constant_key == {1: {"Hello World": {2: 3, 4: 6}}}
    derived_key = qsdict(rows, "b", lambda x: x["a"] + 1, "c")
    assert derived_key == {2: {2: 3}, 4: {2: 6}}
def test_object_properties():
    # Rows may be plain objects; fields then resolve via attribute access.
    class Row:
        def __init__(self, a, b, c):
            self.a, self.b, self.c = a, b, c
    rows = [Row(1, 2, 3), Row(1, 4, 6)]
    assert qsdict(rows, "a", "b", "c") == {1: {2: 3, 4: 6}}
def test_long_input():
    """Five selectors nest four levels deep."""
    rows = [
        {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5},
        {"a": 1, "b": 2, "c": 3, "d": 7, "e": 8},
    ]
    assert qsdict(rows, "a", "b", "c", "d", "e") == {1: {2: {3: {4: 5, 7: 8}}}}
def test_array():
    """A list-valued selector fans the remaining levels out under each element."""
    rows = [
        {"a": 1, "b": ["x", "y"], "c": 4, "d": 5},
        {"a": 2, "b": ["x", "y"], "c": 4, "d": 6},
    ]
    assert qsdict(rows, "a", "b", "c", "d") == {
        1: {"x": {4: 5}, "y": {4: 5}},
        2: {"x": {4: 6}, "y": {4: 6}},
    }
def test_multiple_arrays():
    """Two list-valued selectors fan out independently (cartesian nesting)."""
    rows = [
        {"a": 1, "b": ["x", "y"], "c": 2, "d": ["z", "w"], "e": 5, "f": 6},
        {"a": 2, "b": ["x", "y"], "c": 4, "d": ["z", "w"], "e": 6, "f": 8},
    ]
    # the subtree below the second list-valued selector is identical for every
    # element of the first one, so name the shared structures once
    first_leaf = {"z": {5: 6}, "w": {5: 6}}
    second_leaf = {"z": {6: 8}, "w": {6: 8}}
    assert qsdict(rows, "a", "b", "c", "d", "e", "f") == {
        1: {"x": {2: first_leaf}, "y": {2: first_leaf}},
        2: {"x": {4: second_leaf}, "y": {4: second_leaf}},
    }
def test_array_at_the_end():
    """A list in the final position is stored verbatim as the leaf value."""
    rows = [
        {"a": 1, "b": 3, "c": 5, "d": ["x", "y"]},
        {"a": 2, "b": 4, "c": 6, "d": ["x", "y"]},
    ]
    assert qsdict(rows, "a", "b", "c", "d") == {
        1: {3: {5: ["x", "y"]}},
        2: {4: {6: ["x", "y"]}},
    }
def test_array_at_second_last_position():
    """A list in the key (second-to-last) position maps every element to the value."""
    rows = [
        {"a": 1, "b": 3, "c": ["x", "y"], "d": 5},
        {"a": 2, "b": 4, "c": ["x", "y"], "d": 6},
    ]
    assert qsdict(rows, "a", "b", "c", "d") == {
        1: {3: {"x": 5, "y": 5}},
        2: {4: {"x": 6, "y": 6}},
    }
def flatten_dict(d):
    """Recursively flatten a nested dictionary into a list of key-path rows.

    {
        a: {x: 2, y: 3},
        b: {x: 4, y: 5},
    }
    becomes
    [
        [a, x, 2],
        [a, y, 3],
        [b, x, 4],
        [b, y, 5],
    ]
    Used as a component of the pivot function.
    """
    # base case: a non-mapping leaf becomes a single one-element row
    if not isinstance(d, Mapping):
        return [[d]]
    rows = []
    for key, subtree in d.items():
        rows.extend([key] + tail for tail in flatten_dict(subtree))
    return rows
def rearrange(in_arrs, order):
"""
rearrange elements in a given list of arrays. The last element always remains in place
e.g.
d =[
[a, x, 2],
[a, y, 3],
[b, x, 4],
[b, y, 5],
]
| |
self._backSteps == 0:
lst.append(item)
else:
lst.insert(self._backSteps, item)
def _constructHeaders(self):
headers = super()._constructHeaders()
if self._coulombEnergy:
self._add_item(headers, 'Coulomb Energy (kJ/mole)')
if self._atomicVirial:
self._add_item(headers, 'Atomic Virial (kJ/mole)')
if self._nonbondedVirial:
self._add_item(headers, 'Nonbonded Virial (kJ/mole)')
if self._atomicPressure:
self._add_item(headers, 'Atomic Pressure (atm)')
if self._molecularVirial:
self._add_item(headers, 'Molecular Virial (kJ/mole)')
if self._molecularPressure:
self._add_item(headers, 'Molecular Pressure (atm)')
if self._molecularKineticEnergy:
self._add_item(headers, 'Molecular Kinetic Energy (kJ/mole)')
if self._globalParameterStates is not None:
for index in self._globalParameterStates.index:
self._add_item(headers, 'Energy[{}] (kJ/mole)'.format(index))
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(headers, name)
if self._energyDerivatives is not None:
for name in self._energyDerivatives:
self._add_item(headers, 'diff(E,{})'.format(name))
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for index in range(force.getNumCollectiveVariables()):
name = force.getCollectiveVariableName(index)
self._add_item(headers, name)
return headers
    def _constructReportValues(self, simulation, state):
        """Assemble one row of report values.

        NOTE(review): the items must be appended in exactly the same order as
        the column titles produced by _constructHeaders -- keep both in sync.
        """
        values = super()._constructReportValues(simulation, state)
        if self._computing:
            # Feed the current configuration to the pressure computer once;
            # every virial/pressure quantity below is derived from it.
            computer = self._pressureComputer
            computer.import_configuration(state)
            atomicVirial = computer.get_atomic_virial().value_in_unit(unit.kilojoules_per_mole)
            if self._coulombEnergy:
                coulombVirial = computer.get_coulomb_virial()
                self._add_item(values, coulombVirial.value_in_unit(unit.kilojoules_per_mole))
            if self._atomicVirial:
                self._add_item(values, atomicVirial)
            if self._nonbondedVirial:
                nonbondedVirial = computer.get_dispersion_virial() + computer.get_coulomb_virial()
                self._add_item(values, nonbondedVirial.value_in_unit(unit.kilojoules_per_mole))
            if self._atomicPressure:
                atomicPressure = computer.get_atomic_pressure()
                self._add_item(values, atomicPressure.value_in_unit(unit.atmospheres))
            if self._molecularVirial or self._molecularPressure:
                # forces are only needed for the molecular quantities
                forces = state.getForces(asNumpy=True)
                if self._molecularVirial:
                    molecularVirial = computer.get_molecular_virial(forces)
                    self._add_item(values, molecularVirial.value_in_unit(unit.kilojoules_per_mole))
                if self._molecularPressure:
                    molecularPressure = computer.get_molecular_pressure(forces)
                    self._add_item(values, molecularPressure.value_in_unit(unit.atmospheres))
            if self._molecularKineticEnergy:
                molKinEng = computer.get_molecular_kinetic_energy()
                self._add_item(values, molKinEng.value_in_unit(unit.kilojoules_per_mole))
        if self._globalParameterStates is not None:
            # Evaluate the potential energy at every requested parameter state,
            # switching context parameters only when they actually change and
            # restoring the original values afterwards.
            original = dict()
            for name in self._globalParameterStates.columns:
                original[name] = simulation.context.getParameter(name)
            latest = original.copy()
            for index, row in self._globalParameterStates.iterrows():
                for name, value in row.items():
                    if value != latest[name]:
                        simulation.context.setParameter(name, value)
                        latest[name] = value
                energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
                self._add_item(values, energy.value_in_unit(unit.kilojoules_per_mole))
            for name, value in original.items():
                if value != latest[name]:
                    simulation.context.setParameter(name, value)
        if self._globalParameters is not None:
            for name in self._globalParameters:
                self._add_item(values, simulation.context.getParameter(name))
        if self._energyDerivatives is not None:
            mystate = simulation.context.getState(getParameterDerivatives=True)
            derivative = mystate.getEnergyParameterDerivatives()
            for name in self._energyDerivatives:
                self._add_item(values, derivative[name])
        if self._collectiveVariables is not None:
            for force in self._collectiveVariables:
                for cv in force.getCollectiveVariableValues(simulation.context):
                    self._add_item(values, cv)
        return values
class XYZReporter(_AtomsMM_Reporter):
    """
    Writes a series of XYZ-format frames containing the coordinates, velocities,
    momenta, or forces on all atoms in a Simulation.
    .. note::
        Coordinates are expressed in nanometers, velocities in nanometer/picosecond, momenta in
        dalton*nanometer/picosecond, and forces in dalton*nanometer/picosecond^2.
        (NOTE(review): the unit table in __init__ actually uses angstroms -- confirm
        which statement is correct.)
    To use this reporter, create an XYZReporter object and append it to the Simulation's list of
    reporters.
    Keyword Args
    ------------
    output : str, default='positions'
        Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
        'forces'.
    groups : set(int), default=None
        Which force groups to consider in the force calculations. If this is `None`, then all
        force groups will be evaluated.
    """
    def __init__(self, file, reportInterval, **kwargs):
        self._output = kwargs.get('output', 'positions')
        self._groups = kwargs.get('groups', None)
        # one output unit per supported report kind
        unit_of = {
            'positions': unit.angstroms,
            'velocities': unit.angstroms/unit.picoseconds,
            'momenta': unit.dalton*unit.angstroms/unit.picoseconds,
            'forces': unit.dalton*unit.angstroms/unit.picoseconds**2,
        }
        if self._output not in unit_of:
            raise InputError('Unrecognizable keyword value')
        self._unit = unit_of[self._output]
        super().__init__(file, reportInterval, **kwargs)
        self._needsPositions = self._output == 'positions'
        self._needsVelocities = self._output in ['velocities', 'momenta']
        self._needsForces = self._output == 'forces'
    def _initialize(self, simulation, state):
        """Cache atom symbols, particle count and (for momenta) per-atom masses."""
        self._symbols = [atom.element.symbol for atom in simulation.topology.atoms()]
        system = simulation.system
        self._N = system.getNumParticles()
        if self._output == 'momenta':
            mass = [system.getParticleMass(i).value_in_unit(unit.dalton) for i in range(self._N)]
            # replicate the mass vector across x/y/z columns
            self._mass = np.vstack([mass, mass, mass]).transpose()*unit.dalton
    def _get_values(self, simulation, state):
        """Fetch the requested per-atom quantity, converted to self._unit."""
        if self._output == 'positions':
            data = state.getPositions(asNumpy=True)
        elif self._output == 'velocities':
            data = state.getVelocities(asNumpy=True)
        elif self._output == 'momenta':
            data = self._mass*state.getVelocities(asNumpy=True)
        elif self._groups is None:
            data = state.getForces(asNumpy=True)
        else:
            # a restricted force-group evaluation needs a fresh State
            group_state = simulation.context.getState(getForces=True, groups=self._groups)
            data = group_state.getForces(asNumpy=True)
        return data.value_in_unit(self._unit)
    def _write(self, step, N, names, values):
        """Emit one XYZ frame: atom count, then a tab-separated value table."""
        print(N, file=self._out)
        frame = pd.DataFrame(index=names, data=values)
        frame.to_csv(
            self._out,
            sep='\t',
            header=[f'{self._output} in {self._unit} at time step {step}', '', ''],
        )
    def _generateReport(self, simulation, state):
        self._write(simulation.currentStep, self._N, self._symbols,
                    self._get_values(simulation, state))
class CenterOfMassReporter(XYZReporter):
    """
    Writes a series of XYZ-format frames containing the center-of-mass coordinates,
    center-of-mass velocities, total momenta, or resultant forces on all molecules in a
    Simulation.
    .. note::
        Coordinates are expressed in nanometers, velocities in nanometer/picosecond, momenta in
        dalton*nanometer/picosecond, and forces in dalton*nanometer/picosecond^2.
    To use this reporter, create an CenterOfMassReporter object and append it to the Simulation's
    list of reporters.
    Keyword Args
    ------------
    output : str, default='positions'
        Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
        'forces'.
    groups : set(int), default=None
        Which force groups to consider in the force calculations. If this is `None`, then all
        force groups will be evaluated.
    """
    def _initialize(self, simulation, state):
        super()._initialize(simulation, state)
        self._mols = _MoleculeTotalizer(simulation.context, simulation.topology)
    def _generateReport(self, simulation, state):
        values = self._get_values(simulation, state)
        # positions/velocities are mass-weighted averages; momenta/forces are plain sums
        averaged = self._output in ['positions', 'velocities']
        matrix = self._mols.massFrac if averaged else self._mols.selection
        self._write(simulation.currentStep, self._mols.nmols, self._mols.residues,
                    matrix.dot(values))
class CustomIntegratorReporter(_AtomsMM_Reporter):
    """
    Outputs global and per-DoF variables of a CustomIntegrator instance.

    Any additional keyword argument set to ``True`` names an integrator
    variable to be reported.

    Keyword Args
    ------------
    describeOnly : bool, optional, default=True
        Whether to output only descriptive statistics that summarize the activated per-Dof
        variables.
    """
    def __init__(self, file, reportInterval, **kwargs):
        # Consume our own option *before* delegating, so the base class never
        # receives 'describeOnly' (the previous code popped it only after
        # super().__init__, inconsistently with ExpandedEnsembleReporter).
        self._describeOnly = kwargs.pop('describeOnly', True)
        # Every remaining keyword whose value is literally True selects a variable.
        self._variables = [key for key, value in kwargs.items() if value is True]
        if not self._variables:
            raise InputError("No global or perDof variables have been passed")
        super().__init__(file, reportInterval, **kwargs)
    def _initialize(self, simulation, state):
        """Map each requested variable name to its integrator index; fail on unknowns."""
        integrator = self._integrator = simulation.integrator
        if not isinstance(integrator, openmm.CustomIntegrator):
            raise Exception("simulation.integrator is not a CustomIntegrator")
        self._globals = {}
        for index in range(integrator.getNumGlobalVariables()):
            variable = integrator.getGlobalVariableName(index)
            if variable in self._variables:
                self._globals[variable] = index
        self._perDof = {}
        for index in range(integrator.getNumPerDofVariables()):
            variable = integrator.getPerDofVariableName(index)
            if variable in self._variables:
                self._perDof[variable] = index
        # every requested name must resolve to a global or per-DoF variable
        if set(self._variables) != set(self._globals) | set(self._perDof):
            raise InputError("Unknown variables have been passed")
    def _generateReport(self, simulation, state):
        """Print each selected global value and per-DoF table (or its summary)."""
        for variable, index in self._globals.items():
            value = self._integrator.getGlobalVariable(index)
            print('{}\n{}'.format(variable, value), file=self._out)
        for variable, index in self._perDof.items():
            values = self._integrator.getPerDofVariable(index)
            # 'axis' avoids shadowing the builtin dir()
            titles = ['{}.{}'.format(variable, axis) for axis in ['x', 'y', 'z']]
            df = pd.DataFrame(data=np.array(values), columns=titles)
            if self._describeOnly:
                print(df.describe(), file=self._out)
            else:
                df.to_csv(self._out, sep='\t')
class ExpandedEnsembleReporter(_AtomsMM_Reporter):
    """
    Performs an Expanded Ensemble simulation and reports the energies of multiple states.
    Parameters
    ----------
    states : pandas.DataFrame_
        A DataFrame containing context global parameters (column names) and sets of values
        thereof. The potential energy will be reported for every state these parameters define.
        If one of the variables is named as `weight`, then its set of values will be assigned
        to every state as an importance sampling weight. Otherwise, all states will have
        identical weights. States which are supposed to only have their energies reported, with
        no actual visits, can have their weights set up to `-inf`.
    temperature : unit.Quantity
        The system temperature.
    Keyword Args
    ------------
    reportsPerExchange : int, optional, default=1
        The number of reports between attempts to exchange the global parameter state, that is,
        the exchange interval measured in units of report intervals.
    """
    def __init__(self, file, reportInterval, states, temperature, **kwargs):
        # Work on a copy so the caller's DataFrame is never mutated (weight is
        # popped from the copy below).
        self._parameter_states = states.copy()
        self._nstates = len(states.index)
        # Consume our own option before delegating to the base reporter.
        self._reports_per_exchange = kwargs.pop('reportsPerExchange', 1)
        super().__init__(file, reportInterval, **kwargs)
        if 'weight' in states:
            self._weights = self._parameter_states.pop('weight').values
            # Only states with finite weight are ever visited; the first and
            # last finite ones delimit the "uphill"/"downhill" regimes.
            finite = np.where(np.isfinite(self._weights))[0]
            self._first_state = finite[0]
            self._last_state = finite[-1]
        else:
            self._weights = np.zeros(self._nstates)
            self._first_state = 0
            self._last_state = self._nstates - 1
        # Inverse temperature in mole/kJ, matching the energy unit used elsewhere.
        kT = (unit.MOLAR_GAS_CONSTANT_R*temperature).value_in_unit(unit.kilojoules_per_mole)
        self._beta = 1.0/kT
        self._nreports = 0
        self._overall_visits = np.zeros(self._nstates, dtype=int)
        self._downhill_visits = np.zeros(self._nstates, dtype=int)
        self._probability_accumulators = np.zeros(self._nstates)
        self._downhill = False
        self._counting_started = False
        self._regime_change = []
def _initialize(self, simulation, state):
headers = ['step', 'state']
for index in self._parameter_states.index:
headers.append('Energy[{}] (kJ/mole)'.format(index))
print(*headers, sep=self._separator, file=self._out)
    def _register_visit(self, state):
        """Track regime changes (first<->last state round trips) and visit counts."""
        if self._downhill:
            # reaching the first state ends the current downhill excursion
            if state == self._first_state:
                self._downhill = False
                self._regime_change.append(self._nreports)
        elif state == self._last_state:
            # reaching the last state while uphill starts a downhill regime
            self._downhill = True
            self._regime_change.append(self._nreports)
        if self._counting_started:
            self._overall_visits[state] += 1
            if self._downhill:
                self._downhill_visits[state] += 1
        else:
            # visit statistics only begin once the first downhill regime starts
            self._counting_started = self._downhill is True
    def _generateReport(self, simulation, state):
        """Evaluate all state energies, possibly exchange the state, and write a row."""
        energies = np.zeros(self._nstates)
        original = dict()
        for name in self._parameter_states.columns:
            original[name] = simulation.context.getParameter(name)
        latest = original.copy()
        # Sweep the context through every parameter state, updating only the
        # parameters that actually change between consecutive states.
        for i, (index, row) in enumerate(self._parameter_states.iterrows()):
            for name, value in row.items():
                if value != latest[name]:
                    simulation.context.setParameter(name, value)
                    latest[name] = value
            energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
            energies[i] = energy.value_in_unit(unit.kilojoules_per_mole)
        self._nreports += 1
        # Boltzmann-weighted state probabilities; the max-shift keeps exp() stable.
        exponents = self._weights - self._beta*energies
        probabilities = np.exp(exponents - np.amax(exponents))
        probabilities /= np.sum(probabilities)
        self._probability_accumulators += probabilities
        # NOTE(review): when no exchange happens this report, the context is left
        # at the *last* evaluated state's parameters rather than restored to the
        # originals -- confirm this is intended.
        if self._nreports % self._reports_per_exchange == 0:
            # 'state' now shadows the OpenMM State argument: it becomes the
            # sampled state index used below.
            state = np.random.choice(self._nstates, p=probabilities)
            for name, value in self._parameter_states.iloc[state].items():
                if value != latest[name]:
                    simulation.context.setParameter(name, value)
            self._register_visit(state)
            print(simulation.currentStep, state, *energies, sep=self._separator, file=self._out)
def _isochronal_delta(self, f, n):
N = len(f)
b = 3/(n*(n+1)*(2*n+1))
seq = np.arange(1, n+1)
a = (b/2)*np.array([n*(n+1)-k*(k-1) for k in seq])
ind = np.argsort(f)
fa = f[ind]
delta = np.empty(N)
delta[0] = -fa[0]/2 + np.sum(a*fa[1:n+1])
for i in range(1, N-1):
delta[i] = b*np.sum([k*(fa[min(i+k, N-1)] | |
####
####
##
## Project PairsDB
##
## Copyright (C) 2002 <NAME> All rights reserved
##
## Author: <NAME> <<EMAIL>>
##
## $Id: Table.py,v 1.2 2002/11/18 13:03:28 heger Exp $
##
##
####
####
#----------------------------------------------------------------
# Name: Table
#--------------General Information-------------------------------
# File: Table.py
# Version: 1.0
# Description: base class for handling tables in mysql
# Author: <NAME> (<EMAIL>)
#--------------Documentation-------------------------------------
#
#
#--------------Change History------------------------------------
# 19 Jan 2000 Created
#
#
#----------------------------------------------------------------
import string, os, Pairsdb, Experiment
class Field:
    """A single table column: name, SQL type, modifiers and current value."""
    def __init__(self, name, type, modifier, value):
        self.name = name            # column name
        self.type = type            # SQL type
        self.modifier = modifier    # modifier such as NOT NULL
        self.value = value          # value used for retrieval and insertion
#-------------------------------------------
# Class: Table
# Superclasses: none
# Subclasses: Table_....
# Function: basic functionality for creating tables and indices
# Author: <NAME>
#-------------------------------------------
class Table:
    # NOTE(review): these are *class* attributes used as defaults; `data = {}`
    # in particular is a single dict shared by every instance that does not
    # rebind it. Subclasses appear expected to overwrite these -- confirm
    # before relying on per-instance state here.
    dbhandle = None
    name = ""       # table name, defined in subclass
    fields = ()     # column definitions, defined in subclass, array of array
    indices = ()    # index definitions, defined in subclass
    data = {}       # row values, defined in subclass by Insert-Method
    def __init__ ( self, dbhandle ):
        # dbhandle: shared database connection wrapper (provides Execute, Exists, ...)
        self.dbhandle = dbhandle
#-------------------------------------------------------------------------------------------------------
def GetFullName( self ):
"""return name + database prefix."""
return self.dbhandle.GetDatabase() + "." + self.name
#-------------------------------------------------------------------------------------------------------
    def GetName( self ):
        """Return the bare table name (without database prefix)."""
        return self.name
#------------------------------------------------------------------------------------------------------
# create table and indices if not existing
    def Create (self, HEAP = 0, SOURCE=None, MAX_ROWS=None, AVG_ROW_LENGTH=None, TEMPORARY=0):
        """Create this table. If HEAP is 1, then the table will be created on the heap as a temporary table.

        SOURCE, if given, is appended verbatim (e.g. a SELECT feeding the new
        table) and therefore must be the last clause of the statement.
        Does nothing if the table already exists.
        """
        if not self.Exists():
            if not TEMPORARY:
                statement = "CREATE TABLE " + self.name + ' ( '
            else:
                statement = "CREATE TEMPORARY TABLE " + self.name + ' ( '
            # each entry of self.fields is a sequence (name, type, modifier, ...)
            # that is joined with spaces into one column definition
            f = map (string.join, self.fields )
            statement = statement + string.join( f, ',') # add fields
            if (len(self.indices) > 0 ):
                statement = statement + ', ' + string.join( self.indices, ',') # add indices
            statement = statement + ' ) '
            if HEAP:
                statement = statement + ' TYPE=HEAP '
            if MAX_ROWS:
                statement = statement + ' MAX_ROWS=%i ' % MAX_ROWS
            if AVG_ROW_LENGTH:
                statement = statement + ' AVG_ROW_LENGTH=%i ' % AVG_ROW_LENGTH
            ## has to be the last
            if SOURCE:
                statement = statement + SOURCE
            return self.Execute( statement )
#------------------------------------------------------------------------------------------------------
# create table and indices if not existing
    def CreateHeap (self, SOURCE):
        """Create this table on the heap (TYPE=HEAP), filled from the given SOURCE clause.

        Does nothing if the table already exists.
        """
        if not self.Exists():
            statement = "CREATE TABLE " + self.name
            if (len(self.indices) > 0 ):
                statement = statement + ' (%s) ' % string.join( self.indices, ',') # add indices
            statement = statement + " TYPE=HEAP " + SOURCE
            return self.Execute( statement )
#------------------------------------------------------------------------------------------------------
def DropIndices( self, indices = None ):
"""remove indices from database. If no parameter is given, remove all."""
if not indices:
statement = "SHOW INDEX FROM " + self.name
indices = map(lambda x: x[2], self.Execute( statement ).fetchall())
for index in indices:
statement = "ALTER TABLE %s DROP INDEX %s" % (self.name, index )
self.Execute( statement )
#------------------------------------------------------------------------------------------------------
def CreateIndices( self ):
for index in self.indices:
statement = "ALTER TABLE %s ADD %s" % (self.name, index )
self.Execute(statement)
#------------------------------------------------------------------------------------------------------
    def Exists(self):
        """Return 1 if the table exists in the database, otherwise 0 (delegates to the handle)."""
        return self.dbhandle.Exists(self.name)
#------------------------------------------------------------------------------------------------------
def Drop( self ):
statement = "DROP TABLE IF EXISTS " + self.name
return self.Execute( statement )
#------------------------------------------------------------------------------------------------------
    def Insert (self):
        """Insert self.data as one row (keys are columns, values are quoted strings)."""
        # NOTE(review): values are interpolated directly and wrapped in single
        # quotes -- any value containing a quote breaks the statement, and data
        # from untrusted sources would allow SQL injection. Parameterized
        # queries would be safer here.
        statement = "INSERT INTO " + self.name + " ( " + \
                    string.join( self.data.keys(), ',') + \
                    ') VALUES (\'' + \
                    string.join( map(str, self.data.values()),'\',\'') + '\')'
        return self.Execute( statement )
#------------------------------------------------------------------------------------------------------
    def Update (self):
        """Update the row(s) selected by self.whereclause with the values in self.data."""
        # NOTE(review): assumes every value in self.data is already a string
        # (the two-argument lambda concatenates key and value directly) and
        # applies no quoting/escaping -- confirm callers pre-format the values.
        t = map ( lambda x,y: x + "=" + y, self.data.keys(), self.data.values())
        statement = "UPDATE " + self.name + \
                    " SET " + string.join( t, ',') + \
                    " WHERE " + self.whereclause
        return self.Execute( statement )
#------------------------------------------------------------------------------------------------------
def Lock (self, modus = 'WRITE'):
"""lock THIS table for read and write access.
"""
statement = 'LOCK TABLES ' + self.name + ' ' + modus
return self.Execute( statement )
#------------------------------------------------------------------------------------------------------
def Unlock( self ):
"""unlock ALL tables.
"""
statement = 'UNLOCK TABLES'
return self.Execute( statement )
#------------------------------------------------------------------------------------------------------
    def Execute( self, statement ):
        """Run an SQL statement via the shared database handle and return its result/cursor."""
        return self.dbhandle.Execute( statement )
#------------------------------------------------------------------------------------------------------
def Check( self, statement = None, msg = None ):
if not statement:
print "No checks defined for table %s " % self.name
return
print "Checking table " + self.name + " :" + msg
query = self.Execute( statement )
if query.rowcount > 0:
print "%i inconsistencies found " % query.rowcount
while 1:
entry = query.fetchone()
if not entry: break
print entry
else:
print "no inconsistencies found"
#------------------------------------------------------------------------------------------------------
def RowCount( self ):
try:
query = self.Execute( "SELECT COUNT(*) FROM " + self.name )
except:
return 0
return query.fetchone()[0]
#------------------------------------------------------------------------------------------------------
def Empty( self ):
if self.RowCount() > 0:
return 0
else:
return 1
#------------------------------------------------------------------------------------------------------
    def PrintStatistics( self ):
        """Print date, table name and current row count on a single line."""
        print "%s %-20s : %i " % (self.dbhandle.GetDate(), self.name, self.RowCount())
#------------------------------------------------------------------------------------------------------
    def PrintFieldStatistics( self, field, title = None):
        """Print min/max/avg/stddev summary for one numeric column (title defaults to the field name)."""
        if not title: title = field
        statement = "SELECT MIN(%s), MAX(%s), " % (field, field) +\
                    " AVG(%s), STDDEV(%s) FROM %s" % (field, field, self.name )
        result = self.Execute(statement).fetchone()
        # min/max are printed as integers, avg/stddev with two decimals
        print "%s\t%i\t%i\t%8.2f\t%8.2f" % ((title,) + result)
#------------------------------------------------------------------------------------------------------
    def Optimize( self ):
        """Run OPTIMIZE TABLE to reclaim space and refresh index statistics."""
        return self.Execute("OPTIMIZE TABLE " + self.name)
#-------------------------------------------------------------------------------------------------------
    def Backup( self ):
        """Create a backup-copy of the table.

        Drops any stale backup, renames the live table to <name>_backup, then
        calls Restore() to rebuild an empty live table and refill it from the
        freshly renamed backup -- afterwards both <name> and <name>_backup
        hold the data.
        """
        self.Execute("DROP TABLE IF EXISTS %s_backup" % self.name)
        self.Execute("ALTER TABLE %s RENAME AS %s_backup" % (self.name, self.name))
        return self.Restore()
#-------------------------------------------------------------------------------------------------------
    def Restore( self ):
        """Restore table from backup-copy.

        Drops the live table, re-creates its schema, then copies every row
        from <name>_backup back in.
        """
        self.Drop()
        self.Create()
        return self.Execute( "INSERT INTO %s SELECT * FROM %s_backup" % (self.name, self.name))
#-------------------------------------------------------------------------------------------------------
    def InsertDataFromTable( self, src_name ):
        """Replace this table's contents with the rows of the identically-shaped table src_name.
        """
        self.Drop()
        self.Create()
        self.Execute( "INSERT INTO %s SELECT * FROM %s " % (self.name, src_name))
#-------------------------------------------------------------------------------------------------------
    def GetClone( self, new_name ):
        """create and return a clone (including data) of this table.
        The new table has name new_name.
        """
        # NOTE(review): GetAnotherInstance is not defined in this class --
        # presumably provided by subclasses; confirm before calling on a
        # bare Table.
        new_table = self.GetAnotherInstance()
        self.CreateNewTable( new_name )
        new_table.SetName( new_name )
        new_table.InsertDataFromTable( self.name )
        return new_table
#-------------------------------------------------------------------------------------------------------
def CreateNewTable( self, dest_name ):
"""create a identical version of this table (without data).
"""
source_name = self.name
if self.dbhandle.Exists(dest_name):
raise "table %s does already exist" % dest_name
else:
self.name = dest_name
self.Create()
self.name = source_name
#-------------------------------------------------------------------------------------------------------
    def SetName( self, new_name ):
        """Rebind this object to table new_name; existence is NOT checked."""
        self.name = new_name
#-------------------------------------------------------------------------------------------------------
    def Clear( self ):
        """Drop and re-create the table, discarding all rows."""
        self.Drop()
        self.Create()
#-------------------------------------------------------------------------------------------------------
    def DeleteAll( self ):
        """Delete every row but keep the table schema and its indices."""
        return self.Execute("DELETE FROM %s" % self.name)
#-------------------------------------------------------------------------------------------------------
    def MakeUnique( self ):
        """make table unique.
        This is done by making a SELECT DISTINCT into an file and
        then loading it into the table.
        -> Since this method is sometimes executed from remote hosts and
        files are stored only on the local host, they have to go into /tmp.
        """
        # NOTE(review): LoadDump is not defined in this class -- presumably a
        # subclass/mixin method; confirm. The outfile is written by the MySQL
        # server itself (INTO OUTFILE), hence islocal = 0 below.
        outfile = '%s/pairsdb_unique_%s_%i.tmp' % (Pairsdb.PATH_TEMP, self.name, os.getpid())
        self.Execute("SELECT DISTINCTROW * FROM %s INTO OUTFILE '%s' " % (self.name, outfile) )
        self.DeleteAll()
        self.LoadDump( outfile, islocal = 0 )
        os.remove( outfile )
#------------------------------------------------------------------------------------------------------
def Load( self,
filename,
option_duplicates = '',
local = None,
skip_update = 1,
separator = "\t",
check_permissions = 1,
no_indices = False,
use_field_list = None):
"""load data from file into mysql-table. The option can be either '', 'IGNORE', or 'REPLACE'.
skip_update means, that the update field is not in the file to be loaded, but gets set automatically.
local means, the file is local and is read via the mysql_import tool.
If no_indices is set to True, indices are turned off for loading. They are not turned on
afterwards. Do so by ALTER TABLE ... ENABLE KEYS.
"""
if not use_field_list:
use_field_list = map( lambda x: x[0], self.fields)
# make file word readable
if check_permissions and not local:
os.chmod( filename, 0664)
fields_list = [] # build the list of fields to insert
for f in use_field_list:
if (f == 'updated') and skip_update: # elminiate field updated, so that it gets set
continue # automatically
fields_list.append(f)
if no_indices:
self.Execute( "ALTER TABLE %s DISABLE KEYS" % self.name )
if not local:
self.Lock("WRITE")
statement = "LOAD DATA INFILE '%s' %s" % (filename, option_duplicates) +\
" INTO TABLE | |
# vagrant/catalog/application.py
"""Module defining routes and associated methods for the item-catalog Flask application.
Methods:
    show_login: Serves the client a page that allows them to sign in using Google OAuth.
    gconnect: Connects a client using Google OAuth.
    gdisconnect: Disconnects the client from the server.
    catalog_JSON: Returns all item data in the catalog in JSON format.
    item_JSON: Returns a specific item's information in JSON.
    show_catalog: Provides the client with the ability to view all categories and items.
    show_category: Provides the client with the ability to view a category.
    new_category: Provides the client with the ability to create a new category.
    edit_category: Provides the client with the ability to edit a category.
    delete_category: Provides the client with the ability to delete a category.
    show_item: Provides the client with the ability to view an item.
    new_item: Provides the client with the ability to create a new item.
    edit_item: Provides the client with the ability to edit an item.
    delete_item: Provides the client with the ability to delete an item.
"""
import json
import random
import secrets
import string

import httplib2
import requests
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, make_response
from flask import session as login_session
from oauth2client.client import FlowExchangeError
from oauth2client.client import flow_from_clientsecrets
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from models import Base, User, Category, Item
app = Flask(__name__)

# Database plumbing: a single module-level session is shared by all handlers.
engine = create_engine('sqlite:///itemCatalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()

# Load the Google OAuth client id once at import time; the context manager
# closes the secrets file instead of leaking the handle (the original used
# open(...).read() and never closed it).
with open('client_secrets.json', 'r') as secrets_file:
    CLIENT_ID = json.load(secrets_file)['web']['client_id']
# Create anti-forgery state token
@app.route('/login')
def show_login():
    """Serve the client a page that allows them to sign in using Google OAuth.

    Returns:
        Rendered login.html carrying the anti-forgery state token.
    """
    # Use the secrets module (CSPRNG) rather than random: this token guards
    # against cross-site request forgery, so it must not be predictable.
    state = ''.join(secrets.choice(string.ascii_uppercase + string.digits)
                    for _ in range(32))
    login_session['state'] = state
    return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Connect a client using Google OAuth.

    Exchanges the one-time authorization code posted by the client for
    credentials, validates the resulting access token, stores the user's
    profile in the session and registers first-time users.

    Returns:
        The rendered catalog page on success, or a JSON error response
        (status 401/500) describing the failure.
    """
    # Validate state token
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid (server-side call to Google).
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1].decode())
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        print("Token's client ID does not match app's.")
        response.headers['Content-Type'] = 'application/json'
        return response
    # Short-circuit if this user is already connected in this session.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps('Current user is already connected.'),
                                 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # Check for user registration; if not registered, create a new user.
    user_id = get_user_id(login_session['email'])
    if user_id:
        login_session['user_id'] = user_id
        flash('Welcome back {}!'.format(login_session['username']))
    else:
        new_user = create_user(login_session['username'], login_session['email'])
        login_session['user_id'] = new_user.id
        flash('Welcome {}. We are excited that you have joined us!'.format(login_session['username']))
    # ADD PROVIDER TO LOGIN SESSION
    login_session['provider'] = 'google'
    return render_template('catalog.html')
@app.route('/gdisconnect')
def gdisconnect():
    """Disconnect the current user from the Google OAuth provider.

    Revokes the stored access token against Google's revocation endpoint
    and, on success, clears all login-session state for the user.

    Returns:
        A 401 JSON response when no user is connected; otherwise a
        redirect to the main catalog page via show_catalog().
    """
    # Only disconnect a connected user.
    access_token = login_session.get('access_token')
    if access_token is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] == '200':
        # Token revoked: drop every piece of session state for this user.
        del login_session['access_token']
        del login_session['gplus_id']
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        del login_session['user_id']
        flash('Successfully disconnected.')
    else:
        # BUG FIX: the original built an unused `response` object here and,
        # due to a misplaced parenthesis, passed 400 to json.dumps() (its
        # `indent` argument) instead of make_response(). Both branches only
        # ever returned the redirect below, so the dead response objects
        # are removed entirely.
        flash('Failed to revoke token for given user.')
    return redirect(url_for('show_catalog'))
# User helper functions
def create_user(name, email):
    """Insert a new user row and return the persisted object.

    Args:
        name: Display name of the new user.
        email: Email address of the new user; assumed unique in the database.

    Returns:
        The newly committed User object.
    """
    user = User(name=name, email=email)
    session.add(user)
    session.commit()
    return user
def get_user_id(email):
    """Look up a registered user's id by email.

    Args:
        email: The email of the user. This is assumed to be unique in the
            database.

    Returns:
        The user's id if the email exists in the User table, otherwise None.
    """
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Any query failure (no such row,
        # multiple rows, lost connection) still maps to "not registered".
        return None
# JSON APIs to view Item Catalog Information
@app.route('/api/v01/catalog/JSON/')
def catalog_JSON():
    """Serve the whole item catalog as JSON.

    Returns:
        JSON payload with an `items` list describing every Item row.
    """
    all_items = session.query(Item).all()
    serialized = [item.serialize for item in all_items]
    return jsonify(items=serialized)
@app.route('/api/v01/item/<int:item_id>/JSON/')
def item_JSON(item_id):
    """Serve a single item's information as JSON.

    Args:
        item_id: Database id of the item whose data is to be returned.

    Returns:
        JSON payload with the serialized Item where Item.id == item_id.
    """
    matching_item = session.query(Item).filter_by(id=item_id).one()
    return jsonify(item=matching_item.serialize)
# Item Catalog Pages
@app.route('/')
@app.route('/catalog/')
def show_catalog():
    """Render the main page listing all categories and items.

    Returns:
        Rendered catalog.html.
    """
    categories = session.query(Category).all()
    serialized_items = [item.serialize for item in session.query(Item).all()]
    return render_template(
        'catalog.html',
        items=serialized_items,
        categories=categories,
        login_session=login_session,
    )
@app.route('/category/<int:category_id>/')
def show_category(category_id):
    """Render the page for a single category.

    Args:
        category_id: Database id of the category to view.

    Returns:
        Rendered category.html for the matching category.
    """
    selected = session.query(Category).filter_by(id=category_id).one()
    return render_template(
        'category.html',
        category=selected,
        login_session=login_session,
    )
@app.route('/category/new/', methods=['GET', 'POST'])
def new_category():
    """Create a new category.

    GET serves the creation form; POST inserts the category. Anonymous
    clients are redirected back to the main catalog page.

    Returns:
        Either the rendered category_new.html or a redirect to the
        catalog page via show_catalog().
    """
    # Guard: only authenticated users may add categories.
    if 'user_id' not in login_session:
        flash('Please login in order to add categories')
        return redirect(url_for('show_catalog'))
    if request.method != 'POST':
        return render_template('category_new.html', login_session=login_session)
    category = Category(name=request.form['name'], user_id=login_session['user_id'])
    session.add(category)
    flash('New Category {} Successfully Created'.format(category.name))
    session.commit()
    return redirect(url_for('show_catalog'))
@app.route('/category/<int:category_id>/edit/', methods=['GET', 'POST'])
def edit_category(category_id):
    """Edit an existing category.

    GET serves the edit form; POST applies the change. Requires a
    logged-in user, and only the category's creator may edit it;
    anyone else is redirected to the main catalog page.

    Args:
        category_id: Database id of the category to edit.

    Returns:
        Either the rendered category_edit.html or a redirect to the
        catalog page via show_catalog().
    """
    # Guard: only authenticated users may edit categories.
    if 'user_id' not in login_session:
        flash('Please login in order to edit categories')
        return redirect(url_for('show_catalog'))
    edited_category = session.query(Category).filter_by(id=category_id).one()
    # Ownership check: users may only edit categories they created.
    if login_session['user_id'] != edited_category.user_id:
        flash('Sorry. You can only edit categories that you added.')
        return redirect(url_for('show_catalog'))
    if request.method != 'POST':
        return render_template(
            'category_edit.html',
            category=edited_category,
            login_session=login_session
        )
    submitted = 'btn_submit' in request.form
    if submitted:
        # Empty names are silently ignored (no flash), as before.
        if request.form['name']:
            edited_category.name = request.form['name']
            session.add(edited_category)
            session.commit()
            flash('Category Successfully Edited {}'.format(edited_category.name))
    else:
        flash('Category Edit {} Was Cancelled'.format(edited_category.name))
    return redirect(url_for('show_catalog'))
@app.route('/category/<int:category_id>/delete/', methods=['GET', 'POST'])
def delete_category(category_id):
"""Provides the client with the ability to delete a category.
Verifies that the user is logged in. On GET requests, a form will be served for user to delete
the category. On POST requests, the category will be | |
b=4, size=0)]
"""
jeżeli self.matching_blocks jest nie Nic:
zwróć self.matching_blocks
la, lb = len(self.a), len(self.b)
# This jest most naturally expressed jako a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, oraz append partial
# results to `matching_blocks` w a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
dopóki queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same jako b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
jeżeli k: # jeżeli k jest 0, there was no matching block
matching_blocks.append(x)
jeżeli alo < i oraz blo < j:
queue.append((alo, i, blo, j))
jeżeli i+k < ahi oraz j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks w the
# matching_blocks list now. Starting przy 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
dla i2, j2, k2 w matching_blocks:
# Is this block adjacent to i1, j1, k1?
jeżeli i1 + k1 == i2 oraz j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, oraz the first
# block so lengthened remains the block to compare against.
k1 += k2
inaczej:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), oraz make the second block the
# new block to compare against.
jeżeli k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
jeżeli k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = list(map(Match._make, non_adjacent))
zwróć self.matching_blocks
def get_opcodes(self):
    """Return list of 5-tuples describing how to turn a into b.

    Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
    has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
    tuple preceding it, and likewise for j1 == the previous j2.

    The tags are strings, with these meanings:

    'replace':  a[i1:i2] should be replaced by b[j1:j2]
    'delete':   a[i1:i2] should be deleted.
                Note that j1==j2 in this case.
    'insert':   b[j1:j2] should be inserted at a[i1:i1].
                Note that i1==i2 in this case.
    'equal':    a[i1:i2] == b[j1:j2]

    >>> a = "qabxcd"
    >>> b = "abycdf"
    >>> s = SequenceMatcher(Nic, a, b)
    >>> dla tag, i1, i2, j1, j2 w s.get_opcodes():
    ...    print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
    ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
     delete a[0:1] (q) b[0:0] ()
      equal a[1:3] (ab) b[0:2] (ab)
    replace a[3:4] (x) b[2:3] (y)
      equal a[4:6] (cd) b[3:5] (cd)
     insert a[6:6] () b[5:6] (f)
    """
    # NOTE: this file is a Polish-localized Python dialect: `jeżeli`/`zwróć`/
    # `dla`/`w`/`oraz`/`albo_inaczej`/`Nic` are `if`/`return`/`for`/`in`/
    # `and`/`elif`/`None`. The tokens below are code, not typos.
    # Cached result: opcodes are computed lazily and memoized.
    jeżeli self.opcodes jest nie Nic:
        zwróć self.opcodes
    i = j = 0
    self.opcodes = answer = []
    dla ai, bj, size w self.get_matching_blocks():
        # invariant: we've pumped out correct diffs to change
        # a[:i] into b[:j], and the next matching block is
        # a[ai:ai+size] == b[bj:bj+size]. So we need to pump
        # out a diff to change a[i:ai] into b[j:bj], pump out
        # the matching block, and move (i,j) beyond the match
        tag = ''
        jeżeli i < ai oraz j < bj:
            tag = 'replace'
        albo_inaczej i < ai:
            tag = 'delete'
        albo_inaczej j < bj:
            tag = 'insert'
        jeżeli tag:
            answer.append( (tag, i, ai, j, bj) )
        i, j = ai+size, bj+size
        # the list of matching blocks is terminated by a
        # sentinel with size 0
        jeżeli size:
            answer.append( ('equal', ai, i, bj, j) )
    zwróć answer
def get_grouped_opcodes(self, n=3):
    """ Isolate change clusters by eliminating ranges with no changes.

    Return a generator of groups with up to n lines of context.
    Each group is in the same format as returned by get_opcodes().

    >>> z pprint zaimportuj pprint
    >>> a = list(map(str, range(1,40)))
    >>> b = a[:]
    >>> b[8:8] = ['i']     # Make an insertion
    >>> b[20] += 'x'       # Make a replacement
    >>> b[23:28] = []      # Make a deletion
    >>> b[30] += 'y'       # Make another replacement
    >>> pprint(list(SequenceMatcher(Nic,a,b).get_grouped_opcodes()))
    [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
     [('equal', 16, 19, 17, 20),
      ('replace', 19, 20, 20, 21),
      ('equal', 20, 22, 21, 23),
      ('delete', 22, 27, 23, 23),
      ('equal', 27, 30, 23, 26)],
     [('equal', 31, 34, 27, 30),
      ('replace', 34, 35, 30, 31),
      ('equal', 35, 38, 31, 34)]]
    """
    # NOTE: `jeżeli`/`nie`/`dla`/`w`/`oraz`/`uzyskaj` are this dialect's
    # `if`/`not`/`for`/`in`/`and`/`yield` keywords.
    codes = self.get_opcodes()
    jeżeli nie codes:
        codes = [("equal", 0, 1, 0, 1)]
    # Fixup leading and trailing groups if they show no changes.
    jeżeli codes[0][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[0]
        codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
    jeżeli codes[-1][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[-1]
        codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
    nn = n + n
    group = []
    dla tag, i1, i2, j1, j2 w codes:
        # End the current group and start a new one whenever
        # there is a large range with no changes.
        jeżeli tag == 'equal' oraz i2-i1 > nn:
            group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
            uzyskaj group
            group = []
            i1, j1 = max(i1, i2-n), max(j1, j2-n)
        group.append((tag, i1, i2, j1 ,j2))
    jeżeli group oraz nie (len(group)==1 oraz group[0][0] == 'equal'):
        uzyskaj group
def ratio(self):
    """Return a measure of the sequences' similarity (float in [0,1]).

    Where T is the total number of elements in both sequences, and
    M is the number of matches, this is 2.0*M / T.
    Note that this is 1 if the sequences are identical, and 0 if
    they have nothing in common.

    .ratio() is expensive to compute if you haven't already computed
    .get_matching_blocks() or .get_opcodes(), in which case you may
    want to try .quick_ratio() or .real_quick_ratio() first to get an
    upper bound.

    >>> s = SequenceMatcher(Nic, "abcd", "bcde")
    >>> s.ratio()
    0.75
    >>> s.quick_ratio()
    0.75
    >>> s.real_quick_ratio()
    1.0
    """
    # Sum the sizes of all matching blocks (the trailing sentinel
    # contributes 0). `dla ... w ...` is this dialect's `for ... in ...`.
    matches = sum(triple[-1] dla triple w self.get_matching_blocks())
    zwróć _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
    """Return an upper bound on ratio() relatively quickly.

    This isn't defined beyond that it is an upper bound on .ratio(), and
    is faster to compute.
    """
    # viewing a and b as multisets, set matches to the cardinality
    # of their intersection; this counts the number of matches
    # without regard to order, so is clearly an upper bound
    # (`jeżeli`/`jest`/`Nic`/`dla`/`w`/`inaczej` = if/is/None/for/in/else).
    jeżeli self.fullbcount jest Nic:
        self.fullbcount = fullbcount = {}
        dla elt w self.b:
            fullbcount[elt] = fullbcount.get(elt, 0) + 1
    fullbcount = self.fullbcount
    # avail[x] is the number of times x appears in 'b' less the
    # number of times we've seen it in 'a' so far ... kinda
    avail = {}
    availhas, matches = avail.__contains__, 0
    dla elt w self.a:
        jeżeli availhas(elt):
            numb = avail[elt]
        inaczej:
            numb = fullbcount.get(elt, 0)
        avail[elt] = numb - 1
        jeżeli numb > 0:
            matches = matches + 1
    zwróć _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
    """Return an upper bound on ratio() very quickly.

    This isn't defined beyond that it is an upper bound on .ratio(), and
    is faster to compute than either .ratio() or .quick_ratio().
    """
    la, lb = len(self.a), len(self.b)
    # can't have more matches than the number of elements in the
    # shorter sequence (`zwróć` is this dialect's `return`)
    zwróć _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to zwróć list of the best "good enough" matches.
word jest a sequence dla which close matches are desired (typically a
string).
possibilities jest a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) jest the maximum number of close matches to
return. n must | |
# Repository: sail-sg/mugs
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mugs training code
"""
import argparse
import datetime
import json
import math
import os
import sys
import time
from collections import OrderedDict
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torchvision import models as torchvision_models
import utils
from src.loss import get_multi_granular_loss
from src.model import get_model
from src.multicropdataset import data_prefetcher, get_dataset
from src.optimizer import cancel_gradients_last_layer, get_optimizer, clip_gradients
# Alphabetical list of model-constructor names exported by torchvision.models:
# lowercase, non-dunder attributes that are callable.
torchvision_archs = sorted(
    [
        name
        for name, attr in torchvision_models.__dict__.items()
        if name.islower() and not name.startswith("__") and callable(attr)
    ]
)
def get_args_parser():
parser = argparse.ArgumentParser("Mugs", add_help=False)
##======== Model parameters ============
parser.add_argument(
"--arch",
type=str,
default="vit_small",
choices=["vit_small", "vit_base", "vit_large"],
help="""Name of architecture to train.""",
)
parser.add_argument(
"--patch_size",
type=int,
default=16,
help="""Size in pixels
of input square patches - default 16 (for 16x16 patches). Using smaller
values leads to better performance but requires more memory. Applies only
for ViTs (vit_small and vit_base). If <16, we recommend disabling
mixed precision training (--use_fp16 false) to avoid unstabilities.""",
)
##======== Training/Optimization parameters ============
parser.add_argument(
"--momentum_teacher",
type=float,
default=0.996,
help="""Base EMA
parameter for teacher update. The value is increased to 1 during training with
cosine schedule. We recommend setting a higher value with small batches: for
example use 0.9995 with batch size of 256.""",
)
parser.add_argument(
"--use_fp16",
type=utils.bool_flag,
default=False,
help="""Whether or not
to use half precision for training. Improves training time and memory requirements,
but can provoke instability and slight decay of performance. We recommend disabling
mixed precision if the loss is unstable, if reducing the patch size or if training
with bigger ViTs.""",
)
parser.add_argument(
"--weight_decay",
type=float,
default=0.04,
help="""Initial value of the
weight decay. With ViT, a smaller value at the beginning of training works well.""",
)
parser.add_argument(
"--weight_decay_end",
type=float,
default=0.2,
help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""",
)
parser.add_argument(
"--clip_grad",
type=float,
default=3.0,
help="""Maximal parameter
gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can
help optimization for larger ViT architectures. 0 for disabling.""",
)
parser.add_argument(
"--batch_size_per_gpu",
type=int,
default=64,
help="Per-GPU batch-size : number of distinct images loaded on one GPU.",
)
parser.add_argument(
"--epochs", type=int, default=100, help="Number of epochs of training."
)
parser.add_argument(
"--warmup_epochs",
default=10,
type=int,
help="""Number of epochs for the linear learning-rate warm up.=""",
)
parser.add_argument(
"--freeze_last_layer",
type=int,
default=1,
help="""Number of epochs during
which we keep the output layer fixed for the group supervision loss. Typically doing so during
the first epoch helps training. Try increasing this value if the loss does not decrease.""",
)
parser.add_argument(
"--lr",
type=float,
default=0.0008,
help="""Learning rate at the end of
linear warmup (highest LR used during training). The learning rate is linearly scaled
with the batch size, and specified here for a reference batch size of 256.""",
)
parser.add_argument(
"--patch_embed_lr_mult",
type=float,
default=0.2,
help="""For patch
embedding layer, its learning rate is lr * patch_embed_lr_mult (<1.0) in most case, which
stables training and also slightly improve the performance.""",
)
parser.add_argument(
"--min_lr",
type=float,
default=1e-6,
help="""Target LR at the
end of optimization. We use a cosine LR schedule with linear warmup.""",
)
parser.add_argument(
"--optimizer",
type=str,
default="adamw",
choices=["adamw", "sgd", "lars"],
help="""Type of optimizer. We recommend using adamw
with ViTs.""",
)
parser.add_argument(
"--drop_path_rate", type=float, default=0.1, help="""stochastic depth rate"""
)
##======== Multi-granular supervisions (instance/local-group/group supervisions) ==========
parser.add_argument(
"--loss_weights",
type=float,
nargs="+",
default=[1.0, 1.0, 1.0],
help="""three loss weights for instance, local-group, group supervision losses in turn""",
)
parser.add_argument(
"--use_bn_in_head",
type=utils.bool_flag,
default=False,
help="Whether to use batch normalizations in the three projection heads (Default: False)",
)
parser.add_argument(
"--norm_before_pred",
type=utils.bool_flag,
default=True,
help="""Whether to use batch normalizations after projection heads (namely before
prediction heads) in instance and local-group supervisions. (Default: False)""",
)
# parameters for instance discrimination supervision
parser.add_argument(
"--instance_out_dim",
type=int,
default=256,
help="""output dimention in the projection and prediction heads.""",
)
parser.add_argument(
"--instance_queue_size",
type=int,
default=65536,
help="""the queue size of the memory to store the negative keys.""",
)
parser.add_argument(
"--instance_temp",
type=float,
default=0.2,
help="""the temperature parameters for the infoNCE loss in instance supervision.""",
)
# parameters for local-group discrimination supervision
parser.add_argument(
"--local_group_out_dim",
type=int,
default=256,
help="""output dimention in the projection and prediction heads.""",
)
parser.add_argument(
"--local_group_knn_top_n",
type=int,
default=8,
help="how many neighbors we use to aggregate for a local-group",
)
parser.add_argument(
"--local_group_queue_size",
type=int,
default=65536,
help="""the queue sizes of the memory to store the negative keys for infoNCE loss and
another memory size to store the weak augmentated samples for local-group aggregation.""",
)
parser.add_argument(
"--local_group_temp",
type=float,
default=0.2,
help="""the temperature parameters for the infoNCE loss in instance supervision.""",
)
## parameters for group discrimination supervision
parser.add_argument(
"--group_out_dim",
type=int,
default=65536,
help="""output dimention in the prediction heads.""",
)
parser.add_argument(
"--group_bottleneck_dim",
type=float,
default=256,
help="""head bottleneck dimention in the prediction heads.""",
)
parser.add_argument(
"--norm_last_layer",
type=utils.bool_flag,
default=True,
help="""Whether or not to weight normalize the last layer of the group supervision head.
Not normalizing leads to better performance but can make the training unstable. We
typically set this paramater to False with vit_small and True with vit_base and vit_large.""",
)
parser.add_argument(
"--group_student_temp",
type=float,
default=0.1,
help="""the temperature parameters for the clustering loss in student output.""",
)
parser.add_argument(
"--group_warmup_teacher_temp",
default=0.04,
type=float,
help="""Initial value for the teacher temperature: 0.04 works well in most cases.
Try decreasing it if the training loss does not decrease.""",
)
parser.add_argument(
"--group_teacher_temp",
default=0.04,
type=float,
help="""Final value
(after linear warmup) of the teacher temperature. For most experiments, anything above
0.07 is unstable. We recommend starting with the default value of 0.04 and increase
this slightly if needed.""",
)
parser.add_argument(
"--group_warmup_teacher_temp_epochs",
default=0,
type=int,
help="""Number of warmup epochs for the teacher temperature (Default: 30).""",
)
##======== augmentation parameters ============
# Multi-crop parameters
parser.add_argument(
"--global_crops_scale",
type=float,
nargs="+",
default=(0.25, 1.0),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we
recommand using a wider range of scale ("--global_crops_scale 0.14 1." for example)""",
)
parser.add_argument(
"--local_crops_number",
type=int,
default=10,
help="""Number of small
local views to generate. Set this parameter to 0 to disable multi-crop training.
When disabling multi-crop we recommend to use "--global_crops_scale 0.14 1." """,
)
parser.add_argument(
"--local_crops_scale",
type=float,
nargs="+",
default=(0.05, 0.25),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for small local view cropping of multi-crop.""",
)
# strong augmentation parameters
parser.add_argument(
"--timm_auto_augment_par",
type=str,
default="rand-m9-mstd0.5-inc1",
help="""the parameters for the AutoAugment used in DeiT.""",
)
parser.add_argument(
"--color_aug",
type=utils.bool_flag,
default=False,
help="""after AutoAugment, whether we further perform color augmentation. (Default: False).""",
)
parser.add_argument(
"--size_crops",
type=int,
default=[96],
nargs="+",
help="""the small crop size. Note we use multi-crop strategy, namely two 224-sized crops +
ten 96-sized crops. (Default: 96)""",
)
parser.add_argument(
"--strong_ratio",
type=float,
default=0.45,
help="""the ratio of image augmentation for the AutoAugment used in DeiT.""",
)
parser.add_argument(
"--re_prob",
type=float,
default=0.25,
help="""the re-prob parameter of image augmentation for the AutoAugment used in DeiT.""",
)
parser.add_argument(
"--vanilla_weak_augmentation",
type=utils.bool_flag,
default=False,
help="""Whether we use the same augmentation in DINO, namely only using weak augmentation.""",
)
parser.add_argument(
"--prob",
type=float,
default=0.5,
help="""When we use strong augmentation and weak augmentation, the ratio of images to
be cropped with strong augmentation.""",
)
##======== Misc ============
parser.add_argument(
"--data_path",
default="/dataset/imageNet100_sicy/train/",
type=str,
help="""Please specify path to the ImageNet training data.""",
)
parser.add_argument(
"--output_dir",
default="./exp/",
type=str,
help="""Path to save logs and checkpoints.""",
)
parser.add_argument(
"--saveckp_freq",
default=50,
type=int,
help="""Save checkpoint every x epochs.""",
)
parser.add_argument("--seed", default=0, type=int, help="""Random seed.""")
parser.add_argument(
"--num_workers",
default=12,
type=int,
help="""Number of data loading workers per GPU.""",
)
parser.add_argument(
"--dist_url",
default="env://",
type=str,
help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""",
)
parser.add_argument(
"--local_rank",
default=0,
type=int,
help="""local rank for distrbuted training.""",
)
parser.add_argument(
"--rank", default=0, type=int, help="""rank for distrbuted training."""
)
parser.add_argument(
| |
import numpy as np
import sys
class field():
def __init__( self, rows, columns):
if ((rows < 5 or rows > 19) or (columns < 5 or columns > 19)):
print("Game field size limited to 5..19 x 5..19.")
exit()
self.__rows = rows
self.__columns = columns
self.__rows_to_win = 5
self.__field = np.zeros(( rows,columns), dtype=np.int)
self.__metric_moves = np.zeros(( 2, rows,columns), dtype=np.int)
#self.__metric_moves2 = np.zeros(( 2, rows,columns), dtype=np.int)
def reset( self):
self.__field = np.zeros(( self.__rows, self.__columns), dtype=np.int)
self.__metric_moves = np.zeros(( 2, self.__rows, self.__columns), dtype=np.int)
def get_number_of_rows( self):
    """Return the number of board rows."""
    return self.__rows
def get_number_of_columns( self):
    """Return the number of board columns."""
    return self.__columns
def is_free( self, i, j):
    """Return True when cell (i, j) holds no player marker (value 0)."""
    return (self.__field[i,j] == 0)
def get_value( self, i, j):
    """Return the marker at cell (i, j): 0 when free, else the player id."""
    return self.__field[i,j]
def get_center_position( self):
return [self.__rows>>1, self.__columns>>1]
def set( self, player, i, j):
if self.__field[i,j] == 0:
self.__field[i,j] = player
return True
else:
print("Error in field.set() for player",player,". Field (", i, j, ") is already occupied with value", self.__field[i,j])
return False
def unset( self, i, j):
    """Mark cell (i, j) as free again (take back a move)."""
    self.__field[i,j] = 0
def get_list_of_empty_positions( self):
moves = []
for i in range( self.__rows):
for j in range( self.__columns):
if self.__field[i,j] == 0:
moves.append([i,j])
return moves
################################################################
#
# set_pattern_antipattern()
#
# This function discriminates which flag a player shall
# use to indicate a game field as used.
# "1" is used by player 1. Opponent flag (anti_pattern) is "2".
# "2" is used by player 2. Opponent flag (anti_pattern) is "1".
#
################################################################
def set_pattern_antipattern( self, player):
if player == 1:
return 1,2
else:
return 2,1
################################################################
#
# get_first_empty_field()
#
# Returns two indices of first game field starting from top left
# corner, which is still free.
#
################################################################
def get_first_empty_field( self):
for i in range( self.__rows):
for j in range( self.__columns):
if self.is_free( i,j):
return [i,j]
################################################################
#
# get_list_of_surrounding_fields()
#
# Returns list of game field indices containing all fields
# which surround game fields already in use.
#
# The parameter depth indicates the distance from a field in
# use.
#
################################################################
def add_to_list( self, l, x, y):
if [x,y] not in l:
l.append([x,y])
return l
def get_list_of_surrounding_fields( self, player, depth):
    """Collect free cells at distance `depth` around every occupied cell.

    Scans the whole board; for each occupied cell (either player's, since
    the per-player filter below is commented out) the neighboring cells at
    offset `depth` are probed and the free ones are collected without
    duplicates, in scan order.

    Args:
        player: player id (1 or 2); only used to derive pattern/anti_pattern,
            which the scan currently does not consult.
        depth: distance from an occupied cell.

    Returns:
        List of [row, col] positions.
    """
    l = []
    # NOTE(review): pattern/anti_pattern are computed but unused since the
    # `== pattern` check was commented out -- confirm intent.
    pattern, anti_pattern = self.set_pattern_antipattern( player)
    for i in range( self.__rows):
        for j in range( self.__columns):
            #if self.__field[i,j] == pattern:
            if self.__field[i,j] != 0:
                # Neighbors in the row `depth` above, plus the lateral
                # cells (i, j +/- depth).
                if i-depth >= 0:
                    if self.is_free( i-depth, j):
                        l = self.add_to_list( l, i-depth, j)
                    if j-depth >= 0 :
                        if self.is_free( i-depth, j-depth):
                            l = self.add_to_list( l, i-depth, j-depth)
                        if self.is_free( i,j-depth):
                            l = self.add_to_list( l, i, j-depth)
                    if j + depth < self.get_number_of_columns():
                        if self.is_free( i-depth,j+depth):
                            l = self.add_to_list( l, i-depth, j+depth)
                        if self.is_free( i,j+depth):
                            l = self.add_to_list( l, i, j+depth)
                # Neighbors in the row `depth` below; note the lateral
                # cells (i, j +/- depth) are probed again here -- the
                # dedupe in add_to_list() absorbs the repeats.
                # NOTE(review): lateral neighbors are only reached via
                # these two row-bounded branches, so they are skipped when
                # both i-depth < 0 and i+depth >= rows -- confirm whether
                # that can occur for the depths used by callers.
                if i+depth < self.get_number_of_rows():
                    if self.is_free( i+depth,j):
                        l = self.add_to_list( l, i+depth, j)
                    if j-depth >= 0:
                        if self.is_free( i+depth,j-depth):
                            l = self.add_to_list( l, i+depth, j-depth)
                        if self.is_free( i,j-depth):
                            l = self.add_to_list( l, i, j-depth)
                    if j+depth < self.get_number_of_columns():
                        if self.is_free( i+depth,j+depth):
                            l = self.add_to_list( l, i+depth, j+depth)
                        if self.is_free( i,j+depth):
                            l = self.add_to_list( l, i, j+depth)
    return l
################################################################
#
# check_line_of_sight()
#
# Given two game fields, the function checks whether they are
# in a line of sight, i.e. between them there is no game field
# used by the opponent (anti_pattern) and that their distance
# is not exceeding 4.
#
################################################################
def check_line_of_sight( self, anti_pattern, pos1, pos2):
r1, c1 = pos1[0], pos1[1]
r2, c2 = pos2[0], pos2[1]
if r1 == r2:
start, length = min(c2,c1), abs(c2-c1)
if length > 4:
return False
elif length <= 1:
return True
for i in range(length-1):
if self.__field[r1, 1+start+i] == anti_pattern:
return False
return True
elif c1 == c2:
start, length = min(r2,r1), abs(r2-r1)
if length > 4:
return False
elif length <= 1:
return True
for i in range(length-1):
if self.__field[1+start+i,c1] == anti_pattern:
return False
return True
elif abs(r1-r2) == abs(c1-c2):
length = abs(c1-c2)
if length > 4:
return False
elif length <= 1:
return True
if ((r1 > r2 and c1 > c2) or (r1 < r2 and c1 < c2)):
# falling diagonale
start_r, start_c, length = min(r1, r2), min(c1, c2), abs(r2-r1)
for i in range( length-1):
if self.__field[min(self.get_number_of_rows()-1,1+start_r+i),min(self.get_number_of_columns()-1,1+start_c+i)] == anti_pattern:
return False
return True
else:
# rising diagonale
start_r, start_c, length = max(r1, r2), min(c1, c2), abs(r2-r1)
for i in range( length-1):
if self.__field[min(self.get_number_of_rows()-1,1+start_r+i),min(self.get_number_of_columns()-1,1+start_c+i)] == anti_pattern: # first index out of bounds
return False
return True
return False
"""
def calc_max_available_moves( self, data, pattern, anti_pattern ):
count_max = 1
if len( data) >= 5:
frames = len(data) - 5
for w in range(frames+1):
existing_fields = 0
for w in data[w:w+5]:
if w == anti_pattern:
existing = 6
break
elif w == pattern:
existing_fields +=1
if existing_fields > count_max:
count_max = existing_fields
return count_max
"""
######################################################################
#
# calc_min_required_moves()
#
# For a given data bar (row/column/diag/anti-diag), the function
# determines the minimal number of fields which are
# needed within this bar to build an "all-5" sequence with "pattern".
#
######################################################################
def calc_min_required_moves( self, data, pattern, anti_pattern):
count_min = 6
if len( data) >= 5:
frames = len(data) - 5
for w in range(frames+1):
missing_fields = 0
for w in data[w:w+5]:
if w == anti_pattern:
missing_fields = 6
break
elif w == 0:
missing_fields +=1
if missing_fields < count_min:
count_min = missing_fields
return count_min
######################################################################
#
# calc_min_required_moves2()
#
# For a given data bar (row/column/diag/anti-diag) with an offset
# for the curent field, function determines the minimal number of fields
# which are needed within this bar to build an "all-5" sequence
# with "pattern" (including the field with the offset).
#
######################################################################
def calc_min_required_moves2(self, data, offset, pattern, anti_pattern):
    """Minimal number of empty fields needed to complete an "all-5"
    sequence in ``data`` using a window that contains position ``offset``.

    Only length-5 windows overlapping ``offset`` are examined.  A window
    containing ``anti_pattern`` counts as 6 (impossible); otherwise its
    cost is the number of empty (0) cells.  Returns 6 when no candidate
    window of length 5 exists around ``offset``.
    """
    best = 6
    left = max(0, offset - 4)
    right = min(offset + 4, len(data) - 1)
    windows = max(0, right - left - 3)
    for shift in range(windows):
        gaps = 0
        for cell in data[left + shift:left + shift + 5]:
            if cell == anti_pattern:
                gaps = 6
                break
            if cell == 0:
                gaps += 1
        best = min(best, gaps)
    return best
######################################################################
#
# calc_moves_4_directions()
#
# For a given game field with indices i,j, the function determines
# the minimal number of fields which are needed to build
# an "all-5" sequence with "pattern" in any bar direction (horiz, vert,
# diag, anti-diag).
#
######################################################################
def calc_moves_4_directions(self, pattern, anti_pattern, i, j):
    """Per-direction minimal move counts for completing an "all-5"
    sequence through field (i, j).

    Directions 0..3 are horizontal, vertical, diagonal and anti-diagonal
    (see return_4plus4_bar_by_direction); the result is a list with one
    count per direction, computed by calc_min_required_moves2.
    """
    return [
        self.calc_min_required_moves2(
            *self.return_4plus4_bar_by_direction(direction, i, j),
            pattern,
            anti_pattern,
        )
        for direction in range(4)
    ]
######################################################################
#
# calc_moves()
#
# For a given game field with indices i,j, the function determines
# the minimal number of fields which are needed to build
# an "all-5" sequence with "pattern" in any bar direction (horiz, vert,
# diag, anti-diag). During the bar assessment the offset of the field
# within the bar is not considered.
#
######################################################################
def calc_moves(self, pattern, anti_pattern, i, j):
    """Minimal number of moves to complete an "all-5" sequence through
    field (i, j), taken over all four bar directions.

    Unlike calc_moves_4_directions, the offset of the field within each
    bar is ignored (calc_min_required_moves scans the whole bar).
    """
    counts = []
    for direction in range(4):
        bar, _offset = self.return_4plus4_bar_by_direction(direction, i, j)
        counts.append(self.calc_min_required_moves(bar, pattern, anti_pattern))
    return min(counts)
######################################################################
#
# calc_metric_moves()
#
# Function determines for each unused game field the minimum number
# of moves which are needed to build an "all-5" sequence
# in any bar direction (horiz, vert, diag, anti-diag).
# This calculation is done for both player and opponent and stored
# in an internal matrix self.__metric_moves.
#
# The function returns 4 lists for player and opponent:
# - all game fields with 1 missing element to reach "5"
# - all game fields with 2 missing elements to reach "5"
# - all game fields with 3 missing elements to reach "5"
# - all game fields with 4 missing elements to reach "5"
#
######################################################################
def calc_metric_moves(self, index1, index2):
    """Recompute the move metric for every field and return candidate
    moves for both players.

    For each empty field the minimal number of moves needed to complete
    an "all-5" sequence (any direction) is stored in self.__metric_moves,
    one layer per player; occupied fields get 0.  The return value is two
    lists of four arrays each: the fields whose metric is 1, 2, 3 and 4
    for player ``index1`` and for player ``index2`` respectively.
    """
    for layer in range(2):
        pattern, anti_pattern = self.set_pattern_antipattern(layer + 1)
        for row in range(self.__rows):
            for col in range(self.__columns):
                if self.__field[row, col] == 0:
                    self.__metric_moves[layer, row, col] = self.calc_moves(
                        pattern, anti_pattern, row, col)
                else:
                    self.__metric_moves[layer, row, col] = 0
    # Ordered candidate lists: fewest missing moves first.
    moves_player = [self.get_metric_fields(index1, k) for k in range(1, 5)]
    moves_opponent = [self.get_metric_fields(index2, k) for k in range(1, 5)]
    return moves_player, moves_opponent
def get_metric_fields(self, player, index):
    """Return the coordinates of all fields whose stored move metric
    equals ``index`` for the given player.

    :param player: player number, 1 or 2.
    :param index: required metric value, 1..4.
    :return: numpy array of [row, col] pairs (np.argwhere result).

    Both parameters are validated; invalid values print a diagnostic and
    terminate the program (matching this module's error-handling style).
    Fixes: an invalid ``player`` previously fell through and silently
    returned None, and the diagnostic named the wrong function.
    """
    if player in (1, 2) and 1 <= index < 5:
        return np.argwhere(self.__metric_moves[player - 1] == index)
    print("get_metric_fields(). Wrong parameters", player, index)
    exit()
"""
def calc_metric_moves2( self ):
for p in range(2):
pattern, anti_pattern = self.set_pattern_antipattern( p+1)
#print(player, pattern, anti_pattern)
for i in range( self.__rows):
for j in range( self.__columns):
if self.__field[i,j] == 0:
self.__metric_moves2[p,i,j] = self.calc_moves( pattern, anti_pattern, i, j)
else:
self.__metric_moves2[p,i,j] = 0
return
def get_metric_fields2( self, player, index):
if player == 1 or player == 2:
if index >= 1 and index < 5:
return np.argwhere( self.__metric_moves2[player-1] == index)
else:
print("get_metric_field(). Wrong parameters", player, index)
exit()
"""
######################################################################
#
# find_5_equals()
#
# For a given data bar, the function determines the maximum count of
# consecutive patterns (when larger or equal to 5) and the offset
# of the element which completes the sequence.
#
# Otherwise it returns 0,0
#
######################################################################
def find_5_equals(self, pattern, data):
    """Locate a run of at least five consecutive ``pattern`` values.

    Scans ``data`` left to right and tracks the current run length.
    Whenever the run reaches 5 or more, the run length and the current
    position are recorded; the last such recording wins.

    :return: (run_length, end_index) of the final qualifying run, or
        (0, 0) when no run of 5 exists.
    """
    run = 0
    best = 0
    best_pos = 0
    for pos, value in enumerate(data):
        if value != pattern:
            run = 0
            continue
        run += 1
        if run >= 5:
            best = run
            best_pos = pos
    return best, best_pos
######################################################################
#
# return_full_bar_by_direction()
#
# For a given game field with indices i,j and a requested direction index
# the function returns the associated bar with the offset of the related
# field in this bar.
#
# The direction indices are:
# 0: horizontal
# 1: vertical
# 2: diagonal (bottom left to upper right)
# 3: anti/inverse diagonal
#
######################################################################
def return_full_bar_by_direction( self, direction, i, j):
if direction == 0: # horizontal
return (self.__field[i]), j
elif direction == 1: # vertical
return (self.__field[:,j]), i
elif direction == 2: # diagonal
return (np.diag(self.__field, j-i)), min(i,j)
elif direction == 3: # inverse diagonal
return (np.diag(self.__field[:,::-1], (self.__columns-1-j) -i)[::-1]), min(self.__rows-1-i, j)
else:
print("ERROR: return_full_bar_by_direction() | |
else:
for i in range(p):
phi.append(B[:, i * q : (i + 1) * q])
if check_stationarity:
M = np.zeros((p * q, p * q))
for i in range(p):
M[0:q, i * q : (i + 1) * q] = phi[i]
for i in range(1, p):
M[i * q : (i + 1) * q, (i - 1) * q : i * q] = np.eye(q, q)
r, v = np.linalg.eig(M)
if np.any(np.abs(r) > 0.999):
raise RuntimeError(
"Error in estimate_var_params_ols: " "nonstationary VAR(p) process"
)
if d == 1:
phi = _compute_differenced_model_params(phi, p, q, 1)
if include_constant_term:
phi.insert(0, c)
phi.append(np.zeros((q, q)))
return phi
def estimate_var_params_ols_localized(
    x,
    p,
    window_radius,
    d=0,
    include_constant_term=False,
    h=0,
    lam=0.0,
    window="gaussian",
):
    r"""Estimate the parameters of a vector autoregressive VAR(p) model

    :math:`\mathbf{x}_{k+1,i}=\mathbf{c}_i+\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
    \mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
    \mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`

    by using ordinary least squares (OLS), where :math:`i` denote spatial
    coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n, q, :) containing a time series of length n=p+d+h+1
        with q-dimensional variables. The remaining dimensions are flattened.
        The remaining dimensions starting from the third one represent the
        samples.
    p : int
        The order of the model.
    window_radius : float
        Radius of the moving window. If window is 'gaussian', window_radius is
        the standard deviation of the Gaussian filter. If window is 'uniform',
        the size of the window is 2*window_radius+1.
    d : {0,1}
        The order of differencing to apply to the time series.
    include_constant_term : bool
        Include the constant term :math:`\mathbf{c}` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).
    window : {"gaussian", "uniform"}
        The weight function to use for the moving window. Applicable if
        window_radius < np.inf. Defaults to 'gaussian'.

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_{1,i},
        \mathbf{\Phi}_{2,i},\dots,\mathbf{\Phi}_{p+1,i}`. If
        include_constant_term is True, the constant term :math:`\mathbf{c}_i` is
        added to the beginning of the list. Each element of the list is a matrix
        of shape (x.shape[2:], q, q).

    Notes
    -----
    Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
    currently implemented, and it is set to a zero matrix.
    """
    q = x.shape[1]
    n = x.shape[0]
    # The series must contain exactly the samples needed for the fit:
    # model order + differencing + history + one target time step.
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    if d == 1:
        # Fit on the once-differenced series; the parameters are mapped back
        # to the original series at the end of the function.
        x = np.diff(x, axis=0)
        n -= d
    if window == "gaussian":
        convol_filter = ndimage.gaussian_filter
    else:
        convol_filter = ndimage.uniform_filter
    # SciPy interprets the second argument as sigma for gaussian_filter but
    # as the full box size for uniform_filter, hence the different scaling.
    if window == "uniform":
        window_size = 2 * window_radius + 1
    else:
        window_size = window_radius
    # XZ accumulates the spatially filtered cross-products between the target
    # time step(s) and the lagged values (the right-hand side of the local
    # normal equations). Shape: (q, p*q) per spatial location.
    XZ = np.zeros(np.hstack([[q, p * q], x.shape[2:]]))
    for i in range(q):
        for k in range(p):
            for j in range(q):
                for l in range(h + 1):
                    tmp = convol_filter(
                        x[p + l, i, :] * x[p - 1 - k + l, j, :],
                        window_size,
                        mode="constant",
                    )
                    XZ[i, k * q + j, :] += tmp
    if include_constant_term:
        # v holds the filtered sums of the target values; it becomes the
        # column of XZ that corresponds to the constant term c.
        v = np.zeros(np.hstack([[q], x.shape[2:]]))
        for i in range(q):
            for j in range(h + 1):
                v[i, :] += convol_filter(x[p + j, i, :], window_size, mode="constant")
        XZ = np.hstack([v[:, np.newaxis, :], XZ])
    if not include_constant_term:
        # Z2 is the filtered Gram matrix of the lagged predictors
        # (shape (p*q, p*q) per spatial location).
        Z2 = np.zeros(np.hstack([[p * q, p * q], x.shape[2:]]))
        for i in range(p):
            for j in range(q):
                for k in range(p):
                    for l in range(q):
                        for m in range(h + 1):
                            tmp = convol_filter(
                                x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
                                window_size,
                                mode="constant",
                            )
                            Z2[i * q + j, k * q + l, :] += tmp
    else:
        # With a constant term the Gram matrix gains one extra row/column
        # (index 0) for the intercept.
        Z2 = np.zeros(np.hstack([[p * q + 1, p * q + 1], x.shape[2:]]))
        # Filtered window weight mass: the (constant, constant) entry.
        Z2[0, 0, :] = convol_filter(np.ones(x.shape[2:]), window_size, mode="constant")
        for i in range(p):
            for j in range(q):
                for k in range(h + 1):
                    tmp = convol_filter(
                        x[p - 1 - i + k, j, :], window_size, mode="constant"
                    )
                    # Cross terms between the constant and each lagged value;
                    # the matrix is symmetric in these entries.
                    Z2[0, i * q + j + 1, :] += tmp
                    Z2[i * q + j + 1, 0, :] += tmp
        for i in range(p):
            for j in range(q):
                for k in range(p):
                    for l in range(q):
                        for m in range(h + 1):
                            tmp = convol_filter(
                                x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
                                window_size,
                                mode="constant",
                            )
                            Z2[i * q + j + 1, k * q + l + 1, :] += tmp
    # Solve the normal equations independently at each spatial location.
    m = np.prod(x.shape[2:])
    if include_constant_term:
        c = np.empty((m, q))
    XZ = XZ.reshape((XZ.shape[0], XZ.shape[1], m))
    Z2 = Z2.reshape((Z2.shape[0], Z2.shape[1], m))
    phi = np.empty((p, m, q, q))
    for i in range(m):
        try:
            # B = XZ * (Z2 + lam*I)^-1 : ridge-regularized OLS solution.
            B = np.dot(
                XZ[:, :, i], np.linalg.inv(Z2[:, :, i] + lam * np.eye(Z2.shape[0]))
            )
            for k in range(p):
                if not include_constant_term:
                    phi[k, i, :, :] = B[:, k * q : (k + 1) * q]
                else:
                    # Column 0 of B is the constant term; lag blocks shift by 1.
                    phi[k, i, :, :] = B[:, k * q + 1 : (k + 1) * q + 1]
            if include_constant_term:
                c[i, :] = B[:, 0]
        except np.linalg.LinAlgError:
            # Singular local Gram matrix (e.g. empty window): mark as NaN.
            phi[:, i, :, :] = np.nan
            if include_constant_term:
                c[i, :] = np.nan
    # Restore the original spatial dimensions for each lag matrix.
    phi_out = [
        phi[i].reshape(np.hstack([x.shape[2:], [q, q]])) for i in range(len(phi))
    ]
    if d == 1:
        phi_out = _compute_differenced_model_params(phi_out, p, q, 1)
    # Innovation term is not estimated; append a zero matrix (see Notes).
    phi_out.append(np.zeros(phi_out[0].shape))
    if include_constant_term:
        phi_out.insert(0, c.reshape(np.hstack([x.shape[2:], [q]])))
    return phi_out
def estimate_var_params_yw(gamma, d=0, check_stationarity=True):
    r"""Estimate the parameters of a VAR(p) model

    :math:`\mathbf{x}_{k+1}=\mathbf{\Phi}_1\mathbf{x}_k+
    \mathbf{\Phi}_2\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
    \mathbf{\Phi}_{p+1}\mathbf{\epsilon}`

    from the Yule-Walker equations using the given correlation matrices
    :math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`,
    where n=p.

    Parameters
    ----------
    gamma : list
        List of correlation matrices
        :math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`.
        To obtain these matrices, use
        :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
        with window_radius=np.inf.
    d : {0,1}
        The order of differencing. If d=1, the correlation coefficients gamma
        are assumed to be computed from the differenced time series, which is
        also done for the resulting parameter estimates.
    check_stationarity : bool
        If True, the stationarity of the resulting VAR(p) process is tested. An
        exception is thrown if the process is not stationary.

    Returns
    -------
    out : list
        List of VAR(p) coefficient matrices :math:`\mathbf{\Phi}_1,
        \mathbf{\Phi}_2,\dots\mathbf{\Phi}_{p+1}`, where the last matrix
        corresponds to the innovation term.

    Notes
    -----
    To estimate the parameters of an integrated VARI(p,d) model, compute the
    correlation coefficients gamma by calling
    :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
    with d>0. Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}`
    is not currently implemented, and it is set to a zero matrix.
    """
    p = len(gamma) - 1
    q = gamma[0].shape[0]
    # All correlation matrices must share the q x q shape of gamma[0].
    for idx in range(len(gamma)):
        if gamma[idx].shape[0] != q or gamma[idx].shape[1] != q:
            raise ValueError(
                "dimension mismatch: gamma[%d].shape=%s, but (%d,%d) expected"
                % (idx, str(gamma[idx].shape), q, q)
            )
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    # Assemble the block-Toeplitz left-hand side of the Yule-Walker system:
    # block (row, col) is Gamma_{|row-col|}, transposed below the diagonal.
    lhs = np.empty((p * q, p * q))
    for row in range(p):
        for col in range(p):
            block = gamma[abs(row - col)]
            if row > col:
                block = block.T
            lhs[row * q : (row + 1) * q, col * q : (col + 1) * q] = block
    rhs = np.vstack([gamma[k].T for k in range(1, p + 1)])
    solution = np.linalg.solve(lhs, rhs)
    phi = [solution[k * q : (k + 1) * q, :] for k in range(p)]
    if check_stationarity:
        if not test_var_stationarity(phi):
            raise RuntimeError(
                "Error in estimate_var_params_yw: " "nonstationary VAR(p) process"
            )
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, q, 1)
    # Innovation term is not estimated; append a zero matrix (see Notes).
    phi.append(np.zeros(phi[0].shape))
    return phi
def estimate_var_params_yw_localized(gamma, d=0):
r"""Estimate the parameters of a vector autoregressive VAR(p) model
:math:`\mathbf{x}_{k+1,i}=\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
\mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
\mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`
from the Yule-Walker equations by using the given correlation matrices,
where :math:`i` denote spatial coordinates with arbitrary dimension.
Parameters
----------
gamma : list
List of correlation matrices
:math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`.
To obtain these matrices, use
:py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
with window_radius<np.inf.
d : | |
<filename>tencentcloud/vod/v20180717/models.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AIAnalysisTemplateItem(AbstractModel):
    """Details of an AI-based intelligent analysis template."""

    def __init__(self):
        """
        :param Definition: Unique ID of the intelligent analysis template.
        :type Definition: int
        :param Name: Name of the intelligent analysis template.
        :type Name: str
        :param Comment: Description of the intelligent analysis template.
        :type Comment: str
        :param ClassificationConfigure: Control parameters of the intelligent
        classification task. Note: this field may be null.
        :type ClassificationConfigure: :class:`tencentcloud.vod.v20180717.models.ClassificationConfigureInfo`
        :param TagConfigure: Control parameters of the intelligent tagging
        task. Note: this field may be null.
        :type TagConfigure: :class:`tencentcloud.vod.v20180717.models.TagConfigureInfo`
        :param CoverConfigure: Control parameters of the intelligent cover
        task. Note: this field may be null.
        :type CoverConfigure: :class:`tencentcloud.vod.v20180717.models.CoverConfigureInfo`
        :param FrameTagConfigure: Control parameters of the intelligent
        frame-level tagging task. Note: this field may be null.
        :type FrameTagConfigure: :class:`tencentcloud.vod.v20180717.models.FrameTagConfigureInfo`
        :param HighlightConfigure: Control parameters of the intelligent
        highlight-generation task.
        :type HighlightConfigure: :class:`tencentcloud.vod.v20180717.models.HighlightsConfigureInfo`
        :param CreateTime: Creation time of the template, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I).
        :type CreateTime: str
        :param UpdateTime: Last modification time of the template, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I).
        :type UpdateTime: str
        """
        self.Definition = None
        self.Name = None
        self.Comment = None
        self.ClassificationConfigure = None
        self.TagConfigure = None
        self.CoverConfigure = None
        self.FrameTagConfigure = None
        self.HighlightConfigure = None
        self.CreateTime = None
        self.UpdateTime = None

    def _deserialize(self, params):
        # Scalar fields are copied straight from the payload (None if absent).
        for field in ("Definition", "Name", "Comment", "CreateTime", "UpdateTime"):
            setattr(self, field, params.get(field))
        # Nested structures are deserialized into their model classes.
        nested = (
            ("ClassificationConfigure", ClassificationConfigureInfo),
            ("TagConfigure", TagConfigureInfo),
            ("CoverConfigure", CoverConfigureInfo),
            ("FrameTagConfigure", FrameTagConfigureInfo),
            ("HighlightConfigure", HighlightsConfigureInfo),
        )
        for field, model_cls in nested:
            value = params.get(field)
            if value is not None:
                obj = model_cls()
                obj._deserialize(value)
                setattr(self, field, obj)
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AIRecognitionTemplateItem(AbstractModel):
    """Details of a video content recognition template."""

    def __init__(self):
        """
        :param Definition: Unique ID of the video content recognition template.
        :type Definition: int
        :param Name: Name of the video content recognition template.
        :type Name: str
        :param Comment: Description of the video content recognition template.
        :type Comment: str
        :param HeadTailConfigure: Control parameters of opening/closing
        credits recognition. Note: this field may be null.
        :type HeadTailConfigure: :class:`tencentcloud.vod.v20180717.models.HeadTailConfigureInfo`
        :param SegmentConfigure: Control parameters of splitting recognition.
        Note: this field may be null.
        :type SegmentConfigure: :class:`tencentcloud.vod.v20180717.models.SegmentConfigureInfo`
        :param FaceConfigure: Control parameters of face recognition.
        Note: this field may be null.
        :type FaceConfigure: :class:`tencentcloud.vod.v20180717.models.FaceConfigureInfo`
        :param OcrFullTextConfigure: Control parameters of full-text OCR
        recognition. Note: this field may be null.
        :type OcrFullTextConfigure: :class:`tencentcloud.vod.v20180717.models.OcrFullTextConfigureInfo`
        :param OcrWordsConfigure: Control parameters of OCR keyword
        recognition. Note: this field may be null.
        :type OcrWordsConfigure: :class:`tencentcloud.vod.v20180717.models.OcrWordsConfigureInfo`
        :param AsrFullTextConfigure: Control parameters of full-text speech
        recognition. Note: this field may be null.
        :type AsrFullTextConfigure: :class:`tencentcloud.vod.v20180717.models.AsrFullTextConfigureInfo`
        :param AsrWordsConfigure: Control parameters of speech keyword
        recognition. Note: this field may be null.
        :type AsrWordsConfigure: :class:`tencentcloud.vod.v20180717.models.AsrWordsConfigureInfo`
        :param ObjectConfigure: Control parameters of object recognition.
        Note: this field may be null.
        :type ObjectConfigure: :class:`tencentcloud.vod.v20180717.models.ObjectConfigureInfo`
        :param ScreenshotInterval: Screenshot interval, in seconds.
        :type ScreenshotInterval: float
        :param CreateTime: Creation time of the template, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I).
        :type CreateTime: str
        :param UpdateTime: Last modification time of the template, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I).
        :type UpdateTime: str
        """
        self.Definition = None
        self.Name = None
        self.Comment = None
        self.HeadTailConfigure = None
        self.SegmentConfigure = None
        self.FaceConfigure = None
        self.OcrFullTextConfigure = None
        self.OcrWordsConfigure = None
        self.AsrFullTextConfigure = None
        self.AsrWordsConfigure = None
        self.ObjectConfigure = None
        self.ScreenshotInterval = None
        self.CreateTime = None
        self.UpdateTime = None

    def _deserialize(self, params):
        # Scalar fields are copied straight from the payload (None if absent).
        scalars = (
            "Definition",
            "Name",
            "Comment",
            "ScreenshotInterval",
            "CreateTime",
            "UpdateTime",
        )
        for field in scalars:
            setattr(self, field, params.get(field))
        # Nested structures are deserialized into their model classes.
        nested = (
            ("HeadTailConfigure", HeadTailConfigureInfo),
            ("SegmentConfigure", SegmentConfigureInfo),
            ("FaceConfigure", FaceConfigureInfo),
            ("OcrFullTextConfigure", OcrFullTextConfigureInfo),
            ("OcrWordsConfigure", OcrWordsConfigureInfo),
            ("AsrFullTextConfigure", AsrFullTextConfigureInfo),
            ("AsrWordsConfigure", AsrWordsConfigureInfo),
            ("ObjectConfigure", ObjectConfigureInfo),
        )
        for field, model_cls in nested:
            value = params.get(field)
            if value is not None:
                obj = model_cls()
                obj._deserialize(value)
                setattr(self, field, obj)
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AccelerateAreaInfo(AbstractModel):
    """Region acceleration information of a domain name."""

    def __init__(self):
        """
        :param Area: Acceleration region. Valid values:
        <li>Chinese Mainland: inside the Chinese mainland (excluding Hong Kong, Macao and Taiwan).</li>
        <li>Outside Chinese Mainland: outside the Chinese mainland.</li>
        :type Area: str
        :param TencentDisableReason: Reason why Tencent disabled acceleration.
        Valid values:
        <li>ForLegalReasons: acceleration disabled for legal reasons;</li>
        <li>ForOverdueBills: acceleration disabled due to overdue bills.</li>
        :type TencentDisableReason: str
        :param TencentEdgeDomain: CNAME record of the accelerated domain name.
        :type TencentEdgeDomain: str
        """
        self.Area = None
        self.TencentDisableReason = None
        self.TencentEdgeDomain = None

    def _deserialize(self, params):
        # All fields are plain scalars copied from the payload.
        for field in ("Area", "TencentDisableReason", "TencentEdgeDomain"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AdaptiveDynamicStreamingInfoItem(AbstractModel):
    """Adaptive bitrate streaming information."""

    def __init__(self):
        """
        :param Definition: Adaptive bitrate streaming specification.
        :type Definition: int
        :param Package: Container format; HLS only.
        :type Package: str
        :param DrmType: Encryption type.
        :type DrmType: str
        :param Url: Playback URL.
        :type Url: str
        """
        self.Definition = None
        self.Package = None
        self.DrmType = None
        self.Url = None

    def _deserialize(self, params):
        # All fields are plain scalars copied from the payload.
        for field in ("Definition", "Package", "DrmType", "Url"):
            setattr(self, field, params.get(field))
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AdaptiveDynamicStreamingTaskInput(AbstractModel):
    """Input parameters of an adaptive bitrate streaming task."""

    def __init__(self):
        """
        :param Definition: Adaptive bitrate streaming template ID.
        :type Definition: int
        :param WatermarkSet: Watermark list; up to 10 image or text
        watermarks are supported.
        :type WatermarkSet: list of WatermarkInput
        :param SubtitleSet: Subtitle list, elements are subtitle IDs;
        up to 16 subtitles are supported.
        :type SubtitleSet: list of str
        """
        self.Definition = None
        self.WatermarkSet = None
        self.SubtitleSet = None

    def _deserialize(self, params):
        self.Definition = params.get("Definition")
        # WatermarkSet is a list of nested WatermarkInput models.
        watermark_items = params.get("WatermarkSet")
        if watermark_items is not None:
            self.WatermarkSet = []
            for item in watermark_items:
                watermark = WatermarkInput()
                watermark._deserialize(item)
                self.WatermarkSet.append(watermark)
        self.SubtitleSet = params.get("SubtitleSet")
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AdaptiveDynamicStreamingTemplate(AbstractModel):
    """Details of an adaptive bitrate streaming template."""

    def __init__(self):
        """
        :param Definition: Unique ID of the adaptive bitrate streaming template.
        :type Definition: int
        :param Type: Template type. Valid values:
        <li>Preset: preset template;</li>
        <li>Custom: user-defined template.</li>
        :type Type: str
        :param Name: Name of the adaptive bitrate streaming template.
        :type Name: str
        :param Comment: Description of the adaptive bitrate streaming template.
        :type Comment: str
        :param Format: Adaptive transcoding format. Valid values:
        <li>HLS.</li>
        :type Format: str
        :param DrmType: DRM type. Valid values:
        <li>FairPlay;</li>
        <li>SimpleAES;</li>
        <li>Widevine.</li>
        An empty string means the video is not protected by DRM.
        :type DrmType: str
        :param StreamInfos: Input stream parameters; up to 10 streams.
        :type StreamInfos: list of AdaptiveStreamTemplate
        :param DisableHigherVideoBitrate: Whether transcoding from a low
        bitrate to a high bitrate is forbidden. Valid values:
        <li>0: no,</li>
        <li>1: yes.</li>
        :type DisableHigherVideoBitrate: int
        :param DisableHigherVideoResolution: Whether transcoding from a low
        resolution to a high resolution is forbidden. Valid values:
        <li>0: no,</li>
        <li>1: yes.</li>
        :type DisableHigherVideoResolution: int
        :param CreateTime: Creation time of the template, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I).
        :type CreateTime: str
        :param UpdateTime: Last modification time of the template, in [ISO date format](https://cloud.tencent.com/document/product/266/11732#I).
        :type UpdateTime: str
        """
        self.Definition = None
        self.Type = None
        self.Name = None
        self.Comment = None
        self.Format = None
        self.DrmType = None
        self.StreamInfos = None
        self.DisableHigherVideoBitrate = None
        self.DisableHigherVideoResolution = None
        self.CreateTime = None
        self.UpdateTime = None

    def _deserialize(self, params):
        # Scalar fields are copied straight from the payload (None if absent).
        scalars = (
            "Definition",
            "Type",
            "Name",
            "Comment",
            "Format",
            "DrmType",
            "DisableHigherVideoBitrate",
            "DisableHigherVideoResolution",
            "CreateTime",
            "UpdateTime",
        )
        for field in scalars:
            setattr(self, field, params.get(field))
        # StreamInfos is a list of nested AdaptiveStreamTemplate models.
        stream_items = params.get("StreamInfos")
        if stream_items is not None:
            self.StreamInfos = []
            for item in stream_items:
                stream = AdaptiveStreamTemplate()
                stream._deserialize(item)
                self.StreamInfos.append(stream)
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AdaptiveStreamTemplate(AbstractModel):
    """Adaptive transcoding stream parameter template."""

    def __init__(self):
        """
        :param Video: Video parameter information.
        :type Video: :class:`tencentcloud.vod.v20180717.models.VideoTemplateInfo`
        :param Audio: Audio parameter information.
        :type Audio: :class:`tencentcloud.vod.v20180717.models.AudioTemplateInfo`
        :param RemoveAudio: Whether to remove the audio stream. Valid values:
        <li>0: no,</li>
        <li>1: yes.</li>
        :type RemoveAudio: int
        :param RemoveVideo: Whether to remove the video stream. Valid values:
        <li>0: no,</li>
        <li>1: yes.</li>
        :type RemoveVideo: int
        """
        self.Video = None
        self.Audio = None
        self.RemoveAudio = None
        self.RemoveVideo = None

    def _deserialize(self, params):
        # Nested structures are deserialized into their model classes.
        video_params = params.get("Video")
        if video_params is not None:
            self.Video = VideoTemplateInfo()
            self.Video._deserialize(video_params)
        audio_params = params.get("Audio")
        if audio_params is not None:
            self.Audio = AudioTemplateInfo()
            self.Audio._deserialize(audio_params)
        self.RemoveAudio = params.get("RemoveAudio")
        self.RemoveVideo = params.get("RemoveVideo")
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AiAnalysisResult(AbstractModel):
    """Intelligent analysis result."""

    def __init__(self):
        """
        :param Type: Task type. Valid values:
        <li>Classification: intelligent classification</li>
        <li>Cover: intelligent cover</li>
        <li>Tag: intelligent tagging</li>
        <li>FrameTag: intelligent frame-level tagging</li>
        <li>Highlight: intelligent highlights</li>
        :type Type: str
        :param ClassificationTask: Query result of the intelligent
        classification task; valid when Type is Classification.
        Note: this field may be null.
        :type ClassificationTask: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskClassificationResult`
        :param CoverTask: Query result of the intelligent cover task;
        valid when Type is Cover. Note: this field may be null.
        :type CoverTask: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskCoverResult`
        :param TagTask: Query result of the intelligent tagging task;
        valid when Type is Tag. Note: this field may be null.
        :type TagTask: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskTagResult`
        :param FrameTagTask: Query result of the intelligent frame-level
        tagging task; valid when Type is FrameTag.
        Note: this field may be null.
        :type FrameTagTask: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskFrameTagResult`
        :param HighlightTask: Query result of the intelligent highlights
        task; valid when Type is Highlight.
        Note: this field may be null.
        :type HighlightTask: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskHighlightResult`
        """
        self.Type = None
        self.ClassificationTask = None
        self.CoverTask = None
        self.TagTask = None
        self.FrameTagTask = None
        self.HighlightTask = None

    def _deserialize(self, params):
        self.Type = params.get("Type")
        # Nested structures are deserialized into their model classes.
        nested = (
            ("ClassificationTask", AiAnalysisTaskClassificationResult),
            ("CoverTask", AiAnalysisTaskCoverResult),
            ("TagTask", AiAnalysisTaskTagResult),
            ("FrameTagTask", AiAnalysisTaskFrameTagResult),
            ("HighlightTask", AiAnalysisTaskHighlightResult),
        )
        for field, model_cls in nested:
            value = params.get(field)
            if value is not None:
                obj = model_cls()
                obj._deserialize(value)
                setattr(self, field, obj)
        # Warn about payload keys that do not map to any known attribute.
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class AiAnalysisTaskClassificationInput(AbstractModel):
"""智能分类任务输入类型
"""
def __init__(self):
"""
:param Definition: 视频智能分类模板 ID。
:type Definition: int
"""
self.Definition = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds | |
== 'ambient':
if self.dim() < self.ambient_dim():
return self.base_ring().zero()
if not self.is_compact():
from sage.rings.infinity import infinity
return infinity
from sage.functions.other import factorial
return self._volume_normaliz('induced_lattice') / factorial(self.dim())
else:
raise TypeError("the measure should be `ambient`, `euclidean`, or `induced_lattice`")
def _triangulate_normaliz(self):
    r"""
    Give a triangulation of the polyhedron using normaliz.

    OUTPUT:

    For compact polyhedra a list of simplices
    each represented by indices of their vertices.

    For cones a list of simplicial cones
    each represented by indices of their rays.

    .. NOTE::

        This function depends on Normaliz (i.e. the ``pynormaliz`` optional
        package). See the Normaliz documentation for further details.

    EXAMPLES::

        sage: P = Polyhedron(vertices=[[0,0,1],[1,0,1],[0,1,1],[1,1,1]],backend='normaliz')  # optional - pynormaliz
        sage: P._triangulate_normaliz()  # optional - pynormaliz
        [(0, 1, 2), (1, 2, 3)]
        sage: C1 = Polyhedron(rays=[[0,0,1],[1,0,1],[0,1,1],[1,1,1]],backend='normaliz')  # optional - pynormaliz
        sage: C1._triangulate_normaliz()  # optional - pynormaliz
        [(0, 1, 2), (1, 2, 3)]
        sage: C2 = Polyhedron(rays=[[1,0,1],[0,0,1],[0,1,1],[1,1,10/9]],backend='normaliz')  # optional - pynormaliz
        sage: C2._triangulate_normaliz()  # optional - pynormaliz
        [(0, 1, 2), (1, 2, 3)]

    Works only for cones and compact polyhedra::

        sage: P = polytopes.cube(backend='normaliz')  # optional - pynormaliz
        sage: Q = Polyhedron(rays=[[0,1]], backend='normaliz')  # optional - pynormaliz
        sage: R = Polyhedron(lines=[[0,1]], backend='normaliz')  # optional - pynormaliz
        sage: (P*Q)._triangulate_normaliz()  # optional - pynormaliz
        Traceback (most recent call last):
        ...
        NotImplementedError: triangulation of non-compact polyhedra that are not cones is not supported
        sage: (P*R)._triangulate_normaliz()  # optional - pynormaliz
        Traceback (most recent call last):
        ...
        NotImplementedError: triangulation of non-compact not pointed polyhedron is not supported

    TESTS:

    Check that :trac:`30531` is fixed::

        sage: P = polytopes.cube(backend='normaliz')*AA(2).sqrt()  # optional - pynormaliz
        sage: P._triangulate_normaliz()  # optional - pynormaliz
        [(0, 1, 2, 4),
         (1, 2, 4, 3),
         (1, 3, 4, 5),
         (3, 5, 6, 7),
         (6, 2, 4, 3),
         (6, 3, 4, 5)]

    ::

        sage: C1 = Polyhedron(rays=[[0,0,1],[1,0,AA(2).sqrt()],[0,1,1],[1,1,1]], backend='normaliz')  # optional - pynormaliz
        sage: C1._triangulate_normaliz()  # optional - pynormaliz
        [(0, 1, 3), (0, 3, 2)]
    """
    # Unsupported cases are rejected up front: a lineality space means the
    # polyhedron is not pointed; vertices mixed with rays means it is neither
    # a polytope nor a cone.
    if self.lines():
        raise NotImplementedError("triangulation of non-compact not pointed polyhedron is not supported")
    if len(self.vertices_list()) >= 2 and self.rays_list():  # A mix of polytope and cone
        raise NotImplementedError("triangulation of non-compact polyhedra that are not cones is not supported")

    if self.is_compact():
        cone = self._normaliz_cone
    else:
        # Make a inhomogeneous copy of the cone.
        # NOTE(review): the comment above says "inhomogeneous" but
        # ``homogeneous=True`` is passed — verify which is intended.
        cone = self._cone_from_Vrepresentation_and_Hrepresentation(
            self.vertices(), self.rays(), self.lines(),
            self.inequalities(), self.equations(), homogeneous=True)

    # Compute the triangulation.
    assert cone

    # Normaliz does not guarantee that the order of generators is kept during
    # computation of the triangulation.
    # Those are the generators that the indices of the triangulation correspond to:
    nmz_triangulation, nmz_triangulation_generators = self._nmz_result(cone, "Triangulation")

    base_ring = self.base_ring()
    v_list = self.vertices_list()
    r_list = self.rays_list()

    # Build a map from Normaliz's generator indices back to the indices of
    # this polyhedron's own vertices/rays.
    new_to_old = {}
    for i, g in enumerate(nmz_triangulation_generators):
        if self.is_compact():
            # Generators are homogenised; divide by the last coordinate to
            # recover the vertex and look it up by exact equality.
            d = base_ring(g[-1])
            vertex = [base_ring(x) / d for x in g[:-1]]
            new_to_old[i] = v_list.index(vertex)
        else:
            if g[-1] > 0:
                # Generator with positive last coordinate — presumably the
                # extra homogenisation generator, dropped from the output
                # simplices below; TODO confirm against Normaliz docs.
                new_to_old[i] = None
            else:
                try:
                    new_to_old[i] = r_list.index([base_ring(x) for x in g[:-1]])
                except ValueError:
                    # Rays are only unique up to scaling.
                    new_ray = vector(base_ring, g[:-1])
                    for j, r in enumerate(self.rays()):
                        ray = r.vector()
                        try:
                            # Check for colinearity.
                            _ = new_ray / ray
                            new_to_old[i] = j
                            break
                        except (TypeError, ArithmeticError):
                            pass
                    else:
                        raise ValueError("could not match rays after computing triangulation with original rays")

    def new_indices(old_indices):
        # Translate a Normaliz simplex to original indices, skipping the
        # generator(s) mapped to ``None`` above.
        for i in old_indices:
            if new_to_old[i] is not None:
                yield new_to_old[i]

    return [tuple(new_indices(x[0])) for x in nmz_triangulation]
#########################################################################
class Polyhedron_QQ_normaliz(Polyhedron_normaliz, Polyhedron_QQ):
r"""
Polyhedra over `\QQ` with normaliz.
INPUT:
- ``Vrep`` -- a list ``[vertices, rays, lines]`` or ``None``
- ``Hrep`` -- a list ``[ieqs, eqns]`` or ``None``
EXAMPLES::
sage: p = Polyhedron(vertices=[(0,0),(1,0),(0,1)], # optional - pynormaliz
....: rays=[(1,1)], lines=[],
....: backend='normaliz', base_ring=QQ)
sage: TestSuite(p).run() # optional - pynormaliz
"""
@cached_method(do_pickle=True)
def ehrhart_series(self, variable='t'):
    r"""
    Return the Ehrhart series of a compact rational polyhedron.

    The Ehrhart series is the generating function whose coefficient of
    `t^k` is the number of integer lattice points inside the `k`-th
    dilation of the polytope.

    INPUT:

    - ``variable`` -- string (default: ``'t'``)

    OUTPUT:

    A rational function.

    EXAMPLES::

        sage: S = Polyhedron(vertices=[[0,1],[1,0]], backend='normaliz')  # optional - pynormaliz
        sage: ES = S.ehrhart_series()  # optional - pynormaliz
        sage: ES.numerator()  # optional - pynormaliz
        1
        sage: ES.denominator().factor()  # optional - pynormaliz
        (t - 1)^2

        sage: C = Polyhedron(vertices = [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]],backend='normaliz')  # optional - pynormaliz
        sage: ES = C.ehrhart_series()  # optional - pynormaliz
        sage: ES.numerator()  # optional - pynormaliz
        t^2 + 4*t + 1
        sage: ES.denominator().factor()  # optional - pynormaliz
        (t - 1)^4

    The following example is from the Normaliz manual contained in the file
    ``rational.in``::

        sage: rat_poly = Polyhedron(vertices=[[1/2,1/2],[-1/3,-1/3],[1/4,-1/2]],backend='normaliz')  # optional - pynormaliz
        sage: ES = rat_poly.ehrhart_series()  # optional - pynormaliz
        sage: ES.numerator()  # optional - pynormaliz
        2*t^6 + 3*t^5 + 4*t^4 + 3*t^3 + t^2 + t + 1
        sage: ES.denominator().factor()  # optional - pynormaliz
        (-1) * (t + 1)^2 * (t - 1)^3 * (t^2 + 1) * (t^2 + t + 1)

    The polyhedron should be compact::

        sage: C = Polyhedron(backend='normaliz',rays=[[1,2],[2,1]])  # optional - pynormaliz
        sage: C.ehrhart_series()  # optional - pynormaliz
        Traceback (most recent call last):
        ...
        NotImplementedError: Ehrhart series can only be computed for compact polyhedron

    .. SEEALSO::

        :meth:`~sage.geometry.polyhedron.backend_normaliz.hilbert_series`

    TESTS:

    Check that the Ehrhart series is pickled::

        sage: new_poly = loads(dumps(rat_poly))  # optional - pynormaliz
        sage: new_poly.ehrhart_series.is_in_cache()  # optional - pynormaliz
        True
    """
    if self.is_empty():
        return 0
    if not self.is_compact():
        raise NotImplementedError("Ehrhart series can only be computed for compact polyhedron")

    nmz_data = self._nmz_result(self._normaliz_cone, "EhrhartSeries")
    # PyNormaliz returns a triple:
    #   [0] the coefficients of the h^*-polynomial,
    #   [1] the exponents e such that (1 - t^e) divides the denominator,
    #   [2] a shift of the generating function.
    from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
    from sage.rings.fraction_field import FractionField
    field = FractionField(PolynomialRing(ZZ, variable))
    t = field.gens()[0]

    series = sum(coeff * t**power for power, coeff in enumerate(nmz_data[0]))
    for exponent in nmz_data[1]:
        series = series / (1 - t**exponent)
    # Apply the shift and return.
    return series * t**nmz_data[2]
def _ehrhart_quasipolynomial_normaliz(self, variable='t'):
    r"""
    Return the Ehrhart quasipolynomial of a compact rational polyhedron
    using Normaliz.

    If it is a polynomial, returns the polynomial. Otherwise, returns a
    tuple of rational polynomials whose length is the quasi-period of the
    quasipolynomial and each rational polynomial describes a residue class.

    INPUT:

    - ``variable`` -- string (default: ``'t'``)

    OUTPUT:

    A polynomial or tuple of polynomials.

    EXAMPLES::

        sage: C = Polyhedron(vertices = [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]],backend='normaliz')  # optional - pynormaliz
        sage: C._ehrhart_quasipolynomial_normaliz()  # optional - pynormaliz
        t^3 + 3*t^2 + 3*t + 1

        sage: P = Polyhedron(vertices=[[0,0],[3/2,0],[0,3/2],[1,1]],backend='normaliz')  # optional - pynormaliz
        sage: P._ehrhart_quasipolynomial_normaliz()  # optional - pynormaliz
        (3/2*t^2 + 2*t + 1, 3/2*t^2 + 2*t + 1/2)
        sage: P._ehrhart_quasipolynomial_normaliz('x')  # optional - pynormaliz
        (3/2*x^2 + 2*x + 1, 3/2*x^2 + 2*x + 1/2)

    The quasipolynomial evaluated at ``i`` counts the integral points
    in the ``i``-th dilate::

        sage: Q = Polyhedron(vertices = [[-1/3],[2/3]],backend='normaliz')  # optional - pynormaliz
        sage: p0,p1,p2 = Q._ehrhart_quasipolynomial_normaliz()  # optional - pynormaliz
        sage: r0 = [p0(i) for i in range(15)]  # optional - pynormaliz
        sage: r1 = [p1(i) for i in range(15)]  # optional - pynormaliz
        sage: r2 = [p2(i) for i in range(15)]  # optional - pynormaliz
        sage: result = [None]*15  # optional - pynormaliz
        sage: result[::3] = r0[::3]  # optional - pynormaliz
        sage: result[1::3] = r1[1::3]  # optional - pynormaliz
        sage: result[2::3] = r2[2::3]  # optional - pynormaliz
        sage: result == [(i*Q).integral_points_count() for i in range(15)]  # optional - pynormaliz
        True

    .. SEEALSO::

        :meth:`~sage.geometry.polyhedron.backend_normaliz.hilbert_series`,
        :meth:`~sage.geometry.polyhedron.backend_normaliz.ehrhart_series`
    """
    cone = self._normaliz_cone
    # Normaliz needs to compute the EhrhartSeries first
    assert NmzCompute(cone, ["EhrhartSeries"])
    raw = self._nmz_result(cone, "EhrhartQuasiPolynomial")

    from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
    t = PolynomialRing(QQ, variable).gens()[0]

    def _to_poly(coeffs, denominator):
        # Build one rational polynomial from a coefficient list and a
        # common integer denominator.
        return sum(c * t**k for k, c in enumerate(coeffs)) / ZZ(denominator)

    if len(raw) == 2:
        # Quasi-period 1: the answer is a genuine polynomial.
        return _to_poly(raw[0], raw[1])
    # General case: one polynomial per residue class; the common
    # denominator is the last entry of the Normaliz output.
    return tuple(_to_poly(coeffs, raw[-1]) for coeffs in raw[:-1])
# Over `\QQ` the Ehrhart counting function may in general be a
# quasipolynomial, so the "polynomial" entry point is simply an alias
# for the quasipolynomial computation above.
_ehrhart_polynomial_normaliz = _ehrhart_quasipolynomial_normaliz
@cached_method(do_pickle=True, key=lambda self, g, v: (tuple(g), v))
def hilbert_series(self, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.