input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import json
from os import path
from os import mkdir
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
from astropy.time import Time
import glob
import matplotlib.cm as cm
def convert_dict_to_nested_type(report):
    """Recursively walk *report* and print the type of every leaf value.

    Despite its name this is a debugging helper: it does not convert
    anything, it only prints dict keys and the Python type of each leaf,
    so non-serializable values (e.g. numpy scalars, see the warning in
    ``save_report``) can be spotted before JSON serialization.

    Parameters
    ----------
    report : dict, list or scalar
        The (possibly nested) report structure to inspect.
    """
    # isinstance instead of `type(x) is ...` also accepts subclasses
    # (e.g. OrderedDict), which `is dict` silently misclassified as leaves.
    if isinstance(report, dict):
        for key, value in report.items():
            print(key)
            convert_dict_to_nested_type(value)
            print()
    elif isinstance(report, list):
        for element in report:
            convert_dict_to_nested_type(element)
    else:
        print(type(report))
def save_report(report, date):
    """Write an association report to the JSON report database.

    The file is stored as ``report_db/<month>/<day>.json``, where month
    and day come from the ISO date of the julian day *date*.

    Parameters
    ----------
    report : dict
        Report data; values must be plain nested Python types —
        numpy scalars are not JSON serializable.
    date : float
        Julian date of the observation night.
    """
    today = Time(date, format="jd")
    _, month, day = today.iso.split(" ")[0].split("-")
    dir_path = path.join("report_db", month)
    report_path = path.join(dir_path, day + ".json")
    # convert_dict_to_nested_type(report)
    # Single code path for the dump (the original duplicated it in both
    # branches). NOTE: only the month directory is created here; the
    # top-level report_db/ directory is assumed to already exist.
    if not path.isdir(dir_path):
        mkdir(dir_path)
    with open(report_path, "w") as outfile:
        # warning : serialize dictionary with numpy type doesn't work,
        # convert values to nested python type
        json.dump(report, outfile, indent=4)
def parse_intra_night_report(intra_night_report):
    """Flatten an intra-night report dict into a fixed-size numpy vector.

    Returns
    -------
    np.ndarray
        ``[nb separation assoc, nb magnitude filtered, nb tracklets,
        precision, recall, TP, FP, FN, total real associations]``.
        When the report (or its metrics sub-dict) is empty, counts
        default to 0 and precision/recall to 100.
    """
    if len(intra_night_report) == 0:
        return np.array([0, 0, 0, 100, 100, 0, 0, 0, 0])

    counts = [
        intra_night_report["number of separation association"],
        intra_night_report["number of association filtered by magnitude"],
        intra_night_report["number of intra night tracklets"],
    ]
    metrics = intra_night_report["association metrics"]
    if len(metrics) > 0:
        scores = [
            metrics["precision"],
            metrics["recall"],
            metrics["True Positif"],
            metrics["False Positif"],
            metrics["False Negatif"],
            metrics["total real association"],
        ]
    else:
        # no metrics computed for this night: perfect scores, zero counts
        scores = [100, 100, 0, 0, 0, 0]
    return np.array(counts + scores)
def parse_association_report(association_report):
    """Flatten an inter-night association report dict into a numpy vector.

    Returns
    -------
    np.ndarray
        ``[nb separation assoc, nb magnitude filtered, nb angle filtered,
        nb duplicates, precision, recall, TP, FP, FN, total real
        associations]``; defaults when the report or its metrics are empty.
    """
    if len(association_report) == 0:
        return np.array([0, 0, 0, 0, 100, 100, 0, 0, 0, 0])

    counts = [
        association_report["number of inter night separation based association"],
        association_report["number of inter night magnitude filtered association"],
        association_report["number of inter night angle filtered association"],
        association_report["number of duplicated association"],
    ]
    metrics = association_report["metrics"]
    if len(metrics) == 0:
        scores = [100, 100, 0, 0, 0, 0]
    else:
        scores = [
            metrics["precision"],
            metrics["recall"],
            metrics["True Positif"],
            metrics["False Positif"],
            metrics["False Negatif"],
            metrics["total real association"],
        ]
        # no false positive and no real association at all: precision and
        # recall are undefined, report them as perfect
        if scores[3] == 0 and scores[5] == 0:
            scores[0] = 100
            scores[1] = 100
    return np.array(counts + scores)
def parse_trajectories_report(inter_night_report):
    """Parse the trajectory-association part of an inter-night report.

    Returns
    -------
    tuple
        ``(updated trajectories, ndarray of shape (nights, 2, 10))`` where
        each night holds the parsed trajectories-to-tracklets and
        trajectories-to-new-observation association vectors.
    """
    updated_trajectories = inter_night_report["list of updated trajectories"]
    per_night = [
        np.array(
            [
                parse_association_report(night["trajectories_to_tracklets_report"]),
                parse_association_report(night["trajectories_to_new_observation_report"]),
            ]
        )
        for night in inter_night_report["all nid report"]
    ]
    return updated_trajectories, np.array(per_night)
def parse_tracklets_obs_report(inter_night_report):
    """Parse the old-observation-association part of an inter-night report.

    Returns
    -------
    tuple
        ``(updated trajectories, ndarray of shape (nights, 2, 10))`` where
        each night holds the parsed old-observation-to-tracklets and
        old-observation-to-new-observation association vectors.
    """
    updated_trajectories = inter_night_report["list of updated trajectories"]
    per_night = [
        np.array(
            [
                parse_association_report(night["old observation to tracklets report"]),
                parse_association_report(night["old observation to new observation report"]),
            ]
        )
        for night in inter_night_report["all nid report"]
    ]
    return updated_trajectories, np.array(per_night)
def parse_inter_night_report(report):
    """Parse a full night report into its four components.

    Returns
    -------
    tuple
        ``(parsed intra-night report, parsed trajectory report,
        parsed tracklets/observation report, stats vector)`` where the
        stats vector is ``[nb trajectories, elapsed time, nb most recent
        traj, nb old observations, nb new observations]``.
    """
    if "tracklets and observation association report" in report:
        parse_track_report = parse_tracklets_obs_report(
            report["tracklets and observation association report"]
        )
    else:
        # older reports do not carry this section
        parse_track_report = [], np.array([])
    stats = np.array(
        [
            report["nb trajectories"],
            report["computation time of the night"],
            report["nb most recent traj"],
            report["nb old observations"],
            report["nb new observations"],
        ]
    )
    return (
        parse_intra_night_report(report["intra night report"]),
        parse_trajectories_report(report["trajectory association report"]),
        parse_track_report,
        stats,
    )
def open_and_parse_report(path):
    """Load a JSON night report from *path* and parse it.

    NOTE: the parameter name shadows the module-level ``os.path`` import
    inside this function.
    """
    with open(path, "r") as file:
        return parse_inter_night_report(json.load(file))
def make_autopct(values):
    """Build an ``autopct`` callback for matplotlib pie charts.

    The returned callable formats a wedge as both a percentage and the
    corresponding absolute count recovered from *values*.
    """
    def formatter(pct):
        count = int(round(pct * sum(values) / 100.0))
        return f"{pct:.2f}% ({count:d})"

    return formatter
def plot_report(parse_report):
    """Plot a parsed night report as two pie charts.

    The top chart shows the intra-night association split (magnitude
    filtered vs remaining); the bottom chart is a two-ring pie: the outer
    ring separates trajectory associations from tracklets/observation
    associations, the inner ring details the filtering steps of each.

    Parameters
    ----------
    parse_report : tuple
        Output of ``parse_inter_night_report``.
    """
    fig, (ax, ax2) = plt.subplots(2, 1, figsize=(20, 20))
    intra_assoc_value = parse_report[0]
    traj_assoc_value = parse_report[1]
    track_assoc_value = parse_report[2]
    # [filtered by magnitude, remaining after the filter]
    intra_values = [intra_assoc_value[1], (intra_assoc_value[0] - intra_assoc_value[1])]
    labels = ("magnitude filtering", "remaining associations")
    explode = (0.1, 0.0)
    ax.pie(
        intra_values,
        explode=explode,
        shadow=True,
        labels=labels,
        autopct=make_autopct(intra_values),
    )
    ax.axis("equal")
    def transform_data(data):
        # Turn cumulative counts [sep, mag, angle, dup] into per-step
        # amounts: each filter count plus what survives all filters.
        return np.array(
            [data[1], data[2], data[3], (data[0] - data[1] - data[2] - data[3])]
        )
    # Sum the per-night (nights, 2, 10) arrays down to a single vector.
    if len(traj_assoc_value[1]) > 0:
        traj_assoc_value = traj_assoc_value[1].sum(axis=1).sum(axis=0)
        traj_assoc_value = transform_data(traj_assoc_value)
    else:
        traj_assoc_value = np.array([0, 0, 0, 0])
    if len(track_assoc_value[1]) > 0:
        track_assoc_value = track_assoc_value[1].sum(axis=1).sum(axis=0)
        track_assoc_value = transform_data(track_assoc_value)
    else:
        track_assoc_value = np.array([0, 0, 0, 0])
    vals = np.concatenate([[traj_assoc_value], [track_assoc_value]], axis=0)
    # slop keeps wedge sizes strictly positive when all counts are zero
    slop = 0.0001
    group_size = vals.sum(axis=1) + slop
    subgroup_size = vals.flatten()
    subgroup_names = subgroup_size
    # Create colors
    a, b = [plt.cm.Blues, plt.cm.Reds]
    ax2.axis("equal")
    mypie, _ = ax2.pie(
        group_size, radius=1.3, labels=group_size - slop, colors=[a(0.6), b(0.6)]
    )
    plt.setp(mypie, width=0.3, edgecolor="white")
    # Second Ring (Inside)
    # NOTE(review): the red shades below reuse subgroup_size[0..3] (the
    # trajectory values) instead of subgroup_size[4..7] (the tracklets
    # values); this looks like a copy/paste bug — confirm intended shading.
    mypie2, _ = ax2.pie(
        subgroup_size,
        radius=1.3 - 0.3,
        labels=subgroup_names,
        labeldistance=0.7,
        colors=[
            a(subgroup_size[0] / group_size[0] - slop),
            a(subgroup_size[1] / group_size[0] - slop),
            a(subgroup_size[2] / group_size[0] - slop),
            a(subgroup_size[3] / group_size[0] - slop),
            b(subgroup_size[0] / group_size[1] - slop),
            b(subgroup_size[1] / group_size[1] - slop),
            b(subgroup_size[2] / group_size[1] - slop),
            b(subgroup_size[3] / group_size[1] - slop),
        ],
    )
    plt.setp(mypie2, width=0.4, edgecolor="white")
    ax2.margins(0, 0)
    subgroup_names_legs = [
        "Trajectory association",
        "Tracklets and observation association",
        "filtered by magnitude",
        "filtered by angle",
        "duplicated association",
        "remaining association",
        "filtered by magnitude",
        "filtered by angle",
        "duplicated association",
        "remaining association",
    ]
    ax2.legend(subgroup_names_legs, loc="best")
    ax.set_title("intra night association")
    ax2.set_title("inter night association")
    plt.show()
def get_intra_night_metrics(parse_report):
    """Return the metric tail (precision, recall, TP, FP, FN, total)
    of the parsed intra-night report."""
    return np.array(parse_report[0][3:])
def get_intra_night_associations(parse_report):
    """Return the association counts head (separation assoc, magnitude
    filtered, tracklets) of the parsed intra-night report."""
    return np.array(parse_report[0][:3])
def get_inter_night_metrics(parse_report):
    """Extract the metric columns (index 4 onward: precision, recall, TP,
    FP, FN, total) of the four inter-night association kinds.

    Returns defaults (100% precision/recall, zero counts) for a kind
    whose per-night report array is empty.
    """
    def split_metrics(report):
        if len(report) > 0:
            return report[:, 0, 4:], report[:, 1, 4:]
        return np.array([100, 100, 0, 0, 0, 0]), np.array([100, 100, 0, 0, 0, 0])

    traj_to_tracklets, traj_to_obs = split_metrics(parse_report[1][1])
    old_obs_to_track, old_obs_to_new_obs = split_metrics(parse_report[2][1])
    return traj_to_tracklets, traj_to_obs, old_obs_to_track, old_obs_to_new_obs
def get_inter_night_stat(parse_report):
    """Return the per-night statistics vector (fourth component) of a
    parsed inter-night report."""
    stats = parse_report[3]
    return stats
def get_inter_night_associations(parse_report):
    """Extract the association count columns (first four: separation,
    magnitude filtered, angle filtered, duplicates) of the four
    inter-night association kinds.

    Returns zero vectors for a kind whose per-night report array is empty.
    """
    def split_counts(report):
        if len(report) > 0:
            return report[:, 0, :4], report[:, 1, :4]
        return np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])

    traj_to_tracklets, traj_to_obs = split_counts(parse_report[1][1])
    old_obs_to_track, old_obs_to_new_obs = split_counts(parse_report[2][1])
    return traj_to_tracklets, traj_to_obs, old_obs_to_track, old_obs_to_new_obs
def mean_metrics_over_nights(metrics):
    """Average the per-night metric vectors over all nights (axis 0).

    Parameters
    ----------
    metrics : np.ndarray
        2-D array with one metric row per night.

    Returns
    -------
    np.ndarray
        Column-wise mean over the nights.
    """
    # (dead experimental masking code removed)
    return np.mean(metrics, axis=0)
def plot_metrics(fig, metrics, axes, title):
    """Plot cumulative association metrics over the nights.

    axes[0] shows the running mean of precision and recall; axes[1] shows
    cumulative TP/FP/FN counts and the cumulative total of real
    associations on a log scale.

    Parameters
    ----------
    fig : matplotlib figure
        Figure used to place the shared legend.
    metrics : np.ndarray
        One row per night: [precision, recall, TP, FP, FN, total].
    axes : sequence of two matplotlib axes
    title : str
        Title for the precision/recall axis.
    """
    # night index 1..n (used both as x values and as running-mean divisor)
    values_idx = np.arange(1, np.shape(metrics[:, :2])[0] + 1)
    css_color = mcolors.CSS4_COLORS
    axes[0].plot(
        values_idx,
        np.cumsum(metrics[:, 0]) / values_idx,
        label="precision",
        color=css_color["crimson"],
    )
    axes[0].plot(
        values_idx,
        np.cumsum(metrics[:, 1]) / values_idx,
        label="recall",
        color=css_color["chocolate"],
    )
    axes[0].set_title(title)
    axes[1].plot(
        values_idx,
        np.cumsum(metrics[:, 2:-1], axis=0),
        alpha=0.8,
        label=["True Positif", "False Positif", "False Negatif"],
    )
    axes[1].plot(
        values_idx, np.cumsum(metrics[:, -1]), label="total real association", alpha=0.7
    )
    axes[1].set_yscale("log")
    # recolor the four lines of axes[1] (TP, FP, FN, total) in order
    colors = [
        css_color["green"],
        css_color["red"],
        css_color["royalblue"],
        css_color["black"],
    ]
    for i, j in enumerate(axes[1].lines):
        j.set_color(colors[i])
    # merge the legend handles of both axes into one figure-level legend
    lines_labels = [ax.get_legend_handles_labels() for ax in axes]
    lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
    fig.legend(lines, labels, loc=(0.5, 0.45), framealpha=0.2)
def plot_intra_assoc(assoc, axes, title):
    """Plot the intra-night association counts night by night.

    Parameters
    ----------
    assoc : np.ndarray
        One row per night: [separation assoc, magnitude filtered,
        detected tracklets].
    axes : matplotlib axes
    title : str
    """
    nights = np.arange(1, len(assoc) + 1)
    axes.plot(
        nights,
        assoc,
        label=["separation assoc", "magnitude filter", "detected tracklets"],
    )
    axes.set_title(title)
    axes.legend()
def plot_inter_assoc(assoc, ax, title):
    """Plot the inter-night association filtering funnel on a log scale.

    Columns of *assoc* are [separation assoc, magnitude filtered,
    angle filtered, duplicates]; they are converted in place below to the
    number of associations remaining after each successive filter.

    Parameters
    ----------
    assoc : np.ndarray
        One row per night, four columns as above.
    ax : matplotlib axes
    title : str
    """
    # Work on a copy: the previous version mutated the caller's array in
    # place, corrupting it for any later use of the same data.
    assoc = np.array(assoc)
    values_idx = np.arange(1, np.shape(assoc[:, :2])[0] + 1)
    assoc[:, 1] = assoc[:, 0] - assoc[:, 1]
    assoc[:, 2] = assoc[:, 1] - assoc[:, 2]
    assoc[:, 3] = np.cumsum(assoc[:, 2] - assoc[:, 3])
    ax.plot(
        values_idx,
        assoc,
        label=[
            "separation assoc",
            "magnitude filter",
            "angle filter",
            "remain after removing duplicates",
        ],
    )
    ax.set_yscale("log")
    ax.set_title(title)
def plot_inter_stat(stat, axes, title):
    """Plot per-night runtime and input-size statistics on three axes.

    Parameters
    ----------
    stat : np.ndarray
        One row per night: [nb trajectories, elapsed time, nb most recent
        traj, nb old observations, nb new observations].
    axes : sequence of three matplotlib axes
    title : str
        Unused (kept for interface compatibility with the other helpers).
    """
    nights = np.arange(1, len(stat) + 1)
    time_ax, traj_ax, input_ax = axes[0], axes[1], axes[2]
    time_ax.plot(nights, np.cumsum(stat[:, 1]))
    time_ax.set_title("cumulative elapsed time")
    traj_ax.plot(nights, stat[:, 0])
    traj_ax.set_title("cumulative number of trajectories")
    input_ax.plot(
        nights,
        stat[:, 2:],
        label=[
            "number of most recent trajectories",
            "number of old observations",
            "number of new observations",
        ],
    )
    input_ax.set_title("inputs statistics")
    input_ax.legend()
def plot_trajectories(traj_df, mpc_plot):
    """Scatter-plot real (MPC) trajectories vs detected trajectories.

    Parameters
    ----------
    traj_df : pandas.DataFrame
        Detected observations; must carry at least the columns used in the
        groupby below (trajectory_id, ra, dec, dcmag, fid, nid, candid).
    mpc_plot : pandas.DataFrame
        Reference trajectories with 'ra' and 'dec' columns.
    """
    # one row per detected trajectory, with the per-point columns gathered
    # into lists and candid reduced to a point count
    gb_traj = (
        traj_df.groupby(["trajectory_id"])
        .agg(
            {
                "ra": list,
                "dec": list,
                "dcmag": list,
                "fid": list,
                "nid": list,
                "candid": lambda x: len(x),
            }
        )
        .reset_index()
    )
    _, (ax1, ax2) = plt.subplots(2, 1, figsize=(40, 40))
    colors = cm.jet(np.linspace(0, 1, len(mpc_plot)))
    # NOTE(review): iterrows yields the index *label*, used here as a
    # positional index into colors — assumes a 0..n-1 RangeIndex; confirm
    # for reindexed frames.
    for i, rows in mpc_plot.iterrows():
        ra = rows["ra"]
        dec = rows["dec"]
        ax1.scatter(ra, dec, color=colors[i])
    colors = cm.Set1(np.linspace(0, 1, len(gb_traj)))
    for i, rows in gb_traj.iterrows():
        ra = rows["ra"]
        dec = rows["dec"]
        ax2.scatter(ra, dec, color=colors[i])
    ax1.set_title("real trajectories")
    ax2.set_title("detected trajectories")
def load_performance_stat(only_intra_night=False):
all_path_report = sorted(glob.glob("report_db/*/*"))
all_inter_metrics = [[], [], [], []]
all_intra_metrics = []
all_inter_assoc = [[], [], [], []]
all_intra_assoc = []
all_inter_stat = []
with open(all_path_report[0], "r") as file:
intra_night_report = json.load(file)
intra_night_values = | |
<gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.io.registry import IORegistryError
from astropy.table import Table, vstack
from gammapy.modeling import Dataset, Datasets, Fit, Parameters
from gammapy.modeling.models import (
PowerLawSpectralModel,
ScaleSpectralModel,
SkyModel,
SkyModels,
)
from gammapy.utils.interpolation import interpolate_profile
from gammapy.utils.scripts import make_path
from gammapy.utils.table import table_from_row_data, table_standardise_units_copy
from .dataset import SpectrumDatasetOnOff
__all__ = ["FluxPoints", "FluxPointsEstimator", "FluxPointsDataset"]
log = logging.getLogger(__name__)
# Minimum columns a table must provide for each supported SED type.
REQUIRED_COLUMNS = {
    "dnde": ["e_ref", "dnde"],
    "e2dnde": ["e_ref", "e2dnde"],
    "flux": ["e_min", "e_max", "flux"],
    "eflux": ["e_min", "e_max", "eflux"],
    # TODO: extend required columns
    "likelihood": [
        "e_min",
        "e_max",
        "e_ref",
        "ref_dnde",
        "norm",
        "norm_scan",
        "stat_scan",
    ],
}
# Optional error / upper-limit columns recognised for each SED type.
OPTIONAL_COLUMNS = {
    "dnde": ["dnde_err", "dnde_errp", "dnde_errn", "dnde_ul", "is_ul"],
    "e2dnde": ["e2dnde_err", "e2dnde_errp", "e2dnde_errn", "e2dnde_ul", "is_ul"],
    "flux": ["flux_err", "flux_errp", "flux_errn", "flux_ul", "is_ul"],
    "eflux": ["eflux_err", "eflux_errp", "eflux_errn", "eflux_ul", "is_ul"],
}
# Default physical unit assigned to each SED quantity.
DEFAULT_UNIT = {
    "dnde": u.Unit("cm-2 s-1 TeV-1"),
    "e2dnde": u.Unit("erg cm-2 s-1"),
    "flux": u.Unit("cm-2 s-1"),
    "eflux": u.Unit("erg cm-2 s-1"),
}
class FluxPoints:
"""Flux points container.
The supported formats are described here: :ref:`gadf:flux-points`
In summary, the following formats and minimum required columns are:
* Format ``dnde``: columns ``e_ref`` and ``dnde``
* Format ``e2dnde``: columns ``e_ref``, ``e2dnde``
* Format ``flux``: columns ``e_min``, ``e_max``, ``flux``
* Format ``eflux``: columns ``e_min``, ``e_max``, ``eflux``
Parameters
----------
table : `~astropy.table.Table`
Table with flux point data
Attributes
----------
table : `~astropy.table.Table`
Table with flux point data
Examples
--------
The `FluxPoints` object is most easily created by reading a file with
flux points given in one of the formats documented above::
from gammapy.spectrum import FluxPoints
filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points.fits'
flux_points = FluxPoints.read(filename)
flux_points.plot()
An instance of `FluxPoints` can also be created by passing an instance of
`astropy.table.Table`, which contains the required columns, such as `'e_ref'`
and `'dnde'`. The corresponding `sed_type` has to be defined in the meta data
of the table::
from astropy import units as u
from astropy.table import Table
from gammapy.spectrum import FluxPoints
from gammapy.modeling.models import PowerLawSpectralModel
table = Table()
pwl = PowerLawSpectralModel()
e_ref = np.logspace(0, 2, 7) * u.TeV
table['e_ref'] = e_ref
table['dnde'] = pwl(e_ref)
table.meta['SED_TYPE'] = 'dnde'
flux_points = FluxPoints(table)
flux_points.plot()
If you have flux points in a different data format, the format can be changed
by renaming the table columns and adding meta data::
from astropy import units as u
from astropy.table import Table
from gammapy.spectrum import FluxPoints
table = Table.read('$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points_ctb_37b.txt',
format='ascii.csv', delimiter=' ', comment='#')
table.meta['SED_TYPE'] = 'dnde'
table.rename_column('Differential_Flux', 'dnde')
table['dnde'].unit = 'cm-2 s-1 TeV-1'
table.rename_column('lower_error', 'dnde_errn')
table['dnde_errn'].unit = 'cm-2 s-1 TeV-1'
table.rename_column('upper_error', 'dnde_errp')
table['dnde_errp'].unit = 'cm-2 s-1 TeV-1'
table.rename_column('E', 'e_ref')
table['e_ref'].unit = 'TeV'
flux_points = FluxPoints(table)
flux_points.plot()
"""
    def __init__(self, table):
        """Store *table* with units standardised on a copy; the original
        table object is left untouched."""
        self.table = table_standardise_units_copy(table)
        # validate that the table is a valid representation
        # of the given flux point sed type
        self._validate_table(self.table, table.meta["SED_TYPE"])
def __repr__(self):
return f"{self.__class__.__name__}(sed_type={self.sed_type!r}, n_points={len(self.table)})"
@property
def table_formatted(self):
"""Return formatted version of the flux points table. Used for pretty printing"""
table = self.table.copy()
for column in table.colnames:
if column.startswith(("dnde", "eflux", "flux", "e2dnde", "ref")):
table[column].format = ".3e"
elif column.startswith(
("e_min", "e_max", "e_ref", "sqrt_ts", "norm", "ts", "stat")
):
table[column].format = ".3f"
return table
    @classmethod
    def read(cls, filename, **kwargs):
        """Read flux points.

        Parameters
        ----------
        filename : str
            Filename
        kwargs : dict
            Keyword arguments passed to `astropy.table.Table.read`.
        """
        filename = make_path(filename)
        try:
            table = Table.read(filename, **kwargs)
        except IORegistryError:
            # format could not be inferred from the filename: fall back to ECSV
            kwargs.setdefault("format", "ascii.ecsv")
            table = Table.read(filename, **kwargs)
        # guess the SED type from the columns when the file does not declare it
        if "SED_TYPE" not in table.meta.keys():
            sed_type = cls._guess_sed_type(table)
            table.meta["SED_TYPE"] = sed_type
        # TODO: check sign and factor 2 here
        # https://github.com/gammapy/gammapy/pull/2546#issuecomment-554274318
        # The idea below is to support the format here:
        # https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/flux_points/index.html#likelihood-columns
        # but internally to go to the uniform "stat"
        if "loglike" in table.colnames and "stat" not in table.colnames:
            table["stat"] = 2 * table["loglike"]
        if "loglike_null" in table.colnames and "stat_null" not in table.colnames:
            table["stat_null"] = 2 * table["loglike_null"]
        if "dloglike_scan" in table.colnames and "stat_scan" not in table.colnames:
            table["stat_scan"] = 2 * table["dloglike_scan"]
        return cls(table=table)
def write(self, filename, **kwargs):
"""Write flux points.
Parameters
----------
filename : str
Filename
kwargs : dict
Keyword arguments passed to `astropy.table.Table.write`.
"""
filename = make_path(filename)
try:
self.table.write(filename, **kwargs)
except IORegistryError:
kwargs.setdefault("format", "ascii.ecsv")
self.table.write(filename, **kwargs)
    @classmethod
    def stack(cls, flux_points):
        """Create flux points by stacking list of flux points.

        The first `FluxPoints` object in the list is taken as a reference to infer
        column names and units for the stacked object.

        Parameters
        ----------
        flux_points : list of `FluxPoints`
            List of flux points to stack.

        Returns
        -------
        flux_points : `FluxPoints`
            The stacked flux points.
        """
        reference = flux_points[0].table
        tables = []
        for _ in flux_points:
            table = _.table
            # Convert every column to the reference table's units.
            # NOTE(review): the converted columns are assigned back into
            # the input tables, i.e. the stacked FluxPoints are mutated
            # in place — confirm this is intended.
            for colname in reference.colnames:
                column = reference[colname]
                if column.unit:
                    table[colname] = table[colname].quantity.to(column.unit)
            tables.append(table[reference.colnames])
        table_stacked = vstack(tables)
        table_stacked.meta["SED_TYPE"] = reference.meta["SED_TYPE"]
        return cls(table_stacked)
    def drop_ul(self):
        """Drop upper limit flux points.

        Returns
        -------
        flux_points : `FluxPoints`
            Flux points with upper limit points removed.

        Examples
        --------
        >>> from gammapy.spectrum import FluxPoints
        >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points.fits'
        >>> flux_points = FluxPoints.read(filename)
        >>> print(flux_points)
        FluxPoints(sed_type="flux", n_points=24)
        >>> print(flux_points.drop_ul())
        FluxPoints(sed_type="flux", n_points=19)
        """
        # self.is_ul is presumably a boolean column mask (property defined
        # outside this excerpt); keep only non-upper-limit rows.
        table_drop_ul = self.table[~self.is_ul]
        return self.__class__(table_drop_ul)
    def _flux_to_dnde(self, e_ref, table, model, pwl_approx):
        """Fill differential ``dnde`` columns from integral ``flux`` columns.

        Errors are scaled by the dnde/flux ratio; upper limits are
        re-derived from the model. A power law is assumed when *model*
        is None. Mutates and returns *table*.
        """
        if model is None:
            model = PowerLawSpectralModel()
        e_min, e_max = self.e_min, self.e_max
        flux = table["flux"].quantity
        dnde = self._dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx)
        # Add to result table
        table["e_ref"] = e_ref
        table["dnde"] = dnde
        if "flux_err" in table.colnames:
            # symmetric errors scale linearly with the dnde / flux ratio
            table["dnde_err"] = dnde * table["flux_err"].quantity / flux
        if "flux_errn" in table.colnames:
            table["dnde_errn"] = dnde * table["flux_errn"].quantity / flux
            table["dnde_errp"] = dnde * table["flux_errp"].quantity / flux
        if "flux_ul" in table.colnames:
            flux_ul = table["flux_ul"].quantity
            dnde_ul = self._dnde_from_flux(
                flux_ul, model, e_ref, e_min, e_max, pwl_approx
            )
            table["dnde_ul"] = dnde_ul
        return table
@staticmethod
def _dnde_to_e2dnde(e_ref, table):
for suffix in ["", "_ul", "_err", "_errp", "_errn"]:
try:
data = table["dnde" + suffix].quantity
table["e2dnde" + suffix] = (e_ref ** 2 * data).to(
DEFAULT_UNIT["e2dnde"]
)
except KeyError:
continue
return table
@staticmethod
def _e2dnde_to_dnde(e_ref, table):
for suffix in ["", "_ul", "_err", "_errp", "_errn"]:
try:
data = table["e2dnde" + suffix].quantity
table["dnde" + suffix] = (data / e_ref ** 2).to(DEFAULT_UNIT["dnde"])
except KeyError:
continue
return table
    def to_sed_type(self, sed_type, method="log_center", model=None, pwl_approx=False):
        """Convert to a different SED type (return new `FluxPoints`).

        See: https://ui.adsabs.harvard.edu/abs/1995NIMPA.355..541L for details
        on the `'lafferty'` method.

        Parameters
        ----------
        sed_type : {'dnde'}
            SED type to convert to.
        model : `~gammapy.modeling.models.SpectralModel`
            Spectral model assumption. Note that the value of the amplitude parameter
            does not matter. Still it is recommended to use something with the right
            scale and units. E.g. `amplitude = 1e-12 * u.Unit('cm-2 s-1 TeV-1')`
        method : {'lafferty', 'log_center', 'table'}
            Flux points `e_ref` estimation method:

            * `'laferty'` Lafferty & Wyatt model-based e_ref
            * `'log_center'` log bin center e_ref
            * `'table'` using column 'e_ref' from input flux_points
        pwl_approx : bool
            Use local power law appoximation at e_ref to compute differential flux
            from the integral flux. This method is used by the Fermi-LAT catalogs.

        Returns
        -------
        flux_points : `FluxPoints`
            Flux points including differential quantity columns `dnde`
            and `dnde_err` (optional), `dnde_ul` (optional).

        Examples
        --------
        >>> from gammapy.spectrum import FluxPoints
        >>> from gammapy.modeling.models import PowerLawSpectralModel
        >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points.fits'
        >>> flux_points = FluxPoints.read(filename)
        >>> model = PowerLawSpectralModel(index=2.2)
        >>> flux_points_dnde = flux_points.to_sed_type('dnde', model=model)
        """
        # TODO: implement other directions.
        table = self.table.copy()
        if self.sed_type == "flux" and sed_type == "dnde":
            # Compute e_ref
            if method == "table":
                e_ref = table["e_ref"].quantity
            elif method == "log_center":
                # geometric mean of the bin edges
                e_ref = np.sqrt(self.e_min * self.e_max)
            elif method == "lafferty":
                # set e_ref that it represents the mean dnde in the given energy bin
                e_ref = self._e_ref_lafferty(model, self.e_min, self.e_max)
            else:
                raise ValueError(f"Invalid method: {method}")
            table = self._flux_to_dnde(e_ref, table, model, pwl_approx)
        elif self.sed_type == "dnde" and sed_type == "e2dnde":
            table = self._dnde_to_e2dnde(self.e_ref, table)
        elif self.sed_type == "e2dnde" and sed_type == "dnde":
            table = self._e2dnde_to_dnde(self.e_ref, table)
        elif self.sed_type == "likelihood" and sed_type in ["dnde", "flux", "eflux"]:
            # scale the reference quantity by the fitted norm for each
            # available suffix; missing columns are simply skipped
            for suffix in ["", "_ul", "_err", "_errp", "_errn"]:
                try:
                    table[sed_type + suffix] = (
                        table["ref_" + sed_type] * table["norm" + suffix]
                    )
                except KeyError:
                    continue
        else:
            raise NotImplementedError
        table.meta["SED_TYPE"] = sed_type
        return FluxPoints(table)
    @staticmethod
    def _e_ref_lafferty(model, e_min, e_max):
        """Helper for `to_sed_type`.

        Compute e_ref that the value at e_ref corresponds
        to the mean value between e_min and e_max
        (Lafferty & Wyatt 1995 prescription).
        """
        flux = model.integral(e_min, e_max)
        # mean differential flux over the bin
        dnde_mean = flux / (e_max - e_min)
        # energy at which the model equals that mean value
        return model.inverse(dnde_mean)
| |
<filename>htm_rl/htm_rl/modules/htm/temporal_memory.py<gh_stars>1-10
from functools import reduce
from htm_rl.modules.htm.connections import Connections
from htm.bindings.sdr import SDR
from htm.advanced.support.numpy_helpers import setCompare, argmaxMulti, getAllCellsInColumns
import numpy as np
from htm.bindings.math import Random
from math import exp
# Numerical guard against division by zero (see predict_cells).
EPS = 1e-12
# Canonical dtype names (not all of them are referenced in this excerpt).
UINT_DTYPE = "uint32"
REAL_DTYPE = "float32"
REAL64_DTYPE = "float64"
# Small additive factor, presumably used to break ties deterministically
# at its use sites (outside this excerpt) — confirm.
_TIE_BREAKER_FACTOR = 0.000001
class GeneralFeedbackTM:
    def __init__(self,
                 columns,
                 cells_per_column,
                 context_cells,
                 feedback_cells,
                 activation_threshold_basal,
                 learning_threshold_basal,
                 activation_threshold_apical,
                 learning_threshold_apical,
                 connected_threshold_basal=0.5,
                 permanence_increment_basal=0.1,
                 permanence_decrement_basal=0.01,
                 initial_permanence_basal=0.4,
                 predicted_segment_decrement_basal=0.001,
                 sample_size_basal=-1,
                 max_synapses_per_segment_basal=-1,
                 max_segments_per_cell_basal=255,
                 connected_threshold_apical=0.5,
                 permanence_increment_apical=0.1,
                 permanence_decrement_apical=0.01,
                 initial_permanence_apical=0.4,
                 predicted_segment_decrement_apical=0.001,
                 sample_size_apical=-1,
                 max_synapses_per_segment_apical=-1,
                 max_segments_per_cell_apical=255,
                 prune_zero_synapses=True,
                 timeseries=False,
                 anomaly_window=1000,
                 confidence_window=1000,
                 noise_tolerance=0.0,
                 sm_ac=0,
                 seed=None,
                 ):
        """Temporal memory with separate basal (context) and apical
        (feedback) dendrite populations.

        The ``*_basal`` parameters configure segments driven by context
        cells, the ``*_apical`` ones configure segments driven by feedback
        cells; ``-1`` for sample size / max synapses means unlimited (as
        interpreted by the Connections implementation, outside this
        excerpt). ``seed`` makes the internal RNG deterministic.
        """
        # --- copy configuration -------------------------------------------
        self.columns = columns
        self.cells_per_column = cells_per_column
        self.local_cells = columns * cells_per_column
        self.context_cells = context_cells
        self.feedback_cells = feedback_cells
        self.activation_threshold_basal = activation_threshold_basal
        self.learning_threshold_basal = learning_threshold_basal
        self.activation_threshold_apical = activation_threshold_apical
        self.learning_threshold_apical = learning_threshold_apical
        self.connected_threshold_basal = connected_threshold_basal
        self.permanence_increment_basal = permanence_increment_basal
        self.permanence_decrement_basal = permanence_decrement_basal
        self.initial_permanence_basal = initial_permanence_basal
        self.predicted_segment_decrement_basal = predicted_segment_decrement_basal
        self.sample_size_basal = sample_size_basal
        self.max_synapses_per_segment_basal = max_synapses_per_segment_basal
        self.max_segments_per_cell_basal = max_segments_per_cell_basal
        self.connected_threshold_apical = connected_threshold_apical
        self.permanence_increment_apical = permanence_increment_apical
        self.permanence_decrement_apical = permanence_decrement_apical
        self.initial_permanence_apical = initial_permanence_apical
        self.predicted_segment_decrement_apical = predicted_segment_decrement_apical
        self.sample_size_apical = sample_size_apical
        self.max_synapses_per_segment_apical = max_synapses_per_segment_apical
        self.max_segments_per_cell_apical = max_segments_per_cell_apical
        self.timeseries = timeseries
        self.prune_zero_synapses = prune_zero_synapses
        self.sm_ac = sm_ac
        self.noise_tolerance = noise_tolerance
        # --- cell index layout: [local | context | feedback] ---------------
        self.total_cells = self.local_cells + self.context_cells + self.feedback_cells
        self.local_range = (0, self.local_cells)
        self.context_range = (self.local_range[1], self.local_range[1] + self.context_cells)
        self.feedback_range = (self.context_range[1], self.context_range[1] + self.feedback_cells)
        # --- synapse stores for the two dendrite populations ---------------
        self.basal_connections = Connections(numCells=self.total_cells,
                                             connectedThreshold=self.connected_threshold_basal,
                                             timeseries=self.timeseries)
        self.apical_connections = Connections(numCells=self.total_cells,
                                              connectedThreshold=self.connected_threshold_apical,
                                              timeseries=self.timeseries)
        # --- per-step activation state (SDRs over the full cell space) -----
        self.active_cells = SDR(self.total_cells)
        self.winner_cells = SDR(self.total_cells)
        self.predicted_cells = SDR(self.total_cells)
        self.active_columns = SDR(self.columns)
        self.predicted_columns = SDR(self.columns)
        self.correct_predicted_cells = SDR(self.total_cells)
        self.active_cells_context = SDR(self.total_cells)
        self.active_cells_feedback = SDR(self.total_cells)
        self.predictive_cells_basal = np.empty(0)
        self.active_segments_basal = np.empty(0)
        self.matching_segments_basal = np.empty(0)
        self.num_potential_basal = np.empty(0)
        self.predictive_cells_apical = np.empty(0)
        self.active_segments_apical = np.empty(0)
        self.matching_segments_apical = np.empty(0)
        self.num_potential_apical = np.empty(0)
        # --- running anomaly / confidence statistics ------------------------
        self.anomaly_window = anomaly_window
        self.confidence_window = confidence_window
        self.anomaly = [0.0 for _ in range(self.anomaly_window)]
        self.confidence = [0.0 for _ in range(self.confidence_window)]
        self.anomaly_threshold = 0
        self.confidence_threshold = 0
        self.mean_active_columns = 0
        # NOTE(review): `if seed` treats seed=0 like None (unseeded RNG).
        if seed:
            self.rng = Random(seed)
        else:
            self.rng = Random()
def reset(self):
self.active_cells = SDR(self.total_cells)
self.winner_cells = SDR(self.total_cells)
self.predicted_cells = SDR(self.total_cells)
self.active_columns = SDR(self.columns)
self.predicted_columns = SDR(self.columns)
self.active_cells_context = SDR(self.total_cells)
self.active_cells_feedback = SDR(self.total_cells)
self.predictive_cells_basal = np.empty(0)
self.active_segments_basal = np.empty(0)
self.matching_segments_basal = np.empty(0)
self.num_potential_basal = np.empty(0)
self.predictive_cells_apical = np.empty(0)
self.active_segments_apical = np.empty(0)
self.matching_segments_apical = np.empty(0)
self.num_potential_apical = np.empty(0)
# input
    def set_active_columns(self, columns_id):
        # Input: indices of the currently active minicolumns.
        self.active_columns.sparse = np.array(columns_id)
    def set_active_context_cells(self, cells_id):
        # Input: context cell ids, shifted into the context slice of the
        # global cell index space.
        self.active_cells_context.sparse = np.array(cells_id) + self.context_range[0]
    def set_active_feedback_cells(self, cells_id):
        # Input: feedback cell ids, shifted into the feedback slice of the
        # global cell index space.
        self.active_cells_feedback.sparse = np.array(cells_id) + self.feedback_range[0]
# output
    def get_active_columns(self):
        # Returns a copy so callers cannot mutate internal SDR state.
        return np.copy(self.active_columns.sparse)
    def get_predicted_columns(self):
        # NOTE(review): unlike get_active_columns this returns the sparse
        # array without np.copy — confirm whether a copy is intended.
        return self.predicted_columns.sparse
    def get_active_cells(self):
        # Shift back from the global cell index space to local cell ids.
        return self.active_cells.sparse - self.local_range[0]
    def get_winner_cells(self):
        # Shift back from the global cell index space to local cell ids.
        return self.winner_cells.sparse - self.local_range[0]
    def get_correctly_predicted_cells(self):
        # Shift back from the global cell index space to local cell ids.
        return self.correct_predicted_cells.sparse - self.local_range[0]
# processing
    def activate_basal_dendrites(self, learn):
        """Compute basal segment activity from the current context cells.

        Delegates to ``self._activate_dendrites`` (defined outside this
        excerpt) and stores active/matching segments, predictive cells and
        per-segment potential-synapse counts for the basal connections.

        :param learn: forwarded to the dendrite activation routine.
        """
        self.active_segments_basal, self.matching_segments_basal, self.predictive_cells_basal, self.num_potential_basal = self._activate_dendrites(
            self.basal_connections, self.active_cells_context, self.activation_threshold_basal,
            self.learning_threshold_basal, learn
        )
    def activate_apical_dendrites(self, learn):
        """Compute apical segment activity from the current feedback cells.

        Mirror of ``activate_basal_dendrites`` for the apical connections.

        :param learn: forwarded to the dendrite activation routine.
        """
        self.active_segments_apical, self.matching_segments_apical, self.predictive_cells_apical, self.num_potential_apical = self._activate_dendrites(
            self.apical_connections, self.active_cells_feedback, self.activation_threshold_apical,
            self.learning_threshold_apical, learn
        )
    def predict_cells(self):
        """
        Calculates predicted cells. Should be called after dendrite activations.

        Cells predicted by both basal and apical input take priority; if no
        cell has both, all basally predicted cells are used. Also updates
        the predicted columns and the sliding-window confidence estimate.
        :return:
        """
        # basal and apical coincidence predict first
        predicted_cells = np.intersect1d(self.predictive_cells_basal, self.predictive_cells_apical)
        # if there is no coincidence, predict all possible cases
        if predicted_cells.size == 0:
            predicted_cells = self.predictive_cells_basal
        self.predicted_cells.sparse = predicted_cells.astype('uint32')
        self.predicted_columns.sparse = np.unique(self._columns_for_cells(self.predicted_cells.sparse))
        # ratio of predicted cells to the typical number of active columns,
        # capped at 1; EPS avoids division by zero
        confidence = min(len(self.predicted_cells.sparse) / (self.mean_active_columns + EPS), 1.0)
        # running average over the last `confidence_window` samples: add the
        # new sample's contribution and drop the oldest one
        self.confidence_threshold = self.confidence_threshold + (
            confidence - self.confidence[0]) / self.confidence_window
        self.confidence.append(confidence)
        self.confidence.pop(0)
def activate_cells(self, learn: bool):
    """
    Calculates new active cells and performs connections' learning.

    Active cells are the union of correctly predicted cells and all cells of
    bursting columns (active columns that contained no predicted cell).
    Also updates the moving average of active-column count and the sliding
    anomaly statistics.

    :param learn: if true, connections will learn patterns from previous step
    :return:
    """
    # Calculate active cells:
    # setCompare splits predictions by column — predicted cells whose column
    # is active (correct predictions) and, via rightMinusLeft=True, the
    # active columns that had no prediction (bursting columns).
    correct_predicted_cells, bursting_columns = setCompare(self.predicted_cells.sparse, self.active_columns.sparse,
                                                           aKey=self._columns_for_cells(
                                                               self.predicted_cells.sparse),
                                                           rightMinusLeft=True)
    self.correct_predicted_cells.sparse = correct_predicted_cells
    # Bursting columns activate every one of their cells; cell ids are
    # shifted into this layer's global id range via local_range[0].
    new_active_cells = np.concatenate((correct_predicted_cells,
                                       getAllCellsInColumns(bursting_columns,
                                                            self.cells_per_column) + self.local_range[0]))
    (learning_active_basal_segments,
     learning_matching_basal_segments,
     learning_matching_apical_segments,
     cells_to_grow_apical_segments,
     basal_segments_to_punish,
     apical_segments_to_punish,
     cells_to_grow_apical_and_basal_segments,
     new_winner_cells) = self._calculate_learning(bursting_columns, correct_predicted_cells)
    # Learn
    if learn:
        # Learn on existing segments: reinforce both active and matching
        # basal segments; apical learning only on the matching segments.
        if self.active_cells_context.sparse.size > 0:
            for learning_segments in (learning_active_basal_segments, learning_matching_basal_segments):
                self._learn(self.basal_connections, learning_segments, self.active_cells_context,
                            self.active_cells_context.sparse,
                            self.num_potential_basal, self.sample_size_basal, self.max_synapses_per_segment_basal,
                            self.initial_permanence_basal, self.permanence_increment_basal,
                            self.permanence_decrement_basal,
                            self.learning_threshold_basal)
        if self.active_cells_feedback.sparse.size > 0:
            self._learn(self.apical_connections, learning_matching_apical_segments, self.active_cells_feedback,
                        self.active_cells_feedback.sparse,
                        self.num_potential_apical, self.sample_size_apical, self.max_synapses_per_segment_apical,
                        self.initial_permanence_apical, self.permanence_increment_apical,
                        self.permanence_decrement_apical,
                        self.learning_threshold_apical)
        # Punish incorrect predictions with a negative permanence step.
        # NOTE(review): both basal and apical punishment are gated on the
        # *basal* decrement being non-zero — confirm that is intentional.
        if self.predicted_segment_decrement_basal != 0.0:
            if self.active_cells_context.sparse.size > 0:
                for segment in basal_segments_to_punish:
                    self.basal_connections.adaptSegment(segment, self.active_cells_context,
                                                        -self.predicted_segment_decrement_basal, 0.0,
                                                        self.prune_zero_synapses, self.learning_threshold_basal)
            if self.active_cells_feedback.sparse.size > 0:
                for segment in apical_segments_to_punish:
                    self.apical_connections.adaptSegment(segment, self.active_cells_feedback,
                                                         -self.predicted_segment_decrement_apical, 0.0,
                                                         self.prune_zero_synapses, self.learning_threshold_apical)
        # Grow new segments
        if self.active_cells_context.sparse.size > 0:
            self._learn_on_new_segments(self.basal_connections,
                                        cells_to_grow_apical_and_basal_segments,
                                        self.active_cells_context.sparse,
                                        self.sample_size_basal, self.max_synapses_per_segment_basal,
                                        self.initial_permanence_basal,
                                        self.max_segments_per_cell_basal)
        if self.active_cells_feedback.sparse.size > 0:
            self._learn_on_new_segments(self.apical_connections,
                                        np.concatenate((cells_to_grow_apical_segments,
                                                        cells_to_grow_apical_and_basal_segments)),
                                        self.active_cells_feedback.sparse,
                                        self.sample_size_apical, self.max_synapses_per_segment_apical,
                                        self.initial_permanence_apical,
                                        self.max_segments_per_cell_apical)
    self.active_cells.sparse = np.unique(new_active_cells.astype('uint32'))
    self.winner_cells.sparse = np.unique(new_winner_cells)
    # Exponential moving average of the active-column count (smoothing sm_ac).
    n_active_columns = self.active_columns.sparse.size
    self.mean_active_columns = self.sm_ac * self.mean_active_columns + (
            1 - self.sm_ac) * n_active_columns
    # Anomaly = fraction of active columns that burst (were unpredicted);
    # defined as 1.0 when no column is active.
    if n_active_columns != 0:
        anomaly = len(bursting_columns) / n_active_columns
    else:
        anomaly = 1.0
    # Sliding-window mean update of the anomaly threshold.
    self.anomaly_threshold = self.anomaly_threshold + (anomaly - self.anomaly[0]) / self.anomaly_window
    self.anomaly.append(anomaly)
    self.anomaly.pop(0)
def _learn(self, connections, learning_segments, active_cells, winner_cells, num_potential, sample_size,
max_synapses_per_segment,
initial_permanence, permanence_increment, permanence_decrement, segmentThreshold):
"""
Learn on specified segments
:param connections: exemplar of Connections class
:param learning_segments: list of segments' id
:param active_cells: list of active cells' id
:param winner_cells: list of winner cells' id (cells to which connections will be grown)
:param num_potential: list of counts of potential synapses for every segment
:return:
"""
for segment in learning_segments:
connections.adaptSegment(segment, active_cells, permanence_increment, permanence_decrement,
self.prune_zero_synapses, segmentThreshold)
if sample_size == -1:
max_new = len(winner_cells)
else:
max_new = sample_size - num_potential[segment]
if max_synapses_per_segment != -1:
synapse_counts = connections.numSynapses(segment)
num_synapses_to_reach_max = max_synapses_per_segment - synapse_counts
max_new = min(max_new, num_synapses_to_reach_max)
if max_new > 0:
connections.growSynapses(segment, winner_cells, initial_permanence, self.rng, max_new)
def _learn_on_new_segments(self, connections: Connections, new_segment_cells, growth_candidates, sample_size,
                           max_synapses_per_segment,
                           initial_permanence, max_segments_per_cell):
    """
    Grow one new segment on each given cell and populate it with synapses
    to the growth-candidate cells.

    :param connections:
    :param new_segment_cells: cells' id to grow new segments on
    :param growth_candidates: cells' id to grow synapses to
    :return:
    """
    # The per-segment synapse budget is capped by the candidate count and,
    # when enabled (!= -1), by the sampling and per-segment limits.
    caps = [len(growth_candidates)]
    if sample_size != -1:
        caps.append(sample_size)
    if max_synapses_per_segment != -1:
        caps.append(max_synapses_per_segment)
    synapse_budget = min(caps)
    for target_cell in new_segment_cells:
        segment = connections.createSegment(target_cell, max_segments_per_cell)
        connections.growSynapses(segment, growth_candidates, initial_permanence, self.rng,
                                 maxNew=synapse_budget)
def _calculate_learning(self, bursting_columns, correct_predicted_cells):
    """
    Calculates which segments to train and where to grow new segments
    :param bursting_columns: numpy array of columns' id
    :param correct_predicted_cells: numpy array of cells' id
    :return: tuple of uint32 arrays:
        (active basal segments to learn, matching basal segments to learn,
        matching apical segments to learn, cells to grow apical segments on,
        basal segments to punish, apical segments to punish,
        cells to grow both apical and basal segments on, winner cells)
    """
    # Correctly predicted columns
    # choose active segments for correctly predicted cells
    learning_active_basal_segments = self.basal_connections.filterSegmentsByCell(self.active_segments_basal,
                                                                                 correct_predicted_cells)
    # choose all matching apical segments for correctly predicted segments
    # if there is no matching segment, we should grow an apical segment on this cell
    learning_matching_apical_segments, cells_to_grow_apical_segments = setCompare(self.matching_segments_apical,
                                                                                  correct_predicted_cells,
                                                                                  aKey=self.apical_connections.mapSegmentsToCells(
                                                                                      self.matching_segments_apical),
                                                                                  rightMinusLeft=True)
    # narrow apical segments to the best one per correctly predicted cell
    learning_matching_apical_segments = self._choose_best_segment_per_cell(self.apical_connections,
                                                                           correct_predicted_cells,
                                                                           learning_matching_apical_segments,
                                                                           self.num_potential_apical)
    # all cells with matching segments
    cells_for_matching_basal = self.basal_connections.mapSegmentsToCells(self.matching_segments_basal)
    cells_for_matching_apical = self.apical_connections.mapSegmentsToCells(self.matching_segments_apical)
    matching_cells = np.unique(cells_for_matching_basal)
    # split matching cells into those inside bursting columns vs the bursting
    # columns that contain no matching cell at all (rightMinusLeft=True).
    matching_cells_in_bursting_columns, bursting_columns_with_no_match = setCompare(matching_cells,
                                                                                    bursting_columns,
                                                                                    aKey=self._columns_for_cells(
                                                                                        matching_cells),
                                                                                    rightMinusLeft=True)
    # choose the best segment per cell
    if matching_cells_in_bursting_columns.size > 0:
        (learning_matching_basal_segments,
         learning_matching_apical_segments2,
         cells_to_grow_apical_segments2
         ) = self._choose_best_segment_per_column(
            matching_cells_in_bursting_columns)
    else:
        learning_matching_basal_segments = np.empty(0, dtype=np.int32)
        learning_matching_apical_segments2 = np.empty(0, dtype=np.int32)
        cells_to_grow_apical_segments2 = np.empty(0, dtype=np.int32)
    # cells on which new apical and basal segments will be grown
    if bursting_columns_with_no_match.size > 0:
        cells_to_grow_apical_and_basal_segments = self._get_cells_with_fewest_segments(self.basal_connections,
                                                                                       self.apical_connections,
                                                                                       bursting_columns_with_no_match)
    else:
        cells_to_grow_apical_and_basal_segments = np.empty(0, dtype=UINT_DTYPE)
    # compile all segments and cells together
    cells_to_grow_apical_segments = np.concatenate([cells_to_grow_apical_segments, cells_to_grow_apical_segments2])
    learning_matching_apical_segments = np.concatenate(
        [learning_matching_apical_segments, learning_matching_apical_segments2])
    # winner cells: correct predictions + cells of the chosen matching basal
    # segments + cells receiving brand-new segments
    winner_cells = np.concatenate(
        (correct_predicted_cells,
         self.basal_connections.mapSegmentsToCells(learning_matching_basal_segments),
         cells_to_grow_apical_and_basal_segments)
    )
    # Incorrectly predicted columns: matching segments whose cell's column is
    # NOT active get punished.
    incorrect_matching_basal_mask = np.isin(self._columns_for_cells(cells_for_matching_basal),
                                            self.active_columns.sparse, invert=True)
    incorrect_matching_apical_mask = np.isin(self._columns_for_cells(cells_for_matching_apical),
                                             self.active_columns.sparse, invert=True)
    basal_segments_to_punish = self.matching_segments_basal[incorrect_matching_basal_mask]
    apical_segments_to_punish = self.matching_segments_apical[incorrect_matching_apical_mask]
    return (learning_active_basal_segments.astype('uint32'),
            learning_matching_basal_segments.astype('uint32'),
            learning_matching_apical_segments.astype('uint32'),
            cells_to_grow_apical_segments.astype('uint32'),
            basal_segments_to_punish.astype('uint32'),
            apical_segments_to_punish.astype('uint32'),
            cells_to_grow_apical_and_basal_segments.astype('uint32'),
            winner_cells.astype('uint32'))
def _choose_best_segment_per_column(self, cells):
    """
    Chooses best matching segment per column among the cells, using apical tie breaking.
    :param cells: numpy array of cells' id
    :return: (basal segments to learn, apical segments to learn,
        cells to grow apical segments on) — all uint32 arrays
    """
    candidate_basal_segments = self.basal_connections.filterSegmentsByCell(self.matching_segments_basal, cells)
    candidate_apical_segments = self._choose_best_segment_per_cell(self.apical_connections, cells,
                                                                   self.matching_segments_apical,
                                                                   self.num_potential_apical)
    # mask of basal candidates whose cell also carries a candidate apical segment
    intersection_mask = np.in1d(self.basal_connections.mapSegmentsToCells(candidate_basal_segments),
                                self.apical_connections.mapSegmentsToCells(candidate_apical_segments))
    candidate_basal_with_apical_neighbour = candidate_basal_segments[intersection_mask]
    # for segment, that have no adjacent apical segment the score is zero, else score is sigmoid(best_apical_segment) - 0.5
    cells_for_apical_segments = self.apical_connections.mapSegmentsToCells(candidate_apical_segments)
    cells_for_basal_segments = self.basal_connections.mapSegmentsToCells(candidate_basal_with_apical_neighbour)
    tiebreaker = np.zeros_like(candidate_basal_segments)
    # WARNING, lazy realization of tiebreaking! May be slow!
    # TODO make optimized tiebreaking
    tiebreaker[intersection_mask] = np.array(
        [exp(self.num_potential_apical[candidate_apical_segments[cells_for_apical_segments == x]].sum()) for x
         in cells_for_basal_segments]
    )
    # one winner per column: basal potential count plus the bounded apical
    # tiebreak term tiebreaker/(tiebreaker+1) - 0.5 (zero for no neighbour)
    one_per_column_filter = argmaxMulti(
        self.num_potential_basal[candidate_basal_segments] + tiebreaker / (tiebreaker + 1) - 0.5,
        groupKeys=self._columns_for_cells(
            self.basal_connections.mapSegmentsToCells(candidate_basal_segments)))
    learning_basal_segments = candidate_basal_segments[one_per_column_filter]
    cells_for_learning_basal_segments = self.basal_connections.mapSegmentsToCells(learning_basal_segments)
    # keep only apical candidates that live on the winning basal cells
    learning_apical_segments = candidate_apical_segments[np.in1d(cells_for_apical_segments,
                                                                 cells_for_learning_basal_segments)]
    # if there is no matching apical segment on learning_basal_segment: grow one
    cells_to_grow_apical_segments = cells_for_learning_basal_segments[np.in1d(cells_for_learning_basal_segments,
                                                                              cells_for_apical_segments,
                                                                              invert=True)]
    return (learning_basal_segments.astype('uint32'),
            learning_apical_segments.astype('uint32'),
            cells_to_grow_apical_segments.astype('uint32'))
@staticmethod
def _choose_best_segment_per_cell(connections, cells, segments, num_potential):
"""
Calculates best matching segment per cell.
:param connections:
:param cells: numpy array of cells' id
:param segments: numpy array of segments' id
:param num_potential:
:return:
"""
candidate_segments = connections.filterSegmentsByCell(segments, | |
# repo: AntonBiryukovUofC/unearthed-zinc
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import QuantileTransformer
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
np.random.seed(123)
import xgboost as xgb
import lightgbm as lgb
from sklearn.metrics import make_scorer
# NOTE(review): this first DROPCOLS list is immediately overwritten by the
# second assignment below and is therefore dead code — presumably kept from an
# earlier feature-selection iteration; confirm before deleting. It also lists
# 'rougher.state.floatbank10_c_level' twice.
DROPCOLS = ['primary_cleaner.state.floatbank8_a_level',
            'rougher.state.floatbank10_a_level',
            'secondary_cleaner.state.floatbank6_a_level',
            'rougher.state.floatbank10_f_level',
            'rougher.state.floatbank10_e_level',
            'secondary_cleaner.state.floatbank4_a_level',
            'rougher.state.floatbank10_c_level',
            'rougher.state.floatbank10_d_level',
            'secondary_cleaner.state.floatbank4_a_air',
            'rougher.state.floatbank10_c_level',
            'secondary_cleaner.state.floatbank3_b_level',
            'secondary_cleaner.state.floatbank4_b_level',
            'secondary_cleaner.state.floatbank2_b_level',
            ]
# Effective value: columns dropped from the feature set before training.
DROPCOLS = ['rougher.input.floatbank10_copper_sulfate',
            'rougher.input.floatbank10_xanthate',
            'rougher.state.floatbank10_b_air',
            'rougher.state.floatbank10_e_air',
            'rougher.state.floatbank10_f_air',
            'primary_cleaner.state.floatbank8_b_air',
            'primary_cleaner.state.floatbank8_c_air',
            "secondary_cleaner.state.floatbank4_b_air",
            'secondary_cleaner.state.floatbank2_b_air',
            "secondary_cleaner.state.floatbank5_b_air",
            "secondary_cleaner.state.floatbank3_a_air"
            ]
def _mase_numeric_only(predicted, measured):
naive_forecast_error = np.abs(measured[1:] - measured[:-1]).mean()
forecast_error = \
np.abs(measured - np.nan_to_num(predicted)) / naive_forecast_error
return np.nanmean(forecast_error)
def mase(predicted, measured, min_samples=3):
if min_samples < 2:
raise ValueError('mase.min_samples must be at least 2')
# Make sure we have numpy arrays
predicted = np.asarray(predicted)
measured = np.asarray(measured)
# Apply MASE over all the non-NaN slices with at least 3 hours of data
if np.isnan(measured).any():
segments = [
_mase_numeric_only(predicted[_slice], measured[_slice])
for _slice in np.ma.clump_unmasked(np.ma.masked_invalid(measured))
if abs(_slice.stop - _slice.start) > min_samples
]
if not segments:
raise ValueError("Couldn't find any non-NaN segments longer than "
"{} in measurements".format(min_samples))
score = np.mean(segments)
else:
if len(measured) < min_samples:
raise ValueError('Need at least {} samples to calculate MASE'.format(min_samples))
score = _mase_numeric_only(predicted, measured)
return score
def my_custom_scorer():
    """Return an sklearn scorer wrapping `mase` (lower is better)."""
    scorer = make_scorer(mase, greater_is_better=False)
    return scorer
def train_model(X, y, folds, model=None, score=mase, params=None, fixed_length=False, train_splits=None,
                test_splits=None):
    """
    Cross-validate `model` over the splits produced by `folds`.

    :param X: pandas DataFrame of features (indexed positionally via iloc)
    :param y: pandas Series of targets aligned with X
    :param folds: splitter whose .split(X, train_splits=..., test_splits=...)
        yields (train_index, valid_index) pairs
    :param model: callable (X_train, y_train, X_valid, y_valid, params=...) ->
        fitted object exposing .evaluate(X_val=..., y_val=...)
    :param score: unused; kept for interface compatibility
    :param params: forwarded to `model`
    :param fixed_length: unused; kept for interface compatibility
    :param train_splits: forwarded to folds.split
    :param test_splits: forwarded to folds.split
    :raises ValueError: if the splitter yields no folds
    :return: (per-fold scores, mean score, std of scores, last fitted model)
    """
    scores = []
    m = None
    for fold_n, (train_index, valid_index) in enumerate(
            folds.split(X, train_splits=train_splits, test_splits=test_splits)):
        X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
        y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
        m = model(X_train, y_train, X_valid, y_valid, params=params)
        score_val = m.evaluate(X_val=X_valid, y_val=y_valid)
        print('')
        scores.append(score_val)
    # BUG FIX: the original referenced `m` after the loop, raising NameError
    # when the splitter produced no folds; fail with a clear message instead.
    if m is None:
        raise ValueError('folds.split produced no splits')
    print(f'CV mean score: {np.mean(scores):.4f}, std: {np.std(scores):.4f}.')
    mu = np.mean(scores)
    sd = np.std(scores)
    return scores, mu, sd, m
#
class TimeSeriesSplitImproved(TimeSeriesSplit):
    """TimeSeriesSplit variant supporting multi-split train/test windows and
    an optional fixed-length (rolling) training window."""
    def split(self, X, y=None, groups=None, fixed_length=False,
              train_splits=5, test_splits=6):
        """Generate indices to split data into training and test set.

        The sample range is cut into ``n_splits + 1`` equal chunks; each yield
        places ``test_splits`` chunks after the training window, which either
        grows from the start (default) or rolls with a fixed width of
        ``train_splits`` chunks (``fixed_length=True``).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        fixed_length : bool, whether training sets should always have a
            common length
        train_splits : positive int, minimum number of chunks in the
            training window
        test_splits : positive int, number of chunks in the test window

        Yields
        ------
        (train, test) : pair of ndarrays of positional indices.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_folds = self.n_splits + 1
        train_splits = int(train_splits)
        test_splits = int(test_splits)
        if n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds ={0} greater than the number of samples: {1}.").format(n_folds,
                                                                                                     n_samples))
        indices = np.arange(n_samples)
        split_size = n_samples // n_folds
        test_size = split_size * test_splits
        train_size = split_size * train_splits
        remainder = n_samples % n_folds
        # First test window starts after the initial training chunks plus any
        # leftover samples that don't divide evenly into chunks.
        test_starts = range(train_size + remainder,
                            n_samples - (test_size - split_size),
                            split_size)
        if fixed_length:
            for i, start in enumerate(test_starts):
                # The very first window absorbs the remainder samples.
                extra = remainder if i == 0 else 0
                yield (indices[(start - train_size - extra):start],
                       indices[start:start + test_size])
        else:
            for start in test_starts:
                yield (indices[:start],
                       indices[start:start + test_size])
# This function takes one model and fit it to the train and test data
# It returns the model MASE, CV prediction, and test prediction
# Create a function to fit a base model on K-1 folds, predict on 1 fold
def base_fit(model, folds, features, target, trainData, testData):
    """
    Fit `model` on K-1 folds of `trainData` and predict the held-out fold
    each time (out-of-fold predictions); test predictions are averaged over
    the per-fold models.

    XGBoost/LightGBM models are fitted with early stopping on the held-out
    fold and predict with their best iteration.

    :param model: sklearn-style regressor (special-cased for xgb/lgb)
    :param folds: splitter yielding (train_index, val_index) pairs
    :param features: list of feature column names
    :param target: target column name
    :param trainData: training DataFrame
    :param testData: test DataFrame
    :return: (model_mase, model_val_predictions, model_test_predictions)
    """
    # Initialize empty lists and matrix to store data
    model_mase = []
    model_val_predictions = np.empty((trainData.shape[0], 1))
    model_val_true = np.zeros((trainData.shape[0], 1))
    model_test_predictions = np.zeros((testData.shape[0],))
    k = 0
    # Loop through the index in KFolds
    for train_index, val_index in folds.split(trainData):
        k = k + 1
        # Split the train data into train and validation data
        train, validation = trainData.iloc[train_index], trainData.iloc[val_index]
        # Get the features and target
        train_features, train_target = train[features], train[target]
        validation_features, validation_target = validation[features], validation[target]
        # Fit the base model to the train data and make prediction for validation data
        if (model.__class__ == xgb.sklearn.XGBRegressor) | (model.__class__ == lgb.sklearn.LGBMRegressor):
            print('Fitting a boost model with limited tree rounds')
            evalset = [(validation_features, np.ravel(validation_target))]
            model.fit(train_features, np.ravel(train_target), eval_set=evalset, early_stopping_rounds=20, verbose=False)
        else:
            model.fit(train_features, train_target.values)
        if (model.__class__ == xgb.sklearn.XGBRegressor):
            print(model.best_ntree_limit)
            print('Using xgboost with limited tree rounds')
            validation_predictions = model.predict(validation_features, ntree_limit=model.best_ntree_limit)
        elif (model.__class__ == lgb.sklearn.LGBMRegressor):
            print(model.best_iteration_)
            print('Using lgbmboost with limited tree rounds')
            validation_predictions = model.predict(validation_features, num_iteration=model.best_iteration_)
        else:
            print('Using generic predict')
            validation_predictions = model.predict(validation_features)
        # Calculate and print the MASE for validation data
        print(mase(validation_predictions, validation_target))
        # Save the validation prediction for level 1 model training
        model_val_predictions[val_index, 0] = validation_predictions.reshape(validation.shape[0])
        model_val_true[val_index, 0] = validation_target.values
        model_test_predictions += model.predict(testData[features])
    # Average the accumulated per-fold test predictions.
    # NOTE: raises ZeroDivisionError if `folds` yields no splits.
    model_test_predictions = model_test_predictions / k
    # Overall MASE of the out-of-fold predictions
    model_mase.append(mase(model_val_predictions, model_val_true))
    # BUG FIX: the original function never returned; callers (see `stacks`)
    # unpack a 3-tuple, which crashed on the implicit None.
    return model_mase, model_val_predictions, model_test_predictions
# Create a function to fit a dictionary of models, and get their OOF predictions from the training data
# Function that takes a dictionary of models and fits it to the data using baseFit
# The results of the models are then aggregated and returned for level 1 model training
def stacks(level0_models, folds, features, target, trainData, testData):
    """
    Fit every level-0 model via base_fit and collect its out-of-fold train
    predictions and averaged test predictions as level-1 features.

    :param level0_models: dict name -> regressor
    :return: (level0_trainFeatures, level0_testFeatures), one column per model
    """
    n_rows_train = trainData.shape[0]
    n_rows_test = testData.shape[0]
    n_models = len(level0_models.keys())
    level0_trainFeatures = np.empty((n_rows_train, n_models))
    level0_testFeatures = np.empty((n_rows_test, n_models))
    # One column of level-1 features per base model, in dict order.
    for i, (key, candidate) in enumerate(level0_models.items()):
        print('Fitting %s -----------------------' % (key))
        model_mase, val_predictions, test_predictions = base_fit(candidate, folds, features, target, trainData,
                                                                 testData)
        # Print the average MASE for the model
        print('%s average MASE: %s' % (key, np.mean(model_mase)))
        print('\n')
        level0_trainFeatures[:, i] = val_predictions.reshape(n_rows_train)
        level0_testFeatures[:, i] = test_predictions.reshape(n_rows_test)
    return (level0_trainFeatures, level0_testFeatures)
# Function that takes a dictionary of classifiers and train them on base model predictions
def stackerTraining(stacker, folds, level0_trainFeatures, trainData, target=None):
for k in stacker.keys():
print('Training stacker %s' % (k))
stacker_model = stacker[k]
y_pred = np.zeros_like(trainData[target].values)
y_true = np.zeros_like(trainData[target].values)
for t, v in folds.split(trainData, trainData[target]):
train, validation = level0_trainFeatures[t, :], level0_trainFeatures[v, :]
# Get the features and target
train_features, train_target = train, trainData.iloc[t][target]
validation_features, validation_target = validation, trainData.iloc[v][target]
if (stacker_model.__class__ == xgb.sklearn.XGBRegressor) | (
stacker_model.__class__ == lgb.sklearn.LGBMRegressor):
print('Fitting a boost model with limited tree rounds')
evalset = [(validation_features, np.ravel(validation_target))]
stacker_model.fit(train_features, np.ravel(train_target), eval_set=evalset, early_stopping_rounds=20,
verbose=False)
print(stacker_model.best_iteration_)
else:
stacker_model.fit(level0_trainFeatures[t, :], train_target)
y_pred[v] = stacker_model.predict(level0_trainFeatures[v])
y_true[v] = trainData.iloc[v][target].values
stacker_mase = mase(y_pred, y_true)
average_mase = mase(level0_trainFeatures.mean(axis=1), y_true)
print('%s Stacker MASE: %s' % (k, stacker_mase))
print('%s Averaging MASE: %s' % (k, average_mase))
# Differenced features dropped for the "final" target models.
DROPCOLS_DIFF_FINAL = [
    "diff_week",
    "diff_encod_rel_primary_cleaner.input.copper_sulfate",
    "diff_dayw",
    "diff_encod_rel_primary_cleaner.input.depressant",
    "diff_encod_rel_rougher.input.feed_pb",
    "diff_encod_dif_primary_cleaner.input.depressant",
    "diff_encod_val_primary_cleaner.input.feed_size",
    "diff_encod_rel_primary_cleaner.input.xanthate",
    "diff_encod_dif_primary_cleaner.input.xanthate",
    "diff_daily_avg_final",
    "diff_encod_dif_primary_cleaner.input.feed_size",
    "diff_encod_rel_primary_cleaner.state.floatbank8_a_level",
    "diff_encod_dif_primary_cleaner.state.floatbank8_a_level",
    "diff_hour",
    "diff_daily_avg_rougher",
    "diff_rougher.state.floatbank10_b_level",
    "diff_primary_cleaner.input.feed_size",
    "diff_primary_cleaner.state.floatbank8_a_air",
    "diff_primary_cleaner.state.floatbank8_a_level",
    "diff_primary_cleaner.state.floatbank8_d_air",
    "diff_rougher.input.feed_fe",
    "diff_rougher.input.floatbank11_copper_sulfate",
    "diff_rougher.state.floatbank10_a_air",
    "diff_rougher.state.floatbank10_a_level",
    "diff_rougher.state.floatbank10_c_level",
    "diff_secondary_cleaner.state.floatbank6_a_level",
    "diff_rougher.state.floatbank10_d_air",
    "diff_rougher.state.floatbank10_d_level",
    "diff_secondary_cleaner.state.floatbank2_a_air",
    "diff_secondary_cleaner.state.floatbank2_b_air",
    "diff_secondary_cleaner.state.floatbank2_b_level",
    "diff_secondary_cleaner.state.floatbank3_b_level",
    "diff_secondary_cleaner.state.floatbank4_a_level",
    "diff_secondary_cleaner.state.floatbank5_a_air",
    "diff_secondary_cleaner.state.floatbank5_b_air",
    "diff_secondary_cleaner.state.floatbank5_b_level",
    "diff_rougher.state.floatbank10_e_level",
    "diff_encod_val_primary_cleaner.input.copper_sulfate",
]
# Raw/encoded features dropped for the "final" target models.
# NOTE(review): "secondary_cleaner.state.floatbank3_a_level" appears twice in
# this list — harmless if used with pandas drop, but presumably accidental.
DROPCOLS_FINAL = [
    "primary_cleaner.state.floatbank8_b_level",
    "encod_rel_primary_cleaner.input.depressant",
    "secondary_cleaner.state.floatbank2_a_level",
    "rougher.state.floatbank10_e_level",
    "rougher.state.floatbank10_d_level",
    "encod_dif_primary_cleaner.input.depressant",
    "secondary_cleaner.state.floatbank3_a_level",
    "primary_cleaner.state.floatbank8_a_level",
    "hour",
    "secondary_cleaner.state.floatbank4_a_level",
    "secondary_cleaner.state.floatbank3_b_level",
    "secondary_cleaner.state.floatbank5_a_level",
    "encod_val_rougher.input.feed_zn",
    "encod_rel_primary_cleaner.state.floatbank8_a_level",
    "secondary_cleaner.state.floatbank2_b_level",
    "encod_val_primary_cleaner.input.feed_size",
    "secondary_cleaner.state.floatbank6_a_level",
    "rougher.state.floatbank10_a_level",
    "encod_dif_primary_cleaner.state.floatbank8_a_level",
    "secondary_cleaner.state.floatbank3_a_level"
]
# Differenced features dropped for the "rougher" target models.
DROPCOLS_DIFF_ROUGHER = [
    "diff_rougher.state.floatbank10_c_air",
    "diff_rougher.state.floatbank10_b_air",
    "diff_encod_dif_rougher.input.feed_zn",
    "diff_rougher.input.feed_size",
    "diff_rougher.state.floatbank10_f_air",
    "diff_encod_val_rougher.input.feed_fe",
    "diff_rougher.input.feed_rate",
    "diff_encod_rel_rougher.input.feed_fe",
    "diff_rougher.state.floatbank10_d_level",
    "diff_encod_dif_rougher.input.feed_fe",
    "diff_rougher.state.floatbank10_a_air",
    "diff_encod_dif_rougher.input.feed_pb",
    "diff_encod_rel_rougher.input.feed_pb",
    "diff_rougher.state.floatbank10_b_level",
    "diff_rougher.state.floatbank10_a_level",
    "diff_hour",
    "diff_daily_avg_rougher",
    "diff_rougher.state.floatbank10_f_level",
    "diff_rougher.state.floatbank10_e_level",
    "diff_rougher.state.floatbank10_e_air",
    "diff_dayw"
]
# Raw features dropped for the "rougher" target models.
DROPCOLS_ROUGHER = [
    "rougher.state.floatbank10_f_level"
]
# Top-importance columns whose first differences are added as features.
# BUG FIX: the original literal was missing commas after four entries
# (floatbank8_d_level, floatbank10_copper_sulfate, floatbank8_c_level,
# floatbank8_c_air), so Python's implicit string-literal concatenation glued
# adjacent column names into nonsense strings that match no real column.
COLS_TO_DIFF_TOP10 = set([
    # these matter for Final
    "rougher.input.feed_zn",
    "primary_cleaner.input.xanthate",
    "rougher.input.floatbank10_xanthate",
    "rougher.input.feed_pb",
    "primary_cleaner.input.depressant",
    "encod_val_rougher.input.feed_zn",
    "rougher.input.floatbank11_xanthate",
    "primary_cleaner.state.floatbank8_d_level",
    "rougher.input.floatbank10_copper_sulfate",
    "encod_val_rougher.input.feed_pb",
    "primary_cleaner.state.floatbank8_c_level",
    "rougher.input.feed_sol",
    "primary_cleaner.state.floatbank8_c_air",
    "primary_cleaner.input.copper_sulfate",
    # these come from Rougher (deleted the duplicates)
    "rougher.input.floatbank11_xanthate",
    "encod_val_rougher.input.feed_zn",
    "rougher.input.feed_fe",
    "rougher.state.floatbank10_d_air",
    "rougher.state.floatbank10_c_level",
    "encod_val_rougher.input.feed_pb",
    "encod_rel_rougher.input.feed_zn",
    "rougher.input.floatbank11_copper_sulfate",
    "rougher.input.feed_sol",
    "rougher.input.feed_size"
])
#
# COLS_TO_DIFF_TOP20 = [
#
# ]
# Level-0 base models for the "rougher" target: LightGBM regressors with
# MAE objective, differing only in random_state and tuned hyperparameters.
# NOTE(review): 'feature_fraction' and 'bagging_fraction' are passed as
# *strings* (e.g. '0.363'); LightGBM documents these as floats — confirm the
# strings are coerced (or even honored) rather than silently ignored.
level0_models_rougher = {}
obj = 'mae'
level0_models_rougher['LGBM_rougher_base_a'] = lgb.LGBMRegressor(objective=obj,
                                                                 learning_rate=0.05, n_estimators=500, random_state=91,
                                                                 **{'max_depth': 5, 'num_leaves': 100, 'feature_fraction': '0.363', 'bagging_fraction': '0.262'})
level0_models_rougher['LGBM_rougher_base_b'] =lgb.LGBMRegressor(objective=obj,
                                                                learning_rate=0.05, n_estimators=500, random_state=92,
                                                                **{'max_depth': 4, 'num_leaves': 110, 'feature_fraction': '0.448', 'bagging_fraction': '0.445'})
level0_models_rougher['LGBM_rougher_base_c'] =lgb.LGBMRegressor(objective=obj,
                                                                learning_rate=0.05, n_estimators=500, random_state=93,
                                                                **{'max_depth': 4, 'num_leaves': 155, 'feature_fraction': '0.449', 'bagging_fraction': '0.598'})
level0_models_rougher['LGBM_rougher_base_d'] =lgb.LGBMRegressor(objective=obj,
learning_rate=0.05, n_estimators=500, random_state=94,
**{'max_depth': 5, 'num_leaves': | |
# gh_stars: 10-100
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import numpy as np
import tensorflow as tf
from .base_vocabs import BaseVocab
from . import conllu_vocabs as cv
from parser.neural import nn, nonlin, classifiers
import pdb
#***************************************************************
class SecondOrderVocab(BaseVocab):
""""""
#=============================================================
def __init__(self, *args, **kwargs):
    """
    Initialize the vocab and define the special-token strings/indices for
    padding, root, and sentence boundaries.
    """
    super(SecondOrderVocab, self).__init__(*args, **kwargs)
    self.PAD_STR = '_'  # placeholder string for padded positions
    self.PAD_IDX = -1  # index used for padding (see index()/token())
    self.ROOT_STR = '0'  # the root token is head index 0
    self.ROOT_IDX = 0
    self.BOS_STR = '<bos>'  # beginning-of-sentence marker
    self.BOS_IDX = 999998  # NOTE(review): sentinel — presumably chosen to exceed any real head index; confirm
    self.EOS_STR = '<eos>'  # end-of-sentence marker
    self.EOS_IDX = 999999
    return
#=============================================================
def add(self, token):
    """Alias for index(): adding a token just resolves it to its index."""
    resolved = self.index(token)
    return resolved
#=============================================================
def token(self, index):
    """Map an integer head index back to its string form ('_' for padding)."""
    return str(index) if index > -1 else '_'
#=============================================================
def index(self, token):
    """Map a head-token string to its integer index ('_' maps to -1)."""
    return -1 if token == '_' else int(token)
#=============================================================
def get_root(self):
    """Return the string form of the root token."""
    root = self.ROOT_STR
    return root
#=============================================================
def get_bos(self):
    """Return the string form of the beginning-of-sentence token."""
    bos = self.BOS_STR
    return bos
#=============================================================
def get_eos(self):
    """Return the string form of the end-of-sentence token."""
    eos = self.EOS_STR
    return eos
#=============================================================
def get_bilinear_classifier(self, layer, token_weights, variable_scope=None, reuse=False, debug=False, token_weights4D=None,prev_output=None,sentence_mask=None):
""""""
outputs = {}
recur_layer = layer
hidden_keep_prob = 1 if reuse else self.hidden_keep_prob
hidden_keep_prob_tri = 1 if reuse else self.hidden_keep_prob_tri
add_linear = self.add_linear
#here set n_splits to be three
if self.separate_embed:
n_splits = 9*(1+self.linearize+self.distance)
else:
n_splits = 3*(1+self.linearize+self.distance)
with tf.variable_scope(variable_scope or self.field):
for i in six.moves.range(0, self.n_layers-1):#number of layers of FNN? what is this?
with tf.variable_scope('FC-%d' % i):#here is FNN? did not run
layer = classifiers.hidden(layer, n_splits*self.hidden_size,
hidden_func=self.hidden_func,
hidden_keep_prob=hidden_keep_prob)
with tf.variable_scope('FC-top'):#FNN output and split two layer? FNN+split. Linear transform for a sentence, n_splits is number of features you want
#this linear transformation contains word information
if self.use_unary_hidden:
print('separate unary and binary hidden size')
if self.separate_embed:
hidden_list=2*[self.unary_hidden]+(n_splits-2)*[self.hidden_size]
else:
hidden_list=2*[self.unary_hidden]+n_splits*[self.hidden_size]
else:
hidden_list=n_splits*[self.hidden_size]
#pdb.set_trace()
layers = classifiers.hiddens(layer, hidden_list,
hidden_func=self.hidden_func,
hidden_keep_prob=hidden_keep_prob)
if self.separate_embed:
# unary_head + unary_dep + sib_head + sib_dep + gp_head + gp_dep + gp_(head+dep) + cop_head + cop_dep
unary_layer1, unary_layer2, sib_head, sib_dep, gp_head, gp_dep, gp_headdep, cop_head, cop_dep = layers.pop(0), layers.pop(0), layers.pop(0), layers.pop(0), layers.pop(0)\
, layers.pop(0), layers.pop(0), layers.pop(0), layers.pop(0)
else:
# head + dep + (head+dep)
if self.use_unary_hidden:
unary_layer1, unary_layer2, layer1, layer2, layer3 = layers.pop(0), layers.pop(0), layers.pop(0), layers.pop(0), layers.pop(0)
else:
layer1, layer2, layer3 = layers.pop(0), layers.pop(0), layers.pop(0)
#pdb.set_trace()
unary_layer1=layer1
unary_layer2=layer2
if self.linearize:#false
lin_layer1, lin_layer2 = layers.pop(0), layers.pop(0), layers.pop(0)
if self.distance:#false in graph
dist_layer1, dist_layer2 = layers.pop(0), layers.pop(0), layers.pop(0)
#pdb.set_trace()
if self.layer_mask:
#pdb.set_trace()
if sentence_mask==None:
sentence_mask=tf.expand_dims(tf.cast(tf.transpose(token_weights,[0,2,1])[:,0],dtype=tf.float32),-1)
else:
sentence_mask=tf.expand_dims(tf.cast(sentence_mask,dtype=tf.float32),-1)
unary_layer1=unary_layer1*sentence_mask
unary_layer2=unary_layer2*sentence_mask
if self.separate_embed:
sib_head, sib_dep, gp_head, gp_dep, gp_headdep, cop_head, cop_dep = sib_head*sentence_mask, sib_dep*sentence_mask,\
gp_head*sentence_mask, gp_dep*sentence_mask,\
gp_headdep*sentence_mask, cop_head*sentence_mask,\
cop_dep*sentence_mask
else:
if not self.separate_embed:
layer1=layer1*sentence_mask
layer2=layer2*sentence_mask
layer3=layer3*sentence_mask
pass
with tf.variable_scope('Discriminator'):
if self.diagonal:
logits = classifiers.diagonal_bilinear_discriminator(
layer1, layer2,
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear)
if self.linearize:
with tf.variable_scope('Linearization'):
lin_logits = classifiers.diagonal_bilinear_discriminator(
lin_layer1, lin_layer2,
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear)
if self.distance:
with tf.variable_scope('Distance'):
dist_lamda = 1+tf.nn.softplus(classifiers.diagonal_bilinear_discriminator(
dist_layer1, dist_layer2,
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear))
else:
if not reuse:
print('init: ',self.tri_std, self.tri_std_unary)
if self.as_score:
unary = classifiers.bilinear_discriminator(
unary_layer1, unary_layer2,
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear,tri_std=self.tri_std_unary)
else:
unary = classifiers.bilinear_classifier(
unary_layer1, unary_layer2, 2,
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear,tri_std=self.tri_std_unary)
# head dep dep
if self.use_sib:
with tf.variable_scope('Sibling'):
layer_sib = classifiers.trilinear_discriminator_outer(
sib_head, sib_dep, sib_dep,
hidden_keep_prob=hidden_keep_prob_tri,
add_linear=add_linear, tri_std=self.tri_std, hidden_k=self.hidden_k)
# head head+dep dep
if self.use_gp:
with tf.variable_scope('GrandParents'):
layer_gp = classifiers.trilinear_discriminator_outer(
gp_head, gp_headdep, gp_dep,
hidden_keep_prob=hidden_keep_prob_tri,
add_linear=add_linear, tri_std=self.tri_std, hidden_k=self.hidden_k)
# head dep head
if self.use_cop:
with tf.variable_scope('CoParents'):
layer_cop = classifiers.trilinear_discriminator_outer(
cop_head, cop_dep, cop_head,
hidden_keep_prob=hidden_keep_prob_tri,
add_linear=add_linear, tri_std=self.tri_std, hidden_k=self.hidden_k)
#-----------------------------------------------------------
if self.linearize:
with tf.variable_scope('Linearization'):
lin_logits = classifiers.bilinear_discriminator(
lin_layer1, lin_layer2,
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear)
if self.distance:
with tf.variable_scope('Distance'):
dist_lamda = 1+tf.nn.softplus(classifiers.bilinear_discriminator(
dist_layer1, dist_layer2,
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear))
if debug:
outputs['printdata']={}
outputs['printdata']['q_value_orig']=unary
if self.new_potential:
#pdb.set_trace()
if self.use_sib:
outputs['printdata']['layer_sib_old']=layer_sib
if self.use_cop:
outputs['printdata']['layer_cop_old']=layer_cop
if self.use_gp:
outputs['printdata']['layer_gp_old']=layer_gp
#outputs['printdata']['layer1']=layer1
if self.two_gpu:
print('two gpu training for MF')
#pdb.set_trace()
GPUID=1
else:
GPUID=0
with tf.device('/device:GPU:'+str(GPUID)):
#-----------------------------------------------------------
#Let's start mean field algorithm and CRF-RNN here
#normalize the probability of two labels (1 and 0)
if prev_output!=None:
label_layer=prev_output['label_layer']
with tf.variable_scope('Label_trilinear'):
label_sib,label_cop,label_gp=classifiers.trilinear_label_layer(label_layer, weight_type=1)
layer_sib+=label_sib
layer_cop+=label_cop
layer_gp+=label_gp
if self.layer_mask:
print('use layer mask')
if self.as_score:
unary=unary*tf.cast(tf.transpose(token_weights,[0,2,1]),dtype=tf.float32)
else:
unary=unary*tf.cast(tf.expand_dims(tf.transpose(token_weights,[0,2,1]),-2),dtype=tf.float32)
if self.use_sib:
layer_sib=layer_sib*token_weights4D
if self.remove_root_child:
# abc -> ab,ac
layer_sib=layer_sib*self.token_weights_sib
if self.use_cop:
layer_cop=layer_cop*token_weights4D
if self.remove_root_child:
#abc -> ab,cb
layer_cop=layer_cop*self.token_weights_cop
if self.use_gp:
layer_gp=layer_gp*token_weights4D
if self.remove_root_child:
#abc -> ab, bc
layer_gp=layer_gp*self.token_weights_gp
if self.as_score:
unary_potential=tf.stack([tf.zeros_like(unary),unary],axis=2)
else:
unary_potential=-unary
q_value=unary_potential#suppose 1 is label 1
#1 sibling (n x ma x mb x mc) * (n x ma x mc) -> (n x ma x mb)
#2 grand parent (n x ma x mb x mc) * (n x mb x mc) -> (n x ma x mb)
#3 coparent (n x ma x mb x mc) * (n x mc x mb) -> (n x ma x mb)
if self.new_potential:
if self.use_sib:
layer_sib = layer_sib-tf.linalg.band_part(layer_sib,-1,0) + tf.transpose(tf.linalg.band_part(layer_sib,0,-1),perm=[0,1,3,2])
if self.use_gp:
layer_gp2 = tf.transpose(layer_gp,perm=[0,2,3,1])
if self.use_cop:
#(n x ma x mb x mc) -> (n x mb x ma x mc)
#in order to create a symmtric tensor on ma and mc
layer_cop = tf.transpose(layer_cop,perm=[0,2,1,3])
# first set lower triangle part to be zero, then assign the upper triangle part transposed to lower triangle part
layer_cop = layer_cop - tf.linalg.band_part(layer_cop,-1,0) + tf.transpose(tf.linalg.band_part(layer_cop,0,-1),perm=[0,1,3,2])
# Finally (n x mb x ma x mc) -> (n x ma x mb x mc)
layer_cop = tf.transpose(layer_cop,perm=[0,2,1,3])
#======================CRF-RNN==========================
for i in range(int(self.num_iteration)):
q_value=tf.nn.softmax(q_value,2)
if debug and i==0:
outputs['q_value_old']=q_value
if debug:
outputs['q_value'+str(i)]=q_value
if self.use_sib:
second_temp_sib = tf.einsum('nac,nabc->nab', q_value[:,:,1,:], layer_sib)
else:
second_temp_sib=0
if self.use_gp:
#'''
#a->b->c
second_temp_gp = tf.einsum('nbc,nabc->nab', q_value[:,:,1,:], layer_gp)
second_temp_gp2 = tf.einsum('nca,nabc->nab', q_value[:,:,1,:], layer_gp2)
else:
second_temp_gp=0
second_temp_gp2=0
if self.use_cop:
with tf.device('/device:GPU:2'):
second_temp_cop = tf.einsum('ncb,nabc->nab', q_value[:,:,1,:], layer_cop)
else:
second_temp_cop=0
#'''
if self.self_minus:
#'''
if self.use_sib:
#-----------------------------------------------------------
# minus all a = b = c part
#(n x ma x mb) -> (n x ma) -> (n x ma x 1) | (n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
#Q(a,a)*p(a,b,a)
diag_sib1 = tf.expand_dims(tf.linalg.diag_part(q_value[:,:,1,:]),-1) * tf.transpose(tf.linalg.diag_part(tf.transpose(layer_sib,perm=[0,2,1,3])),perm=[0,2,1])
# (n x ma x mb x mc) -> (n x ma x mb)
#Q(a,b)*p(a,b,b)
diag_sib2 = q_value[:,:,1,:] * tf.linalg.diag_part(layer_sib)
second_temp_sib = second_temp_sib - diag_sib1 - diag_sib2
if self.use_gp:
#(n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
#Q(b,a)*p(a,b,a)
diag_gp1 = tf.transpose(q_value[:,:,1,:],perm=[0,2,1]) * tf.transpose(tf.linalg.diag_part(tf.transpose(layer_gp,perm=[0,2,1,3])),perm=[0,2,1])
#(n x ma x mb) -> (n x mb) -> (n x 1 x mb) | (n x ma x mb x mc) -> (n x ma x mb)
#Q(b,b)*p(a,b,b)
diag_gp2 = tf.expand_dims(tf.linalg.diag_part(q_value[:,:,1,:]),1) * tf.linalg.diag_part(layer_gp)
#(n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
#Q(a,a)*p(a,b,a)
diag_gp21 = tf.expand_dims(tf.linalg.diag_part(q_value[:,:,1,:]),-1) * tf.transpose(tf.linalg.diag_part(tf.transpose(layer_gp2,perm=[0,2,1,3])),perm=[0,2,1])
#(n x ma x mb) -> (n x mb) -> (n x 1 x mb) | (n x ma x mb x mc) -> (n x ma x mb)
#Q(b,a)*p(a,b,b)
diag_gp22 = tf.transpose(q_value[:,:,1,:],perm=[0,2,1]) * tf.linalg.diag_part(layer_gp2)
if debug:
#pdb.set_trace()
'''
outputs['binary_old']=layer_gp
outputs['q_value_old']=q_value
outputs['second_temp_sib_old']=second_temp_gp
outputs['diag_sib1']=diag_gp1
outputs['diag_sib2']=diag_gp2
#'''
pass
second_temp_gp = second_temp_gp - diag_gp1 - diag_gp2
#c->a->b
second_temp_gp2 = second_temp_gp2 - diag_gp21 - diag_gp22
#second_temp_gp=second_temp_gp+second_temp_gp2
if self.use_cop:
with tf.device('/device:GPU:2'):
#(n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
#Q(a,b)*p(a,b,a)
diag_cop1 = q_value[:,:,1,:] * tf.transpose(tf.linalg.diag_part(tf.transpose(layer_cop,perm=[0,2,1,3])),perm=[0,2,1])
#(n x ma x mb) -> (n x mb) -> (n x 1 x mb) | (n x ma x mb x mc) -> (n x ma x mb)
#Q(b,b)*p(a,b,b)
diag_cop2 = tf.expand_dims(tf.linalg.diag_part(q_value[:,:,1,:]),1) * tf.linalg.diag_part(layer_cop)
second_temp_cop = second_temp_cop - diag_cop1 - diag_cop2
#'''
if debug:
if self.use_sib:
outputs['printdata']['second_temp_sib'+str(i)+'after']=second_temp_sib
outputs['printdata']['diag_sib1'+str(i)]=diag_sib1
outputs['printdata']['diag_sib2'+str(i)]=diag_sib2
if self.use_gp:
#outputs['printdata']['second_temp_gp']=second_temp_gp
outputs['printdata']['second_temp_gp'+str(i)+'after']=second_temp_gp
outputs['printdata']['second_temp_gp2'+str(i)+'after']=second_temp_gp2
if self.use_cop:
outputs['printdata']['second_temp_cop'+str(i)+'after']=second_temp_cop
#pdb.set_trace()
second_temp = second_temp_sib + second_temp_gp + second_temp_gp2 + second_temp_cop
if self.remove_loop:
#pdb.set_trace()
second_temp = second_temp - tf.linalg.diag(tf.linalg.diag_part(second_temp))
'''
if not self.sibling_only:
second_temp = second_temp_sib + second_temp_gp + second_temp_cop
elif self.use_sib:
second_temp = second_temp_sib
'''
#Second order potential update function
if debug:
outputs['printdata']['second_temp'+str(i)]=second_temp
if self.as_score:
second_temp=self.unary_weight * unary_potential[:,:,1,:] + second_temp
else:
second_temp=self.unary_weight * unary_potential[:,:,1,:] - second_temp
q_value=tf.stack([unary_potential[:,:,0,:],second_temp],axis=2)
if debug:
outputs['printdata']['q_value'+str(i)]=q_value
#q_value=-unary
#CRF-RNN end
#======================CRF-RNN==========================
#-----------------------------------------------------------
# Process the targets
# (n x m x m) -> (n x m x m)
#here in fact is a graph, which is m*m representing the connection between each edge
unlabeled_targets = self.placeholder#ground truth graph, what is self.placeholder?
#USELESS
shape = tf.shape(unary_layer1)
batch_size, bucket_size = shape[0], shape[1]
# (1 x m)
ids = tf.expand_dims(tf.range(bucket_size), 0)
# (1 x m) -> (1 x 1 x m)
head_ids = tf.expand_dims(ids, -2)
# (1 x m) -> (1 x m x 1)
dep_ids = tf.expand_dims(ids, -1)
if debug:
#outputs['printdata']['logits']=logits
outputs['printdata']['q_value']=q_value
outputs['q_value'+str(i+1)]=tf.nn.softmax(q_value,2)
outputs['printdata']['unary']=unary
#outputs['printdata']['binary']=binary
outputs['printdata']['second_temp']=second_temp
if self.use_sib:
outputs['printdata']['second_temp_sib']=second_temp_sib
outputs['printdata']['layer_sib']=layer_sib
if self.use_gp:
#outputs['printdata']['second_temp_gp']=second_temp_gp
outputs['printdata']['second_temp_gp']=second_temp_gp
outputs['printdata']['second_temp_gp2']=second_temp_gp2
outputs['printdata']['layer_gp']=layer_gp
if self.use_cop:
outputs['printdata']['layer_cop']=layer_cop
outputs['printdata']['second_temp_cop']=second_temp_cop
outputs['printdata']['layer1']=unary_layer1
outputs['printdata']['layer2']=unary_layer2
if not self.separate_embed:
outputs['printdata']['layer3']=layer3
outputs['printdata']['targets']=unlabeled_targets
outputs['printdata']['token_weights']=token_weights
if self.sibling_only:
outputs['printdata']['binary_weights']=binary_weights
outputs['printdata']['binary']=layer_sib
if self.new_potential:
#outputs['printdata']['layer_sib2']=layer_sib2
#outputs['printdata']['layer_gp2']=layer_gp2
#outputs['printdata']['layer_cop2']=layer_cop2
pass
'''
outputs['printdata']['binary_weights']=binary_weights
outputs['printdata']['binary_weights_cop']=binary_weights_cop
outputs['printdata']['binary_weights_gp']=binary_weights_gp
outputs['printdata']['unary_weights']=unary_weights
#'''
#-----------------------------------------------------------
# Note: here need a transpose as the target is the transpose graph(or opposite direction of adjacency graph)
# (n x ma x 2 x mb) -> (n x mb x 2 x ma)
if self.transposed:
q_value=tf.transpose(q_value, [0,3,2,1])
# Compute probabilities/cross entropy
# (n x m x 2 x m) -> (n x m x m x 2)
transposed_logits = tf.transpose(q_value, [0,1,3,2])
probabilities=tf.nn.softmax(transposed_logits) * tf.to_float(tf.expand_dims(token_weights, axis=-1))
#probabilities=tf.nn.sigmoid(transposed_logits[:,:,:,1])*token_weights
#TODO: what I want is still a probability of label 1, compared to the origin sigmoid(logits)? check later
probabilities=probabilities[:,:,:,1]
#label_probabilities = tf.nn.softmax(transposed_logits) * tf.to_float(tf.expand_dims(token_weights, axis=-1))
# (n x m), (n x m x m), (n x m) -> ()
# pdb.set_trace()
targets=unlabeled_targets
logits=transposed_logits[:,:,:,1]
loss = tf.losses.sparse_softmax_cross_entropy(targets,logits,weights=sentence_mask)
# (n x m) -> (n x m x m x 1)
one_hot_targets = tf.expand_dims(tf.one_hot(targets, bucket_size), -1)
# (n x m) -> ()
n_tokens = tf.to_float(tf.reduce_sum(token_weights))
if self.linearize:
# (n x m x m) -> (n x m x 1 x m)
lin_xent_reshaped = tf.expand_dims(lin_xent, -2)
# (n x m x 1 x m) * (n x m x m x 1) -> (n x m x 1 x 1)
lin_target_xent = tf.matmul(lin_xent_reshaped, one_hot_targets)
# (n x m x 1 x 1) -> (n x m)
lin_target_xent = tf.squeeze(lin_target_xent, [-1, -2])
# (n x m), (n x m), (n x m) -> ()
loss -= tf.reduce_sum(lin_target_xent*tf.to_float(token_weights)) / (n_tokens + 1e-12)
if self.distance:
# (n x m x m) -> (n x m x 1 x m)
dist_kld_reshaped = tf.expand_dims(dist_kld, -2)
# (n x m x 1 x m) * (n x m | |
map.get('body') is not None:
self.body = map.get('body')
if map.get('id') is not None:
self.id = map.get('id')
if map.get('status') is not None:
self.status = map.get('status')
return self
class CancelLinkRequest(TeaModel):
    """Request model for cancelling an account link.

    Attributes:
        headers: optional HTTP headers to send with the request.
        temporary_token: temporary token to be bound; this token can only
            access the bind / cancel-bind endpoints. Required.
    """
    def __init__(self, headers=None, temporary_token=None):
        self.headers = headers  # type: Dict[str, str]
        # Temporary token to be bound; grants access to the bind and
        # cancel-bind endpoints only.
        self.temporary_token = temporary_token  # type: str

    def validate(self):
        """Raise if the required ``temporary_token`` field is missing."""
        self.validate_required(self.temporary_token, 'temporary_token')

    def to_map(self):
        """Serialize this model to a plain dict, omitting unset fields."""
        result = {}
        if self.headers is not None:
            result['headers'] = self.headers
        if self.temporary_token is not None:
            result['temporary_token'] = self.temporary_token
        return result

    def from_map(self, map=None):
        """Populate this model from a dict; None/missing entries are ignored.

        The parameter keeps its original name ``map`` for call-site
        compatibility; the mutable ``{}`` default was replaced with ``None``
        to avoid the shared-mutable-default pitfall. Returns ``self``.
        """
        map = map if map is not None else {}
        if map.get('headers') is not None:
            self.headers = map.get('headers')
        if map.get('temporary_token') is not None:
            self.temporary_token = map.get('temporary_token')
        return self
class Captcha(TeaModel):
    """Image-captcha payload: the image (base64), its format and its ID."""

    # Field names double as the dict keys used by to_map/from_map.
    _FIELDS = ('captcha', 'captcha_format', 'captcha_id')

    def __init__(self, captcha=None, captcha_format=None, captcha_id=None):
        # Captcha image, base64-encoded.
        self.captcha = captcha  # type: str
        # Image format of the captcha.
        self.captcha_format = captcha_format  # type: str
        # Identifier of the captcha image.
        self.captcha_id = captcha_id  # type: str

    def validate(self):
        """All three fields are required."""
        for field in self._FIELDS:
            self.validate_required(getattr(self, field), field)

    def to_map(self):
        """Serialize to a plain dict, omitting fields that are None."""
        result = {}
        for field in self._FIELDS:
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        return result

    def from_map(self, map={}):
        """Populate fields from a dict; None/missing entries are ignored."""
        for field in self._FIELDS:
            if map.get(field) is not None:
                setattr(self, field, map.get(field))
        return self
class CompleteFileResponse(TeaModel):
    """complete file response

    Serializable model describing a file after an upload completes.
    ``to_map``/``from_map`` convert between this model and a plain dict.
    NOTE: attribute ``crc_64hash`` maps to dict key ``'crc64_hash'`` (the
    only field whose attribute name and map key differ).
    """
    def __init__(self, category=None, content_hash=None, content_hash_name=None, content_type=None, crc_64hash=None,
                 created_at=None, description=None, domain_id=None, download_url=None, drive_id=None, encrypt_mode=None,
                 file_extension=None, file_id=None, hidden=None, image_media_metadata=None, labels=None, meta=None, name=None,
                 parent_file_id=None, punish_flag=None, size=None, starred=None, status=None, streams_info=None, thumbnail=None,
                 trashed_at=None, type=None, updated_at=None, upload_id=None, url=None, user_meta=None,
                 video_media_metadata=None, video_preview_metadata=None):
        # category
        self.category = category  # type: str
        # Content Hash
        self.content_hash = content_hash  # type: str
        # content_hash_name
        self.content_hash_name = content_hash_name  # type: str
        # content_type
        self.content_type = content_type  # type: str
        # crc64_hash (serialized under the dict key 'crc64_hash')
        self.crc_64hash = crc_64hash  # type: str
        # created_at
        self.created_at = created_at  # type: str
        # description
        self.description = description  # type: str
        # DomainID
        self.domain_id = domain_id  # type: str
        # download_url
        self.download_url = download_url  # type: str
        # drive_id
        self.drive_id = drive_id  # type: str
        # encrypt_mode
        self.encrypt_mode = encrypt_mode  # type: str
        # file_extension
        self.file_extension = file_extension  # type: str
        # file_id
        self.file_id = file_id  # type: str
        # Hidden
        # type: boolean
        self.hidden = hidden  # type: bool
        # nested image metadata model
        self.image_media_metadata = image_media_metadata  # type: ImageMediaResponse
        # labels
        self.labels = labels  # type: List[str]
        # opaque metadata string
        self.meta = meta  # type: str
        # name
        self.name = name  # type: str
        # parent_file_id
        self.parent_file_id = parent_file_id  # type: str
        # punish flag (integer code)
        self.punish_flag = punish_flag  # type: int
        # Size
        self.size = size  # type: int
        # starred
        # type: boolean
        self.starred = starred  # type: bool
        # status
        self.status = status  # type: str
        # @Deprecated streams url info
        self.streams_info = streams_info  # type: dict
        # thumbnail
        self.thumbnail = thumbnail  # type: str
        # trashed_at
        self.trashed_at = trashed_at  # type: str
        # type
        self.type = type  # type: str
        # updated_at
        self.updated_at = updated_at  # type: str
        # upload_id
        self.upload_id = upload_id  # type: str
        # url
        self.url = url  # type: str
        # user_meta
        self.user_meta = user_meta  # type: str
        # nested video metadata model
        self.video_media_metadata = video_media_metadata  # type: VideoMediaResponse
        # nested video preview metadata model
        self.video_preview_metadata = video_preview_metadata  # type: VideoPreviewResponse

    def validate(self):
        """Validate patterns, lengths and numeric bounds; only ``name`` is required."""
        if self.domain_id is not None:
            self.validate_pattern(self.domain_id, 'domain_id', '[a-z0-9A-Z]+')
        if self.drive_id is not None:
            self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
        if self.file_id is not None:
            self.validate_max_length(self.file_id, 'file_id', 50)
            self.validate_pattern(self.file_id, 'file_id', '[a-z0-9]{1,50}')
        if self.image_media_metadata:
            self.image_media_metadata.validate()
        self.validate_required(self.name, 'name')
        if self.name is not None:
            self.validate_pattern(self.name, 'name', '[a-zA-Z0-9.-]{1,1000}')
        if self.parent_file_id is not None:
            self.validate_max_length(self.parent_file_id, 'parent_file_id', 50)
            self.validate_pattern(self.parent_file_id, 'parent_file_id', '[a-z0-9]{1,50}')
        if self.size is not None:
            # 53687091200 bytes == 50 GiB upper bound
            self.validate_maximum(self.size, 'size', 53687091200)
            self.validate_minimum(self.size, 'size', 0)
        if self.video_media_metadata:
            self.video_media_metadata.validate()
        if self.video_preview_metadata:
            self.video_preview_metadata.validate()

    def to_map(self):
        """Serialize to a plain dict, omitting None fields; nested models
        are serialized through their own ``to_map``."""
        result = {}
        if self.category is not None:
            result['category'] = self.category
        if self.content_hash is not None:
            result['content_hash'] = self.content_hash
        if self.content_hash_name is not None:
            result['content_hash_name'] = self.content_hash_name
        if self.content_type is not None:
            result['content_type'] = self.content_type
        if self.crc_64hash is not None:
            result['crc64_hash'] = self.crc_64hash
        if self.created_at is not None:
            result['created_at'] = self.created_at
        if self.description is not None:
            result['description'] = self.description
        if self.domain_id is not None:
            result['domain_id'] = self.domain_id
        if self.download_url is not None:
            result['download_url'] = self.download_url
        if self.drive_id is not None:
            result['drive_id'] = self.drive_id
        if self.encrypt_mode is not None:
            result['encrypt_mode'] = self.encrypt_mode
        if self.file_extension is not None:
            result['file_extension'] = self.file_extension
        if self.file_id is not None:
            result['file_id'] = self.file_id
        if self.hidden is not None:
            result['hidden'] = self.hidden
        if self.image_media_metadata is not None:
            result['image_media_metadata'] = self.image_media_metadata.to_map()
        if self.labels is not None:
            result['labels'] = self.labels
        if self.meta is not None:
            result['meta'] = self.meta
        if self.name is not None:
            result['name'] = self.name
        if self.parent_file_id is not None:
            result['parent_file_id'] = self.parent_file_id
        if self.punish_flag is not None:
            result['punish_flag'] = self.punish_flag
        if self.size is not None:
            result['size'] = self.size
        if self.starred is not None:
            result['starred'] = self.starred
        if self.status is not None:
            result['status'] = self.status
        if self.streams_info is not None:
            result['streams_info'] = self.streams_info
        if self.thumbnail is not None:
            result['thumbnail'] = self.thumbnail
        if self.trashed_at is not None:
            result['trashed_at'] = self.trashed_at
        if self.type is not None:
            result['type'] = self.type
        if self.updated_at is not None:
            result['updated_at'] = self.updated_at
        if self.upload_id is not None:
            result['upload_id'] = self.upload_id
        if self.url is not None:
            result['url'] = self.url
        if self.user_meta is not None:
            result['user_meta'] = self.user_meta
        if self.video_media_metadata is not None:
            result['video_media_metadata'] = self.video_media_metadata.to_map()
        if self.video_preview_metadata is not None:
            result['video_preview_metadata'] = self.video_preview_metadata.to_map()
        return result

    def from_map(self, map={}):
        """Populate fields from a dict; nested metadata dicts are decoded
        into their response models. Missing/None entries are ignored.
        Returns ``self``."""
        if map.get('category') is not None:
            self.category = map.get('category')
        if map.get('content_hash') is not None:
            self.content_hash = map.get('content_hash')
        if map.get('content_hash_name') is not None:
            self.content_hash_name = map.get('content_hash_name')
        if map.get('content_type') is not None:
            self.content_type = map.get('content_type')
        if map.get('crc64_hash') is not None:
            self.crc_64hash = map.get('crc64_hash')
        if map.get('created_at') is not None:
            self.created_at = map.get('created_at')
        if map.get('description') is not None:
            self.description = map.get('description')
        if map.get('domain_id') is not None:
            self.domain_id = map.get('domain_id')
        if map.get('download_url') is not None:
            self.download_url = map.get('download_url')
        if map.get('drive_id') is not None:
            self.drive_id = map.get('drive_id')
        if map.get('encrypt_mode') is not None:
            self.encrypt_mode = map.get('encrypt_mode')
        if map.get('file_extension') is not None:
            self.file_extension = map.get('file_extension')
        if map.get('file_id') is not None:
            self.file_id = map.get('file_id')
        if map.get('hidden') is not None:
            self.hidden = map.get('hidden')
        if map.get('image_media_metadata') is not None:
            temp_model = ImageMediaResponse()
            self.image_media_metadata = temp_model.from_map(map['image_media_metadata'])
        if map.get('labels') is not None:
            self.labels = map.get('labels')
        if map.get('meta') is not None:
            self.meta = map.get('meta')
        if map.get('name') is not None:
            self.name = map.get('name')
        if map.get('parent_file_id') is not None:
            self.parent_file_id = map.get('parent_file_id')
        if map.get('punish_flag') is not None:
            self.punish_flag = map.get('punish_flag')
        if map.get('size') is not None:
            self.size = map.get('size')
        if map.get('starred') is not None:
            self.starred = map.get('starred')
        if map.get('status') is not None:
            self.status = map.get('status')
        if map.get('streams_info') is not None:
            self.streams_info = map.get('streams_info')
        if map.get('thumbnail') is not None:
            self.thumbnail = map.get('thumbnail')
        if map.get('trashed_at') is not None:
            self.trashed_at = map.get('trashed_at')
        if map.get('type') is not None:
            self.type = map.get('type')
        if map.get('updated_at') is not None:
            self.updated_at = map.get('updated_at')
        if map.get('upload_id') is not None:
            self.upload_id = map.get('upload_id')
        if map.get('url') is not None:
            self.url = map.get('url')
        if map.get('user_meta') is not None:
            self.user_meta = map.get('user_meta')
        if map.get('video_media_metadata') is not None:
            temp_model = VideoMediaResponse()
            self.video_media_metadata = temp_model.from_map(map['video_media_metadata'])
        if map.get('video_preview_metadata') is not None:
            temp_model = VideoPreviewResponse()
            self.video_preview_metadata = temp_model.from_map(map['video_preview_metadata'])
        return self
class ConfirmLinkRequest(TeaModel):
    """Request model for confirming an account link.

    Attributes:
        headers: optional HTTP headers to send with the request.
        temporary_token: temporary token to be bound; this token can only
            access the bind / cancel-bind endpoints. Required.
    """
    def __init__(self, headers=None, temporary_token=None):
        self.headers = headers  # type: Dict[str, str]
        # Temporary token to be bound; grants access to the bind and
        # cancel-bind endpoints only.
        self.temporary_token = temporary_token  # type: str

    def validate(self):
        """Raise if the required ``temporary_token`` field is missing."""
        self.validate_required(self.temporary_token, 'temporary_token')

    def to_map(self):
        """Serialize this model to a plain dict, omitting unset fields."""
        result = {}
        if self.headers is not None:
            result['headers'] = self.headers
        if self.temporary_token is not None:
            result['temporary_token'] = self.temporary_token
        return result

    def from_map(self, map=None):
        """Populate this model from a dict; None/missing entries are ignored.

        The parameter keeps its original name ``map`` for call-site
        compatibility; the mutable ``{}`` default was replaced with ``None``
        to avoid the shared-mutable-default pitfall. Returns ``self``.
        """
        map = map if map is not None else {}
        if map.get('headers') is not None:
            self.headers = map.get('headers')
        if map.get('temporary_token') is not None:
            self.temporary_token = map.get('temporary_token')
        return self
class CopyFileResponse(TeaModel):
"""
文件拷贝 response
"""
def __init__(self, async_task_id=None, domain_id=None, drive_id=None, file_id=None):
# async_task_id
self.async_task_id = async_task_id # type: str
# DomainID
self.domain_id = domain_id # type: str
# drive_id
self.drive_id = drive_id # type: str
# file_id
self.file_id = file_id # type: str
def validate(self):
if self.domain_id is not None:
self.validate_pattern(self.domain_id, 'domain_id', '[a-z0-9A-Z]+')
if self.drive_id is not None:
self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
if self.file_id is not None:
self.validate_max_length(self.file_id, 'file_id', 50)
self.validate_pattern(self.file_id, 'file_id', '[a-z0-9]{1,50}')
def to_map(self):
result = {}
if self.async_task_id is not None:
result['async_task_id'] = self.async_task_id
if self.domain_id is | |
#!/usr/bin/env python3
#######################
#
# FireEye API
#
# Copyright (c) 2015 United States Government/National Institutes of Health
# Author: <NAME>
# Modifications from original made by: <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#####################
import re
import json
import demjson
import requests
import lxml.etree
class ApiVersion110(object):
    """Client for the FireEye AX Web Services API, version 1.1.0.

    The ``valid_*`` tuples below are not referenced by the methods visible
    in this chunk; presumably they enumerate the values callers may pass
    for the corresponding API filters/options — confirm against the
    FireEye WSAPI documentation.
    """
    # NOTE(review): candidate values for the alert `duration` filter — verify.
    valid_durations = ('1_hour', '2_hours', '6_hours',
                       '12_hours', '24_hours', '48_hours')
    # NOTE(review): candidate values for the alert `info_level` filter — verify.
    valid_info_levels = ('concise', 'normal', 'extended')
    # NOTE(review): candidate report time-range selectors — verify.
    valid_report_durations = ('pastWeek', 'pastMonth',
                              'pastThreeMonths', 'rangeReport')
    # NOTE(review): guest VM profiles for sandbox submissions — verify.
    valid_profiles = ('winxp-sp2', 'winxp-sp3', 'win7-sp1', 'win7x64-sp1')
    # NOTE(review): response content types the client can request — verify.
    valid_content_types = ('application/json', 'application/xml')
def __init__(self, ax_address, verifySSL=True, apiToken=None,
             clientToken=None, contentType='application/json'):
    """Bind the client to the AX appliance at *ax_address*.

    Parameter names keep the original camelCase public surface.
    ``apiToken``/``clientToken`` may be supplied up front or obtained
    later via ``authenticate``; ``verifySSL=False`` disables TLS
    certificate verification on every request.
    """
    # All endpoint URLs are rooted at the v1.1.0 web-services path.
    self.base_url = 'https://{}/wsapis/v1.1.0'.format(ax_address)
    self.api_token = apiToken
    self.client_token = clientToken
    self.content_type = contentType
    self.verify_SSL = verifySSL
def authenticate(self, username, password):
    """Log in with HTTP basic auth and store the returned API token.

    On HTTP 200 the ``X-FeApi-Token`` response header is saved to
    ``self.api_token`` and the raw response is returned; known error
    statuses raise with a specific message.
    """
    url = '{}/auth/login?'.format(self.base_url)
    headers = {}
    if self.client_token:
        headers['X-FeClient-Token'] = self.client_token
    response = requests.post(url, auth=(username, password),
                             headers=headers, verify=self.verify_SSL)
    status = response.status_code
    if status == 200:
        self.api_token = response.headers['X-FeApi-Token']
        return response
    if status == 400:
        raise Exception('Authentication Refused')
    if status == 503:
        raise Exception('Web Services API server disabled')
    raise Exception('Unexpected response: {}'.format(response.text))
def retrieve_alert(self, **kwargs):
    """Query the /alerts endpoint, forwarding recognised filter kwargs.

    Recognised filters (all others are silently dropped): alert_id,
    duration, info_level, file_name, file_type, url, md5, malware_name,
    malware_type, start_time, end_time. Returns the raw response on
    HTTP 200; raises on error statuses.
    """
    recognised = frozenset(('alert_id', 'duration', 'info_level',
                            'file_name', 'file_type', 'url', 'md5',
                            'malware_name', 'malware_type',
                            'start_time', 'end_time'))
    headers = {'X-FeApi-Token': self.api_token,
               'Accept': self.content_type}
    if self.client_token:
        headers['X-FeClient-Token'] = self.client_token
    # Each filter value is wrapped in double quotes, matching the WSAPI form.
    url = '{}/alerts?'.format(self.base_url)
    for key, value in kwargs.items():
        if key in recognised:
            url += '&{}="{}"'.format(key, value)
    response = requests.get(url, headers=headers, verify=self.verify_SSL)
    if response.status_code == 400:
        raise Exception('Filter value invalid')
    if response.status_code != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def retrieve_report_by_reportID(self, reportID, formatPDF=False,
                                reportType='alertDetailsReport'):
    """Fetch a report addressed by its report ID, optionally as PDF.

    ``formatPDF=True`` overrides the configured Accept content type with
    ``application/pdf``. Returns the raw response on HTTP 200; raises
    otherwise.
    """
    url = '{}/reports/report?report_type={}&id={}'.format(
        self.base_url, reportType, reportID)
    accept = 'application/pdf' if formatPDF else self.content_type
    headers = {'X-FeApi-Token': self.api_token, 'Accept': accept}
    if self.client_token:
        headers['X-FeClient-Token'] = self.client_token
    response = requests.get(url, headers=headers, verify=self.verify_SSL)
    if response.status_code != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def retrieve_report_by_infectionID(self, infectionID, formatPDF=False,
                                   infectionType='malware-object',
                                   reportType='alertDetailsReport'):
    """Fetch a report addressed by infection ID and type, optionally as PDF.

    ``formatPDF=True`` overrides the configured Accept content type with
    ``application/pdf``. Returns the raw response on HTTP 200; raises
    otherwise.
    """
    url = ('{}/reports/report?report_type={}&infection_id={}'
           '&infection_type={}').format(self.base_url, reportType,
                                        infectionID, infectionType)
    accept = 'application/pdf' if formatPDF else self.content_type
    headers = {'X-FeApi-Token': self.api_token, 'Accept': accept}
    if self.client_token:
        headers['X-FeClient-Token'] = self.client_token
    response = requests.get(url, headers=headers, verify=self.verify_SSL)
    if response.status_code != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def query_configuration(self):
    """Retrieve the appliance configuration from the /config endpoint.

    Returns the raw response on HTTP 200; raises on 401 (bad session
    token) or any other status.
    """
    headers = {'X-FeApi-Token': self.api_token,
               'Accept': self.content_type}
    if self.client_token:
        headers['X-FeClient-Token'] = self.client_token
    response = requests.get('{}/config'.format(self.base_url),
                            headers=headers, verify=self.verify_SSL)
    if response.status_code == 401:
        raise Exception('Invalid session token')
    if response.status_code != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def submit_file(self, fileHandle, fileName,
                profiles=None,
                analysisType='2',
                force='false',
                timeout='500',
                application='0',
                prefetch='1',
                priority='0'):
    """Submit a file for sandbox analysis.

    The analysis options are serialized into the JSON 'options' part of a
    multipart POST; the file content goes in the 'filename' part.
    profiles defaults to ['win7-sp1'] when not given.
    Raises Exception('Filter value invalid') on HTTP 400, or a generic
    Exception for any other non-200 status.

    Bug fix: profiles previously used a mutable list literal as its
    default argument, which is a single object shared across calls.
    """
    if profiles is None:
        profiles = ['win7-sp1']
    headers = {'X-FeApi-Token': self.api_token,
               'Accept': self.content_type}
    url = '{}/submissions'.format(self.base_url)
    rawData = {'application': str(application),
               'timeout': str(timeout),
               'priority': str(priority),
               'profiles': profiles,
               'analysistype': str(analysisType),
               'force': str(force),
               'prefetch': str(prefetch)}
    submissionData = ('', json.dumps(rawData), 'application/json')
    # Ensure file handle is at top of file so the full content is read.
    fileHandle.seek(0)
    fileData = (fileName, fileHandle.read())
    files = {'options': submissionData,
             'filename': fileData}
    response = requests.post(url, headers=headers, files=files,
                             verify=self.verify_SSL)
    if response.status_code == 400:
        raise Exception('Filter value invalid')
    if response.status_code != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def submit_url(self, urls,
               profiles=None,
               analysisType='2',
               force='false',
               timeout='500',
               application='0',
               prefetch='1',
               priority='0'):
    """Submit one or more URLs for sandbox analysis.

    urls may be a single URL string, a list, or a tuple; it is normalized
    to a list before submission.  profiles defaults to ['win7-sp1'].
    Raises Exception on HTTP 400 (invalid filter), 500 (server issue),
    or any other non-200 status.

    Bug fix: profiles previously used a mutable list literal as its
    default argument, which is a single object shared across calls.
    """
    if profiles is None:
        profiles = ['win7-sp1']
    headers = {'X-FeApi-Token': self.api_token,
               'Accept': self.content_type}
    url = '{}/submissions/url'.format(self.base_url)
    # Normalize tuples and bare single-URL strings to a list.
    if isinstance(urls, tuple):
        urls = list(urls)
    elif not isinstance(urls, list):
        urls = [urls]
    rawData = {'urls': urls,
               'application': str(application),
               'timeout': str(timeout),
               'priority': str(priority),
               'profiles': profiles,
               'analysistype': str(analysisType),
               'force': str(force),
               'prefetch': str(prefetch)}
    submissionData = ('', json.dumps(rawData), 'application/json')
    files = {'options': submissionData}
    response = requests.post(url, headers=headers, files=files,
                             verify=self.verify_SSL)
    if response.status_code == 400:
        raise Exception('Filter value invalid')
    if response.status_code == 500:
        raise Exception('Server encountered issue, retry later')
    if response.status_code != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def query_submission_status(self, submissionKey):
    """Poll the analysis status of a previously submitted sample.

    NOTE(review): unlike the other endpoints, this one does not attach
    X-FeClient-Token — confirm whether that is intentional.
    Raises Exception for 401 (bad token), 404 (bad key), or other non-200.
    """
    headers = {'X-FeApi-Token': self.api_token,
               'Accept': self.content_type}
    url = '{}/submissions/status/{}'.format(self.base_url, submissionKey)
    response = requests.get(url, headers=headers, verify=self.verify_SSL)
    status = response.status_code
    known_errors = {401: 'Invalid session token',
                    404: 'Invalid submission key'}
    if status in known_errors:
        raise Exception(known_errors[status])
    if status != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def retrieve_submission_results(self, submissionKey, infoLevel='extended'):
    """Fetch analysis results for a submission key.

    infoLevel is appended as a query parameter only when it is one of
    self.valid_info_levels.  Raises Exception for 401, 404, or any other
    non-200 status.
    """
    url = '{}/submissions/results/{}'.format(self.base_url, submissionKey)
    if infoLevel and (infoLevel in self.valid_info_levels):
        url += '?info_level={}'.format(infoLevel)
    headers = {'X-FeApi-Token': self.api_token,
               'Accept': self.content_type}
    response = requests.get(url, headers=headers, verify=self.verify_SSL)
    status = response.status_code
    if status == 401:
        raise Exception('Invalid session token')
    if status == 404:
        raise Exception('Invalid submission key')
    if status != 200:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
def logout(self):
    """Terminate the API session; the appliance answers HTTP 204 on success.

    Raises Exception('Missing session token') on HTTP 304, or a generic
    Exception for any other unexpected status.
    """
    headers = {'X-FeApi-Token': self.api_token,
               'Accept': self.content_type}
    response = requests.post('{}/auth/logout'.format(self.base_url),
                             headers=headers, verify=self.verify_SSL)
    status = response.status_code
    if status == 304:
        raise Exception('Missing session token')
    if status != 204:
        raise Exception('Unexpected response: {}'.format(response.text))
    return response
class ResponseHandler(object):
xml_namespaces = {'f': 'http://www.fireeye.com/alert/2011/AlertSchema'}
valid_response_types = ('json', 'xml', 'text')
def __init__(self, responseObject, responseType='json'):
    """Wrap a requests response (or an already-parsed object) for extraction.

    responseType selects parsing: 'json' (with a regex-cleanup + demjson
    fallback for malformed appliance JSON), 'xml' (lxml element tree),
    or 'text'.  Non-requests objects are stored as-is.
    Raises Exception for an unknown responseType or unparseable JSON.
    """
    if responseType not in self.valid_response_types:
        raise Exception('Invalid response type specified')
    self.response_type = responseType
    # Check if responseObject is requests response object
    if isinstance(responseObject, requests.models.Response):
        # Process requests response object depending on specified type
        if responseType == 'json':
            try:
                self.response_object = responseObject.json()
            except ValueError:
                # Attempt to cleanup malformed or unwanted JSON elements
                # from FireEye and then use demjson to load the object
                cleanedObject = re.sub(r'\n\s+', '', responseObject.text)
                cleanedObject = re.sub(r'\n', '', cleanedObject)
                cleanedObject = re.sub(r'(\"+)?N/A(\"+)?', '\"N/A\"', cleanedObject)
                self.response_object = demjson.decode(cleanedObject)
            except Exception:
                # Bug fix: was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt before re-raising.
                message = 'JSON parsing error of response:\n{}'\
                          .format(responseObject.text)
                raise Exception(message)
        elif responseType == 'xml':
            self.response_object = lxml.etree.fromstring(responseObject.content)
        elif responseType == 'text':
            self.response_object = responseObject.text
        else:  # placeholder for future types
            self.response_object = responseObject.text
    else:
        self.response_object = responseObject
def find_md5(self):
    """Dispatch md5 extraction according to the configured response type."""
    dispatch = {'json': self._findMd5JSON,
                'xml': self._findMd5XML}
    handler = dispatch.get(self.response_type)
    if handler is None:
        raise Exception('Invalid response type for find md5 method')
    return handler()
def _findMd5JSON(self):
    """Collect all non-empty 'md5sum' values from the JSON malware sections."""
    return self._lookForKeyInJsonList(self._findMalwareSectionJSON(), 'md5sum')
def _findMd5XML(self):
    """Extract md5sum values from an XML alert document.

    Tries the plain (non-namespaced) appliance schema first and falls back
    to the namespaced AlertSchema when that yields nothing.

    Bug fix: the original XPath was '/notification/malware/analyis/md5sum/'
    -- a typo ("analyis") plus a trailing slash that made the expression
    invalid, so the first branch could never match.  The bare 'except:' is
    also narrowed while keeping the best-effort empty-list behavior.
    """
    md5_entries = []
    try:
        md5_entries = self.response_object.xpath(
            '/notification/malware/analysis/md5sum')
    except lxml.etree.XPathEvalError:
        md5_entries = []
    if not md5_entries:
        try:
            xpath_value = ('/f:alerts/f:alert/f:explanation/'
                           'f:malware-detected/f:malware/f:md5sum')
            md5_entries = self.response_object.xpath(
                xpath_value, namespaces=self.xml_namespaces)
        except Exception:
            md5_entries = []
    return [md5.text for md5 in md5_entries]
def find_profiles(self):
    """Dispatch profile extraction according to the configured response type."""
    dispatch = {'json': self._findProfilesJSON,
                'xml': self._findProfilesXML}
    handler = dispatch.get(self.response_type)
    if handler is None:
        raise Exception('Invalid response type for find profiles method')
    return handler()
def _findProfilesJSON(self):
    """Collect all non-empty 'profile' values from the JSON malware sections."""
    return self._lookForKeyInJsonList(self._findMalwareSectionJSON(), 'profile')
def _findProfilesXML(self):
    """Extract analysis profile names from an XML alert document.

    Bug fix: the namespaced AlertSchema lookup was dead code -- it only ran
    on XPathEvalError, but the first XPath is syntactically valid, so a
    namespaced document silently produced an empty result.  Now the
    namespaced path is also tried whenever the plain lookup finds nothing,
    and the bare 'except:' is narrowed while keeping best-effort behavior.
    """
    profile_entries = []
    try:
        profile_entries = self.response_object.xpath(
            '/notification/malware/analysis/profile/name')
    except lxml.etree.XPathEvalError:
        profile_entries = []
    if not profile_entries:
        try:
            xpath_value = ('/f:alerts/f:alert/f:explanation/'
                           'f:malware-detected/f:malware/f:profile')
            profile_entries = self.response_object.xpath(
                xpath_value, namespaces=self.xml_namespaces)
        except Exception:
            profile_entries = []
    return [profile.text for profile in profile_entries]
def find_malware_section(self):
    """Dispatch malware-section extraction by the configured response type."""
    dispatch = {'json': self._findMalwareSectionJSON,
                'xml': self._findMalwareSectionXML}
    handler = dispatch.get(self.response_type)
    if handler is None:
        raise Exception('Invalid response type for find malware section method')
    return handler()
def _findMalwareSectionJSON(self):
    """Walk alert -> explanation -> malware-detected and gather every
    'malware' entry, each level normalised to a list."""
    return [
        section
        for alert in self._lookForListOrDict(self.response_object, 'alert')
        for explanation in self._lookForListOrDict(alert, 'explanation')
        for detection in self._lookForListOrDict(explanation, 'malware-detected')
        for section in self._lookForListOrDict(detection, 'malware')
    ]
def _findMalwareSectionXML(self):
    """Extract malware/analysis section text from an XML alert document.

    Bug fix: the namespaced AlertSchema lookup only ran on XPathEvalError,
    but the first XPath is valid, so namespaced documents silently yielded
    an empty result; the namespaced path is now also tried whenever the
    plain lookup finds nothing, and the bare 'except:' is narrowed.
    NOTE(review): returning element .text keeps the original behavior but
    yields only the element's direct text, not the serialized subtree --
    confirm that is what callers expect.
    """
    malware_entries = []
    try:
        malware_entries = self.response_object.xpath(
            '/notification/malware/analysis')
    except lxml.etree.XPathEvalError:
        malware_entries = []
    if not malware_entries:
        try:
            xpath_value = ('/f:alerts/f:alert/f:explanation/'
                           'f:malware-detected/f:malware')
            malware_entries = self.response_object.xpath(
                xpath_value, namespaces=self.xml_namespaces)
        except Exception:
            malware_entries = []
    return [section.text for section in malware_entries]
def _lookForListOrDict(self, subDict, targetName):
    """Return subDict[targetName] normalised to a list.

    A lone dict is wrapped in a one-element list; a missing key or falsy
    value yields []; any other truthy value is returned unchanged.
    """
    if targetName not in subDict:
        return []
    value = subDict[targetName]
    if not value:
        return []
    return [value] if isinstance(value, dict) else value
def _lookForKeyInJsonList(self, targetList, targetKey):
    """Collect targetKey values from each entry, skipping missing/empty ones."""
    return [entry[targetKey]
            for entry in targetList
            if targetKey in entry and entry[targetKey] != ""]
def format_JSON(self):
formattedObject = {}
# Traversal logic:
# http://nvie.com/posts/modifying-deeply-nested-structures/
def traverse(obj):
if isinstance(obj, dict):
return {k: traverse(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [traverse(elem) for elem in | |
# peripheral/tcc_u2213/config/tcc.py
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
# Shared module state, populated by instantiateComponent() and consumed by
# the callbacks below.
# NOTE: 'global' at module scope is a no-op in Python; the declarations are
# retained only as documentation of intent.
global InterruptVector
InterruptVector = []        # interrupt vector-enable symbol IDs in the "core" component
global InterruptHandler
InterruptHandler = []       # interrupt handler-name symbol IDs in the "core" component
global InterruptHandlerLock
InterruptHandlerLock = []   # interrupt handler-lock symbol IDs in the "core" component
global InterruptVectorUpdate
InterruptVectorUpdate = []  # symbol IDs watched for vector-change warnings
global tccInstanceName      # string symbol holding the instance name, e.g. "TCC0"
global intPrev
intPrev = 0
global numOfChannels        # compare/capture channel count read from the ATDF
# Per-channel symbol collections, filled in by instantiateComponent():
tccSym_Channel_Menu = []
tccSym_Channel_CC = []
tccSym_Channel_Polarity = []
tccSym_Channel_WAVE_SWAP = []
tccSym_Channel_WEXCTRL_DTIEN = []
tccSym_Channel_INTENSET_MC = []
tccSym_Channel_EVCTRL_MCEO = []
tccSym_DRVCTRL_NRE_NRV = []
tccSym_PATT_PGE = []
tccSym_PATT_PGV = []
###################################################################################################
########################################## Callbacks #############################################
###################################################################################################
def tccEvsys(symbol, event):
    """Callback: keep EVSYS generator/user entries in sync with the TCC
    event-control symbols (overflow out, event input action, match out)."""
    # Overflow event output toggles the instance's OVF generator entry.
    if(event["id"] == "TCC_EVCTRL_OVFEO"):
        Database.setSymbolValue("evsys", "GENERATOR_"+str(tccInstanceName.getValue())+"_OVF_ACTIVE", event["value"], 2)
    # Event input action: clear both event-user entries first, then
    # re-enable only the one that matches the selected action.
    if(event["id"] == "TCC_EVCTRL_EVACT"):
        Database.setSymbolValue("evsys", "USER_"+str(tccInstanceName.getValue())+"_EV_0_READY", False, 2)
        Database.setSymbolValue("evsys", "USER_"+str(tccInstanceName.getValue())+"_EV_1_READY", False, 2)
        if (event["value"] == "Event 0 Rising Edge" or event["value"] == "Event 0 Falling Edge"):
            Database.setSymbolValue("evsys", "USER_"+str(tccInstanceName.getValue())+"_EV_0_READY", True, 2)
        elif (event["value"] == "Event 1 Rising Edge" or event["value"] == "Event 1 Falling Edge"):
            Database.setSymbolValue("evsys", "USER_"+str(tccInstanceName.getValue())+"_EV_1_READY", True, 2)
    # Per-channel match/capture event output, e.g. id "TCC_EVCTRL_MC_0":
    # everything after "TCC_EVCTRL" ("MC_<n>") becomes the generator suffix.
    if("EVCTRL_MC" in event["id"]):
        mcInstance = event["id"].split("_")[2:]
        event_name = "_" + "_".join(mcInstance)
        print(event_name)  # debug trace left in the configurator console log
        Database.setSymbolValue("evsys", "GENERATOR_"+str(tccInstanceName.getValue())+ str(event_name)+"_ACTIVE", event["value"], 2)
def updateTCCInterruptStatus(symbol, event):
    """Callback: propagate the TCC interrupt-enable symbols to the core
    interrupt symbols, switching the handler name between
    "<instance>_PWMInterruptHandler" (interrupt in use) and
    "<instance>_Handler" (default) as enables change."""
    component = symbol.getComponent()
    # For single interrupt line for peripheral
    if len(InterruptVector) == 1:
        if (component.getSymbolValue("TCC_INTENSET_OVF") or component.getSymbolValue("TCC_INTENSET_FAULT0") or
            component.getSymbolValue("TCC_INTENSET_FAULT1")):
            Database.setSymbolValue("core", InterruptVector[0], True, 2)
            Database.setSymbolValue("core", InterruptHandlerLock[0], True, 2)
            Database.setSymbolValue("core", InterruptHandler[0], tccInstanceName.getValue() + "_PWMInterruptHandler", 2)
        else:
            Database.setSymbolValue("core", InterruptVector[0], False, 2)
            Database.setSymbolValue("core", InterruptHandlerLock[0], False, 2)
            Database.setSymbolValue("core", InterruptHandler[0], tccInstanceName.getValue() + "_Handler", 2)
    # For multiple interrupt lines for peripheral
    else:
        # Line 0 carries OVF/FAULT0/FAULT1; react only when one of those toggled.
        if (event["id"] == "TCC_INTENSET_OVF") or (event["id"] == "TCC_INTENSET_FAULT0") or (event["id"] == "TCC_INTENSET_FAULT1"):
            if (component.getSymbolValue("TCC_INTENSET_OVF") or component.getSymbolValue("TCC_INTENSET_FAULT0") or
                component.getSymbolValue("TCC_INTENSET_FAULT1")):
                Database.setSymbolValue("core", InterruptVector[0], True, 2)
                Database.setSymbolValue("core", InterruptHandlerLock[0], True, 2)
                Database.setSymbolValue("core", InterruptHandler[0], tccInstanceName.getValue() + "_PWMInterruptHandler", 2)
            else:
                Database.setSymbolValue("core", InterruptVector[0], False, 2)
                Database.setSymbolValue("core", InterruptHandlerLock[0], False, 2)
                Database.setSymbolValue("core", InterruptHandler[0], tccInstanceName.getValue() + "_Handler", 2)
        else:
            # Channel match interrupt: the id ends in the channel number,
            # and channel N maps to interrupt line N+1.
            mcInstance = int(event["id"].split("_")[-1])
            Database.setSymbolValue("core", InterruptVector[mcInstance+1], event["value"], 2)
            Database.setSymbolValue("core", InterruptHandlerLock[mcInstance+1], event["value"], 2)
            if event["value"] == True:
                Database.setSymbolValue("core", InterruptHandler[mcInstance+1], InterruptHandler[mcInstance+1].split("_INTERRUPT_HANDLER")[0] + "_InterruptHandler", 2)
            else:
                Database.setSymbolValue("core", InterruptHandler[mcInstance+1], InterruptHandler[mcInstance+1].split("_INTERRUPT_HANDLER")[0] + "_Handler", 2)
def updateTCCInterruptWarningStatus(symbol, event):
    """Show the warning comment when an enabled TCC interrupt's core
    vector symbol is still reported as needing an update."""
    global InterruptVectorUpdate
    global numOfChannels
    component = symbol.getComponent()
    show_warning = False
    basic_irq_enabled = (component.getSymbolValue("TCC_INTENSET_OVF")
                         or component.getSymbolValue("TCC_INTENSET_FAULT0")
                         or component.getSymbolValue("TCC_INTENSET_FAULT1"))
    if basic_irq_enabled:
        if (Database.getSymbolValue("core", InterruptVectorUpdate[0].split(".")[-1]) == True):
            show_warning = True
    else:
        # Check the per-channel match interrupts against the second vector.
        for channel in range(0, numOfChannels):
            if (component.getSymbolValue("TCC_INTENSET_MC_"+str(channel)) == True):
                if (Database.getSymbolValue("core", InterruptVectorUpdate[1].split(".")[-1]) == True):
                    show_warning = True
    symbol.setVisible(show_warning)
def updateTCCClockWarningStatus(symbol, event):
    """Display the clock warning comment whenever the TCC clock is disabled."""
    clock_enabled = event["value"]
    symbol.setVisible(not clock_enabled)
def tccDirVisible(symbol, event):
    """Show the count-down option only for single-slope PWM ("NPWM") while
    not in slave mode; hide it whenever slave mode is enabled."""
    if event["id"] == "TCC_SLAVE_MODE":
        symbol.setVisible(not event["value"])
    elif event["id"] == "TCC_WAVE_WAVEGEN" and tccSym_Slave_Mode.getValue() == False:
        selected = event["symbol"].getSelectedKey()
        symbol.setVisible(selected == "NPWM")
def tccFaultVisible(symbol, event):
    """Show the fault options whenever the fault source is not 'Disabled'."""
    symbol.setVisible(event["value"] != "Disabled")
def tccDeadTimeVisible(symbol, event):
    """Show the dead-time settings when any of the four per-channel
    dead-time insertion enables (DTIEN) is set."""
    dead_time_enabled = any(
        tccSym_Channel_WEXCTRL_DTIEN[channel].getValue() == True
        for channel in range(4)
    )
    symbol.setVisible(dead_time_enabled)
def tccPattgenVisible(symbol, event):
    """Mirror the pattern-generation enable onto this symbol's visibility."""
    symbol.setVisible(event["value"] == True)
def tccPWMFreqCalc(symbol, event):
    """Recompute and display the achieved PWM frequency comment:
    frequency = clock / prescaler / (PER + 1) / slope, slope being 2 for
    dual-slope waveforms.  Hidden while the instance runs in slave mode.
    NOTE(review): the '/' semantics (integer vs float) depend on the
    Python runtime hosting this script -- left untouched."""
    if (tccSym_Slave_Mode.getValue() == False):
        clock_freq = Database.getSymbolValue("core", tccInstanceName.getValue()+"_CLOCK_FREQUENCY")
        if clock_freq == 0:
            clock_freq = 1  # guard against division by zero before the clock is configured
        # Prescaler keys are presumably of the form "DIVn" -- TODO confirm.
        prescaler = int(tccSym_CTRLA_PRESCALER.getSelectedKey()[3:])
        period = tccSym_PER_PER.getValue() + 1
        if (tccSym_WAVE_WAVEGEN.getValue() == 0): #single slope PWM
            slope = 1
        else:
            slope = 2
        frequency = ((clock_freq / prescaler) / period) / slope
        symbol.setLabel("**** PWM Frequency is " +str(frequency)+ " Hz ****")
        symbol.setVisible(True)
    elif (event["id"] == "TCC_SLAVE_MODE"):
        symbol.setVisible(not event["value"])
def tccDeadTimeCalc(symbol, event):
    """Refresh the low/high-side dead-time comment labels, converting the
    DTLS/DTHS register values into microseconds at the current clock."""
    clock = Database.getSymbolValue("core", tccInstanceName.getValue()+"_CLOCK_FREQUENCY")
    if clock == 0:
        clock = 1  # avoid division by zero before the clock is configured
    sym_id = symbol.getID()
    if sym_id == "TCC_DTLS_COMMENT":
        microseconds = tccSym_WEXCTRL_DTLS.getValue() * 1000000.0 / clock
        symbol.setLabel("**** Low side dead time is " + str(microseconds) + " uS ****")
    if sym_id == "TCC_DTHS_COMMENT":
        microseconds = tccSym_WEXCTRL_DTHS.getValue() * 1000000.0 / clock
        symbol.setLabel("**** High side dead time is " + str(microseconds) + " uS ****")
def tccSlaveCommentVisible(symbol, event):
    """Mirror the slave-mode enable onto the master-dependency comment."""
    slave_enabled = event["value"]
    symbol.setVisible(slave_enabled)
def tccFault0IntVisible(symbol, event):
    """Show the fault-0 interrupt option when the selection mentions Event 0."""
    symbol.setVisible("Event 0" in event["value"])
def tccFault1IntVisible(symbol, event):
    """Show the fault-1 interrupt option when the selection mentions Event 1."""
    symbol.setVisible("Event 1" in event["value"])
def tccSlaveModeVisibility(symbol, event):
    """Hide the symbol while slave mode is enabled.

    The print() is a debug trace in the original; it is kept so the
    configurator console output is unchanged.
    """
    master_visible = not event["value"]
    print(master_visible)
    symbol.setVisible(master_visible)
###################################################################################################
########################################## Component #############################################
###################################################################################################
def instantiateComponent(tccComponent):
global InterruptVector
global InterruptHandler
global InterruptHandlerLock
global tccInstanceName
global tccSym_INTENSET_OVF
global InterruptVectorUpdate
eventDepList = []
interruptDepList = []
tccInstanceName = tccComponent.createStringSymbol("TCC_INSTANCE_NAME", None)
tccInstanceName.setVisible(False)
tccInstanceName.setDefaultValue(tccComponent.getID().upper())
#clock enable
Database.setSymbolValue("core", tccInstanceName.getValue() + "_CLOCK_ENABLE", True, 2)
################################ ATDF ####################################################
node = ATDF.getNode("/avr-tools-device-file/devices/device/peripherals/module@[name=\"TCC\"]/instance@[name=\""+tccInstanceName.getValue()+"\"]/parameters")
global numOfChannels
numOfChannels = 4
deadTimeImplemented = 1
swapImplemented = 1
outputMatrixImplemented = 1
patternGenImplemented = 1
numOfOutputs = 8
size = 24
parameters = []
parameters = node.getChildren()
for param in range (0, len(parameters)):
if(parameters[param].getAttribute("name") == "CC_NUM"):
numOfChannels = int(parameters[param].getAttribute("value"))
if(parameters[param].getAttribute("name") == "DTI"):
deadTimeImplemented = int(parameters[param].getAttribute("value"))
if(parameters[param].getAttribute("name") == "SWAP"):
swapImplemented = int(parameters[param].getAttribute("value"))
if(parameters[param].getAttribute("name") == "OTMX"):
outputMatrixImplemented = int(parameters[param].getAttribute("value"))
if(parameters[param].getAttribute("name") == "OW_NUM"):
numOfOutputs = int(parameters[param].getAttribute("value"))
if(parameters[param].getAttribute("name") == "PG"):
patternGenImplemented = int(parameters[param].getAttribute("value"))
if(parameters[param].getAttribute("name") == "SIZE"):
size = int(parameters[param].getAttribute("value"))
tccSym_NUM_CHANNELS = tccComponent.createIntegerSymbol("TCC_NUM_CHANNELS", None)
tccSym_NUM_CHANNELS.setDefaultValue(int(numOfChannels))
tccSym_NUM_CHANNELS.setVisible(False)
tccSym_NUM_OUTPUTS = tccComponent.createIntegerSymbol("TCC_NUM_OUTPUTS", None)
tccSym_NUM_OUTPUTS.setDefaultValue(int(numOfOutputs))
tccSym_NUM_OUTPUTS.setVisible(False)
tccSym_Is_DeadTime = tccComponent.createIntegerSymbol("TCC_IS_DEAD_TIME", None)
tccSym_Is_DeadTime.setDefaultValue(int(deadTimeImplemented))
tccSym_Is_DeadTime.setVisible(False)
tccSym_Is_Swap = tccComponent.createIntegerSymbol("TCC_IS_SWAP", None)
tccSym_Is_Swap.setDefaultValue(int(swapImplemented))
tccSym_Is_Swap.setVisible(False)
tccSym_Is_OTM = tccComponent.createIntegerSymbol("TCC_IS_OTM", None)
tccSym_Is_OTM.setDefaultValue(int(outputMatrixImplemented))
tccSym_Is_OTM.setVisible(False)
tccSym_Is_PG = tccComponent.createIntegerSymbol("TCC_IS_PG", None)
tccSym_Is_PG.setDefaultValue(int(patternGenImplemented))
tccSym_Is_PG.setVisible(False)
tccSym_SIZE = tccComponent.createIntegerSymbol("TCC_SIZE", None)
tccSym_SIZE.setDefaultValue(int(size))
tccSym_SIZE.setVisible(False)
tccSym_MCU_FAMILY = tccComponent.createStringSymbol("TCC_MCU_FAMILY", None)
tccSym_MCU_FAMILY.setVisible(False)
node = ATDF.getNode("/avr-tools-device-file/devices")
series = node.getChildren()[0].getAttribute("family")
tccSym_MCU_FAMILY.setDefaultValue(node.getChildren()[0].getAttribute("family"))
node = ATDF.getNode("/avr-tools-device-file/modules/module@[name=\"TCC\"]/register-group@[name=\"TCC\"]")
register_names = []
register_names = node.getChildren()
tccSym_CBUF_REG_NAME = tccComponent.createStringSymbol("TCC_CBUF_REG_NAME", None)
tccSym_CBUF_REG_NAME.setVisible(False)
for index in range(0, len(register_names)):
if "CCBUF" in register_names[index].getAttribute("name"):
tccSym_CBUF_REG_NAME.setDefaultValue("CCBUF")
break
else:
tccSym_CBUF_REG_NAME.setDefaultValue("CCB")
tccSym_PBUF_REG_NAME = tccComponent.createStringSymbol("TCC_PBUF_REG_NAME", None)
tccSym_PBUF_REG_NAME.setVisible(False)
for index in range(0, len(register_names)):
if "PERBUF" in register_names[index].getAttribute("name"):
tccSym_PBUF_REG_NAME.setDefaultValue("PERBUF")
break
else:
tccSym_PBUF_REG_NAME.setDefaultValue("PERB")
tccSym_PATBUF_REG_NAME = tccComponent.createStringSymbol("TCC_PATBUF_REG_NAME", None)
tccSym_PATBUF_REG_NAME.setVisible(False)
for index in range(0, len(register_names)):
if "PATTBUF" in register_names[index].getAttribute("name"):
tccSym_PATBUF_REG_NAME.setDefaultValue("PATTBUF")
break
else:
tccSym_PATBUF_REG_NAME.setDefaultValue("PATTB")
# master slave mode
tccInstanceMasterValue = 0
tccInstanceMasterNode = ATDF.getNode("/avr-tools-device-file/devices/device/peripherals/module@[name=\"TCC\"]/instance@[name=\""+tccInstanceName.getValue()+"\"]/parameters/param@[name=\"MASTER_SLAVE_MODE\"]")
if (tccInstanceMasterNode != None):
tccInstanceMasterValue = int(tccInstanceMasterNode.getAttribute("value"))
if (tccInstanceMasterValue == 2): #slave
activeComponentList = Database.getActiveComponentIDs()
temp = int(tccInstanceName.getValue().split("TCC")[1])
masterComponentID = "TCC" + str(temp - 1)
global tccSym_Slave_Mode
tccSym_Slave_Mode = tccComponent.createBooleanSymbol("TCC_SLAVE_MODE", None)
tccSym_Slave_Mode.setLabel("Enable Slave")
tccSym_Slave_Mode.setDefaultValue(False)
if ((tccInstanceMasterValue == 2)):
tccSym_Slave_Mode.setVisible(True)
else:
tccSym_Slave_Mode.setVisible(False)
if (tccInstanceMasterValue == 2): #slave
tccSym_Slave_Mode_Comment = tccComponent.createCommentSymbol("TCC_SLAVE_MODE_COMMENT", None)
tccSym_Slave_Mode_Comment.setLabel("**** Ensure Master - " + str(masterComponentID) + " is in active components ****")
tccSym_Slave_Mode_Comment.setVisible(False)
tccSym_Slave_Mode_Comment.setDependencies(tccSlaveCommentVisible, ["TCC_SLAVE_MODE"])
###########################################################################################
#prescaler configuration
global tccSym_CTRLA_PRESCALER
tccSym_CTRLA_PRESCALER = tccComponent.createKeyValueSetSymbol("TCC_CTRLA_PRESCALER", None)
tccSym_CTRLA_PRESCALER.setLabel("Select Prescaler")
tccSym_CTRLA_PRESCALER.setDefaultValue(0)
tccSym_CTRLA_PRESCALER.setOutputMode("Key")
tccSym_CTRLA_PRESCALER.setDisplayMode("Description")
node = ATDF.getNode("/avr-tools-device-file/modules/module@[name=\"TCC\"]/value-group@[name=\"TCC_CTRLA__PRESCALER\"]")
values = []
values = node.getChildren()
for index in range(0, len(values)):
tccSym_CTRLA_PRESCALER.addKey(values[index].getAttribute("name"), values[index].getAttribute("value"),
values[index].getAttribute("caption"))
tccSym_CTRLA_PRESCALER.setDependencies(tccSlaveModeVisibility, ["TCC_SLAVE_MODE"])
#waveform option
global tccSym_WAVE_WAVEGEN
tccSym_WAVE_WAVEGEN = tccComponent.createKeyValueSetSymbol("TCC_WAVE_WAVEGEN", None)
tccSym_WAVE_WAVEGEN.setLabel("Select PWM Type")
tccSym_WAVE_WAVEGEN.setDefaultValue(0)
tccSym_WAVE_WAVEGEN.setOutputMode("Key")
tccSym_WAVE_WAVEGEN.setDisplayMode("Description")
tccSym_WAVE_WAVEGEN.addKey("NPWM", "2", "Single slope PWM")
tccSym_WAVE_WAVEGEN.addKey("DSBOTTOM", "5", "Dual slope PWM with interrupt/event when counter = ZERO")
tccSym_WAVE_WAVEGEN.addKey("DSBOTH", "6", "Dual slope PWM with interrupt/event when counter = ZERO or counter = TOP")
tccSym_WAVE_WAVEGEN.addKey("DSTOP", "7", "Dual slope PWM with interrupt/event when counter = TOP")
tccSym_WAVE_WAVEGEN.setDependencies(tccSlaveModeVisibility, ["TCC_SLAVE_MODE"])
tccSym_CTRLBSET_DIR = tccComponent.createBooleanSymbol("TCC_CTRLBSET_DIR", None)
tccSym_CTRLBSET_DIR.setLabel("PWM Direction - Count Down")
tccSym_CTRLBSET_DIR.setDefaultValue(False)
tccSym_CTRLBSET_DIR.setDependencies(tccDirVisible, ["TCC_WAVE_WAVEGEN", "TCC_SLAVE_MODE"])
if (outputMatrixImplemented == 1):
tccSym_WEXCTRL_OTMX = tccComponent.createKeyValueSetSymbol("TCC_WEXCTRL_OTMX", None)
tccSym_WEXCTRL_OTMX.setLabel("Select Output Matrix")
tccSym_WEXCTRL_OTMX.setDefaultValue(0)
tccSym_WEXCTRL_OTMX.setOutputMode("Value")
tccSym_WEXCTRL_OTMX.setDisplayMode("Description")
tccSym_WEXCTRL_OTMX.addKey("OTMX_0", "0", "Default Channel Outputs")
tccSym_WEXCTRL_OTMX.addKey("OTMX_1", "1", "Modulo Half Number of Channel Outputs")
tccSym_WEXCTRL_OTMX.addKey("OTMX_2", "2", "Only Channel 0 Outputs")
tccSym_WEXCTRL_OTMX.addKey("OTMX_3", "3", "Channel 0 + Remaining Channel 1 Outputs")
#Period Value
global tccSym_PER_PER
tccSym_PER_PER = tccComponent.createIntegerSymbol("TCC_PER_PER", None)
tccSym_PER_PER.setLabel("Period Value")
tccSym_PER_PER.setDefaultValue(2399)
tccSym_PER_PER.setMin(0)
tccSym_PER_PER.setMax(pow(2, size) - 1)
tccSym_PER_PER.setDependencies(tccSlaveModeVisibility, ["TCC_SLAVE_MODE"])
clock_freq = Database.getSymbolValue("core", tccInstanceName.getValue()+"_CLOCK_FREQUENCY")
if clock_freq == 0:
clock_freq = 1
prescaler = int(tccSym_CTRLA_PRESCALER.getSelectedKey()[3:])
period = tccSym_PER_PER.getValue() + 1
if (tccSym_WAVE_WAVEGEN.getValue() == 0):
slope = 1
else:
slope = 2
frequency = ((clock_freq / prescaler) / period) / slope
#Calculated frequency
tccSym_Frequency = tccComponent.createCommentSymbol("TCC_FREQUENCY", None)
tccSym_Frequency.setLabel("**** PWM Frequency is "+str(frequency)+" Hz ****")
tccSym_Frequency.setDependencies(tccPWMFreqCalc, ["core."+tccInstanceName.getValue()+"_CLOCK_FREQUENCY", "TCC_PER_PER", "TCC_WAVE_WAVEGEN", "TCC_CTRLA_PRESCALER", "TCC_SLAVE_MODE"])
#Period interrupt
tccSym_INTENSET_OVF = tccComponent.createBooleanSymbol("TCC_INTENSET_OVF", None)
tccSym_INTENSET_OVF.setLabel("Enable Period Interrupt")
tccSym_INTENSET_OVF.setDefaultValue(False)
interruptDepList.append("TCC_INTENSET_OVF")
#Period out event
tccSym_EVCTRL_OVFEO = tccComponent.createBooleanSymbol("TCC_EVCTRL_OVFEO", None)
tccSym_EVCTRL_OVFEO.setLabel("Enable Period Event Out")
tccSym_EVCTRL_OVFEO.setDefaultValue(False)
eventDepList.append("TCC_EVCTRL_OVFEO")
tccSym_MainChannel_Menu = tccComponent.createMenuSymbol("TCC_CHANNEL", None)
tccSym_MainChannel_Menu.setLabel("Channel Configurations")
for channelID in range(0, int(numOfChannels)):
#channel menu
tccSym_Channel_Menu.append(channelID)
tccSym_Channel_Menu[channelID] = tccComponent.createMenuSymbol("TCC_CHANNEL"+str(channelID), tccSym_MainChannel_Menu)
tccSym_Channel_Menu[channelID].setLabel("Channel "+str(channelID))
#Duty
tccSym_Channel_CC.append(channelID)
tccSym_Channel_CC[channelID] = tccComponent.createIntegerSymbol("TCC_"+str(channelID)+"_CC", tccSym_Channel_Menu[channelID])
tccSym_Channel_CC[channelID].setLabel("Duty Value")
tccSym_Channel_CC[channelID].setMin(0)
tccSym_Channel_CC[channelID].setMax(pow(2, | |
# list6/task1.py
""" Filtry dolno i górnoprzepustowe """
from sys import argv
class EliasGamma:
    """Elias gamma coder for positive integers."""

    def encode(self, number):
        """Return the Elias gamma codeword: len(bits)-1 zeros, then the bits."""
        bits = format(number, "b")
        return "0" * (len(bits) - 1) + bits

    def decode(self, code):
        """Decode a concatenation of Elias gamma codewords into a list of ints.

        Leading zeros give the codeword length; the following bits (starting
        with the terminating '1') are the binary value.
        """
        numbers = []
        zeros = 0
        pos = 0
        total = len(code)
        while pos < total:
            if code[pos] == "0":
                zeros += 1
                pos += 1
            else:
                end = pos + zeros + 1
                numbers.append(int(code[pos:end], 2))
                pos = end
                zeros = 0
        return numbers
class Pixel:
    """A single RGB pixel with componentwise arithmetic.

    Channel values are plain numbers; operations return new Pixel objects
    and never clamp -- callers (see filters()) clamp explicitly.
    """

    def __init__(self, red, green, blue):
        self.red = red
        self.green = green
        self.blue = blue

    def __add__(self, other):
        """Componentwise addition."""
        return Pixel(
            self.red + other.red, self.green + other.green, self.blue + other.blue
        )

    def __sub__(self, other):
        """Componentwise subtraction."""
        return Pixel(
            self.red - other.red, self.green - other.green, self.blue - other.blue
        )

    def __mul__(self, number):
        """Scale every channel by a scalar."""
        return Pixel(self.red * number, self.green * number, self.blue * number)

    def __truediv__(self, number):
        """True division of every channel by a scalar.

        Bug fix: the original defined only __div__, the Python 2 protocol
        slot, so `pixel / n` raised TypeError under Python 3.
        """
        return Pixel(self.red / number, self.green / number, self.blue / number)

    # Keep the Python 2 slot name as an alias for backward compatibility.
    __div__ = __truediv__

    def __floordiv__(self, number):
        """Floor division of every channel by a scalar."""
        return Pixel(self.red // number, self.green // number, self.blue // number)

    def __mod__(self, number):
        """Componentwise modulo by a scalar."""
        return Pixel(self.red % number, self.green % number, self.blue % number)

    def uniform_quantization(self, step):
        """Quantize each channel down to the nearest multiple of `step`."""
        r = int(self.red // step * step)
        g = int(self.green // step * step)
        b = int(self.blue // step * step)
        return Pixel(r, g, b)
class Bitmap:
    """Bottom-up grid of Pixel rows with edge-clamped coordinate access.

    The constructor consumes a flat B,G,R byte sequence (TGA channel order);
    rows are inserted at the front so row 0 ends up being the top of the image.
    """

    def __init__(self, bitmap, width, height):
        self.width = width
        self.height = height
        rows = []
        current_row = []
        for i in range(width * height):
            base = 3 * i
            current_row.append(
                Pixel(
                    blue=bitmap[base], green=bitmap[base + 1], red=bitmap[base + 2]
                )
            )
            if len(current_row) == width:
                rows.insert(0, current_row)  # flip vertically while building
                current_row = []
        self.bitmap = rows

    def __getitem__(self, pos):
        """Return the pixel at (x, y); out-of-range coordinates clamp to the border."""
        x, y = pos
        clamped_x = min(max(x, 0), self.width - 1)
        clamped_y = min(max(y, 0), self.height - 1)
        return self.bitmap[clamped_y][clamped_x]
def parse_bitmap(bitmap, width, height):
    """Convert a flat B,G,R byte sequence into a bottom-up grid of Pixel rows.

    Free-function duplicate of Bitmap.__init__'s parsing logic.
    """
    rows = []
    current_row = []
    for i in range(width * height):
        base = 3 * i
        current_row.append(
            Pixel(blue=bitmap[base], green=bitmap[base + 1], red=bitmap[base + 2])
        )
        if len(current_row) == width:
            rows.insert(0, current_row)  # flip vertically while building
            current_row = []
    return rows
def filters(bitmap, x, y, high=False):
    """Apply a 3x3 kernel at (x, y): averaging (low) or sharpening (high).

    Border handling comes from Bitmap's clamped __getitem__.  The result is
    divided by the kernel weight sum when it is positive, then each channel
    is clamped into the 0..255 range.
    """
    low_kernel = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
    high_kernel = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]]
    kernel = high_kernel if high else low_kernel
    acc = Pixel(0, 0, 0)
    for dx in range(-1, 2):
        for dy in range(-1, 2):
            acc += bitmap[x + dx, y + dy] * kernel[dx + 1][dy + 1]
    norm = sum(sum(row) for row in kernel)
    if norm <= 0:
        norm = 1
    acc = acc // norm
    acc.red = min(max(acc.red, 0), 255)
    acc.green = min(max(acc.green, 0), 255)
    acc.blue = min(max(acc.blue, 0), 255)
    return acc
def bitmap_to_array(bitmap):
    """Flatten an iterable of pixels into a TGA-ordered [b, g, r, ...] list."""
    payload = []
    for px in bitmap:
        payload.extend((px.blue, px.green, px.red))
    return payload
def bitmap_to_bytes(bitmap):
    """Flatten an iterable of pixels into TGA-ordered bytes (b, g, r per pixel)."""
    return bytes(
        channel for px in bitmap for channel in (px.blue, px.green, px.red)
    )
def differential_coding(bitmap):
    """Encode a sequence as [first, second - first, third - second, ...].

    Works for any elements supporting '-' (Pixel or plain numbers).
    Bug fix: an empty input previously raised IndexError; it now returns [].
    """
    if not bitmap:
        return []
    previous = bitmap[0]
    deltas = [previous]
    for current in bitmap[1:]:
        deltas.append(current - previous)
        previous = current
    return deltas
def differential_decoding(diffs):
    """Invert differential_coding by prefix-summing the delta sequence.

    Bug fix: an empty input previously raised IndexError; it now returns [].
    """
    if not diffs:
        return []
    acc = diffs[0]
    values = [acc]
    for delta in diffs[1:]:
        acc = acc + delta
        values.append(acc)
    return values
def quantify_uniform(bitmap, k):
    """Uniformly quantize every pixel to 2**k levels per channel."""
    quantization_step = 256 // (2 ** k)
    return [px.uniform_quantization(quantization_step) for px in bitmap]
def quantify_nonuniform(bitmap, k):
    """Quantize each color channel with its own frequency-based codebook.

    A separate nonuniform codebook (2**k levels) is built per channel from
    the channel's value histogram, then applied pixel by pixel.
    """
    reds = [px.red for px in bitmap]
    greens = [px.green for px in bitmap]
    blues = [px.blue for px in bitmap]
    red_book = nonuniform_quantization(reds, k)
    green_book = nonuniform_quantization(greens, k)
    blue_book = nonuniform_quantization(blues, k)
    return [
        Pixel(red_book[r], green_book[g], blue_book[b])
        for r, g, b in zip(reds, greens, blues)
    ]
def nonuniform_quantization(pixels, k):
    """Build a non-uniform quantization codebook with 2**k levels.

    Builds a 256-bin histogram of `pixels`, pairs bins into intervals, then
    repeatedly merges the least-populated interval with its less-populated
    neighbour until only 2**k intervals remain. Returns a dict mapping every
    value 0..255 to the midpoint of its nearest interval.

    Bug fixes versus the original:
    - the neighbour lookups indexed with the bit-count parameter ``k``
      instead of the position ``i`` of the minimal interval;
    - ``min(intervals)`` selected the lexicographically smallest key (always
      the leftmost interval) instead of the least-populated interval;
    - the codebook scan guarded ``j + 1`` with ``n`` instead of
      ``len(values)``, which over-ran ``values`` for k >= 7.
    """
    n = 2 ** k
    histogram = {i: 0 for i in range(256)}
    for p in pixels:
        histogram[p] += 1
    # Start from 128 two-value intervals with their combined counts.
    intervals = {(i, i + 1): histogram[i] + histogram[i + 1]
                 for i in range(0, 256, 2)}
    while len(intervals) > n:
        intervals_list = list(intervals)
        # Merge the least-populated interval with its smaller neighbour.
        min_interval = min(intervals, key=intervals.get)
        i = intervals_list.index(min_interval)
        if i == 0:
            to_join = intervals_list[1]
        elif i == len(intervals_list) - 1:
            to_join = intervals_list[-2]
        else:
            to_join = (
                intervals_list[i - 1]
                if intervals[intervals_list[i - 1]] < intervals[intervals_list[i + 1]]
                else intervals_list[i + 1]
            )
        new_interval = (
            (min_interval[0], to_join[1])
            if to_join[0] > min_interval[0]
            else (to_join[0], min_interval[1])
        )
        intervals[new_interval] = intervals[min_interval] + intervals[to_join]
        del intervals[min_interval]
        del intervals[to_join]
        # Keep intervals ordered by their bounds for the neighbour logic.
        intervals = dict(sorted(intervals.items()))
    # Representative value of each interval is its midpoint.
    values = [(lo + hi) // 2 for lo, hi in intervals]
    codebook = {}
    j = 0
    for i in range(256):
        # Advance to the next representative once it is at least as close.
        if j + 1 < len(values) and abs(values[j + 1] - i) <= abs(values[j] - i):
            j += 1
        codebook[i] = values[j]
    return codebook
def encode(bitmap, k):
    """Compress `bitmap` into a low-pass and a high-pass stream.

    The low-pass (blurred) image is differentially coded and Elias-gamma
    packed into bytes; the high-pass (sharpened) image is non-uniformly
    quantized to 2**k levels. Distortion metrics for both streams are
    printed as a side effect.

    Parameters:
        bitmap: Bitmap indexable as bitmap[x, y] with width/height attributes.
        k: bit budget for the high-pass quantization (2**k levels).

    Returns:
        (filtered_low pixels, low-pass encoded bytes,
         filtered_high pixels, quantified high-pass bytes)
    """
    # Rows are scanned bottom-to-top to match the TGA payload order.
    filtered_low = [
        filters(bitmap, x, y)
        for y in reversed(range(bitmap.height))
        for x in range(bitmap.width)
    ]
    filtered_high = [
        filters(bitmap, x, y, True)
        for y in reversed(range(bitmap.height))
        for x in range(bitmap.width)
    ]
    low = differential_coding(filtered_low)
    byte_array = bitmap_to_array(low)
    # map all values to positive numbers for Elias coding
    # (positive x -> 2x, non-positive x -> 2|x|+1, so the sign survives)
    byte_array = [2 * x if x > 0 else abs(x) * 2 + 1 for x in byte_array]
    bitstring = "".join([EliasGamma().encode(x) for x in byte_array])
    # pad bitstring with zeros to a whole number of bytes
    if len(bitstring) % 8 != 0:
        bitstring += "0" * (8 - (len(bitstring) % 8))
    b = bytes(int(bitstring[i : i + 8], 2) for i in range(0, len(bitstring), 8))
    # now stuff for filtered_high
    # quantified = quantify_uniform(filtered_high, k)
    quantified = quantify_nonuniform(filtered_high, k)
    quantified_bytes = bytes(bitmap_to_array(quantified))
    # tests stuff
    # flatten the bitmap into the same bottom-to-top scan order as above
    bitmap = [
        bitmap[x, y]
        for y in reversed(range(bitmap.height))
        for x in range(bitmap.width)
    ]
    # Report MSE / SNR of each stream against the original image.
    l_mse, l_mse_r, l_mse_g, l_mse_b, l_snr = tests(bitmap, filtered_low)
    h_mse, h_mse_r, h_mse_g, h_mse_b, h_snr = tests(bitmap, quantified)
    print("Low MSE:", l_mse)
    print("Low MSE (red):", l_mse_r)
    print("Low MSE (green):", l_mse_g)
    print("Low MSE (blue):", l_mse_b)
    print("Low SNR:", l_snr)
    print("High MSE:", h_mse)
    print("High MSE (red):", h_mse_r)
    print("High MSE (green):", h_mse_g)
    print("High MSE (blue):", h_mse_b)
    print("High SNR:", h_snr)
    return filtered_low, b, filtered_high, quantified_bytes
def decode(payload_low):
    """Decode the Elias-gamma bitstream produced by encode() back into the
    raw BGR payload bytes of the low-pass image."""
    # Expand every payload byte into its 8-bit binary representation.
    bit_chunks = []
    for byte in payload_low:
        bit_chunks.append("{0:08b}".format(byte))
    codes = EliasGamma().decode("".join(bit_chunks))
    # Undo the positive-number mapping: even -> x/2, odd -> -(x/2).
    diffs = [code // 2 if code % 2 == 0 else -(code // 2) for code in codes]
    # Rebuild pixels from consecutive (blue, green, red) triples.
    pixels = [
        Pixel(int(diffs[i + 2]), int(diffs[i + 1]), int(diffs[i]))
        for i in range(0, len(diffs), 3)
    ]
    pixels = differential_decoding(pixels)
    return bytes(bitmap_to_array(pixels))
def d(a, b):
    """Squared difference between two scalar values."""
    diff = a - b
    return diff * diff
def mse(original, new):
    """Mean squared error between two equal-length sequences."""
    total = sum((a - b) ** 2 for a, b in zip(original, new))
    return (1 / len(original)) * total
def snr(x, mserr):
    """Signal-to-noise ratio: mean signal power divided by the MSE."""
    power = (1 / len(x)) * sum(v * v for v in x)
    return power / mserr
def tests(original, new):
    """Compute overall and per-channel MSE plus SNR between two bitmaps."""
    def flatten(pixels):
        # TGA channel order: blue, green, red.
        flat = []
        for p in pixels:
            flat.extend((p.blue, p.green, p.red))
        return flat

    original_array = flatten(original)
    new_array = flatten(new)
    mserr = mse(original_array, new_array)
    mserr_red = mse([p.red for p in original], [p.red for p in new])
    mserr_green = mse([p.green for p in original], [p.green for p in new])
    mserr_blue = mse([p.blue for p in original], [p.blue for p in new])
    snratio = snr(original_array, mserr)
    return mserr, mserr_red, mserr_green, mserr_blue, snratio
def main():
if len(argv) >= 3:
with open(argv[1], "rb") as f:
tga = f.read()
header = tga[:18]
footer = tga[len(tga) - 26 :]
width = tga[13] * 256 + tga[12]
height = tga[15] * 256 + tga[14]
if argv[2] == "--encode":
bitmap = Bitmap(tga[18 : len(tga) - 26], width, height)
if len(argv) == 4:
k = int(argv[3])
else:
k = 2
filtered_low, b, filtered_high, quantified = encode(bitmap, k)
filtered_low = bytes(bitmap_to_array(filtered_low))
filtered_high = bytes(bitmap_to_array(filtered_high))
with open("output_low.tga", "wb") as f:
| |
import discord
import sys
import random
import hashlib
import time
import requests
import asyncio
import math
import re
from typing import NamedTuple
from discord.ext import commands
# Bot credentials, populated from data_bases/secret.bot inside setup().
TOKEN = KEY = SECRET = GROUP_LINK = None
def setup(bot):
    """Extension load hook: read API secrets and attach the Admin cog."""
    print('Admin_extension se loada.')
    global TOKEN, KEY, SECRET, GROUP_LINK
    # secret.bot holds one value per line: token, key, secret, group link.
    with open("data_bases/secret.bot") as fp:
        TOKEN, KEY, SECRET, GROUP_LINK = (
            fp.readline().replace('\n', '') for _ in range(4))
    bot.add_cog(Admin_cog(bot))
def teardown(bot):
    """Extension unload hook: detach the Admin cog from the bot."""
    print('Admin_extension se unloada.')
    bot.remove_cog('Admin')
def normalize(x):
    """Lowercase `x`, transliterate Croatian diacritics (č/ć/ž/š/đ) to
    ASCII and drop every character outside a-z."""
    diacritics = {'č': 'c', 'ć': 'c', 'ž': 'z', 'š': 's', 'đ': 'd'}
    kept = []
    for ch in x.lower():
        ch = diacritics.get(ch, ch)
        if 'a' <= ch <= 'z':
            kept.append(ch)
    return ''.join(kept)
class Zadatak (NamedTuple):
    """A single competitive-programming problem record in the bot's database."""
    ime: str = "" ### problem name
    tezina: float = "" ### estimated difficulty (0-10); NOTE(review): default is "" rather than a float — confirm callers always set it
    natjecanje: str = "" ### colloquial contest name
    izvor: str = "" ### problem source id/link (codeforces contestId/letter)
    tags: str = "" ### comma-separated tags
    ans: str = "None" ### link to the tutorial/editorial ("None" when absent)
# Module-level mirrors of the bot's shared state (filled by Admin_cog.__init__):
# users, problems, contest list and the bot instance itself.
korisnici = zadatci = contests = botlocal = None
async def IsBotAdmin(ctx):
    """Return 1 if the invoking user holds the "BotAdmin" role, else 0.

    Sends a refusal message when the role is missing. Any lookup failure
    (e.g. invoked from a DM where ctx.guild is None) is treated as "not
    admin". Fix: the original used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    try:
        role = discord.utils.get(ctx.guild.roles, name="BotAdmin")
        if role in ctx.author.roles:
            return 1
        await ctx.send("Sorry, nisi moj šef! :stuck_out_tongue_winking_eye: \nČini se da nemaš potrebne ovlasti da učiniš ovo...")
        return 0
    except Exception:
        # Fail closed: any error means the caller is not treated as admin.
        return 0
async def UPDATE_FILE():
    """Persist the in-memory `korisnici` list to data_bases/korisnici.bot.

    One '&'-separated line per user (name, discord id, CF, CSES, AT handles)
    followed by the concatenated solve flags. Fix: the file is now opened
    with a context manager so the handle is closed even if a write fails
    (the original leaked it on error).
    """
    global korisnici
    with open("data_bases/korisnici.bot", "w") as f:
        for user in korisnici:
            linija = user.ime + '&' + user.did + '&' + user.CF + '&' + user.CSES + '&' + user.AT + '&'
            linija += ''.join(str(i) for i in user.solve)
            f.write(linija + '\n')
    # Keep the bot's shared copy in sync with the module-level list.
    botlocal.korisnici = korisnici
async def UPDATE_ZADATCI():
    """Persist contests and problems to data_bases/zadatci.bot.

    First line: '#'-joined contest ids; then one '#'-separated line per
    problem. Fix: the file is now opened with a context manager so the
    handle is closed even if a write fails (the original leaked it on
    error).
    """
    global zadatci, contests
    with open("data_bases/zadatci.bot", "w") as f:
        f.write('#'.join(map(str, contests)) + '\n')
        for zad in zadatci:
            linija = '#'.join((zad.ime, str(zad.tezina), zad.natjecanje,
                               zad.izvor, zad.tags, zad.ans))
            f.write(linija + '\n')
    # Keep the bot's shared copies in sync with the module-level lists.
    botlocal.contests = contests
    botlocal.zadatci = zadatci
async def try_to_get(link):
    """Fetch JSON from `link`, retrying up to 10 times with 1 s pauses.

    Returns the decoded JSON on HTTP 200, otherwise {'status': 'FAIL'}.
    """
    for _ in range(10):
        response = requests.get(link)
        if response.status_code == 200:
            return response.json()
        await asyncio.sleep(1)
    return {'status': 'FAIL'}
########################### Admin naredbe
class Admin_cog(commands.Cog, name="Admin", description="Služi uglavnom za administriranje bazama podataka"):
def __init__(self, bot):
global botlocal
self.bot = botlocal = bot
global korisnici, zadatci, contests
korisnici = bot.korisnici
zadatci = bot.zadatci
contests = bot.contests
@commands.command(name='problem_info', brief='Vraća sve informacije o zadatku po njegovom izvoru!', help='Vraća sve informacije o zadatku po njegovom izvoru!\nJako koristan jer među vraćenim podatcima vraća i indeks zadatka u bazi podataka\nNpr. ;problem_info 260626/A')
async def problem_info(self, ctx, idx):
global zadatci
br_find = 0
for i in range(len(zadatci)):
zad = zadatci[i]
if idx == zad.izvor:
info = '"' + zad.ime + '" '
info += '"' + str(zad.tezina) + '" '
info += '"' + zad.natjecanje + '" '
info += '"' + zad.izvor + '" '
info += '"' + zad.tags + '" '
info += '"' + zad.ans + '"'
info += "\nIndeks ovog zadatka u bazi je " + str(i)
br_find+=1
await ctx.send(info)
if br_find==0:
await ctx.send("Nema nađenih rezultata...")
@commands.command(name='problem_edit', brief="Mjenja podatke zadatka u bazi", help='Nepovratno mjenja podatke zadatka u bazi, potrebno je znati indeks zadatka u bazi (prvi argument)- koristi problem_info ili findGLV\n\nime je ime zadatka\ntezina je procijena težine na ljestvici od 0 do 10 (npr 0.37)\nnatjecanje je kolokvijalno ime natjecanja (npr. Test)\nizvor je jedinstveni kod u codeforces bazi i sastoji se od contest id + redno slovo (npr. 260626/A)\ntagove valjda odvajati zarezima bez razmaka (npr. "math,vikanje")\nans je link na editorijal - naravno da nije nužan argument ako ga nema; stavi "None" u tom slučaju')
async def problem_edit(self, ctx, idx, ime, tezina, natjecanje, izvor, tags, ans="None"):
if not (await IsBotAdmin(ctx)): return;
idx = int(idx)
zad = zadatci[idx]
info = '"' + zad.ime + '" '
info += '"' + str(zad.tezina) + '" '
info += '"' + zad.natjecanje + '" '
info += '"' + zad.izvor + '" '
info += '"' + zad.tags + '" '
info += '"' + zad.ans + '" '
await ctx.send("Stari info: " + info)
sendS = await ctx.send("Jesi li siguran da želiš promjeniti podatke zadatka?\nKao potvrdu, stavi 👍 ovu poruku!")
await sendS.add_reaction('👍' )
try:
reaction, _ = await self.bot.wait_for('reaction_add', check=lambda r, u: r.emoji=='👍' and r.message.id == sendS.id and u == ctx.author, timeout=60)
except asyncio.TimeoutError:
await sendS.remove_reaction('👍', self.bot.user)
return;
zadatci[idx] = zadatci[idx]._replace(ime=ime)
zadatci[idx] = zadatci[idx]._replace(tezina = float(tezina))
zadatci[idx] = zadatci[idx]._replace(natjecanje = natjecanje)
zadatci[idx] = zadatci[idx]._replace(izvor = izvor)
zadatci[idx] = zadatci[idx]._replace(tags = tags)
zadatci[idx] = zadatci[idx]._replace(ans = ans)
zad = zadatci[idx]
info = '"' + zad.ime + '" '
info += '"' + str(zad.tezina) + '" '
info += '"' + zad.natjecanje + '" '
info += '"' + zad.izvor + '" '
info += '"' + zad.tags + '" '
info += '"' + zad.ans + '" '
await UPDATE_ZADATCI()
await ctx.send("Editirano. \nNovi info: " + info)
@commands.command(name='problem_add', brief="Dodaje zadatak u bazu", help='Dodaje zadatak u bazu kroz sve potrebne parametre.\nime je ime zadatka\ntezina je procijena težine na ljestvici od 0 do 10 (npr 0.37)\nnatjecanje je kolokvijalno ime natjecanja (npr. Test)\nizvor je jedinstveni kod u codeforces bazi i sastoji se od contest id + redno slovo (npr. 260626/A)\ntagove valjda odvajati zarezima bez razmaka (npr. "math,vikanje")\nans je link na editorijal - naravno da nije nužan argument ako ga nema; stavi "None" u tom slučaju')
async def problem_add(self, ctx, ime, tezina, natjecanje, izvor, tags, ans="None"):
if not (await IsBotAdmin(ctx)): return;
info = '"' + ime + '" '
info += '"' + tezina + '" '
info += '"' + natjecanje + '" '
info += '"' + izvor + '" '
info += '"' + tags + '" '
info += '"' + ans + '" '
await ctx.send("Info: " + info)
sendS = await ctx.send("Jesi li siguran da želiš dodati zadatak?\nKao potvrdu, stavi 👍 ovu poruku!")
await sendS.add_reaction('👍' )
try:
reaction, _ = await self.bot.wait_for('reaction_add', check=lambda r, u: r.emoji=='👍' and r.message.id == sendS.id and u == ctx.author, timeout=60)
except asyncio.TimeoutError:
await sendS.remove_reaction('👍', self.bot.user)
return;
idx = len(zadatci)
zadatci.append(Zadatak())
zadatci[idx] = zadatci[idx]._replace(ime=ime)
zadatci[idx] = zadatci[idx]._replace(tezina = float(tezina))
zadatci[idx] = zadatci[idx]._replace(natjecanje = natjecanje)
zadatci[idx] = zadatci[idx]._replace(izvor = izvor)
zadatci[idx] = zadatci[idx]._replace(tags = tags)
zadatci[idx] = zadatci[idx]._replace(ans = ans)
zad = zadatci[idx]
info = '"' + zad.ime + '" '
info += '"' + str(zad.tezina) + '" '
info += '"' + zad.natjecanje + '" '
info += '"' + zad.izvor + '" '
info += '"' + zad.tags + '" '
info += '"' + zad.ans + '" '
await UPDATE_ZADATCI()
await ctx.send("Zadatak dodan. \nInfo: " + info)
@commands.command(name='problem_del', brief="Briše zadatak iz baze", help='Nepovratno briše zadatak iz baze\nPotreban je indeks zadatka u bazi - koristi ;problem_info')
async def problem_del(self, ctx, idx):
if not (await IsBotAdmin(ctx)): return;
idx = int(idx)
zad = zadatci[idx]
info = '"' + zad.ime + '" '
info += '"' + str(zad.tezina) + '" '
info += '"' + zad.natjecanje + '" '
info += '"' + zad.izvor + '" '
info += '"' + zad.tags + '" '
info += '"' + zad.ans + '" '
await ctx.send("Info: " + info)
sendS = await ctx.send("Jesi li siguran da želiš izbrisati zadatak?\nKao potvrdu, stavi 👍 ovu poruku!")
await sendS.add_reaction('👍' )
try:
reaction, _ = await self.bot.wait_for('reaction_add', check=lambda r, u: r.emoji=='👍' and r.message.id == sendS.id and u == ctx.author, timeout=60)
except asyncio.TimeoutError:
await sendS.remove_reaction('👍', self.bot.user)
return;
zadatci.remove(zadatci[idx])
await UPDATE_ZADATCI()
await ctx.send("Zadatak je uspješno izbrisan")
@commands.command(name='add_contest', brief="Povlači zadatke s codeforces natjecanja", help='Polu automatski povlači zadatke s nekog codeforces natjecanja\nArgument id je contest id na codeforcesu (šesteroznamenkasti broj u linku na contest)\nIme natjecanja je kolokvijalno ime natjecanja (npr. GLVoni 3.kolo)\nEditorial je link na editorial (na grupnom blogu), a ako još nije napisan stavi "None"')
async def add_contest (self, ctx, id, ime, editorial="None"):
global KEY, SECRET
if not (await IsBotAdmin(ctx)): return;
curtime = int(time.time())
myrand = random.randint(100000, 999999)
try_to_hash = str(myrand)+"/contest.standings?apiKey="+KEY+"&contestId="+str(id)+"&time="+str(curtime)+"#"+SECRET
myhash = hashlib.sha512(bytes(try_to_hash, 'utf-8')).hexdigest()
link = "https://codeforces.com/api/contest.standings?apiKey="+KEY+"&contestId="+str(id)+"&time="+str(curtime)+"&apiSig="+str(myrand)+myhash
rezultat = await try_to_get(link)
if rezultat["status"]!="OK":
await ctx.send("Čini se da je Codeforces srušen trenutno, pa ne mogu napraviti što trenutno tražiš od mene :cry:")
await ctx.send("Pokušaj kasnije...")
return;
for problem in (rezultat["result"]["problems"]):
x | |
and subpixel, are skipped. If dither_direct is true, then primary
# and subpixel dithers are both done. It is guaranteed (by APT) in this case that
# then number of exposures is a multiple of 3.
try:
dither_direct = observation_dict['DitherNirissWfssDirectImages']
if dither_direct == 'NO_DITHERING':
if verbose:
self.logger.info(('NIRISS WFSS parallel and NO_DITHERING set for direct imgages. Adjusting '
'number_of_dithers to 1 for the matching NIRCam exposures.'))
num_dithers = exposures_dictionary['number_of_dithers']
for counter in range(0, len(num_dithers), 3):
num_dithers[counter: counter+3] = ['1', num_dithers[counter+1], '1']
except:
pass
############################################################
else:
# Determine if there is an aperture override
override = obs.find('.//' + self.apt + 'FiducialPointOverride')
FiducialPointOverride = True if override is not None else False
for element in template:
element_tag_stripped = element.tag.split(ns)[1]
# loop through exposures and collect dither parameters
if element_tag_stripped == 'DitherPatternType':
DitherPatternType = element.text
elif element_tag_stripped == 'ImageDithers':
number_of_primary_dithers = int(element.text)
elif element_tag_stripped == 'SubpixelPositions':
if element.text != 'NONE':
number_of_subpixel_positions = np.int(element.text)
elif element_tag_stripped == 'PrimaryDithers':
if (element.text is not None) & (element.text != 'NONE'):
number_of_primary_dithers = int(element.text)
elif element_tag_stripped == 'Dithers':
DitherPatternType = element.find(ns + 'MrsDitherSpecification').find(ns + 'DitherType').text
number_of_primary_dithers = int(DitherPatternType[0])
elif element_tag_stripped == 'SubpixelDithers':
if element.text is not None:
number_of_subpixel_dithers = int(element.text)
# handle the NIRISS AMI case
if number_of_subpixel_positions > number_of_subpixel_dithers:
number_of_subpixel_dithers = np.copy(number_of_subpixel_positions)
# Determine if there is an aperture override
override = obs.find('.//' + self.apt + 'FiducialPointOverride')
FiducialPointOverride = True if override is not None else False
# To reduce confusion, if this is the parallel instrument,
# set the number of dithers to zero, since the prime
# instrument controls the number of dithers
if parallel:
number_of_primary_dithers = 0
number_of_subpixel_dithers = 0
# Combine primary and subpixel dithers
number_of_dithers = str(number_of_primary_dithers * number_of_subpixel_dithers)
# Different SI conventions of how to list exposure parameters
if ((instrument.lower() == 'niriss') and (element_tag_stripped == 'ExposureList')) | \
((instrument.lower() == 'fgs') and (element_tag_stripped == 'Exposures'))| \
((instrument.lower() == 'miri') and (element_tag_stripped == 'ExposureList'))| \
((instrument.lower() == 'nirspec') and (element_tag_stripped == 'Exposures')):
for exposure in element.findall(ns + 'Exposure'):
exposure_dict = {}
# Load dither information into dictionary
exposure_dict['DitherPatternType'] = DitherPatternType
if (number_of_dithers is None) | (number_of_dithers == 'NONE'):
number_of_dithers = 1 * number_of_subpixel_positions
exposure_dict[dither_key_name] = np.int(number_of_dithers)
exposure_dict['number_of_dithers'] = exposure_dict[dither_key_name]
for exposure_parameter in exposure:
parameter_tag_stripped = exposure_parameter.tag.split(ns)[1]
# if verbose:
# print('{} {}'.format(parameter_tag_stripped, exposure_parameter.text))
exposure_dict[parameter_tag_stripped] = exposure_parameter.text
# fill dictionary to return
for key in self.APTObservationParams_keys:
if key in exposure_dict.keys():
value = exposure_dict[key]
elif key in proposal_parameter_dictionary.keys():
value = proposal_parameter_dictionary[key]
elif key == 'Instrument':
value = instrument
elif key == 'ParallelInstrument':
value = parallel_instrument
elif key == 'FiducialPointOverride':
value = str(FiducialPointOverride)
elif key == 'APTTemplate':
value = template_name
elif key == 'Tracking':
value = tracking
else:
value = str(None)
if (key in ['PrimaryDithers', 'ImageDithers']) and (str(value) == 'None'):
value = '1'
if (key == 'Mode'):
if template_name not in ['NirissAmi']:
value = 'imaging'
else:
value = 'ami'
exposures_dictionary[key].append(value)
# add keys that were not defined in self.APTObservationParams_keys
# (to be fixed in Class.__init__ later )
for key in exposure_dict.keys():
if key not in self.APTObservationParams_keys:
# if key not yet present, create entry
if key not in exposures_dictionary.keys():
exposures_dictionary[key] = [str(exposure_dict[key])]
else:
exposures_dictionary[key].append(str(exposure_dict[key]))
if not parallel:
self.logger.info('Number of dithers: {} primary * {} subpixel = {}'.format(number_of_primary_dithers,
number_of_subpixel_dithers,
number_of_dithers))
for key in exposures_dictionary.keys():
if type(exposures_dictionary[key]) is not list:
exposures_dictionary[key] = list(exposures_dictionary[key])
# make sure all list items in the returned dictionary have the same length
for key, item in exposures_dictionary.items():
if len(item) == 0:
exposures_dictionary[key] = [0] * len(exposures_dictionary['Instrument'])
return exposures_dictionary
    def read_commissioning_template(self, template, template_name, obs, prop_params):
        """Parse a WfscCommissioning APT template into an exposure dictionary.

        Parameters
        ----------
        template : etree element
            Observation template node from the APT xml.
        template_name : str
            Name of the APT template.
        obs : etree element
            Observation node that contains ``template``.
        prop_params : tuple
            Proposal-level parameters (PI name, proposal id/title/category,
            science category, parallel flag, observation number/label,
            target name).

        Returns
        -------
        exposures_dictionary : dict
            Exposure parameters for this observation only.
        num_WFCgroups : int
            Number of expected wavefront-control groups.
        """
        # Get proposal parameters
        pi_name, prop_id, prop_title, prop_category, science_category, coordparallel, i_obs, obs_label, target_name = prop_params
        # dictionary that holds the content of this observation only
        exposures_dictionary = copy.deepcopy(self.empty_exposures_dictionary)
        # Set namespace
        ns = "{http://www.stsci.edu/JWST/APT/Template/WfscCommissioning}"
        # Set parameters that are constant for all WFSC obs:
        # full-frame imaging, no grism, no dithering.
        #typeflag = template_name
        typeflag = 'imaging'
        grismval = 'N/A'
        subarr = 'FULL'
        amps = 4
        pdithtype = 'NONE'
        pdither = '1'
        sdithtype = 'STANDARD'
        sdither = '1'
        # Find observation-specific parameters
        mod = template.find(ns + 'Module').text
        num_WFCgroups = int(template.find(ns + 'ExpectedWfcGroups').text)
        # Find filter parameters for all filter configurations within obs
        filter_configs = template.findall('.//' + ns + 'FilterConfig')
        # Check the target type in order to decide whether the tracking should be
        # sidereal or non-sidereal
        tracking = self.get_tracking_type(obs)
        for filt in filter_configs:
            sfilt = filt.find(ns + 'ShortFilter').text
            # The long-wave filter element may be missing; fall back to F480M.
            try:
                lfilt = filt.find(ns + 'LongFilter').text
            except AttributeError:
                lfilt = 'F480M'
            rpatt = filt.find(ns + 'ReadoutPattern').text
            grps = filt.find(ns + 'Groups').text
            ints = filt.find(ns + 'Integrations').text
            # Separate pupil and filter in case of filter that is
            # mounted in the pupil wheel (encoded as "PUPIL + FILTER")
            if ' + ' in sfilt:
                split_ind = sfilt.find(' + ')
                short_pupil = sfilt[0:split_ind]
                sfilt = sfilt[split_ind + 1:]
            else:
                short_pupil = 'CLEAR'
            if ' + ' in lfilt:
                p = lfilt.find(' + ')
                long_pupil = lfilt[0:p]
                lfilt = lfilt[p + 1:]
            else:
                long_pupil = 'CLEAR'
            # Repeat for the number of expected WFSC groups + 1
            for j in range(num_WFCgroups + 1):
                # Add all parameters to dictionary (positional order must
                # match what self.add_exposure expects)
                tup_to_add = (pi_name, prop_id, prop_title, prop_category,
                              science_category, typeflag, mod, subarr, pdithtype,
                              pdither, sdithtype, sdither, sfilt, lfilt,
                              rpatt, grps, ints, short_pupil,
                              long_pupil, grismval, coordparallel,
                              i_obs , j + 1, template_name, 'NIRCAM', obs_label,
                              target_name, tracking)
                exposures_dictionary = self.add_exposure(exposures_dictionary, tup_to_add)
                self.obs_tuple_list.append(tup_to_add)
        # Add the number of dithers (primary * subpixel) for every exposure row
        number_of_dithers = int(pdither) * int(sdither)
        exposures_dictionary['number_of_dithers'] = [str(number_of_dithers)] * len(exposures_dictionary['Instrument'])
        # Force 4 amp readout
        exposures_dictionary['NumOutputs'] = [amps] * len(exposures_dictionary['Instrument'])
        # make sure all list items in the returned dictionary have the same length
        for key, item in exposures_dictionary.items():
            if len(item) == 0:
                exposures_dictionary[key] = [0] * len(exposures_dictionary['Instrument'])
        # self.APTObservationParams = self.add_exposure(self.APTObservationParams, tup_to_add)
        # self.obs_tuple_list.append(tup_to_add)
        return exposures_dictionary, num_WFCgroups
def read_globalalignment_template(self, template, template_name, obs, prop_params):
# Get proposal parameters
pi_name, prop_id, prop_title, prop_category, science_category, coordparallel, i_obs, obs_label, target_name = prop_params
# dictionary that holds the content of this observation only
exposures_dictionary = copy.deepcopy(self.empty_exposures_dictionary)
ns = "{http://www.stsci.edu/JWST/APT/Template/WfscGlobalAlignment}"
# Set parameters that are constant for all WFSC obs
#typeflag = template_name
typeflag = 'imaging'
grismval = 'N/A'
short_pupil = 'CLEAR'
subarr = 'FULL'
pdither = '1'
pdithtype = 'NONE'
sdithtype = 'STANDARD'
sdither = '1'
# Determine the Global Alignment Iteration Type
GA_iteration = obs.find('.//' + ns + 'GaIteration').text
if GA_iteration == 'ADJUST1' or GA_iteration == 'CORRECT':
# 3 NIRCam and 1 FGS images
n_exp = 4
elif GA_iteration == 'ADJUST2' or GA_iteration == 'CORRECT+ADJUST':
# 5 NIRCam and 2 FGS
n_exp = 7
elif GA_iteration == 'BSCORRECT':
# 2 NIRCam and 1 FGS
n_exp = 3
# Find observation-specific parameters
mod = template.find(ns + 'Module').text
# num_WFCgroups = int(template.find(ns + 'ExpectedWfcGroups').text)
# Find filter parameters for all filter configurations within obs
ga_nircam_configs = template.findall('.//' + ns + 'NircamParameters')
ga_fgs_configs = template.findall('.//' + ns + 'FgsParameters')
# Check the target type in order to decide whether the tracking should be
# sidereal or non-sidereal
tracking = self.get_tracking_type(obs)
for conf in ga_nircam_configs:
sfilt = conf.find(ns + 'ShortFilter').text
try:
lfilt = conf.find(ns + 'LongFilter').text
except AttributeError:
lfilt = 'F480M'
rpatt = conf.find(ns + 'ReadoutPattern').text
grps = conf.find(ns + 'Groups').text
ints = conf.find(ns + 'Integrations').text
# Separate pupil and filter in case of filter that is
# mounted in the pupil wheel
if ' + ' in sfilt:
split_ind = sfilt.find(' + ')
short_pupil = sfilt[0:split_ind]
sfilt = sfilt[split_ind + 1:]
else:
short_pupil = 'CLEAR'
if ' + ' in lfilt:
p = lfilt.find(' + ')
long_pupil = lfilt[0:p]
lfilt = lfilt[p + 1:]
else:
long_pupil = 'CLEAR'
for fgs_conf in ga_fgs_configs:
fgs_grps = fgs_conf.find(ns + 'Groups').text
fgs_ints = fgs_conf.find(ns + 'Integrations').text
guider_det_num = get_guider_number_from_special_requirements(self.apt, obs)
fgs_subarr = "FGS{}_FULL".format(guider_det_num)
# Repeat for the number of exposures
for j in range(n_exp):
# Add all parameters to dictionary
if j==2 or j==5:
# This is an FGS image as part of GA
# Add FGS exposure to the dictionary
tup_to_add = (pi_name, prop_id, prop_title, prop_category,
science_category, typeflag, 'N/A', fgs_subarr, | |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The new version script to evalute the word error rate (WER) for ASR tasks.
Tensorflow and Lingvo are not required to run this script.
Example of Usage:
a) `python simple_wer_v2.py file_hypothesis file_reference`
b) `python simple_wer_v2.py file_hypothesis file_reference file_keyphrases`
where `file_hypothesis` is the filename for hypothesis text,
`file_reference` is the filename for reference text, and
`file_keyphrases` is the optional filename for important phrases
(one phrase per line).
Note that the program will also generate a html to diagnose the errors,
and the html filename is `{$file_hypothesis}_diagnois.html`.
Another way is to use this file as a stand-alone library, by calling class
SimpleWER with the following member functions:
- AddHypRef(hyp, ref): Updates the evaluation for each (hyp,ref) pair.
- GetWER(): Computes word error rate (WER) for all the added hyp-ref pairs.
- GetSummaries(): Generates strings to summarize word and key phrase errors.
- GetKeyPhraseStats(): Measures stats for key phrases.
Stats include:
(1) Jaccard similarity: https://en.wikipedia.org/wiki/Jaccard_index.
(2) F1 score: https://en.wikipedia.org/wiki/Precision_and_recall.
"""
import re
import sys
def TxtPreprocess(txt):
  """Normalize text before WER calculation (lowercase, strip punctuation)."""
  txt = txt.lower()
  # Order matters: each pattern mirrors one normalization step.
  for pattern, replacement in (
      (r'[\t\n]', ' '),       # tabs and newlines -> space
      (r'[,.\?!]+ ', ' '),    # punctuation followed by a space
      (r'[,.\?!]+$', ' '),    # punctuation at the end of the string
      (r' [,.\?!]+', ' '),    # punctuation preceded by a space
      (r'["\(\)\[\]]', ''),   # quotes, parentheses and square brackets
  ):
    txt = re.sub(pattern, replacement, txt)
  # Collapse runs of spaces and trim the edges.
  return re.sub(' +', ' ', txt.strip())
def RemoveCommentTxtPreprocess(txt):
  """Strip [bracketed] comments such as [noise], then apply TxtPreprocess."""
  without_comments = re.sub(r'\[\w+\]', '', txt)
  return TxtPreprocess(without_comments)
def HighlightAlignedHtml(hyp, ref, err_type):
  """Build an html fragment highlighting the difference between hyp and ref.

  Args:
    hyp: Hypothesis string.
    ref: Reference string.
    err_type: one of 'none', 'sub', 'del', 'ins'.

  Returns:
    An html string where disagreements are highlighted: substitutions are
    yellow (hyp wrapped in <del>), deletions red, insertions green. Write
    your own handler if you need html with other styles.

  Raises:
    ValueError: if err_type is not among ['none', 'sub', 'del', 'ins'],
      or if err_type == 'none' while hyp != ref.
  """
  if err_type == 'none':
    if hyp != ref:
      raise ValueError('hyp (%s) does not match ref (%s) for none error' %
                       (hyp, ref))
    return '%s ' % hyp
  if err_type == 'sub':
    return """<span style="background-color: yellow">
    <del>%s</del></span><span style="background-color: yellow">
    %s </span> """ % (hyp, ref)
  if err_type == 'del':
    return """<span style="background-color: red">
    %s </span> """ % (
        ref)
  if err_type == 'ins':
    return """<span style="background-color: green">
    <del>%s</del> </span> """ % (
        hyp)
  raise ValueError('unknown err_type ' + err_type)
def ComputeEditDistanceMatrix(hyp_words, ref_words):
  """Compute the edit distance matrix between two lists of words.

  Args:
    hyp_words: the list of words in the hypothesis sentence.
    ref_words: the list of words in the reference sentence.

  Returns:
    A (len(ref_words)+1) x (len(hyp_words)+1) list of lists where the first
    index is the reference position and the second the hypothesis position.
  """
  n_ref = len(ref_words) + 1
  n_hyp = len(hyp_words) + 1
  dist = [[0] * n_hyp for _ in range(n_ref)]
  # Base cases: transforming from/to the empty prefix costs its length.
  for j in range(n_hyp):
    dist[0][j] = j
  for i in range(n_ref):
    dist[i][0] = i
  # Standard Levenshtein dynamic program.
  for i in range(1, n_ref):
    for j in range(1, n_hyp):
      if ref_words[i - 1] == hyp_words[j - 1]:
        dist[i][j] = dist[i - 1][j - 1]
      else:
        dist[i][j] = 1 + min(dist[i - 1][j - 1],  # substitution
                             dist[i][j - 1],      # insertion
                             dist[i - 1][j])      # deletion
  return dist
class SimpleWER:
"""Compute word error rates after the alignment.
Attributes:
key_phrases: list of important phrases.
aligned_htmls: list of diagnois htmls, each of which corresponding to a pair
of hypothesis and reference.
hyp_keyphrase_counts: dict. `hyp_keyphrase_counts[w]` counts how often a key
phrases `w` appear in the hypotheses.
ref_keyphrase_counts: dict. `ref_keyphrase_counts[w]` counts how often a key
phrases `w` appear in the references.
matched_keyphrase_counts: dict. `matched_keyphrase_counts[w]` counts how
often a key phrase `w` appear in the aligned transcripts when the
reference and hyp_keyphrase match.
    wer_info: dict with four keys: 'sub' (substitution error), 'ins' (insertion
error), 'del' (deletion error), 'nw' (number of words). We can use
wer_info to compute word error rate (WER) as
(wer_info['sub']+wer_info['ins']+wer_info['del'])*100.0/wer_info['nw']
"""
def __init__(self,
key_phrases=None,
html_handler=HighlightAlignedHtml,
preprocess_handler=RemoveCommentTxtPreprocess):
"""Initialize SimpleWER object.
Args:
key_phrases: list of strings as important phrases. If key_phrases is
None, no key_phrases related metric will be computed.
html_handler: function to generate a string with html tags.
preprocess_handler: function to preprocess text before computing WER.
"""
self._preprocess_handler = preprocess_handler
self._html_handler = html_handler
self.key_phrases = key_phrases
self.aligned_htmls = []
self.wer_info = {'sub': 0, 'ins': 0, 'del': 0, 'nw': 0}
if key_phrases:
# Pre-process key_phrase list
if self._preprocess_handler:
self.key_phrases = \
[self._preprocess_handler(k) for k in self.key_phrases]
# Init keyphrase_counts for every key phrase
self.ref_keyphrase_counts = {}
self.hyp_keyphrase_counts = {}
self.matched_keyphrase_counts = {}
for k in self.key_phrases:
self.ref_keyphrase_counts[k] = 0
self.hyp_keyphrase_counts[k] = 0
self.matched_keyphrase_counts[k] = 0
else:
self.ref_keyphrase_counts = None
self.hyp_keyphrase_counts = None
self.matched_keyphrase_counts = None
def AddHypRef(self, hypothesis, reference):
    """Update WER when adding one pair of strings: (hypothesis, reference).

    Accumulates the per-pair error counts into self.wer_info, appends one
    diagnosis html string to self.aligned_htmls, and updates the key-phrase
    counters when key phrases are configured.

    Args:
      hypothesis: Hypothesis string.
      reference: Reference string.

    Raises:
      ValueError: when the program fails to parse edit distance matrix.
    """
    if self._preprocess_handler:
        hypothesis = self._preprocess_handler(hypothesis)
        reference = self._preprocess_handler(reference)

    # Compute edit distance.
    hyp_words = hypothesis.split()
    ref_words = reference.split()
    distmat = ComputeEditDistanceMatrix(hyp_words, ref_words)

    # Back trace, to distinguish different errors: ins, del, sub.
    pos_hyp, pos_ref = len(hyp_words), len(ref_words)
    wer_info = {'sub': 0, 'ins': 0, 'del': 0, 'nw': len(ref_words)}
    aligned_html = ''
    matched_ref = ''  # space-joined words that aligned exactly
    while pos_hyp > 0 or pos_ref > 0:
        err_type = ''

        # Distinguish error type by back tracking the edit-distance matrix.
        if pos_ref == 0:
            err_type = 'ins'
        elif pos_hyp == 0:
            err_type = 'del'
        else:
            if hyp_words[pos_hyp - 1] == ref_words[pos_ref - 1]:
                err_type = 'none'  # correctly aligned word
            elif distmat[pos_ref][pos_hyp] == distmat[pos_ref - 1][pos_hyp - 1] + 1:
                err_type = 'sub'  # substitution error
            elif distmat[pos_ref][pos_hyp] == distmat[pos_ref - 1][pos_hyp] + 1:
                err_type = 'del'  # deletion error
            elif distmat[pos_ref][pos_hyp] == distmat[pos_ref][pos_hyp - 1] + 1:
                err_type = 'ins'  # insertion error
            else:
                raise ValueError('fail to parse edit distance matrix.')

        # Generate aligned_html (built right to left, hence the prepend).
        if self._html_handler:
            if pos_hyp == 0 or not hyp_words:
                tmph = ' '
            else:
                tmph = hyp_words[pos_hyp - 1]
            if pos_ref == 0 or not ref_words:
                tmpr = ' '
            else:
                tmpr = ref_words[pos_ref - 1]
            aligned_html = self._html_handler(tmph, tmpr, err_type) + aligned_html

        # If no error, go to previous ref and hyp.
        if err_type == 'none':
            matched_ref = hyp_words[pos_hyp - 1] + ' ' + matched_ref
            pos_hyp, pos_ref = pos_hyp - 1, pos_ref - 1
            continue

        # Update error.
        wer_info[err_type] += 1

        # Adjust position of ref and hyp.
        if err_type == 'del':
            pos_ref = pos_ref - 1
        elif err_type == 'ins':
            pos_hyp = pos_hyp - 1
        else:  # err_type == 'sub'
            pos_hyp, pos_ref = pos_hyp - 1, pos_ref - 1

    # Verify the back trace accounted for the full edit distance.
    assert distmat[-1][-1] == wer_info['ins'] + \
        wer_info['del'] + wer_info['sub']

    # Accumulate err_info before the next (hyp, ref).
    for k in wer_info:
        self.wer_info[k] += wer_info[k]

    # Collect aligned_htmls.
    if self._html_handler:
        self.aligned_htmls += [aligned_html]

    # Update key phrase info (substring counts on the raw strings).
    if self.key_phrases:
        for w in self.key_phrases:
            self.ref_keyphrase_counts[w] += reference.count(w)
            self.hyp_keyphrase_counts[w] += hypothesis.count(w)
            self.matched_keyphrase_counts[w] += matched_ref.count(w)
def GetWER(self):
"""Compute Word Error Rate | |
# Copyright (c) 2018 <NAME>, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MycroftAI/mycroft-skills-manager).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import logging
import os
import shutil
import subprocess
import yaml
from contextlib import contextmanager
from difflib import SequenceMatcher
from functools import wraps
from git import Repo, GitError
from git.exc import GitCommandError
from lazy import lazy
from os.path import exists, join, basename, dirname, isfile
from shutil import rmtree, move
from subprocess import PIPE, Popen
from tempfile import mktemp, gettempdir
from threading import Lock
from typing import Callable
from pako import PakoManager
from msm import SkillRequirementsException, git_to_msm_exceptions
from msm.exceptions import PipRequirementsException, \
SystemRequirementsException, AlreadyInstalled, SkillModified, \
AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException
from msm.util import cached_property, Git
LOG = logging.getLogger(__name__)
# Branches which can be switched from when updating
# TODO Make this configurable
SWITCHABLE_BRANCHES = ['master']
# default constraints to use if no are given
DEFAULT_CONSTRAINTS = '/etc/mycroft/constraints.txt'
FIVE_MINUTES = 300
@contextmanager
def work_dir(directory):
    """Temporarily chdir into `directory`, restoring the previous cwd on exit."""
    previous = os.getcwd()
    os.chdir(directory)
    try:
        yield
    finally:
        # Always return to where we started, even if the body raised.
        os.chdir(previous)
def _backup_previous_version(func: Callable = None):
    """Private decorator to back up previous skill folder.

    Before running `func`, copies an installed skill's folder to a temp
    location. If `func` raises anything other than SkillModified, GitError
    or GitException, the original folder is restored. The temp copy is
    removed in all cases.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self.old_path = None
        if self.is_local:
            # Snapshot the current install so it can be restored on failure.
            self.old_path = join(gettempdir(), self.name)
            if exists(self.old_path):
                rmtree(self.old_path)
            shutil.copytree(self.path, self.old_path)
        try:
            func(self, *args, **kwargs)
        # Modified skill or GitError should not restore working copy
        except (SkillModified, GitError, GitException):
            raise
        except Exception:
            LOG.info('Problem performing action. Restoring skill to '
                     'previous state...')
            if exists(self.path):
                rmtree(self.path)
            if self.old_path and exists(self.old_path):
                shutil.copytree(self.old_path, self.path)
            # Recompute locality: restore may or may not have succeeded.
            self.is_local = exists(self.path)
            raise
        finally:
            # Remove temporary path if needed
            if self.old_path and exists(self.old_path):
                rmtree(self.old_path)
    return wrapper
class SkillEntry(object):
    """A skill that is installed, or installable from a git repository."""
    # Serializes pip installs across concurrent skill installations
    # (see run_pip).
    pip_lock = Lock()
    # Expected shape of the 'dependencies' section of a skill's manifest.yml.
    manifest_yml_format = {
        'dependencies': {
            'system': {},
            'exes': [],
            'skill': [],
            'python': []
        }
    }
def __init__(self, name, path, url='', sha='', msm=None):
    """Create a skill entry.

    Args:
        name: skill name; when None it falls back to repo meta info,
            then to the folder basename.
        path: local folder of the skill.
        url: git url of the skill repository ('' when unknown).
        sha: pinned git revision; '' or 'HEAD' marks a beta skill.
        msm: optional manager instance used to look up repo meta info.
    """
    # Normalize the url: strip trailing slash and a '.git' suffix.
    url = url.rstrip('/')
    url = url[:-len('.git')] if url.endswith('.git') else url
    self.path = path
    self.url = url
    self.sha = sha
    self.msm = msm
    if msm:
        u = url.lower()
        self.meta_info = msm.repo.skills_meta_info.get(u, {})
    else:
        self.meta_info = {}
    # Name preference: explicit argument > repo meta info > folder basename.
    if name is not None:
        self.name = name
    elif 'name' in self.meta_info:
        self.name = self.meta_info['name']
    else:
        self.name = basename(path)

    # TODO: Handle git:// urls as well
    from_github = False
    if url.startswith('https://'):
        url_tokens = url.rstrip("/").split("/")
        from_github = url_tokens[-3] == 'github.com' if url else False

    # Author and id can only be derived from github-style urls.
    self.author = self.extract_author(url) if from_github else ''
    self.id = self.extract_repo_id(url) if from_github else self.name
    self.is_local = exists(path)
    self.old_path = None  # Path of previous version while upgrading
@property
def is_beta(self):
    """True when no revision is pinned (empty sha or 'HEAD')."""
    pinned = self.sha
    return pinned == 'HEAD' or not pinned
@property
def is_dirty(self):
    """True if different from the version in the mycroft-skills repo.

    Considers a skill dirty if
    - the checkout sha doesn't match the mycroft-skills repo
    - the skill doesn't exist in the mycroft-skills repo
    - the skill is not a git repo
    - has local modifications
    """
    if not exists(self.path):
        return False
    try:
        checkout = Git(self.path)
        # Any porcelain status output means uncommitted local changes.
        mod = checkout.status(porcelain=True, untracked_files='no') != ''
        current_sha = checkout.rev_parse('HEAD')
    except GitCommandError:  # Not a git checkout
        return True

    # Map skill name -> sha as recorded in the mycroft-skills repo.
    skill_shas = {d[0]: d[3] for d in self.msm.repo.get_skill_data()}
    return (self.name not in skill_shas or
            current_sha != skill_shas[self.name] or
            mod)
@cached_property(ttl=FIVE_MINUTES)
def skill_gid(self):
    """Format skill gid for the skill.

    This property does some Git gymnastics to determine its return value.
    When a device boots, each skill accesses this property several times.
    To reduce the amount of boot time, cache the value returned by this
    property. Cache expires five minutes after it is generated.
    """
    LOG.debug('Generating skill_gid for ' + self.name)
    gid = ''
    # '@|' prefix marks a skill that differs from the market version.
    if self.is_dirty:
        gid += '@|'
    if self.meta_info != {}:
        gid += self.meta_info['skill_gid']
    else:
        # Fall back to the name with any trailing dotted suffix dropped.
        name = self.name.split('.')[0]
        gid += name
    return gid
def __str__(self):
    """Display the skill by its name."""
    return self.name
def attach(self, remote_entry):
    """Attach a remote entry to a local entry.

    Copies the identifying fields over and returns self for chaining.
    """
    for attr in ('name', 'sha', 'url', 'author'):
        setattr(self, attr, getattr(remote_entry, attr))
    return self
@classmethod
def from_folder(cls, path, msm=None, use_cache=True):
    """Find or create skill entry from folder path.

    Arguments:
        path: path of skill folder
        msm: msm instance to use for caching and extended information
            retrieval.
        use_cache: Enable/Disable cache usage. defaults to True
    """
    if msm and use_cache:
        # Reuse the already-built entry for this path when available.
        skills = {skill.path: skill for skill in msm.local_skills.values()}
        if path in skills:
            return skills[path]
    return cls(None, path, cls.find_git_url(path), msm=msm)
@classmethod
def create_path(cls, folder, url, name=''):
    """Build the lowercased '<name>.<author>' folder path for a skill repo."""
    repo = name or cls.extract_repo_name(url)
    author = cls.extract_author(url)
    return join(folder, '{}.{}'.format(repo, author).lower())
@staticmethod
def extract_repo_name(url):
s = url.rstrip('/').split("/")[-1]
a, b, c = s.rpartition('.git')
if not c:
return a
return s
@staticmethod
def extract_author(url):
return url.rstrip('/').split("/")[-2].split(':')[-1]
@classmethod
def extract_repo_id(cls, url):
    """Lowercased 'author:repo' identifier derived from a git URL."""
    author = cls.extract_author(url).lower()
    repo = cls.extract_repo_name(url)
    return '{}:{}'.format(author, repo).lower()
@staticmethod
def _tokenize(x):
return x.replace('-', ' ').split()
@staticmethod
def _extract_tokens(s, tokens):
s = s.lower().replace('-', ' ')
extracted = []
for token in tokens:
extracted += [token] * s.count(token)
s = s.replace(token, '')
s = ' '.join(i for i in s.split(' ') if i)
tokens = [i for i in s.split(' ') if i]
return s, tokens, extracted
@classmethod
def _compare(cls, a, b):
return SequenceMatcher(a=a, b=b).ratio()
def match(self, query, author=None):
    """Score how well this skill matches a search query.

    Args:
        query: free-text search string.
        author: optional author name; similarity to self.author scales
            the final score.
    Returns:
        float score; higher means a better match.
    """
    search, search_tokens, search_common = self._extract_tokens(
        query, ['skill', 'fallback', 'mycroft']
    )
    name, name_tokens, name_common = self._extract_tokens(
        self.name, ['skill', 'fallback', 'mycroft']
    )
    # Weighted similarities: full name, name words, and the common
    # filler words ('skill', 'fallback', 'mycroft') extracted above.
    weights = [
        (9, self._compare(name, search)),
        (9, self._compare(name.split(' '), search_tokens)),
        (2, self._compare(name_common, search_common)),
    ]
    if author:
        author_weight = self._compare(self.author, author)
        weights.append((5, author_weight))
    else:
        author_weight = 1.0
    # Weighted average of the similarities, scaled by author similarity.
    return author_weight * (
        sum(weight * val for weight, val in weights) /
        sum(weight for weight, val in weights)
    )
def run_pip(self, constraints=None):
    """Install the skill's python dependencies with pip.

    Args:
        constraints: optional path to a pip constraints file. When not
            given, DEFAULT_CONSTRAINTS is used if it exists.
    Returns:
        bool: False when there is nothing to install or the constraints
            file is missing; True once all packages installed.
    Raises:
        PipRequirementsException: when a pip install fails.
    """
    if not self.dependent_python_packages:
        return False

    # Use constraints to limit the installed versions
    if constraints and not exists(constraints):
        LOG.error('Couldn\'t find the constraints file')
        return False
    elif not constraints and exists(DEFAULT_CONSTRAINTS):
        # Fall back to the system default only when the caller supplied
        # none; previously a caller-provided file was silently overridden
        # whenever the default existed.
        constraints = DEFAULT_CONSTRAINTS

    LOG.info('Installing requirements.txt for ' + self.name)
    # If we can't write to the interpreter's directory, escalate via sudo.
    can_pip = os.access(dirname(sys.executable), os.W_OK | os.X_OK)
    pip_args = [sys.executable, '-m', 'pip', 'install']
    if constraints:
        pip_args += ['-c', constraints]
    if not can_pip:
        pip_args = ['sudo', '-n'] + pip_args

    with self.pip_lock:
        # Install packages one at a time to enforce the order specified
        # in the manifest.
        for dependent_python_package in self.dependent_python_packages:
            pip_command = pip_args + [dependent_python_package]
            proc = Popen(pip_command, stdout=PIPE, stderr=PIPE)
            pip_code = proc.wait()
            if pip_code != 0:
                stderr = proc.stderr.read().decode()
                if pip_code == 1 and 'sudo:' in stderr and pip_args[0] == 'sudo':
                    raise PipRequirementsException(
                        2, '', 'Permission denied while installing pip '
                        'dependencies. Please run in virtualenv or use sudo'
                    )
                raise PipRequirementsException(
                    pip_code, proc.stdout.read().decode(), stderr
                )

    return True
def install_system_deps(self):
    """Install the skill's system-level dependencies.

    Runs requirements.sh first, then installs system packages through
    the pako package manager, and finally verifies that every required
    executable is on the PATH.

    Returns:
        bool: True when the package manager install succeeded.
    Raises:
        SkillRequirementsException: when required executables are missing.
    """
    self.run_requirements_sh()
    # Map of executable -> whitespace-split package list from the manifest.
    system_packages = {
        exe: (packages or '').split()
        for exe, packages in self.dependent_system_packages.items()
    }
    LOG.info('Installing system requirements...')
    # 'all' applies on every platform; the rest are per-manager overrides.
    all_deps = system_packages.pop('all', [])
    try:
        manager = PakoManager()
        success = manager.install(all_deps, overrides=system_packages)
    except RuntimeError as e:
        # Best effort: a missing package manager isn't fatal by itself.
        LOG.warning('Failed to launch package manager: {}'.format(e))
        success = False
    missing_exes = [
        exe for exe in self.dependencies.get('exes') or []
        if not shutil.which(exe)
    ]
    if missing_exes:
        if not success:
            LOG.warning('Failed to install dependencies.')
            if all_deps:
                LOG.warning('Please install manually: {}'.format(
                    ' '.join(all_deps)
                ))
        raise SkillRequirementsException('Could not find exes: {}'.format(
            ', '.join(missing_exes)
        ))
    return success
def run_requirements_sh(self):
    """Run the skill's requirements.sh from inside the skill folder.

    Returns:
        bool: False when no requirements.sh exists, True on success.
    Raises:
        SystemRequirementsException: when the script exits non-zero.
    """
    setup_script = join(self.path, "requirements.sh")
    if not exists(setup_script):
        return False

    # The script may use relative paths, so run it from the skill folder.
    with work_dir(self.path):
        rc = subprocess.call(["bash", setup_script])

    if rc != 0:
        LOG.error("Requirements.sh failed with error code: " + str(rc))
        raise SystemRequirementsException(rc)
    LOG.info("Successfully ran requirements.sh for " + self.name)
    return True
def run_skill_requirements(self):
    """Install the other skills this skill depends on via msm.

    Raises:
        ValueError: when no msm instance was supplied at construction.
        SkillRequirementsException: when installing a dependency fails.
    """
    if not self.msm:
        raise ValueError('Pass msm to SkillEntry to install skill deps')
    try:
        for skill_dep in self.dependent_skills:
            LOG.info("Installing skill dependency: {}".format(skill_dep))
            try:
                self.msm.install(skill_dep)
            except AlreadyInstalled:
                # Dependency already present; nothing to do.
                pass
    except Exception as e:
        raise SkillRequirementsException(e)
def verify_info(self, info, fmt):
if not info:
return
if not isinstance(info, type(fmt)):
LOG.warning('Invalid value type manifest.yml for {}: {}'.format(
self.name, type(info)
))
return
if not isinstance(info, dict) or not fmt:
return
for key in info:
if key not in fmt:
LOG.warning('Unknown key in manifest.yml for {}: | |
import os
import subprocess
import sys
# import random
import time
from sysconf import cfg
from util import util
import numpy as np
import xml.etree.ElementTree as ElementTree
from space_expl_framework.abs_classes import AbstractProfiler
class HadoopProfiler(AbstractProfiler):
    # Profiles a Hadoop cluster: merges candidate configurations with the
    # cluster's fixed settings and measures HiBench CPU time.

    def __init__(self):
        # Iteration counter; names the generated config folder per run.
        self.itertime = 1
        # Fixed cluster settings parsed from the existing config files.
        self.original_confs = self.parse_orig_confs()
        self.hadoop_conf_home = os.sep.join([str(cfg.hadoop_home), 'etc', 'hadoop'])
        # Timeout budget in seconds; set to 2x the last successful
        # benchmark duration (see call_benchmark).
        self.avg_run_time = 2000  # seconds
        # Captured benchmark output, parsed by get_cpu_time_from_output.
        self.hibench_output = ''
def parse_orig_confs(self):
    # Parse the cluster's existing Hadoop XML config files into a dict of
    # {property name: value}. Properties without a <name> are skipped and
    # missing <value> elements default to ''.
    orig_conf = {}
    hadoop_home = cfg.hadoop_home
    hadoop_conf_home = os.sep.join([hadoop_home, 'etc', 'hadoop'])
    files = ['mapred-site.xml', 'core-site.xml', 'yarn-site.xml', 'hdfs-site.xml']
    for f in files:
        full_path = hadoop_conf_home + os.sep + f
        root = ElementTree.parse(full_path).getroot()
        properties = root.findall('property')
        for p in properties:
            prop = p.find('name').text
            if prop is None:
                # malformed property without a name; ignore it
                continue
            value = p.find('value').text
            if value is None:
                value = ''
            orig_conf[prop] = value
    return orig_conf
def start(self):
    # Prepare for profiling: create the backup folder and save the
    # cluster's original configuration files.
    self.backup_folder = 'backup'
    util.make_folder_ready(self.backup_folder)
    # read original hadoop configurations
    self.backup_original_confs()
def finish(self):
    # Put the cluster's original configuration files back in place.
    self.restore_hadoop_confs()
def backup_original_confs(self):
    '''
    Back up the original Hadoop configuration files.

    Some resource-related parameters are fixed for a specific cluster and
    must not be tuned, so we keep the originals and later merge them into
    every generated configuration (see profile()). This copies the four
    Hadoop config files into self.backup_folder, using sudo on AWS.
    :return:
    '''
    conf_files = ['mapred-site.xml', 'core-site.xml', 'yarn-site.xml', 'hdfs-site.xml']
    for conf_file in conf_files:
        if cfg.platform == 'aws':
            cmd = ' '.join(['sudo cp', self.hadoop_conf_home + os.sep + conf_file, self.backup_folder])
        else:
            cmd = ' '.join(['cp', self.hadoop_conf_home + os.sep + conf_file, self.backup_folder])
        self.run_cmd(cmd)
def profile(self, conf):
    # NOTE(review): dead code -- this definition is immediately shadowed
    # by the two-argument profile() defined right below, since Python
    # keeps only the last def of a given name in a class body.
    print 'hadoop profiler called()'
    return sys.maxsize
def profile(self, itertime, in_conf):
    '''
    Profile the Hadoop system with the given configuration.
    :param itertime: iteration counter; names the generated config folder
    :param in_conf: a new configuration dict (not mutated; a copy is used)
    :return: average CPU time over three runs, or sys.maxsize on failure
    '''
    conf = in_conf.copy()
    self.itertime = itertime
    # generate configuration files for this iteration
    self.curr_genconf_folder = cfg.gen_confs + os.sep + 'conf' + str(self.itertime)
    util.make_folder_ready(self.curr_genconf_folder)
    # merge: the cluster's fixed settings override the candidate conf
    for p, v in self.original_confs.iteritems():
        conf[p] = v
    confs = util.write_into_conf_file(conf, self.curr_genconf_folder)
    self.copy_new_conf(confs)
    '''
    No need to restart Hadoop. Only need to copy new configuration files to
    the configuration folder on Master node.
    HiBench will use those configuration files when submit a new job.
    '''
    # run the benchmark three times and average the reported CPU time
    cpu_times = []
    for i in range(3):
        success, elapsed_time = self.call_benchmark()
        if success:
            cpu_time = self.get_cpu_time_from_output()
            cpu_times.append(cpu_time)
        else:
            # clear output of the last run
            self.hibench_output = ''
            # clear cpu_times
            cpu_times = []
            # if any one of these runs failed, that means this configuration is bad
            # no need to test more, fail fast
            break
    # drop runs whose output could not be parsed (sys.maxsize sentinel)
    cpu_times = [t for t in cpu_times if t < sys.maxsize]
    average_cpu_time = sys.maxsize  # maximum time
    if len(cpu_times) > 0:
        average_cpu_time = np.mean(cpu_times)
    return int(average_cpu_time)
def call_benchmark(self):
    '''
    Run the HiBench benchmark suite once and time it.
    :return: (success, elapsed seconds). On success the timeout budget
             self.avg_run_time becomes twice the elapsed time.
    '''
    success = True
    hibench_home = cfg.hibench_home
    run_all_script = os.sep.join(['./'+hibench_home, 'bin', 'run_all.sh'])
    start_time = time.time()
    success = self.run_benchmark_cmd(run_all_script)
    if not success:
        print 'run benchmark command is not success, exit..'
    end_time = time.time()
    elapsed = end_time - start_time
    if success is True:
        # next run's timeout budget: twice this run's duration
        self.avg_run_time = elapsed * 2
    return success, elapsed
def wait_timeout(self, proc, seconds):
    # Wait for a process to finish, polling once per second; return its
    # exit code, or -100 if `seconds` elapse first. stderr is drained
    # into self.stderr while waiting.
    start = time.time()
    end = start + seconds
    interval = 1  # query every 1s
    self.stderr = ''
    while True:
        result = proc.poll()
        # NOTE(review): read() without a size blocks until EOF, so this
        # can stall until the child closes stderr -- TODO confirm intent.
        self.stderr += proc.stderr.read()
        if result is not None:
            return result
        if time.time() > end:
            print 'wait timeout:', seconds, 'kill process..., avg_run_time:', self.avg_run_time
            return -100  # -100 as the error code
        time.sleep(interval)
def run_benchmark_cmd(self, cmd):
    # Run the benchmark command under the self.avg_run_time timeout.
    # Returns True on success; False on timeout, non-zero exit or error.
    ret = True
    try:
        # Using PIPE here could cause a deadlock problem. When there is too much content in stdout or stderr,
        # the PIPE will be full and process will hang
        cmd = cmd.split(' ')
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        wait_result = self.wait_timeout(p, self.avg_run_time)
        # NOTE(review): communicate() waits for process exit; on the
        # timeout path (-100) it runs before kill() below and so may
        # hang -- TODO confirm intended behavior.
        stdout, stderr = p.communicate()
        return_code = p.returncode
        if wait_result == 0 or return_code == 0:
            # keep the stderr captured while waiting; HiBench reports there
            self.hibench_output = self.stderr
        elif wait_result == -100:
            # timed out: kill the benchmark and report failure
            p.kill()
            ret = False
        else:
            # non-zero exit code: report failure
            ret = False
    except Exception as e:
        # launching the command itself failed
        ret = False
    return ret
def get_cpu_time_from_output(self):
    # Parse the captured benchmark output and sum the 'cpu time spent'
    # counters found after each 'map-reduce framework' + 'reduce' marker
    # pair. Returns sys.maxsize when nothing could be parsed.
    find_real_job = False
    find_reduce_job = False
    times = []
    start_line = 'map-reduce framework'
    reduce_line = 'reduce'
    for line in self.hibench_output.split('\n'):
        line = line.strip().lower()
        if start_line in line:
            find_real_job = True
        if line.startswith(reduce_line):
            find_reduce_job = True
        if find_real_job and find_reduce_job and 'cpu time spent' in line:
            time = int(line.split('=')[-1])
            times.append(time)
            # reset markers so the next job is matched independently
            find_real_job = False
            find_reduce_job = False
    # the output is consumed; reset for the next run
    self.hibench_output = ''
    # if error, returns sys.maxsize
    if len(times) == 0:
        return sys.maxsize
    return sum(times)
def kill_useless_process(self):
    # Clean up stray ssh sessions locally and leftover python monitor
    # processes on the slave nodes.
    killall_ssh = 'killall ssh'
    self.run_cmd(killall_ssh)
    # killall python processs on slave nodes
    slave_nodes = ['hadoop-slave1', 'hadoop-slave2', 'hadoop-slave3', 'hadoop-slave4']
    for s in slave_nodes:
        kill_python = 'ssh ' + s + ' \"ps -ef|grep \'socket.SOCK\'|awk \'{ print \$2 }\' |xargs kill\"'
        self.run_cmd(kill_python)
def run_multiple_cmds(self, cmds):
    # Run each command in order; stop and report failure at the first
    # command that fails.
    return all(self.run_cmd(command) for command in cmds)
def run_cmd(self, cmd):
devnull = open(os.devnull, 'w')
return_code = subprocess.call(cmd, shell=True, stdout=devnull, stderr=devnull)
devnull.close()
#print return_code
if return_code == 0:
#print 'run', cmd, 'success'
return True
else:
print 'cmd', cmd, '===return is not 0:', return_code
return True
def restart_hadoop_with_new_conf(self, confs):
    # Stop Hadoop, install the new configuration files, start Hadoop
    # again and wait until HDFS leaves safe mode.
    # Returns 0 on success, -1 on failure.
    stop_all = []
    start_all = []
    if cfg.platform == 'aws':
        # AWS/EMR manages Hadoop via upstart services.
        stop_all = ['sudo stop hadoop-yarn-resourcemanager']
        start_all = ['sudo start hadoop-yarn-resourcemanager']
    else:
        stop_all = [os.sep.join([cfg.hadoop_home, 'sbin', 'stop-all.sh'])]
        start_all = [os.sep.join([cfg.hadoop_home, 'sbin', 'start-all.sh'])]
    if not self.run_multiple_cmds(stop_all):
        print 'Stop Hadoop failed, return...'
        return -1
    # copy the new configuration into place
    self.copy_new_conf(confs)
    # then start hadoop
    if not self.run_multiple_cmds(start_all):
        print 'Start Hadoop failed, return...'
        return -1
    # block until HDFS reports that safe mode is off
    leave_safe_mode = 'hdfs dfsadmin -safemode get'
    leave_safe_mode = leave_safe_mode.split(' ')
    while True:
        output = subprocess.check_output(leave_safe_mode)
        if output.lower().strip() == 'safe mode is off':
            break
        time.sleep(1)
    return 0
def restore_hadoop_confs(self):
    # Copy the backed-up original config files back into the Hadoop
    # configuration folder (with sudo on AWS).
    files_to_copy = [self.backup_folder + os.sep + f for f in os.listdir(self.backup_folder)]
    if cfg.platform == 'aws':
        cmd = ' '.join(['sudo cp', ' '.join(files_to_copy), self.hadoop_conf_home])
    else:
        cmd = ' '.join(['cp', ' '.join(files_to_copy), self.hadoop_conf_home])
    self.run_cmd(cmd)
def copy_new_conf(self, confs):
'''
Copy configuration files to slave nodes using ssh
1. get all slave nodes from hadoop configuation files "slaves"
2. construct command to copy configuration files
Actually, there is no need to copy files to slave nodes.
HiBench will use the configuration files on the master node to submit jobs.
'''
# slave_nodes = self.get_slave_nodes()
files_to_copy = ''
for file_name in confs:
files_to_copy += self.curr_genconf_folder + os.sep + file_name + ' '
# copy configuration files to master node
master_target_folder = | |
# test_haproxy.py
#!/usr/bin/env python
import collections
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import call
import sys
class MockCollectd(MagicMock):
    """
    Mocks the functions and objects provided by the collectd module
    """
    @staticmethod
    def log(log_str):
        # Route collectd log output to stdout for test visibility.
        print log_str

    # Every collectd log level shares the same implementation.
    debug = log
    info = log
    warning = log
    error = log
class MockHAProxySocketSimple(object):
    """Stand-in for haproxy.HAProxySocket returning minimal canned data."""

    def __init__(self, socket_file="whatever"):
        self.socket_file = socket_file

    def get_resolvers(self):
        """The simple fixture exposes no resolver sections."""
        return {}

    def get_server_info(self):
        """Canned 'show info' counters."""
        return {'ConnRate': '3', 'CumReq': '5', 'Idle_pct': '78'}

    def get_server_stats(self):
        """A single FRONTEND stats row for a proxy named 'sample_proxy'."""
        frontend_row = {
            'bin': '3120628', 'lastchg': '', 'lbt': '', 'weight': '',
            'wretr': '', 'slim': '50', 'pid': '1', 'wredis': '', 'dresp': '0',
            'ereq': '0', 'pxname': 'sample_proxy', 'stot': '39728',
            'sid': '0', 'bout': '188112702395', 'qlimit': '', 'status': 'OPEN',
            'smax': '2', 'dreq': '0', 'econ': '', 'iid': '2', 'chkfail': '',
            'downtime': '', 'qcur': '', 'eresp': '', 'throttle': '', 'scur': '0',
            'bck': '', 'qmax': '', 'act': '', 'chkdown': '', 'svname': 'FRONTEND',
        }
        return [frontend_row]
# Install the mock before importing haproxy, which imports collectd at load.
sys.modules['collectd'] = MockCollectd()
import haproxy

# (key, values) pairs mimicking collectd config option nodes.
ConfigOption = collections.namedtuple('ConfigOption', ('key', 'values'))

# Config with only 'Testing' set, so every other option takes its default.
mock_config_default_values = Mock()
mock_config_default_values.children = [
    ConfigOption('Testing', ('True',))
]
def test_default_config():
    # With only 'Testing' set, config() must fall back to its defaults.
    module_config = haproxy.config(mock_config_default_values)
    assert module_config['socket'] == '/var/run/haproxy.sock'
    assert module_config['proxy_monitors'] == ['server', 'frontend', 'backend']
    assert module_config['testing']
class MockHAProxySocketComplex(object):
    """Stand-in for haproxy.HAProxySocket with resolver data and stats rows
    covering a backend server, a BACKEND aggregate and a FRONTEND."""

    def __init__(self, socket_file="whatever"):
        self.socket_file = socket_file

    def get_resolvers(self):
        # Two DNS resolver sections with per-resolver counters.
        return {'dns1': {'sent': '8', 'snd_error': '0', 'valid': '4', 'update': '0', 'cname': '0', 'cname_error': '4',
                         'any_err': '0', 'nx': '0', 'timeout': '0', 'refused': '0', 'other': '0', 'invalid': '0',
                         'too_big': '0', 'truncated': '0', 'outdated': '0'},
                'dns2': {'sent': '0', 'snd_error': '0', 'valid': '0', 'update': '0', 'cname': '0', 'cname_error': '0',
                         'any_err': '0', 'nx': '0', 'timeout': '0', 'refused': '0', 'other': '0', 'invalid': '0',
                         'too_big': '0', 'truncated': '0', 'outdated': '0'}}

    def get_server_info(self):
        # Canned 'show info' counters.
        sample_data = {'ConnRate': '3', 'CumReq': '5', 'Idle_pct': '78'}
        return sample_data

    def get_server_stats(self):
        # Three stats rows: backend server 'elasticache', the
        # 'elasticsearch_backend' BACKEND aggregate, and the
        # 'sensu_frontend' FRONTEND.
        sample_data = [{'lastchg': '321093', 'agent_health': '', 'check_desc': 'Layer7 check passed',
                        'smax': '2', 'agent_rise': '', 'req_rate': '', 'check_status': 'L7OK', 'wredis': '0',
                        'comp_out': '', 'conn_rate': '', 'cli_abrt': '0', 'pxname': 'elasticsearch_backend',
                        'check_code': '0', 'check_health': '4', 'check_fall': '3', 'qlimit': '', 'bin': '0',
                        'conn_rate_max': '', 'hrsp_5xx': '', 'stot': '344777', 'econ': '0', 'iid': '3',
                        'hrsp_4xx': '', 'hanafail': '', 'downtime': '0', 'eresp': '0', 'bout': '0', 'dses': '',
                        'qtime': '0', 'srv_abrt': '0', 'throttle': '', 'ctime': '0', 'scur': '0', 'type': '2',
                        'check_rise': '2', 'intercepted': '', 'hrsp_2xx': '', 'mode': 'tcp', 'agent_code': '',
                        'qmax': '0', 'agent_desc': '', 'weight': '1', 'slim': '', 'pid': '1', 'comp_byp': '',
                        'lastsess': '0', 'comp_rsp': '', 'agent_status': '', 'check_duration': '0', 'rate': '2',
                        'rate_max': '9', 'dresp': '0', 'ereq': '', 'addr': '192.168.1.1:6379', 'comp_in': '',
                        'dcon': '', 'last_chk': '(tcp-check)', 'sid': '1', 'ttime': '18', 'hrsp_1xx': '',
                        'agent_duration': '', 'hrsp_other': '', 'status': 'UP', 'wretr': '0', 'lbtot': '344777',
                        'dreq': '', 'req_rate_max': '', 'conn_tot': '', 'chkfail': '0', 'cookie': '', 'qcur': '0',
                        'tracked': '', 'rtime': '0', 'last_agt': '', 'bck': '0', 'req_tot': '', 'rate_lim': '',
                        'hrsp_3xx': '', 'algo': '', 'act': '1', 'chkdown': '0', 'svname': 'elasticache',
                        'agent_fall': ''},
                       {'lastchg': '321093', 'agent_health': '', 'check_desc': '', 'smax': '2',
                        'agent_rise': '', 'req_rate': '', 'check_status': '', 'wredis': '0', 'comp_out': '0',
                        'conn_rate': '', 'cli_abrt': '0', 'pxname': 'elasticsearch_backend', 'check_code': '',
                        'check_health': '', 'check_fall': '', 'qlimit': '', 'bin': '0', 'conn_rate_max': '',
                        'hrsp_5xx': '', 'stot': '515751', 'econ': '0', 'iid': '3', 'hrsp_4xx': '', 'hanafail': '',
                        'downtime': '0', 'eresp': '0', 'bout': '0', 'dses': '', 'qtime': '0', 'srv_abrt': '0',
                        'throttle': '', 'ctime': '0', 'scur': '0', 'type': '1', 'check_rise': '', 'intercepted': '',
                        'hrsp_2xx': '', 'mode': 'tcp', 'agent_code': '', 'qmax': '0', 'agent_desc': '', 'weight': '1',
                        'slim': '800', 'pid': '1', 'comp_byp': '0', 'lastsess': '0', 'comp_rsp': '0',
                        'agent_status': '', 'check_duration': '', 'rate': '3', 'rate_max': '9', 'dresp': '0',
                        'ereq': '', 'addr': '', 'comp_in': '0', 'dcon': '', 'last_chk': '', 'sid': '0', 'ttime': '18',
                        'hrsp_1xx': '', 'agent_duration': '', 'hrsp_other': '', 'status': 'UP', 'wretr': '0',
                        'lbtot': '344777', 'dreq': '0', 'req_rate_max': '', 'conn_tot': '', 'chkfail': '',
                        'cookie': '', 'qcur': '0', 'tracked': '', 'rtime': '0', 'last_agt': '', 'bck': '0',
                        'req_tot': '', 'rate_lim': '', 'hrsp_3xx': '', 'algo': 'roundrobin', 'act': '1',
                        'chkdown': '0', 'svname': 'BACKEND', 'agent_fall': ''},
                       {'lastchg': '', 'agent_health': None, 'check_desc': None, 'smax': '0',
                        'agent_rise': None, 'req_rate': '0', 'check_status': '', 'wredis': '', 'comp_out': None,
                        'conn_rate': None, 'cli_abrt': None, 'pxname': 'sensu_frontend', 'check_code': '',
                        'check_health': None, 'check_fall': None, 'qlimit': '', 'bin': '0', 'conn_rate_max': None,
                        'hrsp_5xx': '', 'stot': '0', 'econ': '', 'iid': '4', 'hrsp_4xx': '', 'hanafail': '',
                        'downtime': '', 'eresp': '', 'bout': '0', 'dses': None, 'qtime': None, 'srv_abrt': None,
                        'throttle': '', 'ctime': None, 'scur': '0', 'type': '0', 'check_rise': None,
                        'intercepted': None, 'hrsp_2xx': '', 'mode': None, 'agent_code': None, 'qmax': '',
                        'agent_desc': None, 'weight': '', 'slim': '8000', 'pid': '1', 'comp_byp': None,
                        'lastsess': None, 'comp_rsp': None, 'agent_status': None, 'check_duration': '',
                        'rate': '0', 'rate_max': '10', 'dresp': '0', 'ereq': '0', 'addr': None, 'comp_in': None,
                        'dcon': None, 'last_chk': None, 'sid': '0', 'ttime': None, 'hrsp_1xx': '',
                        'agent_duration': None, 'hrsp_other': '', 'status': 'OPEN', 'wretr': '', 'lbtot': '',
                        'dreq': '0', 'req_rate_max': '0', 'conn_tot': None, 'chkfail': '', 'cookie': None, 'qcur': '',
                        'tracked': '', 'rtime': None, 'last_agt': None, 'bck': '', 'req_tot': '', 'rate_lim': '0',
                        'hrsp_3xx': '', 'algo': None, 'act': '', 'chkdown': '', 'svname': 'FRONTEND',
                        'agent_fall': None}]
        return sample_data
@patch('haproxy.HAProxySocket', MockHAProxySocketComplex)
def test_metrics_submitted_for_frontend_with_correct_names():
    # Monitoring only 'frontend' with enhanced metrics: expect the server
    # info gauges plus every FRONTEND stat from the fixture above.
    haproxy.submit_metrics = MagicMock()
    mock_config = Mock()
    mock_config.children = [
        ConfigOption('ProxyMonitor', ('frontend',)),
        ConfigOption('EnhancedMetrics', ('True',)),
        ConfigOption('Testing', ('True',))
    ]
    haproxy.collect_metrics(haproxy.config(mock_config))
    haproxy.submit_metrics.assert_has_calls([call({'values': (3,), 'type_instance': 'connrate', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (5,), 'type_instance': 'cumreq', 'type': 'derive', 'plugin': 'haproxy'}),
                                             call({'values': (78,), 'type_instance': 'idle_pct', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'smax', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'rate', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'req_rate', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'dresp', 'type': 'derive', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'ereq', 'type': 'derive', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'dreq', 'type': 'derive', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'bin', 'type': 'derive', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'stot', 'type': 'derive', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'req_rate_max', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (8000,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'slim', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'rate_lim', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'bout', 'type': 'derive', 'plugin': 'haproxy'}),
                                             call({'values': (0,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'scur', 'type': 'gauge', 'plugin': 'haproxy'}),
                                             call({'values': (10,), 'plugin_instance': 'frontend.sensu_frontend', 'type_instance': 'rate_max', 'type': 'gauge', 'plugin': 'haproxy'})] )
@patch('haproxy.HAProxySocket', MockHAProxySocketComplex)
def test_metrics_submitted_for_backend_and_server_with_correct_names():
haproxy.submit_metrics = MagicMock()
mock_config = Mock()
mock_config.children = [
ConfigOption('ProxyMonitor', ('backend',)),
ConfigOption('EnhancedMetrics', ('True',)),
ConfigOption('Testing', ('True',))
]
haproxy.collect_metrics(haproxy.config(mock_config))
haproxy.submit_metrics.assert_has_calls([
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'rtime', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (2,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'smax', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'lastsess', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'check_duration', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (2,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'rate', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'wredis', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'eresp', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'dresp', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'cli_abrt', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'bin', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (344777,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'lbtot', 'type': 'counter', 'plugin': 'haproxy'}),
call({'values': (344777,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'stot', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'econ', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (18,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'ttime', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'downtime', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'qcur', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'wretr', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'qtime', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'srv_abrt', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'bout', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'ctime', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'scur', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'bck', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'qmax', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (9,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'rate_max', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (1,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'act', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend.elasticache', 'type_instance': 'chkfail', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend', 'type_instance': 'rtime', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (2,), 'plugin_instance': 'backend.elasticsearch_backend', 'type_instance': 'smax', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend', 'type_instance': 'comp_byp', 'type': 'derive', 'plugin': 'haproxy'}),
call({'values': (0,), 'plugin_instance': 'backend.elasticsearch_backend', 'type_instance': 'lastsess', 'type': 'gauge', 'plugin': 'haproxy'}),
call({'values': (3,), 'plugin_instance': 'backend.elasticsearch_backend', 'type_instance': 'rate', 'type': 'gauge', 'plugin': 'haproxy'}),
| |
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DataLayerClient import DataLayerClient
class DataLayerOperator:
def __init__(self, suid, sid, wid, datalayer):
self._storage_userid = suid
self._sandboxid = sid
self._workflowid = wid
self._datalayer = datalayer
# global data layer clients for either workflow-private data or user storage
self._data_layer_client = None
self._data_layer_client_private = None
# TODO (?): use the local data layer for operations regarding KV, maps, sets and counters instead of in-memory data structures (e.g., transient_data_output)
# and store the operations/data for is_queued = True operations,
# so that we can synchronize it with the global data layer
# (key, value) store
self.transient_data_output = {}
self.transient_data_output_private = {}
self.data_to_be_deleted = {}
self.data_to_be_deleted_private = {}
self.map_output = {}
self.set_output = {}
self.counter_output = {}
self.map_output_delete = {}
self.set_output_delete = {}
self.counter_output_delete = {}
# TODO: update to use local data layer for (key, value) operations
def put(self, key, value, is_private=False, is_queued=False, table=None):
if is_queued:
if is_private:
self.transient_data_output_private[key] = value
if key in self.data_to_be_deleted_private:
self.data_to_be_deleted_private.pop(key, None)
else:
self.transient_data_output[key] = value
if key in self.data_to_be_deleted:
self.data_to_be_deleted.pop(key, None)
else:
data_layer_client = self._get_data_layer_client(is_private)
data_layer_client.put(key, value, tableName=table)
def get(self, key, is_private=False, table=None):
# check first transient_output
# if not there, return the actual (global) data layer data item
# if not there either, return empty string (as defined in the DataLayerClient)
value = None
# if the put() or delete() were called with is_queued=False (default),
# then the below checks will still result in 'value is None'
# if not, then value will be obtained from the transient output
if is_private:
if key in self.data_to_be_deleted_private:
return ""
value = self.transient_data_output_private.get(key)
else:
if key in self.data_to_be_deleted:
return ""
value = self.transient_data_output.get(key)
if value is None:
data_layer_client = self._get_data_layer_client(is_private)
value = data_layer_client.get(key, tableName=table)
return value
def delete(self, key, is_private=False, is_queued=False, table=None):
if is_queued:
if is_private:
self.transient_data_output_private.pop(key, None)
self.data_to_be_deleted_private[key] = True
else:
self.transient_data_output.pop(key, None)
self.data_to_be_deleted[key] = True
else:
data_layer_client = self._get_data_layer_client(is_private)
data_layer_client.delete(key, tableName=table)
# map operations
def createMap(self, mapname, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.createMap(mapname)
def putMapEntry(self, mapname, key, value, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.putMapEntry(mapname, key, value)
def getMapEntry(self, mapname, key, is_private=False):
value = None
# TODO: check transient data structure first
if value is None:
dlc = self._get_data_layer_client(is_private)
value = dlc.getMapEntry(mapname, key)
return value
def deleteMapEntry(self, mapname, key, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.deleteMapEntry(mapname, key)
def containsMapKey(self, mapname, key, is_private=False):
ret = False
# TODO: check transient data structure first
if not ret:
dlc = self._get_data_layer_client(is_private)
ret = dlc.containsMapKey(mapname, key)
return ret
def retrieveMap(self, mapname, is_private=False):
retmap = {}
# XXX: should follow "read your writes"
# the final result should include:
# 1. all created locally
# 2. all existing globally minus the ones deleted locally
# TODO: 1. check local data layer first: get locally created and deleted
# 2. retrieve all existing globally
dlc = self._get_data_layer_client(is_private)
retmap2 = dlc.retrieveMap(mapname)
if retmap2 is not None:
for k in retmap2:
retmap[k] = retmap2[k]
# TODO: 3. remove the ones deleted locally
return retmap
def getMapKeys(self, mapname, is_private=False):
keys = set()
# XXX: should follow "read your writes"
# the final result should include:
# 1. all created locally
# 2. all existing globally minus the ones deleted locally
# TODO: 1. check local data layer first: get locally created and deleted
# 2. retrieve all existing globally
dlc = self._get_data_layer_client(is_private)
k2 = dlc.getMapKeys(mapname)
if k2 is not None:
# TODO: 3. remove the ones deleted locally
keys = keys.union(k2)
return keys
def clearMap(self, mapname, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.clearMap(mapname)
def deleteMap(self, mapname, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.deleteMap(mapname)
def getMapNames(self, start_index=0, end_index=2147483647, is_private=False):
maps = set()
# XXX: should follow "read your writes"
# the final result should include:
# 1. all created locally
# 2. all existing globally minus the ones deleted locally
# TODO: 1. check local data layer first: get locally created and deleted
# 2. retrieve all existing globally
dlc = self._get_data_layer_client(is_private)
m2 = dlc.getMapNames(start_index, end_index)
if m2 is not None:
# TODO: 3. remove the ones deleted locally
maps = maps.union(m2)
return list(maps)
# set operations
def createSet(self, setname, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.createSet(setname)
def addSetEntry(self, setname, item, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.addSetEntry(setname, item)
def removeSetEntry(self, setname, item, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.removeSetEntry(setname, item)
def containsSetItem(self, setname, item, is_private=False):
ret = False
# TODO: check transient data structure first
if not ret:
dlc = self._get_data_layer_client(is_private)
ret = dlc.containsSetItem(setname, item)
return ret
def retrieveSet(self, setname, is_private=False):
items = set()
# XXX: should follow "read your writes"
# the final result should include:
# 1. all created locally
# 2. all existing globally minus the ones deleted locally
# TODO: 1. check local data layer first: get locally created and deleted
# 2. retrieve all existing globally
dlc = self._get_data_layer_client(is_private)
i2 = dlc.retrieveSet(setname)
if i2 is not None:
# TODO: 3. remove the ones deleted locally
items = items.union(i2)
return items
def clearSet(self, setname, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.clearSet(setname)
def deleteSet(self, setname, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.deleteSet(setname)
def getSetNames(self, start_index=0, end_index=2147483647, is_private=False):
sets = set()
# XXX: should follow "read your writes"
# the final result should include:
# 1. all created locally
# 2. all existing globally minus the ones deleted locally
# TODO: 1. check local data layer first: get locally created and deleted
# 2. retrieve all existing globally
dlc = self._get_data_layer_client(is_private)
s2 = dlc.getSetNames(start_index, end_index)
if s2 is not None:
# TODO: 3. remove the ones deleted locally
sets = sets.union(s2)
return list(sets)
# counter operations
def createCounter(self, countername, count, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.createCounter(countername, count)
def getCounterValue(self, countername, is_private=False):
value = 0
# TODO: check transient data structure first and apply any changes to the global value
dlc = self._get_data_layer_client(is_private)
value = dlc.getCounter(countername)
return value
def incrementCounter(self, countername, increment, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.incrementCounter(countername, increment)
def decrementCounter(self, countername, decrement, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
dlc = self._get_data_layer_client(is_private)
dlc.decrementCounter(countername, decrement)
def deleteCounter(self, countername, is_private=False, is_queued=False):
if is_queued:
# TODO: use transient data structure in memory when the operation is queued
pass
else:
| |
self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_E=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_E=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
            beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
            psi_I   Probability of positive test results for infectious individuals
q Probability of quarantined individuals interaction outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
            initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'gamma':gamma, 'xi':xi, 'mu_I':mu_I, 'mu_0':mu_0, 'nu':nu,
'beta_D':beta_D, 'sigma_D':sigma_D, 'gamma_D':gamma_D, 'mu_D':mu_D,
'beta_local':beta_local, 'beta_D_local':beta_D_local, 'p':p,'q':q,
'theta_E':theta_E, 'theta_I':theta_I, 'phi_E':phi_E, 'phi_I':phi_I, 'psi_E':phi_E, 'psi_I':psi_I }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_I = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of inidividuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array([self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0]) + [self.I]*int(self.numI[0]) + [self.D_E]*int(self.numD_E[0]) + [self.D_I]*int(self.numD_I[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoI': {'currentState':self.E, 'newState':self.I},
'ItoR': {'currentState':self.I, 'newState':self.R},
'ItoF': {'currentState':self.I, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'ItoDI': {'currentState':self.I, 'newState':self.D_I},
'DEtoDI': {'currentState':self.D_E, 'newState':self.D_I},
'DItoR': {'currentState':self.D_I, 'newState':self.R},
'DItoF': {'currentState':self.D_I, 'newState':self.F},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData | |
self._handle_id = self._common_name + "_" + str(id(self.primary))
else:
self._handle_id = self._common_name
def __getattr__(self, name):
  """Forwards attribute lookups to the parent class outside TPU contexts."""
  if _enclosing_tpu_context() is not None:
    raise AttributeError(
        "'{}' not accessible within a TPU context.".format(name))
  return super(TPUVariableMixin, self).__getattr__(name)
def get(self, device=None):
  """Returns a component variable; inside a TPU context an explicit device is required."""
  if _enclosing_tpu_context() is not None and device is None:
    raise NotImplementedError(
        "`TPUVariableMixin.get()` is not supported within a TPU context.")
  return super(TPUVariableMixin, self).get(device=device)
def _get_as_operand(self):
  # Use the read value so this variable can participate in ops as a tensor.
  return self.read_value()
def _get_closest(self):
  """Returns the closest component: the primary inside a TPU context, else defers to the parent class."""
  if _enclosing_tpu_context() is not None:
    return self.primary
  return super(TPUVariableMixin, self)._get_closest()
def numpy(self):
  """Returns the variable's value as a NumPy array; eager mode only."""
  if not context.executing_eagerly():
    raise NotImplementedError(
        "numpy() is only available when eager execution is enabled.")
  return self.read_value().numpy()
@property
def handle(self):
  """The resource handle: the closest component's handle outside a TPU
  context, or the replicated handle inside one."""
  # If we're in a tpu.rewrite(), return the replicated handle.
  tpu_context = _enclosing_tpu_context()
  if tpu_context is None:
    return self._get_closest().handle
  else:
    return tpu_context.get_replicated_var_handle(
        self._handle_id, self._values)
@property
def device(self):
  """The device of whichever handle `self.handle` resolves to."""
  return self.handle.device
def _read_variable_op(self):
  """Raw resource-variable read; records the access on the gradient tape
  when the variable is trainable."""
  if self.trainable:
    tape.variable_accessed(self)
  return gen_resource_variable_ops.read_variable_op(self.handle, self.dtype)
def read_value(self):
  """Reads the variable, using the raw resource op when inside a TPU context."""
  if _enclosing_tpu_context() is not None:
    return self._read_variable_op()
  return super(TPUVariableMixin, self).read_value()
@property
def constraint(self):
  """The constraint of the primary component variable."""
  return self.primary.constraint
def _as_graph_element(self):
  """Graph-element conversion; not available (None) inside a TPU context."""
  if _enclosing_tpu_context() is not None:
    return None
  return super(TPUVariableMixin, self)._as_graph_element()  # pylint: disable=protected-access
@property
def op(self):
  """A `DistributedVarOp` built from the primary component's op metadata."""
  return DistributedVarOp(
      self.primary.op.name, self.primary.op.graph, self.primary.op.traceback,
      self.primary.op.type)
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
  """Converts a variable to a tensor."""
  # pylint: disable=protected-access
  if _enclosing_tpu_context() is None:
    return super(TPUVariableMixin, self)._dense_var_to_tensor(
        dtype=dtype, name=name, as_ref=as_ref)
  # pylint: enable=protected-access
  elif dtype is not None and dtype != self.dtype:
    # In a TPU context, a dtype mismatch is resolved with an explicit cast.
    return math_ops.cast(self.read_value(), dtype)
  else:
    # `as_ref` asks for the handle itself rather than the read value.
    return self.handle if as_ref else self.read_value()
def _validate_colocate_extended(v, extended):
  """Raises ValueError unless `v` was created under the strategy owning `extended`."""
  strategy_of_v = v._distribute_strategy  # pylint: disable=protected-access
  if strategy_of_v.extended is not extended:
    raise ValueError(
        "`colocate_vars_with` must only be passed a variable created in this "
        "tf.distribute.Strategy.scope(), not %s created in scope: %s" %
        (v, strategy_of_v))
def validate_colocate_distributed_variable(v, extended):
  """Checks that `v` is a DistributedVariable created in this strategy's scope."""
  if isinstance(v, DistributedVariable):
    _validate_colocate_extended(v, extended)
    return
  raise ValueError(
      "`colocate_vars_with` must only be passed a variable created in this "
      "tf.distribute.Strategy.scope(), not: %r" % (v,))
def validate_colocate(v, extended):
  """Checks that `v` is any strategy-created variable from this strategy's scope."""
  if hasattr(v, "_distribute_strategy"):
    _validate_colocate_extended(v, extended)
    return
  raise ValueError(
      "`colocate_vars_with` must only be passed a variable created in this "
      "tf.distribute.Strategy.scope(), not: %r" % (v,))
def _apply_aggregation(strategy, value, aggregation, destinations):
  """Combines `value` across replicas per `aggregation` onto `destinations`."""
  # ONLY_FIRST_REPLICA: ignore all but replica 0's value and broadcast it.
  if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
    return strategy.extended.broadcast_to(
        strategy.experimental_local_results(value)[0],
        destinations=destinations)
  # Otherwise map the variable aggregation onto the matching reduce op.
  reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)
  return strategy.extended.reduce_to(reduce_op, value, destinations)
# Error shown when a synchronized variable is updated from a replica context
# without an explicit `aggregation` argument; `{variable_type}` is filled in
# by the caller.  Trailing spaces added so the implicitly-concatenated string
# literals do not run sentences together (previously rendered as
# "tf.Variable(..).e.g." etc.).
_aggregation_error_msg = (
    "You must specify an aggregation method to update a "
    "{variable_type} in Replica Context. You can do so by passing "
    "an explicit value for argument `aggregation` to tf.Variable(..). "
    "e.g. `tf.Variable(..., aggregation=tf.VariableAggregation.SUM)`. "
    "`tf.VariableAggregation` lists the possible aggregation methods. "
    "This is required because {variable_type} should always be "
    "kept in sync. When updating them or assigning to them in a "
    "replica context, we automatically try to aggregate the values "
    "before updating the variable. For this aggregation, we need to "
    "know the aggregation method. "
    "Another alternative is to not try to update such "
    "{variable_type} in replica context, but in cross replica "
    "context. You can enter cross replica context by calling "
    "`tf.distribute.get_replica_context().merge_call(merge_fn, ..)`."
    "Inside `merge_fn`, you can then update the {variable_type} "
    "using `tf.distribute.StrategyExtended.update()`.")
class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable):
  """Class for defining how to restore a MirroredVariable."""

  def __init__(self, mirrored_variable, primary_variable, name):
    # Only the primary component is saved; all components share its value.
    self._mirrored_variable = mirrored_variable
    super(_MirroredSaveable, self).__init__(primary_variable, "", name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into all variables."""
    tensor, = restored_tensors
    # Assign the restored tensor to every component, each on its own device.
    return control_flow_ops.group(tuple(
        _assign_on_device(v.device, v, tensor)
        for v in self._mirrored_variable.values))
class MirroredVariable(DistributedVariable, Mirrored):
  """Holds a map from replica to variables whose values are kept in sync."""

  def __init__(
      self, strategy, device_map, values, aggregation, logical_device=None):
    """Creates the mirrored variable.

    Args:
      strategy: the distribution strategy owning this variable.
      device_map: maps devices to component variables.
      values: the per-replica component variables.
      aggregation: a `tf.VariableAggregation` value; controls how values
        assigned from a replica context are combined.
      logical_device: optional logical device id, forwarded to the base class.
    """
    super(MirroredVariable, self).__init__(
        strategy, device_map, values, logical_device=logical_device)
    self._aggregation = aggregation

  # The arguments to update() are automatically unwrapped so the update()
  # function would normally see regular variables, not MirroredVariables.
  # However, the update function can still operate on wrapped MirroredVariables
  # through object members, captured arguments, etc. This is more likely in an
  # update_non_slot() function (like OptimizerV2._finish), which can
  # update several non-slot variables in one call.
  def _assign_func(self, *args, **kwargs):
    # Dispatches the assign-style callable passed as kwarg `f`, depending on
    # whether we are in cross-replica or replica context.
    with _enter_or_assert_strategy(self._distribute_strategy):
      f = kwargs.pop("f")
      if distribution_strategy_context.in_cross_replica_context():
        update_device = distribute_lib.get_update_device()
        if update_device is not None:
          # We are calling an assign function on the mirrored variable in an
          # update context.
          v = self.get(device=update_device)
          return f(v, *args, **kwargs)

        # We are calling assign on the mirrored variable in cross replica
        # context, use `strategy.extended.update()` to update the variable.
        return self._distribute_strategy.extended.update(
            self, f, args=args, kwargs=kwargs)
      else:
        _assert_replica_context(self._distribute_strategy)
        # We are calling an assign function on the mirrored variable in replica
        # context.
        # We reduce the value we want to assign/add/sub. More details about how
        # we handle the different use cases can be found in the _reduce method.
        # We call the function on each of the mirrored variables with the
        # reduced value.
        if self._aggregation == vs.VariableAggregation.NONE:
          raise ValueError(_aggregation_error_msg.format(
              variable_type="MirroredVariable"))

        def merge_fn(strategy, value, *other_args, **other_kwargs):
          # Aggregate the per-replica values, then update every component.
          v = _apply_aggregation(strategy, value, self._aggregation, self)
          return strategy.extended.update(
              self, f, args=(v,) + other_args, kwargs=other_kwargs)

        return distribution_strategy_context.get_replica_context().merge_call(
            merge_fn, args=args, kwargs=kwargs)

  def assign_sub(self, *args, **kwargs):
    """Subtracts a value from this variable on every replica."""
    assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)

  def assign_add(self, *args, **kwargs):
    """Adds a value to this variable on every replica."""
    assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
    return self._assign_func(f=assign_add_fn, *args, **kwargs)

  def assign(self, *args, **kwargs):
    """Assigns a value to this variable on every replica."""
    assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
    return self._assign_func(f=assign_fn, *args, **kwargs)

  @property
  def aggregation(self):
    """The `tf.VariableAggregation` used for replica-context assignments."""
    return self._aggregation

  def _get_cross_replica(self):
    # Prefer the component on the current device; fall back to the primary.
    # Components are kept in sync, so reading any one of them is valid.
    device = device_util.canonicalize(device_util.current())
    replica_id = self._device_map.replica_for_device(device)
    if replica_id is None:
      return array_ops.identity(self.primary)
    return array_ops.identity(self._values[replica_id])

  def _as_graph_element(self):
    # pylint: disable=protected-access
    if distribution_strategy_context.in_cross_replica_context():
      return self.primary._as_graph_element()
    return self.get()._as_graph_element()

  def _gather_saveables_for_checkpoint(self):
    """Overrides Trackable method.

    This allows both name-based and object-based save and restore of
    MirroredVariables.

    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    def _saveable_factory(name=self._common_name):
      return _MirroredSaveable(self, self.primary, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    # Try to avoid assignments to and other mutations of MirroredVariable
    # state except through a DistributionStrategy.extended.update() call.
    assert not as_ref
    return ops.internal_convert_to_tensor(
        self.get(), dtype=dtype, name=name, as_ref=as_ref)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
  # Delegates to the variable's own dense-tensor conversion.
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access

ops.register_tensor_conversion_function(MirroredVariable,
                                        _tensor_conversion_mirrored)
def _enclosing_tpu_context():
  """Returns the enclosing `XLAControlFlowContext` of the default graph, or None."""
  # pylint: disable=protected-access
  tpu_context = ops.get_default_graph()._get_control_flow_context()
  # pylint: enable=protected-access
  # Walk outward through nested control-flow contexts until an XLA context
  # is found or the chain runs out.
  while tpu_context is not None and not isinstance(
      tpu_context, control_flow_ops.XLAControlFlowContext):
    tpu_context = tpu_context.outer_context
  return tpu_context
def is_distributed_variable(v):
  """Determine if a variable is ds variable or TPU mirrored variable.

  Returns True for any `DistributedVariable` instance (TPU mirrored
  variables subclass it).
  """
  return isinstance(v, DistributedVariable)
class TPUMirroredVariable(TPUVariableMixin, MirroredVariable):
  """Holds a map from replica to TPU variables whose values are kept in sync."""

  def _assign_func(self, *args, **kwargs):
    # Inside a TPU context, cross-replica assignments go straight through
    # `strategy.extended.update()`; everything else defers to the regular
    # MirroredVariable dispatch.
    with _enter_or_assert_strategy(self._distribute_strategy):
      if (distribution_strategy_context.in_cross_replica_context()
          and (_enclosing_tpu_context() is not None)):
        f = kwargs.pop("f")
        return self._distribute_strategy.extended.update(
            self, f, args=args, kwargs=kwargs)
      else:
        return MirroredVariable._assign_func(self, *args, **kwargs)

  def assign_sub(self, *args, **kwargs):
    """Subtracts a value, using the raw resource-variable op."""
    assign_sub_fn = _make_raw_assign_fn(
        gen_resource_variable_ops.assign_sub_variable_op)
    return self._assign_func(f=assign_sub_fn, *args, **kwargs)

  def assign_add(self, *args, **kwargs):
    """Adds a value, using the raw resource-variable op."""
    assign_add_fn = _make_raw_assign_fn(
        gen_resource_variable_ops.assign_add_variable_op)
    return self._assign_func(f=assign_add_fn, *args, **kwargs)

  def assign(self, *args, **kwargs):
    """Assigns a value, using the raw resource-variable op."""
    assign_fn = _make_raw_assign_fn(
        gen_resource_variable_ops.assign_variable_op)
    return self._assign_func(f=assign_fn, *args, **kwargs)
class _SyncOnReadSaveable(saver.BaseSaverBuilder.SaveableObject):
  """Class for defining how to restore a SyncOnReadVariable."""

  def __init__(self, sync_on_read_variable, name):
    self._sync_on_read_variable = sync_on_read_variable

    # We use a callable so that we don't have to evaluate this expression
    # in the case where we are trying to restore instead of save.
    def tensor():
      # Delegate the read to the owning strategy.
      strategy = sync_on_read_variable._distribute_strategy  # pylint: disable=protected-access
      return strategy.extended.read_var(sync_on_read_variable)

    spec = saver.BaseSaverBuilder.SaveSpec(
        tensor=tensor,
        slice_spec="",
        name=name,
        dtype=sync_on_read_variable.dtype,
        device=sync_on_read_variable.primary.device)
    super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into all variables."""
    tensor, = restored_tensors
    return self._sync_on_read_variable.assign(tensor)
def _assert_replica_context(strategy):
  """Raises RuntimeError unless called from a replica context of `strategy`."""
  replica_context = distribution_strategy_context.get_replica_context()
  if not replica_context:
    raise RuntimeError(
        "Replica-local variables may only be assigned in a replica context.")
  # NOTE(review): this second branch rejects a replica context that belongs to
  # a *different* strategy, but reuses the same error text as the first.
  if replica_context.strategy is not strategy:
    raise RuntimeError(
        "Replica-local variables may only be assigned in a replica context.")
class SyncOnReadVariable(DistributedVariable):
"""Holds a map from replica to variables whose values are reduced on save."""
def __init__(
    self, strategy, device_map, values, aggregation, logical_device=None):
  # `aggregation` controls how per-replica values are combined when this
  # variable is accessed from cross-replica context.
  self._aggregation = aggregation
  super(SyncOnReadVariable, self).__init__(
      strategy, device_map, values, logical_device=logical_device)
def assign_sub(self, *args, **kwargs):
  """Subtracts a value; in cross-replica context every component is updated."""
  with _enter_or_assert_strategy(self._distribute_strategy):
    if distribution_strategy_context.in_cross_replica_context():
      # Explicitly disallowed for SUM aggregation (see error message).
      if self._aggregation == vs.VariableAggregation.SUM:
        raise ValueError(
            "SyncOnReadVariable does not support `assign_sub` in "
            "cross-replica context when aggregation is set to "
            "`tf.VariableAggregation.SUM`.")
      return control_flow_ops.group(tuple(
          _assign_sub_on_device(v.device, v, args[0]) for v in self._values))
    else:
      return self.get().assign_sub(*args, **kwargs)
def assign_add(self, *args, **kwargs):
with _enter_or_assert_strategy(self._distribute_strategy):
if distribution_strategy_context.in_cross_replica_context():
if self._aggregation == vs.VariableAggregation.SUM:
raise ValueError(
"SyncOnReadVariable does not support `assign_add` in "
"cross-replica context when aggregation is set to "
"`tf.VariableAggregation.SUM`.")
return control_flow_ops.group(tuple(
_assign_add_on_device(v.device, v, args[0]) | |
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import os.path, re, sys, glob
import ctypes
import ctypes.util
def _environ_path(name):
    """Return the value of environment variable `name` split on ':', or [] if unset."""
    value = os.environ.get(name)
    if value is None:
        return []
    return value.split(":")
class LibraryLoader(object):
    """Base class for locating and loading shared libraries by name.

    Subclasses supply platform-specific candidate paths through
    `getplatformpaths`; extra search directories can be added via
    `other_dirs`.
    """

    def __init__(self):
        # Extra directories to search, set through add_library_search_dirs().
        self.other_dirs = []

    def load_library(self, libname):
        """Given the name of a library, load it.

        Raises ImportError when no candidate path exists on disk.
        """
        paths = self.getpaths(libname)

        for path in paths:
            if os.path.exists(path):
                return self.load(path)

        raise ImportError("%s not found." % libname)

    def load(self, path):
        """Given a path to a library, load it."""
        try:
            # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
            # of the default RTLD_LOCAL. Without this, you end up with
            # libraries not being loadable, resulting in "Symbol not found"
            # errors
            if sys.platform == 'darwin':
                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
            else:
                return ctypes.cdll.LoadLibrary(path)
        except OSError as e:
            # Fixed from the Python-2-only `except OSError,e:` form; `as`
            # works on Python 2.6+ and Python 3.
            raise ImportError(e)

    def getpaths(self, libname):
        """Return a list of paths where the library might be found."""
        if os.path.isabs(libname):
            yield libname
        else:
            # Platform-specific candidates first, then ctypes' own search.
            for path in self.getplatformpaths(libname):
                yield path

            path = ctypes.util.find_library(libname)
            if path:
                yield path

    def getplatformpaths(self, libname):
        """Platform-specific candidate paths; overridden by subclasses."""
        return []
# Darwin (Mac OS X)
class DarwinLibraryLoader(LibraryLoader):
    """Library loader implementing the Darwin (Mac OS X) dylib search order."""

    # Filename patterns tried for a bare library name.
    name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
                    "%s.so", "%s.bundle", "%s"]

    def getplatformpaths(self, libname):
        """Yield candidate paths for `libname` in every search directory."""
        # NOTE(review): `os.path.pathsep` is ':'; this looks like it was meant
        # to be `os.path.sep` to detect a path-like name -- confirm against
        # upstream ctypesgen before changing.
        if os.path.pathsep in libname:
            names = [libname]
        else:
            names = [format % libname for format in self.name_formats]

        for dir in self.getdirs(libname):
            for name in names:
                yield os.path.join(dir, name)

    def getdirs(self, libname):
        '''Implements the dylib search as specified in Apple documentation:

        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
        DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''
        dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
        if not dyld_fallback_library_path:
            # Default fallback locations when the env var is unset.
            dyld_fallback_library_path = [os.path.expanduser('~/lib'),
                                          '/usr/local/lib', '/usr/lib']

        dirs = []

        if '/' in libname:
            # Path-like names consult DYLD_LIBRARY_PATH only.
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
        else:
            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))

        dirs.extend(self.other_dirs)
        dirs.append(".")

        # Inside a frozen .app bundle, also search its Frameworks directory.
        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
            dirs.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks'))

        dirs.extend(dyld_fallback_library_path)

        return dirs
# Posix
class PosixLibraryLoader(LibraryLoader):
    """Library loader for generic POSIX systems, approximating ld.so's search."""

    # Lazily-built mapping of library names / sonames to full paths.
    _ld_so_cache = None

    def _create_ld_so_cache(self):
        # Recreate search path followed by ld.so.  This is going to be
        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
        # not be up-to-date).  Used only as fallback for distros without
        # /sbin/ldconfig.
        #
        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.

        directories = []
        for name in ("LD_LIBRARY_PATH",
                     "SHLIB_PATH",  # HPUX
                     "LIBPATH",  # OS/2, AIX
                     "LIBRARY_PATH",  # BE/OS
                     ):
            if name in os.environ:
                directories.extend(os.environ[name].split(os.pathsep))
        directories.extend(self.other_dirs)
        directories.append(".")

        try:
            directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
        except IOError:
            pass

        directories.extend(['/lib', '/usr/lib', '/lib64', '/usr/lib64'])

        cache = {}
        lib_re = re.compile(r'lib(.*)\.s[ol]')
        # NOTE(review): `ext_re` is never used below.
        ext_re = re.compile(r'\.s[ol]$')
        for dir in directories:
            try:
                for path in glob.glob("%s/*.s[ol]*" % dir):
                    file = os.path.basename(path)

                    # Index by filename
                    if file not in cache:
                        cache[file] = path

                    # Index by library name
                    match = lib_re.match(file)
                    if match:
                        library = match.group(1)
                        if library not in cache:
                            cache[library] = path
            except OSError:
                pass

        self._ld_so_cache = cache

    def getplatformpaths(self, libname):
        """Yield the cached path for `libname`, then ctypes' own guess."""
        if self._ld_so_cache is None:
            self._create_ld_so_cache()

        result = self._ld_so_cache.get(libname)
        if result:
            yield result

        # NOTE(review): find_library may return a bare soname; joining it to
        # "/lib" can produce a path that does not exist -- verify intent.
        path = ctypes.util.find_library(libname)
        if path:
            yield os.path.join("/lib", path)
# Windows
class _WindowsLibrary(object):
    """Wraps one DLL loaded through both the cdecl and stdcall loaders,
    resolving symbols from whichever convention defines them."""

    def __init__(self, path):
        self.cdll = ctypes.cdll.LoadLibrary(path)
        self.windll = ctypes.windll.LoadLibrary(path)

    def __getattr__(self, name):
        # Try cdecl first; fall back to stdcall (its AttributeError, if any,
        # propagates unchanged -- same effect as the original re-raise).
        try:
            return getattr(self.cdll, name)
        except AttributeError:
            return getattr(self.windll, name)
class WindowsLibraryLoader(LibraryLoader):
    """Library loader for Windows (and Cygwin) DLLs."""

    # DLL naming patterns tried for a bare library name.
    name_formats = ["%s.dll", "lib%s.dll", "%slib.dll"]

    def load_library(self, libname):
        """Load `libname`, falling back to ctypes.cdll attribute lookups."""
        try:
            result = LibraryLoader.load_library(self, libname)
        except ImportError:
            result = None
            if os.path.sep not in libname:
                # Try each naming pattern through ctypes' own loader.
                # NOTE(review): `WindowsError` only exists on Windows (Py2)
                # or as an OSError alias on Windows Py3.
                for name in self.name_formats:
                    try:
                        result = getattr(ctypes.cdll, name % libname)
                        if result:
                            break
                    except WindowsError:
                        result = None
            if result is None:
                try:
                    result = getattr(ctypes.cdll, libname)
                except WindowsError:
                    result = None
            if result is None:
                raise ImportError("%s not found." % libname)
        return result

    def load(self, path):
        # Wrap so both cdecl and stdcall symbols resolve.
        return _WindowsLibrary(path)

    def getplatformpaths(self, libname):
        """Yield candidate DLL paths for every naming pattern."""
        if os.path.sep not in libname:
            for name in self.name_formats:
                dll_in_current_dir = os.path.abspath(name % libname)
                if os.path.exists(dll_in_current_dir):
                    yield dll_in_current_dir
                path = ctypes.util.find_library(name % libname)
                if path:
                    yield path
# Platform switching

# If your value of sys.platform does not appear in this dict, please contact
# the Ctypesgen maintainers.

loaderclass = {
    "darwin": DarwinLibraryLoader,
    "cygwin": WindowsLibraryLoader,
    "win32": WindowsLibraryLoader
}

# Select the loader for this platform; unknown platforms are treated as POSIX.
loader = loaderclass.get(sys.platform, PosixLibraryLoader)()

def add_library_search_dirs(other_dirs):
    # Module-level hook used by generated bindings to extend the search path.
    loader.other_dirs = other_dirs

# Public entry point bound to the chosen loader instance.
load_library = loader.load_library

del loaderclass

# End loader

add_library_search_dirs([])
# No libraries

# No modules

# --- ctypesgen-generated scalar typedefs mirroring SAI's saitypes.h ---
sai_status_t = c_int32 # /home/omer/P4/SAI/inc/saitypes.h: 84
sai_switch_profile_id_t = c_uint32 # /home/omer/P4/SAI/inc/saitypes.h: 85
sai_vlan_id_t = c_uint16 # /home/omer/P4/SAI/inc/saitypes.h: 86
sai_attr_id_t = c_uint32 # /home/omer/P4/SAI/inc/saitypes.h: 87
sai_cos_t = c_uint8 # /home/omer/P4/SAI/inc/saitypes.h: 88
sai_queue_index_t = c_uint8 # /home/omer/P4/SAI/inc/saitypes.h: 89
sai_mac_t = c_uint8 * 6 # /home/omer/P4/SAI/inc/saitypes.h: 90
sai_ip4_t = c_uint32 # /home/omer/P4/SAI/inc/saitypes.h: 91
sai_ip6_t = c_uint8 * 16 # /home/omer/P4/SAI/inc/saitypes.h: 92
sai_switch_hash_seed_t = c_uint32 # /home/omer/P4/SAI/inc/saitypes.h: 93
sai_uint64_t = c_uint64 # /home/omer/P4/SAI/inc/saitypes.h: 107
sai_int64_t = c_int64 # /home/omer/P4/SAI/inc/saitypes.h: 108
sai_uint32_t = c_uint32 # /home/omer/P4/SAI/inc/saitypes.h: 109
sai_int32_t = c_int32 # /home/omer/P4/SAI/inc/saitypes.h: 110
sai_uint16_t = c_uint16 # /home/omer/P4/SAI/inc/saitypes.h: 111
sai_int16_t = c_int16 # /home/omer/P4/SAI/inc/saitypes.h: 112
sai_uint8_t = c_uint8 # /home/omer/P4/SAI/inc/saitypes.h: 113
sai_int8_t = c_int8 # /home/omer/P4/SAI/inc/saitypes.h: 114
sai_size_t = c_size_t # /home/omer/P4/SAI/inc/saitypes.h: 115
sai_object_id_t = c_uint64 # /home/omer/P4/SAI/inc/saitypes.h: 116
sai_pointer_t = POINTER(None) # /home/omer/P4/SAI/inc/saitypes.h: 117
# /home/omer/P4/SAI/inc/saitypes.h: 143
# Generated struct: variable-length list of SAI object ids (count + pointer).
class struct__sai_object_list_t(Structure):
    pass

struct__sai_object_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_object_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(sai_object_id_t)),
]

sai_object_list_t = struct__sai_object_list_t # /home/omer/P4/SAI/inc/saitypes.h: 143

# Generated enum: the common CRUD API identifiers.
enum__sai_common_api_t = c_int # /home/omer/P4/SAI/inc/saitypes.h: 154

SAI_COMMON_API_CREATE = 0 # /home/omer/P4/SAI/inc/saitypes.h: 154

SAI_COMMON_API_REMOVE = 1 # /home/omer/P4/SAI/inc/saitypes.h: 154

SAI_COMMON_API_SET = 2 # /home/omer/P4/SAI/inc/saitypes.h: 154

SAI_COMMON_API_GET = 3 # /home/omer/P4/SAI/inc/saitypes.h: 154

SAI_COMMON_API_MAX = 4 # /home/omer/P4/SAI/inc/saitypes.h: 154

sai_common_api_t = enum__sai_common_api_t # /home/omer/P4/SAI/inc/saitypes.h: 154
# Generated enum: every SAI object type, one module-level int constant each.
enum__sai_object_type_t = c_int # /home/omer/P4/SAI/inc/saitypes.h: 221

SAI_OBJECT_TYPE_NULL = 0 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_PORT = 1 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_LAG = 2 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_VIRTUAL_ROUTER = 3 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_NEXT_HOP = 4 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_NEXT_HOP_GROUP = 5 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ROUTER_INTERFACE = 6 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ACL_TABLE = 7 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ACL_ENTRY = 8 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ACL_COUNTER = 9 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ACL_RANGE = 10 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ACL_TABLE_GROUP = 11 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER = 12 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_HOSTIF = 13 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_MIRROR_SESSION = 14 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_SAMPLEPACKET = 15 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_STP = 16 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_HOSTIF_TRAP_GROUP = 17 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_POLICER = 18 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_WRED = 19 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_QOS_MAP = 20 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_QUEUE = 21 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_SCHEDULER = 22 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_SCHEDULER_GROUP = 23 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_BUFFER_POOL = 24 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_BUFFER_PROFILE = 25 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP = 26 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_LAG_MEMBER = 27 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_HASH = 28 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_UDF = 29 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_UDF_MATCH = 30 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_UDF_GROUP = 31 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_FDB_ENTRY = 32 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_SWITCH = 33 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_HOSTIF_TRAP = 34 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_HOSTIF_TABLE_ENTRY = 35 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_NEIGHBOR_ENTRY = 36 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_ROUTE_ENTRY = 37 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_VLAN = 38 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_VLAN_MEMBER = 39 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_HOSTIF_PACKET = 40 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_TUNNEL_MAP = 41 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_TUNNEL = 42 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY = 43 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_FDB_FLUSH = 44 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER = 45 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_STP_PORT = 46 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_RPF_GROUP = 47 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_RPF_GROUP_MEMBER = 48 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_L2MC_GROUP = 49 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_L2MC_GROUP_MEMBER = 50 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_IPMC_GROUP = 51 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_IPMC_GROUP_MEMBER = 52 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_L2MC_ENTRY = 53 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_IPMC_ENTRY = 54 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_MCAST_FDB_ENTRY = 55 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_HOSTIF_USER_DEFINED_TRAP = 56 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_BRIDGE = 57 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_BRIDGE_PORT = 58 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY = 59 # /home/omer/P4/SAI/inc/saitypes.h: 221
SAI_OBJECT_TYPE_MAX = 60 # /home/omer/P4/SAI/inc/saitypes.h: 221

sai_object_type_t = enum__sai_object_type_t # /home/omer/P4/SAI/inc/saitypes.h: 221
# /home/omer/P4/SAI/inc/saitypes.h: 226
# Generated structs: typed (count, pointer) list wrappers and (min, max)
# ranges for each scalar width, plus the VLAN id list.
class struct__sai_u8_list_t(Structure):
    pass

struct__sai_u8_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_u8_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(c_uint8)),
]

sai_u8_list_t = struct__sai_u8_list_t # /home/omer/P4/SAI/inc/saitypes.h: 226

# /home/omer/P4/SAI/inc/saitypes.h: 235
class struct__sai_s8_list_t(Structure):
    pass

struct__sai_s8_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_s8_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(c_int8)),
]

sai_s8_list_t = struct__sai_s8_list_t # /home/omer/P4/SAI/inc/saitypes.h: 235

# /home/omer/P4/SAI/inc/saitypes.h: 240
class struct__sai_u16_list_t(Structure):
    pass

struct__sai_u16_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_u16_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(c_uint16)),
]

sai_u16_list_t = struct__sai_u16_list_t # /home/omer/P4/SAI/inc/saitypes.h: 240

# /home/omer/P4/SAI/inc/saitypes.h: 245
class struct__sai_s16_list_t(Structure):
    pass

struct__sai_s16_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_s16_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(c_int16)),
]

sai_s16_list_t = struct__sai_s16_list_t # /home/omer/P4/SAI/inc/saitypes.h: 245

# /home/omer/P4/SAI/inc/saitypes.h: 250
class struct__sai_u32_list_t(Structure):
    pass

struct__sai_u32_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_u32_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(c_uint32)),
]

sai_u32_list_t = struct__sai_u32_list_t # /home/omer/P4/SAI/inc/saitypes.h: 250

# /home/omer/P4/SAI/inc/saitypes.h: 255
class struct__sai_s32_list_t(Structure):
    pass

struct__sai_s32_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_s32_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(c_int32)),
]

sai_s32_list_t = struct__sai_s32_list_t # /home/omer/P4/SAI/inc/saitypes.h: 255

# /home/omer/P4/SAI/inc/saitypes.h: 260
class struct__sai_u32_range_t(Structure):
    pass

struct__sai_u32_range_t.__slots__ = [
    'min',
    'max',
]
struct__sai_u32_range_t._fields_ = [
    ('min', c_uint32),
    ('max', c_uint32),
]

sai_u32_range_t = struct__sai_u32_range_t # /home/omer/P4/SAI/inc/saitypes.h: 260

# /home/omer/P4/SAI/inc/saitypes.h: 265
class struct__sai_s32_range_t(Structure):
    pass

struct__sai_s32_range_t.__slots__ = [
    'min',
    'max',
]
struct__sai_s32_range_t._fields_ = [
    ('min', c_int32),
    ('max', c_int32),
]

sai_s32_range_t = struct__sai_s32_range_t # /home/omer/P4/SAI/inc/saitypes.h: 265

# /home/omer/P4/SAI/inc/saitypes.h: 278
class struct__sai_vlan_list_t(Structure):
    pass

struct__sai_vlan_list_t.__slots__ = [
    'count',
    'list',
]
struct__sai_vlan_list_t._fields_ = [
    ('count', c_uint32),
    ('list', POINTER(sai_vlan_id_t)),
]

sai_vlan_list_t = struct__sai_vlan_list_t
#!/usr/bin/env python
# coding=utf-8
"""
Trees for generating.
"""
from __future__ import unicode_literals
from collections import namedtuple, deque
from pytreex.core.node import T
__author__ = "<NAME>"
__date__ = "2014"
class NodeData(namedtuple('NodeData', ['t_lemma', 'formeme'])):
    """Immutable payload of one tree node (t-lemma + formeme); carries no
    parent/child information."""
def _group_lists(l_long, l_short):
    """Take two lists, a longer and a shorter one, and group them into equally long lists of
    sublists. One of the resulting sublists is always composed of one-element sublists and
    the other of longer sublists.

    @param l_long: a "longer" list
    @param l_short: a "shorter" list
    @return: a pair of lists of sublists (in the order of parameters given)
    """
    port_size, bigger_ports = divmod(len(l_long), len(l_short))
    if port_size == 0:  # call itself the other way round if l_long is actually shorter than l_short
        l_short, l_long = _group_lists(l_short, l_long)
        return l_long, l_short
    new_long = []
    # `range` replaces the Python-2-only `xrange` (same iteration behavior,
    # works on both Python 2 and 3).
    for port_no in range(len(l_short)):
        # the first `bigger_ports` portions get one extra element each
        if port_no < bigger_ports:
            new_long.append(l_long[(port_size + 1) * port_no: (port_size + 1) * (port_no + 1)])
        else:
            new_long.append(l_long[port_size * port_no + bigger_ports:port_size * (port_no + 1) + bigger_ports])
    return new_long, [[item] for item in l_short]
class TreeData(object):
"""This stores all node data for a tree, as well as parent-child information."""
__slots__ = ['nodes', 'parents']
def __init__(self, nodes=None, parents=None):
    """Initialize, either copying the given node/parent arrays or creating a
    tree with just the technical root.

    @param nodes: list of `NodeData` (index 0 = technical root), or None
    @param parents: list of parent indexes (-1 for the root), or None
    """
    if nodes and parents:
        # copy structure if it's given
        self.nodes = list(nodes)
        self.parents = list(parents)
    else:
        # add just technical root
        self.nodes = [NodeData(None, None)]
        self.parents = [-1]
@staticmethod
def from_ttree(ttree):
    """Copy the tree from a T-tree representation (just structure, t-lemmas and formemes)."""
    tree = TreeData()
    tnodes = ttree.get_descendants(ordered=True)
    # number t-nodes in document order; the technical root gets ord 0
    id2ord = {tnode.id: num for num, tnode in enumerate(tnodes, start=1)}
    id2ord[ttree.id] = 0
    for tnode in tnodes:
        tree.nodes.append(NodeData(tnode.t_lemma, tnode.formeme))
        tree.parents.append(id2ord[tnode.parent.id])
    return tree
@staticmethod
def from_string(string):
    """Parse a string representation of the tree, as returned by `__unicode__`."""
    tree = TreeData()
    # skip the first token (the technical root), then read one node per token
    for token in string.split(' ')[1:]:
        _, parent_idx, t_lemma, formeme = token.split('|')
        tree.nodes.append(NodeData(t_lemma, formeme))
        tree.parents.append(int(parent_idx))
    return tree
def create_child(self, parent_idx, child_idx, child_data):
    """Create a child of the given node at the given position, shifting remaining nodes
    to the right.

    @param parent_idx: index of the parent node
    @param child_idx: index of the newly created child node (or boolean: left/right of the current parent?)
    @param child_data: the child node itself as a `NodeData` instance
    @return: the new child index
    """
    # a boolean child_idx means: just right (True) or left (False) of the parent
    if isinstance(child_idx, bool):
        child_idx = parent_idx + 1 if child_idx else parent_idx
    self.nodes.insert(child_idx, child_data)
    self.parents.insert(child_idx, parent_idx)
    # every parent reference at or beyond the insertion point shifts by one
    shifted = []
    for parent_ref in self.parents:
        shifted.append(parent_ref + 1 if parent_ref >= child_idx else parent_ref)
    self.parents = shifted
    return child_idx
def move_node(self, node_idx, target_pos):
"""Move the node on the given position to another position, shifting nodes in between
and updating parent indexes along the way.
@param node_idx: the index of the node to be moved
@param target_pos: the desired target position (index after the moving)
@return: None
"""
if node_idx > target_pos:
self.nodes = (self.nodes[:target_pos] + [self.nodes[node_idx]] +
self.nodes[target_pos:node_idx] + self.nodes[node_idx + 1:])
self.parents = (self.parents[:target_pos] + [self.parents[node_idx]] +
self.parents[target_pos:node_idx] + self.parents[node_idx + 1:])
for pos in xrange(len(self)):
if self.parents[pos] == node_idx:
self.parents[pos] = target_pos
elif self.parents[pos] >= target_pos and self.parents[pos] < node_idx:
self.parents[pos] += 1
elif node_idx < target_pos:
self.nodes = (self.nodes[:node_idx] + self.nodes[node_idx + 1:target_pos + 1] +
[self.nodes[node_idx]] + self.nodes[target_pos + 1:])
self.parents = (self.parents[:node_idx] + self.parents[node_idx + 1:target_pos + 1] +
[self.parents[node_idx]] + self.parents[target_pos + 1:])
for pos in xrange(len(self)):
if self.parents[pos] == node_idx:
self.parents[pos] = target_pos
elif self.parents[pos] > node_idx and self.parents[pos] <= target_pos:
self.parents[pos] -= 1
def remove_node(self, node_idx):
"""Remove a node, rehang all its children to its parent."""
for pos in xrange(len(self)):
if self.parents[pos] == node_idx:
self.parents[pos] = self.parents[node_idx]
self.move_node(node_idx, len(self)-1)
del self.parents[-1]
del self.nodes[-1]
def subtree_bound(self, parent_idx, right):
"""Return the subtree bound of the given node (furthermost index belonging to the subtree),
going left or right.
NB: This assumes the trees are projective.
@param parent_idx: index of the node to examine
@param right: if True, return rightmost subtree bound; if False, return leftmost bound
@return: the furthermost index belonging to the subtree of the given node in the given \
direction
"""
move = 1 if right else -1
cur_idx = parent_idx + move
while cur_idx >= 0 and cur_idx < len(self):
# the current node is not in the subtree => the last position was the boundary
if not self.is_descendant(parent_idx, cur_idx):
return cur_idx - move
cur_idx += move
# return 0 or len(self-1)
return cur_idx - move
def children_idxs(self, parent_idx, left_only=False, right_only=False):
"""Return the indexes of the children of the given node.
@param parent_idx: the node whose children should be found
@param left_only: only look for left children (preceding the parent)
@param right_only: only look for right children (following the parent)
@return: an array of node indexes matching the criteria (may be empty)
"""
if left_only:
return [idx for idx, val in enumerate(self.parents[:parent_idx]) if val == parent_idx]
if right_only:
return [idx for idx, val in enumerate(self.parents[parent_idx + 1:], start=parent_idx + 1)
if val == parent_idx]
return [idx for idx, val in enumerate(self.parents) if val == parent_idx]
def children_num(self, parent_idx):
return sum(1 for val in self.parents if val == parent_idx)
def node_depth(self, node_idx):
"""Return the depth of the given node (the technical root has a depth=0).
@param node_idx: index of the node to examine
@return: An integer indicating the length of the path from the node to the technical root
"""
depth = 0
while node_idx > 0:
node_idx = self.parents[node_idx]
depth += 1
return depth
def is_descendant(self, anc_idx, desc_idx):
"""Check if a node is a descendant of another node (there is a directed
path between them.
@param anc_idx: the "ancestor node" index – the node where the path should begin
@param desc_idx: the "descendant node" index – the node where the path should end
@return: True if the path between the nodes exist, False otherwise
"""
node_idx = desc_idx
while node_idx > 0:
node_idx = self.parents[node_idx]
if node_idx == anc_idx:
return True
return anc_idx == node_idx
def is_right_child(self, node_idx):
return self.parents[node_idx] < node_idx
def __hash__(self):
# TODO: this is probably slow... make it faster, possibly replace the lists with tuples?
return hash(tuple(self.nodes)) ^ hash(tuple(self.parents))
def __eq__(self, other):
return (self is other or
((self.parents is other.parents or self.parents == other.parents) and
(self.nodes is other.nodes or self.nodes == other.nodes)))
def __ne__(self, other):
return not self.__eq__(other)
def __unicode__(self):
return ' '.join(['%d|%d|%s|%s' % (idx, parent_idx, node.t_lemma, node.formeme)
for idx, (parent_idx, node)
in enumerate(zip(self.parents, self.nodes))])
    def __getitem__(self, idx):
        # indexing the tree returns the node data (NodeData) at the given position
        return self.nodes[idx]
    def __str__(self):
        # Python 2-style: encode the unicode representation as a UTF-8 byte string
        # (NB: relies on the Python 2-only `unicode` builtin)
        return unicode(self).encode('UTF-8', 'replace')
    def __repr__(self):
        # reuse the string representation (handy in logs / interactive sessions)
        return str(self)
    def __len__(self):
        # tree length = number of nodes, including the technical root
        return len(self.nodes)
def clone(self):
return TreeData(nodes=self.nodes, parents=self.parents)
def to_tok_list(self):
"""Convert the tree to a list of tokens -- (word, empty tag) pairs."""
return [(n.t_lemma, None) for n in self.nodes[1:]]
def create_ttree(self):
"""Convert the TreeData structure to a regular t-tree."""
tnodes = [T(data={'ord': 0})] + [T(data={'t_lemma': node.t_lemma,
'formeme': node.formeme,
'ord': i})
for i, node in enumerate(self.nodes[1:], start=1)]
for parent_idx, tnode in zip(self.parents[1:], tnodes[1:]):
tnode.parent = tnodes[parent_idx]
return tnodes[0]
def get_subtree(self, node_idxs):
"""Return a subtree of the current tree that only contains the nodes whose indexes
are given in the parameter.
@param node_idxs: a set or list of valid node indexes (integers) that are to be included \
in the returned subtree. If a list is given, it may be changed.
"""
if isinstance(node_idxs, set):
node_idxs |= set([0])
node_idxs = sorted(node_idxs)
else:
if 0 not in node_idxs:
node_idxs.append(0)
node_idxs.sort()
idx_mapping = {old_idx: new_idx for old_idx, new_idx in zip(node_idxs, range(len(node_idxs)))}
idx_mapping[-1] = -1 # “mapping” for technical roots
new_parents = [idx_mapping[parent] for idx, parent in enumerate(self.parents)
if idx in node_idxs]
new_nodes = [node for idx, node in enumerate(self.nodes) if idx in node_idxs]
return TreeData(new_nodes, new_parents)
def get_subtrees_list(self, start_idxs, adding_idxs):
"""Return a list of subtrees that originate from the current tree; starting from a
subtree composed of nodes specified in start_idxs and gradually adding nodes specified
in adding_idxs.
Will not give the subtree with start_idxs only, starts with start_idxs and the first
element of adding_idxs.
@param start_idxs: list of indexes for a subtree to start with
@param adding_idxs: list of | |
newly sampled rows back onto the sample dataframe.
: param sample_df: Dataframe containing rows which were sampled from original_df
: param original_df: Dataframe from which the sample was drawn.
: param n_close_points: Number of points required by user to be 'close' to each point in the sample.
: return new_sample_df: Dataframe containing new sample of same size as input sample_df
"""
# Keep only those points which have > n_close_points close to them.
new_sample_df = sample_df.loc[sample_df['close_points'] > n_close_points]
# Create a list of the 'Slope/Elevation values of the points which are removed from the dataframe
criteria = sample_df.loc[sample_df['close_points'] <= n_close_points, 'Slope/Elevation']
# Add one row matching each of these criteria to the new sample dataframe
for i in range(0,len(criteria)):
# For each of these create a subset of df meeting criteria
df = original_df.loc[original_df['Slope/Elevation'] == criteria.iloc[i]]
# Randomly sample one row from those available
row_to_add = df.sample(n=1)
# Add it to dataframe
new_sample_df = new_sample_df.append(row_to_add, sort = True).reset_index(drop = True)
# Delete the close points column (so it can be recreated)
new_sample_df = new_sample_df.drop(['close_points'], axis=1)
return new_sample_df
def resample_prioritise_new_points(sample_df, original_df, n_close_points):
    """
    Removes all points from a dataframe which were new samples and which have less than a specified number of points close to them.
    For each row which is removed, randomly selects a row from the original dataframe which has the same
    'Slope/Elevation' category as the removed row.
    If there are no such points:
        Then removes one of the original sample points which have less than the specified near neighbour number.
    Joins the newly sampled rows back onto the sample dataframe.

    NB: uses ``pd.concat`` rather than ``DataFrame.append``, which was deprecated
    in pandas 1.4 and removed in pandas 2.0.

    : param sample_df: Dataframe containing sample.
    : param original_df: Dataframe from which the sample was drawn.
    : param n_close_points: Number of points required by user to be 'close' to each point in the sample.
    : return new_sample_df: Dataframe containing new sample of same size as input sample_df
    """
    is_new = sample_df['origin'] == 'new_point'
    too_few = sample_df['close_points'] < n_close_points
    # If there are any new points with not enough near neighbours then resample these
    if (too_few & is_new).any():
        # Keep new points which have enough close points, plus all of the original points
        new_sample_df = sample_df.loc[((sample_df['close_points'] >= n_close_points) & is_new)
                                      | (sample_df['origin'] == 'original_point')]
        # 'Slope/Elevation' values of the removed rows define the resampling criteria
        criteria = sample_df.loc[too_few & is_new, 'Slope/Elevation']
        for i in range(len(criteria)):
            # For each of these create a subset of the original data meeting the criterion
            candidates = original_df.loc[original_df['Slope/Elevation'] == criteria.iloc[i]]
            # Randomly sample one row from those available and mark it as new
            row_to_add = candidates.sample(n=1)
            row_to_add['origin'] = 'new_point'
            # DataFrame.append was removed in pandas 2.0 -> concat instead
            new_sample_df = pd.concat([new_sample_df, row_to_add],
                                      sort=True).reset_index(drop=True)
        # Delete the close points column (so it can be recreated)
        new_sample_df = new_sample_df.drop(['close_points'], axis=1)
    # If there are no new points which don't have enough near neighbours, then replace one
    # of the original points with another with the same slope/elevation criteria.
    else:
        # Randomly select a row with too few neighbours; by proxy this will be an old point
        row_to_remove = sample_df.loc[too_few].copy().sample(n=1)
        # Drop it from the dataframe, using the index
        sample_df = sample_df.drop(row_to_remove.index.values.astype(int)[0]).copy()
        # Find the slope/elevation criteria of the removed row and choose a similar replacement
        criteria = row_to_remove['Slope/Elevation']
        row_to_add = original_df.loc[original_df['Slope/Elevation'] == criteria.iloc[0]].copy().sample(n=1)
        row_to_add['origin'] = 'new_point'
        # Add it (DataFrame.append was removed in pandas 2.0 -> concat instead)
        new_sample_df = pd.concat([sample_df, row_to_add], sort=True).reset_index(drop=True)
        # Delete the close points column (so it can be recreated)
        new_sample_df = new_sample_df.drop(['close_points'], axis=1)
    return new_sample_df
def create_df_sample(df, sample_constraints, print_preference):
    '''
    Creates a sample from a dataframe of coordinates, according to requirements that the sample
    should have the same distribution of slope and elevation values as the original dataframe and
    that each point should be within a certain distance of at least one other - as specified in
    sample_constraints.

    : param df: A dataframe containing x and y coordinates as columns from which a sample should be taken.
    : param sample_constraints: A dictionary containing constraints for the sampling procedure.
    : return sample: A dataframe containing a sample of the input dataframe sampled according to sample_constraints.
    '''
    start = time.time()
    n_required = sample_constraints['n_close_points']
    frac = sample_constraints['n_samples'] / len(df)
    # Stratified draw: keep the proportional slope/elevation split of the whole AOI
    sample = (df.groupby(['Elevation_cuts', 'Slope_cuts'], as_index=False)
                .apply(lambda grp: grp.sample(frac=frac))
                .reset_index(drop=True))
    # For each point in the sample, count its neighbours between min_dist and max_dist
    sample = find_near_neighbours(sample, sample_constraints['min_dist'],
                                  sample_constraints['max_dist'], n_required,
                                  print_preference)
    # Sampling finishes as soon as no row falls short of the n_close_points requirement;
    # otherwise, keep resampling until that condition is met.
    if sample.loc[sample.close_points <= n_required, 'close_points'].count() == 0:
        print(f"Sampling complete after 0 resamples within {round(((time.time()-start)/60),2)} minutes.")
        return sample
    resamples = 0
    while True:
        # Keep track of how many iterations were needed
        resamples += 1
        # Swap out any points without sufficient near neighbours for points with the
        # same slope/elevation profile from the original data, then recount neighbours.
        sample = resample_far_points(sample, df, n_required)
        sample = find_near_neighbours(sample, sample_constraints['min_dist'],
                                      sample_constraints['max_dist'], n_required,
                                      print_preference)
        if sample.loc[sample.close_points <= n_required, 'close_points'].count() == 0:
            end = time.time()
            print(f"Sampling complete after {resamples} resamples within {round(((end-start)/60),2)} minutes.")
            return sample
def find_area_of_rectangle(coordinates):
    """
    Finds the area of the rectangle formed by 4 sets of British National Grid coordinates.

    : param coordinates: A 2D array with X coordinates in the first column and Y in the second.
    : return area: The rectangle's area (product of its two side lengths).
    """
    df = pd.DataFrame({'x': coordinates[:, 0], 'y': coordinates[:, 1]})
    # Distances from the last corner to the other three: two sides plus one diagonal
    dists = sorted(euclidean_distances(df.iloc[[3]][['x', 'y']],
                                       df.iloc[:3][['x', 'y']])[0].tolist())
    # The two smallest distances are the side lengths; the diagonal (largest) is discarded
    return dists[0] * dists[1]
def run_sampling (df, n, sample_constraints, print_preference):
'''
Samples a dataframe of coordinates, according to requirements that the sample should
have the same distribution of slope and elevation values as the original dataframe and that each point
should be within a certain distance of at least one other - as specified in sample_constraints.
Repeats the sampling process n times, each time assessesing the areal coverage of the sample.
The final sample is that whose points covers the smallest area.
: param df: A dataframe containing x and y coordinates as column from which a sample should be taken.
: param n: An integer representing the number of times sampling should be repeated to attempt to compress the sample area.
: param sample_constraints: A dictionary containing constraints for the sampling procedure.
: return best_sample: A dataframe containing a sample of the input dataframe sampled according to sample_constraints.
'''
#print("Beginning sampling")
print(f"Beginning sampling with {sample_constraints['n_samples']} samples, and with each sample requiring {sample_constraints['n_close_points']} points within {sample_constraints['min_dist']} m to {sample_constraints['max_dist']}.")
# Set up bounding box with an area that should be greater than any created.
best_bb_area = 1000000000000000000000
# Repeat n times the sampling process from create_df_sample.
# Each time find the area of the smallest rectangle which all the sample points fit within.
# If area is greater than that found in previous iterations, then dispose of the sample and try again.
# If it is smaller then keep this sample stored as the best_sample
# At the end return the most compact sample found over the n iterations.
counter = 0
while counter <n:
print | |
a submodel is dynamic and hasn't overridden
# `compute_output_shape`.
outputs = None
self._set_output_attrs(outputs)
  @trackable.no_automatic_dependency_tracking
  def _set_input_attrs(self, inputs):
    """Sets attributes related to the inputs of the Model.

    For Sequential models, also records a batch-agnostic `_build_input_shape`.
    Casts inputs to the compute dtype, records the symbolic inputs, and
    collects the feedable placeholders (names/shapes) for Numpy feeding.

    Arguments:
      inputs: input tensor(s), TensorShape, dict of inputs, or Numpy data.

    Returns:
      The symbolic inputs (possibly cast to the compute dtype).

    Raises:
      ValueError: if the model inputs were already set, or if a dict input is
        passed to a Sequential model whose first layer is not a FeatureLayer.
    """
    if self.inputs:
      raise ValueError('Model inputs are already set.')
    if self.__class__.__name__ == 'Sequential' and not self.built:
      # Record a batch-size-agnostic shape; the leading None is the (unknown)
      # batch dimension.
      if tensor_util.is_tensor(inputs):
        input_shape = (None,) + tuple(inputs.shape.as_list()[1:])
      elif isinstance(inputs, tensor_shape.TensorShape):
        input_shape = (None,) + tuple(inputs.as_list()[1:])
      elif isinstance(inputs, dict):
        # We assert that the first layer is a FeatureLayer.
        if not training_utils.is_feature_layer(self.layers[0]):
          raise ValueError('Passing a dictionary input to a Sequential Model '
                           'which doesn\'t have FeatureLayer as the first layer'
                           ' is an error.')
        input_shape = (None,)
      else:
        input_shape = (None,) + tuple(inputs.shape[1:])
      self._build_input_shape = input_shape
    # Cast inputs to the compute dtype. This is primarily used
    # when saving to determine the correct dtype in the input signature.
    inputs = self._maybe_cast_inputs(inputs)
    # On-the-fly setting of symbolic model inputs (either by using the tensor
    # provided, or by creating a placeholder if Numpy data was provided).
    model_inputs = training_utils.ModelInputs(inputs)
    inputs = model_inputs.get_symbolic_inputs()
    self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
    self.input_names = model_inputs.get_input_names()
    self._feed_inputs = []
    self._feed_input_names = []
    self._feed_input_shapes = []
    # Only placeholders can be fed at runtime; record their names and shapes.
    # NOTE(review): `as_dict()` presumably yields (name, tensor) pairs --
    # confirm against training_utils.ModelInputs.
    for k, v in model_inputs.as_dict():
      if K.is_placeholder(v):
        self._feed_input_names.append(k)
        self._feed_inputs.append(v)
        self._feed_input_shapes.append(K.int_shape(v))
    return inputs
@trackable.no_automatic_dependency_tracking
def _set_output_attrs(self, outputs):
"""Sets attributes related to the outputs of the Model."""
# NOTE(taylorrobie): This convention cannot be changed without updating the
# data adapter since it assumes nest.flatten ordering.
outputs = nest.flatten(outputs)
self.outputs = outputs
self.output_names = training_utils.generic_output_names(outputs)
# TODO(scottzhu): Should we cleanup the self._training_endpoints here?
self.built = True
@property
def _targets(self):
"""The output target tensors for the model."""
return [
e.training_target.target
for e in self._training_endpoints
if e.has_training_target()
]
@property
def _feed_targets(self):
return [
e.training_target.target
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_output_names(self):
return [
e.output_name
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_output_shapes(self):
return [
e.feed_output_shape
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_loss_fns(self):
return [
e.loss_fn
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _loss_weights_list(self):
return [e.loss_weight for e in self._training_endpoints]
@property
def _output_loss_metrics(self):
if hasattr(self, '_training_endpoints'):
return [
e.output_loss_metric
for e in self._training_endpoints
if e.output_loss_metric is not None
]
return None
@property
def sample_weights(self):
return [e.sample_weight for e in self._training_endpoints]
@property
def _sample_weight_modes(self):
return [e.sample_weight_mode for e in self._training_endpoints]
@property
def _feed_sample_weights(self):
return [e.sample_weight for e in self._training_endpoints
if e.sample_weight is not None]
def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):
"""Maybe load initial epoch from ckpt considering possible worker recovery.
Refer to tensorflow/python/keras/distribute/multi_worker_training_state.py
for more information.
Arguments:
initial_epoch: The original initial_epoch user passes in in `fit()`.
mode: The mode for running `model.fit()`.
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the epoch the training is supposed to continue
at. Otherwise, return the `initial_epoch` the user passes in.
"""
if self._training_state is not None:
return self._training_state.maybe_load_initial_epoch_from_ckpt(
initial_epoch, mode)
return initial_epoch
def _get_training_eval_metrics(self):
"""Returns all the metrics that are to be reported.
This includes the output loss metrics, compile metrics/weighted metrics,
add_metric metrics.
"""
metrics = []
metrics.extend(getattr(self, '_output_loss_metrics', None) or [])
metrics.extend(getattr(self, 'metrics', None) or [])
return metrics
def _assert_compile_was_called(self):
# Checks whether `compile` has been called. If it has been called,
# then the optimizer is set. This is different from whether the
# model is compiled
# (i.e. whether the model is built and its inputs/outputs are set).
if not self._compile_was_called:
raise RuntimeError('You must compile your model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
def _in_multi_worker_mode(self):
"""Method to infer if this `Model` is working in multi-worker settings.
Multi-worker training refers to the setup where the training is
distributed across multiple workers, as opposed to the case where
only a local process performs the training. This function is
used to infer for example whether or not a distribute coordinator
should be run, and thus TensorFlow servers should be started for
communication with other servers in the cluster, or whether or not
saving/restoring checkpoints is relevant for preemption fault tolerance.
Experimental. Signature and implementation are subject to change.
Returns:
Whether this model indicates it's working in multi-worker settings.
"""
strategy = self._get_distribution_strategy()
return strategy and strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _get_distribution_strategy(self):
# If the model was compiled under the scope of a `tf.distribute.Strategy',
# `self._distribution_strategy` would have been set and model should infer
# that as the used strategy (even if it's out of strategy scope already).
strategy = self._distribution_strategy
# Otherwise, use the strategy whose scope this is in.
if not strategy and distribution_strategy_context.has_strategy():
strategy = distribution_strategy_context.get_strategy()
return strategy
  @property
  def _trackable_saved_model_saver(self):
    # Saver object used by the SavedModel serialization machinery for this model.
    return model_serialization.ModelSavedModelSaver(self)
def _get_compile_args(self):
self._assert_compile_was_called()
kwargs = {
'loss': self.loss,
'metrics': self._compile_metrics,
'loss_weights': self.loss_weights,
'sample_weight_mode': self.sample_weight_mode,
'weighted_metrics': self._compile_weighted_metrics,
}
return kwargs
  @property
  def _compile_was_called(self):
    # Whether `compile()` has been called on this (v1) model.
    return self._v1_compile_was_called
class DistributedCallbackModel(Model):
  """Model that is used for callbacks with tf.distribute.Strategy."""

  def __init__(self, model):
    super(DistributedCallbackModel, self).__init__()
    self.optimizer = model.optimizer

  def set_original_model(self, orig_model):
    """Remember the original (non-distributed) model; saving/loading delegates to it."""
    self._original_model = orig_model

  def save_weights(self, filepath, overwrite=True, save_format=None):
    # NOTE(review): `_replicated_model` is never assigned in this class;
    # presumably it is set externally by the distribution machinery -- confirm.
    self._replicated_model.save_weights(filepath, overwrite=overwrite,
                                        save_format=save_format)

  def save(self, filepath, overwrite=True, include_optimizer=True):
    # save weights from the distributed model to the original model
    distributed_model_weights = self.get_weights()
    self._original_model.set_weights(distributed_model_weights)
    # TODO(anjalisridhar): Do we need to save the original model here?
    # Saving the first replicated model works as well.
    # Fix: honor the caller's `overwrite` argument instead of hard-coding True.
    self._original_model.save(filepath, overwrite=overwrite,
                              include_optimizer=False)

  def load_weights(self, filepath, by_name=False):
    # Fix: forward the caller's `by_name` argument (it used to be hard-coded to
    # False, silently ignoring by-name loading requests).
    self._original_model.load_weights(filepath, by_name=by_name)
    # Copy the weights from the original model to each of the replicated models.
    orig_model_weights = self._original_model.get_weights()
    distributed_training_utils.set_weights(
        self._original_model._distribution_strategy, self,  # pylint: disable=protected-access
        orig_model_weights)

  def __getattr__(self, item):
    # Whitelisted attributes of the model that can be accessed by the user
    # during a callback.
    if item not in ('_setattr_tracking', '_layers'):
      logging.warning('You are accessing attribute ' + item + ' of the '
                      'DistributedCallbackModel that may not have been set '
                      'correctly.')
    return super(DistributedCallbackModel, self).__getattr__(item)
class _TrainingEndpoint(object):
"""A container for the training output/target and related entities.
In the case of model with multiple outputs, there is a one-to-one mapping
between model output (y_pred), model target (y_true), loss, metrics etc.
By unifying these entities into one class, different entity can access
information between each other, rather than currently access different list of
attributes of the model.
"""
  def __init__(self,
               output,
               output_name,
               loss_fn,
               loss_weight=None,
               training_target=None,
               output_loss_metric=None,
               sample_weight=None,
               sample_weight_mode=None):
    """Initialize the _TrainingEndpoint.
    Note that the output and output_name should be stable as long as the model
    structure doesn't change. The training_target suppose to be mutable since
    the information is provided via `compile()`
    Args:
      output: the output tensor of the model.
      output_name: the unique name of the output tensor.
      loss_fn: the loss function for the output tensor.
      loss_weight: float, the weights for the loss.
      training_target: the _TrainingTarget for the model.
      output_loss_metric: the metric object for the loss function.
      sample_weight: the weights for how a sample is weighted during metric and
        loss calculation. Could be None.
      sample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for
        how the sample_weight is populated.
    """
    # Stash everything away; the read access goes through the properties below.
    self._output = output
    self._output_name = output_name
    self._loss_fn = loss_fn
    self._loss_weight = loss_weight
    self._training_target = training_target
    self._output_loss_metric = output_loss_metric
    self._sample_weight = sample_weight
    self._sample_weight_mode = sample_weight_mode
  @property
  def output(self):
    # The model output tensor this endpoint wraps (read-only).
    return self._output
  @property
  def output_name(self):
    # Unique name of the wrapped output tensor (read-only).
    return self._output_name
  @property
  def shape(self):
    # Static shape of the output tensor (via the backend's `int_shape`).
    return K.int_shape(self.output)
  @property
  def loss_fn(self):
    # Loss function associated with this output (read-only).
    return self._loss_fn
  @property
  def loss_weight(self):
    # Scalar weight applied to this output's loss (None by default).
    return self._loss_weight
  @loss_weight.setter
  def loss_weight(self, value):
    # Mutable: the weight may be (re)assigned during `compile()`.
    self._loss_weight = value
  @property
  def training_target(self):
    # The _TrainingTarget for this endpoint (None until populated).
    return self._training_target
  @training_target.setter
  def training_target(self, value):
    # Mutable: the target information arrives later, via `compile()`.
    self._training_target = value
def create_training_target(self, target, run_eagerly=False):
"""Create training_target instance and update the self.training_target.
Note that the input target should just be a tensor or None, and
corresponding training target will be created based on the output and
loss_fn.
Args:
target: the target tensor for the current output. Could be None.
run_eagerly: boolean, whether the model is in run_eagerly mode.
Raises:
ValueError if the training_target field for the current instance has
already been populated.
"""
if self.has_training_target():
raise ValueError('The training_target field for the _TrainingEndpoint '
'instance has already been populated')
if run_eagerly:
# When run_eagerly, the target tensor is ignored, and the None placeholder
# is created instead.
self.training_target = _TrainingTarget(
None, feedable=True, skip_target_weights=False)
return
if self.should_skip_target():
self.training_target = _TrainingTarget(None)
else:
if target is not None and not K.is_placeholder(target):
| |
# <gh_stars>10-100
# Copyright (c) 2020 LightOn, All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
"""
This module contains the OPU class
"""
import time
from math import sqrt
import pkg_resources
from lightonml.encoding.base import NoEncoding, NoDecoding
import warnings
from typing import Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from contextlib import ExitStack
import attr
import inspect
import lightonml
from lightonml.internal.config import get_host_option, opu_version
from lightonml.internal import config, output_roi, utils, types
from lightonml.internal.user_input import OpuUserInput, InputTraits
from lightonml.internal.simulated_device import SimulatedOpuDevice
from lightonml.context import ContextArray
from lightonml.internal.settings import OpuSettings, TransformSettings
from lightonml.internal.runner import TransformRunner, FitTransformRunner
from lightonml.internal.types import InputRoiStrategy, IntOrTuple, TransformOutput, AcqState
from lightonml.types import OutputRescaling
# Import lightonopu only for typechecking, as it's an optional module and may not be present
if TYPE_CHECKING:
from lightonopu.internal.device import OpuDevice
# noinspection PyPep8Naming
class OPU:
"""Interface to the OPU.
.. math:: \\mathbf{y} = \\lvert \\mathbf{R} \\mathbf{x} \\rvert^2 \\mbox{ (non-linear transform, the default)}
.. math:: \\mathbf{y} = \\mathbf{R}\\mathbf{x} \\mbox{ (linear transform)}
Main methods are `transform`, `linear_transform`, `fit1d` and `fit2d`,
and accept NumPy arrays or PyTorch tensors.
The non-linear transform (`transform`) is a native operation for the OPU, and performs at a higher
speed than `linear_transform`.
Acquiring/releasing hardware device resources is done by open/close and a
context-manager interface.
Unless `open_at_init=False`, these resources are acquired automatically at init.
If another process or kernel has not released the resources, an error will be
raised, call `close()` or shutdown the kernel on the OPU object to release it.
Parameters
----------
n_components : int,
dimensionality of the target projection space.
opu_device : OpuDevice or SimulatedOpuDevice, optional
optical processing unit instance linked to a physical or simulated device.
If not provided, a device is properly instantiated.
If opu_device is of type SimulatedOpuDevice, the random matrix is generated
at __init__, using max_n_features and n_components
max_n_features: int, optional
maximum number of binary features that the OPU will transform
used only if opu_device is a SimulatedOpuDevice,
in order to initiate the random matrix
config_file : str, optional
path to the configuration file (for dev purpose)
config_override: dict, optional
for override of the config_file (for dev purpose)
verbose_level: int, optional
deprecated, use lightonml.set_verbose_level() instead
.. seealso:: `lightonml.set_verbose_level`
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
.. seealso:: `lightonml.internal.types.InputRoiStrategy`
open_at_init: bool, optional
forces the setting of acquiring hardware resource at init. If
not provided, follow system's setting (usually True)
disable_pbar: bool, optional
disable display of the progress bar when verbose_level is set to 1
simulated: bool, optional
performs the random projection using CPU, in case no OPU is available on your machine
the random matrix is then generated at __init__, using max_n_features and n_components
rescale: types.OutputRescaling, optional,
output rescaling method for `linear_transform`.
Ignored by `transform`.
.. seealso:: `lightonml.types.OutputRescaling`
Attributes
----------
n_components: int
dimensionality of the target projection space.
rescale: types.OutputRescaling,
output rescaling method for `linear_transform`.
Ignored by `transform`.
max_n_features: int
maximum number of binary features that the OPU will transform
writeable only if opu_device is a SimulatedOpuDevice,
in order to initiate or resize the random matrix
device: OpuDevice or SimulatedOpuDevice
underlying hardware that performs transformation (read-only)
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
"""
    def __init__(self, n_components: int = 200000,
                 opu_device: Optional[Union["OpuDevice", SimulatedOpuDevice]] = None,
                 max_n_features: int = 1000, config_file: str = "",
                 config_override: dict = None, verbose_level: int = -1,
                 input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.full,
                 open_at_init: bool = None, disable_pbar=False, simulated=False,
                 rescale: Union[OutputRescaling, str] = OutputRescaling.variance):
        # Plain configuration state; __opu_config is populated lazily
        # (presumably via a `config` property read below — confirm).
        self.__opu_config = None
        self.__config_file = config_file
        self.__config_override = config_override
        self._max_n_features = max_n_features
        self.disable_pbar = disable_pbar
        self.rescale = rescale
        # Get trace and print functions
        # verbose_level != -1 means the caller used the deprecated argument:
        # honor it but warn; otherwise read the library-wide setting.
        if verbose_level != -1:
            warnings.warn("Verbose level arg will removed in 1.3, "
                          "Use lightonml.set_verbose_level instead",
                          DeprecationWarning)
            lightonml.set_verbose_level(verbose_level)
        else:
            verbose_level = lightonml.get_verbose_level()
        self._debug = lightonml.get_debug_fn()
        self._trace = lightonml.get_trace_fn()
        self._print = lightonml.get_print_fn()
        no_config_msg = "No configuration files for the OPU was found on this machine.\n" \
                        "You may want to run the OPU in a simulated manner, by passing the " \
                        "simulated argument to True at init.\n" \
                        "See https://docs.lighton.ai/notes/get_started.html#Simulating-an-OPU " \
                        "for more details.\n" \
                        "See also https://lighton.ai/products for getting access to our technology."
        # An explicit device and simulated=True are mutually exclusive.
        if simulated and opu_device is not None:
            raise ValueError("simulated and opu_device arguments are conflicting")
        # Device init, or take the one passed as input
        if opu_device:
            # Checked by class name rather than isinstance, so the check works
            # even when the lightonopu package (OpuDevice) isn't importable.
            if type(opu_device).__name__ not in ["SimulatedOpuDevice", "OpuDevice"]:
                raise TypeError("opu_device must be of type SimulatedOpuDevice or OpuDevice")
            self.device = opu_device
        elif simulated:
            self.device = SimulatedOpuDevice()
        else:
            # Instantiate device directly
            from lightonopu.internal.device import OpuDevice
            if not self.__config_file and not config.host_has_opu_config():
                # Looks like there's no OPU on this host as we didn't find configuration files
                raise RuntimeError(no_config_msg)
            # Timing/identification parameters come from the OPU config file.
            opu_type = self.config["type"]
            frametime_us = self.config["input"]["frametime_us"]
            exposure_us = self.config["output"]["exposure_us"]
            seq_nb_prelim = self.config.get("sequence_nb_prelim", 0)
            name = self.config["name"]
            self.device = OpuDevice(opu_type, frametime_us, exposure_us, seq_nb_prelim,
                                    None, verbose_level, name)
        # Remember the device's base timings (used to restore them later).
        self._base_frametime_us = self.device.frametime_us
        self._base_exposure_us = self.device.exposure_us
        if self._s.simulated:
            # build the random matrix if not done already
            self._resize_rnd_matrix(max_n_features, n_components)
        else:
            # Make sure lightonopu is at 1.4.1 or later, needed for linear_reconstruction
            pkg_resources.require("lightonopu>=1.4.1")
            # initialize linear_reconstruction library
            from lightonopu import linear_reconstruction
            linear_reconstruction.init(np.prod(self.device.input_shape))
        self._output_roi = output_roi.OutputRoi(self.device.output_shape_max,
                                                self.device.output_roi_strategy,
                                                self._s.allowed_roi, self._s.min_n_components)
        # This also sets the output ROI
        self.n_components = n_components
        self.input_roi_strategy = input_roi_strategy
        # Runner initialized when entering fit
        self._runner = None  # type: Optional[TransformRunner]
        # ExitStack for device acquisition, initialized when entering fit
        self._acq_stack = ExitStack()
        self._trace("OPU initialized")
        # Open at init, unless relevant host.json option is False
        if open_at_init is None:
            open_at_init = get_host_option("lightonml_open_at_init", True)
        if open_at_init:
            self.open()
def _tr_settings(self, no_input=False, **override) -> TransformSettings:
"""Returns transform settings for feeding to TransformRunner"""
init = TransformSettings(self.input_roi_strategy, self.n_components)
settings = attr.evolve(init, **override)
if no_input and self.input_roi_strategy is InputRoiStrategy.auto:
# If no input_roi, replace auto by full strategy
settings.input_roi_strategy = InputRoiStrategy.full
assert settings.input_roi is None
return settings
def fit1d(self, X=None, n_features: int = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 1d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with ``n_features``.
When input is bit-packed the packed flag must be set to True.
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
Fit will be made on this vector to optimize transform parameters
n_features: int
Number of features for the input, necessary if X parameter isn't provided
packed: bool
Set to true if the input vectors will be already bit-packed
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, False, **override)
def fit2d(self, X=None, n_features: Tuple[int, int] = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 2d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with `n_features`.
When input is bit-packed the packed flag must be set to True.
Number of features must be then provided with `n_features`
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
n_features: tuple(int)
Number of features for the input, necessary if X parameter isn't provided, or
if input is bit-packed
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_features
defaults to False
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, True, **override)
def transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Performs the | |
"""JSON implementations of logging sessions."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from bson.objectid import ObjectId
from . import objects
from . import queries
from .. import utilities
from ..id.objects import IdList
from ..osid import sessions as osid_sessions
from ..osid.sessions import OsidSession
from ..primitives import *
from ..primitives import Id
from ..primitives import Type
from ..utilities import JSONClientValidated
from ..utilities import PHANTOM_ROOT_IDENTIFIER
from dlkit.abstract_osid.id.primitives import Id as ABCId
from dlkit.abstract_osid.logging_ import sessions as abc_logging_sessions
from dlkit.abstract_osid.logging_.objects import LogEntryForm as ABCLogEntryForm
from dlkit.abstract_osid.logging_.objects import LogForm as ABCLogForm
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.type.primitives import Type as ABCType
# Sort directions for database queries (MongoDB convention).
DESCENDING = -1
ASCENDING = 1
# Flags marking form mode (create vs. update).
# NOTE(review): both are True -- presumably they are distinguished by which
# keyword argument they are passed as, not by value; confirm at call sites.
CREATED = True
UPDATED = True
# Genus type marking enclosure objects that wrap foreign OSID objects.
ENCLOSURE_RECORD_TYPE = Type(
    identifier='enclosure',
    namespace='osid-object',
    authority='ODL.MIT.EDU')
# Lookup-session view modes: lenient (comparative) vs. strict (plenary).
COMPARATIVE = 0
PLENARY = 1
class LoggingSession(abc_logging_sessions.LoggingSession, osid_sessions.OsidSession):
    """This session is used to log entries to a log."""
    def __init__(self, catalog_id=None, proxy=None, runtime=None):
        OsidSession.__init__(self)
        self._catalog_class = objects.Log
        self._catalog_name = 'Log'
        OsidSession._init_object(self, catalog_id, proxy, runtime, db_name='logging', cat_name='Log', cat_class=objects.Log)
        # Cache of forms handed out by this session.
        self._forms = dict()
        # Entry creation and lookup are delegated to the provider's admin and
        # lookup sessions, scoped to this session's log.
        lm = self._get_provider_manager('LOGGING')
        self._leas = lm.get_log_entry_admin_session_for_log(self._catalog_id, proxy=self._proxy)
        self._lels = lm.get_log_entry_lookup_session_for_log(self._catalog_id, proxy=self._proxy)
        self._content_types = lm.get_content_types()
    def get_log_id(self):
        """Gets the ``Log`` ``Id`` associated with this session.
        return: (osid.id.Id) - the ``Log Id`` associated with this
                session
        *compliance: mandatory -- This method must be implemented.*
        """
        # Implemented from template for osid.resource.ResourceLookupSession.get_bin_id
        return self._catalog_id
    log_id = property(fget=get_log_id)
    def get_log(self):
        """Gets the ``Log`` associated with this session.
        return: (osid.logging.Log) - the log
        raise: OperationFailed - unable to complete request
        raise: PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
        """
        # Implemented from template for osid.resource.ResourceLookupSession.get_bin
        return self._catalog
    log = property(fget=get_log)
    def can_log(self):
        """Tests if this user can log.
        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known all methods in this
        session will result in a ``PermissionDenied``. This is intended
        as a hint to an application that may opt not to offer logging
        operations.
        return: (boolean) - ``false`` if logging methods are not
                authorized, ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        # NOTE: It is expected that real authentication hints will be
        # handled in a service adapter above the pay grade of this impl.
        return True
    @utilities.arguments_not_none
    def log(self, content, content_type):
        """Logs an item.
        This method is a shortcut to ``createLogEntry()``.
        arg:    content (object): the entry to log
        arg:    content_type (osid.type.Type): the type of this entry
                which must be one of the types returned by
                ``LoggingManager.getContentTypes()``
        raise:  InvalidArgument - ``content`` is not of ``content_type``
        raise:  NullArgument - ``content`` or ``content_type`` is
                ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        raise:  Unsupported -
                ``LoggingManager.supportsContentType(contentType)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        # Only content types advertised by the provider manager are accepted.
        if content_type not in self._content_types:
            raise errors.Unsupported()
        lefc = self._leas.get_content_form_for_create([])
        lefc.set_timestamp(DateTime.utcnow())
        # NOTE(review): the form is created and timestamped but ``content`` is
        # never set on it and the form is never submitted via
        # ``create_log_entry()``, so nothing appears to be persisted here --
        # confirm against the provider API and complete the implementation.
    @utilities.arguments_not_none
    def log_at_priority(self, priority_type, content, content_type):
        """Logs an item.
        arg:    priority_type (osid.type.Type): the entry priority
        arg:    content (object): the entry to log
        arg:    content_type (osid.type.Type): the type of this entry
                which must be one of the types returned by
                ``LoggingManager.getContentTypes()``
        raise:  InvalidArgument - ``content`` is not of ``content_type``
        raise:  NullArgument - ``content`` , ``content_type`` or
                ``priority_type`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        raise:  Unsupported -
                ``LoggingManager.supportsContentType(contentType)`` is
                ``false`` or
                ``LoggingManager.supportsPriorityType(priorityType)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        # Not supported by this JSON implementation.
        raise errors.Unimplemented()
    def get_log_entry_form(self):
        """Gets a log entry form for creating a log entry.
        return: (osid.logging.LogEntryForm) - the log entry form
        *compliance: mandatory -- This method must be implemented.*
        """
        # Not supported by this JSON implementation.
        raise errors.Unimplemented()
    log_entry_form = property(fget=get_log_entry_form)
    @utilities.arguments_not_none
    def create_log_entry(self, log_entry_form):
        """Logs an entry through the log entry form.
        arg:    log_entry_form (osid.logging.LogEntryForm): the log
                entry form
        raise:  InvalidArgument - one or more of the form elements is
                invalid
        raise:  NullArgument - ``log_entry_form`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        raise:  Unsupported - ``log_entry_form`` is not of this service
        *compliance: mandatory -- This method must be implemented.*
        """
        # Not supported by this JSON implementation.
        raise errors.Unimplemented()
class LogEntryLookupSession(abc_logging_sessions.LogEntryLookupSession, osid_sessions.OsidSession):
"""This session provides methods for retrieving ``log entries``."""
def __init__(self, catalog_id=None, proxy=None, runtime=None, **kwargs):
OsidSession.__init__(self)
self._catalog_class = objects.Log
self._catalog_name = 'Log'
OsidSession._init_object(
self,
catalog_id,
proxy,
runtime,
db_name='logging',
cat_name='Log',
cat_class=objects.Log)
self._kwargs = kwargs
def get_log_id(self):
"""Gets the ``Log`` ``Id`` associated with this session.
return: (osid.id.Id) - the ``Log Id`` associated with this
session
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin_id
return self._catalog_id
log_id = property(fget=get_log_id)
def get_log(self):
"""Gets the ``Log`` associated with this session.
return: (osid.logging.Log) - the log
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin
return self._catalog
log = property(fget=get_log)
def can_read_log(self):
"""Tests if this user can read the log.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer reading
operations.
return: (boolean) - ``false`` if reading methods are not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
"""Tests if this user can read the log.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer reading
operations.
return: (boolean) - ``false`` if reading methods are not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return self.can_lookup_log_entries()
def can_lookup_log_entries(self):
"""Tests if a user can read logs :)"""
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
def use_comparative_log_entry_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_comparative_resource_view
self._use_comparative_object_view()
def use_plenary_log_entry_view(self):
"""A complete view of the ``LogEntry`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_plenary_resource_view
self._use_plenary_object_view()
def use_federated_log_view(self):
"""Federates the view for methods in this session.
A federated view will include entries in logs which are children
of this log in the log hierarchy.
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_federated_bin_view
self._use_federated_catalog_view()
def use_isolated_log_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts retrievals to this log only.
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_isolated_bin_view
self._use_isolated_catalog_view()
@utilities.arguments_not_none
def get_log_entry(self, log_entry_id):
"""Gets the ``LogEntry`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``LogEntry`` may have a
different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to a ``LogEntry`` and retained for
compatibility.
arg: log_entry_id (osid.id.Id): the ``Id`` of the
``LogEntry`` to retrieve
return: (osid.logging.LogEntry) - the returned ``LogEntry``
raise: NotFound - no ``LogEntry`` found with the given ``Id``
raise: NullArgument - ``log_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resource
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('logging',
collection='LogEntry',
runtime=self._runtime)
result = collection.find_one(
dict({'_id': ObjectId(self._get_id(log_entry_id, 'logging').get_identifier())},
**self._view_filter()))
return objects.LogEntry(osid_object_map=result, runtime=self._runtime, | |
= "+", c = "gray")
ax1.plot(x_ticks, knn_rr, "k-o", transform = trans0, label = "KNN")
ax1.plot(x_ticks, nn_rr, "k:^", transform = trans1, label = "NN")
ax1.legend(loc = rr_legend_loc, frameon = False, prop = {"size" : rr_legend_size})
ax1.set_xticks(x_ticks)
ax1.set_xticklabels(sample_size_ls)
ax1.set_ylabel("Null Rejection Rate")
ax1.set_xlabel("Test Set Size")
if rr_lim is not None:
ax1.set_ylim(rr_lim)
# Make accuracy line plot.
ax2.axhline(acc_line_loc, linestyle = "dashed", color = "lightgray", zorder = 1)
qnts = [np.quantile(a, [.25, .50, .75]) for a in knn_acc_ls_ls]
meds = [p[1] for p in qnts]
err1 = [abs(p[0] - p[1]) for p in qnts]
err2 = [abs(p[1] - p[2]) for p in qnts]
p1 = ax2.errorbar(x_ticks, meds, yerr = [err1, err2], capsize = 5, fmt = "k-o",
transform = trans2, markersize = 3, label = "KNN", zorder = 2)
qnts = [np.quantile(a, [.25, .50, .75]) for a in nn_acc_ls_ls]
meds = [p[1] for p in qnts]
err1 = [abs(p[0] - p[1]) for p in qnts]
err2 = [abs(p[1] - p[2]) for p in qnts]
p2 = ax2.errorbar(x_ticks, meds, yerr = [err1, err2], capsize = 5, fmt = "k:^",
transform = trans3, markersize = 3, label = "NN", zorder = 3)
handles, labels = ax2.get_legend_handles_labels()
handles = [h[0] for h in handles]
ax2.legend(handles, labels, loc = acc_legend_loc, frameon = False, prop = {"size" : acc_legend_size})
ax2.set_xticks(x_ticks)
ax2.set_xticklabels(sample_size_ls)
ax2.set_ylabel("Test Set Classification Accuracy")
ax2.set_xlabel("Test Set Size")
if acc_lim is not None:
ax2.set_ylim(acc_lim)
return fig
# Make rejection rate and accuracy plot with many C2STs.
def mul_rr_acc_plots(knn_p_val_res_ls,
                     nn_p_val_res_ls,
                     knn_acc_ls_ls,
                     nn_acc_ls_ls,
                     sample_size_ls,
                     plot_line = False,
                     rr_lim = None,
                     acc_lim = None,
                     rr_legend_loc = "upper left",
                     acc_legend_loc = "upper left",
                     rr_legend_size = 10,
                     acc_legend_size = 10,
                     acc_line_loc = 0.5,
                     rr_trans = False,
                     acc_trans = False):
    """
    Plot null rejection rates and test-set accuracies for approximate and
    exact C2STs (KNN- and NN-based) across test set sizes.

    Args:
        knn_p_val_res_ls (list of list): KNN-based C2ST p-values; element 0
            holds approximate-test results, element 1 exact-test results.
        nn_p_val_res_ls (list of list): Neural-network-based C2ST p-values;
            same [approximate, exact] layout.
        knn_acc_ls_ls (list of list): Lists of KNN accuracies, one list per sample size.
        nn_acc_ls_ls (list of list): Lists of neural network accuracies, one per sample size.
        sample_size_ls (list of int): List of sample sizes (used as x-axis labels).
        plot_line (Boolean): Whether to put a dashed line at RR = 0.05
            (the nominal test level).
        rr_lim (list of int): y-axis limits for RR plot.
        acc_lim (list of int): y-axis limits for accuracy plot.
        rr_legend_loc (str): Location of legend for RR plot.
        acc_legend_loc (str): Location of legend for accuracy plot.
        rr_legend_size (int): Font size for the RR plot legend.
        acc_legend_size (int): Font size for the accuracy plot legend.
        acc_line_loc (float): Location of the reference line on the accuracy plot.
        rr_trans (Boolean): Whether to shift RR line plots horizontally so
            overlapping markers stay readable.
        acc_trans (Boolean): Whether to shift accuracy line plots horizontally.

    Returns:
        matplotlib.figure.Figure: the assembled two-panel figure.
    """
    def _median_and_quartile_errs(acc_ls_ls):
        # Per sample size: median plus distances to the 1st/3rd quartiles,
        # in the [lower, upper] form expected by Axes.errorbar.
        qnts = [np.quantile(a, [.25, .50, .75]) for a in acc_ls_ls]
        meds = [p[1] for p in qnts]
        lower = [abs(p[0] - p[1]) for p in qnts]
        upper = [abs(p[1] - p[2]) for p in qnts]
        return meds, lower, upper

    x_ticks = list(range(1, len(sample_size_ls) + 1))
    # Rejection rate = fraction of p-values below the 0.05 level.
    knn_approx_rr = [np.mean([p < 0.05 for p in ls]) for ls in knn_p_val_res_ls[0]]
    nn_approx_rr = [np.mean([p < 0.05 for p in ls]) for ls in nn_p_val_res_ls[0]]
    knn_exact_rr = [np.mean([p < 0.05 for p in ls]) for ls in knn_p_val_res_ls[1]]
    nn_exact_rr = [np.mean([p < 0.05 for p in ls]) for ls in nn_p_val_res_ls[1]]
    # Set plot layout: two equal-width panels side by side.
    fig = plt.figure()
    fig.set_size_inches(12, 5, forward = True)
    gs = fig.add_gridspec(2, 4)
    gs.tight_layout(fig, rect = [0, 0.03, 1, 0.98])
    ax1 = fig.add_subplot(gs[0:2, 0:2])
    ax2 = fig.add_subplot(gs[0:2, 2:])
    plt.subplots_adjust(wspace = 1.1, hspace = 0.05)
    # Optional horizontal offsets keep the four RR lines from overplotting.
    if rr_trans:
        trans0 = Affine2D().translate(-0.2, 0.0) + ax1.transData
        trans1 = Affine2D().translate(-0.067, 0.0) + ax1.transData
        trans2 = Affine2D().translate(+0.067, 0.0) + ax1.transData
        trans3 = Affine2D().translate(+0.2, 0.0) + ax1.transData
    else:
        # No shift requested: draw directly in data coordinates.
        trans0 = trans1 = trans2 = trans3 = ax1.transData
    if acc_trans:
        trans4 = Affine2D().translate(-0.1, 0.0) + ax2.transData
        trans5 = Affine2D().translate(+0.1, 0.0) + ax2.transData
    else:
        trans4 = trans5 = ax2.transData
    # Make rejection rate line plot.
    if plot_line:
        ax1.axhline(0.05, linestyle = "dashed", color = "lightgray")
    ax1.plot(x_ticks, knn_approx_rr, "k-o", transform = trans0, label = "KNN-Based Approx. C2ST")
    ax1.plot(x_ticks, nn_approx_rr, "k:^", transform = trans1, label = "NN-Based Approx. C2ST")
    ax1.plot(x_ticks, knn_exact_rr, transform = trans2, alpha = 0.75,
             ls = "-", marker = "o", c = "gray", label = "KNN-Based Exact C2ST")
    ax1.plot(x_ticks, nn_exact_rr, transform = trans3, alpha = 0.75,
             ls = ":", marker = "^", c = "gray", label = "NN-Based Exact C2ST")
    ax1.legend(loc = rr_legend_loc, frameon = False, prop = {"size" : rr_legend_size})
    ax1.set_xticks(x_ticks)
    ax1.set_xticklabels(sample_size_ls)
    ax1.set_ylabel("Null Rejection Rate")
    ax1.set_xlabel("Test Set Size")
    if rr_lim is not None:
        ax1.set_ylim(rr_lim)
    # Make accuracy line plot (median with interquartile error bars).
    ax2.axhline(acc_line_loc, linestyle = "dashed", color = "lightgray", zorder = 1)
    meds, err1, err2 = _median_and_quartile_errs(knn_acc_ls_ls)
    ax2.errorbar(x_ticks, meds, yerr = [err1, err2], capsize = 5, fmt = "k-o",
                 transform = trans4, markersize = 3, label = "KNN", zorder = 2)
    meds, err1, err2 = _median_and_quartile_errs(nn_acc_ls_ls)
    ax2.errorbar(x_ticks, meds, yerr = [err1, err2], capsize = 5, fmt = "k:^",
                 transform = trans5, markersize = 3, label = "NN", zorder = 3)
    # Legend should show only the line part of each errorbar container.
    handles, labels = ax2.get_legend_handles_labels()
    handles = [h[0] for h in handles]
    ax2.legend(handles, labels, loc = acc_legend_loc, frameon = False, prop = {"size" : acc_legend_size})
    ax2.set_xticks(x_ticks)
    ax2.set_xticklabels(sample_size_ls)
    ax2.set_ylabel("Test Set Classification Accuracy")
    ax2.set_xlabel("Test Set Size")
    if acc_lim is not None:
        ax2.set_ylim(acc_lim)
    return fig
# Make C2ST-RFI boxplots.
def c2st_rfi_boxplots(knn_rfi_res_ls,
nn_rfi_res_ls,
sample_size_ls,
abs_lims = None,
max_lims = None,
id_lims = None):
"""
Args:
knn_rfi_res_ls (list of list): List holding KNN-based C2ST-RFI results.
nn_rfi_res_ls (list of list): List holding neural network-based C2ST-RFI results.
sample_size_ls (list of int): List of sample sizes.
abs_lims (list of int): y-axis limits for C2ST-RFI with absolute value function.
max_lims (list of int): y-axis limits for C2ST-RFI with max function.
id_lims (list of int): y-axis limits for C2ST-RFI with identity function.
"""
x_ticks = np.arange(len(sample_size_ls) + 1).tolist()[1:]
# Create boxplots.
fig = plt.figure()
fig.set_size_inches(8, 8, forward = True)
gs = fig.add_gridspec(4, 4)
gs.tight_layout(fig, rect = [0, 0.03, 1, 0.98])
ax1 = fig.add_subplot(gs[0:2, 0:2])
ax2 = fig.add_subplot(gs[0:2, 2:])
ax3 = fig.add_subplot(gs[2:, 1:3])
plt.subplots_adjust(wspace = 1.1, hspace = 1.1)
# Mark perfect fit with dotted line.
ax1.axhline(y = 1, color = "lightgray", linestyle = "--")
ax2.axhline(y = 1, color = "lightgray", linestyle = "--")
ax3.axhline(y = 1, color = "lightgray", linestyle = "--")
# Set outlier marker style.
flierprops = dict(marker = "o", markerfacecolor = "black", markersize = 2, linestyle = "none")
# C2ST-RFI_abs boxplots.
b11 = ax1.boxplot(knn_rfi_res_ls[0],
positions = [0.5, 2.5, 4.5, 6.5, 8.5],
widths = 0.45,
flierprops = flierprops,
patch_artist = True)
for b in b11["medians"]:
setp(b, color = "black")
for b in b11["boxes"]:
b.set(facecolor = "white")
b12 = ax1.boxplot(nn_rfi_res_ls[0],
positions = [1, 3, 5, 7, 9],
widths = 0.45,
flierprops = flierprops,
patch_artist = True)
for b in b12["medians"]:
setp(b, color = "white")
for b in b12["boxes"]:
b.set(facecolor = "black")
ax1.set_xticks([0.75, 2.75, 4.75, 6.75, 8.75])
ax1.set_xticklabels(sample_size_ls)
ax1.set_xlabel("Test Set Size")
if abs_lims is not None:
ax1.set_ylim(abs_lims)
ax1.set_ylabel("C2ST-$\mathregular{RFI}_{\mathregular{abs}}$")
# Set legend on first plot.
circ1 = mpatches.Rectangle((0.5, 0.5), 1, 0.5, facecolor = "white", label = "KNN", edgecolor = "black")
circ2 = mpatches.Rectangle((0.5, 0.5), 1, 0.5, facecolor = "black", label = "NN", edgecolor = "black")
ax1.legend(handles = [circ1, circ2], loc = "lower right", frameon = False, prop = {"size" : 10})
# C2ST-RFI_max boxplots.
b21 = ax2.boxplot([[rfi for rfi in rfi_ls if not np.isnan(rfi)] for rfi_ls in knn_rfi_res_ls[1]],
positions = [0.5, 2.5, 4.5, 6.5, 8.5],
widths = 0.45,
flierprops = flierprops,
patch_artist = True)
for b in b21["medians"]:
setp(b, color = "white")
setp(b21["medians"][4], color = "black")
for b in b21["boxes"]:
b.set(facecolor = "white")
b22 = ax2.boxplot([[rfi for rfi in rfi_ls if not np.isnan(rfi)] for rfi_ls in nn_rfi_res_ls[1]],
positions = [1, 3, 5, 7, 9],
widths = 0.45,
flierprops = flierprops,
patch_artist = True)
for b in b22["medians"]:
setp(b, color = "black")
| |
# Repository: KnugiHK/synapse-admin-api-python
"""MIT License
Copyright (c) 2020 Knugi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import httpx
import os
import re
from configparser import ConfigParser
from datetime import datetime
from getpass import getpass
from pathlib import Path
from typing import Tuple, Any, Union
class SynapseException(Exception):
    """Error returned from the Admin API.

    Attributes:
        code: error code reported by the API
        msg: human-readable error message
    """
    def __init__(self, code, msg):
        super().__init__(msg)
        self.code = code
        self.msg = msg

    def __str__(self):
        return f"SynapseException: [{self.code}] {self.msg}"
class Utility():
    """Assorted helpers shared across the admin API wrappers."""
    # Matches a trailing ":<port>" (optionally followed by "/") in a URL.
    port_re = re.compile(r":[0-9]{1,5}/?$")
    # Matches an explicit http:// or https:// scheme prefix.
    http_re = re.compile(r"^https?://")
    # File-signature (magic bytes) -> mime type, used by guess_type().
    mime_map = {
        b"\xff\xd8": "image/jpeg", b"\x42\x4D": "image/bmp",
        b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "image/png",
        b"\x49\x49\x2A\x00": "image/tiff", b"\x4D\x4D\x00\x2A": "image/tiff",
        b"\x47\x49\x46\x38\x37\x61": "image/gif",
        b"\x47\x49\x46\x38\x39\x61": "image/gif",
        b"\xFF\xFB": "audio/mpeg", b"\xFF\xF3": "audio/mpeg",
        b"\xFF\xF2": "audio/mpeg", b"\x4F\x67\x67\x53": "audio/ogg",
        b"\x4F\x70\x75\x73\x48\x65\x61\x64": "audio/opus",
        b"\x1A\x45\xDF\xA3": "video/webm",
        b"\x66\x74\x79\x70\x69\x73\x6F\x6D": "video/mp4",
    }

    @staticmethod
    def get_bool(boolean: bool) -> str:
        """Convert a Python bool to its lowercase string form.

        Returns:
            str: string "true" or "false"

        Raises:
            TypeError: when the argument is not a bool.
        """
        if not isinstance(boolean, bool):
            raise TypeError("Argument 'boolean' must be a "
                            f"bool not a {type(boolean)}")
        return "true" if boolean else "false"

    @staticmethod
    def get_current_time() -> int:
        """Return the current Unix timestamp in milliseconds."""
        return int(datetime.now().timestamp() * 1000)

    @staticmethod
    def get_password(
        prompt: str = "Enter a password: ",
        validate: bool = True
    ) -> str:
        """Interactively read a password, optionally confirming it.

        Args:
            prompt (str, optional): String to ask for input. Defaults to "Enter a password: ". # noqa: E501
            validate (bool, optional): ask for the password twice and retry
                until both entries match. Defaults to True.

        Returns:
            str: the password the user entered
        """
        password = getpass(prompt)
        if not validate:
            return password
        confirmation = getpass("Enter the password again: ")
        if password == confirmation:
            return password
        print("The passwords you entered are not the same, try again.")
        # Retry from the top until the two entries match.
        return Utility.get_password(prompt)

    @staticmethod
    def guess_type(stream: bytes) -> str:
        """Guess a mime type from the leading magic bytes of *stream*.

        Only the signatures listed in ``mime_map`` are recognized.

        Args:
            stream (bytes): the stream

        Returns:
            str: mime type, or None when no known signature matches
        """
        for magic, mime in Utility.mime_map.items():
            if stream[:len(magic)] == magic:
                return mime
        return None
class _BaseContents():
    """Mixin exposing the pagination metadata stored on Contents."""

    @property
    def total(self):
        # Total number of wanted records, as supplied at construction.
        return self._total

    @property
    def next(self):
        # Token for fetching the next page; may be None.
        return self._next
class Contents(list, _BaseContents):
    """A list of API results that also carries pagination metadata.

    Behaves exactly like ``list`` but additionally exposes ``total`` (the
    total number of wanted data) and ``next`` (the token for the next page,
    if any) as read-only properties.
    """
    def __init__(
        self,
        data: list,
        total: int,
        next_token: Union[str, int] = None
    ):
        if not isinstance(total, int):
            raise TypeError("Argument total must be int")
        if next_token is not None and not isinstance(next_token, (str, int)):
            raise TypeError("Argument next_token must be str or int")
        self._total = total
        self._next = next_token
        super().__init__(data)
class Admin():
"""Base class for storing common variable read configuration"""
    def __init__(
        self,
        server_addr: str = None,
        server_port: int = 443,
        access_token: str = None,
        server_protocol: str = None,
        suppress_exception: bool = False
    ) -> None:
        """
        Args:
            server_addr (str, optional): homeserver address. Defaults to None.
            server_port (int, optional): homeserver listening port. Defaults to 443.
            access_token (str, optional): access token that has admin power. Defaults to None.
            server_protocol (str, optional): "http://" or "https://". Defaults to None.
            suppress_exception (bool, optional): suppress exception or not, if not return False and the error in dict. Defaults to False. # noqa: E501
        """
        if server_addr is not None and access_token is not None:
            self.access_token = access_token
            self.server_addr = server_addr
            self.server_port = server_port
            if server_protocol is None:
                # No scheme given: infer it from the port number.
                self.server_protocol = \
                    self._parse_protocol_by_port(server_port)
            else:
                # Normalize a bare scheme name ("https") to "https://".
                if "://" not in server_protocol:
                    self.server_protocol = server_protocol + "://"
                else:
                    self.server_protocol = server_protocol
        else:
            # If homeserver address or/and access token are
            # not provided, read from configuration file
            if os.name == "nt":
                # Windows: keep the config under %APPDATA%\Synapse-Admin-API\.
                path = os.path.join(
                    f"{os.environ['APPDATA']}\\Synapse-Admin-API\\")
                if not os.path.isdir(path):
                    os.makedirs(path)
            else:
                # POSIX: the config lives in the user's home directory.
                path = str(Path.home())
            self.config_path = os.path.join(path, "api.cfg")
            if os.path.isfile(self.config_path):
                self.read_config(self.config_path)
            else:
                # If configuration file not found, create one
                # (interactive prompt; see create_config).
                self.create_config()
        self._create_header()
        self._create_conn()
        self.suppress_exception = suppress_exception
    def __enter__(self):
        """Support use as a context manager (``with Admin(...) as a:``)."""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: intentionally a no-op.

        NOTE(review): the connection created in _create_conn is not closed
        here — confirm whether HTTPConnection needs explicit cleanup.
        """
        ...
    def _create_conn(self) -> bool:
        """Create connection to the homeserver"""
        # The connection object is reused for all subsequent API calls.
        self.connection = HTTPConnection(
            self.server_protocol,
            self.server_addr,
            self.server_port,
            self.header
        )
        return True
def _create_header(self) -> None:
"""Create header for connection"""
from .__init__ import __version__
self.header = {
"Authorization": f"Bearer {self.access_token}",
"User-Agent": f"matrix-synpase-admin-python/{__version__}"
}
    def create_config(
        self,
        protocol: str = None,
        host: str = None,
        port: int = None,
        access_token: str = None,
        save_to_file: Union[str, bool] = False
    ) -> bool:
        """Create configuration (interactively)

        Any argument left as None is prompted for on stdin; an empty access
        token triggers an interactive admin login to obtain one.

        Args:
            protocol (str, optional): "http://" or "https://". Defaults to None. # noqa: E501
            host (str, optional): homeserver address. Defaults to None.
            port (int, optional): homeserver listening port. Defaults to None.
            access_token (str, optional): access token that has admin privilege. Defaults to None. # noqa: E501
            save_to_file (str/bool, optional): whether or not save the configuration to a file ("y"/"n"/"" or a bool). Defaults to False. # noqa: E501

        Returns:
            bool: configuration saved
        """
        if (protocol is None or host is None
                or port is None or access_token is None):
            # Keep asking until the URL parses into protocol/host/port.
            while True:
                url = input(
                    "Enter the homeserver URL with port "
                    "(e.g. https://example.com:443): "
                )
                try:
                    protocol, host, port = self._parse_homeserver_url(url)
                except ValueError as e:
                    print(e)
                    continue
                else:
                    break
            while True:
                access_token = input(
                    "Enter the access token (leave blank to"
                    "get the access token by logging in): "
                )
                # An empty answer means: log in with username/password and
                # derive the admin token from that session.
                if access_token == "":
                    from synapse_admin.client import ClientAPI
                    access_token = ClientAPI.admin_login(
                        protocol,
                        host,
                        port,
                        suppress_exception=True
                    )
                    if not access_token:
                        print(
                            "The account you logged in is not a server admin "
                            "or you entered an invalid username/password."
                        )
                        continue
                    else:
                        print("Token retrieved successfully")
                break
            # NOTE(review): a bool passed by the caller is not in this set,
            # so the user is still prompted — confirm this is intended.
            while save_to_file not in {"y", "n", ""}:
                save_to_file = input("Save to a config file? (Y/n) ").lower()
        self.server_protocol = protocol
        self.server_addr = host
        self.server_port = int(port)
        self.access_token = access_token
        # `and` binds tighter than `or`: skip saving when the answer was
        # "n", or when a false bool was passed programmatically.
        if (save_to_file == "n" or isinstance(save_to_file, bool)
                and not save_to_file):
            return True
        return self._save_config(protocol, host, port, access_token)
    def _parse_homeserver_url(self, url: str) -> Tuple[str, str, int]:
        """Parse a given URL to three parts.

        Args:
            url (str): URL that is needed to be parsed

        Raises:
            ValueError: Raised if neither port or protocol is specified

        Returns:
            Tuple[str, str, int]: protocol, host, port
        """
        port = Utility.port_re.search(url)
        protocol = Utility.http_re.search(url)
        if port is None:
            # No explicit port: derive it from the protocol prefix.
            if protocol is None:
                raise ValueError(
                    "You must specify at least "
                    "a port or a HTTP protocol"
                )
            elif protocol[0] == "https://":
                port = 443
            elif protocol[0] == "http://":
                port = 80
        else:
            # The port match looks like ":8008/"; strip the leading colon
            # and any trailing slash before converting to int.
            port = int(port[0][1:].replace("/", ""))
            if protocol is None:
                protocol = self._parse_protocol_by_port(port)
        if protocol is not None and isinstance(protocol, re.Match):
            # Reduce the regex match object to its matched text.
            protocol = protocol[0]
        host = url
        if protocol is not None:
            host = host.replace(protocol, "")
        if port is not None:
            host = host.replace(f":{port}", "")
        # Drop a single trailing slash.
        # NOTE(review): crashes on an empty host string — verify upstream
        # regexes guarantee a non-empty remainder.
        if host[-1] == "/":
            host = host[:-1]
        return protocol, host, port
def _parse_protocol_by_port(self, port: int) -> str:
"""Parse the given port to protocol automatically
Args:
port (int): port that is needed to be parsed
Raises:
ValueError: raised if the port is not 80 or 8008 or 443 or 8443
Returns:
str: either "http://" or "https://"
"""
if port == 80 or port == 8008:
return "http://"
elif port == 443 or port == 8443:
return "https://"
else:
raise ValueError(
"Cannot determine the protocol "
f"automatically by the port {port}."
)
def _save_config(
self,
protocol: str,
host: str,
port: int,
token: str
) -> bool:
"""Write the configuration to a file
Args:
protocol (str): "http://" or "https://". Defaults to None.
host (str): homeserver address. | |
"""
SAX style events again.
"""
# pylint: disable=too-many-lines
import ast
from collections import ChainMap
from dataclasses import dataclass
from functools import singledispatch
from typing import ChainMap as CM
from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
from pytest import mark
from breakfast.position import Position
from breakfast.source import Source
from tests import make_source
class Event:
    """Base class for scope/occurrence events replayed through State."""
    def apply( # pylint: disable=no-self-use
        self, state: "State" # pylint: disable=unused-argument
    ) -> None:
        """Default: an event with no effect on the state."""
        ...
@dataclass
class Occurrence(Event):
    """A name appearing at a position in the source, with its AST node."""
    name: str
    position: Position
    node: ast.AST
class Regular(Occurrence):
    """An ordinary use of a name; recorded via State.add_occurrence."""
    def apply(self, state: "State") -> None:
        state.add_occurrence(self)
class Nonlocal(Occurrence):
    """A `nonlocal`/`global` name; aliased to the outer binding."""
    def apply(self, state: "State") -> None:
        state.add_nonlocal(self)
class Definition(Occurrence):
    """A binding occurrence (def/class/argument/assignment target)."""
    def apply(self, state: "State") -> None:
        state.add_definition(self)
@dataclass
class EnterScope(Event):
    """Push a new generic scope named `name`."""
    name: str
    def apply(self, state: "State") -> None:
        state.enter_scope(self.name)
class EnterFunctionScope(EnterScope):
    """Push a function scope (class-body names are not visible inside)."""
    def apply(self, state: "State") -> None:
        state.enter_function_scope(self.name)
class EnterModuleScope(EnterScope):
    """Push the top-level module scope."""
    def apply(self, state: "State") -> None:
        # NOTE: "modulte" is a typo, but it matches the State method name.
        state.enter_modulte_scope(self.name)
class EnterClassScope(EnterScope):
    """Push a class-body scope and register the namespace as a class."""
    def apply(self, state: "State") -> None:
        state.enter_class_scope(self.name)
@dataclass
class EnterAttributeScope(Event):
    """Push the scope reached through an attribute access (`obj.attr`)."""
    name: str
    def apply(self, state: "State") -> None:
        state.enter_attribute_scope(self.name)
@dataclass
class EnterSuperAttributeScope(Event):
    """Push the scope of the base class for a `super().attr` access."""
    def apply(self, state: "State") -> None:
        state.enter_super_attribute_scope()
@dataclass
class LeaveScope(Event):
    """Pop the innermost scope."""
    @staticmethod
    def apply(state: "State") -> None:
        state.leave_scope()
@dataclass
class Alias(Event):
    """Record that namespace `new` is an alias for namespace `existing`."""
    existing: Tuple[str, ...]
    new: Tuple[str, ...]
    def apply(self, state: "State") -> None:
        state.add_alias(existing=self.existing, new=self.new)
@dataclass
class SelfArgument(Event):
    """The first argument of a method; aliased to the enclosing class."""
    name: str
    def apply(self, state: "State") -> None:
        state.add_self(name=self.name)
class State: # pylint: disable=too-many-public-methods
    """Accumulates scope information while the event stream is replayed.

    Scopes are keyed by namespace tuples (e.g. ``("module", "Class",
    "method")``) and map each name to the list of its occurrences.
    """
    def __init__(self) -> None:
        # Current namespace as a mutable stack of scope names.
        self._namespace: List[str] = []
        # One ChainMap of name -> occurrences per namespace tuple.
        self.scopes: Dict[
            Tuple[str, ...],
            CM[str, List[Occurrence]], # pylint: disable=unsubscriptable-object
        ] = {(): ChainMap()}
        # Maps an aliased namespace to the namespace it stands for.
        self.aliases: Dict[Tuple[str, ...], Tuple[str, ...]] = {}
        # Namespaces known to be class bodies.
        self.classes: Set[Tuple[str, ...]] = set()
        self.module_scope: Optional[
            CM[str, List[Occurrence]] # pylint: disable=unsubscriptable-object
        ] = None
        # Namespaces entered through attribute access (obj.attr chains).
        self.attribute_scopes: Set[Tuple[str, ...]] = set()
    @property
    def namespace(self) -> Tuple[str, ...]:
        """Current namespace as an immutable tuple (usable as a dict key)."""
        return tuple(self._namespace)
    @property
    def current_scope(
        self,
    ) -> CM[str, List[Occurrence]]: # pylint: disable=unsubscriptable-object
        """Scope for the current namespace; it must already exist."""
        assert self.namespace in self.scopes
        return self.scopes[self.namespace]
    @property
    def in_attribute_scope(self):
        """True when the current namespace was entered via attribute access."""
        return self.namespace in self.attribute_scopes
    def scope_for(
        self, namespace: Tuple[str, ...]
    ) -> Optional[CM[str, List[Occurrence]]]: # pylint: disable=unsubscriptable-object
        """Scope for `namespace`, or None if none was created."""
        return self.scopes.get(namespace)
    def lookup_existing(self, name: str) -> Optional[List[Occurrence]]:
        """Find the occurrence list for `name` without creating one.

        Tries the current scope first, then namespace aliases, then
        prefix aliases; returns None when the name is unknown.
        """
        from_current_scope = self.current_scope.get(name)
        if from_current_scope:
            return from_current_scope
        # NOTE(review): this only differs from the .get() above when the
        # name maps to an *empty* list — then the empty list is returned
        # instead of falling through to the alias lookups.
        if name in self.current_scope:
            return self.current_scope[name]
        alias = self.get_alias(name)
        if alias:
            return alias
        prefix_alias = self.get_prefix_alias(name)
        if prefix_alias:
            return prefix_alias
        return None
    def lookup(self, name: str) -> List[Occurrence]:
        """Like lookup_existing, but create an empty entry when missing."""
        existing = self.lookup_existing(name)
        if existing:
            return existing
        return self.current_scope.setdefault(name, [])
    def get_alias(self, name: str) -> Optional[List[Occurrence]]:
        """Resolve `name` by following whole-namespace alias chains."""
        namespace = self.namespace
        while namespace in self.aliases:
            namespace = self.aliases[namespace]
        alias_scope = self.scope_for(namespace)
        if alias_scope and name in alias_scope:
            return alias_scope[name]
        return None
    def get_prefix_alias(self, name: str) -> Optional[List[Occurrence]]:
        """Resolve `name` by aliasing the longest aliased namespace prefix."""
        namespace = self.namespace
        for length in range(len(namespace), 0, -1):
            prefix, suffix = namespace[:length], namespace[length:]
            if prefix in self.aliases:
                namespace = self.aliases[prefix] + suffix
        alias_scope = self.scope_for(namespace)
        if alias_scope and name in alias_scope:
            return alias_scope[name]
        return None
    def process(self, event: Event):
        """Apply a single event to this state."""
        event.apply(self)
    def add_occurrence(self, occurrence: Occurrence) -> None:
        """Record a use of a name, creating module-level entries for
        use-before-definition."""
        existing = self.lookup_existing(occurrence.name)
        if existing:
            existing.append(occurrence)
        elif self.in_attribute_scope:
            self.lookup(occurrence.name).append(occurrence)
        else:
            # Use before definition, which means the definition has to come later. The
            # only place that can happen is in the module scope.
            assert self.module_scope is not None
            self.module_scope.setdefault(occurrence.name, []).append(occurrence)
    def add_nonlocal(self, occurrence: Occurrence) -> None:
        """Record a nonlocal/global occurrence and link it to the outer
        binding of the same name."""
        self.add_occurrence(occurrence)
        existing = self.lookup_existing(occurrence.name)
        mapping = self.current_scope.maps[0]
        assert not mapping.get(occurrence.name)
        assert isinstance(mapping, dict)
        # XXX: this is kind of hacky: We're aliasing the name in the current namespace
        # to the one in the outer namespace. It works for now, but I may have to do
        # something more like the aliases for this.
        mapping[occurrence.name] = existing
    def add_definition(self, occurrence: Occurrence) -> None:
        """Record a binding occurrence in the innermost map only."""
        mapping = self.current_scope.maps[0]
        assert isinstance(mapping, dict)
        mapping.setdefault(occurrence.name, []).append(occurrence)
    def add_alias(self, new: Tuple[str, ...], existing: Tuple[str, ...]) -> None:
        """Alias `new` to `existing`, both relative to the current namespace."""
        self.aliases[self.namespace + new] = self.namespace + existing
    def add_self(self, name: str) -> None:
        """Alias a method's `self` argument to the enclosing class scope."""
        if self.namespace[:-1] in self.classes:
            full_name = self.namespace + (name,)
            self.aliases[full_name] = full_name[:-2]
    def enter_modulte_scope(self, name: str) -> None:
        """Enter the module scope and remember it for use-before-definition.
        (The "modulte" typo matches EnterModuleScope.apply.)"""
        self.enter_scope(name)
        self.module_scope = self.current_scope
    def enter_class_scope(self, name: str) -> None:
        """Enter a class-body scope and register it as a class."""
        self.enter_scope(name)
        self.classes.add(self.namespace)
    def enter_function_scope(self, name: str) -> None:
        """Enter a function scope; methods skip the innermost (class-body)
        map so class attributes are not visible as locals."""
        if self.namespace in self.classes:
            new_scope = ChainMap(*self.current_scope.maps[1:]).new_child()
        else:
            new_scope = self.current_scope.new_child()
        self._enter_scope(name, new_scope)
    def enter_scope(self, name: str) -> None:
        """Enter a generic nested scope that sees the enclosing names."""
        new_scope = self.current_scope.new_child()
        self._enter_scope(name, new_scope)
    def enter_attribute_scope(self, name: str) -> None:
        """Enter the scope behind `name` in an attribute chain; if the name
        aliases a class, reuse that class's scope."""
        full_name = self.namespace + (name,)
        if full_name in self.aliases and self.aliases[full_name] in self.classes:
            new_scope = self.scopes[self.aliases[full_name]]
        else:
            new_scope = ChainMap()
        self._enter_scope(name, new_scope)
        self.attribute_scopes.add(self.namespace)
    def enter_super_attribute_scope(self) -> None:
        """Enter the base-class scope for a `super().attr` access."""
        if self.namespace[:-1] in self.classes:
            full_name = self.namespace[:-1]
            new_scope = self.scopes[self.aliases[full_name]]
        else:
            full_name = ("super",)
            new_scope = ChainMap()
        self._enter_scope(full_name[-1], new_scope)
        self.attribute_scopes.add(self.namespace)
    def _enter_scope(
        self,
        name: str,
        new_scope: CM[str, List[Occurrence]], # pylint: disable=unsubscriptable-object
    ):
        # Push the name; keep an already-existing scope for this namespace.
        self._namespace.append(name)
        self.scopes.setdefault(self.namespace, new_scope)
    def leave_scope(self) -> None:
        """Pop the innermost scope name."""
        self._namespace.pop()
def node_position(
    node: ast.AST, source: Source, row_offset=0, column_offset=0
) -> Position:
    """Translate an AST node's location into a source Position.

    `lineno` is 1-based while positions are 0-based, hence the -1;
    optional offsets shift the resulting row/column.
    """
    row = node.lineno - 1 + row_offset
    column = node.col_offset + column_offset
    return source.position(row=row, column=column)
@singledispatch
def visit(node: ast.AST, source: Source) -> Iterator[Event]:
    """Fallback visitor: emit events for all child nodes.

    Copied and reworked from NodeVisitor in:
    https://github.com/python/cpython/blob/master/Lib/ast.py
    """
    for event in generic_visit(node, source):
        yield event
@visit.register
def visit_name(node: ast.Name, source: Source) -> Iterator[Event]:
    """A bare name is a regular occurrence at its own position."""
    where = node_position(node, source)
    yield Regular(name=node.id, position=where, node=node)
@visit.register
def visit_module(node: ast.Module, source: Source) -> Iterator[Event]:
    """Wrap the module body in the module-level scope."""
    yield EnterModuleScope(source.module_name)
    yield from generic_visit(node, source)
    yield LeaveScope()
@visit.register
def visit_class(node: ast.ClassDef, source: Source) -> Iterator[Event]:
    """Class: definition, base-class aliases, then the body in its scope."""
    offset = len("class ")
    yield Definition(node.name, node_position(node, source, column_offset=offset), node)
    for base in node.bases:
        # Aliasing the class to each named base lets attribute lookups
        # fall through to inherited names.
        if isinstance(base, ast.Name):
            yield Alias(new=(node.name,), existing=(base.id,))
        yield from visit(base, source)
    yield EnterClassScope(node.name)
    for stmt in node.body:
        yield from visit(stmt, source)
    yield LeaveScope()
@visit.register
def visit_function(node: ast.FunctionDef, source: Source) -> Iterator[Event]:
    """Function: definition, then arguments and body in a function scope."""
    def_offset = len("def ")
    yield Definition(node.name, node_position(node, source, column_offset=def_offset), node)
    yield EnterFunctionScope(node.name)
    takes_self = not is_static_method(node)
    for index, arg in enumerate(node.args.args):
        # The first argument of a non-static method is `self`-like.
        if index == 0 and takes_self:
            yield SelfArgument(name=arg.arg)
        yield Definition(arg.arg, node_position(arg, source), arg)
    yield from generic_visit(node, source)
    yield LeaveScope()
def visit_non_local_like(
    node: Union[ast.Global, ast.Nonlocal], source: Source
) -> Iterator[Event]:
    """Emit a Nonlocal occurrence for each name in a global/nonlocal stmt."""
    current = node_position(node, source)
    for name in node.names:
        # Advance past each name so repeated names get distinct positions.
        current = source.find_after(name, current)
        yield Nonlocal(name=name, position=current, node=node)
@visit.register
def visit_global(node: ast.Global, source: Source) -> Iterator[Event]:
    """`global` statements are handled like `nonlocal` ones."""
    for event in visit_non_local_like(node, source):
        yield event
@visit.register
def visit_nonlocal(node: ast.Nonlocal, source: Source) -> Iterator[Event]:
    """`nonlocal` statements delegate to the shared helper."""
    for event in visit_non_local_like(node, source):
        yield event
def is_static_method(node: ast.FunctionDef) -> bool:
    """Return True when the function carries a bare @staticmethod decorator."""
    for decorator in node.decorator_list:
        if isinstance(decorator, ast.Name) and decorator.id == "staticmethod":
            return True
    return False
@visit.register
def visit_attribute(node: ast.Attribute, source: Source) -> Iterator[Event]:
    """Attribute access: visit the value, enter its scope chain, then emit
    the attribute itself as a regular occurrence."""
    yield from visit(node.value, source)
    position = node_position(node, source)
    names = names_from(node.value)
    is_super_call = isinstance(node.value, ast.Call) and names == ("super",)
    if is_super_call:
        yield EnterSuperAttributeScope()
    else:
        for name in names:
            position = source.find_after(name, position)
            yield EnterAttributeScope(name)
    position = source.find_after(node.attr, position)
    yield Regular(node.attr, position, node)
    # One LeaveScope per entered scope (the super branch entered one scope
    # for the single "super" name).
    for _ in names:
        yield LeaveScope()
@visit.register
def visit_assign(node: ast.Assign, source: Source) -> Iterator[Event]:
    """Assignment: define each target, visit the value, and record aliases
    such as ``a = b`` by pairing target and value names."""
    for target_node in node.targets:
        yield from visit_definition(target_node, source)
    yield from visit(node.value, source)
    lhs_names = get_names(node.targets[0])
    rhs_names = get_names(node.value)
    for lhs, rhs in zip(lhs_names, rhs_names):
        if lhs and rhs:
            yield Alias(new=lhs, existing=rhs)
@visit.register
def visit_import(node: ast.Import, source: Source) -> Iterator[Event]:
    """Each imported module name is a regular occurrence."""
    start = node_position(node, source)
    for alias in node.names:
        found = source.find_after(alias.name, start)
        yield Regular(alias.name, found, alias)
@visit.register
def visit_call(node: ast.Call, source: Source) -> Iterator[Event]:
    """Call: visit arguments and callee, then resolve keyword arguments
    inside the callee's scope."""
    call_position = node_position(node, source)
    for argument in node.args:
        yield from visit(argument, source)
    names = names_from(node.func)
    yield from visit(node.func, source)
    # NOTE(review): names_from may return () for unusual callables;
    # names[-1] would then raise IndexError — confirm the dispatch always
    # yields at least one name for nodes reaching this point.
    for outer_name in names[:-1]:
        yield EnterAttributeScope(outer_name)
    yield EnterScope(names[-1])
    for keyword in node.keywords:
        # keyword.arg is None for **kwargs; nothing to resolve there.
        if not keyword.arg:
            continue
        keyword_position = source.find_after(keyword.arg, call_position)
        yield Regular(keyword.arg, keyword_position, node)
    for _ in names:
        yield LeaveScope()
@visit.register
def visit_dict_comp(node: ast.DictComp, source: Source) -> Iterator[Event]:
    """Dict comprehension: visit key and value inside a fresh scope."""
    yield from visit_comp(node, source, node.key, node.value)
@visit.register
def visit_list_comp(node: ast.ListComp, source: Source) -> Iterator[Event]:
    """List comprehension: visit the element inside a fresh scope."""
    yield from visit_comp(node, source, node.elt)
@visit.register
def visit_set_comp(node: ast.SetComp, source: Source) -> Iterator[Event]:
    """Set comprehension: visit the element inside a fresh scope."""
    yield from visit_comp(node, source, node.elt)
@visit.register
def visit_generator_exp(node: ast.GeneratorExp, source: Source) -> Iterator[Event]:
    """Generator expression: visit the element inside a fresh scope."""
    yield from visit_comp(node, source, node.elt)
@singledispatch
def visit_definition(node: ast.AST, source: Source) -> Iterator[Event]:
    """By default a definition target is visited like any other node."""
    yield from visit(node, source)
@visit_definition.register
def visit_name_definition(node: ast.Name, source: Source) -> Iterator[Event]:
    """A name in a target position defines that name."""
    where = node_position(node, source)
    yield Definition(name=node.id, position=where, node=node)
def get_names(value: ast.AST) -> List[Tuple[str, ...]]:
    """Return name tuples for `value`; tuples unpack to one entry each."""
    if not isinstance(value, ast.Tuple):
        return [names_for(value)]
    return [names_for(element) for element in value.elts]
@singledispatch
def names_for(node: ast.AST) -> Tuple[str, ...]: # pylint: disable= unused-argument
    """Fallback: nodes without a recognizable name contribute nothing."""
    return tuple()
@names_for.register
def names_for_name(node: ast.Name) -> Tuple[str, ...]:
    """A plain name contributes itself."""
    return (node.id,)
@names_for.register
def names_for_attribute(node: ast.Attribute) -> Tuple[str, ...]:
    """Names of the value expression, followed by the attribute itself."""
    prefix = names_for(node.value)
    return prefix + (node.attr,)
@names_for.register
def names_for_call(node: ast.Call) -> Tuple[str, ...]:
    """A call contributes the names of its callee."""
    return names_for(node.func)
def visit_comp(
    node: Union[ast.DictComp, ast.ListComp, ast.SetComp, ast.GeneratorExp],
    source: Source,
    *sub_nodes,
) -> Iterator[Event]:
    """Shared comprehension handling: a synthetic, unique scope wraps the
    generators, their conditions and the element/key/value expressions."""
    scope_name = f"{type(node)}-{id(node)}"
    yield EnterScope(scope_name)
    for generator in node.generators:
        yield from visit_definition(generator.target, source)
        yield from visit(generator.iter, source)
        for condition in generator.ifs:
            yield from visit(condition, source)
    for sub_node in sub_nodes:
        yield from visit(sub_node, source)
    yield LeaveScope()
def generic_visit(node, source: Source) -> Iterator[Event]:
    """Visit every AST child of `node`, direct or inside a list field.

    Copied and reworked from NodeVisitor in:
    https://github.com/python/cpython/blob/master/Lib/ast.py
    """
    for _, field_value in ast.iter_fields(node):
        if isinstance(field_value, ast.AST):
            yield from visit(field_value, source)
        elif isinstance(field_value, list):
            for child in field_value:
                if isinstance(child, ast.AST):
                    yield from visit(child, source)
@singledispatch
def names_from(node: ast.AST) -> Tuple[str, ...]: # | |
+ m.x603 + m.x607 + m.x611 + m.x615 + m.x619 + m.x623 + m.x627 + m.x631 +
m.x635 + m.x639 + m.x643 + m.x647 + m.x651 + m.x655 + m.x659 + m.x663 +
m.x667 + m.x671 + m.x675 + m.x679 + m.x683 + m.x687 + m.x691 + m.x695 +
m.x699 + m.x703 + m.x707 + m.x711 + m.x715 + m.x719 + m.x723 + m.x727 +
m.x731 + m.x735 + m.x739 == 0)
# e337-e345: balance constraints — each total flow x219..x235 must equal the
# sum of its 40 disaggregated flow variables (stride-4 column slices).
m.e337 = Constraint(expr= -m.x219 + m.x584 + m.x588 + m.x592 + m.x596 + m.x600
+ m.x604 + m.x608 + m.x612 + m.x616 + m.x620 + m.x624 + m.x628 + m.x632 +
m.x636 + m.x640 + m.x644 + m.x648 + m.x652 + m.x656 + m.x660 + m.x664 +
m.x668 + m.x672 + m.x676 + m.x680 + m.x684 + m.x688 + m.x692 + m.x696 +
m.x700 + m.x704 + m.x708 + m.x712 + m.x716 + m.x720 + m.x724 + m.x728 +
m.x732 + m.x736 + m.x740 == 0)
m.e338 = Constraint(expr= -m.x221 + m.x741 + m.x745 + m.x749 + m.x753 + m.x757
+ m.x761 + m.x765 + m.x769 + m.x773 + m.x777 + m.x781 + m.x785 + m.x789 +
m.x793 + m.x797 + m.x801 + m.x805 + m.x809 + m.x813 + m.x817 + m.x821 +
m.x825 + m.x829 + m.x833 + m.x837 + m.x841 + m.x845 + m.x849 + m.x853 +
m.x857 + m.x861 + m.x865 + m.x869 + m.x873 + m.x877 + m.x881 + m.x885 +
m.x889 + m.x893 + m.x897 == 0)
m.e339 = Constraint(expr= -m.x223 + m.x742 + m.x746 + m.x750 + m.x754 + m.x758
+ m.x762 + m.x766 + m.x770 + m.x774 + m.x778 + m.x782 + m.x786 + m.x790 +
m.x794 + m.x798 + m.x802 + m.x806 + m.x810 + m.x814 + m.x818 + m.x822 +
m.x826 + m.x830 + m.x834 + m.x838 + m.x842 + m.x846 + m.x850 + m.x854 +
m.x858 + m.x862 + m.x866 + m.x870 + m.x874 + m.x878 + m.x882 + m.x886 +
m.x890 + m.x894 + m.x898 == 0)
m.e340 = Constraint(expr= -m.x225 + m.x743 + m.x747 + m.x751 + m.x755 + m.x759
+ m.x763 + m.x767 + m.x771 + m.x775 + m.x779 + m.x783 + m.x787 + m.x791 +
m.x795 + m.x799 + m.x803 + m.x807 + m.x811 + m.x815 + m.x819 + m.x823 +
m.x827 + m.x831 + m.x835 + m.x839 + m.x843 + m.x847 + m.x851 + m.x855 +
m.x859 + m.x863 + m.x867 + m.x871 + m.x875 + m.x879 + m.x883 + m.x887 +
m.x891 + m.x895 + m.x899 == 0)
m.e341 = Constraint(expr= -m.x227 + m.x744 + m.x748 + m.x752 + m.x756 + m.x760
+ m.x764 + m.x768 + m.x772 + m.x776 + m.x780 + m.x784 + m.x788 + m.x792 +
m.x796 + m.x800 + m.x804 + m.x808 + m.x812 + m.x816 + m.x820 + m.x824 +
m.x828 + m.x832 + m.x836 + m.x840 + m.x844 + m.x848 + m.x852 + m.x856 +
m.x860 + m.x864 + m.x868 + m.x872 + m.x876 + m.x880 + m.x884 + m.x888 +
m.x892 + m.x896 + m.x900 == 0)
m.e342 = Constraint(expr= -m.x229 + m.x901 + m.x905 + m.x909 + m.x913 + m.x917
+ m.x921 + m.x925 + m.x929 + m.x933 + m.x937 + m.x941 + m.x945 + m.x949 +
m.x953 + m.x957 + m.x961 + m.x965 + m.x969 + m.x973 + m.x977 + m.x981 +
m.x985 + m.x989 + m.x993 + m.x997 + m.x1001 + m.x1005 + m.x1009 + m.x1013
+ m.x1017 + m.x1021 + m.x1025 + m.x1029 + m.x1033 + m.x1037 + m.x1041 +
m.x1045 + m.x1049 + m.x1053 + m.x1057 == 0)
m.e343 = Constraint(expr= -m.x231 + m.x902 + m.x906 + m.x910 + m.x914 + m.x918
+ m.x922 + m.x926 + m.x930 + m.x934 + m.x938 + m.x942 + m.x946 + m.x950 +
m.x954 + m.x958 + m.x962 + m.x966 + m.x970 + m.x974 + m.x978 + m.x982 +
m.x986 + m.x990 + m.x994 + m.x998 + m.x1002 + m.x1006 + m.x1010 + m.x1014
+ m.x1018 + m.x1022 + m.x1026 + m.x1030 + m.x1034 + m.x1038 + m.x1042 +
m.x1046 + m.x1050 + m.x1054 + m.x1058 == 0)
m.e344 = Constraint(expr= -m.x233 + m.x903 + m.x907 + m.x911 + m.x915 + m.x919
+ m.x923 + m.x927 + m.x931 + m.x935 + m.x939 + m.x943 + m.x947 + m.x951 +
m.x955 + m.x959 + m.x963 + m.x967 + m.x971 + m.x975 + m.x979 + m.x983 +
m.x987 + m.x991 + m.x995 + m.x999 + m.x1003 + m.x1007 + m.x1011 + m.x1015
+ m.x1019 + m.x1023 + m.x1027 + m.x1031 + m.x1035 + m.x1039 + m.x1043 +
m.x1047 + m.x1051 + m.x1055 + m.x1059 == 0)
m.e345 = Constraint(expr= -m.x235 + m.x904 + m.x908 + m.x912 + m.x916 + m.x920
+ m.x924 + m.x928 + m.x932 + m.x936 + m.x940 + m.x944 + m.x948 + m.x952 +
m.x956 + m.x960 + m.x964 + m.x968 + m.x972 + m.x976 + m.x980 + m.x984 +
m.x988 + m.x992 + m.x996 + m.x1000 + m.x1004 + m.x1008 + m.x1012 + m.x1016
+ m.x1020 + m.x1024 + m.x1028 + m.x1032 + m.x1036 + m.x1040 + m.x1044 +
m.x1048 + m.x1052 + m.x1056 + m.x1060 == 0)
# e346-e394: big-M linking constraints x <= 10*b — a continuous variable may
# be nonzero only when its binary selector b_k is switched on.
# e346-e385 cover the first component (x261, x265, ..., x417) for b1..b40;
# e386 onward cover the second component (x262, x266, ...).
m.e346 = Constraint(expr= -10 * m.b1 + m.x261 <= 0)
m.e347 = Constraint(expr= -10 * m.b2 + m.x265 <= 0)
m.e348 = Constraint(expr= -10 * m.b3 + m.x269 <= 0)
m.e349 = Constraint(expr= -10 * m.b4 + m.x273 <= 0)
m.e350 = Constraint(expr= -10 * m.b5 + m.x277 <= 0)
m.e351 = Constraint(expr= -10 * m.b6 + m.x281 <= 0)
m.e352 = Constraint(expr= -10 * m.b7 + m.x285 <= 0)
m.e353 = Constraint(expr= -10 * m.b8 + m.x289 <= 0)
m.e354 = Constraint(expr= -10 * m.b9 + m.x293 <= 0)
m.e355 = Constraint(expr= -10 * m.b10 + m.x297 <= 0)
m.e356 = Constraint(expr= -10 * m.b11 + m.x301 <= 0)
m.e357 = Constraint(expr= -10 * m.b12 + m.x305 <= 0)
m.e358 = Constraint(expr= -10 * m.b13 + m.x309 <= 0)
m.e359 = Constraint(expr= -10 * m.b14 + m.x313 <= 0)
m.e360 = Constraint(expr= -10 * m.b15 + m.x317 <= 0)
m.e361 = Constraint(expr= -10 * m.b16 + m.x321 <= 0)
m.e362 = Constraint(expr= -10 * m.b17 + m.x325 <= 0)
m.e363 = Constraint(expr= -10 * m.b18 + m.x329 <= 0)
m.e364 = Constraint(expr= -10 * m.b19 + m.x333 <= 0)
m.e365 = Constraint(expr= -10 * m.b20 + m.x337 <= 0)
m.e366 = Constraint(expr= -10 * m.b21 + m.x341 <= 0)
m.e367 = Constraint(expr= -10 * m.b22 + m.x345 <= 0)
m.e368 = Constraint(expr= -10 * m.b23 + m.x349 <= 0)
m.e369 = Constraint(expr= -10 * m.b24 + m.x353 <= 0)
m.e370 = Constraint(expr= -10 * m.b25 + m.x357 <= 0)
m.e371 = Constraint(expr= -10 * m.b26 + m.x361 <= 0)
m.e372 = Constraint(expr= -10 * m.b27 + m.x365 <= 0)
m.e373 = Constraint(expr= -10 * m.b28 + m.x369 <= 0)
m.e374 = Constraint(expr= -10 * m.b29 + m.x373 <= 0)
m.e375 = Constraint(expr= -10 * m.b30 + m.x377 <= 0)
m.e376 = Constraint(expr= -10 * m.b31 + m.x381 <= 0)
m.e377 = Constraint(expr= -10 * m.b32 + m.x385 <= 0)
m.e378 = Constraint(expr= -10 * m.b33 + m.x389 <= 0)
m.e379 = Constraint(expr= -10 * m.b34 + m.x393 <= 0)
m.e380 = Constraint(expr= -10 * m.b35 + m.x397 <= 0)
m.e381 = Constraint(expr= -10 * m.b36 + m.x401 <= 0)
m.e382 = Constraint(expr= -10 * m.b37 + m.x405 <= 0)
m.e383 = Constraint(expr= -10 * m.b38 + m.x409 <= 0)
m.e384 = Constraint(expr= -10 * m.b39 + m.x413 <= 0)
m.e385 = Constraint(expr= -10 * m.b40 + m.x417 <= 0)
m.e386 = Constraint(expr= -10 * m.b1 + m.x262 <= 0)
m.e387 = Constraint(expr= -10 * m.b2 + m.x266 <= 0)
m.e388 = Constraint(expr= -10 * m.b3 + m.x270 <= 0)
m.e389 = Constraint(expr= -10 * m.b4 + m.x274 <= 0)
m.e390 = Constraint(expr= -10 * m.b5 + m.x278 <= 0)
m.e391 = Constraint(expr= -10 * m.b6 + m.x282 <= 0)
m.e392 = Constraint(expr= -10 * m.b7 + m.x286 <= 0)
m.e393 = Constraint(expr= -10 * m.b8 + m.x290 <= 0)
m.e394 = Constraint(expr= -10 * m.b9 + m.x294 <= 0)
m.e395 = Constraint(expr= | |
necessary, downloads the Stanford HARDI dataset into DIPY directory and
creates a BIDS compliant file-system structure in AFQ data directory:
~/AFQ_data/
└── stanford_hardi
├── dataset_description.json
└── derivatives
├── freesurfer
│ ├── dataset_description.json
│ └── sub-01
│ └── ses-01
│ └── anat
│ ├── sub-01_ses-01_T1w.nii.gz
│ └── sub-01_ses-01_seg.nii.gz
└── vistasoft
├── dataset_description.json
└── sub-01
└── ses-01
└── dwi
├── sub-01_ses-01_dwi.bval
├── sub-01_ses-01_dwi.bvec
└── sub-01_ses-01_dwi.nii.gz
If clear_previous_afq is True and there is an afq folder in derivatives,
it will be removed.
"""
logger = logging.getLogger('AFQ.data')
# fetches data for first subject and session
logger.info('fetching Stanford HARDI data')
dpd.fetch_stanford_hardi()
if path is None:
if not op.exists(afq_home):
logger.info(f'creating AFQ home directory: {afq_home}')
os.makedirs(afq_home, exist_ok=True)
path = afq_home
bids_path = op.join(path, 'stanford_hardi',)
derivatives_path = op.join(bids_path, 'derivatives')
dmriprep_folder = op.join(derivatives_path, 'vistasoft')
freesurfer_folder = op.join(derivatives_path, 'freesurfer')
if clear_previous_afq:
afq_folder = op.join(derivatives_path, 'afq')
if op.exists(afq_folder):
shutil.rmtree(afq_folder)
if not op.exists(derivatives_path):
logger.info(f'creating derivatives directory: {derivatives_path}')
# anatomical data
anat_folder = op.join(freesurfer_folder, 'sub-01', 'ses-01', 'anat')
os.makedirs(anat_folder, exist_ok=True)
t1_img = dpd.read_stanford_t1()
nib.save(t1_img, op.join(anat_folder, 'sub-01_ses-01_T1w.nii.gz'))
seg_img = dpd.read_stanford_labels()[-1]
nib.save(seg_img, op.join(anat_folder,
'sub-01_ses-01_seg.nii.gz'))
# diffusion-weighted imaging data
dwi_folder = op.join(dmriprep_folder, 'sub-01', 'ses-01', 'dwi')
os.makedirs(dwi_folder, exist_ok=True)
dwi_img, gtab = dpd.read_stanford_hardi()
nib.save(dwi_img, op.join(dwi_folder, 'sub-01_ses-01_dwi.nii.gz'))
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bvec'), gtab.bvecs)
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bval'), gtab.bvals)
else:
logger.info('Dataset is already in place. If you want to fetch it '
+ 'again please first remove the folder '
+ derivatives_path)
# Dump out the description of the dataset
to_bids_description(bids_path,
**{"Name": "<NAME>", "Subjects": ["sub-01"]})
# And descriptions of the pipelines in the derivatives:
to_bids_description(dmriprep_folder,
**{"Name": "<NAME>",
"PipelineDescription": {"Name": "vistasoft"}})
to_bids_description(freesurfer_folder,
**{"Name": "<NAME>",
"PipelineDescription": {"Name": "freesurfer"}})
# Fetcher for the minimal (16-bundle) Recobundles atlas; downloads the zip
# from figshare and unzips it under AFQ_HOME/hcp_atlas_16_bundles.
fetch_hcp_atlas_16_bundles = _make_fetcher(
    "fetch_hcp_atlas_16_bundles",
    op.join(afq_home,
            'hcp_atlas_16_bundles'),
    'https://ndownloader.figshare.com/files/',
    ["11921522"],
    ["atlas_16_bundles.zip"],
    md5_list=["b071f3e851f21ba1749c02fc6beb3118"],
    doc="Download minimal Recobundles atlas",
    unzip=True)
# Fetcher for the full (80-bundle) Recobundles atlas; downloads the zip
# from figshare and unzips it under AFQ_HOME/hcp_atlas_80_bundles.
fetch_hcp_atlas_80_bundles = _make_fetcher(
    "fetch_hcp_atlas_80_bundles",
    op.join(afq_home,
            'hcp_atlas_80_bundles'),
    'https://ndownloader.figshare.com/files/',
    ["13638644"],
    ["Atlas_80_Bundles.zip"],
    md5_list=["78331d527a10ec000d4f33bac472e099"],
    doc="Download 80-bundle Recobundles atlas",
    unzip=True)
def read_hcp_atlas(n_bundles=16):
    """
    Read one of the HCP Recobundles atlases into a bundle dictionary.

    Parameters
    ----------
    n_bundles : int
        16 or 80, which selects among the two different
        atlases:

        https://figshare.com/articles/Simple_model_bundle_atlas_for_RecoBundles/6483614 #noqa
        https://figshare.com/articles/Advanced_Atlas_of_80_Bundles_in_MNI_space/7375883 #noqa

    Returns
    -------
    dict
        Maps 'whole_brain' to its streamlines, and each bundle name to a
        dict with 'sl' (streamlines) and 'centroid' (a single QuickBundles
        centroid resampled to 100 points).

    Raises
    ------
    ValueError
        If n_bundles is neither 16 nor 80.
    """
    bundle_dict = {}
    if n_bundles == 16:
        _, folder = fetch_hcp_atlas_16_bundles()
        atlas_folder = "Atlas_in_MNI_Space_16_bundles"
    elif n_bundles == 80:
        _, folder = fetch_hcp_atlas_80_bundles()
        atlas_folder = "Atlas_80_Bundles"
    else:
        # Fail fast with a clear message; previously any other value fell
        # through and crashed below with an opaque UnboundLocalError.
        raise ValueError(
            f"n_bundles must be either 16 or 80, got {n_bundles}")
    whole_brain = load_tractogram(
        op.join(
            folder,
            atlas_folder,
            'whole_brain',
            'whole_brain_MNI.trk'),
        'same', bbox_valid_check=False).streamlines
    bundle_dict['whole_brain'] = whole_brain
    bundle_files = glob(
        op.join(
            folder,
            atlas_folder,
            "bundles", "*.trk"))
    for bundle_file in bundle_files:
        bundle = op.splitext(op.split(bundle_file)[-1])[0]
        bundle_dict[bundle] = {}
        bundle_dict[bundle]['sl'] = load_tractogram(
            bundle_file,
            'same',
            bbox_valid_check=False).streamlines
        # Summarize each bundle with a single 100-point centroid streamline.
        feature = ResampleFeature(nb_points=100)
        metric = AveragePointwiseEuclideanMetric(feature)
        qb = QuickBundles(np.inf, metric=metric)
        cluster = qb.cluster(bundle_dict[bundle]['sl'])
        bundle_dict[bundle]['centroid'] = cluster.centroids[0]
    # For some reason, this file-name has a 0 in it, instead of an O:
    bundle_dict["IFOF_R"] = bundle_dict["IF0F_R"]
    # In the 80-bundle case, there are two files, and both have identical
    # content, so this is fine:
    del bundle_dict["IF0F_R"]
    return bundle_dict
# Fetcher for the AAL atlas volume plus its label table (two separate
# figshare downloads; no unzipping needed).
fetch_aal_atlas = _make_fetcher(
    "fetch_aal_atlas",
    op.join(afq_home,
            'aal_atlas'),
    'https://ndownloader.figshare.com/files/',
    ["28416852",
     "28416855"],
    ["MNI_AAL_AndMore.nii.gz",
     "MNI_AAL.txt"],
    md5_list=["69395b75a16f00294a80eb9428bf7855",
              "59fd3284b17de2fbe411ca1c7afe8c65"],
    doc="Download the AAL atlas",
    unzip=False)
def read_aal_atlas(resample_to=None):
    """
    Reads the AAL atlas [1]_.

    Parameters
    ----------
    resample_to : nib.Nifti1Image class instance, optional
        If provided, each volume of the atlas is resampled into this
        image's space and affine.

    .. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
       <NAME>, <NAME>, <NAME>. (2002). Automated anatomical
       labeling of activations in SPM using a macroscopic anatomical
       parcellation of the MNI MRI single-subject brain. Neuroimage. 2002;
       15(1):273-89.
    """
    file_dict, folder = fetch_aal_atlas()
    out_dict = {}
    # The fetch yields one .txt label table and one NIfTI atlas volume.
    for fname in file_dict:
        full_path = op.join(folder, fname)
        if fname.endswith('.txt'):
            out_dict['labels'] = pd.read_csv(full_path)
        else:
            out_dict['atlas'] = nib.load(full_path)
    if resample_to is None:
        return out_dict
    data = out_dict['atlas'].get_fdata()
    resampled_volumes = []
    for volume_index in range(data.shape[-1]):
        resampled = resample(
            data[..., volume_index],
            resample_to,
            out_dict['atlas'].affine,
            resample_to.affine)
        resampled_volumes.append(resampled.get_fdata())
    out_dict['atlas'] = nib.Nifti1Image(np.stack(resampled_volumes, -1),
                                        resample_to.affine)
    return out_dict
def aal_to_regions(regions, atlas=None):
    """
    Queries for large regions containing multiple AAL ROIs

    Parameters
    ----------
    regions : string or list of strings
        The name of the requested region. This can either be an AAL-defined ROI
        name (e.g, 'Occipital_Sup_L') or one of:
        {'leftfrontal' | 'leftoccipital' | 'lefttemporal' | 'leftparietal'
        | 'leftanttemporal' | 'leftparietal' | 'leftanttemporal'
        | 'leftuncinatefront' | 'leftifoffront' | 'leftinfparietal'
        | 'cerebellum' | 'leftarcfrontal' | 'leftarctemp' | 'leftcingpost'}
        each of which there is an equivalent 'right' region for. In addition,
        there are a few bilateral regions: {'occipital' | 'temporal'}, which
        encompass both the right and left region of this name, as well as:
        {'cstinferior' | 'cstsuperior'}
    atlas : 4D array, optional
        Contains the AAL atlas in the correct coordinate frame with additional
        volumes for CST and cingulate ROIs ("AAL and more"). Defaults to the
        atlas returned by `read_aal_atlas`.

    Returns
    -------
    ndarray of shape (n_voxels, 3)
        3D indices to the requested region(s) in the atlas volume.

    Raises
    ------
    ValueError
        If a requested region name is not recognized.

    Notes
    -----
    Several regions can be referred to by multiple names:
    'leftuncinatetemp' = 'leftilftemp' = 'leftanttemporal'
    'rightuncinatetemp' = 'rightilftemp' = 'rightanttemporal'
    'leftslfpar' = 'leftinfparietal'
    'rightslfpar' = 'rightinfparietal'
    'leftslffrontal' = 'leftarcfrontal'
    'rightslffrontal' = 'rightarcfrontal'
    """
    if atlas is None:
        atlas = read_aal_atlas()['atlas']
    # AAL label values for each named left-hemisphere region (odd values are
    # left, even values are right in the AAL numbering).
    atlas_vals = {'leftfrontal': np.arange(1, 26, 2),
                  # Occipital regions do not include fusiform:
                  'leftoccipital': np.arange(43, 54, 2),
                  # Temporal regions include fusiform:
                  'lefttemporal': np.concatenate([np.arange(37, 42, 2),
                                                  np.array([55]),
                                                  np.arange(79, 90, 2)]),
                  # NOTE(review): np.array([57, 67, 2]) looks like it may have
                  # been intended as np.arange(57, 67, 2) — confirm before
                  # changing, since downstream results depend on it.
                  'leftparietal': np.array([57, 67, 2]),
                  'leftanttemporal': np.array([41, 83, 87]),
                  'leftuncinatefront': np.array([5, 9, 15, 25]),
                  'leftifoffront': np.array([3, 5, 7, 9, 13, 15, 25]),
                  'leftinfparietal': np.array([61, 63, 65]),
                  'cerebellum': np.arange(91, 117),
                  'leftarcfrontal': np.array([1, 11, 13]),
                  'leftarctemp': np.array([79, 81, 85, 89]),
                  }
    # Right symmetrical is off by one:
    atlas_vals['rightfrontal'] = atlas_vals['leftfrontal'] + 1
    atlas_vals['rightoccipital'] = atlas_vals['leftoccipital'] + 1
    atlas_vals['righttemporal'] = atlas_vals['lefttemporal'] + 1
    atlas_vals['rightparietal'] = atlas_vals['leftparietal'] + 1
    atlas_vals['rightanttemporal'] = atlas_vals['leftanttemporal'] + 1
    atlas_vals['rightuncinatefront'] = atlas_vals['leftuncinatefront'] + 1
    atlas_vals['rightifoffront'] = atlas_vals['leftifoffront'] + 1
    atlas_vals['rightinfparietal'] = atlas_vals['leftinfparietal'] + 1
    atlas_vals['rightarcfrontal'] = atlas_vals['leftarcfrontal'] + 1
    atlas_vals['rightarctemp'] = atlas_vals['leftarctemp'] + 1
    # Multiply named regions:
    atlas_vals['leftuncinatetemp'] = atlas_vals['leftilftemp'] =\
        atlas_vals['leftanttemporal']
    atlas_vals['rightuncinatetemp'] = atlas_vals['rightilftemp'] =\
        atlas_vals['rightanttemporal']
    atlas_vals['leftslfpar'] = atlas_vals['leftinfparietal']
    atlas_vals['rightslfpar'] = atlas_vals['rightinfparietal']
    atlas_vals['leftslffrontal'] = atlas_vals['leftarcfrontal']
    atlas_vals['rightslffrontal'] = atlas_vals['rightarcfrontal']
    # Bilateral regions:
    atlas_vals['occipital'] = np.union1d(atlas_vals['leftoccipital'],
                                         atlas_vals['rightoccipital'])
    atlas_vals['temporal'] = np.union1d(atlas_vals['lefttemporal'],
                                        atlas_vals['righttemporal'])
    if isinstance(regions, str):
        regions = [regions]
    idxes = []
    for region in regions:
        region = region.lower()  # Just to be sure
        if region in atlas_vals:
            # Named AAL compound regions live in the first atlas volume.
            vol_idx = 0
            vals = atlas_vals[region]
        elif region == 'cstinferior':
            vol_idx = 1
            vals = np.array([1])
        elif region == 'cstsuperior':
            vol_idx = 2
            vals = np.array([1])
        elif region == 'leftcingpost':
            vol_idx = 3
            vals = np.array([1])
        elif region == 'rightcingpost':
            vol_idx = 4
            vals = np.array([1])
        else:
            # Previously an unrecognized name fell through with vol_idx/vals
            # unbound and crashed with an opaque NameError below; fail early
            # with an informative message instead.
            raise ValueError(f"Region name not recognized: {region}")
        # Broadcast vals, to test for equality over all three dimensions:
        is_in = atlas[..., vol_idx] == vals[:, None, None, None]
        # Then collapse the 4th dimension (each val), to get the 3D array:
        is_in = np.sum(is_in, 0)
        idxes.append(np.array(np.where(is_in)).T)
    return np.concatenate(idxes, axis=0)
def bundles_to_aal(bundles, atlas=None):
    """
    Map AFQ bundle names to their AAL endpoint targets.

    For each bundle name, produce a [target0, target1] pair, where each
    target is an Nx3 array of voxel indices for the region expected at the
    first/last node of the bundle's streamlines, or None when no endpoint
    region is defined for that end. Unknown bundle names are logged as a
    warning and mapped to [None, None].
    """
    if atlas is None:
        atlas = read_aal_atlas()['atlas']
    endpoint_dict = {
        "ATR_L": [['leftfrontal'], None],
        "ATR_R": [['rightfrontal'], None],
        "CST_L": [['cstinferior'], ['cstsuperior']],
        "CST_R": [['cstinferior'], ['cstsuperior']],
        "CGC_L": [['leftcingpost'], None],
        "CGC_R": [['rightcingpost'], None],
        "HCC_L": [None, None],
        "HCC_R": [None, None],
        "FP": [['rightoccipital'], ['leftoccipital']],
        "FA": [['rightfrontal'], ['leftfrontal']],
        "IFO_L": [['leftoccipital'], ['leftifoffront']],
        "IFO_R": [['rightoccipital'], ['rightifoffront']],
        "ILF_L": [['leftoccipital'], ['leftilftemp']],
        "ILF_R": [['rightoccipital'], ['rightilftemp']],
        "SLF_L": [['leftslffrontal'], ['leftinfparietal']],
        "SLF_R": [['rightslffrontal'], ['rightinfparietal']],
        "UNC_L": [['leftanttemporal'], ['leftuncinatefront']],
        "UNC_R": [['rightanttemporal'], ['rightuncinatefront']],
        "ARC_L": [['leftfrontal'], ['leftarctemp']],
        "ARC_R": [['rightfrontal'], ['rightarctemp']],
        "AntFrontal": [None, None],
        "Motor": [None, None],
        "Occipital": [None, None],
        "Orbital": [None, None],
        "PostParietal": [None, None],
        "SupFrontal": [None, None],
        "SupParietal": [None, None],
        "Temporal": [None, None]}
    targets = []
    for name in bundles:
        endpoints = endpoint_dict.get(name)
        if endpoints:
            # Resolve each defined endpoint region to voxel indices; keep
            # None entries as-is.
            targets.append([
                None if region is None
                else aal_to_regions(region, atlas=atlas)
                for region in endpoints])
        else:
            logger = logging.getLogger('AFQ.data')
            logger.warning(f"Segmentation end points undefined for {name},"
                           + " continuing without end points")
            targets.append([None, None])
    return targets
def s3fs_nifti_write(img, fname, fs=None):
    """
    Write a nifti file straight to S3

    Parameters
    ----------
    img : nib.Nifti1Image class instance
        The image containing data to be written into S3
    fname : string
        Full path (including bucket name and extension) to the S3 location
        where the file is to be saved.
    fs : an s3fs.S3FileSystem class instance, optional
        A file-system to refer to. Default to create a new file-system
    """
    if fs is None:
        fs = s3fs.S3FileSystem()
    # Serialize the image into an in-memory buffer (header and image data are
    # both directed at the same buffer), instead of a temporary file on disk.
    bio = BytesIO()
    file_map = img.make_file_map({'image': bio, 'header': bio})
    img.to_file_map(file_map)
    # Gzip the serialized bytes before upload — presumably because fname is
    # expected to end in '.nii.gz'; confirm with callers.
    data = gzip.compress(bio.getvalue())
    with fs.open(fname, 'wb') as ff:
        ff.write(data)
def s3fs_nifti_read(fname, fs=None, anon=False):
"""
Lazily | |
<reponame>SHIVJITH/Odoo_Machine_Test<filename>addons/purchase_stock/models/purchase.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.tools.float_utils import float_compare, float_round
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.exceptions import UserError
from odoo.addons.purchase.models.purchase import PurchaseOrder as Purchase
class PurchaseOrder(models.Model):
    """Extension of purchase.order integrating purchases with inventory:
    receipt pickings are created when the PO is confirmed, and their state
    is reflected back on the order (picking_ids, is_shipped, effective_date).
    """
    _inherit = 'purchase.order'

    @api.model
    def _default_picking_type(self):
        """Return the default incoming operation type for the company in
        context (falling back to the current company)."""
        return self._get_picking_type(self.env.context.get('company_id') or self.env.company.id)

    incoterm_id = fields.Many2one('account.incoterms', 'Incoterm', states={'done': [('readonly', True)]}, help="International Commercial Terms are a series of predefined commercial terms used in international transactions.")
    # Receipts (stock.picking) generated for this order, derived from the
    # order lines' stock moves; stored so they can be searched/filtered.
    picking_count = fields.Integer(compute='_compute_picking', string='Picking count', default=0, store=True)
    picking_ids = fields.Many2many('stock.picking', compute='_compute_picking', string='Receptions', copy=False, store=True)
    picking_type_id = fields.Many2one('stock.picking.type', 'Deliver To', states=Purchase.READONLY_STATES, required=True, default=_default_picking_type, domain="['|', ('warehouse_id', '=', False), ('warehouse_id.company_id', '=', company_id)]",
        help="This will determine operation type of incoming shipment")
    default_location_dest_id_usage = fields.Selection(related='picking_type_id.default_location_dest_id.usage', string='Destination Location Type',
        help="Technical field used to display the Drop Ship Address", readonly=True)
    group_id = fields.Many2one('procurement.group', string="Procurement Group", copy=False)
    is_shipped = fields.Boolean(compute="_compute_is_shipped")
    effective_date = fields.Datetime("Effective Date", compute='_compute_effective_date', store=True, copy=False,
        help="Completion date of the first receipt order.")
    on_time_rate = fields.Float(related='partner_id.on_time_rate', compute_sudo=False)

    @api.depends('order_line.move_ids.picking_id')
    def _compute_picking(self):
        """Collect the pickings reachable through the order lines' moves."""
        for order in self:
            pickings = order.order_line.mapped('move_ids.picking_id')
            order.picking_ids = pickings
            order.picking_count = len(pickings)

    @api.depends('picking_ids.date_done')
    def _compute_effective_date(self):
        """Effective date = earliest `date_done` among validated receipts
        that deliver into an internal location (False when none exist)."""
        for order in self:
            pickings = order.picking_ids.filtered(lambda x: x.state == 'done' and x.location_dest_id.usage == 'internal' and x.date_done)
            order.effective_date = min(pickings.mapped('date_done'), default=False)

    @api.depends('picking_ids', 'picking_ids.state')
    def _compute_is_shipped(self):
        """An order is 'shipped' when it has pickings and every one of them
        is either done or cancelled."""
        for order in self:
            if order.picking_ids and all(x.state in ['done', 'cancel'] for x in order.picking_ids):
                order.is_shipped = True
            else:
                order.is_shipped = False

    @api.onchange('picking_type_id')
    def _onchange_picking_type_id(self):
        """Clear the dropship address when the chosen operation type no
        longer delivers to a customer location."""
        if self.picking_type_id.default_location_dest_id.usage != 'customer':
            self.dest_address_id = False

    @api.onchange('company_id')
    def _onchange_company_id(self):
        """Reset the picking type whenever the current one is not a valid
        incoming type for the newly selected company."""
        p_type = self.picking_type_id
        if not(p_type and p_type.code == 'incoming' and (p_type.warehouse_id.company_id == self.company_id or not p_type.warehouse_id)):
            self.picking_type_id = self._get_picking_type(self.company_id.id)

    # --------------------------------------------------
    # CRUD
    # --------------------------------------------------
    def write(self, vals):
        """Override write to log an activity note when ordered quantities
        are decreased on a confirmed order."""
        # NOTE(review): pre_order_line_qty is rebound on each iteration of
        # the first loop, so with a multi-record `self` only the last order's
        # snapshot survives into the second loop — looks suspect; confirm.
        if vals.get('order_line') and self.state == 'purchase':
            for order in self:
                pre_order_line_qty = {order_line: order_line.product_qty for order_line in order.mapped('order_line')}
        res = super(PurchaseOrder, self).write(vals)
        if vals.get('order_line') and self.state == 'purchase':
            for order in self:
                to_log = {}
                for order_line in order.order_line:
                    # Only log when the quantity strictly decreased
                    # (float_compare > 0 means old qty > new qty).
                    if pre_order_line_qty.get(order_line, False) and float_compare(pre_order_line_qty[order_line], order_line.product_qty, precision_rounding=order_line.product_uom.rounding) > 0:
                        to_log[order_line] = (order_line.product_qty, pre_order_line_qty[order_line])
                if to_log:
                    order._log_decrease_ordered_quantity(to_log)
        return res

    # --------------------------------------------------
    # Actions
    # --------------------------------------------------
    def button_approve(self, force=False):
        """Approve the order and create the associated receipt picking."""
        result = super(PurchaseOrder, self).button_approve(force=force)
        self._create_picking()
        return result

    def button_cancel(self):
        """Cancel the orders and their receipts; forbidden once any of the
        linked stock moves has already been done."""
        for order in self:
            for move in order.order_line.mapped('move_ids'):
                if move.state == 'done':
                    raise UserError(_('Unable to cancel purchase order %s as some receptions have already been done.') % (order.name))
            # If the product is MTO, change the procure_method of the closest move to purchase to MTS.
            # The purpose is to link the po that the user will manually generate to the existing moves's chain.
            if order.state in ('draft', 'sent', 'to approve', 'purchase'):
                for order_line in order.order_line:
                    order_line.move_ids._action_cancel()
                    if order_line.move_dest_ids:
                        move_dest_ids = order_line.move_dest_ids
                        if order_line.propagate_cancel:
                            move_dest_ids._action_cancel()
                        else:
                            move_dest_ids.write({'procure_method': 'make_to_stock'})
                            move_dest_ids._recompute_state()
            for pick in order.picking_ids.filtered(lambda r: r.state != 'cancel'):
                pick.action_cancel()
            # (5, 0, 0) command: unlink all destination moves from the lines.
            order.order_line.write({'move_dest_ids':[(5,0,0)]})
        return super(PurchaseOrder, self).button_cancel()

    def action_view_picking(self):
        """ This function returns an action that display existing picking orders of given purchase order ids. When only one found, show the picking immediately.
        """
        result = self.env["ir.actions.actions"]._for_xml_id('stock.action_picking_tree_all')
        # override the context to get rid of the default filtering on operation type
        result['context'] = {'default_partner_id': self.partner_id.id, 'default_origin': self.name, 'default_picking_type_id': self.picking_type_id.id}
        pick_ids = self.mapped('picking_ids')
        # choose the view_mode accordingly
        if not pick_ids or len(pick_ids) > 1:
            result['domain'] = "[('id','in',%s)]" % (pick_ids.ids)
        elif len(pick_ids) == 1:
            res = self.env.ref('stock.view_picking_form', False)
            form_view = [(res and res.id or False, 'form')]
            if 'views' in result:
                result['views'] = form_view + [(state,view) for state,view in result['views'] if view != 'form']
            else:
                result['views'] = form_view
            result['res_id'] = pick_ids.id
        return result

    def _prepare_invoice(self):
        """Propagate the order's incoterm onto the generated invoice values."""
        invoice_vals = super()._prepare_invoice()
        invoice_vals['invoice_incoterm_id'] = self.incoterm_id.id
        return invoice_vals

    # --------------------------------------------------
    # Business methods
    # --------------------------------------------------
    def _log_decrease_ordered_quantity(self, purchase_order_lines_quantities):
        """Post exception activities on the impacted pickings when ordered
        quantities are decreased on confirmed order lines."""
        def _keys_in_sorted(move):
            """ sort by picking and the responsible for the product the
            move.
            """
            return (move.picking_id.id, move.product_id.responsible_id.id)
        def _keys_in_groupby(move):
            """ group by picking and the responsible for the product the
            move.
            """
            return (move.picking_id, move.product_id.responsible_id)
        def _render_note_exception_quantity_po(order_exceptions):
            # Render the 'exception_on_po' template for one group of
            # exceptions. NOTE(review): reads `rendering_context` from the
            # enclosing loop below — it is invoked by _log_activity per
            # grouped document; confirm that call order holds.
            order_line_ids = self.env['purchase.order.line'].browse([order_line.id for order in order_exceptions.values() for order_line in order[0]])
            purchase_order_ids = order_line_ids.mapped('order_id')
            move_ids = self.env['stock.move'].concat(*rendering_context.keys())
            impacted_pickings = move_ids.mapped('picking_id')._get_impacted_pickings(move_ids) - move_ids.mapped('picking_id')
            values = {
                'purchase_order_ids': purchase_order_ids,
                'order_exceptions': order_exceptions.values(),
                'impacted_pickings': impacted_pickings,
            }
            return self.env.ref('purchase_stock.exception_on_po')._render(values=values)
        documents = self.env['stock.picking']._log_activity_get_documents(purchase_order_lines_quantities, 'move_ids', 'DOWN', _keys_in_sorted, _keys_in_groupby)
        # Skip cancelled pickings: no activity is needed on them.
        filtered_documents = {}
        for (parent, responsible), rendering_context in documents.items():
            if parent._name == 'stock.picking':
                if parent.state == 'cancel':
                    continue
            filtered_documents[(parent, responsible)] = rendering_context
        self.env['stock.picking']._log_activity(_render_note_exception_quantity_po, filtered_documents)

    def _get_destination_location(self):
        """Return the destination location id for the receipt: the customer
        stock location when dropshipping, else the picking type's default."""
        self.ensure_one()
        if self.dest_address_id:
            return self.dest_address_id.property_stock_customer.id
        return self.picking_type_id.default_location_dest_id.id

    @api.model
    def _get_picking_type(self, company_id):
        """Find an incoming picking type for the given company, falling back
        to a warehouse-less incoming type; returns an empty recordset slice
        when none exists."""
        picking_type = self.env['stock.picking.type'].search([('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)])
        if not picking_type:
            picking_type = self.env['stock.picking.type'].search([('code', '=', 'incoming'), ('warehouse_id', '=', False)])
        return picking_type[:1]

    def _prepare_picking(self):
        """Build the values dict used to create the receipt picking for this
        order; also lazily creates the procurement group.

        Raises UserError when the vendor has no supplier stock location."""
        if not self.group_id:
            self.group_id = self.group_id.create({
                'name': self.name,
                'partner_id': self.partner_id.id
            })
        if not self.partner_id.property_stock_supplier.id:
            raise UserError(_("You must set a Vendor Location for this partner %s", self.partner_id.name))
        return {
            'picking_type_id': self.picking_type_id.id,
            'partner_id': self.partner_id.id,
            'user_id': False,
            'date': self.date_order,
            'origin': self.name,
            'location_dest_id': self._get_destination_location(),
            'location_id': self.partner_id.property_stock_supplier.id,
            'company_id': self.company_id.id,
        }

    def _create_picking(self):
        """Create (or reuse) a receipt picking for each confirmed order that
        has storable/consumable lines, create/confirm the stock moves and
        post an origin-link message on the picking."""
        StockPicking = self.env['stock.picking']
        for order in self.filtered(lambda po: po.state in ('purchase', 'done')):
            if any(product.type in ['product', 'consu'] for product in order.order_line.product_id):
                order = order.with_company(order.company_id)
                pickings = order.picking_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
                if not pickings:
                    res = order._prepare_picking()
                    picking = StockPicking.with_user(SUPERUSER_ID).create(res)
                else:
                    # Reuse the first still-open picking instead of creating
                    # a new one.
                    picking = pickings[0]
                moves = order.order_line._create_stock_moves(picking)
                moves = moves.filtered(lambda x: x.state not in ('done', 'cancel'))._action_confirm()
                # Re-sequence moves by date, leaving gaps of 5 for manual
                # insertions.
                seq = 0
                for move in sorted(moves, key=lambda move: move.date):
                    seq += 5
                    move.sequence = seq
                moves._action_assign()
                picking.message_post_with_view('mail.message_origin_link',
                    values={'self': picking, 'origin': order},
                    subtype_id=self.env.ref('mail.mt_note').id)
        return True

    def _add_picking_info(self, activity):
        """Helper method to add picking info to the Date Updated activity when
        vendor updates date_planned of the po lines.
        """
        validated_picking = self.picking_ids.filtered(lambda p: p.state == 'done')
        if validated_picking:
            activity.note += _("<p>Those dates couldn’t be modified accordingly on the receipt %s which had already been validated.</p>") % validated_picking[0].name
        elif not self.picking_ids:
            activity.note += _("<p>Corresponding receipt not found.</p>")
        else:
            activity.note += _("<p>Those dates have been updated accordingly on the receipt %s.</p>") % self.picking_ids[0].name

    def _create_update_date_activity(self, updated_dates):
        """Create the 'date updated' activity and append receipt info to it."""
        # NOTE(review): unlike super(), this override does not return the
        # activity — confirm callers ignore the return value.
        activity = super()._create_update_date_activity(updated_dates)
        self._add_picking_info(activity)

    def _update_update_date_activity(self, updated_dates, activity):
        """Refresh an existing 'date updated' activity: strip the previously
        appended picking paragraph, delegate to super, then re-append it."""
        # remove old picking info to update it
        note_lines = activity.note.split('<p>')
        note_lines.pop()
        activity.note = '<p>'.join(note_lines)
        super()._update_update_date_activity(updated_dates, activity)
        self._add_picking_info(activity)

    @api.model
    def _get_orders_to_remind(self):
        """When auto sending reminder mails, don't send for purchase order with
        validated receipts."""
        return super()._get_orders_to_remind().filtered(lambda p: not p.effective_date)
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
qty_received_method = fields.Selection(selection_add=[('stock_moves', 'Stock Moves')])
move_ids = fields.One2many('stock.move', 'purchase_line_id', string='Reservation', readonly=True, copy=False)
orderpoint_id = fields.Many2one('stock.warehouse.orderpoint', 'Orderpoint')
move_dest_ids = fields.One2many('stock.move', 'created_purchase_line_id', 'Downstream Moves')
product_description_variants = fields.Char('Custom Description')
propagate_cancel = fields.Boolean('Propagate cancellation', default=True)
def _compute_qty_received_method(self):
super(PurchaseOrderLine, self)._compute_qty_received_method()
for line in self.filtered(lambda l: not l.display_type):
if line.product_id.type in ['consu', 'product']:
line.qty_received_method = 'stock_moves'
@api.depends('move_ids.state', 'move_ids.product_uom_qty', 'move_ids.product_uom')
def _compute_qty_received(self):
super(PurchaseOrderLine, self)._compute_qty_received()
for line in self:
if line.qty_received_method == 'stock_moves':
total = 0.0
# In case of a BOM in kit, the products delivered do not correspond to the products in
# the PO. Therefore, we can skip them since they will be handled later on.
for move in line.move_ids.filtered(lambda m: m.product_id == line.product_id):
if move.state == 'done':
if move.location_dest_id.usage == "supplier":
if move.to_refund:
total -= move.product_uom._compute_quantity(move.product_uom_qty, line.product_uom)
elif move.origin_returned_move_id and move.origin_returned_move_id._is_dropshipped() and not move._is_dropshipped_returned():
# Edge case: the dropship is returned to the stock, no to the supplier.
# In this case, the received quantity on the PO is set although we didn't
# receive the product physically in our stock. To avoid counting the
# quantity twice, we do nothing.
pass
elif (
move.location_dest_id.usage == "internal"
and move.to_refund
and move.location_dest_id
not in self.env["stock.location"].search(
[("id", "child_of", move.warehouse_id.view_location_id.id)]
)
):
total -= move.product_uom._compute_quantity(move.product_uom_qty, line.product_uom)
else:
total += move.product_uom._compute_quantity(move.product_uom_qty, line.product_uom)
line._track_qty_received(total)
line.qty_received = total
@api.model_create_multi
def create(self, vals_list):
lines = super(PurchaseOrderLine, self).create(vals_list)
lines.filtered(lambda l: l.order_id.state == 'purchase')._create_or_update_picking()
return lines
def write(self, values):
for line in self.filtered(lambda l: not l.display_type):
# PO date_planned overrides any PO line date_planned values
if values.get('date_planned'):
new_date = fields.Datetime.to_datetime(values['date_planned'])
self._update_move_date_deadline(new_date)
result = super(PurchaseOrderLine, self).write(values)
if 'product_qty' in values:
self.filtered(lambda l: l.order_id.state == 'purchase')._create_or_update_picking()
return result
def unlink(self):
self.move_ids._action_cancel()
ppg_cancel_lines = self.filtered(lambda line: line.propagate_cancel)
ppg_cancel_lines.move_dest_ids._action_cancel()
not_ppg_cancel_lines = self.filtered(lambda line: not line.propagate_cancel)
not_ppg_cancel_lines.move_dest_ids.write({'procure_method': 'make_to_stock'})
not_ppg_cancel_lines.move_dest_ids._recompute_state()
return super().unlink()
# --------------------------------------------------
# Business methods
# --------------------------------------------------
def _update_move_date_deadline(self, new_date):
""" Updates corresponding move picking line deadline dates that are not yet completed. """
moves_to_update = self.move_ids.filtered(lambda | |
('Start','End','Battery Level','Usage in Seconds','Day of the Week','GMT Offset','Entry Creation' )
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Battery Level'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Battery Level'
timeline(report_folder, tlactivity, data_list, data_headers)
if version.parse(iOSversion) >= version.parse("11"):
cursor.execute("""
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zstructuredmetadata.z_dkbluetoothmetadatakey__address,
zstructuredmetadata.z_dkbluetoothmetadatakey__name,
(zobject.zenddate - zobject.zstartdate),
(zobject.zenddate - zobject.zstartdate)/60.00,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject,
zstructuredmetadata
on zobject.zstructuredmetadata = zstructuredmetadata.z_pk
where
zstreamname = '/bluetooth/isConnected'
"""
)
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]))
description = ''
report = ArtifactHtmlReport('KnowledgeC Bluetooth Connections')
report.start_artifact_report(report_folder, 'Bluetooth Connections', description)
report.add_script()
data_headers = ('Start','End','Bluetooth Address','Bluetooth Name','Usage in Seconds','Usage in Minutes','Day of Week','GMT Offset','Entry Creation')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Bluetooth Connections'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Bluetooth Connections'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data available in Bluetooth Connections')
if version.parse(iOSversion) >= version.parse("11"):
cursor.execute("""
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
case zobject.zvalueinteger
when '0' then 'disconnected'
when '1' then 'connected'
end,
(zobject.zenddate - zobject.zstartdate),
(zobject.zenddate - zobject.zstartdate)/60.00,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject
where
zstreamname is '/carplay/isConnected'
""")
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]))
description = ''
report = ArtifactHtmlReport('KnowledgeC Car Play Connections')
report.start_artifact_report(report_folder, 'Car Play Connections', description)
report.add_script()
data_headers = ('Start','End','Car Play Connected','Usage in Seconds','Usage in Minutes','Day of Week','GMT Offset','Entry Creation')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Car Play Connections'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Car Play Connections'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data available in Car Play Connections')
if version.parse(iOSversion) >= version.parse("13"):
cursor.execute("""
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zsource.zbundleid,
zobject.zvaluestring,
(zobject.zenddate - zobject.zstartdate),
(zobject.zenddate - zobject.zstartdate)/60.00,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject, zsource
where zobject.zsource = zsource.z_pk
and zstreamname = '/disk/subsystemAccess'
""")
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]))
description = ''
report = ArtifactHtmlReport('KnowledgeC Disk Subsystem Access')
report.start_artifact_report(report_folder, 'Disk Subsystem Access', description)
report.add_script()
data_headers = ('Start','End','Bundle ID','Value String','Usage in Seconds','Usage in Minutes','Day of Week','GMT Offset','Entry Creation')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Disk Subsystem Access'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Disk Subsystem Access'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data available in Disk Subsystem Access')
if version.parse(iOSversion) >= version.parse("12"):
cursor.execute("""
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zobject.zvalueinteger,
(zobject.zenddate - zobject.zstartdate),
(zobject.zenddate - zobject.zstartdate)/60.00,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject
where
zstreamname = '/inferred/motion'
""")
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]))
description = ''
report = ArtifactHtmlReport('KnowledgeC Do Not Disturb')
report.start_artifact_report(report_folder, 'Do Not Disturb', description)
report.add_script()
data_headers = ('Start','End','Value','Usage in Seconds','Usage in Minutes','Day of Week','GMT Offset','Entry Creation')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Do Not Disturb'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Do Not Disturb'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data available in Do Not Disturb')
data_list = []
if version.parse(iOSversion) >= version.parse("11"):
extension = ".bplist"
dump = True
# create directories
outpath = report_folder
try:
os.mkdir(os.path.join(report_folder, "clean"))
os.mkdir(os.path.join(report_folder, "dirty"))
except OSError:
logfunc("Error making directories")
file_found = str(files_found[0])
# connect sqlite databases
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
# variable initializations
dirtcount = 0
cleancount = 0
intentc = {}
intentv = {}
cursor.execute(
"""
SELECT
Z_PK,
Z_DKINTENTMETADATAKEY__SERIALIZEDINTERACTION,
Z_DKINTENTMETADATAKEY__INTENTCLASS,
Z_DKINTENTMETADATAKEY__INTENTVERB
FROM ZSTRUCTUREDMETADATA
WHERE Z_DKINTENTMETADATAKEY__SERIALIZEDINTERACTION is not null
"""
)
all_rows = cursor.fetchall()
for row in all_rows:
pkv = str(row[0])
pkvplist = pkv + extension
f = row[1]
intentclass = str(row[2])
intententverb = str(row[3])
output_file = open(os.path.join(outpath, "dirty", "D_Z_PK" + pkvplist), "wb") # export dirty from DB
output_file.write(f)
output_file.close()
g = open(os.path.join(outpath, "dirty", "D_Z_PK" + pkvplist), "rb")
plistg = ccl_bplist.load(g)
if version.parse(iOSversion) < version.parse("12"):
ns_keyed_archiver_objg = ccl_bplist.deserialise_NsKeyedArchiver(plistg)
newbytearray = ns_keyed_archiver_objg
else:
ns_keyed_archiver_objg = ccl_bplist.deserialise_NsKeyedArchiver(plistg)
newbytearray = ns_keyed_archiver_objg["NS.data"]
dirtcount = dirtcount + 1
binfile = open(os.path.join(outpath, "clean", "C_Z_PK" + pkvplist), "wb")
binfile.write(newbytearray)
binfile.close()
# add to dictionaries
intentc["C_Z_PK" + pkvplist] = intentclass
intentv["C_Z_PK" + pkvplist] = intententverb
cleancount = cleancount + 1
'''
h = open(outpath + "/StrucMetadata.html", "w")
h.write("<html><body>")
h.write(
"<h2>iOS "
+ iOSversion
+ " - KnowledgeC ZSTRUCTUREDMETADATA bplist report</h2>"
)
h.write(
"<style> table, td {border: 1px solid black; border-collapse: collapse;}tr:nth-child(even) {background-color: #f2f2f2;} .table th { background: #888888; color: #ffffff}.table.sticky th{ position:sticky; top: 0; }</style>"
)
h.write("<br/>")
'''
for filename in glob.glob(outpath + "/clean/*" + extension):
p = open(filename, "rb")
cfilename = os.path.basename(filename)
plist = ccl_bplist.load(p)
ns_keyed_archiver_obj = ccl_bplist.deserialise_NsKeyedArchiver(
plist, parse_whole_structure=True
) # deserialize clean
# Get dictionary values
A = intentc.get(cfilename)
B = intentv.get(cfilename)
if A is None:
A = "No value"
if B is None:
A = "No value"
# logfunc some values from clean bplist
if version.parse(iOSversion) >= version.parse("13"):
try:
NSdata = ns_keyed_archiver_obj["root"]["intent"]["backingStore"][
"bytes"
]
except:
NSdata = ns_keyed_archiver_obj["root"]["intent"]["backingStore"][
"data"
]["NS.data"]
pass
else:
NSdata = ns_keyed_archiver_obj["root"]["intent"]["backingStore"][
"data"
]["NS.data"]
# logfunc(str(NSdata))
parsedNSData = ""
# Default true
if dump == True:
nsdata_file = os.path.join(outpath, "clean", cfilename + "_nsdata.bin")
binfile = open(nsdata_file, "wb")
if version.parse(iOSversion) >= version.parse("13"):
try:
binfile.write(
ns_keyed_archiver_obj["root"]["intent"]["backingStore"][
"bytes"
]
)
except:
binfile.write(
ns_keyed_archiver_obj["root"]["intent"]["backingStore"][
"data"
]["NS.data"]
)
pass
else:
binfile.write(
ns_keyed_archiver_obj["root"]["intent"]["backingStore"]["data"][
"NS.data"
]
)
binfile.close()
messages = ParseProto(nsdata_file)
messages_json_dump = json.dumps(
messages, indent=4, sort_keys=True, ensure_ascii=False
)
parsedNSData = str(messages_json_dump).encode(
encoding="UTF-8", errors="ignore"
)
NSstartDate = ccl_bplist.convert_NSDate(
(ns_keyed_archiver_obj["root"]["dateInterval"]["NS.startDate"])
)
NSendDate = ccl_bplist.convert_NSDate(
(ns_keyed_archiver_obj["root"]["dateInterval"]["NS.endDate"])
)
NSduration = ns_keyed_archiver_obj["root"]["dateInterval"]["NS.duration"]
Siri = ns_keyed_archiver_obj["root"]["_donatedBySiri"]
if parsedNSData:
parsedf = str(parsedNSData).replace("\\n", "<br>")
else:
parsedf = str(NSdata).replace("\\n", "<br>")
data_list.append(( str(NSstartDate),str(A), str(B), str(Siri), str(NSendDate), str(NSduration), parsedf, (textwrap.fill(str(NSdata), width=50)), cfilename))
logfunc("iOS - KnowledgeC ZSTRUCTUREDMETADATA bplist extractor")
logfunc("By: @phillmoore & @AlexisBrignoni")
logfunc("thinkdfir.com & abrignoni.com")
logfunc("")
logfunc("Bplists from the Z_DKINTENTMETADATAKEY__SERIALIZEDINTERACTION field.")
logfunc("Exported bplists (dirty): " + str(dirtcount))
logfunc("Exported bplists (clean): " + str(cleancount))
logfunc("")
logfunc("Incepted bplist extractions in KnowledgeC.db completed")
description = ''
report = ArtifactHtmlReport('KnowledgeC Intents')
report.start_artifact_report(report_folder, 'Intents', description)
report.add_script()
data_headers = ('NS Start Date','Intent Class','Intent Verb','Siri?','NS Send Date','NS Duration','NS Data Protobuf', 'NS Data', 'Traceback' )
report.write_artifact_data_table(data_headers, data_list, file_found, html_escape=False)
report.end_artifact_report()
tlactivity = 'KnowledgeC Intents'
timeline(report_folder, tlactivity, data_list, data_headers)
if version.parse(iOSversion) >= version.parse("9"):
cursor = db.cursor()
cursor.execute('''
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zobject.zvaluestring,
(zobject.zenddate-zobject.zstartdate),
(zobject.zenddate-zobject.zstartdate)/60.00,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from zobject
where zstreamname is '/app/inFocus'
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
if version.parse(iOSversion) >= version.parse("9"):
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]))
report = ArtifactHtmlReport('KnowledgeC Application In Focus')
report.start_artifact_report(report_folder, 'App In Focus')
report.add_script()
data_headers = ('Start','End','Bundle ID', 'Usage in Seconds', 'Usage in Minutes','Day of Week','GMT Offset','Entry Creation')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Application in Focus'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Application in Focus'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data available in Application in Focus')
if version.parse(iOSversion) >= version.parse("12"):
cursor.execute('''
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zobject.zvaluestring,
zstructuredmetadata .z_dkappinstallmetadatakey__primarycategory,
zstructuredmetadata .z_dkappinstallmetadatakey__title,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject, zstructuredmetadata
where zobject.zstructuredmetadata = zstructuredmetadata.z_pk
and zstreamname = '/app/install'
''')
else:
cursor.execute('''
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zobject.zvaluestring,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject, zstructuredmetadata
where zobject.zstructuredmetadata = zstructuredmetadata.z_pk
and zstreamname = "/app/install"
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
if version.parse(iOSversion) >= version.parse("12"):
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]))
report = ArtifactHtmlReport('KnowledgeC Installed Apps')
report.start_artifact_report(report_folder, 'Installed Apps')
report.add_script()
data_headers = ('Start','End','Bundle ID','App Category', 'App Name','Day of Week','GMT Offset','Entry Creation')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Installed Apps'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Installed Apps'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3],row[4],row[5]))
report = ArtifactHtmlReport('KnowledgeC Installed Apps')
report.start_artifact_report(report_folder, 'Installed Apps')
report.add_script()
data_headers = ('Start','End','Bundle ID','Day of Week','GMT Offset','Entry Creation' )
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Installed Apps'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Installed Apps'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data available in Installed Apps')
if version.parse(iOSversion) >= version.parse("12"):
cursor.execute("""
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zobject.zvaluestring,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__locationname,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__displayname,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__fullyformattedaddress,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__city,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__stateorprovince,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__country,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__postalcode_v2,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__subthoroughfare,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__thoroughfare,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__phonenumbers,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__url,
zstructuredmetadata.z_dkapplicationactivitymetadatakey__activitytype,
zstructuredmetadata.z_dkapplicationactivitymetadatakey__contentdescription,
zstructuredmetadata.z_dkapplicationactivitymetadatakey__useractivityrequiredstring,
zstructuredmetadata.z_dkapplicationactivitymetadatakey__itemrelatedcontenturl,
zstructuredmetadata.z_dkapplicationactivitymetadatakey__itemrelateduniqueidentifier,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__latitude,
zstructuredmetadata.z_dklocationapplicationactivitymetadatakey__longitude,
zsource.zsourceid,
zstructuredmetadata.z_dkapplicationactivitymetadatakey__useractivityuuid,
zsource.zitemid,
zsource.zsourceid,
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject, zstructuredmetadata, zsource
where zobject.zstructuredmetadata = zstructuredmetadata.z_pk
and zobject.zsource = zsource.z_pk
and zstreamname = '/app/locationActivity'
"""
)
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12], row[13], row[14], row[15], row[16], row[17], row[18], row[19], row[20], row[21], row[22], row[23], row[24], row[25], row[26], row[27]))
description = ''
report = ArtifactHtmlReport('KnowledgeC Location Activity')
report.start_artifact_report(report_folder, 'Location Activity', description)
report.add_script()
data_headers = ('Timestamp','End','Bundle ID','Name','Display Name','Formatted Address', 'City','State/Province','Country','Postal Code','Subthoroughfare','Thoroughfare','Phone Numebers','URL','Activity Type', 'Content Description','User Activity Required String','Content URL','Unique ID','Latitude','Longitude','Source ID','Activity UUID','Item ID','Source ID','Day of the Week','GMT Offset','Entry Creation')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Location Activity'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Location Activity'
timeline(report_folder, tlactivity, data_list, data_headers)
kmlactivity = 'KnowledgeC Location Activity'
kmlgen(report_folder, kmlactivity, data_list, data_headers)
else:
logfunc('No data available in Location Activity')
if version.parse(iOSversion) >= version.parse("11"):
cursor.execute("""
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
case zobject.zvalueinteger
when '0' then 'unlocked'
when '1' then 'locked'
end,
(zobject.zenddate - zobject.zstartdate),
case zobject.zstartdayofweek
when '1' then 'sunday'
when '2' then 'monday'
when '3' then 'tuesday'
when '4' then 'wednesday'
when '5' then 'thursday'
when '6' then 'friday'
when '7' then 'saturday'
end,
zobject.zsecondsfromgmt/3600,
datetime(zobject.zcreationdate+978307200,'unixepoch')
from
zobject
where zstreamname = '/device/isLocked'
""")
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
data_list = []
for row in all_rows:
data_list.append((row[0], row[1], row[2], row[3], row[4], row[5], row[6]))
description = ''
report = ArtifactHtmlReport('KnowledgeC Device Locked')
report.start_artifact_report(report_folder, 'Device Locked', description)
report.add_script()
data_headers = ('Start','End','Is Locked?','Usage in Seconds','Day of the Week','GMT Offset','Entry Creation' )
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'KnowledgeC Device Locked'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = 'KnowledgeC Device Locked'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No data in KnowledgeC Device Locked')
if version.parse(iOSversion) >= version.parse("11"):
cursor.execute("""
select
datetime(zobject.zstartdate+978307200,'unixepoch'),
datetime(zobject.zenddate+978307200,'unixepoch'),
zobject.zvaluestring,
zstructuredmetadata.z_dknowplayingmetadatakey__album,
zstructuredmetadata.z_dknowplayingmetadatakey__artist,
zstructuredmetadata.z_dknowplayingmetadatakey__genre,
zstructuredmetadata.z_dknowplayingmetadatakey__title,
zstructuredmetadata.z_dknowplayingmetadatakey__duration,
(zobject.zenddate - zobject.zstartdate),
(zobject.zenddate - zobject.zstartdate)/60.00,
case zobject.zstartdayofweek
when '1' | |
<reponame>jefftc/changlab
"""
Glossary:
data A unit of information. Typically a file made by a program.
attribute Describes something about the data. e.g. logged="yes".
input attribute An attribute of an input data.
output attribute An attribute of an output data.
user attribute An attribute provided by the user that can specify
preferences
data type Describes a set of data that share the same attributes.
module Takes one or more data objects as input and produces a single
data object as output.
Modules "produce" data.
Modules can "convert" data from one type to another.
option Used in the modules, but does not affect the inferencing.
Classes:
AttributeDef Describes something about the Data.
Attribute
OptionDef Something that modifies how a Module works.
Option
DataType
DataNode
Constraint
Consequence
DefaultAttributesFrom
ModuleNode
ModuleDbSummary
Network
Functions:
make_network
find_paths
find_paths_by_start_ids
get_input_nodes Show all possible input nodes for the network.
get_input_datatypes Show all combinations of datatype that can be inputs.
group_nodes_by_datatype
summarize_moduledb
check_moduledb
print_modules
print_network
plot_network_gv
read_network
write_network
debug_print
"""
# Functions:
# _init_network
# _split_network
# _complete_network
#
# InData(s) -> Module -> OutData
# _bc_to_modules Module <- OutData
# _bc_to_inputs InDatas <- Module <- OutData INFERENCE
# _bc_to_one_input InDatas[i] <- Module <- OutData INFERENCE XXX
# DO NOT CALL. Helper for _bc_to_inputs.
# _bc_to_input_ids InDatas IDs <- Module ID <- OutData ID
# _fc_to_outputs InDatas -> Module -> OutData INFERENCE
# _fc_to_output_ids InDatas IDs -> Module ID -> OutData ID
# _resolve_constraint
#
# _is_valid_inputs InDatas (?) -> Module -> OutData
# _is_valid_input_i InData_i (?) -> Module -> OutData
# _is_valid_inputs_net InDatas (?) -> Module ID -> OutData (in network)
# _is_valid_input_i_net InData_i (?) -> Module ID -> OutData (in network)
# _is_valid_input_ids InData IDs (?) -> Module ID -> OutData ID
# _is_valid_output Module -> OutData (?) INFERENCE
# _is_valid_output_id_path path -> OutData ID (?)
#
# _find_same_data Find a DataNode that is an exact match.
# _find_compat_data Find a DataNode that is compatible.
# WAS _find_start_node
# _score_same_data
# _score_compat_data WAS _score_start_nodes.
# _is_data_same
# _is_data_compatible
# _is_attribute_same
# _is_attribute_compatible
# _merge_data_nodes
# _merge_attribute_values
#
# _get_attribute_type
# _assign_case_by_type
#
# _get_parents_of WAS _backchain_to_ids
# _get_children_of
# _make_parents_dict WAS _make_backchain_dict
# _make_ancestor_dict
# _make_descendent_dict
# _can_reach_by_bc
# _can_reach_by_fc
#
# _iter_upper_diag
# _intersect
# _is_subset
# _flatten
# _uniq
#
# _print_nothing
# _print_string
# _print_line
# _pretty_attributes
#
# _fix_node_id_pairs_after_merge
# _fix_node_id_dict_after_merge
# _product_network
# _product_and_chain
#
# _object_to_dict Use for writing and reading json file
# _dict_to_object Use for writing and reading json file
# These are also used in cbie3module.c. If these values change, you
# need to change that file too!
# Attribute value types.
TYPE_ATOM = 100
TYPE_ENUM = 101
# Constraints
MUST_BE = 200
CAN_BE_ANY_OF = 201
SAME_AS = 202
# Consequences
SET_TO = 300
SET_TO_ONE_OF = 301
BASED_ON_DATA = 302
SAME_AS_CONSTRAINT = 303
# CONSTRAINT
# behavior arg1 input_index
# MUST_BE value <optional> (set to 0 by default)
# CAN_BE_ANY_OF list of values <optional> (set to 0 by default)
# SAME_AS index of datatype index of datatype
#
# input_index is the index of the DataType that this constraint
# applies to. So for SAME_AS, that means data[input_index] should get
# its value from data[arg1].
# CONSEQUENCE
# behavior arg1 arg2
# SET_TO <string> None
# SET_TO_ONE_OF <list> None
# BASED_ON_DATA <list> None
# SAME_AS_CONSTRAINT index of input data None
# o arg2 is not used? Can get rid of it.
# Maps each numeric constant above to its printable name; used by the
# __repr__ methods below to produce readable debugging output.
CONST2STR = {
    TYPE_ATOM: "TYPE_ATOM",
    TYPE_ENUM: "TYPE_ENUM",
    MUST_BE: "MUST_BE",
    CAN_BE_ANY_OF: "CAN_BE_ANY_OF",
    SAME_AS: "SAME_AS",
    SET_TO: "SET_TO",
    SET_TO_ONE_OF: "SET_TO_ONE_OF",
    BASED_ON_DATA: "BASED_ON_DATA",
    SAME_AS_CONSTRAINT: "SAME_AS_CONSTRAINT",
}
# Module-wide debug flag; toggled by editing this file.
DEBUG = False
#DEBUG = True
# When backchaining, should we allow the attributes of the input data
# to be all possible values, or fix it to the default? All possible
# values is correct, but generates a combinatorial explosion that is
# difficult to manage.
DEFAULT_INPUT_ATTRIBUTE_IS_ALL_VALUES = False
#DEFAULT_INPUT_ATTRIBUTE_IS_ALL_VALUES = True
# Cap on network size; presumably enforced during network generation --
# TODO confirm at the use site.
MAX_NETWORK_SIZE = 1024 * 8
class AttributeDef:
    """Describes one attribute of a DataType.

    name        Name of the attribute.
    values      List of all allowed (string) values.
    default_in  Default value when the data is used as an input.
    default_out Default value when the data is used as an output.
    help        Optional help string.
    """
    def __init__(self, name, values, default_in, default_out, help=None):
        # Make sure name and values are valid.
        assert type(name) is type("")
        assert type(values) is type([]), "Value must be list: %s" % \
            type(values)
        for x in values:
            assert type(x) is type("")
        # Make sure no duplicated values.
        seen = {}
        for x in values:
            assert x not in seen, "Duplicated value (%s) in %s." % (x, name)
            seen[x] = 1
        # Make sure default_in and default_out are valid values.
        assert type(default_in) is type(""), "default_in must be ATOM"
        assert type(default_out) is type(""), "default_out must be ATOM"
        assert default_in in values, \
            "Invalid value %r for attribute %r." % (default_in, name)
        assert default_out in values, \
            "Invalid value %r for attribute %r." % (default_out, name)
        self.name = name
        self.values = values
        self.default_in = default_in
        self.default_out = default_out
        self.help = help
    def is_valid_value(self, value):
        # A value is either a single ATOM (string) or an ENUM (list of
        # strings); an ENUM is valid if it is a subset of self.values.
        if type(value) is type(""):
            return value in self.values
        elif type(value) is type([]):
            return _is_subset(value, self.values)
        raise AssertionError
    def __cmp__(self, other):
        if not isinstance(other, AttributeDef):
            return cmp(id(self), id(other))
        x1 = [self.name, self.values, self.default_in, self.default_out,
              self.help]
        # BUG FIX: x2 previously ended with self.help (compared against
        # itself), so two AttributeDefs differing only in help compared
        # as equal, inconsistently with __hash__.
        x2 = [other.name, other.values, other.default_in, other.default_out,
              other.help]
        return cmp(x1, x2)
    def __hash__(self):
        x = self.name, tuple(self.values), self.default_in, self.default_out, \
            self.help
        return hash(x)
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        x = [repr(self.name),
             repr(self.values),
             repr(self.default_in),
             repr(self.default_out), ]
        if self.help is not None:
            x.append("help=%r" % self.help)
        x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
        return x
    @staticmethod
    def __init_from_dict(args):
        # Reconstruct an AttributeDef from a dict made by _object_to_dict.
        #inst = AttributeDef(**args)
        inst = AttributeDef(
            args["name"], args["values"], args["default_in"],
            args["default_out"], help=args.get("help"))
        return inst
class Attribute:
    """A concrete attribute assignment (name=value) for a DataType."""
    def __init__(self, datatype, name, value):
        # Validate the argument types up front.
        assert isinstance(datatype, DataType)
        assert type(name) is type("")
        assert type(value) is type("")
        # The datatype must declare an attribute with this exact name.
        matches = [n for n in datatype.attribute_defs if n == name]
        assert len(matches) == 1, "datatype %r does not have attribute %r." % (
            datatype.name, name)
        # The value must be legal for that attribute.
        assert datatype.is_valid_attribute_value(name, value), \
            "Invalid value %r for attribute %r." % (value, name)
        self.datatype = datatype
        self.name = name
        self.value = value
    def __str__(self):
        return repr(self)
    def __repr__(self):
        fields = [self.datatype.name, repr(self.name), repr(self.value)]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(fields))
class OptionDef:
    """Describes an option that modifies how a ModuleNode works.

    name     Name of the option.
    default  Optional default value.
    help     Optional help string.
    """
    def __init__(self, name, default=None, help=None):
        assert type(name) is type("")
        self.name = name
        self.default = default
        self.help = help
    def __cmp__(self, other):
        if not isinstance(other, OptionDef):
            return cmp(id(self), id(other))
        x1 = [self.name, self.default, self.help]
        # BUG FIX: x2 previously ended with self.help (compared against
        # itself), so two OptionDefs differing only in help compared as
        # equal, inconsistently with __hash__.
        x2 = [other.name, other.default, other.help]
        return cmp(x1, x2)
    def __hash__(self):
        x = self.name, self.default, self.help
        return hash(x)
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        x = [repr(self.name), ]
        if self.default is not None:
            x.append("default=%r" % self.default)
        if self.help is not None:
            x.append("help=%r" % self.help)
        x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
        return x
    @staticmethod
    def __init_from_dict(args):
        # Reconstruct an OptionDef from a dict made by _object_to_dict.
        assert 'name' in args
        assert 'default' in args
        assert 'help' in args
        inst = OptionDef(
            args['name'], default=args['default'], help=args['help'])
        return inst
class Option:
    """A concrete option value (name=value) attached to a ModuleNode."""
    def __init__(self, module, name, value):
        # Only ModuleNode objects carry options; both the option name
        # and its value must be plain strings.
        assert isinstance(module, ModuleNode)
        assert type(name) is str
        assert type(value) is str
        self.module = module
        self.name = name
        self.value = value
class Constraint(object):
    """Constrains an attribute of an input DataType of a ModuleNode.

    name         Name of the attribute being constrained.
    behavior     One of MUST_BE, CAN_BE_ANY_OF, SAME_AS.
    arg1         Interpretation depends on behavior (see comments below).
    input_index  Index of the input DataType this constraint applies to
                 (0 by default).
    """
    def __init__(self, name, behavior, arg1=None, input_index=None):
        if behavior == MUST_BE:
            # name Name of attribute.
            # arg1 Value of the attribute.
            assert type(arg1) is type("")
        elif behavior == CAN_BE_ANY_OF:
            # name Name of attribute.
            # arg1 List of values of the attribute.
            assert type(arg1) in [type([]), type(())]
            for x in arg1:
                assert type(x) is type("")
        elif behavior == SAME_AS:
            # name Name of attribute.
            # arg1 Index of the datatype that this must match.
            assert type(arg1) is type(0), (
                "arg1 should be the index of the datatype with the "
                "same attribute")
            assert input_index is not None, (
                "input_index must be given for SAME_AS constraint"
            )
            assert type(input_index) is type(0)
            if input_index is not None:
                # A datatype cannot be constrained to match itself.
                assert arg1 != input_index
        else:
            # BUG FIX: this was the Python-2-only statement form
            # "raise AssertionError, msg", which is a SyntaxError under
            # Python 3.  The call form below is valid in both 2 and 3.
            raise AssertionError(
                "Invalid behavior (%s) for constraint %s." % (behavior, name))
        assert input_index is None or type(input_index) is type(0)
        # Normalize: a single-element CAN_BE_ANY_OF is really a MUST_BE.
        if behavior == CAN_BE_ANY_OF and len(arg1) == 1:
            behavior = MUST_BE
            arg1 = arg1[0]
        self.name = name
        self.behavior = behavior
        self.arg1 = arg1
        self.input_index = input_index or 0
    def __cmp__(self, other):
        # Python 2 ordering; compares all fields in order.
        if not isinstance(other, Constraint):
            return cmp(id(self), id(other))
        x1 = [self.name, self.behavior, self.arg1, self.input_index]
        x2 = [other.name, other.behavior, other.arg1, other.input_index]
        return cmp(x1, x2)
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        x = [repr(self.name), CONST2STR[self.behavior], ]
        if self.arg1 is not None:
            x.append(repr(self.arg1))
        if self.input_index is not None:
            x = x + ["input_index=%s" % self.input_index]
        x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
        return x
    @staticmethod
    def __init_from_dict(args):
        # Reconstruct a Constraint from a dict made by _object_to_dict.
        assert 'name' in args
        assert 'behavior' in args
        assert 'arg1' in args
        assert 'input_index' in args
        inst = Constraint(
            args['name'], args['behavior'], arg1=args['arg1'],
            input_index=args['input_index'])
        return inst
class Consequence(object):
def __init__(self, name, behavior,
arg1=None,
arg2=None,
side_effect=False):
if behavior == SET_TO:
assert type(arg1) is type("")
assert arg2 is None
elif behavior in [SET_TO_ONE_OF, BASED_ON_DATA]:
assert type(arg1) in [type([]), type(())], "arg | |
to `values`' first dimension.
num_groups: A `Tensor`.
name: (string, optional) A name for the operation.
Returns:
A `Tensor` of the same type as `values`.
"""
with tf.name_scope(name):
return _unsorted_segment_reduction_or_zero(
tf.math.unsorted_segment_min, values, indices, num_groups)
def unsorted_segment_max_or_zero(values, indices, num_groups,
                                 name="unsorted_segment_max_or_zero"):
  """Segment-wise elementwise max that yields zero for empty segments.

  Unlike `tf.math.unsorted_segment_max`, which fills segments receiving no
  elements with the most negative finite value possible, empty segments here
  are given a "max" of zero.

  Args:
    values: A `Tensor` of per-element features.
    indices: A 1-D `Tensor` whose length is equal to `values`' first dimension.
    num_groups: A `Tensor`.
    name: (string, optional) A name for the operation.

  Returns:
    A `Tensor` of the same type as `values`.
  """
  with tf.name_scope(name):
    reduced = _unsorted_segment_reduction_or_zero(
        tf.math.unsorted_segment_max, values, indices, num_groups)
    return reduced
class EdgeBlock(snt.AbstractModule):
  """Edge block.

  Updates the features of every edge in a batch of graphs, conditioning on
  any subset of: the existing edge features, the features of the two endpoint
  nodes, and the per-graph global features.

  See https://arxiv.org/abs/1806.01261 for more details.
  """

  def __init__(self,
               edge_model_fn,
               use_edges=True,
               use_receiver_nodes=True,
               use_sender_nodes=True,
               use_globals=True,
               name="edge_block"):
    """Initializes the EdgeBlock module.

    Args:
      edge_model_fn: A callable, invoked inside this module's variable scope,
        that returns a Sonnet module (or equivalent callable) used as the edge
        model. That module maps a `Tensor` of concatenated per-edge input
        features to a `Tensor` of per-edge output features, typically of
        rank 2 but possibly higher; see the `_build` documentation for
        details on acceptable inputs in that case.
      use_edges: (bool, default=True). Whether to condition on edge attributes.
      use_receiver_nodes: (bool, default=True). Whether to condition on
        receiver node attributes.
      use_sender_nodes: (bool, default=True). Whether to condition on sender
        node attributes.
      use_globals: (bool, default=True). Whether to condition on global
        attributes.
      name: The module name.

    Raises:
      ValueError: When all four `use_*` flags are False.
    """
    super(EdgeBlock, self).__init__(name=name)

    if not (use_edges or use_sender_nodes or use_receiver_nodes or use_globals):
      raise ValueError("At least one of use_edges, use_sender_nodes, "
                       "use_receiver_nodes or use_globals must be True.")

    self._use_edges = use_edges
    self._use_receiver_nodes = use_receiver_nodes
    self._use_sender_nodes = use_sender_nodes
    self._use_globals = use_globals

    with self._enter_variable_scope():
      self._edge_model = edge_model_fn()

  def _build(self, graph):
    """Connects the edge block.

    Args:
      graph: A `graphs.GraphsTuple` containing `Tensor`s, whose edge features
        (if `use_edges` is `True`), node features (if `use_receiver_nodes` or
        `use_sender_nodes` is `True`) and per-graph globals (if `use_globals`
        is `True`) should be concatenable on the last axis.

    Returns:
      An output `graphs.GraphsTuple` with updated edges.

    Raises:
      ValueError: If `graph` does not have non-`None` receivers and senders,
        or if `graph` has `None` fields incompatible with the selected
        `use_*` options.
    """
    _validate_graph(
        graph, (SENDERS, RECEIVERS, N_EDGE), " when using an EdgeBlock")

    # Collect the conditioning signals in a fixed order, then concatenate
    # them along the feature axis before applying the edge model.
    inputs = []
    if self._use_edges:
      _validate_graph(graph, (EDGES,), "when use_edges == True")
      inputs.append(graph.edges)
    if self._use_receiver_nodes:
      inputs.append(broadcast_receiver_nodes_to_edges(graph))
    if self._use_sender_nodes:
      inputs.append(broadcast_sender_nodes_to_edges(graph))
    if self._use_globals:
      inputs.append(broadcast_globals_to_edges(graph))

    return graph.replace(edges=self._edge_model(tf.concat(inputs, axis=-1)))
class NodeBlock(snt.AbstractModule):
  """Node block.

  Updates the features of every node in a batch of graphs, conditioning on
  any subset of: the existing node features, aggregations of the incident
  (received and/or sent) edge features, and the per-graph global features.

  See https://arxiv.org/abs/1806.01261 for more details.
  """

  def __init__(self,
               node_model_fn,
               use_received_edges=True,
               use_sent_edges=False,
               use_nodes=True,
               use_globals=True,
               received_edges_reducer=tf.math.unsorted_segment_sum,
               sent_edges_reducer=tf.math.unsorted_segment_sum,
               name="node_block"):
    """Initializes the NodeBlock module.

    Args:
      node_model_fn: A callable, invoked inside this module's variable scope,
        that returns a Sonnet module (or equivalent callable) used as the node
        model. That module maps a `Tensor` of concatenated per-node input
        features to a `Tensor` of per-node output features, typically of
        rank 2 but possibly higher; see the `_build` documentation for
        details on acceptable inputs in that case.
      use_received_edges: (bool, default=True) Whether to condition on
        aggregated edges received by each node.
      use_sent_edges: (bool, default=False) Whether to condition on aggregated
        edges sent by each node.
      use_nodes: (bool, default=True) Whether to condition on node attributes.
      use_globals: (bool, default=True) Whether to condition on global
        attributes.
      received_edges_reducer: Reduction used to aggregate received edges; a
        callable whose signature matches `tf.math.unsorted_segment_sum`.
      sent_edges_reducer: Reduction used to aggregate sent edges; a callable
        whose signature matches `tf.math.unsorted_segment_sum`.
      name: The module name.

    Raises:
      ValueError: When all four `use_*` flags are False, or when a required
        reducer is None.
    """
    super(NodeBlock, self).__init__(name=name)

    if not (use_nodes or use_sent_edges or use_received_edges or use_globals):
      raise ValueError("At least one of use_received_edges, use_sent_edges, "
                       "use_nodes or use_globals must be True.")

    self._use_received_edges = use_received_edges
    self._use_sent_edges = use_sent_edges
    self._use_nodes = use_nodes
    self._use_globals = use_globals

    with self._enter_variable_scope():
      self._node_model = node_model_fn()
      # Aggregators are built only for the edge directions actually used,
      # and each requires its reducer to be provided.
      if self._use_received_edges:
        if received_edges_reducer is None:
          raise ValueError(
              "If `use_received_edges==True`, `received_edges_reducer` "
              "should not be None.")
        self._received_edges_aggregator = ReceivedEdgesToNodesAggregator(
            received_edges_reducer)
      if self._use_sent_edges:
        if sent_edges_reducer is None:
          raise ValueError(
              "If `use_sent_edges==True`, `sent_edges_reducer` "
              "should not be None.")
        self._sent_edges_aggregator = SentEdgesToNodesAggregator(
            sent_edges_reducer)

  def _build(self, graph):
    """Connects the node block.

    Args:
      graph: A `graphs.GraphsTuple` containing `Tensor`s, whose edge features
        (if `use_received_edges` or `use_sent_edges` is `True`), node features
        (if `use_nodes` is `True`) and per-graph globals (if `use_globals` is
        `True`) should be concatenable on the last axis.

    Returns:
      An output `graphs.GraphsTuple` with updated nodes.
    """
    # Collect the conditioning signals in a fixed order, then concatenate
    # them along the feature axis before applying the node model.
    inputs = []
    if self._use_received_edges:
      inputs.append(self._received_edges_aggregator(graph))
    if self._use_sent_edges:
      inputs.append(self._sent_edges_aggregator(graph))
    if self._use_nodes:
      _validate_graph(graph, (NODES,), "when use_nodes == True")
      inputs.append(graph.nodes)
    if self._use_globals:
      inputs.append(broadcast_globals_to_nodes(graph))

    return graph.replace(nodes=self._node_model(tf.concat(inputs, axis=-1)))
class GlobalBlock(snt.AbstractModule):
"""Global block.
A block that updates the global features of each graph in a batch based on
(a subset of) the previous global features, the aggregated features of the
edges of the graph, and the aggregated features of the nodes of the graph.
See https://arxiv.org/abs/1806.01261 for more details.
"""
def __init__(self,
global_model_fn,
use_edges=True,
use_nodes=True,
use_globals=True,
nodes_reducer=tf.math.unsorted_segment_sum,
edges_reducer=tf.math.unsorted_segment_sum,
name="global_block"):
"""Initializes the GlobalBlock module.
Args:
global_model_fn: A callable that will be called in the variable scope of
this GlobalBlock and should return a Sonnet module (or equivalent
callable) to be used as the global model. The returned module should
take a `Tensor` (of concatenated input features) and return a `Tensor`
(the global output features). Typically, this module would input and
output `Tensor`s of rank 2, but it may also input or output larger
ranks. See the `_build` method documentation for more details on the
acceptable inputs to this module in that case.
use_edges: (bool, default=True) Whether to condition on aggregated edges.
use_nodes: (bool, default=True) Whether to condition on node attributes.
use_globals: (bool, default=True) Whether to condition on global
attributes.
nodes_reducer: Reduction to be used when aggregating nodes. This should
be a callable whose signature matches tf.math.unsorted_segment_sum.
edges_reducer: Reduction to be used when aggregating edges. This should
be a callable whose signature matches tf.math.unsorted_segment_sum.
name: The module name.
Raises:
ValueError: When fields that are required are missing.
"""
super(GlobalBlock, self).__init__(name=name)
if not (use_nodes or use_edges or use_globals):
raise ValueError("At least one of use_edges, "
"use_nodes or use_globals must be True.")
self._use_edges = use_edges
self._use_nodes = use_nodes
self._use_globals = use_globals
with self._enter_variable_scope():
self._global_model = global_model_fn()
if self._use_edges:
if edges_reducer is None:
raise ValueError(
"If `use_edges==True`, `edges_reducer` should not be None.")
self._edges_aggregator = EdgesToGlobalsAggregator(
edges_reducer)
if self._use_nodes:
if nodes_reducer is None:
raise ValueError(
"If `use_nodes==True`, `nodes_reducer` should not be None.")
self._nodes_aggregator = NodesToGlobalsAggregator(
nodes_reducer)
def _build(self, graph):
"""Connects the global block.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, whose individual edges
(if `use_edges` is `True`), individual nodes (if `use_nodes` is True)
and per graph globals (if `use_globals` is `True`) should be
concatenable on the last axis.
Returns:
An output `graphs.GraphsTuple` with updated globals.
"""
globals_to_collect = []
if self._use_edges:
_validate_graph(graph, (EDGES,), "when use_edges == True")
globals_to_collect.append(self._edges_aggregator(graph))
if self._use_nodes:
_validate_graph(graph, (NODES,), "when use_nodes == True")
globals_to_collect.append(self._nodes_aggregator(graph))
if self._use_globals:
_validate_graph(graph, (GLOBALS,), "when use_globals == True")
globals_to_collect.append(graph.globals)
collected_globals = tf.concat(globals_to_collect, axis=-1)
updated_globals = self._global_model(collected_globals)
return | |
"""
Optimal binning algorithm for continuous target.
"""
# <NAME> <<EMAIL>>
# Copyright (C) 2019
import numbers
import time
from sklearn.utils import check_array
import numpy as np
from ..information import solver_statistics
from ..logging import Logger
from .auto_monotonic import auto_monotonic_continuous
from .auto_monotonic import peak_valley_trend_change_heuristic
from .binning import OptimalBinning
from .binning_statistics import continuous_bin_info
from .binning_statistics import ContinuousBinningTable
from .binning_statistics import target_info_special_continuous
from .continuous_cp import ContinuousBinningCP
from .preprocessing import preprocessing_user_splits_categorical
from .preprocessing import split_data
from .transformations import transform_continuous_target
logger = Logger(__name__).logger
def _check_parameters(name, dtype, prebinning_method, max_n_prebins,
min_prebin_size, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, monotonic_trend, min_mean_diff, max_pvalue,
max_pvalue_policy, outlier_detector, outlier_params,
cat_cutoff, user_splits, user_splits_fixed,
special_codes, split_digits, time_limit, verbose):
if not isinstance(name, str):
raise TypeError("name must be a string.")
if dtype not in ("categorical", "numerical"):
raise ValueError('Invalid value for dtype. Allowed string '
'values are "categorical" and "numerical".')
if prebinning_method not in ("cart", "quantile", "uniform"):
raise ValueError('Invalid value for prebinning_method. Allowed string '
'values are "cart", "quantile" and "uniform".')
if not isinstance(max_n_prebins, numbers.Integral) or max_n_prebins <= 1:
raise ValueError("max_prebins must be an integer greater than 1; "
"got {}.".format(max_n_prebins))
if not 0. < min_prebin_size <= 0.5:
raise ValueError("min_prebin_size must be in (0, 0.5]; got {}."
.format(min_prebin_size))
if min_n_bins is not None:
if not isinstance(min_n_bins, numbers.Integral) or min_n_bins <= 0:
raise ValueError("min_n_bins must be a positive integer; got {}."
.format(min_n_bins))
if max_n_bins is not None:
if not isinstance(max_n_bins, numbers.Integral) or max_n_bins <= 0:
raise ValueError("max_n_bins must be a positive integer; got {}."
.format(max_n_bins))
if min_n_bins is not None and max_n_bins is not None:
if min_n_bins > max_n_bins:
raise ValueError("min_n_bins must be <= max_n_bins; got {} <= {}."
.format(min_n_bins, max_n_bins))
if min_bin_size is not None:
if (not isinstance(min_bin_size, numbers.Number) or
not 0. < min_bin_size <= 0.5):
raise ValueError("min_bin_size must be in (0, 0.5]; got {}."
.format(min_bin_size))
if max_bin_size is not None:
if (not isinstance(max_bin_size, numbers.Number) or
not 0. < max_bin_size <= 1.0):
raise ValueError("max_bin_size must be in (0, 1.0]; got {}."
.format(max_bin_size))
if min_bin_size is not None and max_bin_size is not None:
if min_bin_size > max_bin_size:
raise ValueError("min_bin_size must be <= max_bin_size; "
"got {} <= {}.".format(min_bin_size,
max_bin_size))
if monotonic_trend is not None:
if monotonic_trend not in ("auto", "auto_heuristic", "auto_asc_desc",
"ascending", "descending", "convex",
"concave", "peak", "valley",
"peak_heuristic", "valley_heuristic"):
raise ValueError('Invalid value for monotonic trend. Allowed '
'string values are "auto", "auto_heuristic", '
'"auto_asc_desc", "ascending", "descending", '
'"concave", "convex", "peak", "valley", '
'"peak_heuristic" and "valley_heuristic".')
if (not isinstance(min_mean_diff, numbers.Number) or min_mean_diff < 0):
raise ValueError("min_mean_diff must be >= 0; got {}."
.format(min_mean_diff))
if max_pvalue is not None:
if (not isinstance(max_pvalue, numbers.Number) or
not 0. < max_pvalue <= 1.0):
raise ValueError("max_pvalue must be in (0, 1.0]; got {}."
.format(max_pvalue))
if max_pvalue_policy not in ("all", "consecutive"):
raise ValueError('Invalid value for max_pvalue_policy. Allowed string '
'values are "all" and "consecutive".')
if outlier_detector is not None:
if outlier_detector not in ("range", "zscore"):
raise ValueError('Invalid value for outlier_detector. Allowed '
'string values are "range" and "zscore".')
if outlier_params is not None:
if not isinstance(outlier_params, dict):
raise TypeError("outlier_params must be a dict or None; "
"got {}.".format(outlier_params))
if cat_cutoff is not None:
if (not isinstance(cat_cutoff, numbers.Number) or
not 0. < cat_cutoff <= 1.0):
raise ValueError("cat_cutoff must be in (0, 1.0]; got {}."
.format(cat_cutoff))
if user_splits is not None:
if not isinstance(user_splits, (np.ndarray, list)):
raise TypeError("user_splits must be a list or numpy.ndarray.")
if user_splits_fixed is not None:
if user_splits is None:
raise ValueError("user_splits must be provided.")
else:
if not isinstance(user_splits_fixed, (np.ndarray, list)):
raise TypeError("user_splits_fixed must be a list or "
"numpy.ndarray.")
elif not all(isinstance(s, bool) for s in user_splits_fixed):
raise ValueError("user_splits_fixed must be list of boolean.")
elif len(user_splits) != len(user_splits_fixed):
raise ValueError("Inconsistent length of user_splits and "
"user_splits_fixed: {} != {}. Lengths must "
"be equal".format(len(user_splits),
len(user_splits_fixed)))
if special_codes is not None:
if not isinstance(special_codes, (np.ndarray, list, dict)):
raise TypeError("special_codes must be a dit, list or "
"numpy.ndarray.")
if isinstance(special_codes, dict) and not len(special_codes):
raise ValueError("special_codes empty. special_codes dict must "
"contain at least one special.")
if split_digits is not None:
if (not isinstance(split_digits, numbers.Integral) or
not 0 <= split_digits <= 8):
raise ValueError("split_digist must be an integer in [0, 8]; "
"got {}.".format(split_digits))
if not isinstance(time_limit, numbers.Number) or time_limit < 0:
raise ValueError("time_limit must be a positive value in seconds; "
"got {}.".format(time_limit))
if not isinstance(verbose, bool):
raise TypeError("verbose must be a boolean; got {}.".format(verbose))
class ContinuousOptimalBinning(OptimalBinning):
"""Optimal binning of a numerical or categorical variable with respect to a
continuous target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecisionTreeRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeRegressor.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of mininum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The **mean** monotonic trend. Supported trends are “auto”,
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend minimize the L1-norm using a machine learning classifier,
"ascending", "descending", "concave", "convex", "peak" and
"peak_heuristic" to allow a peak change point, and "valley" and
"valley_heuristic" to allow a valley change point. Trends
"auto_heuristic", "peak_heuristic" and "valley_heuristic" use a
heuristic to determine the change point, and are significantly faster
for large size instances (``max_n_prebins> 20``). Trend "auto_asc_desc"
is used to automatically select the best monotonic trend between
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_mean_diff : float, optional (default=0)
The minimum mean difference between consecutives bins. This
option currently only applies when ``monotonic_trend`` is "ascending"
or "descending".
max_pvalue : float or None, optional (default=0.05)
The maximum p-value among bins. The T-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method or "zcore" to use the modified
Z-score method.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
cat_cutoff : float or None, optional (default=None)
Generate bin others with categories in which the fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keywrord arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity | |
from scapy.all import *
from collections import Counter
# Otvorenie .pcap súboru
def otvor_pcap_subor(subor):
    """Load a capture from the vzorky_pcap_na_analyzu/ directory.

    Returns the capture wrapped in a scapy PacketList.
    """
    nacitane_pakety = rdpcap("vzorky_pcap_na_analyzu/" + subor)
    return PacketList(list(nacitane_pakety))
# Zistenie dĺžky rámca prenašaného po médiu
def dlzka_ramca_po_mediu(dlzka_ramca):
    """Return the frame length on the wire.

    Adds the 4 B FCS to the captured length; frames shorter than the 60 B
    minimum payload are padded up to the 64 B Ethernet minimum.
    """
    return dlzka_ramca + 4 if dlzka_ramca >= 60 else 64
# Zistenie cieľovej IP adresy
def cielova_IP_f(upraveny_ramec):
    """Return the destination IPv4 address in dotted-decimal form.

    Reads frame bytes 30-33 (destination address of an untagged
    Ethernet II / IPv4 frame) from the list of hex-byte strings.
    """
    oktety = [str(int(bajt, 16)) for bajt in upraveny_ramec[30:34]]
    return ".".join(oktety)
# Zistenie zdrojovej IP adresy
def zdrojova_IP_f(upraveny_ramec):
    """Return the source IPv4 address in dotted-decimal form.

    Reads frame bytes 26-29 (source address of an untagged
    Ethernet II / IPv4 frame) from the list of hex-byte strings.
    """
    oktety = [str(int(bajt, 16)) for bajt in upraveny_ramec[26:30]]
    return ".".join(oktety)
# Filter pre nájdenie protokolu z externého súboru
def filter(subor, bajty):
    """Look up the protocol name for the hex value `bajty`.

    `subor` is an open mapping file (or any iterable of lines) whose lines
    have the form "<hex> <name>". Returns the first matching name, or
    "Neznámy protokol" when no line matches.

    NOTE(review): shadows the built-in `filter`; the name is kept because
    the other functions in this file call it by this name.
    """
    for riadok in subor:
        casti = riadok.split(" ")
        if casti[0] == bajty:
            return casti[1].strip()
    return "Neznámy protokol"
# Funkcia pre nájdenie IP adries a vnoreného IP protokolu
def IP_f(vnoreny_p, file_out, upraveny_ramec, prijimajuce_list):
    """Write the frame's source and destination IP addresses to file_out.

    For IPv4 frames the destination address is also appended to
    `prijimajuce_list` (mutated in place) for the receiving-node summary.
    """
    zdrojova = zdrojova_IP_f(upraveny_ramec)
    cielova = cielova_IP_f(upraveny_ramec)
    if vnoreny_p == "IPv4":
        prijimajuce_list.append(cielova)
    file_out.write("Zdrojová IP adresa: " + zdrojova + "\n")
    file_out.write("Cieľová IP adresa: " + cielova + "\n")
# Výpis rámca po 16 B
def vypis_ramca(dlzka_ramca, file_out, upraveny_ramec):
    """Dump the first `dlzka_ramca` bytes of the frame, 16 hex bytes per line.

    A trailing blank line separates the dump from the next frame's output.
    """
    plne_riadky, zvysok = divmod(dlzka_ramca, 16)
    for cislo_riadku in range(plne_riadky):
        zaciatok = cislo_riadku * 16
        file_out.write(" ".join(upraveny_ramec[zaciatok:zaciatok + 16]) + "\n")
    posledny = plne_riadky * 16
    file_out.write(" ".join(upraveny_ramec[posledny:posledny + zvysok]) + "\n" + "\n")
# Zistenie všetkých prijímacích uzlov IP + načastejší s počtom paketov
def zisti_IP_uzly(dst_IP_list, file_out):
    """Write all receiving IP addresses and the busiest receiver to file_out.

    The busiest receiver is the address with the highest occurrence count
    in `dst_IP_list`, reported together with its packet count.
    """
    file_out.write("IP adresy prijímajúcich uzlov: \n")
    for adresa in dst_IP_list:
        file_out.write(adresa + "\n")
    najcastejsia, pocet = Counter(dst_IP_list).most_common(1)[0]
    file_out.write("\nAdresa uzla s najväčším počtom prijatých paketov: \n"
                   + najcastejsia + " - " + str(pocet) + " paketov")
# Čítanie všetkých rámcov súboru
# Reading all frames of the capture file
def vsetky_ramce_f(packetlist, file_out):
    """Analyze every frame in `packetlist` and write a report to `file_out`.

    For each frame: length, frame type (Ethernet II / 802.3 RAW / SNAP /
    LLC), MAC addresses, and for IPv4 frames the IP addresses, nested
    protocol and TCP/UDP ports, followed by a hex dump. Finishes with a
    summary of all receiving IPv4 nodes.

    NOTE(review): header offsets (12-13 EtherType, 23 IP protocol, 26-33
    addresses, 34-37 ports) assume untagged Ethernet frames with a 20 B
    IPv4 header — frames with VLAN tags or IP options would be misread;
    confirm the capture samples guarantee this.
    """
    prijimajuce_IP = []
    poradie = 1
    for pkt in packetlist:
        b = raw(pkt)
        # Convert the raw bytes to a list of uppercase hex-byte strings.
        ramec = b.hex(" ").upper()
        ramec_split = ramec.split(" ")
        file_out.write("rámec " + str(poradie) + "\n")
        # Frame length as captured vs. length on the wire.
        dlzka_ramca = len(pkt)
        file_out.write("dĺžka rámca poskytnutá pcap api: " + str(dlzka_ramca) + "\n")
        dlzka_ramca_m = dlzka_ramca_po_mediu(dlzka_ramca)
        file_out.write("dĺžka rámca prenášaneho po mediu: " + str(dlzka_ramca_m) + "\n")
        # Bytes 12-13 decide Ethernet II (EtherType) vs IEEE 802.3 (length).
        pole_3 = "".join(ramec_split[12:14])
        pole_3_int = int(pole_3, 16)
        vnoreny_protokol = []
        # NOTE(review): the standard EtherType threshold is 1536 (0x0600);
        # 1600 misclassifies values 1536-1599 — confirm intent.
        if pole_3_int >= 1600:
            file_out.write("Ethernet II\n")
            f_E = open("EtherTypes.txt", "r")
            vnoreny_protokol = filter(f_E, pole_3)
            f_E.close()
            file_out.write(vnoreny_protokol + "\n")
        else:
            # 802.3: byte 14 distinguishes RAW (FF), SNAP (AA) and plain LLC.
            pole_4 = "".join(ramec_split[14:15])
            if pole_4 == "FF":
                file_out.write("IEEE 802.3 - RAW\n")
                vnoreny_protokol = "IPX"
                file_out.write(vnoreny_protokol + "\n")
            elif pole_4 == "AA":
                file_out.write("IEEE 802.3 - LLC + SNAP\n")
            else:
                file_out.write("IEEE 802.3 - LLC\n")
                pole_4 = ramec_split[14]
                f_LSAP = open("LSAPs.txt", "r")
                vnoreny_protokol = filter(f_LSAP, pole_4)
                f_LSAP.close()
                file_out.write(vnoreny_protokol + "\n")
        # MAC addresses: bytes 0-5 destination, 6-11 source.
        dst_mac = ":".join(ramec_split[:6])
        src_mac = ":".join(ramec_split[6:12])
        file_out.write("Zdrojová MAC adresa: " + src_mac + "\n")
        file_out.write("Cieľová MAC adresa: " + dst_mac + "\n")
        # For IP frames: write IP addresses and look up the IP protocol.
        ip_protokol = ""
        if vnoreny_protokol == "IPv4" or vnoreny_protokol == "IPv6":
            IP_f(vnoreny_protokol, file_out, ramec_split, prijimajuce_IP)
            f_IP = open("IPProtocolNumbers.txt", "r")
            ip_protokol = filter(f_IP, ramec_split[23])
            f_IP.close()
            file_out.write(ip_protokol + "\n")
        if ip_protokol == "TCP":
            # TCP ports: bytes 34-35 source, 36-37 destination.
            src_port = "".join(ramec_split[34:36])
            src_port_dec = str(int(src_port, 16))
            dst_port = "".join(ramec_split[36:38])
            dst_port_dec = str(int(dst_port, 16))
            f_tcp = open("TCPports.txt", "r")
            tcp_port = ""
            for line in f_tcp:
                line = line.split(" ")
                if line[0] == dst_port_dec or line[0] == src_port_dec:
                    tcp_port = line[1].strip()
                    break
            f_tcp.close()
            file_out.write(tcp_port + "\n" + "zdrojový port: " + src_port_dec + "\n" + "cieľový port: " + dst_port_dec + "\n")
        elif ip_protokol == "UDP":
            # UDP ports at the same offsets as TCP.
            src_port = "".join(ramec_split[34:36])
            src_port_dec = str(int(src_port, 16))
            dst_port = "".join(ramec_split[36:38])
            dst_port_dec = str(int(dst_port, 16))
            f_tcp = open("UDPports.txt", "r")
            udp_port = ""
            for line in f_tcp:
                line = line.split(" ")
                if line[0] == dst_port_dec or line[0] == src_port_dec:
                    udp_port = line[1].strip()
                    break
            f_tcp.close()
            file_out.write(udp_port + "\n" + "zdrojový port: " + src_port_dec + "\n" + "cieľový port: " + dst_port_dec + "\n")
        # Hex dump of the frame.
        vypis_ramca(dlzka_ramca, file_out, ramec_split)
        poradie = poradie + 1
    # Finally, the summary of all receiving IPv4 nodes.
    zisti_IP_uzly(prijimajuce_IP, file_out)
# Funkcia pre zistenie HTTP komunikácie
# Function for finding HTTP communication
def HTTP_f(packetlist, file_out):
    """Find the first HTTP (TCP) communication in `packetlist` and report it.

    Collects all HTTP frames, selects the communication matching the port
    pair of the first HTTP frame, classifies it as complete/incomplete from
    the TCP flags (SYN + two FINs or RST), and writes every frame of that
    communication with a hex dump.

    NOTE(review): `ramce_http[0]` raises IndexError when the capture
    contains no HTTP frames — confirm the inputs guarantee at least one.
    NOTE(review): offsets assume untagged Ethernet + 20 B IPv4 header, and
    the standard EtherType threshold is 1536 (0x0600), not 1600 — confirm.
    """
    poradie = 1
    ramce_http = []
    cisla_ramcov = []
    for pkt in packetlist:
        b = raw(pkt)
        # Convert the raw bytes to a list of uppercase hex-byte strings.
        ramec = b.hex(" ").upper()
        ramec_split = ramec.split(" ")
        pole_3 = "".join(ramec_split[12:14])
        pole_3_int = int(pole_3, 16)
        if pole_3_int >= 1600:
            f_E = open("EtherTypes.txt", "r")
            vnoreny_p = filter(f_E, pole_3)
            f_E.close()
            if vnoreny_p == "IPv4":
                f_IP = open("IPProtocolNumbers.txt", "r")
                ip_protokol = filter(f_IP, ramec_split[23])
                f_IP.close()
                if ip_protokol == "TCP":
                    src_port = "".join(ramec_split[34:36])
                    src_port_dec = str(int(src_port, 16))
                    dst_port = "".join(ramec_split[36:38])
                    dst_port_dec = str(int(dst_port, 16))
                    f_tcp = open("TCPports.txt", "r")
                    tcp_port = ""
                    for line in f_tcp:
                        line = line.split(" ")
                        if line[0] == dst_port_dec or line[0] == src_port_dec:
                            tcp_port = line[1].strip()
                            break
                    f_tcp.close()
                    # Remember HTTP frames and their 1-based frame numbers.
                    if tcp_port == "HTTP":
                        ramce_http.append(pkt)
                        cisla_ramcov.append(poradie)
        poradie = poradie + 1
    # Take the port pair of the first HTTP frame as the communication key.
    b = raw(ramce_http[0])
    ramec_http = b.hex(" ").upper()
    ramec_http_split = ramec_http.split(" ")
    src_port = "".join(ramec_http_split[34:36])
    src_ports = []
    src_ports.append(str(int(src_port, 16)))
    dst_ports = []
    dst_port = "".join(ramec_http_split[36:38])
    dst_ports.append(str(int(dst_port, 16)))
    komunikacia1 = []
    komunikacia1.append(ramce_http[0])
    cisla_ramcov_komunikacie = []
    cisla_ramcov_komunikacie.append(cisla_ramcov[0])
    for i in range(1, len(ramce_http)):
        b = raw(ramce_http[i])
        ramec = b.hex(" ").upper()
        ramec_split = ramec.split(" ")
        src_port = "".join(ramec_split[34:36])
        src_port_int = str(int(src_port, 16))
        dst_port = "".join(ramec_split[36:38])
        dst_port_int = str(int(dst_port, 16))
        # A frame belongs to the communication when its port pair matches
        # the first frame's pair in either direction.
        if (src_port_int == dst_ports[0] and dst_port_int == src_ports[0]) or (src_port_int == src_ports[0] and dst_port_int == dst_ports[0]):
            komunikacia1.append(ramce_http[i])
            cisla_ramcov_komunikacie.append(cisla_ramcov[i])
    # TCP flags: byte 47 of the frame (flags byte of the TCP header).
    syn = 0
    fin = 0
    rst = 0
    for pkt in komunikacia1:
        b = raw(pkt)
        ramec = b.hex(" ").upper()
        ramec_split = ramec.split(" ")
        flags_hex = ramec_split[47]
        flags = "{0:08b}".format(int(flags_hex, 16))
        # Bit 6 = SYN, bit 7 = FIN, bit 5 = RST (of the 8-bit flag string).
        if flags[6] == "1":
            syn = 1
        elif flags[7] == "1" and syn == 1:
            fin = fin + 1
        elif flags[5] == "1":
            rst = 1
    # Complete when an observed SYN is closed by two FINs or an RST.
    if syn == 1 and (fin == 2 or rst == 1):
        file_out.write("Úplná komunikácia\n")
        i = 0
        for pkt in komunikacia1:
            b = raw(pkt)
            # Re-derive the per-frame fields for the report.
            ramec = b.hex(" ").upper()
            ramec_split = ramec.split(" ")
            dlzka_ramca = len(pkt)
            dlzka_ramca_m = dlzka_ramca_po_mediu(dlzka_ramca)
            dst_mac = ":".join(ramec_split[:6])
            src_mac = ":".join(ramec_split[6:12])
            src_port = "".join(ramec_split[34:36])
            src_port_int = str(int(src_port, 16))
            dst_port = "".join(ramec_split[36:38])
            dst_port_int = str(int(dst_port, 16))
            src_IP = zdrojova_IP_f(ramec_split)
            dst_IP = cielova_IP_f(ramec_split)
            file_out.write("rámec " + str(cisla_ramcov_komunikacie[i]) + "\ndĺžka rámca poskytnutá pcap api: " + str(
                dlzka_ramca) + "\ndĺžka rámca prenášaného po médiu: " +
                           str(
                               dlzka_ramca_m) + "\n" + "Ethernet II\nZdrojová MAC adresa: " + src_mac + "\nCieľová MAC adresa: "
                           + dst_mac + "\n" + "IPv4\nZdrojová IP: " + src_IP + "\nCieľová IP: " + dst_IP + "\nTCP\n" + "HTTP\nZdrojový port: " + src_port_int + "\nCieľový port: " + dst_port_int + "\n")
            vypis_ramca(dlzka_ramca, file_out, ramec_split)
            i = i + 1
    else:
        file_out.write("Neúplná komunikácia\n")
        i = 0
        for pkt in komunikacia1:
            b = raw(pkt)
            # Same per-frame report as the complete case.
            ramec = b.hex(" ").upper()
            ramec_split = ramec.split(" ")
            dlzka_ramca = len(pkt)
            dlzka_ramca_m = dlzka_ramca_po_mediu(dlzka_ramca)
            dst_mac = ":".join(ramec_split[:6])
            src_mac = ":".join(ramec_split[6:12])
            src_port = "".join(ramec_split[34:36])
            src_port_int = str(int(src_port, 16))
            dst_port = "".join(ramec_split[36:38])
            dst_port_int = str(int(dst_port, 16))
            src_IP = zdrojova_IP_f(ramec_split)
            dst_IP = cielova_IP_f(ramec_split)
            file_out.write("rámec " + str(cisla_ramcov_komunikacie[i]) + "\ndĺžka rámca poskytnutá pcap api: " + str(dlzka_ramca) + "\ndĺžka rámca prenášaného po médiu: " +
                           str(dlzka_ramca_m) + "\n" + "Ethernet II\nZdrojová MAC adresa: " + src_mac + "\nCieľová MAC adresa: "
                           + dst_mac + "\n" + "IPv4\nZdrojová IP: " + src_IP + "\nCieľová IP: " + dst_IP + "\nTCP\n" + "HTTP\nZdrojový port: " + src_port_int + "\nCieľový port: " + dst_port_int + "\n")
            vypis_ramca(dlzka_ramca, file_out, ramec_split)
            i = i + 1
def HTTPS_f(packetlist, file_out):
poradie = 1
ramce_https = []
cisla_ramcov = []
for pkt in packetlist:
b = raw(pkt)
# Upravenie rámca
ramec = b.hex(" ").upper()
ramec_split = ramec.split(" ")
pole_3 = "".join(ramec_split[12:14])
pole_3_int = int(pole_3, 16)
if pole_3_int >= 1600:
f_E = open("EtherTypes.txt", "r")
vnoreny_p = filter(f_E, pole_3)
f_E.close()
if vnoreny_p == "IPv4":
f_IP = open("IPProtocolNumbers.txt", "r")
ip_protokol = filter(f_IP, ramec_split[23])
f_IP.close()
if ip_protokol == "TCP":
src_port = "".join(ramec_split[34:36])
src_port_dec = str(int(src_port, 16))
dst_port = "".join(ramec_split[36:38])
dst_port_dec = str(int(dst_port, 16))
f_tcp = | |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
GRADIENT_TESTS_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64)
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
def _NumpyScatterNd(ref, indices, updates, op):
  """Reference scatter-nd: combine `updates` into `ref` at `indices` via `op`.

  The last axis of `indices` addresses the leading dims of `ref`; each
  addressed slice is replaced by `op(current_slice, update_slice)`. Writes
  go through a reshaped view, so `ref` is updated in place when numpy
  returns a view (contiguous input); the result is also returned with
  `ref`'s shape.
  """
  ixdim = indices.shape[-1]
  num_updates = indices.size // ixdim
  slice_size = 1
  for dim in ref.shape[ixdim:]:
    slice_size *= dim
  flat_indices = _FlatInnerDims(indices)
  flat_updates = updates.reshape((num_updates, slice_size))
  output_flat = _FlatOuterDims(ref, ixdim + 1)
  for row, coord in enumerate(flat_indices):
    coord = tuple(coord)
    output_flat[coord] = op(output_flat[coord], flat_updates[row])
  return output_flat.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
  """Scatter-update reference: the new value replaces the old one."""
  return _NumpyScatterNd(ref, indices, updates, lambda old, new: new)
def _NumpyAdd(ref, indices, updates):
  """Scatter-add reference: updates are added to the existing values."""
  return _NumpyScatterNd(ref, indices, updates, lambda old, new: old + new)
def _NumpySub(ref, indices, updates):
  """Scatter-sub reference: updates are subtracted from the existing values."""
  return _NumpyScatterNd(ref, indices, updates, lambda old, new: old - new)
def _NumpyMul(ref, indices, updates):
  """Scatter-mul reference: existing values are multiplied by the updates."""
  return _NumpyScatterNd(ref, indices, updates, lambda old, new: old * new)
def _NumpyDiv(ref, indices, updates):
  """Scatter-div reference: existing values are divided by the updates."""
  return _NumpyScatterNd(ref, indices, updates, lambda old, new: old / new)
class StatefulScatterNdTest(test.TestCase):
  def _VariableRankTest(self,
                        np_scatter,
                        tf_scatter,
                        vtype,
                        itype,
                        repeat_indices=False):
    """Compare `tf_scatter` against the numpy reference `np_scatter`.

    Runs several (ref shape, indices shape) combinations with random data,
    optionally forcing duplicate indices, and asserts the TF variable after
    the scatter matches the numpy result.
    """
    np.random.seed(8)  # deterministic data/indices across runs
    ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
    indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
    with self.cached_session(use_gpu=True):
      for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
        num_updates = indices_shape[0]
        ixdim = indices_shape[-1]
        # Enumerate every valid index tuple into the leading `ixdim` dims,
        # then sample `num_updates` of them without replacement.
        indexable_area_shape = ()
        for i in range(ixdim):
          indexable_area_shape += (ref_shape[i],)
        all_indices = [
            list(coord)
            for coord, _ in np.ndenumerate(
                np.empty(indexable_area_shape, vtype))
        ]
        np.random.shuffle(all_indices)
        indices = np.array(all_indices[:num_updates])
        if num_updates > 1 and repeat_indices:
          # Force duplicates: keep the first half and resample the rest
          # from it, so some indices appear more than once.
          indices = indices[:num_updates // 2]
          for _ in range(num_updates - num_updates // 2):
            indices = np.append(
                indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
          np.random.shuffle(indices)
        indices = _AsType(indices[:num_updates], itype)
        # Updates cover the trailing (non-indexed) dims of the ref shape.
        updates_shape = (num_updates,)
        for i in range(ixdim, len(ref_shape)):
          updates_shape += (ref_shape[i],)
        updates = _AsType(np.random.randn(*(updates_shape)), vtype)
        ref = _AsType(np.random.randn(*(ref_shape)), vtype)
        # Scatter via numpy
        new = ref.copy()
        np_scatter(new, indices, updates)
        # Scatter via tensorflow
        ref_var = variables.VariableV1(ref)
        ref_var.initializer.run()
        tf_scatter(ref_var, indices, updates).eval()
        # float16 accumulates rounding error when indices repeat.
        tol = 1e-03 if repeat_indices and vtype == np.float16 else 1e-06
        # Compare
        self.assertAllClose(new, self.evaluate(ref_var), atol=tol, rtol=tol)
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
np.complex128):
for itype in (np.int32, np.int64):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def testSimple(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testSimpleResource(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = resource_variable_ops.ResourceVariable(
[0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
self.evaluate(scatter)
self.assertAllClose(ref.eval(), expected)
def testSimple2(self):
indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
def testSimple3(self):
indices = constant_op.constant([[1]], dtype=dtypes.int32)
updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.session(use_gpu=True) as sess:
self.evaluate(init)
result = self.evaluate(scatter)
self.assertAllClose(result, expected)
  @test_util.run_deprecated_v1
  def testVariableRankUpdate(self):
    """Dtype sweep of scatter_nd_update against the numpy reference."""
    self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)
  @test_util.run_deprecated_v1
  def testVariableRankAdd(self):
    """Dtype sweep of scatter_nd_add against the numpy reference."""
    self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
  @test_util.run_deprecated_v1
  def testVariableRankSub(self):
    """Dtype sweep of scatter_nd_sub against the numpy reference."""
    self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul.
# def testVariableRankMul(self):
# self._VariableRankTests(_NumpyMul, state_ops.scatter_nd_mul)
# TODO(ebrevdo): Re-enable when we need ScatterNdDiv.
# def testVariableRankDiv(self):
# self._VariableRankTests(_NumpyDiv, state_ops.scatter_nd_div)
def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
for vtype in (np.int32, np.float16, np.float32, np.float64):
for itype in (np.int32, np.int64):
self._VariableRankTest(
np_scatter, tf_scatter, vtype, itype, repeat_indices=True)
@test_util.run_v1_only("b/120545219")
def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
# TODO(ebrevdo): Re-enable when we need ScatterNdMul and ScatterNdDiv.
# self._ScatterRepeatIndicesTest(_NumpyMul, state_ops.scatter_nd_mul)
# self._ScatterRepeatIndicesTest(_NumpyDiv, state_ops.scatter_nd_div)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control and this op is re-enabled
# def testBooleanScatterUpdate(self):
# with self.session(use_gpu=False) as session:
# var = tf.Variable([True, False])
# update0 = tf.compat.v1.scatter_nd_update(var, [[1]], [True])
# update1 = tf.compat.v1.scatter_nd_update(
# var, tf.constant(
# [[0]], dtype=tf.int64), [False])
# var.initializer.run()
# session.run([update0, update1])
# self.assertAllEqual([False, True], self.evaluate(var))
  @test_util.run_v1_only("b/120545219")
  def testScatterOutOfRangeCpu(self):
    """In-range indices succeed on CPU; out-of-range ones raise an op error."""
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
               state_ops.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      with self.cached_session(use_gpu=False):
        ref = variables.VariableV1(params)
        ref.initializer.run()
        # Indices all in range, no problem.
        indices = np.array([[2], [0], [5]])
        op(ref, indices, updates).eval()
        # Test some out of range errors.
        indices = np.array([[-1], [0], [5]])
        with self.assertRaisesOpError(
            r"indices\[0\] = \[-1\] does not index into shape \[6\]"):
          op(ref, indices, updates).eval()
        indices = np.array([[2], [0], [6]])
        with self.assertRaisesOpError(
            r"indices\[2\] = \[6\] does not index into shape \[6\]"):
          op(ref, indices, updates).eval()
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
self.assertAllEqual(
state_ops.scatter_nd_update(ref, indices,
updates).get_shape().as_list(), shape)
  @test_util.run_v1_only("b/120545219")
  @test_util.disable_xla("b/123337890")  # Error messages differ
  def testResVarInvalidOutputShape(self):
    """Scattering into a scalar (rank-0) resource variable raises an error."""
    res = variables.Variable(
        initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
        dtype=dtypes.float32)
    with self.cached_session():
      res.initializer.run()
      with self.assertRaisesOpError("Output must be at least 1-D"):
        state_ops.scatter_nd_update(res, [[0]], [0.22]).eval()
@test_util.run_deprecated_v1
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.cached_session():
ref.initializer.run()
self.assertAllEqual(expected_result, self.evaluate(scatter_update))
@test_util.run_deprecated_v1
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, r"The outer \d+ dimensions of indices\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
@test_util.run_deprecated_v1
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, r"The inner \d+ dimensions of input\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
@test_util.run_deprecated_v1
def testConcurrentUpdates(self):
    """Duplicate indices in scatter_nd_add must accumulate every update."""
    num_updates = 10000
    update_values = np.random.rand(num_updates)
    ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
    # Every update targets the same element [0, 1]; scatter_nd_add is
    # expected to sum all contributions rather than keep an arbitrary one.
    indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
    updates = constant_op.constant(update_values, dtype=dtypes.float64)
    expected_result = np.zeros([2, 2], dtype=np.float64)
    expected_result[0, 1] = np.sum(update_values)
    scatter = state_ops.scatter_nd_add(ref, indices, updates)
    init = variables.global_variables_initializer()
    with session.Session():  # the session object itself was never used
        self.evaluate(init)
        result = self.evaluate(scatter)
    # Use the framework assertion instead of a bare `assert`: it reports the
    # mismatching values on failure and is not stripped under `python -O`.
    self.assertAllClose(result, expected_result)
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if not test.IsBuiltWithCuda():
return
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices | |
<filename>single_objective_trainer.py
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved.
This class is used for training models and is the core of the framework.
With the help of this class, the user of the framework is able to train and
develop models. The framework gets all the relevant objects as an input, and
all the parameters from a YAML file or a dictionary containing the parameters,
it instantiates all the relevant helper objects for training the model and does
the training.
"""
from validator import Validator
from paretomanager.pareto_manager_class import ParetoManager
from metric.metric_at_k import MetricAtK
from loss.loss_class import Loss
import torch.nn as nn
from dataloader.mamo_data_handler import MamoDataHandler
import time
import numpy as np
import os
import torch
import torch.optim as optim
from torch.autograd import Variable
import yaml
import logging
logger = logging.getLogger('main')
logger.setLevel(logging.INFO)
class SingleObjectiveTrainer():
"""The trainer class, the core of the framework, used for training models.
All the needed objects for this class have to be given through the
constructor.
Additionally, the other parameters needed by this trainer have to be
supplied in a YAML file named 'trainer_params.yaml' or a dictionary
containing the parameters.
For more details about the parameters supplied in this YAML file,
please refer to 'Attributes from the YAML file' section below.
Attributes:
data_handler: A MamoDataHandler object which feeds the data set to the trainer.
model: A torch.nn.Module object which is the model that is being trained.
loss: A single loss object which represents the loss/objective that the
model is trained on.
validation_metrics: A list of MetricAtK objects which are used to evaluate
the model during the training and validation process.
save_to_path: A path to a directory where the trained models from the Pareto
front will be saved during training.
device: A variable indicating whether the model is trained on the gpu or on
the cpu.
_train_dataloader: A dataloader object used for feeding the data to the trainer.
pareto_manager: A ParetoManager which is responsible for maintaining a pareto
front of models and saving these models on permanent storage.
validator: A Validator object which is used to evaluate the models on multiple
objective and multiple losses.
optimizer: A pytorch optimizer which is used to train the model.
Attributes from the YAML file:
seed: An integer, used to initialize the numpy and pytorch random seeds, default = 42.
learning_rate: A float value, the learning rate that is given to the pytorch
optimizer, default = 1e-3.
batch_size_training: An integer value, representing the batch sizes in which the data is
fed to the trainer, default = 500.
shuffle_training: A boolean value, indicating if the training data should be shuffled,
default = True.
drop_last_batch_training: A boolean value, indicating to drop the last incomplete batch,
if the training dataset size is not divisible by the batch size, default = True.
batch_size_validation: An integer value, representing the batch sizes in which the data is
fed to the validator, default = 500.
shuffle_validation: A boolean value, indicating if the validation data should be shuffled,
default = True.
drop_last_batch_validation: A boolean value, indicating to drop the last incomplete batch,
if the validation dataset size is not divisible by the batch size, default = False.
number_of_epochs: An integer value, indicating for how many epochs should the model
be trained, default = 50.
anneal: A boolean value, indicating if annealing should be used while training the
model, default = True.
beta_start: If the anneal is used, this will be the first value of the beta,
default = 0.
beta_cap: If the anneal is used, this will be the maximum value of the beta,
default = 3.
beta_step: If the anneal is used, this is the amount by which to increase the beta
every batch, default = 3/50.
"""
def __init__(self, data_handler, model, loss, validation_metrics,
             save_to_path, params='yaml_files/trainer_params.yaml',
             optimizer=None):
    """The constructor which initializes a trainer object.

    Arguments:
        data_handler: A MamoDataHandler object which feeds the data set to the trainer.
        model: A torch.nn.Module object which is the model that is being trained.
        loss: A loss object which represents the loss/objective that the model is
            trained on.
        validation_metrics: A list of MetricAtK objects which are used to evaluate
            the model during the training and validation process.
        save_to_path: A path to a directory where the trained models from the Pareto
            front will be saved during training.
        params: Path to the yaml file with the trainer parameters, or a dictionary
            containing the parameters.
        optimizer: A pytorch optimizer which is used to train the model, if it is None,
            a default Adam optimizer is created.

    Raises:
        TypeError: If any of the arguments passed are not an instance of the expected
            class or are None, a TypeError will be raised.
        ValueError: If the directory which save_to_path references is not empty, a
            ValueError will be raised.
    """
    logger.info('Trainer: Started with initializing trainer...')
    self._check_input_(data_handler, model, loss,
                       validation_metrics, save_to_path, optimizer)
    self._read_params(params)
    self.data_handler = data_handler
    self.model = model
    self.loss = loss
    logger.info('Trainer: Loss: %s' % loss.name)
    self.validation_metrics = validation_metrics
    logger.info('Trainer: Validation metrics: ')
    # Prefix once and join the names with spaces; the previous code used
    # 'Trainer: ' as the *separator* of str.join, garbling the log line.
    logger.info('Trainer: ' + ' '.join(['%s' % m.get_name() for m in self.validation_metrics]))
    self.save_to_path = save_to_path
    logger.info('Trainer: Saving models to: %s' % self.save_to_path)
    self.optimizer = optimizer
    # Train on the GPU when one is available, otherwise fall back to the CPU.
    self.device = torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')
    logger.info('Trainer: Training on device: %s' % self.device)
    self._init_objects()
    logger.info('Trainer: Initialization done.')
def _check_input_(self, data_handler, model, loss, validation_metrics, save_to_path, optimizer):
    """A helper function for the __init__ to check the input of the constructor.

    Raises:
        TypeError: If an argument is not an instance of the expected class.
        ValueError: If validation_metrics is None or empty, or if the
            save_to_path directory already contains files.
    """
    if not isinstance(data_handler, MamoDataHandler):
        raise TypeError(
            'Please check you are using the right data handler object, or the right order of the attributes!')
    if not isinstance(model, nn.Module):
        raise TypeError(
            'Please check you are using the right model object, or the right order of the attributes!')
    if not hasattr(model, 'initialize_model'):
        raise TypeError(
            'Please check if your models has initialize_model() method defined!')
    # this checks also if loss is None
    if not isinstance(loss, Loss):
        raise TypeError(
            'Please check you are using the right loss object, or the right order of the attributes!')
    # check if validation metrics is None
    if validation_metrics is None:
        raise ValueError(
            'The validation_metrics are None, please make sure to give valid validation_metrics!')
    if not all(isinstance(x, MetricAtK) for x in validation_metrics):
        raise TypeError(
            'Please check you are using the right metric objects, or the right order of the attributes!')
    # check if length is at least 1
    if len(validation_metrics) == 0:
        raise ValueError(
            'Please check you have defined at least one validation metric!')
    if not os.path.exists(save_to_path):
        # makedirs (rather than mkdir) also creates missing parent
        # directories, so a nested save path does not raise FileNotFoundError.
        os.makedirs(save_to_path)
    # checking if the save_to_path directory is empty
    if os.listdir(save_to_path):
        raise ValueError(
            'Please make sure that the directory where you want to save the models is empty!')
    # if the optimizer is not None, then it has to be a pytorch Optimizer object
    if optimizer is not None:
        if not isinstance(optimizer, optim.Optimizer):
            raise TypeError(
                'Please make sure that the optimizer is a pytorch Optimizer object!')
def _read_params(self, params):
"""A helper function for the __init__ to read the configuration yaml file.
"""
logger.info('Trainer: Reading yaml trainer parameters.')
if type(params) is str:
with open(params, 'r') as stream:
params = yaml.safe_load(stream)
self.seed = int(params.get('seed', 42))
logger.info('Trainer: Random seed: %d' % self.seed)
self.learning_rate = float(params.get('learning_rate', 1e-3))
logger.info('Trainer: Learning rate: %f' % self.learning_rate)
self.batch_size_training = int(params.get('batch_size_training', 500))
logger.info('Trainer: Batch size training: %d' %
self.batch_size_training)
self.shuffle_training = bool(params.get('shuffle_training', True))
logger.info('Trainer: Shuffle training: %d' %
self.shuffle_training)
self.drop_last_batch_training = bool(
params.get('drop_last_batch_training', True))
logger.info('Trainer: Drop last batch training: %d' %
self.drop_last_batch_training)
self.batch_size_validation = int(
params.get('batch_size_validation', 500))
logger.info('Trainer: Batch size validation: %d' %
self.batch_size_validation)
self.shuffle_validation = bool(params.get('shuffle_validation', True))
logger.info('Trainer: Shuffle validation: %d' %
self.shuffle_validation)
self.drop_last_batch_validation = bool(
params.get('drop_last_batch_validation', False))
logger.info('Trainer: Drop last batch validation: %d' %
self.drop_last_batch_validation)
self.number_of_epochs = int(params.get('number_of_epochs', 50))
logger.info('Trainer: Number of epochs: %f' % self.number_of_epochs)
self.anneal = bool(params.get('anneal', False))
logger.info('Trainer: Annealing: %s' % self.anneal)
if self.anneal and ('beta_start' not in params or 'beta_cap' not in params or 'beta_step' not in params):
raise ValueError(('Please make sure that if anneal is set to True, '
'the beta_start, beta_cap and beta_step are all '
'present in the parameters yaml file!'))
if self.anneal:
self.beta_start = float(params['beta_start'])
logger.info('Trainer: Beta start: %f' | |
typeLower, parameters=settingFunctionEventParameters))
self.SetFunctionCompletions(result.type, functions, False)
except:
return
if functions:
completions.extend(functions)
properties = self.GetPropertyCompletions(result.type)
if not properties:
try:
script = sem.GetCachedScript(result.type)
if script:
properties = []
typeLower = result.type.lower()
for name, obj in script.properties.items():
properties.append(SublimePapyrus.MakePropertyCompletion(obj, typeLower))
self.SetPropertyCompletions(result.type, properties)
except:
return
if properties:
completions.extend(properties)
return completions
return
except Linter.SemanticError as g:
return
return completions
else: # Not following a dot
for name, obj in e.functions[0].items():
if obj.type == syn.STAT_FUNCTIONDEF:
completions.append(SublimePapyrus.MakeFunctionCompletion(obj, sem, True, "parent", parameters=settingFunctionEventParameters))
elif obj.type == syn.STAT_EVENTDEF:
completions.append(SublimePapyrus.MakeEventCompletion(obj, sem, True, "parent", parameters=settingFunctionEventParameters))
for name, obj in e.functions[1].items():
if obj.type == syn.STAT_FUNCTIONDEF:
completions.append(SublimePapyrus.MakeFunctionCompletion(obj, sem, True, "self", parameters=settingFunctionEventParameters))
elif obj.type == syn.STAT_EVENTDEF:
completions.append(SublimePapyrus.MakeEventCompletion(obj, sem, True, "self", parameters=settingFunctionEventParameters))
for name, obj in e.variables[0].items():
if obj.type == syn.STAT_PROPERTYDEF:
completions.append(SublimePapyrus.MakePropertyCompletion(obj, "parent"))
for name, obj in e.variables[1].items():
if obj.type == syn.STAT_PROPERTYDEF:
completions.append(SublimePapyrus.MakePropertyCompletion(obj, "self"))
elif obj.type == syn.STAT_VARIABLEDEF:
completions.append(SublimePapyrus.MakeVariableCompletion(obj))
for scope in e.variables[2:]:
for name, obj in scope.items():
if obj.type == syn.STAT_VARIABLEDEF:
completions.append(SublimePapyrus.MakeVariableCompletion(obj))
elif obj.type == syn.STAT_PARAMETER:
completions.append(SublimePapyrus.MakeParameterCompletion(obj))
completions.extend(self.GetTypeCompletions(view, False))
completions.append(self.completionKeywordFalse)
completions.append(self.completionKeywordTrue)
completions.append(self.completionKeywordNone)
completions.append(self.completionKeywordAs)
if not sem.KW_GLOBAL in e.signature.data.flags:
completions.append(self.completionKeywordSelf)
completions.append(self.completionKeywordParent)
# Imported global functions
for imp in e.imports:
functions = self.GetFunctionCompletions(imp, True)
if not functions:
try:
script = sem.GetCachedScript(imp)
if script:
functions = []
impLower = imp.lower()
for name, obj in script.functions.items():
if lex.KW_GLOBAL in obj.data.flags:
functions.append(SublimePapyrus.MakeFunctionCompletion(obj, sem, True, impLower, parameters=settingFunctionEventParameters))
self.SetFunctionCompletions(imp, functions, True)
except:
return
if functions:
completions.extend(functions)
# Show info about function/event parameters
stack = syn.stack[:]
arguments = []
for item in reversed(stack):
if item.type == sem.NODE_FUNCTIONCALLARGUMENT:
arguments.insert(0, stack.pop())
elif item.type == sem.LEFT_PARENTHESIS:
break
stackLength = len(stack)
func = None
if stackLength >= 2 and stack[-2].type == sem.IDENTIFIER:
name = stack[-2].value.upper()
if stackLength >= 4 and stack[-3].type == sem.OP_DOT:
try:
result = sem.NodeVisitor(stack[-4])
if result.type != sem.KW_SELF:
try:
script = sem.GetCachedScript(result.type)
if script:
func = script.functions.get(name, None)
except Linter.SemanticError as e:
return
else:
for scope in reversed(e.functions):
func = scope.get(name, None)
if func:
break
except Linter.SemanticError as e:
return
else:
for scope in reversed(e.functions):
func = scope.get(name, None)
if func:
break
for imp in e.imports:
script = sem.GetCachedScript(imp)
temp = script.functions.get(name, None)
if temp:
if func:
func = None
else:
func = temp
break
if func and func.data.parameters:
for param in func.data.parameters:
completions.append(SublimePapyrus.MakeParameterCompletion(Linter.Statement(sem.STAT_PARAMETER, 0, param)))
global SUBLIME_VERSION
tooltipParameters = settings.get("tooltip_function_parameters", True)
tooltipDocstring = settings.get("tooltip_function_docstring", True)
if SUBLIME_VERSION >= 3070 and prefix == "" and (tooltipParameters or tooltipDocstring):
if not view.is_popup_visible():
self.ShowFunctionInfo(view, tokens, func, len(arguments), tooltipParameters, tooltipDocstring)
return completions
except Linter.SyntacticError as f:
if syn.stack and syn.stack[-2].type == syn.LEFT_PARENTHESIS and syn.stack[-1].type != syn.RIGHT_PARENTHESIS: # Expression enclosed by parentheses
completions.append(self.completionKeywordAs)
return completions
return
return
except Linter.PropertyDefinitionCancel as e:
if not lineString:
typ = None
if e.array:
typ = "%s[]" % e.type.capitalize()
else:
typ = "%s" % e.type.capitalize()
if not "GET" in e.functions:
completions.append(("get\t%s func." % typ, "%s Function Get()\n\t${0}\nEndFunction" % typ,))
if not "SET" in e.functions:
completions.append(("set\tfunc.", "Function Set(%s aParameter)\n\t${0}\nEndFunction" % typ,))
return completions
else:
tokens = []
try:
for token in lex.Process(lineString):
if token.type != lex.NEWLINE:
tokens.append(token)
except Linter.LexicalError as f:
return
if tokens:
if tokens[-1].type != lex.COMMENT_LINE:
pass
return
except Linter.SemanticError as e:
return
return
def IsValidScope(self, view):
    """Return True when this linter's scope occurs in the view's base scope."""
    if not self.validScope:
        return False
    return self.validScope in view.scope_name(0)
def ClearCompletionCache(self, script):
    """Drop all cached property and function completions for `script`.

    Uses dict.pop so that even falsy entries (e.g. an empty completion
    list stored by SetPropertyCompletions) are removed; the previous
    truthiness checks left such entries behind as stale cache.
    """
    global cacheLock
    with cacheLock:
        global completionCache
        if "properties" in completionCache:
            completionCache["properties"].pop(script, None)
        if "functions" in completionCache:
            completionCache["functions"].pop(script, None)
def ClearLinterCache(self, script):
    """Remove the cached linter result for `script`, if one is stored."""
    global cacheLock
    global linterCache
    with cacheLock:
        if linterCache.get(script):
            del linterCache[script]
def ClearSemanticAnalysisCache(self, script):
    """Evict `script` and every cached script extending it from sem.cache."""
    global cacheLock
    global sem
    with cacheLock:
        if not sem:
            return
        if sem.cache.get(script):
            del sem.cache[script]
        # Collect first, delete afterwards: the cache must not change
        # size while it is being iterated.
        descendants = [name for name, cached in sem.cache.items()
                       if script in cached.extends]
        for name in descendants:
            del sem.cache[name]
def GetScript(self, bufferID):
    """Return the cached lint result for `bufferID`, or None when absent."""
    global cacheLock
    global linterCache
    with cacheLock:
        return linterCache.get(bufferID)
def SetScript(self, bufferID, script):
    """Store `script` as the cached lint result for `bufferID`."""
    global cacheLock
    global linterCache
    with cacheLock:
        linterCache[bufferID] = script
def GetFunctionCompletions(self, script, glob = False):
    """Return cached function completions for `script`.

    `glob` selects the global-function bucket, otherwise the non-global
    bucket. None means nothing usable is cached yet.
    """
    global cacheLock
    global completionCache
    with cacheLock:
        allFunctions = completionCache.get("functions", None)
        if not allFunctions:
            return None
        perScript = allFunctions.get(script, False)
        if not perScript:
            return None
        bucket = "global" if glob else "nonglobal"
        return perScript.get(bucket, None)
def SetFunctionCompletions(self, script, obj, glob = False):
    """Cache `obj` as the global or non-global function completions of `script`."""
    global cacheLock
    global completionCache
    with cacheLock:
        # setdefault creates the nested dicts on first use, replacing the
        # original create-then-refetch sequence with identical results.
        perScript = completionCache.setdefault("functions", {}).setdefault(script, {})
        perScript["global" if glob else "nonglobal"] = obj
def GetPropertyCompletions(self, script):
    """Return cached property completions for `script`, or None when absent."""
    global cacheLock
    global completionCache
    with cacheLock:
        byScript = completionCache.get("properties", None)
        if not byScript:
            return None
        return byScript.get(script, None)
def SetPropertyCompletions(self, script, obj):
    """Cache `obj` as the property completions of `script`."""
    global cacheLock
    global completionCache
    with cacheLock:
        completionCache.setdefault("properties", {})[script] = obj
def GetTypeCompletions(self, view, baseTypes = True):
    """Return completions for every script type reachable from `view`.

    The list is built once from the .psc files in the source paths and
    cached; when `baseTypes` is True the built-in value types are appended
    to a copy of the cached list.
    """
    global cacheLock
    with cacheLock:
        global completionCache
        scripts = completionCache.get("types", None)
        if not scripts:
            scripts = []
            for path in SublimePapyrus.GetSourcePaths(view):
                if os.path.isdir(path):
                    for file in os.listdir(path):
                        # Case-insensitive endswith() instead of a substring
                        # test: names like "Foo.psc.bak" must not match,
                        # since [:-4] below strips exactly the extension.
                        if file.lower().endswith(".psc"):
                            scripts.append(("%s\tscript" % file[:-4].lower(), "%s" % file[:-4]))
            scripts = list(set(scripts))
            self.SetTypeCompletions(scripts)
        # Work on a copy: extending the cached list in place polluted the
        # cache with the base types and grew it on every call.
        scripts = list(scripts)
        if baseTypes:
            scripts.extend([("bool\ttype", "Bool",), ("float\ttype", "Float",), ("int\ttype", "Int",), ("string\ttype", "String",)])
        return scripts
def SetTypeCompletions(self, obj):
    """Cache the list of known script-type completions."""
    global cacheLock
    global completionCache
    with cacheLock:
        completionCache["types"] = obj
class SublimePapyrusSkyrimClearCache(sublime_plugin.WindowCommand):
    """Window command that empties every plugin-level cache."""

    def run(self):
        global cacheLock
        global sem
        global linterCache
        global completionCache
        with cacheLock:
            # Rebind fresh containers; the module's getters repopulate
            # them lazily on the next request.
            linterCache = {}
            completionCache = {}
            sem.cache = {}
class SublimePapyrusSkyrimActorValueSuggestionsCommand(SublimePapyrus.SublimePapyrusShowSuggestionsCommand):
    """Suggestion command listing Skyrim actor values.

    Keys are the human-readable labels shown in the suggestion panel;
    values are the actor-value identifiers inserted into the script.
    """

    def get_items(self, **args):
        items = {
            "Aggression": "Aggression",
            "Alchemy": "Alchemy",
            "Alteration modifier": "AlterationMod",
            "Alteration power modifier": "AlterationPowerMod",
            "Alteration": "Alteration",
            "Armor perks": "ArmorPerks",
            "Assistance": "Assistance",
            "Attack damage multiplier": "AttackDamageMult",
            "Block": "Block",
            "Bow speed bonus": "BowSpeedBonus",
            "Brain condition": "BrainCondition",
            "Bypass vendor keyword check": "BypassVendorKeywordCheck",
            "Bypass vendor stolen check": "BypassVendorStolenCheck",
            "Carry weight": "CarryWeight",
            "Combat health regeneration multiplier modifier": "CombatHealthRegenMultMod",
            "Combat health regeneration multiplier power modifier": "CombatHealthRegenMultPowerMod",
            "Confidence": "Confidence",
            "Conjuration modifier": "ConjurationMod",
            "Conjuration power modifier": "ConjurationPowerMod",
            "Conjuration": "Conjuration",
            "Critical chance": "CritChance",
            "Damage resistance": "DamageResist",
            "Destruction modifier": "DestructionMod",
            "Destruction power modifier": "DestructionPowerMod",
            "Destruction": "Destruction",
            "Detect life range": "DetectLifeRange",
            "Disease resistance": "DiseaseResist",
            "Dragon souls": "DragonSouls",
            "Enchanting": "Enchanting",
            "Endurance condition": "EnduranceCondition",
            "Energy": "Energy",
            "Equipped item charge": "EquippedItemCharge",
            "Equipped staff charge": "EquippedStaffCharge",
            "Fame": "Fame",
            "Favor active": "FavorActive",
            "Favor points bonus": "FavorPointsBonus",
            "Favors per day timer": "FavorsPerDayTimer",
            "Favors per day": "FavorsPerDay",
            "Fire resistance": "FireResist",
            "Frost resistance": "FrostResist",
            "Heal rate": "HealRate",
            "Health": "Health",
            "Heavy armor": "HeavyArmor",
            "Ignore crippled limbs": "IgnoreCrippledLimbs",
            "Illusion modifier": "IllusionMod",
            "Illusion power modifier": "IllusionPowerMod",
            "Illusion": "Illusion",
            "Infamy": "Infamy",
            "Inventory weight": "InventoryWeight",
            "Invisibility": "Invisibility",
            "Jumping bonus": "JumpingBonus",
            "Last bribed or intimidated": "LastBribedIntimidated",
            "Last flattered": "LastFlattered",
            "Left attack condition": "LeftAttackCondition",
            "Left mobility condition": "LeftMobilityCondition",
            "Light armor": "LightArmor",
            "Lockpicking": "Lockpicking",
            "Magic resistance": "MagicResist",
            "Magicka rate": "MagickaRate",
            "Magicka": "Magicka",
            "Marksman": "Marksman",
            "Mass": "Mass",
            "Melee damage": "MeleeDamage",
            "Mood": "Mood",
            "Morality": "Morality",
            "Night eye": "NightEye",
            "One-handed": "OneHanded",
            "Paralysis": "Paralysis",
            "Perception condition": "PerceptionCondition",
            "Pickpocket": "Pickpocket",
            "Poison resistance": "PoisonResist",
            "Restoration modifier": "RestorationMod",
            "Restoration power modifier": "RestorationPowerMod",
            "Restoration": "Restoration",
            "Right attack condition": "RightAttackCondition",
            "Right mobility condition": "RightMobilityCondition",
            "Shield perks": "ShieldPerks",
            "Shock resistance": "ElectricResist",
            "Shout recovery multiplier": "ShoutRecoveryMult",
            "Smithing": "Smithing",
            "Sneak": "Sneak",
            "Speech": "Speechcraft",
            "Speed multiplier": "SpeedMult",
            "Stamina rate": "StaminaRate",
            "Stamina": "Stamina",
            "Two-handed": "TwoHanded",
            "Unarmed damage": "UnarmedDamage",
            "Variable 01": "Variable01",
            "Variable 02": "Variable02",
            "Variable 03": "Variable03",
            "Variable 04": "Variable04",
            "Variable 05": "Variable05",
            "Variable 06": "Variable06",
            "Variable 07": "Variable07",
            "Variable 08": "Variable08",
            "Variable 09": "Variable09",
            "Variable 10": "Variable10",
            "Voice points": "VoicePoints",
            "Voice rate": "VoiceRate",
            "Waiting for player": "WaitingForPlayer",
            "Ward deflection": "WardDeflection",
            "Ward power": "WardPower",
            # Label typo fixed: was "Water breating".
            "Water breathing": "WaterBreathing",
            "Water walking": "WaterWalking",
            "Weapon speed multiplier": "WeaponSpeedMult",
        }
        return items
class SublimePapyrusSkyrimFormTypeSuggestionsCommand(SublimePapyrus.SublimePapyrusShowSuggestionsCommand):
    """Suggestion command listing form types.

    Keys are the labels shown in the suggestion panel (record signature in
    parentheses where known); values are the integer form-type IDs inserted
    into the script. NOTE(review): the IDs appear to match Skyrim's
    GetType() form-type codes — confirm against the game documentation.
    """

    def get_items(self, **args):
        # Mapping is returned fresh on every call; callers may mutate it safely.
        items = {
            "(TLOD)": 74,
            "(TOFT)": 86,
            "Acoustic Space (ASPC)": 16,
            "Action (AACT)": 6,
            "Activator (ACTI)": 24,
            "Actor (NPC_)": 43,
            "ActorValueInfo (AVIF)": 95,
            "Addon Node (ADDN)": 94,
            "AI Package (PACK)": 79,
            "Ammo (AMMO)": 42,
            "Animated Object (ANIO)": 83,
            "Apparatus (APPA)": 33,
            "Armor (ARMO)": 26,
            "Armor Addon (ARMA)": 102,
            "Arrow Projectile (PARW)": 64,
            "Art Object (ARTO)": 125,
            "Association Type (ASTP)": 123,
            "Barrier Projectile (PBAR)": 69,
            "Beam Projectile (PBEA)": 66,
            "Body Part Data (BPTD)": 93,
            "Book (BOOK)": 27,
            "Camera Path (CPTH)": 97,
            "Camera Shot (CAMS)": 96,
            "Cell (CELL)": 60,
            "Character": 62,
            "Class (CLAS)": 10,
            "Climate (CLMT)": 55,
            "Collision Layer (COLL)": 132,
            "Color Form (CLFM)": 133,
            "Combat Style (CSTY)": 80,
            "Cone/Voice Projectile (PCON)": 68,
            "Constructible Object (COBJ)": 49,
            "Container (CONT)": 28,
            "Debris (DEBR)": 88,
            "Default Object Manager (DOBJ)": 107,
            "Dialog View (DLVW)": 117,
            "Dialogue Branch (DLBR)": 115,
            "Door (DOOR)": 29,
            "Dual Cast Data (DUAL)": 129,
            "Effect Setting": 18,
            "Effect Shader (EFSH)": 85,
            "Enchantment (ENCH)": 21,
            "Encounter Zone (ECZN)": 103,
            "Equip Slot (EQUP)": 120,
            "Explosion (EXPL)": 87,
            "Eyes (EYES)": 13,
            "Faction (FACT)": 11,
            "Flame Projectile (PLFA)": 67,
            "Flora (FLOR)": 39,
            "Footstep (FSTP)": 110,
            "Footstep Set (FSTS)": 111,
            "FormID List (FLST)": 91,
            "Furniture (FURN)": 40,
            "Game Setting (GMST)": 3,
            "Global Variable (GLOB)": 9,
            "Grass (GRAS)": 37,
            "Grenade Projectile (PGRE)": 65,
            "Group (GRUP)": 2,
            "Hazard (HAZD)": 51,
            "Head Part (HDPT)": 12,
            "Idle (IDLE)": 78,
            "Idle Marker (IDLM)": 47,
            "Image Space (IMGS)": 89,
            "Image Space Modifier (IMAD)": 90,
            "Impact Data (IPCT)": 100,
            "Impact Data Set (IPDS)": 101,
            "Ingredient (INGR)": 30,
            "Key (KEYM)": 45,
            "Keyword (KYWD)": 4,
            "Land Texture (LTEX)": 20,
            "Landscape (LAND)": 72,
            "Leveled Actor (LVLN)": 44,
            "Leveled Item (LVLI)": 53,
            "Leveled Spell (LVLS)": 82,
            "Light (LIGH)": 31,
            "Lighting Template (LGTM)": 108,
            "Load Screen (LSCR)": 81,
            "Location (LCTN)": 104,
            "Location Reference Type (LCRT)": 5,
            "Main File Header (TES4)": 1,
            "Material Object (MATO)": 126,
            "Material Type (MATT)": 99,
            "Menu Icon": 8,
            "Message (MESG)": 105,
            "Miscellaneous Object (MISC)": 32,
            "Missile Projectile (PMIS)": 63,
            "Movable Static (MSTT)": 36,
            "Movement Type (MOVT)": 127,
            "Music Track (MUST)": 116,
            "Music Type (MUSC)": 109,
            "Navigation (NAVI)": 59,
            "Navigation Mesh (NAVM)": 73,
            "None": 0,
            "Note": 48,
            "Object Reference (REFR)": 61,
            "Outfit (OTFT)": 124,
            "Perk (PERK)": 92,
            "Placed Hazard (PHZD)": 70,
            "Potion (ALCH)": 46,
            "Projectile (PROJ)": 50,
            "Quest (QUST)": 77,
            "Race (RACE)": 14,
            "Ragdoll (RGDL)": 106,
            "Region (REGN)": 58,
            "Relationship (RELA)": 121,
            "Reverb Parameter (REVB)": 134,
            "Scene (SCEN)": 122,
            "Script (SCPT)": 19,
            "Scroll Item (SCRL)": 23,
            "Shader Particle Geometry Data (SPGD)": 56,
            "Shout (SHOU)": 119,
            "Skill": 17,
            "Soul Gem (SLGM)": 52,
            "Sound (SOUN)": 15,
            "Sound Category (SNCT)": 130,
            "Sound Descriptor (SNDR)": 128,
            "Sound Output (SOPM)": 131,
            "Spell (SPEL)": 22,
            "Static (STAT)": 34,
            "Static Collection": 35,
            "Story Manager Branch Node (SMBN)": 112,
            "Story Manager Event Node (SMEN)": 114,
            "Story Manager Quest Node (SMQN)": 113,
            "Talking Activator (TACT)": 25,
            "Texture Set (TXST)": 7,
            "Topic (DIAL)": 75,
            "Topic Info (INFO)": 76,
            "Tree (TREE)": 38,
            "Visual/Reference Effect (RFCT)": 57,
            "Voice Type (VTYP)": 98,
            "Water (WATR)": 84,
            "Weapon (WEAP)": 41,
            "Weather (WTHR)": 54,
            "Word of Power (WOOP)": 118,
            "Worldspace (WRLD)": 71
        }
        return items
"""
class SublimePapyrusSkyrimAnimationEventNameSuggestionsCommand(SublimePapyrus.SublimePapyrusShowSuggestionsCommand):
# http://www.creationkit.com/Animation_Events
def get_items(self, **args):
items = {
# Magic
"BeginCastRight (magic)": "BeginCastRight",
"BeginCastLeft (magic)": "BeginCastLeft",
"MRh_SpellFire_Event (magic)": "MRh_SpellFire_Event",
"MLh_SpellFire_Event (magic)": "MLh_SpellFire_Event",
# Hand-to-hand
"preHitFrame (hand-to-hand)": "preHitFrame",
"weaponSwing (hand-to-hand)": "weaponSwing",
"SoundPlay.WPNSwingUnarmed (hand-to-hand)": "SoundPlay.WPNSwingUnarmed",
"HitFrame (hand-to-hand)": "HitFrame",
"weaponLeftSwing (hand-to-hand)": "weaponLeftSwing",
# Bows - Quick draw and shot
"bowDraw (bows)": "bowDraw",
"SoundPlay.WPNBowNockSD (bows)": "SoundPlay.WPNBowNockSD",
"BowZoomStop (bows)": "BowZoomStop",
"arrowAttach (bows)": "arrowAttach",
"bowDrawStart (bows)": "bowDrawStart",
"InitiateWinBegin (bows)": "InitiateWinBegin",
"BowRelease (bows)": "BowRelease",
"arrowRelease (bows)": "arrowRelease",
"tailCombatIdle (bows)": "tailCombatIdle",
"AttackWinStart (bows)": "AttackWinStart",
"bowEnd (bows)": "bowEnd",
"attackStop (bows)": "attackStop",
"tailCombatState (bows)": "tailCombatState",
"bowReset (bows)": "bowReset",
"arrowDetach (bows)": "arrowDetach",
# Bows - Full draw, held for a moment
"initiateWinEnd (bows)": "initiateWinEnd",
"BowDrawn (bows)": "BowDrawn",
# Blocking
"tailCombatIdle (blocking)": "tailCombatIdle",
"SoundPlay.NPCHumanCombatShieldBlock (blocking)": "SoundPlay.NPCHumanCombatShieldBlock",
"blockStartOut (blocking)": "blockStartOut",
"SoundPlay.NPCHumanCombatShieldRelease (blocking)": "SoundPlay.NPCHumanCombatShieldRelease",
"blockStop (blocking)": "blockStop",
"SoundPlay.NPCHumanCombatShieldBash (blocking)": "SoundPlay.NPCHumanCombatShieldBash",
"preHitFrame (blocking)": "preHitFrame",
"HitFrame (blocking)": "HitFrame",
"FootScuffRight (blocking)": "FootScuffRight",
# Sneak non-combat
"tailSneakIdle (sneaking, not in combat)": "tailSneakIdle",
"tailSneakLocomotion (sneaking, not in combat)": "tailSneakLocomotion",
"tailMTIdle (sneaking, not in combat)": "tailMTIdle",
"tailMTLocomotion (sneaking, not in combat)": "tailMTLocomotion",
# Sneak combat
"tailSneakIdle (sneaking, in combat)": "tailSneakIdle",
"tailSneakLocomotion (sneaking, in combat)": "tailSneakLocomotion",
"tailCombatIdle (sneaking, in combat)": "tailCombatIdle",
"tailCombatLocomotion (sneaking, in combat)": "tailCombatLocomotion",
# Water
"SoundPlay.FSTSwimSwim (swimming)": "SoundPlay.FSTSwimSwim",
"MTState (swimming)": "MTState",
# Walking, turning, and jumping
"Left foot in motion (walking)": "FootLeft",
"Right foot in motion (walking)": | |
computeMVEE(Q[:, :-1], alg_type=0) # compute the MVEE of Q
else: # otherwise
# get the index of the maximal and minimal point on the line, i.e., both its ends
idx_in_P = np.unique([np.argmin(Q[:, :-1]).astype(np.int),
np.argmax(Q[:, :-1]).astype(np.int)]).tolist()
return Q[idx_in_P], idx_in_P, Utils.UPPER_BOUND(r)
C = []
# idx_in_P_list = []
# C_list = []
# ts = time.time()
# for q in S: # for each boundary points along the axis of the MVEE of Q
# K = attainCaratheodorySet(P[:, :-1], q) # get d+1 indices of points from Q where q is their convex
# # combination
# idx_in_P_list += [int(idx) for idx in K] # get the indices of the coreset point in Q
# C_list += [int(Q[idx, -1]) for idx in K] # the actual coreset points
# # print('Time for list {}'.format(time.time() - ts))
idx_in_P = np.empty((2*(Utils.J + 1) ** 2, )).astype(np.int)
C = np.empty((2*(Utils.J + 1) ** 2, )).astype(np.int)
idx = 0
# ts = time.time()
for q in S: # for each boundary points along the axis of the MVEE of Q
K = attainCaratheodorySet(Q[:, :-1], q) # get d+1 indices of points from Q where q is their convex
# combination
idx_in_P[idx:idx+K.shape[0]] = K.astype(np.int) # get the indices of the coreset point in Q
C[idx:idx+K.shape[0]] = Q[idx_in_P[idx:idx+K.shape[0]], -1].astype(np.int)
idx += K.shape[0]
# print('Time for numpy {}'.format(time.time() - ts))
return np.unique(C[:idx]), np.unique(idx_in_P[:idx]), Utils.UPPER_BOUND(r)
####################################################### Bicriteria #####################################################
def attainClosestPointsToSubspaces(P, W, flats, indices):
    """
    Return the closest half of the considered points to a set of flats.

    :param P: A numpy matrix whose rows are points.
    :param W: A numpy array of weights per point (unused here; kept for a
        uniform signature across the bicriteria helpers).
    :param flats: Either a list of flats, each represented by an orthogonal
        matrix and a translation vector, or (accelerated mode) a single
        flat (X, v).
    :param indices: A numpy array of indices of points in P to consider.
    :return: A list identifying the closest N//2 of the considered points.
        NOTE(review): the accelerated branch returns *positions* into
        `indices`, while the exact branch returns the index *values*
        themselves; callers that np.delete positions from their index
        array (e.g. computeBicriteria) rely on the positional form --
        confirm before using the exact branch.
    """
    N = indices.shape[0]
    if not Utils.ACCELERATE_BICRETERIA:
        dists = np.empty((N, ))
        for i in range(N):
            # distance of point i to its closest flat among all the flats
            dists[i] = np.min([
                Utils.computeDistanceToSubspace(P[np.array([indices[i]]), :], flats[j][0], flats[j][1])
                for j in range(len(flats))])
        # BUGFIX: np.int was removed from NumPy (>= 1.24); use builtin int
        return np.array(indices)[np.argsort(dists).astype(int)[:int(N / 2)]].tolist()
    # accelerated mode: a single flat, one vectorized distance computation
    dists = Utils.computeDistanceToSubspace(P[indices, :], flats[0], flats[1])
    # argpartition yields the N//2 smallest distances in O(N)
    idxs = np.argpartition(dists, N // 2)[:N//2]
    return idxs.tolist()
def sortDistancesToSubspace(P, X, v, points_indices):
    """
    Sort a subset of points by ascending distance to the flat (X, v).

    :param P: A numpy matrix whose rows are points.
    :param X: An orthogonal matrix whose span is a subspace.
    :param v: A numpy array denoting a translation vector.
    :param points_indices: A numpy array of indices into P restricting the
        computation to a subset of the points.
    :return: The entries of points_indices sorted by distance to (X, v).
    """
    # distance between the addressed points and the flat represented by (X, v)
    dists = Utils.computeDistanceToSubspace(P[points_indices, :], X, v)
    # BUGFIX: np.int was removed from NumPy (>= 1.24); use the builtin int
    return np.array(points_indices)[np.argsort(dists).astype(int)].tolist()
def computeSubOptimalFlat(P, weights):
    """
    Compute a sub-optimal flat fitting P under the l2^2 loss.

    The flat is derived from a truncated (randomized) SVD factorization of
    the points after centering them at their weighted mean.

    :param P: A numpy matrix whose rows are the points.
    :param weights: A numpy array holding a weight for each row of P.
    :return: A pair (V, v): an orthogonal matrix spanning the flat and a
        translation vector.
    """
    translation = np.average(P, axis=0, weights=weights)  # weighted centroid
    centered = P - translation
    factorization = TruncatedSVD(algorithm='randomized', n_iter=1,
                                 n_components=Utils.J).fit(centered)
    return factorization.components_, translation
def clusterIdxsBasedOnKSubspaces(P, B):
    """
    Partition the points of P into the clusters induced by a list of flats.

    :param P: A numpy matrix whose rows are the points.
    :param B: A list of flats, each represented by an orthogonal matrix and
        a translation vector.
    :return: A numpy array where each entry holds the index of the flat in
        B which is closest to the corresponding point of P.
    """
    n = P.shape[0]
    idxs = np.arange(n)  # a numpy array of indices over all points
    centers = np.array(B)  # a numpy array of the flats
    # distance between each point and each flat: row i of `dists` holds the
    # distances of all points to flat i
    dists = np.apply_along_axis(lambda x: Utils.computeDistanceToSubspace(P[idxs, :], x[0], x[1]), 1, centers)
    idxs = np.argmin(dists, axis=0)  # index of the closest flat per point
    return idxs  # return the index of the closest flat to each point
def addFlats(P, W, S, B):
    """
    Compute every flat passing through one point from each of the j+1
    sampled subsets and append them to B.

    :param P: A numpy matrix whose rows are the points.
    :param W: A numpy array of weights per point.
    :param S: A list of j+1 subsets of point indices (column vectors).
    :param B: The running list of flats; extended in place.
    :return: A pair: the indices (into B) of the newly added flats, and B.
    """
    indices = [np.arange(S[i].shape[0]) for i in range(len(S))]
    points = np.meshgrid(*indices)  # compute a mesh grid using the duplicated coefs
    points = np.array([p.flatten() for p in points])  # flatten each point in the meshgrid for computing the
    # all possible ordered sets of j+1 points
    idx = len(B)  # remember where the newly added flats start
    for i in range(points.shape[1]):
        # pick one sampled index from each subset; S[j][...][0] unwraps the
        # column-vector entry into a scalar point index
        A = [S[j][points[j, i]][0] for j in range(points.shape[0])]
        P_sub, W_sub = P[A, :], W[A]
        # best-fit flat through the j+1 chosen (weighted) points
        B.append(computeSubOptimalFlat(P_sub, W_sub))
    return np.arange(idx, len(B)), B
def computeBicriteria(P, W):
    """
    Implementation of Algorithm Approx-k-j-Flats(P, k, j) from the paper
    "Bi-criteria Linear-time Approximations for Generalized
    k-Mean/Median/Center". Returns a (2^j, O(log(n) * (jk)^O(j)))
    approximation for the (k, j)-projective clustering problem under the
    l2^2 loss function.

    :param P: A numpy matrix whose rows are the points.
    :param W: A numpy array of weights per point.
    :return: A list of flats approximating the optimal solution.
    """
    n = P.shape[0]
    Q = np.arange(0, n, 1)  # indices of the still-unserved points
    t = 1
    B = []
    tol_sample_size = Utils.K * (Utils.J + 1)
    # sample size grows slowly with the iteration counter t
    sample_size = (lambda t: int(np.ceil(Utils.K * (Utils.J + 1) * (2 + np.log(Utils.J + 1) +
                                                                    np.log(Utils.K) +
                                                                    min(t, np.log(np.log(n)))))))
    while np.size(Q) >= tol_sample_size:  # run while we have a large enough set of points
        S = []
        for i in range(0, Utils.J + 1):  # sample j + 1 subsets of points i.i.d.
            # NOTE(review): np.random.choice samples WITH replacement here;
            # confirm duplicates among sampled indices are acceptable.
            random_sample = np.random.choice(Q, size=sample_size(t))
            S.append(random_sample[:, np.newaxis])
        if not Utils.ACCELERATE_BICRETERIA:
            # BUGFIX: addFlats returns (indices of the new flats, B); the
            # previous code passed that tuple straight to
            # attainClosestPointsToSubspaces as if it were a list of flats.
            new_flat_idxs, B = addFlats(P, W, S, B)
            F = [B[i] for i in new_flat_idxs]
        else:
            S = np.unique(np.vstack(S).flatten())
            F = computeSubOptimalFlat(P[S, :], W[S])
            B.append(F)
        sorted_indices = attainClosestPointsToSubspaces(P, W, F, Q)
        Q = np.delete(Q, sorted_indices)  # drop the closest half of the points
        t += 1
    # handle the remaining small set of points
    if not Utils.ACCELERATE_BICRETERIA:
        # NOTE(review): Q is 1-D here while addFlats indexes its subsets as
        # column vectors (as sampled above); possibly Q[:, np.newaxis] was
        # intended -- confirm the non-accelerated path.
        _, B = addFlats(P, W, [Q for i in range(Utils.J + 1)], B)
    else:
        F = computeSubOptimalFlat(P[Q.flatten(), :], W[Q.flatten()])
        B.append(F)
    return B
################################################### L1Coreset ##########################################################
def applyBiCriterea(P, W):
    """
    Run the bicriteria algorithm and cluster the rows of P by its output.

    :param P: A numpy matrix whose rows are the points.
    :param W: A numpy array of weights per point.
    :return:
        - B: the set of flats returned by the bicriteria algorithm, i.e.,
          O((jk)^{j+1}) j-flats attaining a 2^j approximation towards the
          optimal (k, j)-projective clustering problem on P.
        - idxs: per point in P, the index of the flat in B assigned to it.
    """
    flats = computeBicriteria(P, W)  # flats produced by the bicriteria algorithm
    assignment = clusterIdxsBasedOnKSubspaces(P, flats)  # best-fitting flat per point
    return flats, assignment
def initializeSens(P, B, idxs):
    """
    Initialize sensitivities from the bicriteria solution: each point gets
    its distance to the assigned flat, normalized by the total distance of
    its cluster to that flat, scaled by 2^j.

    :param P: A numpy matrix whose rows are points; distances are measured
        on all but the last column (which carries auxiliary data).
    :param B: A set of flats, each represented by an orthogonal matrix and
        a translation vector.
    :param idxs: A numpy array assigning each row of P to a flat in B.
    :return: A numpy array holding the additive sensitivity term per point.
    """
    sens = np.zeros((P.shape[0], ))
    for flat_idx in np.unique(idxs):  # iterate over the clusters B imposes
        members = np.where(idxs == flat_idx)[0]  # points in this cluster
        # distance of every cluster member to its assigned flat
        costs = Utils.computeDistanceToSubspace(P[members, :-1],
                                                B[flat_idx][0], B[flat_idx][1])
        # per-point cost normalized by the cluster's total cost, scaled by
        # 2^j; nan_to_num guards against a zero total cost
        sens[members] = 2 ** Utils.J * np.nan_to_num(costs / np.sum(costs))
    return sens
def Level(P, k, V, desired_eps=0.01):
"""
The algorithm is an implementation of | |
#<NAME>
#Settlers of Catan, 2020
import pygame
from hexTile import *
from hexLib import *
pygame.init()
#Class to handle catan board display
class catanGameView():
'Class definition for Catan board display'
def __init__(self, catanBoardObject, catanGameObject):
    """
    Store the board/game references and set up the pygame window and fonts.

    :param catanBoardObject: the board model to render
    :param catanGameObject: the game controller (players, state)
    """
    self.board = catanBoardObject
    self.game = catanGameObject
    # Use pygame to display the board
    self.screen = pygame.display.set_mode(self.board.size)
    pygame.display.set_caption('Settlers of Catan')
    self.font_resource = pygame.font.SysFont('georgia', 15)
    self.font_ports = pygame.font.SysFont('georgia', 12)
    #self.font_button = pygame.font.SysFont('arialrounded', 12)
    # BUGFIX: displayDiceRoll() uses self.font_diceRoll; its creation was
    # commented out here, raising AttributeError on the first dice roll.
    self.font_diceRoll = pygame.font.SysFont('georgia', 25)  # dice font
    self.font_Robber = pygame.font.SysFont('arialblack', 50)  # robber font
    return None
#Function to display the initial board
def displayInitialBoard(self):
    """
    Render the static board: blue background, hex tiles with resource
    labels and roll numbers, and the port labels; then update the display.
    """
    #Dictionary to store RGB Color values - new and improved!
    colorDict_RGB = {"BRICK":(196,84,18), "ORE":(195,179,169), "WHEAT":(228,158,32), "WOOD":(59,122,48), "SHEEP":(120,179,31), "DESERT":(250,235,150)}
    pygame.draw.rect(self.screen, pygame.Color(22,145,198), (0,0,self.board.width, self.board.height)) #blue background
    #Render each hexTile
    for hexTile in self.board.hexTileDict.values():
        hexTileCorners = polygon_corners(self.board.flat, hexTile.hex)
        hexTileColor_rgb = colorDict_RGB[hexTile.resource.type]
        # NOTE(review): the last argument (line width) is the boolean
        # `self.board.width==0` -- presumably always False (0), i.e. a
        # filled polygon; confirm this comparison is intentional.
        pygame.draw.polygon(self.screen, pygame.Color(hexTileColor_rgb[0],hexTileColor_rgb[1], hexTileColor_rgb[2]), hexTileCorners, self.board.width==0)
        #print(hexTile.index, hexTileCorners)
        hexTile.pixelCenter = hex_to_pixel(self.board.flat, hexTile.hex) #Get pixel center coordinates of hex
        if(hexTile.resource.type != 'DESERT'): #skip desert text/number
            resourceText = self.font_resource.render(str(hexTile.resource.type) + " (" +str(hexTile.resource.num) + ")", False, (0,0,0))
            self.screen.blit(resourceText, (hexTile.pixelCenter.x -25, hexTile.pixelCenter.y)) #add text to hex
    #Display the Ports
    for vCoord, vertexInfo in self.board.boardGraph.items():
        if(vertexInfo.port != False):
            portText = self.font_ports.render(vertexInfo.port, False, (220,0,0))
            self.screen.blit(portText, (vCoord.x, vCoord.y))
    pygame.display.update()
    return None
#Function to draw a road on the board
def draw_road(self, edgeToDraw, roadColor):
    """Draw a built road as a thick (width 10) line along the given edge."""
    start, end = edgeToDraw[0], edgeToDraw[1]
    pygame.draw.line(self.screen, pygame.Color(roadColor), start, end, 10)
#Function to draw a potential road on the board - thin
def draw_possible_road(self, edgeToDraw, roadColor):
    """Draw a thin (width 5) line marking a buildable road; return its Rect."""
    start, end = edgeToDraw[0], edgeToDraw[1]
    return pygame.draw.line(self.screen, pygame.Color(roadColor), start, end, 5)
#Function to draw a settlement on the board at vertexToDraw
def draw_settlement(self, vertexToDraw, color):
    """Draw a built settlement as a filled 25x25 square at the vertex."""
    square = pygame.Rect(vertexToDraw.x - 10, vertexToDraw.y - 10, 25, 25)
    pygame.draw.rect(self.screen, pygame.Color(color), square)
#Function to draw a potential settlement on the board - thin
def draw_possible_settlement(self, vertexToDraw, color):
    """Outline (width 3, radius 20) a legal settlement spot; return its Rect."""
    center = (int(vertexToDraw.x), int(vertexToDraw.y))
    return pygame.draw.circle(self.screen, pygame.Color(color), center, 20, 3)
#Function to draw a settlement on the board at vertexToDraw
def draw_city(self, vertexToDraw, color):
    """Draw a built city as a filled circle of radius 24 at the vertex."""
    center = (int(vertexToDraw.x), int(vertexToDraw.y))
    pygame.draw.circle(self.screen, pygame.Color(color), center, 24)
#Function to draw a potential settlement on the board - thin
def draw_possible_city(self, vertexToDraw, color):
    """Outline (width 5, radius 25) a legal city spot; return its Rect."""
    center = (int(vertexToDraw.x), int(vertexToDraw.y))
    return pygame.draw.circle(self.screen, pygame.Color(color), center, 25, 5)
#Function to draw the possible spots for a robber
def draw_possible_robber(self, vertexToDraw):
    """Outline (black, width 5, radius 50) a hex the robber can move to; return its Rect."""
    center = (int(vertexToDraw.x), int(vertexToDraw.y))
    return pygame.draw.circle(self.screen, pygame.Color('black'), center, 50, 5)
#Function to draw possible players to rob
def draw_possible_players_to_rob(self, vertexCoord):
    """Outline (black, width 5, radius 35) a robbable player's building; return its Rect."""
    center = (int(vertexCoord.x), int(vertexCoord.y))
    return pygame.draw.circle(self.screen, pygame.Color('black'), center, 35, 5)
"""#Function to render basic gameplay buttons
def displayGameButtons(self):
#Basic GamePlay Buttons
diceRollText = self.font_button.render("ROLL DICE", False, (0,0,0))
buildRoadText = self.font_button.render("ROAD", False, (0,0,0))
buildSettleText = self.font_button.render("SETTLE", False, (0,0,0))
buildCityText = self.font_button.render("CITY", False, (0,0,0))
endTurnText = self.font_button.render("END TURN", False, (0,0,0))
devCardText = self.font_button.render("DEV CARD", False, (0,0,0))
playDevCardText = self.font_button.render("USE DEV C.", False, (0,0,0))
self.rollDice_button = pygame.Rect(20, 10, 80, 40)
self.buildRoad_button = pygame.Rect(20, 70, 80, 40)
self.buildSettlement_button = pygame.Rect(20, 120, 80, 40)
self.buildCity_button = pygame.Rect(20, 170, 80, 40)
self.devCard_button = pygame.Rect(20, 220, 80, 40)
self.playDevCard_button = pygame.Rect(20, 270, 80, 40)
self.endTurn_button = pygame.Rect(20, 330, 80, 40)
pygame.draw.rect(self.screen, pygame.Color('white'), self.rollDice_button)
pygame.draw.rect(self.screen, pygame.Color('burlywood'), self.buildRoad_button)
pygame.draw.rect(self.screen, pygame.Color('burlywood'), self.buildSettlement_button)
pygame.draw.rect(self.screen, pygame.Color('burlywood'), self.buildCity_button)
pygame.draw.rect(self.screen, pygame.Color('burlywood'), self.devCard_button)
pygame.draw.rect(self.screen, pygame.Color('burlywood'), self.playDevCard_button)
pygame.draw.rect(self.screen, pygame.Color('red'), self.endTurn_button)
self.screen.blit(diceRollText,(30, 20))
self.screen.blit(buildRoadText,(30,80))
self.screen.blit(buildSettleText,(30,130))
self.screen.blit(buildCityText, (30,180))
self.screen.blit(devCardText, (30,230))
self.screen.blit(playDevCardText, (30,280))
self.screen.blit(endTurnText,(30,340))"""
#Function to display robber
def displayRobber(self):
    """
    Render the robber marker ('R') on the hex currently holding it.
    Does nothing if no tile has its robber flag set.
    """
    #Robber text
    robberText = self.font_Robber.render("R", False, (0,0,0))
    #Get the coordinates for the robber
    for hexTile in self.board.hexTileDict.values():
        if(hexTile.robber):
            robberCoords = hexTile.pixelCenter
            # BUGFIX: blit inside the guard; previously the blit ran after
            # the loop, so robberCoords was unbound (NameError) whenever no
            # tile carried the robber.
            self.screen.blit(robberText, (int(robberCoords.x) -20, int(robberCoords.y)-35))
            break  # only one robber exists on the board
#Function to display the gameState board - use to display intermediate build screens
#gameScreenState specifies which type of screen is to be shown
def displayGameScreen(self):
    """
    Render the full game state: the static board, the robber, and every
    player's existing roads, settlements and cities; then update the
    display.
    """
    #First display all initial hexes and regular buttons
    self.displayInitialBoard()
    #self.displayGameButtons()
    self.displayRobber()
    #Loop through and display all existing buildings from players build graphs
    for player_i in list(self.game.playerQueue.queue): #Build Settlements and roads of each player
        for existingRoad in player_i.buildGraph['ROADS']:
            self.draw_road(existingRoad, player_i.color)
        for settlementCoord in player_i.buildGraph['SETTLEMENTS']:
            self.draw_settlement(settlementCoord, player_i.color)
        for cityCoord in player_i.buildGraph['CITIES']:
            self.draw_city(cityCoord, player_i.color)
    pygame.display.update()
    return
#TO-DO Add screens for trades
def quitGameScreen(self):
    """Tear down the pygame display window."""
    pygame.display.quit()
#Function to display dice roll
def displayDiceRoll(self, diceNums):
    """
    Show the result of a dice roll in the top-left status area.

    :param diceNums: the rolled value(s) to display
    """
    #Reset blue background and show dice roll
    pygame.draw.rect(self.screen, pygame.Color('royalblue2'), (100, 20, 50, 50)) #blue background
    # BUGFIX: self.font_diceRoll may not exist (its creation was commented
    # out in __init__); create and cache it on first use so the first dice
    # roll no longer raises AttributeError.
    if not hasattr(self, 'font_diceRoll'):
        self.font_diceRoll = pygame.font.SysFont('georgia', 25)
    diceNum = self.font_diceRoll.render(str(diceNums), False, (0,0,0))
    self.screen.blit(diceNum,(110, 20))
    pygame.display.update()
    return
def buildRoad_display(self, currentPlayer, roadsPossibleDict):
    '''Function to control build-road action with display.
    args: currentPlayer - the player building the road;
          roadsPossibleDict - dict of candidate road edges (values are
          replaced below by the clickable Rects of the drawn lines)
    returns: the edge of the road to build, or None when the player clicks
             elsewhere (cancel; only possible outside the setup phase)
    '''
    #Get all spots the player can build a road and display thin lines
    #Get Rect representation of roads and draw possible roads
    for roadEdge in roadsPossibleDict.keys():
        if roadsPossibleDict[roadEdge]:
            # replace the boolean with the Rect returned by the draw call
            # so it can be hit-tested against mouse clicks below
            roadsPossibleDict[roadEdge] = self.draw_possible_road(roadEdge, currentPlayer.color)
            print("displaying road")
    pygame.display.update()
    mouseClicked = False #Get player actions until a mouse is clicked
    while(mouseClicked == False):
        if(self.game.gameSetup):#during gameSetup phase only exit if road is built
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    # NOTE(review): `sys` is not among this module's visible
                    # imports -- presumably provided by the wildcard imports
                    # from hexTile/hexLib; confirm.
                    sys.exit(0)
                if(e.type == pygame.MOUSEBUTTONDOWN):
                    # NOTE(review): entries whose value stayed False above
                    # would raise AttributeError on collidepoint -- confirm
                    # the dict only maps buildable edges at this point.
                    for road, roadRect in roadsPossibleDict.items():
                        if(roadRect.collidepoint(e.pos)):
                            #currentPlayer.build_road(road[0], road[1], self.board)
                            mouseClicked = True
                            return road
        else:
            for e in pygame.event.get():
                if(e.type == pygame.MOUSEBUTTONDOWN): #Exit this loop on mouseclick
                    for road, roadRect in roadsPossibleDict.items():
                        if(roadRect.collidepoint(e.pos)):
                            #currentPlayer.build_road(road[0], road[1], self.board)
                            return road
                    # a click anywhere else cancels the action (non-setup)
                    mouseClicked = True
    return None
def buildSettlement_display(self, currentPlayer, verticesPossibleDict):
    '''Function to control build-settlement action with display.
    args: currentPlayer - the player building the settlement;
          verticesPossibleDict - dict of candidate vertices (values are
          replaced below by the clickable Rects of the drawn circles)
    returns: the vertex of the settlement to build, or None when the player
             clicks elsewhere (cancel; only possible outside setup)
    '''
    #Get all spots the player can build a settlement and display thin circles
    #Add in the Rect representations of possible settlements
    for v in verticesPossibleDict.keys():
        if verticesPossibleDict[v]:
            # replace the boolean with the Rect returned by the draw call
            # so it can be hit-tested against mouse clicks below
            verticesPossibleDict[v] = self.draw_possible_settlement(v, currentPlayer.color)
    pygame.display.update()
    mouseClicked = False #Get player actions until a mouse is clicked
    while(mouseClicked == False):
        if(self.game.gameSetup): #during gameSetup phase only exit if settlement is built
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    # NOTE(review): `sys` is not among this module's visible
                    # imports -- presumably provided by the wildcard imports
                    # from hexTile/hexLib; confirm.
                    sys.exit(0)
                if(e.type == pygame.MOUSEBUTTONDOWN):
                    # NOTE(review): entries whose value stayed False above
                    # would raise AttributeError on collidepoint -- confirm
                    # the dict only maps buildable vertices at this point.
                    for vertex, vertexRect in verticesPossibleDict.items():
                        if(vertexRect.collidepoint(e.pos)):
                            #currentPlayer.build_settlement(vertex, self.board)
                            mouseClicked = True
                            return vertex
        else:
            for e in pygame.event.get():
                if(e.type == pygame.MOUSEBUTTONDOWN): #Exit this loop on mouseclick
                    for vertex, vertexRect in verticesPossibleDict.items():
                        if(vertexRect.collidepoint(e.pos)):
                            #currentPlayer.build_settlement(vertex, self.board)
                            return vertex
                    # a click anywhere else cancels the action (non-setup)
                    mouseClicked = True
    return None
def buildCity_display(self, currentPlayer, verticesPossibleDict):
    '''Function to control build-city action with display.
    args: currentPlayer - the player building the city;
          verticesPossibleDict - dict of candidate city vertices (values
          are replaced below by the clickable Rects of the drawn circles)
    returns: the vertex of the city to build, or None when the player
             clicks elsewhere (cancel)
    '''
    #Get all spots the player can build a city and display circles
    #Get Rect representation of roads and draw possible roads
    for c in verticesPossibleDict.keys():
        if verticesPossibleDict[c]:
            # replace the boolean with the Rect returned by the draw call
            # so it can be hit-tested against mouse clicks below
            verticesPossibleDict[c] = self.draw_possible_city(c, currentPlayer.color)
    pygame.display.update()
    mouseClicked = False #Get player actions until a mouse is clicked - whether a city is built or not
    while(mouseClicked == False):
        for e in pygame.event.get():
            if(e.type == pygame.MOUSEBUTTONDOWN): #Exit this loop on mouseclick
                for vertex, vertexRect in verticesPossibleDict.items():
                    if(vertexRect.collidepoint(e.pos)):
                        #currentPlayer.build_city(vertex, self.board)
                        return vertex
                # a click anywhere else cancels the action
                mouseClicked = True
    return None
#Function to control the move-robber action with display
def moveRobber_display(self, currentPlayer, possibleRobberDict):
    """
    Let the player pick (by mouse click) a hex to move the robber to, then
    pick a player to rob on that hex.

    :param currentPlayer: the player moving the robber
    :param possibleRobberDict: dict mapping hex index -> hexTile the robber
        may move to; values are replaced below by clickable Rects
    :return: (hexIndex, playerToRob) chosen via the display
    """
    #Get all spots the player can move robber to and show circles
    #Add in the Rect representations of possible robber spots
    for R in possibleRobberDict.keys():
        possibleRobberDict[R] = self.draw_possible_robber(possibleRobberDict[R].pixelCenter)
    pygame.display.update()
    mouseClicked = False #Get player actions until a mouse is clicked - whether a road is built or not
    while(mouseClicked == False):
        for e in pygame.event.get():
            if(e.type == pygame.MOUSEBUTTONDOWN): #Exit this loop on mouseclick
                for hexIndex, robberCircleRect in possibleRobberDict.items():
                    if(robberCircleRect.collidepoint(e.pos)):
                        #Add code to choose which player to rob depending on hex clicked on
                        possiblePlayerDict = self.board.get_players_to_rob(hexIndex)
                        playerToRob = self.choosePlayerToRob_display(possiblePlayerDict)
                        #Move robber to that hex and rob
                        #currentPlayer.move_robber(hexIndex, self.board, playerToRob) #Player moved robber to this hex
                        mouseClicked = True #Only exit out once a correct robber spot is chosen
                        return hexIndex, playerToRob
#Function to control the choice of player to rob with display
#Returns the choice of player to rob
def choosePlayerToRob_display(self, possiblePlayerDict):
#Get all other players the player can move robber to and show circles
for player, vertex in possiblePlayerDict.items():
possiblePlayerDict[player] = self.draw_possible_players_to_rob(vertex)
pygame.display.update()
#If dictionary is empty return None
if(possiblePlayerDict == {}):
return None
mouseClicked = False #Get player actions until a mouse is clicked - whether a road is built or not
while(mouseClicked == False):
for e in pygame.event.get():
if(e.type == | |
test_teams_id_team_data_records_count_get(self):
"""
Test case for teams_id_team_data_records_count_get
Count Dynamic Data records
"""
pass
def test_teams_id_team_data_records_delete(self):
"""
Test case for teams_id_team_data_records_delete
Delete all matching records.
"""
pass
def test_teams_id_team_data_records_fk_delete(self):
"""
Test case for teams_id_team_data_records_fk_delete
Delete a model instance by {{fk}} from the data source.
"""
pass
def test_teams_id_team_data_records_fk_get(self):
"""
Test case for teams_id_team_data_records_fk_get
Find a model instance by {{fk}} from the data source.
"""
pass
def test_teams_id_team_data_records_fk_property_name_upload_put(self):
"""
Test case for teams_id_team_data_records_fk_property_name_upload_put
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_teams_id_team_data_records_fk_put(self):
"""
Test case for teams_id_team_data_records_fk_put
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_teams_id_team_data_records_get(self):
"""
Test case for teams_id_team_data_records_get
Find all instances of the model matched by filter from the data source.
"""
pass
def test_teams_id_team_data_records_migrate_post(self):
"""
Test case for teams_id_team_data_records_migrate_post
Request migration for Dynamic Data records
"""
pass
def test_teams_id_team_data_records_post(self):
"""
Test case for teams_id_team_data_records_post
Create a new instance of the model and persist it into the data source.
"""
pass
def test_teams_id_team_data_records_upload_csv_post(self):
"""
Test case for teams_id_team_data_records_upload_csv_post
Upload CSV for this Dynamic Data
"""
pass
def test_teams_id_team_data_team_get(self):
"""
Test case for teams_id_team_data_team_get
Fetches belongsTo relation team.
"""
pass
def test_teams_id_team_members_count_get(self):
"""
Test case for teams_id_team_members_count_get
Counts teamMembers of Team.
"""
pass
def test_teams_id_team_members_delete(self):
"""
Test case for teams_id_team_members_delete
Deletes all teamMembers of this model.
"""
pass
def test_teams_id_team_members_fk_delete(self):
"""
Test case for teams_id_team_members_fk_delete
Delete a related item by id for teamMembers.
"""
pass
def test_teams_id_team_members_fk_get(self):
"""
Test case for teams_id_team_members_fk_get
Find a related item by id for teamMembers.
"""
pass
def test_teams_id_team_members_fk_put(self):
"""
Test case for teams_id_team_members_fk_put
Update a related item by id for teamMembers.
"""
pass
def test_teams_id_team_members_get(self):
"""
Test case for teams_id_team_members_get
Queries teamMembers of Team.
"""
pass
def test_teams_id_team_members_map_keys_get(self):
"""
Test case for teams_id_team_members_map_keys_get
Map teamMembers emails to teamMembers keys
"""
pass
def test_teams_id_team_members_post(self):
"""
Test case for teams_id_team_members_post
Creates a new instance in teamMembers of this model.
"""
pass
def test_teams_id_template_folders_count_get(self):
"""
Test case for teams_id_template_folders_count_get
Counts templateFolders of Team.
"""
pass
def test_teams_id_template_folders_delete(self):
"""
Test case for teams_id_template_folders_delete
Deletes all templateFolders of this model.
"""
pass
def test_teams_id_template_folders_fk_delete(self):
"""
Test case for teams_id_template_folders_fk_delete
Delete a related item by id for templateFolders.
"""
pass
def test_teams_id_template_folders_fk_get(self):
"""
Test case for teams_id_template_folders_fk_get
Find a related item by id for templateFolders.
"""
pass
def test_teams_id_template_folders_fk_put(self):
"""
Test case for teams_id_template_folders_fk_put
Update a related item by id for templateFolders.
"""
pass
def test_teams_id_template_folders_get(self):
"""
Test case for teams_id_template_folders_get
Queries templateFolders of Team.
"""
pass
def test_teams_id_template_folders_post(self):
"""
Test case for teams_id_template_folders_post
Creates a new instance in templateFolders of this model.
"""
pass
def test_teams_id_templates_count_get(self):
"""
Test case for teams_id_templates_count_get
Counts templates of Team.
"""
pass
def test_teams_id_templates_delete(self):
"""
Test case for teams_id_templates_delete
Deletes all templates of this model.
"""
pass
def test_teams_id_templates_fk_delete(self):
"""
Test case for teams_id_templates_fk_delete
Delete a related item by id for templates.
"""
pass
def test_teams_id_templates_fk_get(self):
"""
Test case for teams_id_templates_fk_get
Find a related item by id for templates.
"""
pass
def test_teams_id_templates_fk_put(self):
"""
Test case for teams_id_templates_fk_put
Update a related item by id for templates.
"""
pass
def test_teams_id_templates_get(self):
"""
Test case for teams_id_templates_get
Queries templates of Team.
"""
pass
def test_teams_id_templates_nk_designs_count_get(self):
"""
Test case for teams_id_templates_nk_designs_count_get
Counts designs of Template.
"""
pass
def test_teams_id_templates_nk_designs_fk_delete(self):
"""
Test case for teams_id_templates_nk_designs_fk_delete
Delete a related item by id for designs.
"""
pass
def test_teams_id_templates_nk_designs_fk_get(self):
"""
Test case for teams_id_templates_nk_designs_fk_get
Find a related item by id for designs.
"""
pass
def test_teams_id_templates_nk_designs_fk_put(self):
"""
Test case for teams_id_templates_nk_designs_fk_put
Update a related item by id for designs.
"""
pass
def test_teams_id_templates_nk_designs_get(self):
"""
Test case for teams_id_templates_nk_designs_get
Queries designs of Template.
"""
pass
def test_teams_id_templates_nk_designs_post(self):
"""
Test case for teams_id_templates_nk_designs_post
Creates a new instance in designs of this model.
"""
pass
def test_teams_id_templates_nk_members_count_get(self):
"""
Test case for teams_id_templates_nk_members_count_get
Counts members of Template.
"""
pass
def test_teams_id_templates_nk_members_delete(self):
"""
Test case for teams_id_templates_nk_members_delete
Deletes all members of this model.
"""
pass
def test_teams_id_templates_nk_members_fk_delete(self):
"""
Test case for teams_id_templates_nk_members_fk_delete
Delete a related item by id for members.
"""
pass
def test_teams_id_templates_nk_members_fk_get(self):
"""
Test case for teams_id_templates_nk_members_fk_get
Find a related item by id for members.
"""
pass
def test_teams_id_templates_nk_members_fk_put(self):
"""
Test case for teams_id_templates_nk_members_fk_put
Update a related item by id for members.
"""
pass
def test_teams_id_templates_nk_members_get(self):
"""
Test case for teams_id_templates_nk_members_get
Queries members of Template.
"""
pass
def test_teams_id_templates_nk_members_post(self):
"""
Test case for teams_id_templates_nk_members_post
Creates a new instance in members of this model.
"""
pass
def test_teams_id_templates_nk_members_rel_fk_delete(self):
"""
Test case for teams_id_templates_nk_members_rel_fk_delete
Remove the members relation to an item by id.
"""
pass
def test_teams_id_templates_nk_members_rel_fk_head(self):
"""
Test case for teams_id_templates_nk_members_rel_fk_head
Check the existence of members relation to an item by id.
"""
pass
def test_teams_id_templates_nk_members_rel_fk_put(self):
"""
Test case for teams_id_templates_nk_members_rel_fk_put
Add a related item by id for members.
"""
pass
def test_teams_id_templates_nk_permission_delete(self):
"""
Test case for teams_id_templates_nk_permission_delete
Deletes permission of this model.
"""
pass
def test_teams_id_templates_nk_permission_get(self):
"""
Test case for teams_id_templates_nk_permission_get
Fetches hasOne relation permission.
"""
pass
def test_teams_id_templates_nk_permission_post(self):
"""
Test case for teams_id_templates_nk_permission_post
Creates a new instance in permission of this model.
"""
pass
def test_teams_id_templates_nk_permission_put(self):
"""
Test case for teams_id_templates_nk_permission_put
Update permission of this model.
"""
pass
def test_teams_id_templates_nk_portal_folders_count_get(self):
"""
Test case for teams_id_templates_nk_portal_folders_count_get
Counts portalFolders of Template.
"""
pass
def test_teams_id_templates_nk_portal_folders_delete(self):
"""
Test case for teams_id_templates_nk_portal_folders_delete
Deletes all portalFolders of this model.
"""
pass
def test_teams_id_templates_nk_portal_folders_fk_delete(self):
"""
Test case for teams_id_templates_nk_portal_folders_fk_delete
Delete a related item by id for portalFolders.
"""
pass
def test_teams_id_templates_nk_portal_folders_fk_get(self):
"""
Test case for teams_id_templates_nk_portal_folders_fk_get
Find a related item by id for portalFolders.
"""
pass
def test_teams_id_templates_nk_portal_folders_fk_put(self):
"""
Test case for teams_id_templates_nk_portal_folders_fk_put
Update a related item by id for portalFolders.
"""
pass
def test_teams_id_templates_nk_portal_folders_get(self):
"""
Test case for teams_id_templates_nk_portal_folders_get
Queries portalFolders of Template.
"""
pass
def test_teams_id_templates_nk_portal_folders_post(self):
"""
Test case for teams_id_templates_nk_portal_folders_post
Creates a new instance in portalFolders of this model.
"""
pass
def test_teams_id_templates_nk_portal_folders_rel_fk_delete(self):
"""
Test case for teams_id_templates_nk_portal_folders_rel_fk_delete
Remove the portalFolders relation to an item by id.
"""
pass
def test_teams_id_templates_nk_portal_folders_rel_fk_head(self):
"""
Test case for teams_id_templates_nk_portal_folders_rel_fk_head
Check the existence of portalFolders relation to an item by id.
"""
pass
def test_teams_id_templates_nk_portal_folders_rel_fk_put(self):
"""
Test case for teams_id_templates_nk_portal_folders_rel_fk_put
Add a related item by id for portalFolders.
"""
pass
def test_teams_id_templates_nk_portals_count_get(self):
"""
Test case for teams_id_templates_nk_portals_count_get
Counts portals of Template.
"""
pass
def test_teams_id_templates_nk_portals_delete(self):
"""
Test case for teams_id_templates_nk_portals_delete
Deletes all portals of this model.
"""
pass
def test_teams_id_templates_nk_portals_fk_delete(self):
"""
Test case for teams_id_templates_nk_portals_fk_delete
Delete a related item by id for portals.
"""
pass
def test_teams_id_templates_nk_portals_fk_get(self):
"""
Test case for teams_id_templates_nk_portals_fk_get
Find a related item by id for portals.
"""
pass
def test_teams_id_templates_nk_portals_fk_put(self):
"""
Test case for teams_id_templates_nk_portals_fk_put
Update a related item by id for portals.
"""
pass
def test_teams_id_templates_nk_portals_get(self):
"""
Test case for teams_id_templates_nk_portals_get
Queries portals of Template.
"""
pass
def test_teams_id_templates_nk_portals_post(self):
"""
Test case for teams_id_templates_nk_portals_post
Creates a new instance in portals of this model.
"""
pass
def test_teams_id_templates_nk_portals_rel_fk_delete(self):
"""
Test case for teams_id_templates_nk_portals_rel_fk_delete
Remove the portals relation to an item by id.
"""
pass
def test_teams_id_templates_nk_portals_rel_fk_head(self):
"""
Test case for teams_id_templates_nk_portals_rel_fk_head
Check the existence of portals relation to an item by id.
"""
pass
def test_teams_id_templates_nk_portals_rel_fk_put(self):
"""
Test case for teams_id_templates_nk_portals_rel_fk_put
Add a related item by id for portals.
"""
pass
def test_teams_id_templates_nk_tags_count_get(self):
"""
Test case for teams_id_templates_nk_tags_count_get
Counts tags of Template.
"""
pass
def test_teams_id_templates_nk_tags_delete(self):
"""
Test case for teams_id_templates_nk_tags_delete
Deletes all tags of this model.
"""
pass
def test_teams_id_templates_nk_tags_fk_delete(self):
    """
    Test case for teams_id_templates_nk_tags_fk_delete

    Delete a related item by id for tags.
    TODO: generated stub -- no request or assertions implemented yet.
    """
    pass
def test_teams_id_templates_nk_tags_fk_get(self):
    """
    Test case for teams_id_templates_nk_tags_fk_get

    Find a related item by id for tags.
    TODO: generated stub -- no request or assertions implemented yet.
    """
    pass
def test_teams_id_templates_nk_tags_fk_put(self):
    """
    Test case for teams_id_templates_nk_tags_fk_put

    Update a related item by id for tags.
    TODO: generated stub -- no request or assertions implemented yet.
    """
    pass
def test_teams_id_templates_nk_tags_get(self):
    """
    Test case for teams_id_templates_nk_tags_get

    Queries tags of Template.
    TODO: generated stub -- no request or assertions implemented yet.
    """
    pass
def test_teams_id_templates_nk_tags_post(self):
"""
Test case for teams_id_templates_nk_tags_post
Creates a new instance in tags of this model.
"""
| |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.plotting.geometry` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from colour.plotting import quad, grid, cube
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestQuad', 'TestGrid', 'TestCube']
class TestQuad(unittest.TestCase):
    """
    Unit tests for the :func:`colour.plotting.geometry.quad` definition.
    """

    def test_quad(self):
        """
        Tests :func:`colour.plotting.geometry.quad` for the default plane,
        the 'xz' and 'yz' planes, positive and negative origin/dimensions,
        and rejection of an undefined plane.
        """
        # Each case: (positional args, keyword args, expected 4x3 vertices).
        cases = (
            ((), {},
             [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]),
            (('xz', ), {},
             [[0, 0, 0], [1, 0, 0], [1, 0, 1], [0, 0, 1]]),
            (('yz', ), {},
             [[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1]]),
            (('xy', ),
             {'origin': np.array([0.2, 0.4]),
              'width': 0.2,
              'height': 0.4,
              'depth': 0.6},
             [[0.2, 0.4, 0.6], [0.4, 0.4, 0.6], [0.4, 0.8, 0.6],
              [0.2, 0.8, 0.6]]),
            (('xy', ),
             {'origin': np.array([-0.2, -0.4]),
              'width': -0.2,
              'height': -0.4,
              'depth': -0.6},
             [[-0.2, -0.4, -0.6], [-0.4, -0.4, -0.6],
              [-0.4, -0.8, -0.6], [-0.2, -0.8, -0.6]]),
        )
        for args, kwargs, expected in cases:
            np.testing.assert_almost_equal(
                quad(*args, **kwargs), np.array(expected), decimal=7)

        self.assertRaises(ValueError, lambda: quad(plane='Undefined'))
class TestGrid(unittest.TestCase):
    """
    Defines :func:`colour.plotting.geometry.grid` definition unit tests
    methods.
    """

    def test_grid(self):
        """
        Tests :func:`colour.plotting.geometry.grid` definition: default
        plane, 'xz' and 'yz' planes, and 3x3-segmented grids with positive
        and negative origin / dimensions.
        """
        np.testing.assert_almost_equal(
            grid(),
            np.array([[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]]),
            decimal=7)
        np.testing.assert_almost_equal(
            grid('xz'),
            np.array([[[0, 0, 0], [1, 0, 0], [1, 0, 1], [0, 0, 1]]]),
            decimal=7)
        np.testing.assert_almost_equal(
            grid('yz'),
            np.array([[[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1]]]),
            decimal=7)
        # The expected value below lists the 3x3 = 9 faces of the segmented
        # grid, each face as four XYZ vertices at 8 decimal places.
        np.testing.assert_almost_equal(
            grid('xy',
                 origin=np.array([0.2, 0.4]),
                 width=0.2,
                 height=0.4,
                 depth=0.6,
                 width_segments=3,
                 height_segments=3),
            np.array(
                [[[0.20000000, 0.40000000, 0.60000000],
                  [0.26666667, 0.40000000, 0.60000000],
                  [0.26666667, 0.53333333, 0.60000000],
                  [0.20000000, 0.53333333, 0.60000000]],
                 [[0.20000000, 0.53333333, 0.60000000],
                  [0.26666667, 0.53333333, 0.60000000],
                  [0.26666667, 0.66666667, 0.60000000],
                  [0.20000000, 0.66666667, 0.60000000]],
                 [[0.20000000, 0.66666667, 0.60000000],
                  [0.26666667, 0.66666667, 0.60000000],
                  [0.26666667, 0.80000000, 0.60000000],
                  [0.20000000, 0.80000000, 0.60000000]],
                 [[0.26666667, 0.40000000, 0.60000000],
                  [0.33333333, 0.40000000, 0.60000000],
                  [0.33333333, 0.53333333, 0.60000000],
                  [0.26666667, 0.53333333, 0.60000000]],
                 [[0.26666667, 0.53333333, 0.60000000],
                  [0.33333333, 0.53333333, 0.60000000],
                  [0.33333333, 0.66666667, 0.60000000],
                  [0.26666667, 0.66666667, 0.60000000]],
                 [[0.26666667, 0.66666667, 0.60000000],
                  [0.33333333, 0.66666667, 0.60000000],
                  [0.33333333, 0.80000000, 0.60000000],
                  [0.26666667, 0.80000000, 0.60000000]],
                 [[0.33333333, 0.40000000, 0.60000000],
                  [0.40000000, 0.40000000, 0.60000000],
                  [0.40000000, 0.53333333, 0.60000000],
                  [0.33333333, 0.53333333, 0.60000000]],
                 [[0.33333333, 0.53333333, 0.60000000],
                  [0.40000000, 0.53333333, 0.60000000],
                  [0.40000000, 0.66666667, 0.60000000],
                  [0.33333333, 0.66666667, 0.60000000]],
                 [[0.33333333, 0.66666667, 0.60000000],
                  [0.40000000, 0.66666667, 0.60000000],
                  [0.40000000, 0.80000000, 0.60000000],
                  [0.33333333, 0.80000000, 0.60000000]]]
            ),
            decimal=7)  # yapf: disable
        np.testing.assert_almost_equal(
            grid('xy',
                 origin=np.array([-0.2, -0.4]),
                 width=-0.2,
                 height=-0.4,
                 depth=-0.6,
                 width_segments=3,
                 height_segments=3),
            np.array(
                [[[-0.20000000, -0.40000000, -0.60000000],
                  [-0.26666667, -0.40000000, -0.60000000],
                  [-0.26666667, -0.53333333, -0.60000000],
                  [-0.20000000, -0.53333333, -0.60000000]],
                 [[-0.20000000, -0.53333333, -0.60000000],
                  [-0.26666667, -0.53333333, -0.60000000],
                  [-0.26666667, -0.66666667, -0.60000000],
                  [-0.20000000, -0.66666667, -0.60000000]],
                 [[-0.20000000, -0.66666667, -0.60000000],
                  [-0.26666667, -0.66666667, -0.60000000],
                  [-0.26666667, -0.80000000, -0.60000000],
                  [-0.20000000, -0.80000000, -0.60000000]],
                 [[-0.26666667, -0.40000000, -0.60000000],
                  [-0.33333333, -0.40000000, -0.60000000],
                  [-0.33333333, -0.53333333, -0.60000000],
                  [-0.26666667, -0.53333333, -0.60000000]],
                 [[-0.26666667, -0.53333333, -0.60000000],
                  [-0.33333333, -0.53333333, -0.60000000],
                  [-0.33333333, -0.66666667, -0.60000000],
                  [-0.26666667, -0.66666667, -0.60000000]],
                 [[-0.26666667, -0.66666667, -0.60000000],
                  [-0.33333333, -0.66666667, -0.60000000],
                  [-0.33333333, -0.80000000, -0.60000000],
                  [-0.26666667, -0.80000000, -0.60000000]],
                 [[-0.33333333, -0.40000000, -0.60000000],
                  [-0.40000000, -0.40000000, -0.60000000],
                  [-0.40000000, -0.53333333, -0.60000000],
                  [-0.33333333, -0.53333333, -0.60000000]],
                 [[-0.33333333, -0.53333333, -0.60000000],
                  [-0.40000000, -0.53333333, -0.60000000],
                  [-0.40000000, -0.66666667, -0.60000000],
                  [-0.33333333, -0.66666667, -0.60000000]],
                 [[-0.33333333, -0.66666667, -0.60000000],
                  [-0.40000000, -0.66666667, -0.60000000],
                  [-0.40000000, -0.80000000, -0.60000000],
                  [-0.33333333, -0.80000000, -0.60000000]]]
            ),
            decimal=7)  # yapf: disable
class TestCube(unittest.TestCase):
"""
Defines :func:`colour.plotting.geometry.cube` definition unit tests
methods.
"""
def test_cube(self):
"""
Tests :func:`colour.plotting.geometry.cube` definition.
"""
np.testing.assert_almost_equal(
cube(),
np.array([
[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]],
[[0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]],
[[0, 0, 0], [1, 0, 0], [1, 0, 1], [0, 0, 1]],
[[0, 1, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1]],
[[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1]],
[[1, 0, 0], [1, 1, 0], [1, 1, 1], [1, 0, 1]],
]),
decimal=7)
np.testing.assert_almost_equal(
cube(('+x', )),
np.array([[[1, 0, 0], [1, 1, 0], [1, 1, 1], [1, 0, 1]]]),
decimal=7)
np.testing.assert_almost_equal(
cube(('-x', )),
np.array([[[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1]]]),
decimal=7)
np.testing.assert_almost_equal(
cube(('+y', )),
np.array([[[0, 1, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1]]]),
decimal=7)
np.testing.assert_almost_equal(
cube(('-y', )),
np.array([[[0, 0, 0], [1, 0, 0], [1, 0, 1], [0, 0, 1]]]),
decimal=7)
np.testing.assert_almost_equal(
cube(('+z', )),
np.array([[[0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]]),
decimal=7)
np.testing.assert_almost_equal(
cube(('-z', )),
np.array([[[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]]),
decimal=7)
np.testing.assert_almost_equal(
cube(origin=np.array([0.2, 0.4, 0.6]),
width=0.2,
height=0.4,
depth=0.6,
width_segments=3,
height_segments=3,
depth_segments=3),
np.array(
[[[0.20000000, 0.60000000, 0.40000000],
[0.26666667, 0.60000000, 0.40000000],
[0.26666667, 0.80000000, 0.40000000],
[0.20000000, 0.80000000, 0.40000000]],
[[0.20000000, 0.80000000, 0.40000000],
[0.26666667, 0.80000000, 0.40000000],
[0.26666667, 1.00000000, 0.40000000],
[0.20000000, 1.00000000, 0.40000000]],
[[0.20000000, 1.00000000, 0.40000000],
[0.26666667, 1.00000000, 0.40000000],
[0.26666667, 1.20000000, 0.40000000],
[0.20000000, 1.20000000, 0.40000000]],
[[0.26666667, 0.60000000, 0.40000000],
[0.33333333, 0.60000000, 0.40000000],
[0.33333333, 0.80000000, 0.40000000],
[0.26666667, 0.80000000, 0.40000000]],
[[0.26666667, 0.80000000, 0.40000000],
[0.33333333, 0.80000000, 0.40000000],
[0.33333333, 1.00000000, 0.40000000],
[0.26666667, 1.00000000, 0.40000000]],
[[0.26666667, 1.00000000, 0.40000000],
[0.33333333, 1.00000000, 0.40000000],
[0.33333333, 1.20000000, 0.40000000],
[0.26666667, 1.20000000, 0.40000000]],
[[0.33333333, 0.60000000, 0.40000000],
[0.40000000, 0.60000000, 0.40000000],
[0.40000000, 0.80000000, 0.40000000],
[0.33333333, 0.80000000, 0.40000000]],
[[0.33333333, 0.80000000, 0.40000000],
[0.40000000, 0.80000000, 0.40000000],
[0.40000000, 1.00000000, 0.40000000],
[0.33333333, 1.00000000, 0.40000000]],
[[0.33333333, 1.00000000, 0.40000000],
[0.40000000, 1.00000000, 0.40000000],
[0.40000000, 1.20000000, 0.40000000],
[0.33333333, 1.20000000, 0.40000000]],
[[0.20000000, 0.60000000, 0.80000000],
[0.26666667, 0.60000000, 0.80000000],
[0.26666667, 0.80000000, 0.80000000],
[0.20000000, 0.80000000, 0.80000000]],
[[0.20000000, 0.80000000, 0.80000000],
[0.26666667, 0.80000000, 0.80000000],
[0.26666667, 1.00000000, 0.80000000],
[0.20000000, 1.00000000, 0.80000000]],
[[0.20000000, 1.00000000, 0.80000000],
[0.26666667, 1.00000000, 0.80000000],
[0.26666667, 1.20000000, 0.80000000],
[0.20000000, 1.20000000, 0.80000000]],
[[0.26666667, 0.60000000, 0.80000000],
[0.33333333, 0.60000000, 0.80000000],
[0.33333333, 0.80000000, 0.80000000],
[0.26666667, 0.80000000, 0.80000000]],
[[0.26666667, 0.80000000, 0.80000000],
[0.33333333, 0.80000000, 0.80000000],
[0.33333333, 1.00000000, 0.80000000],
[0.26666667, 1.00000000, 0.80000000]],
[[0.26666667, 1.00000000, 0.80000000],
[0.33333333, 1.00000000, 0.80000000],
[0.33333333, 1.20000000, 0.80000000],
[0.26666667, 1.20000000, 0.80000000]],
[[0.33333333, 0.60000000, 0.80000000],
[0.40000000, 0.60000000, 0.80000000],
[0.40000000, 0.80000000, 0.80000000],
[0.33333333, 0.80000000, 0.80000000]],
[[0.33333333, 0.80000000, 0.80000000],
[0.40000000, 0.80000000, 0.80000000],
[0.40000000, 1.00000000, 0.80000000],
[0.33333333, 1.00000000, 0.80000000]],
[[0.33333333, 1.00000000, 0.80000000],
[0.40000000, 1.00000000, 0.80000000],
[0.40000000, 1.20000000, 0.80000000],
[0.33333333, 1.20000000, 0.80000000]],
[[0.20000000, 0.60000000, 0.40000000],
[0.26666667, 0.60000000, 0.40000000],
[0.26666667, 0.60000000, 0.53333333],
[0.20000000, 0.60000000, 0.53333333]],
[[0.20000000, 0.60000000, 0.53333333],
[0.26666667, 0.60000000, 0.53333333],
[0.26666667, 0.60000000, 0.66666667],
[0.20000000, 0.60000000, 0.66666667]],
[[0.20000000, 0.60000000, 0.66666667],
[0.26666667, 0.60000000, 0.66666667],
[0.26666667, 0.60000000, 0.80000000],
[0.20000000, 0.60000000, 0.80000000]],
[[0.26666667, 0.60000000, 0.40000000],
[0.33333333, 0.60000000, 0.40000000],
[0.33333333, 0.60000000, 0.53333333],
[0.26666667, 0.60000000, 0.53333333]],
[[0.26666667, 0.60000000, 0.53333333],
[0.33333333, 0.60000000, 0.53333333],
[0.33333333, 0.60000000, 0.66666667],
[0.26666667, 0.60000000, 0.66666667]],
[[0.26666667, 0.60000000, 0.66666667],
[0.33333333, 0.60000000, 0.66666667],
[0.33333333, 0.60000000, 0.80000000],
[0.26666667, 0.60000000, 0.80000000]],
[[0.33333333, 0.60000000, 0.40000000],
[0.40000000, 0.60000000, 0.40000000],
[0.40000000, 0.60000000, 0.53333333],
[0.33333333, 0.60000000, 0.53333333]],
[[0.33333333, 0.60000000, 0.53333333],
[0.40000000, 0.60000000, 0.53333333],
[0.40000000, 0.60000000, 0.66666667],
[0.33333333, 0.60000000, 0.66666667]],
[[0.33333333, 0.60000000, 0.66666667],
[0.40000000, 0.60000000, 0.66666667],
[0.40000000, 0.60000000, 0.80000000],
[0.33333333, 0.60000000, 0.80000000]],
[[0.20000000, 1.20000000, 0.40000000],
[0.26666667, 1.20000000, 0.40000000],
[0.26666667, 1.20000000, 0.53333333],
[0.20000000, 1.20000000, 0.53333333]],
[[0.20000000, 1.20000000, 0.53333333],
[0.26666667, 1.20000000, 0.53333333],
[0.26666667, 1.20000000, 0.66666667],
[0.20000000, 1.20000000, 0.66666667]],
[[0.20000000, 1.20000000, 0.66666667],
[0.26666667, 1.20000000, 0.66666667],
[0.26666667, 1.20000000, 0.80000000],
[0.20000000, 1.20000000, 0.80000000]],
[[0.26666667, 1.20000000, 0.40000000],
[0.33333333, 1.20000000, 0.40000000],
[0.33333333, 1.20000000, 0.53333333],
[0.26666667, 1.20000000, 0.53333333]],
[[0.26666667, 1.20000000, 0.53333333],
[0.33333333, 1.20000000, 0.53333333],
[0.33333333, 1.20000000, 0.66666667],
[0.26666667, 1.20000000, 0.66666667]],
[[0.26666667, 1.20000000, 0.66666667],
[0.33333333, 1.20000000, 0.66666667],
[0.33333333, 1.20000000, 0.80000000],
[0.26666667, 1.20000000, 0.80000000]],
[[0.33333333, 1.20000000, 0.40000000],
[0.40000000, 1.20000000, 0.40000000],
[0.40000000, 1.20000000, 0.53333333],
[0.33333333, 1.20000000, 0.53333333]],
[[0.33333333, 1.20000000, 0.53333333],
[0.40000000, 1.20000000, 0.53333333],
[0.40000000, 1.20000000, 0.66666667],
[0.33333333, 1.20000000, 0.66666667]],
[[0.33333333, 1.20000000, 0.66666667],
[0.40000000, 1.20000000, 0.66666667],
[0.40000000, 1.20000000, 0.80000000],
[0.33333333, 1.20000000, 0.80000000]],
[[0.20000000, 0.60000000, 0.40000000],
[0.20000000, 0.80000000, 0.40000000],
[0.20000000, 0.80000000, 0.53333333],
[0.20000000, 0.60000000, 0.53333333]],
[[0.20000000, 0.60000000, 0.53333333],
[0.20000000, 0.80000000, 0.53333333],
[0.20000000, 0.80000000, 0.66666667],
[0.20000000, 0.60000000, 0.66666667]],
[[0.20000000, 0.60000000, 0.66666667],
[0.20000000, 0.80000000, 0.66666667],
[0.20000000, 0.80000000, 0.80000000],
[0.20000000, 0.60000000, 0.80000000]],
[[0.20000000, 0.80000000, 0.40000000],
[0.20000000, 1.00000000, 0.40000000],
[0.20000000, 1.00000000, 0.53333333],
[0.20000000, 0.80000000, 0.53333333]],
[[0.20000000, 0.80000000, 0.53333333],
[0.20000000, 1.00000000, 0.53333333],
[0.20000000, 1.00000000, 0.66666667],
[0.20000000, 0.80000000, 0.66666667]],
[[0.20000000, 0.80000000, 0.66666667],
[0.20000000, 1.00000000, 0.66666667],
[0.20000000, 1.00000000, 0.80000000],
[0.20000000, 0.80000000, 0.80000000]],
[[0.20000000, 1.00000000, 0.40000000],
[0.20000000, 1.20000000, 0.40000000],
[0.20000000, 1.20000000, 0.53333333],
[0.20000000, 1.00000000, 0.53333333]],
[[0.20000000, 1.00000000, 0.53333333],
[0.20000000, 1.20000000, 0.53333333],
[0.20000000, 1.20000000, 0.66666667],
[0.20000000, 1.00000000, 0.66666667]],
[[0.20000000, 1.00000000, 0.66666667],
[0.20000000, 1.20000000, 0.66666667],
[0.20000000, 1.20000000, 0.80000000],
[0.20000000, 1.00000000, 0.80000000]],
[[0.40000000, 0.60000000, 0.40000000],
[0.40000000, 0.80000000, 0.40000000],
[0.40000000, 0.80000000, 0.53333333],
[0.40000000, 0.60000000, 0.53333333]],
[[0.40000000, 0.60000000, 0.53333333],
[0.40000000, 0.80000000, 0.53333333],
[0.40000000, 0.80000000, 0.66666667],
[0.40000000, 0.60000000, 0.66666667]],
[[0.40000000, 0.60000000, 0.66666667],
[0.40000000, 0.80000000, 0.66666667],
[0.40000000, 0.80000000, 0.80000000],
[0.40000000, 0.60000000, 0.80000000]],
[[0.40000000, 0.80000000, 0.40000000],
[0.40000000, 1.00000000, 0.40000000],
[0.40000000, 1.00000000, 0.53333333],
[0.40000000, 0.80000000, 0.53333333]],
[[0.40000000, 0.80000000, 0.53333333],
[0.40000000, 1.00000000, 0.53333333],
[0.40000000, 1.00000000, 0.66666667],
[0.40000000, 0.80000000, 0.66666667]],
[[0.40000000, 0.80000000, 0.66666667],
[0.40000000, 1.00000000, 0.66666667],
[0.40000000, 1.00000000, 0.80000000],
[0.40000000, 0.80000000, 0.80000000]],
[[0.40000000, 1.00000000, 0.40000000],
[0.40000000, 1.20000000, 0.40000000],
| |
An EVE character ID (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[GetCharactersCharacterIdMailLists200Ok]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_characters_character_id_mail_lists" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_mail_lists`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_mail_lists`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['evesso'] # noqa: E501
return self.api_client.call_api(
'/v1/characters/{character_id}/mail/lists/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[GetCharactersCharacterIdMailLists200Ok]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_characters_character_id_mail_mail_id(self, character_id, mail_id, **kwargs):  # noqa: E501
    """Return a mail  # noqa: E501

    Return the contents of an EVE mail --- This route is cached for up to
    30 seconds.  # noqa: E501

    Synchronous by default; pass async=True for an asynchronous request:

    >>> thread = api.get_characters_character_id_mail_mail_id(character_id, mail_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int character_id: An EVE character ID (required)
    :param int mail_id: An EVE mail ID (required)
    :param str datasource: The server name you would like data from
    :param str token: Access token to use if unable to set a header
    :param str user_agent: Client identifier, takes precedence over headers
    :param str x_user_agent: Client identifier, takes precedence over User-Agent
    :return: GetCharactersCharacterIdMailMailIdOk
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request data only; the delegate returns either the data
    # (synchronous) or the request thread (async=True), so a single
    # return covers both paths.
    kwargs['_return_http_data_only'] = True
    return self.get_characters_character_id_mail_mail_id_with_http_info(
        character_id, mail_id, **kwargs)  # noqa: E501
def get_characters_character_id_mail_mail_id_with_http_info(self, character_id, mail_id, **kwargs):  # noqa: E501
    """Return a mail  # noqa: E501

    Return the contents of an EVE mail --- This route is cached for up to
    30 seconds.  # noqa: E501

    Synchronous by default; pass async=True for an asynchronous request:

    >>> thread = api.get_characters_character_id_mail_mail_id_with_http_info(character_id, mail_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int character_id: An EVE character ID (required)
    :param int mail_id: An EVE mail ID (required)
    :param str datasource: The server name you would like data from
    :param str token: Access token to use if unable to set a header
    :param str user_agent: Client identifier, takes precedence over headers
    :param str x_user_agent: Client identifier, takes precedence over User-Agent
    :return: GetCharactersCharacterIdMailMailIdOk
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: on a missing or out-of-range required parameter
    """
    all_params = ['character_id', 'mail_id', 'datasource', 'token', 'user_agent', 'x_user_agent']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_characters_character_id_mail_mail_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'character_id' is set
    if ('character_id' not in params or
            params['character_id'] is None):
        raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_mail_mail_id`")  # noqa: E501
    # verify the required parameter 'mail_id' is set
    if ('mail_id' not in params or
            params['mail_id'] is None):
        raise ValueError("Missing the required parameter `mail_id` when calling `get_characters_character_id_mail_mail_id`")  # noqa: E501

    if 'character_id' in params and params['character_id'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `character_id` when calling `get_characters_character_id_mail_mail_id`, must be a value greater than or equal to `1`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'character_id' in params:
        path_params['character_id'] = params['character_id']  # noqa: E501
    if 'mail_id' in params:
        path_params['mail_id'] = params['mail_id']  # noqa: E501

    query_params = []
    if 'datasource' in params:
        query_params.append(('datasource', params['datasource']))  # noqa: E501
    if 'token' in params:
        query_params.append(('token', params['token']))  # noqa: E501
    if 'user_agent' in params:
        query_params.append(('user_agent', params['user_agent']))  # noqa: E501

    header_params = {}
    if 'x_user_agent' in params:
        header_params['X-User-Agent'] = params['x_user_agent']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['evesso']  # noqa: E501

    # BUGFIX: `async` is a reserved keyword from Python 3.7 onward, so
    # `async=...` is a SyntaxError there; pass it through **-unpacking so
    # api_client still receives it as kwargs['async'] on every Python
    # version.
    return self.api_client.call_api(
        '/v1/characters/{character_id}/mail/{mail_id}/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GetCharactersCharacterIdMailMailIdOk',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def post_characters_character_id_mail(self, character_id, mail, **kwargs):  # noqa: E501
    """Send a new mail  # noqa: E501

    Create and send a new mail ---  # noqa: E501

    Synchronous by default; pass async=True for an asynchronous request:

    >>> thread = api.post_characters_character_id_mail(character_id, mail, async=True)
    >>> result = thread.get()

    :param async bool
    :param int character_id: An EVE character ID (required)
    :param PostCharactersCharacterIdMailMail mail: The mail to send (required)
    :param str datasource: The server name you would like data from
    :param str token: Access token to use if unable to set a header
    :param str user_agent: Client identifier, takes precedence over headers
    :param str x_user_agent: Client identifier, takes precedence over User-Agent
    :return: int
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request data only; the delegate returns either the data
    # (synchronous) or the request thread (async=True), so a single
    # return covers both paths.
    kwargs['_return_http_data_only'] = True
    return self.post_characters_character_id_mail_with_http_info(
        character_id, mail, **kwargs)  # noqa: E501
def post_characters_character_id_mail_with_http_info(self, character_id, mail, **kwargs): # noqa: E501
"""Send a new mail # noqa: E501
Create and send a new mail --- # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_characters_character_id_mail_with_http_info(character_id, mail, async=True)
>>> result = thread.get()
:param async bool
:param int character_id: An EVE character ID (required)
:param PostCharactersCharacterIdMailMail mail: The mail to send (required)
:param str datasource: The server name you would like data from
:param str token: Access token to use if unable to set a header
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['character_id', 'mail', 'datasource', 'token', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_characters_character_id_mail" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'character_id' is set
if ('character_id' not in params or
params['character_id'] is None):
raise ValueError("Missing the required parameter `character_id` when calling `post_characters_character_id_mail`") # noqa: E501
# verify the required parameter 'mail' is set
if ('mail' not in params or
params['mail'] is None):
raise ValueError("Missing the required parameter `mail` when calling `post_characters_character_id_mail`") # noqa: E501
if 'character_id' in params and params['character_id'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `character_id` when calling `post_characters_character_id_mail`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'character_id' in params:
path_params['character_id'] = params['character_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'token' in params:
query_params.append(('token', | |
= Constraint(expr= - m.b28 + m.x1074 <= 0)
# Auto-generated coupling constraints c1035..c1193: each continuous
# variable x_j is forced to zero unless its owning binary b_k is selected
# (x_j <= b_k, written as -b_k + x_j <= 0).  The literal one-line-per-
# constraint output of the generator is reproduced here by index
# arithmetic: for constraint index i, the x index is i + 40, and the b
# index advances by one for every 20 x-variables (b28 owns x1075..x1081
# in this chunk, b29 owns x1082..x1101, ..., b36 owns x1222..x1233).
for _ci in range(1035, 1194):
    _xi = _ci + 40
    _bi = 28 + (_xi - 1062) // 20
    setattr(m, 'c%d' % _ci,
            Constraint(expr=- getattr(m, 'b%d' % _bi)
                       + getattr(m, 'x%d' % _xi) <= 0))
m.c1194 | |
old_dict : dict
remaining undefined mappings
combined_dict : dict
1.8 nested config dictionary plus remaining undefined mappings
Examples
--------
>>> a, b, c = update_config_dict({'pipelineName': 'example-pipeline', '2': None})
>>> a
{'pipeline_setup': {'pipeline_name': 'example-pipeline'}}
>>> b
{'2': None}
>>> c
{'pipeline_setup': {'pipeline_name': 'example-pipeline'}, '2': None}
''' # noqa
def _append_to_list(current_value, new_value):
'''Helper function to add new_value to the current_value list
or create a list if one does not exist. Skips falsy elements
in new_value
Parameters
----------
current_value : list
new_value : list, bool, None, or str
Returns
-------
list
Examples
--------
>>> _append_to_list([1], [2])
[1, 2]
>>> _append_to_list([1, 2], [2])
[1, 2]
>>> _append_to_list(None, [2])
[2]
>>> _append_to_list([1], [1, 2])
[1, 2]
>>> _append_to_list([1], [None, 2])
[1, 2]
'''
if not isinstance(current_value, list):
if current_value is not None:
current_value = [current_value]
else:
current_value = []
else:
current_value = [v for v in current_value if v is not None]
if isinstance(new_value, list):
for i in new_value:
if i and i not in current_value and i != 'Off':
current_value.append(i)
elif (
new_value and new_value not in current_value and
new_value != 'Off'
):
current_value.append(new_value)
return current_value
def _bool_to_str(old_value, value_if_true):
'''Helper function to convert a True or a list containing a
True to a given string
Parameters
----------
old_value : list, bool, None, or str
value_if_true : str
Returns
-------
str or None
Examples
--------
>>> _bool_to_str([0], 'test_str')
>>> _bool_to_str([1], 'test_str')
'test_str'
>>> _bool_to_str(0, 'test_str')
>>> _bool_to_str(1, 'test_str')
'test_str'
>>> _bool_to_str([True, False], 'test_str')
'test_str'
>>> _bool_to_str(None, 'test_str')
>>> _bool_to_str([0, None, False], 'test_str')
>>> _bool_to_str([0, None, False, 1], 'test_str')
'test_str'
'''
if isinstance(old_value, list):
if any([bool(i) for i in old_value]):
return value_if_true
elif bool(old_value):
return value_if_true
return None
def _get_old_values(old_dict, new_dict, key):
'''Helper function to get old and current values of a special key
being updated.
Parameters
----------
old_dict : dict
new_dict : dict
key : str
Returns
-------
old_dict : dict
new_dict : dict
old_value : any
current_value : any
'''
old_value = old_dict.pop(key)
current_value = lookup_nested_value(
new_dict, NESTED_CONFIG_MAPPING[key]
)
return old_dict, new_dict, old_value, current_value
new_dict = {}
for key in old_dict.copy():
if key in NESTED_CONFIG_MAPPING:
# handle special cases
special_cases = {
'acpc_run_preprocessing',
'acpc_template_brain',
'ANTs_prior_based_segmentation',
'func_reg_input',
'runRegisterFuncToTemplate',
'runRegisterFuncToEPI',
'fsl_linear_reg_only',
'functional_registration',
'template_for_resample',
'fnirtConfig',
'run_smoothing',
'runZScoring',
'run_longitudinal'
}
if key in special_cases:
try:
(
old_dict, new_dict, old_value, current_value
) = _get_old_values(old_dict, new_dict, key)
except KeyError:
continue
# longitudinal_template_generation.run
if key == 'run_longitudinal':
if 'anat' in old_value or 'func' in old_value:
current_value = True
else:
current_value = False
# anatomical_preproc.acpc_alignment.run_before_preproc
if key == 'acpc_run_preprocessing':
current_value = True if old_value.lower(
) == 'before' else False if old_value.lower(
) == 'after' else None
# anatomical_preproc.acpc_alignment.acpc_target
if key == 'acpc_template_brain':
if current_value in {'None', None, ''}:
new_dict = set_nested_value(
new_dict,
['anatomical_preproc', 'acpc_alignment',
'acpc_target'],
'whole-head'
)
# segmentation.tissue_segmentation.using
elif key == 'ANTs_prior_based_segmentation':
new_value = _bool_to_str(old_value, 'ANTs_Prior_Based')
if new_value == 'ANTs_Prior_Based':
new_dict = set_nested_value(
new_dict,
NESTED_CONFIG_MAPPING[key][:-1] +
[new_value, 'run'],
old_value
)
# registration_workflows.functional_registration.
# coregistration.func_input_prep.input
elif key == 'func_reg_input':
new_value = _replace_in_value_list(old_value, (' ', '_'))
current_value = _replace_in_value_list(
current_value, (' ', '_'))
# registration_workflows.functional_registration.
# func_registration_to_template.target_template.using
elif key in {
'runRegisterFuncToTemplate', 'runRegisterFuncToEPI'
}:
current_value = _replace_in_value_list(
current_value, (' ', '_'))
if key == 'runRegisterFuncToTemplate':
current_value = [
v for v in current_value if v not in {
'Off', 'False', False
}
]
new_value = []
new_dict = set_nested_value(
new_dict,
['registration_workflows',
'functional_registration',
'func_registration_to_template', 'run'],
bool(current_value)
)
if key == 'runRegisterFuncToEPI':
new_value = _bool_to_str(old_value, 'EPI_template')
# registration_workflows.anatomical_registration.registration.
# using
elif key == 'fsl_linear_reg_only':
new_value = _bool_to_str(old_value, 'FSL-linear')
# registration_workflows.functional_registration.
# func_registration_to_template.target_template.
# EPI_template.EPI_template_for_resample
elif key == 'template_for_resample':
new_dict = set_nested_value(
new_dict,
['registration_workflows', 'functional_registration',
'func_registration_to_template', 'target_template',
'EPI_template', 'EPI_template_for_resample'],
current_value
)
# registration_workflows.functional_registration.
# EPI_registration.FSL-FNIRT.fnirt_config
elif key == 'fnirtConfig':
current_value = old_value
new_dict = set_nested_value(
new_dict,
['registration_workflows', 'functional_registration',
'EPI_registration', 'FSL-FNIRT', 'fnirt_config'],
current_value
)
# post_processing.spatial_smoothing.output
elif key == 'run_smoothing':
new_value = [_bool_to_str(old_value, 'smoothed')]
if any([not bool(value) for value in old_value]):
new_value.append('nonsmoothed')
current_value = new_value
# post_processing.z-scoring.output
elif key == 'runZScoring':
new_value = [_bool_to_str(old_value, 'z-scored')]
if any([not bool(value) for value in old_value]):
new_value.append('raw')
current_value = new_value
# make sure list values are cast as lists
if key not in { # if key not in non-list-valued keys
'acpc_run_preprocessing', 'acpc_template_brain',
'functional_registration', 'template_for_resample',
'fnirtConfig', 'run_longitudinal'
}:
current_value = _append_to_list(current_value, new_value)
# update remaining keys
else:
current_value = old_dict.pop(key)
if current_value == 'None':
current_value = None
new_dict = set_nested_value(
new_dict, NESTED_CONFIG_MAPPING[key], current_value)
elif key in NESTED_CONFIG_DEPRECATIONS:
old_dict.pop(key)
return new_dict, old_dict, update_nested_dict(new_dict.copy(), old_dict)
def update_nested_dict(d_base, d_update, fully_specified=False):
    """Update dictionary of varying depth.
    Parameters
    ----------
    d_base : dict
        original dictionary
    d_update : dict
        dictionary with updates
    fully_specified : bool
        if True, overwrite instead of update
    Returns
    -------
    d_base : dict
        original dictionary with updates
    Examples
    --------
    >>> d_base = {'pipeline_name': 'cpac-default-pipeline',
    ...     'output_directory': {'path': '/output',
    ...         'write_func_outputs': False,
    ...         'write_debugging_outputs': False,
    ...         'output_tree': 'default',
    ...         'generate_quality_control_images': True},
    ...     'working_directory': {'path': '/tmp', 'remove_working_dir': True},
    ...     'log_directory': {'run_logging': True, 'path': '/logs'},
    ...     'system_config': {'maximum_memory_per_participant': 1,
    ...         'max_cores_per_participant': 1,
    ...         'num_ants_threads': 4,
    ...         'num_participants_at_once': 1},
    ...     'Amazon-AWS': {'aws_output_bucket_credentials': None,
    ...         's3_encryption': False}}
    >>> d_update = {'pipeline_name': 'cpac_fmriprep-options',
    ...     'system_config': {'num_ants_threads': 1},
    ...     'Amazon-AWS': {'s3_encryption': True}}
    >>> str(update_nested_dict(d_base, d_update)) == str({
    ...     'pipeline_name': 'cpac_fmriprep-options', 'output_directory': {
    ...         'path': '/output', 'write_func_outputs': False,
    ...         'write_debugging_outputs': False, 'output_tree': 'default',
    ...         'generate_quality_control_images': True
    ...     }, 'working_directory': {
    ...         'path': '/tmp', 'remove_working_dir': True
    ...     }, 'log_directory': {'run_logging': True, 'path': '/logs'},
    ...     'system_config': {
    ...         'maximum_memory_per_participant': 1,
    ...         'max_cores_per_participant': 1,
    ...         'num_ants_threads': 1, 'num_participants_at_once': 1
    ...     }, 'Amazon-AWS': {
    ...         'aws_output_bucket_credentials': None, 's3_encryption': True}})
    True
    >>> tse_base = {'timeseries_extraction': {'run': True, 'tse_roi_paths': {
    ...     '/cpac_templates/CC400.nii.gz': 'Avg',
    ...     '/cpac_templates/aal_mask_pad.nii.gz': 'Avg'
    ... }, 'realignment': 'ROI_to_func'}}
    >>> str(update_nested_dict(tse_base, {})) == str({
    ...     'timeseries_extraction': {'run': True, 'tse_roi_paths': {
    ...         '/cpac_templates/CC400.nii.gz': 'Avg',
    ...         '/cpac_templates/aal_mask_pad.nii.gz': 'Avg'
    ...     }, 'realignment': 'ROI_to_func'}})
    True
    >>> str(update_nested_dict(tse_base, {'timeseries_extraction': {
    ...     'tse_roi_paths': {'/cpac_templates/rois_3mm.nii.gz': 'Voxel'}
    ... }})) == str({'timeseries_extraction': {'run': True, 'tse_roi_paths': {
    ...     '/cpac_templates/rois_3mm.nii.gz': 'Voxel'
    ... }, 'realignment': 'ROI_to_func'}})
    True
    >>> str(update_nested_dict(tse_base, {'timeseries_extraction': {
    ...     'roi_paths_fully_specified': False,
    ...     'tse_roi_paths': {'/cpac_templates/rois_3mm.nii.gz': 'Voxel'}
    ... }})) == str({'timeseries_extraction': {'run': True, 'tse_roi_paths': {
    ...     '/cpac_templates/CC400.nii.gz': 'Avg',
    ...     '/cpac_templates/aal_mask_pad.nii.gz': 'Avg',
    ...     '/cpac_templates/rois_3mm.nii.gz': 'Voxel'
    ... }, 'realignment': 'ROI_to_func'}})
    True
    >>> str(update_nested_dict(tse_base, {'timeseries_extraction': {
    ...     'roi_paths_fully_specified': False,
    ...     'tse_roi_paths': {'/cpac_templates/aal_mask_pad.nii.gz': 'Voxel'}
    ... }})) == str({'timeseries_extraction': {'run': True,
    ...     'tse_roi_paths': {
    ...         '/cpac_templates/CC400.nii.gz': 'Avg',
    ...         '/cpac_templates/aal_mask_pad.nii.gz': 'Voxel'
    ... }, 'realignment': 'ROI_to_func'}})
    True
    >>> str(update_nested_dict(tse_base, {'timeseries_extraction': {
    ...     'tse_roi_paths': {'/cpac_templates/aal_mask_pad.nii.gz': 'Voxel'}
    ... }})) == str({'timeseries_extraction': {'run': True, 'tse_roi_paths': {
    ...     '/cpac_templates/aal_mask_pad.nii.gz': 'Voxel'
    ... }, 'realignment': 'ROI_to_func'}})
    True
    """  # noqa
    # Fully-specified subtrees replace the base wholesale instead of merging.
    if fully_specified:
        return d_update
    # A `*_roi_paths` child marks this level as fully specified unless the
    # caller explicitly opted out via `roi_paths_fully_specified: False`
    # (the marker key is consumed here and never merged into the result).
    if any(key.endswith('_roi_paths') for key in d_update):
        fully_specified = d_update.pop('roi_paths_fully_specified', True)
    else:
        fully_specified = False
    merged = {} if d_base is None else deepcopy(d_base)
    for key, value in d_update.items():
        if isinstance(value, collections.abc.Mapping):
            merged[key] = update_nested_dict(
                merged.get(key, {}), value, fully_specified)
        else:
            merged[key] = value
    return merged
def update_pipeline_values_1_8(d_old):
'''Function to update pipeline config values that changed from
C-PAC 1.7 to 1.8.
Parameters
----------
d_old : dict
Returns
-------
d : dict
updated
Examples
--------
>>> update_pipeline_values_1_8({'segmentation': {'tissue_segmentation': {
... 'using': ['FSL-FAST Thresholding', 'Customized Thresholding']}}})
{'segmentation': {'tissue_segmentation': {'using': ['FSL-FAST'], 'FSL-FAST': {'thresholding': {'use': 'Custom'}}}}}
>>> update_pipeline_values_1_8({'segmentation': {'tissue_segmentation': {
... 'using': ['FSL-FAST Thresholding']}}})
{'segmentation': {'tissue_segmentation': {'using': ['FSL-FAST'], 'FSL-FAST': {'thresholding': {'use': 'Auto'}}}}}
''' # noqa
from CPAC.pipeline.schema import valid_options
d = replace_in_strings(d_old.copy())
d = _replace_changed_values(
d,
['anatomical_preproc', 'brain_extraction', 'using'],
[('AFNI', '3dSkullStrip'), ('FSL', 'BET'), ('unet', 'UNet')]
)
d = _replace_changed_values(
d,
['functional_preproc', 'func_masking', 'using'],
[('3dAutoMask', 'AFNI'), ('BET', 'FSL')]
)
try:
seg_use_threshold = lookup_nested_value(d, [
'segmentation', 'tissue_segmentation', 'using'])
except KeyError:
seg_use_threshold = []
if not isinstance(seg_use_threshold, list):
seg_use_threshold = [seg_use_threshold]
if 'FSL-FAST Thresholding' in seg_use_threshold:
if 'using' in d['segmentation'].get(
'tissue_segmentation', {}
):
d['segmentation'][
'tissue_segmentation'
]['using'].append('FSL-FAST')
else:
d = set_nested_value(d, [
'segmentation', 'tissue_segmentation',
'using'], ['FSL-FAST'])
seg_use_threshold.remove('FSL-FAST Thresholding')
if 'Customized Thresholding' in seg_use_threshold:
seg_use_threshold.remove('Customized Thresholding')
d = set_nested_value(d, [
'segmentation', 'tissue_segmentation',
'FSL-FAST', 'thresholding', 'use'], 'Custom')
else:
d = set_nested_value(d, [
'segmentation', 'tissue_segmentation',
'FSL-FAST', 'thresholding', 'use'], 'Auto')
for centr in ['degree_centrality', 'eigenvector_centrality',
'local_functional_connectivity_density']:
centr_keys = ['network_centrality', centr, 'weight_options']
try:
centr_value = | |
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Common Implementation of GCC-Compatible C/C++ Toolchain."""
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"action_config",
"artifact_name_pattern",
"feature",
"flag_group",
"flag_set",
"variable_with_value",
"with_feature_set",
)
load(
":common.bzl",
"ACTION_NAMES",
"ALL_COMPILE_ACTIONS",
"ALL_LINK_ACTIONS",
"CPP_COMPILE_ACTIONS",
"FEATURE_NAMES",
"PREPROCESSOR_ACTIONS",
"make_default_compile_flags_feature",
"make_default_link_flags_feature",
"make_linkstamps_feature",
"make_tool",
"make_user_compile_flags_feature",
"make_user_link_flags_feature",
)
load("//cc:rules.bzl", "cc_toolchain")
#
# GCC-Compatible Features
#
def gcc_archiver_flags_feature(ctx):
    """Feature: ar-style flags for building a static library.
    Emits `rcsD <output>` followed by each object file (or each member of an
    object-file group) to link into the archive. `ctx` is unused; kept for a
    uniform factory signature.
    """
    return feature(
        name = FEATURE_NAMES.archiver_flags,
        flag_sets = [flag_set(
            actions = [ACTION_NAMES.cpp_link_static_library],
            flag_groups = [flag_group(
                flags = ["rcsD", "%{output_execpath}"],
            ), flag_group(
                iterate_over = "libraries_to_link",
                flag_groups = [flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "object_file",
                    ),
                    flags = ["%{libraries_to_link.name}"],
                ), flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "object_file_group",
                    ),
                    iterate_over = "libraries_to_link.object_files",
                    flags = ["%{libraries_to_link.object_files}"],
                )],
            )],
        )],
    )
def gcc_compiler_input_flags_feature(ctx):
    """Feature: pass the source file to the compiler as `-c <source>`."""
    return feature(
        name = FEATURE_NAMES.compiler_input_flags,
        flag_sets = [flag_set(
            actions = ALL_COMPILE_ACTIONS,
            flag_groups = [flag_group(flags = ["-c", "%{source_file}"])],
        )],
    )
def gcc_compiler_output_flags_feature(ctx):
    """Feature: name the compiler output via `-o <output>`."""
    return feature(
        name = FEATURE_NAMES.compiler_output_flags,
        flag_sets = [flag_set(
            actions = ALL_COMPILE_ACTIONS,
            flag_groups = [flag_group(flags = ["-o", "%{output_file}"])],
        )],
    )
def gcc_dependency_file_feature(ctx):
    """Feature (enabled by default): write a .d dependency file with
    `-MD -MF <file>` when the build exposes a `dependency_file` variable."""
    return feature(
        name = FEATURE_NAMES.dependency_file,
        enabled = True,
        flag_sets = [flag_set(
            actions = ALL_COMPILE_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "dependency_file",
                flags = ["-MD", "-MF", "%{dependency_file}"],
            )],
        )],
    )
def gcc_fission_support(ctx):
    """Feature: add `-Wl,--gdb-index` at link time when fission
    (split debug info) is in use."""
    return feature(
        name = FEATURE_NAMES.fission_support,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "is_using_fission",
                flags = ["-Wl,--gdb-index"],
            )],
        )],
    )
def gcc_force_pic_flags_feature(ctx):
    """Feature: link executables as position-independent (`-pie`) when the
    `force_pic` build variable is set."""
    return feature(
        name = FEATURE_NAMES.force_pic_flags,
        flag_sets = [flag_set(
            actions = [ACTION_NAMES.cpp_link_executable],
            flag_groups = [flag_group(
                expand_if_available = "force_pic",
                flags = ["-pie"],
            )],
        )],
    )
def gcc_fully_static_link(ctx):
    """Feature: fully static linking via `-static` on all link actions."""
    return feature(
        name = FEATURE_NAMES.fully_static_link,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            flag_groups = [flag_group(flags = ["-static"])],
        )],
    )
def gcc_includes_feature(ctx):
    """Feature (enabled by default): force-include headers with
    `-include <header>` for each entry in the `includes` build variable."""
    return feature(
        name = FEATURE_NAMES.includes,
        enabled = True,
        flag_sets = [flag_set(
            actions = PREPROCESSOR_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "includes",
                iterate_over = "includes",
                flags = ["-include", "%{includes}"],
            )],
        )],
    )
def gcc_include_paths_feature(ctx):
    """Feature (enabled by default): emit quote (`-iquote`), normal (`-I`)
    and system (`-isystem`) include search paths for preprocessor actions."""
    return feature(
        name = FEATURE_NAMES.include_paths,
        enabled = True,
        flag_sets = [flag_set(
            actions = PREPROCESSOR_ACTIONS,
            flag_groups = [flag_group(
                iterate_over = "quote_include_paths",
                flags = ["-iquote", "%{quote_include_paths}"],
            ), flag_group(
                iterate_over = "include_paths",
                flags = ["-I%{include_paths}"],
            ), flag_group(
                iterate_over = "system_include_paths",
                flags = ["-isystem", "%{system_include_paths}"],
            )],
        )],
    )
def gcc_libraries_to_link_feature(ctx):
    """Feature: expand `libraries_to_link` into linker flags by library type
    (dynamic `-l`, interface/object by path, object groups wrapped in
    `--start-lib`/`--end-lib`, static archives, versioned `-l:`), honoring
    whole-archive wrapping when `is_whole_archive` is set."""
    # Emitted before/after a library only when is_whole_archive is true.
    whole_archive = flag_group(
        expand_if_true = "libraries_to_link.is_whole_archive",
        flags = ["-Wl,--whole-archive"],
    )
    no_whole_archive = flag_group(
        expand_if_true = "libraries_to_link.is_whole_archive",
        flags = ["-Wl,--no-whole-archive"],
    )
    return feature(
        name = FEATURE_NAMES.libraries_to_link,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            flag_groups = [flag_group(
                iterate_over = "libraries_to_link",
                flag_groups = [flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "dynamic_library",
                    ),
                    flags = ["-l%{libraries_to_link.name}"],
                ), flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "interface_library",
                    ),
                    flags = ["%{libraries_to_link.name}"],
                ), flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "object_file",
                    ),
                    flags = ["%{libraries_to_link.name}"],
                ), flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "object_file_group",
                    ),
                    flag_groups = [
                        whole_archive,
                        flag_group(flags = ["-Wl,--start-lib"]),
                        flag_group(
                            iterate_over = "libraries_to_link.object_files",
                            flags = ["%{libraries_to_link.object_files}"],
                        ),
                        flag_group(flags = ["-Wl,--end-lib"]),
                        no_whole_archive,
                    ],
                ), flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "static_library",
                    ),
                    flag_groups = [
                        whole_archive,
                        flag_group(flags = ["%{libraries_to_link.name}"]),
                        no_whole_archive,
                    ],
                ), flag_group(
                    expand_if_equal = variable_with_value(
                        "libraries_to_link.type",
                        "versioned_dynamic_library",
                    ),
                    flags = ["-l:%{libraries_to_link.name}"],
                )],
            )],
        )],
    )
def gcc_library_search_directories_feature(ctx):
    """Feature: add `-L<dir>` for each library search directory."""
    return feature(
        name = FEATURE_NAMES.library_search_directories,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "library_search_directories",
                iterate_over = "library_search_directories",
                flags = ["-L%{library_search_directories}"],
            )],
        )],
    )
def gcc_linker_param_file_feature(ctx):
    """Feature: pass a linker/archiver params file as `@<file>` when the
    `linker_param_file` build variable is available."""
    return feature(
        name = FEATURE_NAMES.linker_param_file,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS + [ACTION_NAMES.cpp_link_static_library],
            flag_groups = [flag_group(
                expand_if_available = "linker_param_file",
                flags = ["@%{linker_param_file}"],
            )],
        )],
    )
def gcc_output_execpath_flags_feature(ctx):
    """Feature: name the link output via `-o <output_execpath>`."""
    return feature(
        name = FEATURE_NAMES.output_execpath_flags,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            flag_groups = [flag_group(
                flags = ["-o", "%{output_execpath}"],
            )],
        )],
    )
def gcc_per_object_debug_info_feature(ctx):
    """Feature (enabled by default): compile with `-gsplit-dwarf` when a
    per-object debug-info (.dwo) file is requested."""
    return feature(
        name = FEATURE_NAMES.per_object_debug_info,
        enabled = True,
        flag_sets = [flag_set(
            actions = ALL_COMPILE_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "per_object_debug_info_file",
                flags = ["-gsplit-dwarf"],
            )],
        )],
    )
def gcc_pic_feature(ctx):
    """Feature (enabled by default): compile with `-fPIC` when the `pic`
    build variable is set."""
    return feature(
        name = FEATURE_NAMES.pic,
        enabled = True,
        flag_sets = [flag_set(
            actions = ALL_COMPILE_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "pic",
                flags = ["-fPIC"],
            )],
        )],
    )
def gcc_preprocessor_defines_feature(ctx):
    """Feature (enabled by default): emit `-D<define>` for each
    preprocessor define on preprocessor actions."""
    return feature(
        name = FEATURE_NAMES.preprocessor_defines,
        enabled = True,
        flag_sets = [flag_set(
            actions = PREPROCESSOR_ACTIONS,
            flag_groups = [flag_group(
                iterate_over = "preprocessor_defines",
                flags = ["-D%{preprocessor_defines}"],
            )],
        )],
    )
def gcc_random_seed_feature(ctx):
    """Feature (enabled by default): seed C++ compiles deterministically
    with `-frandom-seed=<output_file>` for reproducible builds."""
    return feature(
        name = FEATURE_NAMES.random_seed,
        enabled = True,
        flag_sets = [flag_set(
            actions = CPP_COMPILE_ACTIONS,
            flag_groups = [flag_group(flags = ["-frandom-seed=%{output_file}"])],
        )],
    )
def gcc_runtime_library_search_directories_feature(ctx):
    """Feature: add `$ORIGIN`-relative rpath entries
    (`-Wl,-rpath,$ORIGIN/<dir>`) for each runtime search directory."""
    origin = "-Wl,-rpath,$ORIGIN/"
    return feature(
        name = FEATURE_NAMES.runtime_library_search_directories,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "runtime_library_search_directories",
                iterate_over = "runtime_library_search_directories",
                flags = [origin + "%{runtime_library_search_directories}"],
            )],
        )],
    )
def gcc_shared_flag_feature(ctx):
    """Feature: pass `-shared` when linking (nodeps) dynamic libraries."""
    return feature(
        name = FEATURE_NAMES.shared_flag,
        flag_sets = [flag_set(
            actions = [
                ACTION_NAMES.cpp_link_dynamic_library,
                ACTION_NAMES.cpp_link_nodeps_dynamic_library,
            ],
            flag_groups = [flag_group(flags = ["-shared"])],
        )],
    )
def gcc_static_libgcc_feature(ctx):
    """Feature (enabled by default): link with `-static-libgcc`, but only
    when the static_link_cpp_runtimes feature is also active."""
    return feature(
        name = FEATURE_NAMES.static_libgcc,
        enabled = True,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            with_features = [with_feature_set(features = [
                FEATURE_NAMES.static_link_cpp_runtimes,
            ])],
            flag_groups = [flag_group(flags = ["-static-libgcc"])],
        )],
    )
def gcc_strip_debug_symbols_feature(ctx):
    """Feature: strip debug symbols at link time (`-Wl,-S`) when the
    `strip_debug_symbols` build variable is set."""
    return feature(
        name = FEATURE_NAMES.strip_debug_symbols,
        flag_sets = [flag_set(
            actions = ALL_LINK_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "strip_debug_symbols",
                flags = ["-Wl,-S"],
            )],
        )],
    )
def gcc_strip_flag_set(ctx, **kwargs):
    """Generates list of `flag_set` for the `strip` executable."""
    # Order matters: rule-level stripopts first, then per-invocation
    # stripopts, then the output/input file pair.
    return [flag_set(
        flag_groups = [flag_group(
            flags = ctx.attr.stripopts,
        ), flag_group(
            flags = ["%{stripopts}"],
            iterate_over = "stripopts",
        ), flag_group(
            flags = ["-o", "%{output_file}", "%{input_file}"],
        )],
        **kwargs
    )]
def gcc_sysroot_feature(ctx):
    """Feature: pass `--sysroot=<dir>` to both compiles and links when a
    sysroot is configured."""
    return feature(
        name = FEATURE_NAMES.sysroot,
        flag_sets = [flag_set(
            actions = ALL_COMPILE_ACTIONS + ALL_LINK_ACTIONS,
            flag_groups = [flag_group(
                expand_if_available = "sysroot",
                flags = ["--sysroot=%{sysroot}"],
            )],
        )],
    )
def gcc_unfiltered_compile_flags_feature(ctx):
    """Feature (enabled by default): reproducibility flags — avoid
    canonicalized prefixes and redact __DATE__/__TIME__/__TIMESTAMP__ so
    outputs do not depend on build time or machine paths."""
    return feature(
        name = FEATURE_NAMES.unfiltered_compile_flags,
        enabled = True,
        flag_sets = [flag_set(
            actions = ALL_COMPILE_ACTIONS,
            flag_groups = [flag_group(flags = [
                "-no-canonical-prefixes",
                "-Wno-builtin-macro-redefined",
                "-D__DATE__=\"redacted\"",
                "-D__TIME__=\"redacted\"",
                "-D__TIMESTAMP__=\"redacted\"",
            ])],
        )],
    )
#
# Core Rule Implementation
#
def gcc_cc_toolchain_config_impl(ctx, copts = [], linkopts = []):
    """Shared rule implementation for GCC-compatible toolchain configs.
    Builds action_configs for compile/link/archive/strip actions, the
    artifact name patterns, and the full feature list, then constructs a
    CcToolchainConfigInfo. Also writes the resulting proto to
    `<name>.pbtxt` for diagnostics.
    Args:
        ctx: rule context (expects attrs: cctool, linktool, artool, strip,
            stripopts, sysroot, compiler, builtin_include_directories, cpu,
            target, name; optional features gate dynamic linker / PIC /
            start-end-lib support).
        copts: extra default compile flags.
        linkopts: extra default link flags.
    Returns:
        [DefaultInfo (the .pbtxt file), CcToolchainConfigInfo].
    """
    # Generate List of Actions
    action_configs = []
    for action_name in ALL_COMPILE_ACTIONS:
        action_configs.append(action_config(
            action_name = action_name,
            implies = [
                FEATURE_NAMES.default_compile_flags,
                FEATURE_NAMES.user_compile_flags,
                FEATURE_NAMES.sysroot,
                FEATURE_NAMES.unfiltered_compile_flags,
                FEATURE_NAMES.compiler_input_flags,
                FEATURE_NAMES.compiler_output_flags,
            ],
            tools = make_tool(ctx, ctx.file.cctool),
        ))
    for action_name in ALL_LINK_ACTIONS:
        # Executables may be forced PIC; libraries get -shared instead.
        if action_name == ACTION_NAMES.cpp_link_executable:
            implies = [FEATURE_NAMES.force_pic_flags]
        else:
            implies = [FEATURE_NAMES.shared_flag]
        action_configs.append(action_config(
            action_name = action_name,
            implies = implies + [
                FEATURE_NAMES.default_link_flags,
                FEATURE_NAMES.strip_debug_symbols,
                FEATURE_NAMES.linkstamps,
                FEATURE_NAMES.output_execpath_flags,
                FEATURE_NAMES.runtime_library_search_directories,
                FEATURE_NAMES.library_search_directories,
                FEATURE_NAMES.libraries_to_link,
                FEATURE_NAMES.user_link_flags,
                FEATURE_NAMES.linker_param_file,
                FEATURE_NAMES.fission_support,
                FEATURE_NAMES.sysroot,
            ],
            tools = make_tool(ctx, ctx.file.linktool),
        ))
    action_configs.append(action_config(
        action_name = ACTION_NAMES.cpp_link_static_library,
        implies = [
            FEATURE_NAMES.archiver_flags,
            FEATURE_NAMES.linker_param_file,
        ],
        tools = make_tool(ctx, ctx.file.artool),
    ))
    action_configs.append(action_config(
        action_name = ACTION_NAMES.strip,
        flag_sets = gcc_strip_flag_set(ctx),
        tools = make_tool(ctx, ctx.file.strip),
    ))
    # Construct List of Artifacts
    artifact_name_patterns = [
        artifact_name_pattern("alwayslink_static_library", "lib", ".lo"),
        artifact_name_pattern("executable", None, None),
        artifact_name_pattern("included_file_list", None, ".d"),
        artifact_name_pattern("object_file", None, ".o"),
        artifact_name_pattern("static_library", "lib", ".a"),
    ]
    # Support List
    features = [
        feature(name = FEATURE_NAMES.dbg),
        feature(name = FEATURE_NAMES.fastbuild),
        feature(name = FEATURE_NAMES.opt),
        feature(name = FEATURE_NAMES.no_legacy_features),
    ]
    # Optional capabilities are opted into via the rule's `features` list.
    if FEATURE_NAMES.supports_dynamic_linker in ctx.features:
        artifact_name_patterns.extend([
            artifact_name_pattern("dynamic_library", "lib", ".so"),
            artifact_name_pattern("interface_library", "lib", ".ifso"),
        ])
        features.append(feature(
            name = FEATURE_NAMES.supports_dynamic_linker,
            enabled = True,
        ))
    if FEATURE_NAMES.supports_pic in ctx.features:
        artifact_name_patterns.extend([
            artifact_name_pattern("pic_file", None, ".pic"),
            artifact_name_pattern("pic_object_file", None, ".pic.o"),
        ])
        features.append(feature(
            name = FEATURE_NAMES.supports_pic,
            enabled = True,
        ))
    if FEATURE_NAMES.supports_start_end_lib in ctx.features:
        features.append(feature(
            name = FEATURE_NAMES.supports_start_end_lib,
            enabled = True,
        ))
    # Action Groups
    features += [
        # Compiler Flags
        make_default_compile_flags_feature(ctx, copts),
        gcc_dependency_file_feature(ctx),
        gcc_pic_feature(ctx),
        gcc_per_object_debug_info_feature(ctx),
        gcc_preprocessor_defines_feature(ctx),
        gcc_includes_feature(ctx),
        gcc_include_paths_feature(ctx),
        # Linker Flags
        make_default_link_flags_feature(ctx, linkopts),
        gcc_shared_flag_feature(ctx),
        make_linkstamps_feature(ctx),
        gcc_output_execpath_flags_feature(ctx),
        gcc_runtime_library_search_directories_feature(ctx),
        gcc_library_search_directories_feature(ctx),
        gcc_archiver_flags_feature(ctx),
        gcc_libraries_to_link_feature(ctx),
        gcc_force_pic_flags_feature(ctx),
        make_user_link_flags_feature(ctx),
        gcc_static_libgcc_feature(ctx),
        gcc_fission_support(ctx),
        gcc_strip_debug_symbols_feature(ctx),
        gcc_fully_static_link(ctx),
        # Trailing Flags
        make_user_compile_flags_feature(ctx),
        gcc_sysroot_feature(ctx),
        gcc_unfiltered_compile_flags_feature(ctx),
        gcc_linker_param_file_feature(ctx),
        gcc_compiler_input_flags_feature(ctx),
        gcc_compiler_output_flags_feature(ctx),
    ]
    # Additional Parameters
    sysroot = ctx.file.sysroot
    if sysroot:
        sysroot = sysroot.path
    # Construct CcToolchainConfigInfo
    config = cc_common.create_cc_toolchain_config_info(
        ctx = ctx,
        abi_libc_version = "local",
        abi_version = "local",
        action_configs = action_configs,
        artifact_name_patterns = artifact_name_patterns,
        builtin_sysroot = sysroot,
        cc_target_os = None,
        compiler = ctx.attr.compiler,
        cxx_builtin_include_directories = ctx.attr.builtin_include_directories,
        features = features,
        host_system_name = "local",
        make_variables = [],
        tool_paths = [],
        target_cpu = ctx.attr.cpu,
        target_libc = "local",
        target_system_name = ctx.attr.target,
        toolchain_identifier = ctx.attr.name,
    )
    # Write out CcToolchainConfigInfo to file (diagnostics)
    pbtxt = ctx.actions.declare_file(ctx.attr.name + ".pbtxt")
    ctx.actions.write(
        output = pbtxt,
        content = config.proto,
    )
    return [DefaultInfo(files = depset([pbtxt])), config]
gcc_cc_toolchain_config = rule(
attrs = {
"artool": attr.label(
allow_single_file = True,
cfg = "host",
executable = True,
mandatory = | |
# -*- coding: utf-8 -*-
from datetime import datetime
# BOM row with sample non-ASCII text — presumably prepended to CSV output so
# spreadsheet tools detect UTF-8; confirm in the CSV writer code.
CSV_BOM = [u"\uFEFF他们 (für)"]
# Columns shared by every CSV variant produced by this tool.
CSV_HEADER_COMMON = ["format_s","arkid","filename"]
# The Main CSV File: columns with below headers are extracted,updated and written to ESRI ISO;
# An extracted value from ESIR ISO is stored at a column with a header of "*_o".
# with one field exception: "resourceType" is gotten from ArcPy, it is not written to ESRI ISO, but with column "*_o" in column
CSV_HEADER_TRANSFORM = [
    "title_s",
    "alternativeTitle",
    "summary",
    "description",
    "language",
    "resourceType",
    "subject",
    "date_s",
    "spatialSubject",
    "collectionTitle",
    "rights_general",
    "rights_legal",
    "rights_security",
    "modified_date_dt",
    "topicISO",
    "keyword",
    "temporalCoverage"]
# Main CSV File headers: the order of this array define the order the main CSV file
CSV_ORDERED_HEADERS = [
    "title_s_o",
    "title_s",
    "alternativeTitle_o",
    "alternativeTitle",
    "date_s_o",
    "date_s",
    "summary_o",
    "summary",
    "description_o",
    "description",
    "topicISO_o",
    "topicISO",
    "subject_o",
    "subject",
    "keyword_o",
    "keyword",
    "spatialSubject_o",
    "spatialSubject",
    "temporalCoverage_o",
    "temporalCoverage",
    "solrYear",
    "dateRange_drsim",
    "language_o",
    "language",
    "resourceClass",
    "resourceType_o",
    "resourceType",
    "collectionTitle_o",
    "collectionTitle",
    "relation",
    "isPartOf",
    "isMemberOf",
    "source",
    "isVersionOf",
    "replaces",
    "isReplacedBy",
    "accessRights_s",
    "rights_general_o",
    "rights_general",
    "rights_legal_o",
    "rights_legal",
    "rights_security_o",
    "rights_security",
    "rightsHolder",
    "license",
    "suppressed_b",
    "georeferenced_b",
    "modified_date_dt_o",
    "modified_date_dt"
    ]
# Mapping between csv header and geoblacklight elements:
# "dateRange_drsim" using string "[1980 TO 1995]"
CSV_HEADER_GEOBLACKLIGHT_MAP = {
    "format_s": "dct_format_s",
    "title_s" : "dct_title_s",
    "alternativeTitle" : "dct_alternative_sm",
    "description" : "dct_description_sm",
    "language" : "dct_language_sm",
    "resourceType":"gbl_resourceType_sm",
    "subject" : "dct_subject_sm",
    "topicISO" : "dcat_theme_sm",
    "keyword" : "dcat_keyword_sm",
    "temporalCoverage" : "dct_temporal_sm",
    "date_s" : "dct_issued_s",
    "solrYear" : "gbl_indexYear_im",
    "dateRange_drsim" : "gbl_dateRange_drsim",
    "relation" : "dct_relation_sm",
    "spatialSubject" : "dct_spatial_sm",
    "collectionTitle" : "pcdm_memberOf_sm",
    "isPartOf" : "dct_isPartOf_sm",
    "source" : "dct_source_sm",
    "isVersionOf" : "dct_isVersionOf_sm",
    "replaces" : "dct_replaces_sm",
    "isReplacedBy" : "dct_isReplacedBy_sm",
    "license" : "dct_license_sm",
    "accessRights_s" : "dct_accessRights_s",
    "modified_date_dt" : "gbl_mdModified_dt",
    "resourceClass" : "gbl_resourceClass_sm",
    "suppressed_b" : "gbl_suppressed_b",
    "georeferenced_b" : "gbl_georeferenced_b"
}
# Combine three rights to "dct_rights_sm" in Geoblacklight
CSV_HEADER_COLUMNS_RIGHTS = [ "rights_general","rights_legal","rights_security"]
# CSV file for ingestion app
# (Aardvark-schema field names; commented entries are intentionally omitted.)
CSV_HEADER_GEOBLACKLIGHT = [
    "dct_format_s",
    "dct_title_s",
    "dct_alternative_sm",
    "dct_description_sm",
    "dct_language_sm",
    "gbl_resourceType_sm",
    "dct_subject_sm",
    "dcat_theme_sm",
    "dcat_keyword_sm",
    "dct_temporal_sm",
    "dct_issued_s",
    "gbl_indexYear_im",
    "gbl_dateRange_drsim",
    "dct_relation_sm",
    "dct_spatial_sm",
    "pcdm_memberOf_sm",
    "dct_isPartOf_sm",
    "dct_source_sm",
    "dct_isVersionOf_sm",
    "dct_replaces_sm",
    "dct_isReplacedBy_sm",
    "dct_license_sm",
    "dct_accessRights_s",
    "gbl_mdModified_dt",
    "gbl_resourceClass_sm",
    "gbl_suppressed_b",
    "gbl_georeferenced_b",
    "dct_creator_sm",
    "dct_publisher_sm",
    # "schema_provider_s",
    "locn_geometry",
    "dct_rights_sm",
    "dct_rightsHolder_sm",
    # gbl_wxsIdentifier_s,
    # dct_references_s,
    "id"
    # dct_identifier_sm,
    # gbl_mdVersion_s,
]
# Column headers for the responsible-party CSV (contact metadata).
CSV_HEADER_RESPONSIBLE_PARTY = [ "from",
                    "individual",
                    "role",
                    "contact_name",
                    "position",
                    "organization",
                    "contact_info",
                    "email",
                    "address_type",
                    "address",
                    "city",
                    "state",
                    "zip",
                    "country",
                    "phone_no",
                    "fax_no",
                    "hours",
                    "instruction"]
# Default UC Berkeley Library contact values.
# NOTE(review): key "address_Type" differs in case from the CSV header
# "address_type" above — confirm which spelling consumers expect.
UCB_RESPONSIBLE_PARTY = {
    "organization":"UC Berkeley Library",
    "email":"<EMAIL>",
    "address_Type":"Both",
    "address": "50 McCone Hall",
    "city":"Berkeley",
    "state": "CA",
    "zip":"94720-6000",
    "country":"UNITED STATES"
}
# Directory layout used by the processing pipeline.
PROCESS_SUB_DIRECTORY = ["Results","Source","Work"]
RESULT_DIRECTORY_PARENTS = ["precessed_result","final_result"]
RESULT_DIRECTORY = ["GeoFiles for Downloading","GeoFiles for Geoserver(Projected)","Geoblacklight Json Files","The CSV File","ISO19139 Metadata Files","Updated CSV Files"]
# Identifier prefix and institution metadata.
PREFIX = "berkeley-"
INSTITUTION = "Berkeley"
GEOBLACKLGITH_VERSION = "Aardvark"
# Endpoint templates for public datasets ("dct_references_s"-style
# key:value fragments, hence the embedded quoting).
HOSTS = {
          "geoserver_host":"geoservices.lib.berkeley.edu",
          "ISO139": "\"http://www.isotc211.org/schemas/2005/gmd/\":\"https://spatial.lib.berkeley.edu/metadata/",
          "download": "\"http://schema.org/downloadUrl\":\"https://spatial.lib.berkeley.edu/public/",
          "wfs":"\"http://www.opengis.net/def/serviceType/ogc/wfs\":\"https://geoservices.lib.berkeley.edu/geoserver/wfs\",",
          "wms":"\"http://www.opengis.net/def/serviceType/ogc/wms\":\"https://geoservices.lib.berkeley.edu/geoserver/wms\","
        }
# Same endpoints for restricted (UCB-only) datasets.
HOSTS_SECURE = {
          "geoserver_host":"geoservices-secure.lib.berkeley.edu",
          "ISO139": "\"http://www.isotc211.org/schemas/2005/gmd/\":\"https://spatial.lib.berkeley.edu/metadata/",
          "download": "\"http://schema.org/downloadUrl\":\"https://spatial.lib.berkeley.edu/UCB/",
          "wfs":"\"http://www.opengis.net/def/serviceType/ogc/wfs\":\"https://geoservices-secure.lib.berkeley.edu/geoserver/wfs\",",
          "wms":"\"http://www.opengis.net/def/serviceType/ogc/wms\":\"https://geoservices-secure.lib.berkeley.edu/geoserver/wms\","
        }
############# Messages for ArcCatalog ######
WARNING_ARK_MAPFILE_NUMBER_DIFFERENT = "**** Arks ({0}), work directory geofiles ({1}), source directory geofiles ({2}), Please make sure that the numbers of geofiles and arks are the same. ****"
ARKS_HAVE_BEEN_ASSIGNED = "**** Have Ark ids been already assigned ? ****"
ARKS_HAVE_BEEN_RE_ASSIGNED = "**** Arks have been re-assinged! ****"
SUCCESSFUL_ASSINGED_ARKS = "Successfully assigned {0} ark ids !"
NOT_SINGLE_MAP_FILES = "**** Directory {0} has less or more than one Map files ****"
INCORRECT_PROJECTION = "**** {0} - projection is incorrect ***"
PROJECT_SUCCEEDED = "**** {0} - is projected."
PROJECT_FAILED = "**** Cannot project shapefile '{0}'."
FAILED_TO_ASSIGN_ARKS = "Failed to assign ark ids !"
ARK_NOT_ASSIGNED = "*** No ark assigned yet: {0} ***"
MISSING_FILES = "*** Missing Files: "
ABNORMAL_FILES = "*** Files do not belong to any GeoTIFF/Shapefile: "
PYRIMID_ADDED = "*** Pyrimid added to these GeoTIFFs: "
PYRIMID_NEEDED = "*** Pyrimid needed for these GeoTIFFs: "
FGDC_TO_ISO = "*** {0} - is in FGDC standard, it has been transfered to ESRI ISO. "
NOT_ESRI_ISO = "*** {0} - A metadata file detected, but it is not in ESRI ISO METADATA STANDARd, PLEASE CHECK. "
NO_ESRIISO_XML = "*** No ESRIISO metadata file: {0} "
New_ESRIIO_XML = "*** New ESRIISO metedata file created: {0}"
SAVE_TO_CSV = "*** CSV files exported: {0}."
OLD_ARK_FILES_REMOVED = "*** Old Ark files are removed: "
MISSING_CVS_VALUES = '*** Found invalid metadata in this csv file: {0}'
INCORRECT_ROLE_FOR_INDIVIDUAL = 'Line {2}: {0} - Role "{1}" should not have individual. Only role 6 could have individual'
PASS_PROJECTION_VALIDATION = "*** All projections are valid ***"
PASS_CSV_VALIDATION = "*** The updated CSV files are valid ***"
FILES_NOT_MOVED = "*** Files not moved to work batch - name not good,please check, or move manually."
REQUIRED_FIELD = "Required field '{0}' - missing value."
# Keys are used as CSV headers of responsible party csv file
responsibleparty_elements = {
"contact_name": {
"path": "rpIndName",
"type": "string"},
"position": {
"path": "rpPosName",
"type": "string"},
"organization": {
"path": "rpOrgName",
"type": "string"},
"contact_info": {
"path": "rpCntInfo",
"type": "string"},
"email": {
"path": "rpCntInfo/cntAddress/eMailAdd",
"type": "string"},
"address_type": {
"path": "rpCntInfo/cntAddress",
"type": "attribute",
"key": "addressType",
"values": [("postal", "postal"),
("physical", "physical"),
("both", "both")]},
"address": {
"path": "rpCntInfo/cntAddress/delPoint",
"type": "string"
},
"city": {
"path": "rpCntInfo/cntAddress/city",
"type": "string"},
"state": {
"path": "rpCntInfo/cntAddress/adminArea",
"type": "string"},
"zip": {
"path": "rpCntInfo/cntAddress/postCode",
"type": "string"},
"country": {
"path": "rpCntInfo/cntAddress/country",
"type": "string"},
"phone_no": {
"path": "rpCntInfo/voiceNum",
"type": "string"},
"fax_no": {
"path": "rpCntInfo/faxNum",
"type": "string"},
"hours": {
"path": "rpCntInfo/cntHours",
"type": "string"},
"instruction": {
"path": "rpCntInfo/cntInstr",
"type": "string"}
}
# 1) Keys are used as CSV headers of the main CSV file, header sequence is from CSV_HEADER_TRANSFORM
# 2) Elements with "key_path":True are supposed to have multiple occurrences in ISO19139
transform_elements = {
"title_s": {
"path": "dataIdInfo/idCitation/resTitle",
"type": "string"},
"alternativeTitle": {
"path": "dataIdInfo/idCitation/resAltTitle",
"type": "string"},
"summary": {
"path": "dataIdInfo/idPurp",
"type": "string"},
"description": {
"path": "dataIdInfo/idAbs",
"type": "string",
"html": True},
"language": {
"path": "dataIdInfo/dataLang/languageCode",
"attribute": True,
"type": "string"},
"subject": {
"path": "dataIdInfo/themeKeys/keyword",
"key_path":True,
"type": "string"},
"date_s": {
"path": "dataIdInfo/idCitation/date/pubDate",
"type": "string"},
"spatialSubject": {
"path": "dataIdInfo/placeKeys/keyword",
"key_path": True,
"type": "string"},
"rights_general": {
"path": "dataIdInfo/resConst/Consts/useLimit",
"html": True,
"type": "string"},
"rights_legal": {
"path": "dataIdInfo/resConst/LegConsts/useLimit",
"type": "string"},
"rights_security": {
"path": "dataIdInfo/resConst/SecConsts/useLimit",
"type": "string"},
"modified_date_dt": {
"path": "Esri/ModDate",
"type": "date",
"default": datetime.today().strftime('%Y%m%d')},
"topicISO": {
"path": "dataIdInfo/tpCat/TopicCatCd",
"attribute": True,
"key_path": True,
"type": "string"},
"keyword": {
"path": "dataIdInfo/searchKeys/keyword",
"key_path": True,
"type": "string"
},
"temporalCoverage": {
"path": "dataIdInfo/tempKeys/keyword",
"key_path": True,
"type": "string"},
"collectionTitle": {
"path":'dataIdInfo/idCitation/collTitle',
"type": "string"
}
}
# required elements - header names : "title_s", "solrYear",,"accessRights_s","modified_date_dt","resourceClass"
ResourceClass_Codes = [
"collections",
"datasets",
"imagery",
"maps",
"web services",
"websites",
"other"
]
resourceType = [
"LiDAR",
"Line data",
"Mesh data",
"Multi-spectral data",
"Oblique photographs",
"Point cloud data",
"Point data",
"Polygon data",
"Raster data",
"Satellite imagery",
"Table data",
"Aerial photographs",
"Aerial views",
"Aeronautical charts",
"Armillary spheres",
"Astronautical charts",
"Astronomical models",
"Atlases",
"Bathymetric maps",
"Block diagrams",
"Bottle-charts",
"Cadastral maps",
"Cartographic materials",
"Cartographic materials for people with visual disabilities",
"Celestial charts",
"Celestial globes",
"Census data",
"Children's atlases",
"Children's maps",
"Comparative maps",
"Composite atlases",
"Digital elevation models",
"Digital maps",
"Early maps",
"Ephemerides",
"Ethnographic maps",
"Fire insurance maps",
"Flow maps",
"Gazetteers",
"Geological cross-sections",
"Geological maps",
"Globes",
"Gores (Maps)",
"Gravity anomaly maps",
"Index maps",
"Linguistic atlases",
"Loran charts",
"Manuscript maps",
"Mappae mundi",
"Mental maps",
"Meteorological charts",
"Military maps",
"Mine maps",
"Miniature maps",
"Nautical charts",
"Outline maps",
"Photogrammetric maps",
"Photomaps",
"Physical maps",
"Pictorial maps",
"Plotting charts",
"Portolan charts",
"Quadrangle maps",
"Relief models",
"Remote-sensing maps",
"Road maps",
"Statistical maps",
"Stick charts",
"Strip maps",
"Thematic maps",
"Topographic maps",
"Tourist maps",
"Upside-down maps",
"Wall maps",
"World atlases",
"World maps",
"Worm's-eye views",
"Zoning maps"
]
isoTopic = {
"001":"Farming",
"002":"Biota",
"003":"Boundaries",
"004":"Climatology, Meteorology and Atmosphere",
"005":"Economy",
"006":"Elevation",
"007":"Environment",
"008":"Geoscientific Information",
"009":"Health",
"010":"Imagery and Base Maps",
"011":"Intelligence and Military",
"012":"Inland Waters",
"013":"Location",
"014":"Oceans",
"015":"Planning and Cadastral",
"016":"Society",
"017":"Structure",
"018":"Transportation",
"019":"Utilities and Communication"
}
raster_exts = [".tif",".aux",".tfw",".prj",".tif.ovr"]
vector_exts = [".cpg",
".dbf",
".prj",
".dbf"
".sbn",
".sbx",
".shp",
".shp.xml",
".shx"]
## geoblacklight metadata got from other places:
# dct_creator_sm
# dct_publisher_sm
# schema_provider_s
# locn_geometry
# dct_rights_sm
# dct_rightsHolder_sm
# gbl_wxsIdentifier_s
# dct_references_s
# id
# dct_identifier_sm
# gbl_mdVersion_s
## geoblacklight metadata not included by UCB:
# dcat_centroid_ss
# gbl_fileSize_s
# not used in code, for future reference
# geoblacklight_rolecodes = ["006","010"]
# other_rolecodes = ["001","002","003","004","005","007","008","009","011"]
# ISO 19139 role codes
# ("resource provider", "001"),
# ("custodian", "002"),
# ("owner", "003"),
# ("user", "004"),
# ("distributer", "005"),
# ("originator", "006"),
# ("point of contact", "007"),
# ("principal investigator", "008"),
# ("processor", "009"),
# ("publisher", "010"),
# ("author", "011")]
# transform_elements = {
#
# "title_s": { #1
# "path": "dataIdInfo/idCitation/resTitle",
# "type": "string"},
#
# "alternativeTitle": { #2
# "path": "dataIdInfo/idCitation/resAltTitle",
# "type": "string"},
#
# "description": { #3
# "path": "dataIdInfo/idAbs",
# "type": "string"},
#
# "language": { #4
# "path": "dataIdInfo/dataLang/languageCode",
# "attribute": True,
# "type": "string"},
# # 5 - originator
# # 6 - Publisher
# "subject": { #7
# "path": "dataIdInfo/themeKeys/keyword",
# "type": "string"},
#
# "date_s": { #8
# "path": "dataIdInfo/idCitation/date/pubDate",
# "type": "string"},
#
# "spatialSubject": { # 9
# "path": "dataIdInfo/placeKeys/keyword",
# "type": "string"},
#
# # 10 -collectionTitle
# # "collectionTitle_1": { # complicated
# | |
= len(verts)
Deg_Step = 360.0 /float(6)
for i in range((6/2)+1):
x = sin(radians(i*Deg_Step))* Flat_Radius
y = cos(radians(i*Deg_Step))* Flat_Radius
verts.append([x,y,0-Outter_Radius_Height])
faces.extend(Allen_Fill(FaceStart_Outside,0))
FaceStart_Bottom = len(verts)
Deg_Step = 360.0 /float(6)
for i in range((6/2)+1):
x = sin(radians(i*Deg_Step))* Flat_Radius
y = cos(radians(i*Deg_Step))* Flat_Radius
verts.append([x,y,0-HEIGHT])
faces.extend(Build_Face_List_Quads(FaceStart_Inside,3,1,TRUE))
faces.extend(Fill_Ring_Face(FaceStart_Bottom,4))
M_Verts,M_Faces = Mirror_Verts_Faces(verts,faces,'y')
verts.extend(M_Verts)
faces.extend(M_Faces)
return verts,faces,OUTTER_RADIUS * 2.0
##########################################################################################
##########################################################################################
## Create Phillips Bit
##########################################################################################
##########################################################################################
def Phillips_Fill(OFFSET,FLIP= 0):
faces = []
Lookup = [[0,1,10],
[1,11,10],
[1,2,11],
[2,12,11],
[2,3,12],
[3,4,12],
[4,5,12],
[5,6,12],
[6,7,12],
[7,13,12],
[7,8,13],
[8,14,13],
[8,9,14],
[10,11,16,15],
[11,12,16],
[12,13,16],
[13,14,17,16],
[15,16,17,18]
]
for i in Lookup:
if FLIP:
if len(i) == 3:
faces.append([OFFSET+i[2],OFFSET+i[1],OFFSET+i[0]])
else:
faces.append([OFFSET+i[3],OFFSET+i[2],OFFSET+i[1],OFFSET+i[0]])
else:
if len(i) == 3:
faces.append([OFFSET+i[0],OFFSET+i[1],OFFSET+i[2]])
else:
faces.append([OFFSET+i[0],OFFSET+i[1],OFFSET+i[2],OFFSET+i[3]])
return faces
def Create_Phillips_Bit(FLAT_DIA,FLAT_WIDTH,HEIGHT):
Div = 36
verts = []
faces = []
FLAT_RADIUS = FLAT_DIA * 0.5
OUTTER_RADIUS = FLAT_RADIUS * 1.05
Flat_Half = float(FLAT_WIDTH)/2.0
FaceStart_Outside = len(verts)
Deg_Step = 360.0 /float(Div)
for i in range((Div/4)+1): # only do half and mirror later
x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
verts.append([x,y,0])
FaceStart_Inside = len(verts)
verts.append([0,FLAT_RADIUS,0]) #10
verts.append([Flat_Half,FLAT_RADIUS,0]) #11
verts.append([Flat_Half,Flat_Half,0]) #12
verts.append([FLAT_RADIUS,Flat_Half,0]) #13
verts.append([FLAT_RADIUS,0,0]) #14
verts.append([0,Flat_Half,0-HEIGHT]) #15
verts.append([Flat_Half,Flat_Half,0-HEIGHT]) #16
verts.append([Flat_Half,0,0-HEIGHT]) #17
verts.append([0,0,0-HEIGHT]) #18
faces.extend(Phillips_Fill(FaceStart_Outside,TRUE))
Spin_Verts,Spin_Face = SpinDup(verts,faces,360,4,'z')
return Spin_Verts,Spin_Face,OUTTER_RADIUS * 2
##########################################################################################
##########################################################################################
## Create Head Types
##########################################################################################
##########################################################################################
def Max_Pan_Bit_Dia(HEAD_DIA):
HEAD_RADIUS = HEAD_DIA * 0.5
XRad = HEAD_RADIUS * 1.976
return (sin(radians(10))*XRad) * 2.0
def Create_Pan_Head(HOLE_DIA,HEAD_DIA,SHANK_DIA,HEIGHT,RAD1,RAD2,FACE_OFFSET):
DIV = 36
HOLE_RADIUS = HOLE_DIA * 0.5
HEAD_RADIUS = HEAD_DIA * 0.5
SHANK_RADIUS = SHANK_DIA * 0.5
verts = []
faces = []
Row = 0
BEVEL = HEIGHT * 0.01
#Dome_Rad = HEAD_RADIUS * (1.0/1.75)
Dome_Rad = HEAD_RADIUS * 1.12
RAD_Offset = HEAD_RADIUS * 0.96
OtherRad = HEAD_RADIUS * 0.16
OtherRad_X_Offset = HEAD_RADIUS * 0.84
OtherRad_Z_Offset = HEAD_RADIUS * 0.504
XRad = HEAD_RADIUS * 1.976
ZRad = HEAD_RADIUS * 1.768
EndRad = HEAD_RADIUS * 0.284
EndZOffset = HEAD_RADIUS * 0.432
HEIGHT = HEAD_RADIUS * 0.59
# Dome_Rad = 5.6
# RAD_Offset = 4.9
# OtherRad = 0.8
# OtherRad_X_Offset = 4.2
# OtherRad_Z_Offset = 2.52
# XRad = 9.88
# ZRad = 8.84
# EndRad = 1.42
# EndZOffset = 2.16
# HEIGHT = 2.95
FaceStart = FACE_OFFSET
z = cos(radians(10))*ZRad
verts.append([HOLE_RADIUS,0.0,(0.0-ZRad)+z])
Start_Height = 0 - ((0.0-ZRad)+z)
Row += 1
#for i in range(0,30,10): was 0 to 30 more work needed to make this look good.
for i in range(10,30,10):
x = sin(radians(i))*XRad
z = cos(radians(i))*ZRad
verts.append([x,0.0,(0.0-ZRad)+z])
Row += 1
for i in range(20,140,10):
x = sin(radians(i))*EndRad
z = cos(radians(i))*EndRad
if ((0.0 - EndZOffset)+z) < (0.0-HEIGHT):
verts.append([(HEAD_RADIUS -EndRad)+x,0.0,0.0 - HEIGHT])
else:
verts.append([(HEAD_RADIUS -EndRad)+x,0.0,(0.0 - EndZOffset)+z])
Row += 1
verts.append([SHANK_RADIUS,0.0,(0.0-HEIGHT)])
Row += 1
verts.append([SHANK_RADIUS,0.0,(0.0-HEIGHT)-Start_Height])
Row += 1
sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
sVerts.extend(verts) #add the start verts to the Spin verts to complete the loop
faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV))
Global_Head_Height = HEIGHT ;
return Move_Verts_Up_Z(sVerts,Start_Height),faces,HEIGHT
def Create_Dome_Head(HOLE_DIA,HEAD_DIA,SHANK_DIA,HEIGHT,RAD1,RAD2,FACE_OFFSET):
DIV = 36
HOLE_RADIUS = HOLE_DIA * 0.5
HEAD_RADIUS = HEAD_DIA * 0.5
SHANK_RADIUS = SHANK_DIA * 0.5
verts = []
faces = []
Row = 0
BEVEL = HEIGHT * 0.01
#Dome_Rad = HEAD_RADIUS * (1.0/1.75)
Dome_Rad = HEAD_RADIUS * 1.12
#Head_Height = HEAD_RADIUS * 0.78
RAD_Offset = HEAD_RADIUS * 0.98
Dome_Height = HEAD_RADIUS * 0.64
OtherRad = HEAD_RADIUS * 0.16
OtherRad_X_Offset = HEAD_RADIUS * 0.84
OtherRad_Z_Offset = HEAD_RADIUS * 0.504
# Dome_Rad = 5.6
# RAD_Offset = 4.9
# Dome_Height = 3.2
# OtherRad = 0.8
# OtherRad_X_Offset = 4.2
# OtherRad_Z_Offset = 2.52
#
FaceStart = FACE_OFFSET
verts.append([HOLE_RADIUS,0.0,0.0])
Row += 1
for i in range(0,60,10):
x = sin(radians(i))*Dome_Rad
z = cos(radians(i))*Dome_Rad
if ((0.0-RAD_Offset)+z) <= 0:
verts.append([x,0.0,(0.0-RAD_Offset)+z])
Row += 1
for i in range(60,160,10):
x = sin(radians(i))*OtherRad
z = cos(radians(i))*OtherRad
z = (0.0-OtherRad_Z_Offset)+z
if z < (0.0-Dome_Height):
z = (0.0-Dome_Height)
verts.append([OtherRad_X_Offset+x,0.0,z])
Row += 1
verts.append([SHANK_RADIUS,0.0,(0.0-Dome_Height)])
Row += 1
sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
sVerts.extend(verts) #add the start verts to the Spin verts to complete the loop
faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV))
return sVerts,faces,Dome_Height
def Create_Cap_Head(HOLE_DIA,HEAD_DIA,SHANK_DIA,HEIGHT,RAD1,RAD2):
DIV = 36
HOLE_RADIUS = HOLE_DIA * 0.5
HEAD_RADIUS = HEAD_DIA * 0.5
SHANK_RADIUS = SHANK_DIA * 0.5
verts = []
faces = []
Row = 0
BEVEL = HEIGHT * 0.01
FaceStart = len(verts)
verts.append([HOLE_RADIUS,0.0,0.0])
Row += 1
#rad
for i in range(0,100,10):
x = sin(radians(i))*RAD1
z = cos(radians(i))*RAD1
verts.append([(HEAD_RADIUS-RAD1)+x,0.0,(0.0-RAD1)+z])
Row += 1
verts.append([HEAD_RADIUS,0.0,0.0-HEIGHT+BEVEL])
Row += 1
verts.append([HEAD_RADIUS-BEVEL,0.0,0.0-HEIGHT])
Row += 1
#rad2
for i in range(0,100,10):
x = sin(radians(i))*RAD2
z = cos(radians(i))*RAD2
verts.append([(SHANK_RADIUS+RAD2)-x,0.0,(0.0-HEIGHT-RAD2)+z])
Row += 1
sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
sVerts.extend(verts) #add the start verts to the Spin verts to complete the loop
faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV))
return sVerts,faces,HEIGHT+RAD2
def Create_Hex_Head(FLAT,HOLE_DIA,SHANK_DIA,HEIGHT):
verts = []
faces = []
HOLE_RADIUS = HOLE_DIA * 0.5
Half_Flat = FLAT/2
TopBevelRadius = Half_Flat - (Half_Flat* (0.05/8))
Undercut_Height = (Half_Flat* (0.05/8))
Shank_Bevel = (Half_Flat* (0.05/8))
Flat_Height = HEIGHT - Undercut_Height - Shank_Bevel
#Undercut_Height = 5
SHANK_RADIUS = SHANK_DIA/2
Row = 0;
verts.append([0.0,0.0,0.0])
FaceStart = len(verts)
#inner hole
x = sin(radians(0))*HOLE_RADIUS
y = cos(radians(0))*HOLE_RADIUS
verts.append([x,y,0.0])
x = sin(radians(60/6))*HOLE_RADIUS
y = cos(radians(60/6))*HOLE_RADIUS
verts.append([x,y,0.0])
x = sin(radians(60/3))*HOLE_RADIUS
y = cos(radians(60/3))*HOLE_RADIUS
verts.append([x,y,0.0])
x = sin(radians(60/2))*HOLE_RADIUS
y = cos(radians(60/2))*HOLE_RADIUS
verts.append([x,y,0.0])
Row += 1
#bevel
x = sin(radians(0))*TopBevelRadius
y = cos(radians(0))*TopBevelRadius
vec1 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,0.0])
x = sin(radians(60/6))*TopBevelRadius
y = cos(radians(60/6))*TopBevelRadius
vec2 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,0.0])
x = sin(radians(60/3))*TopBevelRadius
y = cos(radians(60/3))*TopBevelRadius
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,0.0])
x = sin(radians(60/2))*TopBevelRadius
y = cos(radians(60/2))*TopBevelRadius
vec4 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,0.0])
Row += 1
#Flats
x = tan(radians(0))*Half_Flat
dvec = vec1 - Mathutils.Vector([x,Half_Flat,0.0])
verts.append([x,Half_Flat,-dvec.length])
x = tan(radians(60/6))*Half_Flat
dvec = vec2 - Mathutils.Vector([x,Half_Flat,0.0])
verts.append([x,Half_Flat,-dvec.length])
x = tan(radians(60/3))*Half_Flat
dvec = vec3 - Mathutils.Vector([x,Half_Flat,0.0])
Lowest_Point = -dvec.length
verts.append([x,Half_Flat,-dvec.length])
x = tan(radians(60/2))*Half_Flat
dvec = vec4 - Mathutils.Vector([x,Half_Flat,0.0])
Lowest_Point = -dvec.length
verts.append([x,Half_Flat,-dvec.length])
Row += 1
#down Bits Tri
x = tan(radians(0))*Half_Flat
verts.append([x,Half_Flat,Lowest_Point])
x = tan(radians(60/6))*Half_Flat
verts.append([x,Half_Flat,Lowest_Point])
x = tan(radians(60/3))*Half_Flat
verts.append([x,Half_Flat,Lowest_Point])
x = tan(radians(60/2))*Half_Flat
verts.append([x,Half_Flat,Lowest_Point])
Row += 1
#down Bits
x = tan(radians(0))*Half_Flat
verts.append([x,Half_Flat,-Flat_Height])
x = tan(radians(60/6))*Half_Flat
verts.append([x,Half_Flat,-Flat_Height])
x = tan(radians(60/3))*Half_Flat
verts.append([x,Half_Flat,-Flat_Height])
x = tan(radians(60/2))*Half_Flat
verts.append([x,Half_Flat,-Flat_Height])
Row += 1
#under cut
x = sin(radians(0))*Half_Flat
y = cos(radians(0))*Half_Flat
vec1 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height])
x = sin(radians(60/6))*Half_Flat
y = cos(radians(60/6))*Half_Flat
vec2 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height])
x = sin(radians(60/3))*Half_Flat
y = cos(radians(60/3))*Half_Flat
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height])
x = sin(radians(60/2))*Half_Flat
y = cos(radians(60/2))*Half_Flat
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height])
Row += 1
#under cut down bit
x = sin(radians(0))*Half_Flat
y = cos(radians(0))*Half_Flat
vec1 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
x = sin(radians(60/6))*Half_Flat
y = cos(radians(60/6))*Half_Flat
vec2 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
x = sin(radians(60/3))*Half_Flat
y = cos(radians(60/3))*Half_Flat
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
x = sin(radians(60/2))*Half_Flat
y = cos(radians(60/2))*Half_Flat
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
Row += 1
#under cut to Shank BEVEAL
x = sin(radians(0))*(SHANK_RADIUS+Shank_Bevel)
y = cos(radians(0))*(SHANK_RADIUS+Shank_Bevel)
vec1 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
x = sin(radians(60/6))*(SHANK_RADIUS+Shank_Bevel)
y = cos(radians(60/6))*(SHANK_RADIUS+Shank_Bevel)
vec2 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
x = sin(radians(60/3))*(SHANK_RADIUS+Shank_Bevel)
y = cos(radians(60/3))*(SHANK_RADIUS+Shank_Bevel)
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
x = sin(radians(60/2))*(SHANK_RADIUS+Shank_Bevel)
y = cos(radians(60/2))*(SHANK_RADIUS+Shank_Bevel)
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height])
Row += 1
#under cut to Shank BEVEAL
x = sin(radians(0))*SHANK_RADIUS
y = cos(radians(0))*SHANK_RADIUS
vec1 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])
x = sin(radians(60/6))*SHANK_RADIUS
y = cos(radians(60/6))*SHANK_RADIUS
vec2 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])
x = sin(radians(60/3))*SHANK_RADIUS
y = cos(radians(60/3))*SHANK_RADIUS
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])
x = sin(radians(60/2))*SHANK_RADIUS
y = cos(radians(60/2))*SHANK_RADIUS
vec3 = Mathutils.Vector([x,y,0.0])
verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])
Row += 1
#Global_Head_Height = 0 - (-HEIGHT-0.1)
faces.extend(Build_Face_List_Quads(FaceStart,3,Row - 1))
Mirror_Verts,Mirror_Faces = Mirror_Verts_Faces(verts,faces,'y')
verts.extend(Mirror_Verts)
faces.extend(Mirror_Faces)
Spin_Verts,Spin_Faces = SpinDup(verts,faces,360,6,'z')
return Spin_Verts,Spin_Faces,0 - (-HEIGHT)
##########################################################################################
##########################################################################################
## Create Bolt
##########################################################################################
##########################################################################################
def MakeBolt():
global Phillips_Bit_Depth
global Philips_Bit_Dia
global Allen_Bit_Depth
global Allen_Bit_Flat_Distance
global Hex_Head_Height
global Hex_Head_Flat_Distance
global Cap_Head_Dia
global Cap_Head_Height
global Dome_Head_Dia
global Pan_Head_Dia
global Shank_Dia
global Shank_Length
global Thread_Length
global Major_Dia
global Minor_Dia
global Pitch
global Crest_Percent
global Root_Percent
verts | |
default=False)
stack_field = models.ForeignKey(Stack, verbose_name='Pilha', blank=True, null=True)
# warranty = models.DateTimeField('Garantia', blank=True, null=True)
snmp = models.BooleanField(u'Habilitado para SNMP?', default=False, editable=False)
snmpcom = models.CharField(u'SNMP Community - RO', max_length=25, blank=True, null=True, editable=False)
objects = SwitchManager()
changed_by = models.ForeignKey(User, related_name="switch_changed_by", null=True, blank=True)
history = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
def ports_total(self):
return self._ports_total or 0
# validacao
def clean(self):
g = self.manageable
# o = self.ownerid
u = self.url
user = self.admuser
pwd = self.admpass
# if o == None:
# raise ValidationError("Campo Patrimônio é obrigatório")
if self.active:
if not self.rack:
raise ValidationError("Um switch ativado deve estar instalado em um rack")
if self.stacked:
if not self.stack_field:
raise ValidationError("Necessário selecionar a pilha a que o switch pertence")
if self.url:
raise ValidationError("Switch empilhado não deve conter endereço de interface de gerência")
if not self.active:
raise ValidationError("Um switch empilhado deve estar ativado")
if not self.stacked:
if self.stack_field:
raise ValidationError("Campo pilha deve ser preenchico apenas se o switch estiver empilhado")
if g == True:
if u == u'':
raise ValidationError("Switch gerenciável - Informe o endereço da interface de gerência do switch")
if user == u'':
raise ValidationError("Switch gerenciável - Informe nome do usuário")
if pwd == u'':
raise ValidationError("Switch gerenciável - Informe senha")
def __unicode__(self):
# return self.name
if self.rack:
return u'%s (%s)' % (self.name, self.rack.name)
else:
return u'%s' % (self.name)
class Patchpanel(models.Model):
class Meta:
verbose_name = 'Patch Panel'
verbose_name_plural = 'Patch Panels'
ordering = ['num']
num = models.CharField('Num/Id', max_length=50)
rack = models.ForeignKey(Rack)
ports = models.IntegerField(verbose_name=u'nº de portas')
def clean(self):
try:
pp = Patchpanel.objects.all().filter(num=self.num, rack=self.rack).exclude(pk=self.id)
if pp:
raise ValidationError(u"Este patchpanel já foi cadastrado neste rack")
except:
raise ValidationError(u"Verifique o preenchimento dos campos abaixo")
def __unicode__(self):
return u'%s (%s)' % (self.num, self.rack)
# return self.num
class Patchpanelport(models.Model):
class Meta:
verbose_name = 'Patch panel - porta'
verbose_name_plural = 'Patch panel - portas'
ordering = ['num']
num = models.CharField('Num/Id da Porta', max_length=60)
patchpanel = models.ForeignKey(Patchpanel)
comments = models.TextField(u'Observações', max_length=2000, blank=True)
"""
verificar portas_cadastradas - pegando todas as portas....
Portasw.objects.get(id=1).switch.portas .
Portasw.objects.all().filter(switch="1").count()
"""
def clean(self):
pp = Patchpanelport.objects.all().filter(num=self.num, patchpanel=self.patchpanel).exclude(pk=self.id)
if pp:
raise ValidationError("Esta porta já foi cadastrada neste patchpanel")
portas_cadastradas = Patchpanelport.objects.all().filter(patchpanel=self.patchpanel).count()
portas_disponiveis = self.patchpanel.ports
if portas_disponiveis <= portas_cadastradas:
raise ValidationError("O switch selecionado ja possui todas as portas ocupadas")
def __unicode__(self):
return '%s (%s)' % (self.num, self.patchpanel)
class Phonecategory(models.Model):
class Meta:
verbose_name = 'Telefone/ramal - Categoria/Classe'
verbose_name_plural = 'Telefone/ramal - Categorias'
ordering = ['name']
name = models.CharField(u'Descrição', max_length=100, unique=True)
def __unicode__(self):
return unicode(self.name)
class Phonetype(models.Model):
class Meta:
verbose_name = 'Telefone - Tipo/tecnologia de telefone'
verbose_name_plural = 'Telefone - Tipo/tecnologia de telefone'
ordering = ['name']
name = models.CharField(max_length=100, unique=True, help_text="Digital, analógico, IP")
comments = models.TextField(u'Observações', max_length=2000, blank=True)
def __unicode__(self):
return unicode(self.name)
class Phone(models.Model):
class Meta:
verbose_name = 'Telefone/Senha'
verbose_name_plural = 'Telefones/Senhas'
ordering = ['num']
permissions = (
("view_password", "Can see password"),
("change_password", "Can change password"),
)
num = models.CharField('Número', max_length=14, unique=True, help_text='Número do ramal ou código da senha')
user = models.ForeignKey(Person, blank=True, null=True, verbose_name=u'Usuário')
place = models.ForeignKey(Place, verbose_name=u'Local', blank=True, null=True)
active = models.BooleanField(default=False, verbose_name='Ativo', help_text='Indica se número está em uso ou não')
password = models.BooleanField(default=False, verbose_name='É senha',
help_text='Indica se número se refere a uma senha')
#newpassword = models.BooleanField(default=True, verbose_name='Senha nova',
# help_text='Indica se a senha é nova ou não')
phonecategory = models.ForeignKey(Phonecategory, verbose_name=u'Categoria', blank=True,
null=True, help_text='Indica o tipo de chamada telefônica permitida')
telephonetype = models.ForeignKey(Phonetype, verbose_name=u'Tipo/tecnologia', blank=True, null=True,
help_text="Digital, analógico, IP")
phonehw = models.CharField('Aparelho telefônico', max_length=50, help_text='Identificação do aparelho telefônico', blank=True, null=True)
comments = models.TextField(u'Observações', max_length=2000, blank=True)
date_creation = models.DateField(u'Data de cadastro', editable=False)
date_modification = models.DateTimeField(u'Data de modificação', editable=False)
dist = models.IntegerField(blank=True, null=True, help_text="Posição no quadro de distribuição")
bloco = models.IntegerField(blank=True, null=True, help_text="Identificação no bloco")
par = models.IntegerField(blank=True, null=True, help_text="posição no bloco")
dg = models.IntegerField(blank=True, null=True, help_text="identificação ou posição no DG")
changed_by = models.ForeignKey(User, related_name="phone_changed_by", null=True, blank=True)
history = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
def clean(self):
p = self.password
pl = self.place
a = self.active
u = self.user
#n = self.newpassword
if p == True:
if pl != None:
raise ValidationError("Senhas não são associadas a locais")
#if a == True:
# if u == None:
# raise ValidationError("Campo Usuário é obrigatório para senhas ativas")
if u and not self.phonecategory:
raise ValidationError("Informe a categoria ou tipo de ligação que esta senha poderá realizar")
#if n == True:
# raise ValidationError("Senhas novas ou disponíveis devem estar desativadas")
#if a == False:
# if n == False:
# if u == None:
# raise ValidationError("Nome do antigo usuário é obrigatório para senhas desativadas")
#if n == True:
# if u != None:
# raise ValidationError("Senhas novas não podem estar associadas a usuários")
if a == True:
# if u == None:
# raise ValidationError("Campo Usuário é obrigatório")
if p == False:
if pl == None:
raise ValidationError("Campo local é obrigatório")
def save(self):
# type: () -> object
if not self.id:
self.date_creation = datetime.date.today()
self.date_modification = datetime.datetime.today()
super(Phone, self).save()
def __unicode__(self):
return unicode(self.num)
class Phoneownership(models.Model):
class Meta:
verbose_name = 'Posse de telefone'
verbose_name_plural = 'Posses de telefone'
active = models.BooleanField(editable=False, default=True, verbose_name="Ativo(a)")
phone = models.ForeignKey(Phone, blank=True, null=True, verbose_name='Telefone')
user = models.ForeignKey(Person, blank=True, null=True, verbose_name=u'Usuário')
date_activation = models.DateTimeField(u'Data de ativação', blank=True, null=True, editable=False,
auto_now_add=True)
date_deactivation = models.DateTimeField(u'Data de desativação', blank=True, null=True, editable=False)
def save(self):
# type: () -> object
if not self.id:
# self.date_activation = datetime.date.today()
self.active = True
super(Phoneownership, self).save()
def __unicode__(self):
return unicode(self.user.get_full_name())
class Switchport(models.Model):
    """A physical port on a Switch (UTP or fiber), optionally patched to a device."""
    class Meta:
        verbose_name = 'Switch - Porta'
        verbose_name_plural = 'Switch - Portas'
        ordering = ['num']
    # Port media types.
    UTP = 'UTP'
    FIBRA = 'Fibra'
    TIPO_PORTA_CHOICES = (
        (UTP, 'UTP'),
        (FIBRA, 'Fibra'),
    )
    num = models.IntegerField('Num da Porta', )
    # vlan = models.IntegerField(blank=True,null=True)
    vlans = models.ManyToManyField(Vlan, verbose_name='VLANs', blank=True)
    # vln = models.ForeignKey(Vlan,verbose_name='VLAN',blank=True,null=True,related_name="vln")
    switch = models.ForeignKey(Switch)
    tipo = models.CharField(max_length=20, choices=TIPO_PORTA_CHOICES)
    #host = models.ForeignKey(Host, blank=True, null=True)
    host = models.ForeignKey(Device, blank=True, null=True, related_name='swport_host')
    obs = models.CharField(u'Observações', max_length=100, blank=True)
    """
    verificar portas_cadastradas - pegando todas as portas....
    Portasw.objects.get(id=1).switch.portas .
    Portasw.objects.all().filter(switch="1").count()
    """
    def clean(self):
        """Validate switch capacity and (num, switch, tipo) uniqueness."""
        portas_cadastradas = Switchport.objects.filter(switch=self.switch).count()
        portas_disponiveis = self.switch.ports
        # NOTE(review): the count above includes this row itself when editing an
        # existing port, so edits on a fully-populated switch are rejected —
        # confirm whether `.exclude(pk=self.id)` was intended here.
        if portas_disponiveis <= portas_cadastradas:
            raise ValidationError("O switch selecionado ja possui todas as portas ocupadas")
        # Uniqueness check, excluding this row so updates don't collide with themselves.
        porta = Switchport.objects.all().filter(num=self.num, switch=self.switch, tipo=self.tipo).exclude(pk=self.id)
        if porta:
            raise ValidationError("Já existe uma porta com este número neste switch")
        # if self.host != None:
        #     p = Switchport.objects.filter(host=self.host).exclude(pk = self.id)
        #    if p:
        #        raise ValidationError("Já existe uma porta associada a este host")
    def __unicode__(self):
        # return u'%s (%s)' % (self.num, self.switch.name)
        return u'%s' % (self.num)
class Netpoint(models.Model):
class Meta:
verbose_name = 'Ponto de rede'
verbose_name_plural = 'Pontos de rede'
ordering = ['num']
DESATIVADO = 'desativado'
DADOS = 'dados'
VOZ = 'voz'
VOIP = 'voip'
TIPO_PONTO_CHOICES = (
('', '---------'),
(DESATIVADO, 'Desativado'),
(DADOS, 'Dados'),
(VOZ, 'Voz'),
(VOIP, 'VoIP'),
)
num = models.CharField('Num/Id', max_length=10)
pointtype = models.CharField(max_length=20, default='desativado', choices=TIPO_PONTO_CHOICES, verbose_name='Tipo')
rack = models.ForeignKey(Rack, blank=True, null=True)
patchpanel = models.ForeignKey(Patchpanel, blank=True, null=True)
patchpanelport = models.ForeignKey(Patchpanelport, blank=True, null=True, verbose_name=u'Porta do patchpanel')
switch = models.ForeignKey(Switch, blank=True, null=True)
swport = models.ForeignKey(Switchport, blank=True, null=True, verbose_name='Porta do switch')
place = models.ForeignKey(Place, verbose_name='Localização', blank=True, null=True,
help_text='localização do ponto de rede')
phone = models.ForeignKey(Phone, blank=True, null=True, verbose_name='Telefone/Ramal',
help_text='Telefone associado ao ponto de rede')
# dist = models.IntegerField(blank=True, null=True)
# bloco = models.IntegerField(blank=True, null=True)
# par = models.IntegerField(blank=True, null=True)
# dg = models.IntegerField(blank=True, null=True)
comments = models.TextField(u'Observações', max_length=2000, blank=True)
creation_date = models.DateField(u'Data de cadastro', editable=False)
modification_date = models.DateTimeField(u'Data de modificação', editable=False)
# validacao
def clean(self):
t = self.pointtype
r = self.phone
# d = self.dist
# b = self.bloco
# p = self.par
# dg = self.dg
l = self.place
s = self.switch
ps = self.swport
pp = self.patchpanel
ppp = self.patchpanelport
rck = self.rack
points = Netpoint.objects.filter(phone=self.phone).exclude(pk=self.id)
if t == 'voz' or t == 'voip':
if points and self.phone != None:
raise ValidationError("Já existe um ponto de rede relacionado a este ramal")
if rck == None:
raise ValidationError("Ponto deve estar ligado a um rack")
""" Validação de Patchpanel - Descomente os trechos abaixo para forçar o cadastro obrigatório de patchpanel e portas """
# if pp == None:
# | |
self.conf[DEFAULT_CONTEXT_KEY] = manifest_data.get(DEFAULT_CONTEXT_KEY)
self.conf[IMPORTS_CONTEXT_KEY] = manifest_data.get(IMPORTS_CONTEXT_KEY)
self.conf[CLASS_REGISTRY_CONTEXT_KEY] = manifest_data.get(CLASS_REGISTRY_CONTEXT_KEY)
indexed_db_path = p(bundle_directory, BUNDLE_INDEXED_DB_NAME)
store_name, store_conf = self._store_config_builder.build(
indexed_db_path,
manifest_data.get('dependencies', ()))
self.conf['rdf.store'] = store_name
self.conf['rdf.store_conf'] = store_conf
self.connection = connect(conf=self.conf)
def _fetch_bundle(self, bundle_ident, version):
remotes_list = list(retrieve_remotes(self.remotes_directory))
f = Fetcher(self.bundles_directory, remotes_list)
return f.fetch(bundle_ident, version, self.remotes)
@property
def contexts(self):
'''
`List <list>` of `str`. Context IDs in this bundle
'''
# Since bundles are meant to be immutable, we won't need to add
if self._contexts is not None:
return self._contexts
bundle_directory = self.resolve()
contexts = list()
graphs_directory = p(bundle_directory, 'graphs')
idx_fname = p(graphs_directory, 'index')
if not exists(idx_fname):
raise Exception('Cannot find an index at {}'.format(repr(idx_fname)))
with open(idx_fname, 'rb') as index_file:
for l in index_file:
l = l.strip()
if not l:
continue
ctx, _ = l.split(b'\x00')
contexts.append(ctx.decode('UTF-8'))
self._contexts = frozenset(contexts)
return self._contexts
    @property
    def rdf(self):
        '''The RDF graph for this bundle; connects to the database on first access.'''
        self.initdb()
        return self.conf['rdf.graph']
def __str__(self):
return f'Bundle({self.ident}' + (')' if self.version is None else f', {self.version})')
    def __enter__(self):
        # Establish the database connection so the bundle is usable in a `with` block.
        self.initdb()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Close the database connection
        self.connection.disconnect()
        # Drop references so a later re-entry starts from a clean state.
        self.connection = None
        self.conf = None
    def dependencies(self):
        '''Return the dependency descriptors declared in this bundle's manifest.'''
        return self.manifest_data.get('dependencies', ())
    def load_dependencies_transitive(self):
        '''
        Load dependencies from this bundle transitively

        Yields
        ------
        Bundle
            A direct or indirect dependency of this bundle
        '''
        # The breadth-first traversal lives in the dependency manager.
        return self._bundle_dep_mgr.load_dependencies_transitive()
    def load_dependencies(self):
        '''
        Load direct dependencies of this bundle

        Yields
        ------
        Bundle
            A direct dependency of this bundle
        '''
        # Delegates to the dependency manager's generator.
        return self._bundle_dep_mgr._load_dependencies()
def _lookup_context_bundle(self, context_id):
owner = self._bundle_dep_mgr.lookup_context_bundle(
self.contexts,
context_id)
if owner is self._bundle_dep_mgr:
return self
    def _load_dependency(self, dependencies_item):
        '''Load one dependency, re-raising config errors as `MalformedBundle`.'''
        try:
            return self._bundle_dep_mgr._load_dependency(dependencies_item)
        except BundleDependencyConfigIsMalformed as e:
            # Attach this bundle's directory to the error for easier diagnosis.
            bundle_directory = self.resolve()
            raise MalformedBundle(bundle_directory, str(e)) from e
    def __call__(self, target):
        '''Contextualize *target* within this bundle's stored context.'''
        # Objects that cannot be contextualized pass through unchanged.
        if not target or not hasattr(target, 'contextualize'):
            return target
        self.initdb()
        if self._bundle_context is None:
            # Lazily build (and cache) the bundle-scoped context.
            self._bundle_context = _BundleContext(
                None, conf=self.conf, bundle=self).stored
        return target.contextualize(self._bundle_context)
class BundleDependencyManager(object):
    '''
    Finds the bundle in which a context is defined.

    For a given bundle graph, there is *one* Bundle that "owns" a given context.
    Although multiple bundles may provide that context, the one closest to the root
    of the graph which provides some statements in that context is called the owner.
    Note that bundles on which the owner depends may still be queried; the exact
    behavior is up to the component that uses this one.
    '''
    def __init__(self, dependencies, **common_bundle_arguments):
        # Cache of already-constructed dependency bundles keyed by (id, version).
        self._loaded_dependencies = dict()
        # Keyword arguments forwarded to every Bundle this manager constructs.
        self._common_bundle_arguments = common_bundle_arguments
        # Callable returning the dependency descriptors for the owning bundle.
        self.dependencies = dependencies
    def load_dependencies_transitive(self):
        '''
        Load dependencies from this bundle transitively

        Yields
        ------
        Bundle
            A direct or indirect dependency of this bundle
        '''
        # Breadth-first walk over the dependency graph, de-duplicating by
        # (ident, version) so shared dependencies are yielded only once.
        border = {None: self}
        seen = set()
        while border:
            new_border = {}
            for bnd in border.values():
                for d_bnd in bnd.load_dependencies():
                    key = (d_bnd.ident, d_bnd.version)
                    if key in seen:
                        continue
                    seen.add(key)
                    new_border[key] = d_bnd
                    yield d_bnd
            border = new_border
    def lookup_context_bundle(self, contexts, context_id):
        '''Return the owner of *context_id*: this manager when the context is in
        *contexts* (or is None), a dependency bundle otherwise, or None.'''
        if context_id is None or str(context_id) in contexts:
            return self
        for d in self.dependencies():
            # A dependency that explicitly excludes the context cannot own it.
            d_excludes = frozenset(d.get('excludes', ()))
            if context_id in d_excludes:
                continue
            d_bnd = self._load_dependency(d)
            match = d_bnd._lookup_context_bundle(context_id)
            if match:
                return match
        return None
    def _load_dependencies(self):
        # Generator over direct dependencies, loading each on demand.
        for d in self.dependencies():
            yield self._load_dependency(d)
    load_dependencies = _load_dependencies
    def _load_dependency(self, dependencies_item):
        '''Build (or fetch from cache) the `Bundle` for one dependency descriptor.

        Raises
        ------
        BundleDependencyConfigIsMalformed
            When the descriptor lacks an id or a version.
        '''
        d_id = dependencies_item.get('id')
        if not d_id:
            raise BundleDependencyConfigIsMalformed('Dependency entry is missing an identifier')
        d_version = dependencies_item.get('version')
        if not d_version:
            raise BundleDependencyConfigIsMalformed(f'Dependency entry for {d_id} is'
                    ' missing a version number')
        bundle = self._loaded_dependencies.get((d_id, d_version))
        if not bundle:
            bundle = Bundle(d_id, version=d_version,
                    **self._common_bundle_arguments)
            self._loaded_dependencies[(d_id, d_version)] = bundle
        return bundle
class BundleDependencyConfigIsMalformed(Exception):
    '''Raised when a bundle dependency descriptor lacks an ID or a version.'''
    pass
class BundleDependentStoreConfigBuilder(object):
    '''
    Builds an RDFLib store configuration that depends on bundles.

    The process of building the store configuration requires traversing the graph of
    dependencies so that duplicate dependencies in the graph can be omitted. To support
    this process, this builder will fetch bundles as needed to resolve transitive
    dependencies
    '''
    def __init__(self, bundles_directory=None, remotes_directory=None, remotes=None,
                 read_only=True):
        # Fall back to the package-wide default locations when none are given.
        if not bundles_directory:
            bundles_directory = DEFAULT_BUNDLES_DIRECTORY
        self.bundles_directory = realpath(expandvars(expanduser(bundles_directory)))
        if not remotes_directory:
            remotes_directory = DEFAULT_REMOTES_DIRECTORY
        self.remotes_directory = realpath(expandvars(expanduser(remotes_directory)))
        self.remotes = remotes
        self.read_only = read_only
    def build(self, indexed_db_path, dependencies, bundle_directory=None):
        '''
        Builds the store configuration

        Parameters
        ----------
        indexed_db_path : str
            Path to the indexed database of the store that depends on the listed
            dependencies
        dependencies : list of dict
            List of dependencies info at least including keys for 'id' and 'version'
        bundle_directory : str, optional
            Path to the bundle directory for the dependent store, if the dependent store
            is a bundle. Used for information in an exceptional path, but not otherwise
            used

        Returns
        -------
        str
            The type of the store. This is the name used to look up the RDFLib store plugin
        object
            The configuration for the store. This is the object that will be passed to
            `rdflib.store.Store.open` to configure the store.
        '''
        return 'agg', self._construct_store_config(indexed_db_path, dependencies,
                read_only=self.read_only)
    __call__ = build
    def _construct_store_config(self, indexed_db_path, dependencies,
                                current_path=None, paths=None, bundle_directory=None,
                                read_only=True):
        # 'agg' store config: the local file store first, then one entry per
        # (transitive) dependency.
        if paths is None:
            paths = set()
        if current_path is None:
            current_path = _BDTD()
        dependency_configs = self._gather_dependency_configs(dependencies, current_path, paths, bundle_directory)
        fs_store_config = dict(url=indexed_db_path, read_only=read_only)
        return [
            ('FileStorageZODB', fs_store_config)
        ] + dependency_configs
    @aslist
    def _gather_dependency_configs(self, dependencies, current_path, paths, bundle_directory=None):
        # Generator (collected by @aslist) yielding one store config per dependency.
        for dd in dependencies:
            dep_path = current_path.merge_excludes(dd.get('excludes', ()))
            dep_ident = dd.get('id')
            dep_version = dd.get('version')
            if not dep_ident:
                if bundle_directory:
                    raise MalformedBundle(bundle_directory, 'bundle dependency descriptor is lacking an identifier')
                else:
                    raise ValueError('bundle dependency descriptor is lacking an identifier')
            # De-duplication: a dependency already visited along an equivalent
            # excludes-path contributes nothing new, so stop here.
            # NOTE(review): this is `return`, not `continue`, so remaining
            # siblings are skipped as well — confirm that is intended.
            if (dep_path, (dep_ident, dep_version)) in paths:
                return
            paths.add((dep_path, (dep_ident, dep_version)))
            # Try to resolve locally; on the first failure, fetch from remotes
            # and retry once.
            tries = 0
            while tries < 2:
                try:
                    bundle_directory = find_bundle_directory(self.bundles_directory, dep_ident, dep_version)
                    with open(p(bundle_directory, BUNDLE_MANIFEST_FILE_NAME)) as mf:
                        manifest_data = json.load(mf)
                    break
                except (BundleNotFound, FileNotFoundError):
                    bundle_directory = self._fetch_bundle(dep_ident, dep_version)
                    tries += 1
            # We don't want to include items in the configuration that aren't specified by
            # the dependency descriptor. Also, all of the optionals have defaults that
            # BundleDependencyStore handles itself, so we don't want to impose them here.
            addl_dep_confs = {k: v for k, v in dd.items()
                              if k in ('excludes',) and v}
            yield ('owmeta_core_bds', dict(type='agg',
                                           conf=self._construct_store_config(
                                               p(bundle_directory, BUNDLE_INDEXED_DB_NAME),
                                               manifest_data.get('dependencies', ()),
                                               dep_path, paths, bundle_directory),
                                           **addl_dep_confs))
    def _fetch_bundle(self, bundle_ident, version):
        # Pull the bundle from configured remotes into the local bundles directory.
        remotes_list = list(retrieve_remotes(self.remotes_directory))
        f = Fetcher(self.bundles_directory, remotes_list)
        return f.fetch(bundle_ident, version, self.remotes)
class _BDTD(namedtuple('_BDTD', ('excludes',))):
    '''
    Bundle Dependency Traversal Data (BDTD)

    Carries the state needed while walking bundle dependencies. Shaped like a
    dependency descriptor, but without an ID and version.
    '''
    __slots__ = ()

    def __new__(cls, *args, excludes=(), **kwargs):
        return super(_BDTD, cls).__new__(cls, *args, excludes=excludes, **kwargs)

    def merge_excludes(self, excludes):
        '''Return a copy whose excludes also contain *excludes*, preserving
        order and skipping entries already present.'''
        current = self.excludes
        additions = tuple(e for e in excludes if e not in current)
        return self._replace(excludes=current + additions)
class _BundleContext(Context):
    '''
    `Context` for a bundle.
    '''
    def __init__(self, *args, bundle, **kwargs):
        # The bundle this context belongs to; used to build the mapper lazily.
        super().__init__(*args, **kwargs)
        self.bundle = bundle
        self._mapper = None
    @property
    def mapper(self):
        '''Lazily-created `_BundleMapper` scoped to this context's bundle.'''
        if self._mapper is None:
            self._mapper = _BundleMapper(bundle=self.bundle)
        return self._mapper
class _BundleMapper(Mapper):
    '''`Mapper` that resolves classes through a bundle and, failing that,
    through the bundle's transitive dependencies.'''
    def __init__(self, bundle):
        try:
            bundle_conf = bundle.conf
        except AttributeError:
            # `conf` only exists after the bundle has connected to its database.
            raise Exception('Bundle connection has not been established.'
                            ' Call `initdb` or use the bundle in a context manager')
        super().__init__(name=f'{bundle.ident}' +
                (f'@{bundle.version}' if bundle.version else ''),
                conf=bundle_conf)
        self.bundle = bundle
        # Memo of (rdf_type, context id) -> resolved class.
        self._resolved_classes = dict()
    def resolve_class(self, rdf_type, context):
        '''Resolve *rdf_type* in *context*, consulting dependency bundles'
        class registries when this bundle cannot resolve it. Returns None when
        no bundle can.'''
        prev_resolved_class = self._resolved_classes.get((rdf_type, context.identifier))
        if prev_resolved_class:
            return prev_resolved_class
        # First, try to resolve with this bundle's own registry.
        own_resolved_class = super().resolve_class(rdf_type, context)
        if own_resolved_class:
            self._resolved_classes[(rdf_type, context.identifier)] = own_resolved_class
            return own_resolved_class
        # Fall back to dependencies of the bundle that owns the target context.
        target_id = context.identifier
        target_bundle = self.bundle._lookup_context_bundle(target_id)
        deps = target_bundle.load_dependencies_transitive()
        for bnd in deps:
            # Skip dependencies that declare no class registry context.
            crctx_id = bnd.manifest_data.get(CLASS_REGISTRY_CONTEXT_KEY, None)
            if not crctx_id:
                continue
            with bnd:
                resolved_class = bnd.connection.mapper.resolve_class(rdf_type, context)
                if resolved_class:
                    self._resolved_classes[(rdf_type, context.identifier)] = resolved_class
                    return resolved_class
        return None
class _RemoteHandlerMixin(object):
    '''
    Utility mixin for handling remotes

    The mixed-in class must have a `remotes` attribute which is a list of `Remote`
    '''
    def __init__(self, load_entry_points=True, **kwargs):
        '''
        Parameters
        ----------
        load_entry_points : bool, optional
            If `False`, then entry points will not be loaded
        '''
        super(_RemoteHandlerMixin, self).__init__(**kwargs)
        self.load_entry_points = load_entry_points
    def _get_remotes(self, remotes):
        '''
        Get remotes

        Parameters
        ----------
        remotes : iterable of Remote or str
            A subset of names of remotes to act on and additional remotes to act on

        Raises
        ------
        NoRemoteAvailable
            When no remote is selected at all
        '''
        if self.load_entry_points:
            load_entry_point_loaders()
        instance_remotes = []
        additional_remotes = []
        if remotes:
            configured_remotes = {r.name: r for r in self.remotes}
            for r in remotes:
                if isinstance(r, six.text_type):
                    # NOTE(review): `.get(r)` yields None for an unknown remote
                    # name, and that None is later yielded as a remote — confirm
                    # whether unknown names should raise instead.
                    instance_remotes.append(configured_remotes.get(r))
                elif isinstance(r, Remote):
                    additional_remotes.append(r)
        else:
            # No filter given: act on every configured remote.
            instance_remotes = self.remotes
        has_remote = False
        for rem in chain(additional_remotes, instance_remotes):
            has_remote = True
            yield rem
        if not has_remote:
            raise NoRemoteAvailable()
class Fetcher(_RemoteHandlerMixin):
'''
Fetches bundles from `Remotes <Remote>`
| |
from models import *
import sqlalchemy as sqla
import enum
DIMENSIONS = 8
class State(enum.Enum):
    """Status/outcome of a checkers game."""
    Win = 0
    Loss = 1
    Draw = 2
    # Bug fix: Playing previously reused the value 2, which made it an *alias*
    # of Draw (``State.Playing is State.Draw`` was True), so an in-progress game
    # was indistinguishable from a drawn one. Give it a distinct value.
    Playing = 3
def exists(board: dict, piece: Piece):
    """Return True when some piece already occupies *piece*'s square on *board*."""
    location = (piece.row, piece.column)
    return board.get(location) is not None
# Determines whether *piece* can jump over *pos*. Returns the landing square
# (as a Piece) when the jump is possible, otherwise None.
def show_jump(board: dict, piece: Piece, pos: Piece):
    """Return the landing square for jumping *pos* with *piece*, or None."""
    # Only enemy pieces can be jumped.
    if piece.owner_id == pos.owner_id:
        return None
    # The landing square is two rows/columns beyond the jumped piece,
    # continuing away from the jumper.
    landing_row = piece.row - 2 if piece.row > pos.row else piece.row + 2
    landing_col = piece.column - 2 if piece.column > pos.column else piece.column + 2
    # Reject jumps that would leave the board.
    if not (0 <= landing_row < DIMENSIONS and 0 <= landing_col < DIMENSIONS):
        return None
    landing = Piece(landing_row, landing_col, piece.owner_id)
    # The landing square must be empty.
    if exists(board, landing):
        return None
    return landing
# Returns a list of jumps it can make, otherwise returns none
def check_jump(board: dict, piece: Piece, pos: Piece):
    """Collect jump paths for *piece* over *pos*; each path is a list of the
    jumped-over squares, innermost jump first. Returns [] when no jump exists."""
    # Jump scenario
    jumps = []
    if exists(board, pos):
        new_piece = show_jump(board, piece, pos)
        if new_piece is not None:
            # Recursively see if these can jump
            for i in [-1, 1]:
                # Either moving up the board or down the board
                direction = 1
                if piece.row > pos.row:
                    direction = -1
                # NOTE(review): Piece is constructed with only (row, column)
                # here, while elsewhere it takes (row, column, owner_id) —
                # confirm the third argument has a default value.
                jumps += check_jump(
                    board,
                    new_piece,
                    Piece(new_piece.row + direction, new_piece.column + i)
                )
            # Add any future jumps
            for path in jumps:
                # Add the jump that got us here first
                path.insert(0, pos)
            # Add the single jump as a path regardless
            jumps.append([pos])
    return jumps
def get_moves(board: dict, piece: Piece):
    """Return all legal move paths for *piece*: single diagonal steps onto empty
    squares plus any jump paths over adjacent enemy pieces."""
    # Check the bounds
    potential_moves = []
    # Get all possible piece moves without jumps
    if piece.row < DIMENSIONS - 1:
        # Correct direction for player movement or backwards movement for ai
        if piece.player_owned() or piece.king:
            if piece.column > 0:
                potential_moves.append([Piece(piece.row + 1, piece.column - 1)])
            if piece.column < DIMENSIONS - 1:
                potential_moves.append([Piece(piece.row + 1, piece.column + 1)])
    if piece.row > 0:
        # Correct direction for ai movement or backwards movement for player
        if not piece.player_owned() or piece.king:
            if piece.column > 0:
                potential_moves.append([Piece(piece.row - 1, piece.column - 1)])
            if piece.column < DIMENSIONS - 1:
                potential_moves.append([Piece(piece.row - 1, piece.column + 1)])
    # See if pieces already exist in those positions
    moves = []
    for move_paths in potential_moves.copy():
        # Try to place real piece in if possible
        temp = move_paths[0]
        m = board.get((temp.row, temp.column))
        if m is not None:
            move_paths[0] = m
            # Check the jump scenario
            current_jumps = check_jump(board, piece, m)
            # Jumps exist so add them; an occupied square with no jump is a
            # blocked square and contributes no move.
            if len(current_jumps) > 0:
                moves += current_jumps
        else:
            # Add the single move
            moves.append([temp])
    return moves
def new_game(session: Session, user_id: str, turn=True):
    """
    Create a new checkers game: three rows of pieces for the user and three for
    the AI on the dark squares, plus the game/board state rows.

    :param turn: whether the user moves first
    :param session: database session used for all inserts
    :param user_id: owner of the player-side pieces
    :return: The id of the game created
    """
    pieces = []
    i = 0
    uid = user_id
    user = session.query(User).where(User.id == user_id).scalar()
    if user is not None:
        user.turn = turn
    # Add all of the pieces to the game
    while i < DIMENSIONS ** 2:
        # Skip the two empty middle rows; pieces beyond them belong to the AI.
        if DIMENSIONS * 3 <= i < DIMENSIONS * (DIMENSIONS - 3):
            i = DIMENSIONS * 5 + 1
            uid = encode(b"ai").decode()
            continue
        pieces.append(Piece(i // DIMENSIONS, i % DIMENSIONS, uid))
        # Need to skip an extra one to find the next black
        if (i // DIMENSIONS) % 2 == 0 and i % DIMENSIONS == DIMENSIONS - 2:
            i += 3
        # Don't skip at all, just go to the next black
        elif (i // DIMENSIONS) % 2 == 1 and i % DIMENSIONS == DIMENSIONS - 1:
            i += 1
        # Normal rules when in a row
        else:
            i += 2
    # Add all of the pieces to the table
    session.add_all(pieces)
    # Get the ids of the pieces that have been committed
    session.flush(pieces)
    # Now get the next game id
    new_game_id = session.query(
        # Make sure it returns 0 instead of none
        sqla.func.coalesce(
            # Get the max board state
            sqla.func.max(GameState.id),
            0
        )
    ).scalar() + 1
    # Add players to the game and create them
    game_states = [GameState(new_game_id, user_id)]
    session.add_all(game_states)
    session.flush(game_states)
    # Add all of the board states for this game
    session.add_all(BoardState(new_game_id, piece.id) for piece in pieces)
    # Commit the transaction
    session.commit()
    return new_game_id
def make_king(session: Session, piece: Piece):
    """Promote *piece* to king and commit, when it sits on its far row."""
    # Already promoted — nothing to do.
    if piece.king:
        return
    # A piece is kinged on the row farthest from its starting side.
    reached_far_row = (
        (piece.player_owned() and piece.row == DIMENSIONS - 1) or
        (not piece.player_owned() and piece.row == 0)
    )
    if reached_far_row:
        piece.king = True
        session.commit()
def try_king(piece: Piece):
    """Promote *piece* to king when it has reached its far row.

    Always returns the piece.

    Bug fix: this previously returned None both when the piece was already a
    king and when no promotion applied — only a freshly-promoted piece was
    returned. Callers assign the result back (``piece = try_king(piece)``) and
    then dereference it, so the None return crashed them. The piece is now
    returned on every path, which is backward-compatible for callers that only
    used a truthy result.
    """
    # Already a king — nothing to change.
    if piece.king:
        return piece
    # Promote when the piece sits on the row farthest from its starting side.
    if ((piece.player_owned() and piece.row == DIMENSIONS - 1) or
            (not piece.player_owned() and piece.row == 0)):
        piece.king = True
    return piece
def try_move(board: dict, piece: Piece, pos: tuple):
    """Move *piece* one diagonal step to *pos* on *board*.

    :param board: mapping of (row, column) -> Piece
    :param piece: the piece being moved
    :param pos: (row, column) destination
    :return: the updated board mapping
    :raises InvalidMove: when the destination is off-board, occupied, too far
        away, or backwards for a non-king piece
    """
    # Destructure tuple so it's easier to read
    row, column = pos
    # Bug fix: the bounds check previously used `<= DIMENSIONS`, which accepted
    # row/column == DIMENSIONS — one square off a 0-indexed board.
    if not (0 <= row < DIMENSIONS and 0 <= column < DIMENSIONS):
        raise InvalidMove("Cannot place move off of the board")
    # The AI moves "up" the board (decreasing rows); the player moves "down".
    direction = 1
    if piece.owner_id == encode(b"ai").decode():
        direction = -1
    # Normalized deltas: positive row_diff means "forward" for this owner.
    row_diff = direction * (row - piece.row)
    col_diff = column - piece.column
    if abs(row_diff) != 1 or abs(col_diff) != 1:
        raise InvalidMove("Tried to move too many spaces")
    elif row_diff != 1 and not piece.king:
        raise InvalidMove("Cannot move non-king pieces backwards")
    # See if a piece is already there or not
    if board.get((row, column)) is not None:
        raise InvalidMove("Another piece is already here")
    # Vacate the old square and occupy the new one.
    del board[(piece.row, piece.column)]
    piece.row, piece.column = row, column
    # Bug fix: try_king could return None (e.g. for an already-kinged piece),
    # which clobbered `piece` and crashed on the next line; keep the original
    # piece when no promotion result is returned.
    piece = try_king(piece) or piece
    # Update the piece in the board state
    board[(piece.row, piece.column)] = piece
    # return the new board state
    return board
def try_jump(board: dict, piece: Piece, pos: tuple, end_turn: bool):
    """Jump *piece* over the piece at *pos*, removing it and returning the
    updated board. Raises InvalidMove when no jump is possible."""
    # Try to see if we can jump
    # NOTE(review): `pos` is annotated as a tuple, but show_jump reads
    # `pos.owner_id`/`pos.row` while `del board[pos]` below needs a tuple key —
    # confirm which type callers actually pass.
    new_pos = show_jump(board, piece, pos)
    # If we can't jump say we can't
    if new_pos is None:
        raise InvalidMove("cannot jump piece")
    # NOTE(review): `can_jump` is computed below but never used afterwards —
    # presumably intended to force multi-jump continuation; verify.
    can_jump = False
    if not end_turn:
        moves = get_moves(board, new_pos)
        if len(moves) != 0:
            direction = pos.row - piece.row
            for path in moves:
                for move in path:
                    if (new_pos.row + 2) * direction == move.row:
                        can_jump = True
                        break
                else:
                    continue
                break
    # Delete the piece from the current position before moving
    del board[(piece.row, piece.column)]
    # Delete the piece that got jumped
    del board[pos]
    # Update the piece position
    piece.row, piece.column = new_pos.row, new_pos.column
    # See if the piece is now a king
    # NOTE(review): if try_king returns None (it does when no promotion applies
    # in the original implementation), the next line raises AttributeError —
    # verify try_king's return contract.
    piece = try_king(piece)
    # Add the piece back to the board state
    board[(piece.row, piece.column)] = piece
    # Return the board state
    return board
def make_move(session: Session, game_id: int, piece: Piece, position: dict):
# Get the piece from the database if it exists
# If it doesn't, don't handle the exception
piece = piece.get_from_db(session, game_id)
user = session.query(User).where(User.id == piece.owner_id).scalar()
session.commit()
# Make sure move is within bounds of the board
if not (0 <= position["row"] <= DIMENSIONS and 0 <= position["column"] <= DIMENSIONS):
raise InvalidMove("Cannot place move off of the board")
# Get correct movement direction
direction = 1
if piece.owner_id == encode(b"ai").decode():
direction = -1
# Get tiles moves
row_diff = direction * (position["row"] - piece.row)
col_diff = position["column"] - piece.column
if abs(row_diff) != 1 or abs(col_diff) != 1:
raise InvalidMove("Tried to move too many spaces")
elif row_diff != 1 and not piece.king:
raise InvalidMove("Cannot move non-king pieces backwards")
# See if a piece is already there or not
if Piece(**position).exists(session, game_id):
raise InvalidMove("Another piece is already here")
# Update the new position of the piece
piece.row, piece.column = position["row"], position["column"]
# Commit to the db
session.commit()
# See | |
# <gh_stars>1-10
#!/usr/bin/env python
"""Unit tests run as PYTHONPATH=../../.. python3 ./test_valve_stack.py."""
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import unittest
import ipaddress
import yaml
from ryu.lib import mac
from ryu.ofproto import ofproto_v1_3 as ofp
from faucet import valves_manager
from faucet import valve_of
from faucet.port import (
STACK_STATE_INIT, STACK_STATE_UP,
LACP_PORT_SELECTED, LACP_PORT_UNSELECTED)
from clib.fakeoftable import CONTROLLER_PORT
from clib.valve_test_lib import (
BASE_DP1_CONFIG, CONFIG, STACK_CONFIG, STACK_LOOP_CONFIG, ValveTestBases)
import networkx
class ValveEdgeVLANTestCase(ValveTestBases.ValveTestNetwork):
    """Test root/edge detection and edge VLAN forwarding after adding access
    ports to the edge switch of a 3-switch stack."""

    CONFIG1 = """
dps:
    s1:
        dp_id: 1
        hardware: 'GenericTFM'
        stack:
            priority: 1
        interfaces:
            1:
                stack:
                    dp: s2
                    port: 1
    s2:
        dp_id: 2
        hardware: 'GenericTFM'
        interfaces:
            1:
                stack:
                    dp: s1
                    port: 1
            2:
                stack:
                    dp: s3
                    port: 1
    s3:
        dp_id: 3
        hardware: 'GenericTFM'
        interfaces:
            1:
                stack:
                    dp: s2
                    port: 2
"""

    CONFIG2 = """
dps:
    s1:
        dp_id: 1
        hardware: 'GenericTFM'
        stack:
            priority: 1
        interfaces:
            1:
                stack:
                    dp: s2
                    port: 1
    s2:
        dp_id: 2
        hardware: 'GenericTFM'
        interfaces:
            1:
                stack:
                    dp: s1
                    port: 1
            2:
                stack:
                    dp: s3
                    port: 1
    s3:
        dp_id: 3
        hardware: 'GenericTFM'
        interfaces:
            1:
                stack:
                    dp: s2
                    port: 2
            2:
                native_vlan: 100
            3:
                native_vlan: 100
"""

    def setUp(self):
        """Start the stacked network with no host-facing ports."""
        self.setup_valves(self.CONFIG1)
        self.activate_stack()

    def activate_stack(self):
        """Bring up every stack port on every valve."""
        self.activate_all_ports()
        for valve in self.valves_manager.valves.values():
            for port in valve.dp.ports.values():
                if port.stack:
                    self.set_stack_port_up(port.number, valve)

    def test_edge_vlan(self):
        """Reload with host ports on s3; s3 becomes the stack edge and its two
        native-VLAN ports forward to each other."""
        self.update_config(self.CONFIG2, reload_type=None)
        self.activate_stack()
        s1 = self.valves_manager.valves[1].dp
        self.assertTrue(s1.is_stack_root())
        self.assertFalse(s1.is_stack_edge())
        s2 = self.valves_manager.valves[2].dp
        self.assertFalse(s2.is_stack_root())
        self.assertFalse(s2.is_stack_edge())
        s3 = self.valves_manager.valves[3].dp
        self.assertFalse(s3.is_stack_root())
        self.assertTrue(s3.is_stack_edge())
        # Bug fix: is_output() returns a bool that was previously discarded, so
        # these two forwarding checks could never fail. Assert the result.
        match = {'in_port': 2, 'vlan_vid': 0, 'eth_src': self.P2_V100_MAC}
        self.assertTrue(self.network.tables[3].is_output(match, port=3))
        match = {'in_port': 3, 'vlan_vid': 0, 'eth_src': self.P2_V100_MAC}
        self.assertTrue(self.network.tables[3].is_output(match, port=2))
class ValveStackMCLAGTestCase(ValveTestBases.ValveTestNetwork):
"""Test stacked MCLAG"""
CONFIG = """
dps:
s1:
%s
stack:
priority: 1
interfaces:
1:
description: p1
stack:
dp: s2
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
s2:
hardware: 'GenericTFM'
dp_id: 0x2
interfaces:
1:
description: p1
stack:
dp: s1
port: 1
2:
description: p2
native_vlan: 100
3:
description: p3
native_vlan: 100
lacp: 1
4:
description: p4
native_vlan: 100
lacp: 1
""" % BASE_DP1_CONFIG
def setUp(self):
"""Setup basic loop config"""
self.setup_valves(self.CONFIG)
def get_other_valves(self, valve):
"""Return other running valves"""
return self.valves_manager._other_running_valves(valve) # pylint: disable=protected-access
def test_dpid_nominations(self):
"""Test dpids are nominated correctly"""
self.activate_all_ports()
lacp_ports = {}
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
lacp_ports.setdefault(valve.dp.dp_id, [])
lacp_ports[valve.dp.dp_id].append(port)
port.actor_up()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
# Equal number of LAG ports, choose root DP
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, 0x1,
'Expected nominated DPID %s but found %s' % (0x1, nominated_dpid))
# Choose DP with most UP LAG ports
lacp_ports[0x1][0].actor_nosync()
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, 0x2,
'Expected nominated DPID %s but found %s' % (0x2, nominated_dpid))
def test_no_dpid_nominations(self):
"""Test dpid nomination doesn't nominate when no LACP ports are up"""
self.activate_all_ports()
valve = self.valves_manager.valves[0x1]
other_valves = self.get_other_valves(valve)
# No actors UP so should return None
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, other_valves)[0]
self.assertEqual(
nominated_dpid, None,
'Did not expect to nominate DPID %s' % nominated_dpid)
# No other valves so should return None
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
port.actor_up()
nominated_dpid = valve.switch_manager.get_lacp_dpid_nomination(1, valve, None)[0]
self.assertEqual(
nominated_dpid, None,
'Did not expect to nominate DPID %s' % nominated_dpid)
def test_nominated_dpid_port_selection(self):
"""Test a nominated port selection state is changed"""
self.activate_all_ports()
lacp_ports = {}
for valve in self.valves_manager.valves.values():
for port in valve.dp.ports.values():
if port.lacp:
lacp_ports.setdefault(valve, [])
lacp_ports[valve].append(port)
port.actor_up()
for valve, ports in lacp_ports.items():
other_valves = self.get_other_valves(valve)
for port in ports:
valve.lacp_update(port, True, 1, 1, other_valves)
# Testing accuracy of varz port_lacp_role
port_labels = {
'port': port.name,
'port_description': port.description,
'dp_name': valve.dp.name,
'dp_id': '0x%x' % valve.dp.dp_id
}
lacp_role = self.get_prom('port_lacp_role', labels=port_labels, bare=True)
self.assertEqual(
port.lacp_port_state(), lacp_role,
'Port %s DP %s role %s differs from varz value %s'
% (port, valve, port.lacp_port_state(), lacp_role))
if valve.dp.dp_id == 0x1:
self.assertEqual(
port.lacp_port_state(), LACP_PORT_SELECTED,
'Expected LACP port %s DP %s to be SELECTED' % (port, valve))
else:
self.assertEqual(
port.lacp_port_state(), LACP_PORT_UNSELECTED,
'Expected LACP port %s DP %s to be UNSELECTED' % (port, valve))
def test_lag_flood(self):
"""Test flooding is allowed for UP & SELECTED LAG links only"""
self.activate_all_ports()
main_valve = self.valves_manager.valves[0x1]
main_other_valves = self.get_other_valves(main_valve)
# Start with all LAG links INIT & UNSELECTED
self.validate_flood(2, 0, 3, False, 'Flooded out UNSELECTED & INIT LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out UNSELECTED & INIT LAG port')
# Set UP & SELECTED one s1 LAG link
port3 = main_valve.dp.ports[3]
port4 = main_valve.dp.ports[4]
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, False, 'Flooded out NOSYNC LAG port')
self.validate_flood(2, 0, 4, True, 'Did not flood out SELECTED LAG port')
# Set UP & SELECTED s2 LAG links
valve = self.valves_manager.valves[0x2]
other_valves = self.get_other_valves(valve)
for port in valve.dp.ports.values():
if port.lacp:
valve.lacp_update(port, True, 1, 1, other_valves)
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, False, 'Flooded out UNSELECTED & NOSYNC LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out UNSELECTED LAG port')
# Set UP & SELECTED both s1 LAG links
self.apply_ofmsgs(main_valve.lacp_update(port3, True, 1, 1, main_other_valves))
self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
self.validate_flood(2, 0, 3, True, 'Did not flood out SELECTED LAG port')
self.validate_flood(2, 0, 4, False, 'Flooded out multiple LAG ports')
    def test_lag_pipeline_accept(self):
        """Test packets entering through UP & SELECTED LAG links.

        Mirrors test_lag_flood but in the ingress direction: after each
        LACP transition, packets arriving on a LAG port must be accepted
        by the pipeline only when that port is synced and SELECTED.
        """
        self.activate_all_ports()
        main_valve = self.valves_manager.valves[0x1]
        main_other_valves = self.get_other_valves(main_valve)
        # Packet initially rejected
        self.validate_flood(
            3, 0, None, False, 'Packet incoming through UNSELECTED & INIT port was accepted')
        self.validate_flood(
            4, 0, None, False, 'Packet incoming through UNSELECTED & INIT port was accepted')
        # Set one s1 LAG port 4 to SELECTED & UP
        port3 = main_valve.dp.ports[3]
        port4 = main_valve.dp.ports[4]
        self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
        self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
        self.validate_flood(
            3, 0, None, False, 'Packet incoming through NOSYNC port was accepted')
        self.validate_flood(
            4, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
        # Set UP & SELECTED s2 LAG links, set one s1 port down
        valve = self.valves_manager.valves[0x2]
        other_valves = self.get_other_valves(valve)
        for port in valve.dp.ports.values():
            if port.lacp:
                valve.lacp_update(port, True, 1, 1, other_valves)
        self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
        self.apply_ofmsgs(main_valve.lacp_update(port3, False, 1, 1, main_other_valves))
        self.validate_flood(
            3, 0, None, False, 'Packet incoming through UNSELECTED & NOSYNC port was accepted')
        self.validate_flood(
            4, 0, None, False, 'Packet incoming through UNSELECTED port was accepted')
        # Set UP & SELECTED both s1 LAG links; both may now accept ingress
        self.apply_ofmsgs(main_valve.lacp_update(port3, True, 1, 1, main_other_valves))
        self.apply_ofmsgs(main_valve.lacp_update(port4, True, 1, 1, main_other_valves))
        self.validate_flood(
            3, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
        self.validate_flood(
            4, 0, None, True, 'Packet incoming through SELECTED port was not accepted')
class ValveStackMCLAGRestartTestCase(ValveTestBases.ValveTestNetwork):
    """Test stacked MCLAG LACP state across a switch cold start.

    Two stacked DPs (s1 priority 1, s2) share an MCLAG (lacp: 1) on
    ports 3 and 4 of each switch.
    """
    CONFIG = """
dps:
    s1:
%s
        stack:
            priority: 1
        interfaces:
            1:
                description: p1
                stack:
                    dp: s2
                    port: 1
            2:
                description: p2
                native_vlan: 100
            3:
                description: p3
                native_vlan: 100
                lacp: 1
            4:
                description: p4
                native_vlan: 100
                lacp: 1
    s2:
        hardware: 'GenericTFM'
        dp_id: 0x2
        interfaces:
            1:
                description: p1
                stack:
                    dp: s1
                    port: 1
            2:
                description: p2
                native_vlan: 100
            3:
                description: p3
                native_vlan: 100
                lacp: 1
            4:
                description: p4
                native_vlan: 100
                lacp: 1
""" % BASE_DP1_CONFIG
    def setUp(self):
        """Create the two-DP stacked MCLAG network from CONFIG."""
        self.setup_valves(self.CONFIG)
    def get_other_valves(self, valve):
        """Return the other currently running valves (all but *valve*)."""
        return self.valves_manager._other_running_valves(valve)  # pylint: disable=protected-access
    def test_mclag_cold_start(self):
        """Test cold-starting a switch with a downed port resets LACP states"""
        self.activate_all_ports()
        valve = self.valves_manager.valves[0x1]
        other_valves = self.get_other_valves(valve)
        port = valve.dp.ports[3]
        # Make sure LACP state has been updated
        self.assertTrue(valve.lacp_update(port, True, 1, 1, other_valves), 'No OFMSGS returned')
        self.assertTrue(port.is_actor_up(), 'Actor not UP')
        # Set port DOWN; actor state should fall back to NONE
        valve.port_delete(3, other_valves=other_valves)
        self.assertTrue(port.is_actor_none(), 'Actor not NONE')
        # Restart switch & LACP port
        self.cold_start()
        self.assertTrue(valve.port_add(3), 'No OFMSGS returned')
        # Successfully restart LACP from downed
        self.assertTrue(valve.lacp_update(port, True, 1, 1, other_valves), 'No OFMSGS returned')
        self.assertTrue(port.is_actor_up(), 'Actor not UP')
class ValveStackMCLAGStandbyTestCase(ValveTestBases.ValveTestNetwork):
"""Test MCLAG with standby port option overrules unselected states"""
CONFIG = """
dps:
s1:
%s
| |
self.assertEqual(normalize("won't"), "will not")
self.assertEqual(normalize("won't've"), "will not have")
self.assertEqual(normalize("would've"), "would have")
self.assertEqual(normalize("wouldn't"), "would not")
self.assertEqual(normalize("wouldn't've"), "would not have")
self.assertEqual(normalize("ya'll"), "you all")
self.assertEqual(normalize("y'all"), "you all")
self.assertEqual(normalize("y'ain't"), "you are not")
# TODO: Ambiguous with "you had"
self.assertEqual(normalize("you'd"), "you would")
self.assertEqual(normalize("you'd've"), "you would have")
self.assertEqual(normalize("you'll"), "you will")
self.assertEqual(normalize("you're"), "you are")
self.assertEqual(normalize("you aren't"), "you are not")
self.assertEqual(normalize("you've"), "you have")
self.assertEqual(normalize("you haven't"), "you have not")
    def test_combinations(self):
        """Check contraction expansion combined with number substitution
        and article removal inside whole sentences."""
        self.assertEqual(normalize("I couldn't have guessed there'd be two"),
                         "I could not have guessed there would be 2")
        self.assertEqual(normalize("I wouldn't have"), "I would not have")
        self.assertEqual(normalize("I hadn't been there"),
                         "I had not been there")
        self.assertEqual(normalize("I would've"), "I would have")
        self.assertEqual(normalize("it hadn't"), "it had not")
        self.assertEqual(normalize("it hadn't have"), "it had not have")
        self.assertEqual(normalize("it would've"), "it would have")
        self.assertEqual(normalize("she wouldn't have"), "she would not have")
        self.assertEqual(normalize("she would've"), "she would have")
        self.assertEqual(normalize("someone wouldn't have"),
                         "someone would not have")
        self.assertEqual(normalize("someone would've"), "someone would have")
        # note: "the" is removed ("what is weather like"), articles drop
        self.assertEqual(normalize("what's the weather like"),
                         "what is weather like")
        self.assertEqual(normalize("that's what I told you"),
                         "that is what I told you")
        self.assertEqual(normalize("whats 8 + 4"), "what is 8 + 4")
    def test_gender(self):
        """get_gender returns False when no gender can be determined
        (default language, no context)."""
        self.assertEqual(get_gender("person"),
                         False)
# Pt-pt
    def test_articles_pt(self):
        """Portuguese article removal via normalize(remove_articles=True).

        NOTE(review): several literals contain a mojibake character (�)
        from a lost non-UTF-8 encoding — likely 'é'; confirm against the
        upstream test source before re-encoding.
        """
        self.assertEqual(normalize(u"isto � o teste",
                                   lang="pt", remove_articles=True),
                         u"isto teste")
        self.assertEqual(
            normalize(u"isto � a frase", lang="pt", remove_articles=True),
            u"isto frase")
        self.assertEqual(
            normalize("e outro teste", lang="pt", remove_articles=True),
            "outro teste")
        # TODO: Fix this test and/or code
        # self.assertEqual(normalize(u"isto � o teste extra", lang="pt",
        #                           remove_articles=False),
        #                  u"isto e o teste extra")
    def test_extractnumber_pt(self):
        """Portuguese number extraction: cardinals, ordinals, fractions
        ('um terço', '3/4', 'avos') and spoken decimals ('ponto', 'virgula')."""
        self.assertEqual(extractnumber("isto e o primeiro teste", lang="pt"),
                         1)
        self.assertEqual(extractnumber("isto e o 2 teste", lang="pt"), 2)
        self.assertEqual(extractnumber("isto e o segundo teste", lang="pt"),
                         2)
        self.assertEqual(extractnumber(u"isto e um ter�o de teste",
                                       lang="pt"), 1.0 / 3.0)
        self.assertEqual(extractnumber("isto e o teste numero quatro",
                                       lang="pt"), 4)
        self.assertEqual(extractnumber(u"um ter�o de chavena", lang="pt"),
                         1.0 / 3.0)
        self.assertEqual(extractnumber("3 canecos", lang="pt"), 3)
        self.assertEqual(extractnumber("1/3 canecos", lang="pt"), 1.0 / 3.0)
        self.assertEqual(extractnumber("quarto de hora", lang="pt"), 0.25)
        self.assertEqual(extractnumber("1/4 hora", lang="pt"), 0.25)
        self.assertEqual(extractnumber("um quarto hora", lang="pt"), 0.25)
        self.assertEqual(extractnumber("2/3 pinga", lang="pt"), 2.0 / 3.0)
        self.assertEqual(extractnumber("3/4 pinga", lang="pt"), 3.0 / 4.0)
        self.assertEqual(extractnumber("1 e 3/4 cafe", lang="pt"), 1.75)
        self.assertEqual(extractnumber("1 cafe e meio", lang="pt"), 1.5)
        self.assertEqual(extractnumber("um cafe e um meio", lang="pt"), 1.5)
        self.assertEqual(
            extractnumber("tres quartos de chocolate", lang="pt"),
            3.0 / 4.0)
        self.assertEqual(extractnumber(u"tr�s quarto de chocolate",
                                       lang="pt"), 3.0 / 4.0)
        self.assertEqual(extractnumber("sete ponto cinco", lang="pt"), 7.5)
        self.assertEqual(extractnumber("sete ponto 5", lang="pt"), 7.5)
        self.assertEqual(extractnumber("sete e meio", lang="pt"), 7.5)
        self.assertEqual(extractnumber("sete e oitenta", lang="pt"), 7.80)
        self.assertEqual(extractnumber("sete e oito", lang="pt"), 7.8)
        self.assertEqual(extractnumber("sete e zero oito",
                                       lang="pt"), 7.08)
        self.assertEqual(extractnumber("sete e zero zero oito",
                                       lang="pt"), 7.008)
        self.assertEqual(extractnumber("vinte treze avos", lang="pt"),
                         20.0 / 13.0)
        self.assertEqual(extractnumber("seis virgula seiscentos e sessenta",
                                       lang="pt"), 6.66)
        self.assertEqual(extractnumber("seiscentos e sessenta e seis",
                                       lang="pt"), 666)
        self.assertEqual(extractnumber("seiscentos ponto zero seis",
                                       lang="pt"), 600.06)
        self.assertEqual(extractnumber("seiscentos ponto zero zero seis",
                                       lang="pt"), 600.006)
        self.assertEqual(extractnumber("seiscentos ponto zero zero zero seis",
                                       lang="pt"), 600.0006)
    def test_agressive_pruning_pt(self):
        """Portuguese normalization drops filler words (clitics,
        demonstratives) while converting number words to digits."""
        self.assertEqual(normalize("uma palavra", lang="pt"),
                         "1 palavra")
        self.assertEqual(normalize("esta palavra um", lang="pt"),
                         "palavra 1")
        self.assertEqual(normalize("o homem batia-lhe", lang="pt"),
                         "homem batia")
        self.assertEqual(normalize("quem disse asneira nesse dia", lang="pt"),
                         "quem disse asneira dia")
    def test_spaces_pt(self):
        """Leading/trailing/extra whitespace is collapsed by normalize."""
        self.assertEqual(normalize(" isto e o teste", lang="pt"),
                         "isto teste")
        self.assertEqual(normalize(" isto sao os testes ", lang="pt"),
                         "isto sao testes")
        self.assertEqual(normalize(" isto e um teste", lang="pt",
                                   remove_articles=False),
                         "isto e 1 teste")
    def test_numbers_pt(self):
        """Portuguese number-word sequences are rewritten as digits
        inside normalized sentences."""
        self.assertEqual(normalize(u"isto e o um dois tr�s teste", lang="pt"),
                         u"isto 1 2 3 teste")
        self.assertEqual(normalize(u"� a sete oito nove test", lang="pt"),
                         u"7 8 9 test")
        self.assertEqual(
            normalize("teste zero dez onze doze treze", lang="pt"),
            "teste 0 10 11 12 13")
        self.assertEqual(
            normalize("teste mil seiscentos e sessenta e seis", lang="pt",
                      remove_articles=False),
            "teste 1000 600 e 66")
        self.assertEqual(
            normalize("teste sete e meio", lang="pt",
                      remove_articles=False),
            "teste 7 e meio")
        self.assertEqual(
            normalize("teste dois ponto nove", lang="pt"),
            "teste 2 ponto 9")
        self.assertEqual(
            normalize("teste cento e nove", lang="pt",
                      remove_articles=False),
            "teste 100 e 9")
        self.assertEqual(
            normalize("teste vinte e 1", lang="pt"),
            "teste 20 1")
def test_extractdatetime_pt(self):
def extractWithFormat(text):
date = datetime(2017, 06, 27, 00, 00)
[extractedDate, leftover] = extract_datetime(text, date,
lang="pt")
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(text)
self.assertEqual(res[0], expected_date)
self.assertEqual(res[1], expected_leftover)
testExtract(u"que dia � hoje",
"2017-06-27 00:00:00", u"dia")
testExtract(u"que dia � amanha",
"2017-06-28 00:00:00", u"dia")
testExtract(u"que dia foi ontem",
"2017-06-26 00:00:00", u"dia")
testExtract(u"que dia foi antes de ontem",
"2017-06-25 00:00:00", u"dia")
testExtract(u"que dia foi ante ontem",
"2017-06-25 00:00:00", u"dia")
testExtract(u"que dia foi ante ante ontem",
"2017-06-24 00:00:00", u"dia")
testExtract("marca o jantar em 5 dias",
"2017-07-02 00:00:00", "marca jantar")
testExtract("como esta o tempo para o dia depois de amanha?",
"2017-06-29 00:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"lembra me �s 10:45 pm",
# "2017-06-27 22:45:00", u"lembra")
testExtract("como esta o tempo na sexta de manha",
"2017-06-30 08:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"lembra me para ligar a m�e daqui " \
# u"a 8 semanas e 2 dias",
# "2017-08-24 00:00:00", u"lembra ligar mae")
testExtract("Toca black metal 2 dias a seguir a sexta",
"2017-07-02 00:00:00", "toca black metal")
testExtract("Toca satanic black metal 2 dias para esta sexta",
"2017-07-02 00:00:00", "toca satanic black metal")
testExtract("Toca super black metal 2 dias a partir desta sexta",
"2017-07-02 00:00:00", "toca super black metal")
# TODO: Fix this test and/or code
# testExtract(u"Come�a a invas�o �s 3:45 pm de quinta feira",
# "2017-06-29 15:45:00", "comeca invasao")
testExtract("na segunda, compra queijo",
"2017-07-03 00:00:00", "compra queijo")
# TODO: Fix this test and/or code
# testExtract(u"Toca os parab�ns daqui a 5 anos",
# "2022-06-27 00:00:00", "toca parabens")
# TODO: Fix this test and/or code
# testExtract(u"manda Skype a M�e �s 12:45 pm pr�xima quinta",
# "2017-06-29 12:45:00", "manda skype mae")
# TODO: Fix this test and/or code
# testExtract(u"como est� o tempo esta sexta?",
# "2017-06-30 00:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como est� o tempo esta sexta de tarde?",
# "2017-06-30 15:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como est� o tempo esta sexta as tantas da manha?",
# "2017-06-30 04:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como est� o tempo esta sexta a meia noite?",
# "2017-06-30 00:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como est� o tempo esta sexta ao meio dia?",
# "2017-06-30 12:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como est� o tempo esta sexta ao fim da tarde?",
# "2017-06-30 19:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como est� o tempo esta sexta ao meio da manha?",
# "2017-06-30 10:00:00", "como tempo")
testExtract("lembra me para ligar a mae no dia 3 de agosto",
"2017-08-03 00:00:00", "lembra ligar mae")
testExtract(u"compra facas no 13� dia de maio",
"2018-05-13 00:00:00", "compra facas")
testExtract(u"gasta dinheiro no maio dia 13",
"2018-05-13 00:00:00", "gasta dinheiro")
testExtract(u"compra velas a maio 13",
"2018-05-13 00:00:00", "compra velas")
testExtract(u"bebe cerveja a 13 maio",
"2018-05-13 00:00:00", "bebe cerveja")
testExtract("como esta o tempo 1 dia a seguir a amanha",
"2017-06-29 00:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como esta o tempo �s 0700 horas",
# "2017-06-27 07:00:00", "como tempo")
# TODO: Fix this test and/or code
# testExtract(u"como esta o tempo amanha �s 7 em ponto",
# "2017-06-28 07:00:00", "como tempo")
testExtract(u"como esta o tempo amanha pelas 2 da tarde",
"2017-06-28 14:00:00", "como tempo")
testExtract(u"como esta o tempo amanha pelas 2",
"2017-06-28 02:00:00", "como tempo")
testExtract(u"como esta o tempo pelas 2 da tarde da proxima sexta",
"2017-06-30 14:00:00", "como tempo")
testExtract("lembra-me de acordar em 4 anos",
"2021-06-27 00:00:00", "lembra acordar")
testExtract("lembra-me de acordar em 4 anos e 4 dias",
"2021-07-01 00:00:00", "lembra acordar")
testExtract("dorme 3 dias depois de amanha",
"2017-07-02 00:00:00", "dorme")
testExtract("marca consulta para 2 semanas e 6 dias depois de Sabado",
"2017-07-21 00:00:00", "marca consulta")
# TODO: Fix this test and/or code
# testExtract(u"come�a a festa �s 8 em ponto da noite de quinta",
# "2017-06-29 20:00:00", "comeca festa")
    def test_gender_pt(self):
        """Portuguese grammatical gender detection, with and without a
        context sentence; False when the word alone is ambiguous."""
        self.assertEqual(get_gender("vaca", lang="pt"),
                         "f")
        self.assertEqual(get_gender("cavalo", lang="pt"),
                         "m")
        self.assertEqual(get_gender("vacas", lang="pt"),
                         "f")
        self.assertEqual(get_gender("boi", "o boi come erva", lang="pt"),
                         "m")
        # Without context "boi" cannot be resolved
        self.assertEqual(get_gender("boi", lang="pt"),
                         False)
        self.assertEqual(get_gender("homem", "estes homem come merda",
                                    lang="pt"),
                         "m")
        # "ponte" defaults to 'm' from word shape; context corrects to 'f'
        self.assertEqual(get_gender("ponte", lang="pt"),
                         "m")
        self.assertEqual(get_gender("ponte", "essa ponte caiu",
                                    lang="pt"),
                         "f")
#
# Spanish
#
def test_articles_es(self):
self.assertEqual(normalize("esta | |
the FST.
Returns:
self.
See also: `encode`.
"""
self._ops.decode(self, encoder)
self._check_mutating_imethod()
return self
def delete_arcs(self, state, n=None):
"""
Deletes arcs leaving a particular state.
Args:
state: The integer index of a state.
n: An optional argument indicating how many arcs to be deleted.
If this argument is None, all arcs from this state are deleted.
Returns:
self.
Raises:
IndexError: State index out of range.
See also: `delete_states`.
"""
if not self._valid_state_id(state):
raise IndexError("State index out of range")
self._delete_arcs(state, n) if n else self._delete_all_arcs(state)
self._check_mutating_imethod()
return self
def delete_states(self, states=None):
"""
Deletes states.
Args:
states: An optional iterable of integer indices of the states to be
deleted. If this argument is omitted, all states are deleted.
Returns:
self.
Raises:
IndexError: State index out of range.
See also: `delete_arcs`.
"""
if states:
for state in states:
if not self._valid_state_id(state):
raise IndexError("State index out of range")
self._delete_states(states)
else:
self._delete_all_states()
self._check_mutating_imethod()
return self
def encode(self, encoder):
"""
Encodes labels and/or weights.
This operation allows for the representation of a weighted transducer as
a weighted acceptor, an unweighted transducer, or an unweighted acceptor
by considering the pair (input label, output label), the pair (input
label, weight), or the triple (input label, output label, weight) as a
single label. Applying this operation mutates the EncodeMapper argument,
which can then be used to decode.
Args:
encoder: An EncodeMapper object used to encode the FST.
Returns:
self.
See also: `decode`.
"""
self._ops.encode(self, encoder)
self._check_mutating_imethod()
return self
def invert(self):
"""
Inverts the FST's transduction.
This operation destructively inverts the FST's transduction by
exchanging input and output labels.
Returns:
self.
"""
self._ops.invert(self)
self._check_mutating_imethod()
return self
def minimize(self, delta=DELTA, allow_nondet=False):
"""
Minimizes the FST.
This operation destructively performs the minimization of deterministic
weighted automata and transducers. If the input FST A is an acceptor,
this operation produces the minimal acceptor B equivalent to A, i.e. the
acceptor with a minimal number of states that is equivalent to A. If the
input FST A is a transducer, this operation internally builds an
equivalent transducer with a minimal number of states. However, this
minimality is obtained by allowing transitions to have strings of
symbols as output labels, this is known in the literature as a real-time
transducer. Such transducers are not directly supported by the library.
This function will convert such transducers by expanding each
string-labeled transition into a sequence of transitions. This will
result in the creation of new states, hence losing the minimality
property.
Args:
delta: Comparison/quantization delta (default: 0.0009765625).
allow_nondet: Attempt minimization of non-deterministic FST?
Returns:
self.
"""
self._ops.minimize(self, delta, allow_nondet)
self._check_mutating_imethod()
return self
def mutable_arcs(self, state):
"""
Returns a mutable iterator over arcs leaving the specified state.
Args:
state: The source state index.
Returns:
A MutableArcIterator.
See also: `arcs`, `states`.
"""
return self._mutable_arc_iterator_type(self, state)
def project(self, project_output=False):
"""
Converts the FST to an acceptor using input or output labels.
This operation destructively projects an FST onto its domain or range by
either copying each arc's input label to its output label (the default)
or vice versa.
Args:
project_output: Project onto output labels?
Returns:
self.
See also: `decode`, `encode`, `relabel`, `relabel_tables`.
"""
self._ops.project(self, _getters.GetProjectType(project_output))
self._check_mutating_imethod()
return self
def prune(self, weight=None, nstate=NO_STATE_ID, delta=DELTA):
"""
Removes paths with weights below a certain threshold.
This operation deletes states and arcs in the input FST that do not
belong to a successful path whose weight is no more (w.r.t the natural
semiring order) than the threshold \otimes the weight of the shortest
path in the input FST. Weights must be commutative and have the path
property.
Args:
weight: A Weight in the FST semiring or an object that can be
converted to a Weight in the FST semiring indicating the desired
weight threshold below which paths are pruned; if None, no paths
are pruned.
nstate: State number threshold (default: -1).
delta: Comparison/quantization delta (default: 0.0009765625).
Returns:
self.
See also: The constructive variant.
"""
# Threshold is set to semiring Zero (no pruning) if weight is None.
weight = _get_weight_or_default(self._weight_factory, weight, False)
self._ops.prune(self, weight, nstate, delta)
self._check_mutating_imethod()
return self
def push(self, to_final=False, delta=DELTA,
remove_total_weight=False):
"""
Pushes weights towards the initial or final states.
This operation destructively produces an equivalent transducer by
pushing the weights towards the initial state or toward the final
states. When pushing weights towards the initial state, the sum of the
weight of the outgoing transitions and final weight at any non-initial
state is equal to one in the resulting machine. When pushing weights
towards the final states, the sum of the weight of the incoming
transitions at any state is equal to one. Weights need to be left
distributive when pushing towards the initial state and right
distributive when pushing towards the final states.
Args:
to_final: Push towards final states?
delta: Comparison/quantization delta (default: 0.0009765625).
remove_total_weight: If pushing weights, should the total weight be
removed?
Returns:
self.
See also: The constructive variant, which also supports label pushing.
"""
self._ops.push(self, _getters.GetReweightType(to_final),
delta, remove_total_weight)
self._check_mutating_imethod()
return self
def relabel(self, ipairs=None, opairs=None):
"""
Replaces input and/or output labels using pairs of labels.
This operation destructively relabels the input and/or output labels of
the FST using pairs of the form (old_ID, new_ID); omitted indices are
identity-mapped.
Args:
ipairs: An iterable containing (old index, new index) integer pairs.
opairs: An iterable containing (old index, new index) integer pairs.
Returns:
self.
Raises:
ValueError: No relabeling pairs specified.
See also: `decode`, `encode`, `project`, `relabel_tables`.
"""
if not ipairs:
ipairs = []
if not opairs:
opairs = []
if len(ipairs) == 0 and len(opairs) == 0:
raise ValueError("No relabeling pairs specified.")
self._ops.relabel(self, ipairs, opairs)
self._check_mutating_imethod()
return self
def relabel_tables(self, old_isymbols=None, new_isymbols=None,
unknown_isymbol="", attach_new_isymbols=True,
old_osymbols=None, new_osymbols=None,
unknown_osymbol="", attach_new_osymbols=True):
"""
Replaces input and/or output labels using SymbolTables.
This operation destructively relabels the input and/or output labels of
the FST using user-specified symbol tables; omitted symbols are
identity-mapped.
Args:
old_isymbols: The old SymbolTable for input labels, defaulting to the
FST's input symbol table.
new_isymbols: A SymbolTable used to relabel the input labels
unknown_isymbol: Input symbol to use to relabel OOVs (if empty,
OOVs raise an exception)
attach_new_isymbols: Should new_isymbols be made the FST's input
symbol table?
old_osymbols: The old SymbolTable for output labels, defaulting to
the FST's output symbol table.
new_osymbols: A SymbolTable used to relabel the output labels.
unknown_osymbol: Outnput symbol to use to relabel OOVs (if empty,
OOVs raise an exception)
attach_new_osymbols: Should new_osymbols be made the FST's output
symbol table?
Returns:
self.
Raises:
ValueError: No SymbolTable specified.
See also: `decode`, `encode`, `project`, `relabel`.
"""
if new_isymbols is None and new_osymbols is None:
raise ValueError("No new symbol tables specified")
self._ops.relabel_tables(self,
self._input_symbols() if old_isymbols is None else old_isymbols,
new_isymbols, unknown_isymbol, attach_new_isymbols,
self._output_symbols() if old_osymbols is None else old_osymbols,
new_osymbols, unknown_osymbol, attach_new_osymbols)
self._check_mutating_imethod()
return self
def reserve_arcs(self, state, n):
"""
Reserve n arcs at a particular state (best effort).
Args:
state: The integer index of a state.
n: The number of arcs to reserve.
Returns:
self.
Raises:
IndexError: State index out of range.
See also: `reserve_states`.
"""
if not self._valid_state_id(state):
raise IndexError("State index out of range")
self._reserve_arcs(state, n)
self._check_mutating_imethod()
return self
def reserve_states(self, n):
"""
Reserve n states (best effort).
Args:
n: The number of states to reserve.
Returns:
self.
See also: `reserve_arcs`.
"""
self._reserve_states(n)
self._check_mutating_imethod()
return self
def reweight(self, potentials, to_final=False):
"""
Reweights an FST using an iterable of potentials.
This operation destructively reweights an FST according to the
potentials and in the direction specified by the user. An arc of weight
w, with an origin state of potential p and destination state of
potential q, is reweighted by p^{-1} \otimes (w \otimes q) when
reweighting towards the initial state, and by (p \otimes w) \otimes
q^{-1} when reweighting towards the final states. The weights must be
left distributive when reweighting towards the initial state and right
distributive when reweighting towards | |
reference system for the georeferenced
array. Defaults to EPSG 4326 ('epsg:4326').
Returns
-------
metadata : dict
Dictionary containing the export metadata.
Example
-------
>>> # Imports
>>> import numpy as np
>>> from rasterio.transform import from_origin
>>> # Create array
>>> arr = np.array([[1,2],[3,4]])
>>> transform = from_origin(-73.0, 43.0, 0.5, 0.5)
>>> meta = create_metadata(arr, transform)
# Display metadata
>>> meta
{'driver': 'GTiff',
'dtype': dtype('int32'),
'nodata': 0,
'width': 2,
'height': 2,
'count': 1,
'crs': 'epsg:4326',
'transform': Affine(0.5, 0.0, -73.0,
0.0, -0.5, 43.0)}
"""
# Define metadata
metadata = {
"driver": driver,
"dtype": array.dtype,
"nodata": nodata,
"width": array.shape[1],
"height": array.shape[0],
"count": count,
"crs": crs,
"transform": transform
}
# Return metadata
return metadata
def store_monthly_mean(radiance_daily, dates):
    """Calculate the monthly mean radiance array for each
    year/month entry in a list of dates and store the means
    in a nested dictionary.

    Parameters
    ----------
    radiance_daily : dict
        Dictionary containing daily radiance arrays,
        indexed by radiance['YYYY']['MM']['DD'].

    dates : list (of str)
        List containing strings of format 'YYYY-MM'.

    Returns
    -------
    monthly_means : dict
        Dictionary containing monthly mean radiance
        arrays, indexed by monthly_means['YYYY']['MM'].

    Example
    -------
        >>> # Define months list
        >>> months = [
        ...     '2018-09',
        ...     '2018-10',
        ...     '2018-11',
        ...     '2018-12'
        ... ]
        >>> # Store monthly means in dictionary
        >>> monthly_means = store_monthly_mean(
        ...     radiance_daily=radiance_sept_2018_may_2020, dates=months)
        >>> # Show top-level keys (years)
        >>> monthly_means.keys()
        dict_keys(['2018'])
        >>> # Show 2018 keys (months)
        >>> monthly_means.get('2018').keys()
        dict_keys(['09', '10', '11', '12'])
    """
    # Nested result dictionary: {year: {month: mean array}}
    monthly_means = {}
    for entry in dates:
        # Split 'YYYY-MM' into its components
        year, month = entry.split('-')
        # Create the per-year sub-dictionary on first sight of the year
        year_means = monthly_means.setdefault(year, {})
        # Collect every daily array recorded for this year/month
        daily_arrays = flatten_data(radiance_daily.get(year).get(month))
        # Average the daily arrays and file the result under year/month
        year_means[month] = calculate_mean(daily_arrays)
    return monthly_means
def store_continuous_range_mean(radiance_daily, date_range_list):
    """Calculate the mean radiance array over each continuous
    (start date, end date) range and store the means in a
    dictionary keyed by the range endpoints.

    Parameters
    ----------
    radiance_daily : dict
        Dictionary containing daily radiance arrays,
        indexed by radiance['YYYY']['MM']['DD'].

    date_range_list : list (of tuple of str)
        List of (start, end) tuples of format 'YYYY-MM-DD'.

    Returns
    -------
    range_means : dict
        Dictionary containing date range mean radiance arrays,
        indexed by range_means['YYYYMMDD-YYYYMMDD'].

    Example
    -------
        >>> # Define date ranges
        >>> fall_2018_date_range_list = [
        ...     ('2018-09-01', '2018-12-16'),
        ...     ('2018-11-18', '2018-11-24'),
        ...     ('2018-12-08', '2018-12-14'),
        ...     ('2018-12-17', '2019-01-04'),
        ... ]
        >>> # Store means
        >>> fall_2018_means = store_continuous_range_mean(
        ...     radiance_daily=radiance_sept_2018_may_2020,
        ...     date_range_list=fall_2018_date_range_list)
        >>> # Show keys
        >>> for key in fall_2018_means.keys():
        ...     print(key)
        20180901-20181216
        20181118-20181124
        20181208-20181214
        20181217-20190104
    """
    range_means = {}
    for start_date, end_date in date_range_list:
        # Expand the endpoints into the full list of daily date strings
        days = create_date_list(start_date, end_date)
        # Key the result by the compacted first/last day of the range
        key = f"{days[0].replace('-', '')}-{days[-1].replace('-', '')}"
        # Gather the daily arrays for the whole range
        arrays = extract_data(radiance=radiance_daily, dates=days)
        # Average the daily arrays
        mean_array = calculate_mean(arrays)
        # First occurrence of a date range wins; duplicates are skipped
        if key not in range_means:
            range_means[key] = mean_array
    return range_means
def store_weekly_range_mean(radiance_daily, start_date, end_date):
    """Calculates the mean radiance array over each recurring
    weekday (and over business days) within a date range and
    stores the means in a dictionary.

    Parameters
    ----------
    radiance_daily : dict
        Dictionary containing daily radiance arrays,
        indexed by radiance['YYYY']['MM']['DD'].

    start_date : str
        String of format 'YYYY-MM-DD'.

    end_date : str
        String of format 'YYYY-MM-DD'.

    Returns
    -------
    radiance_weekly_range_mean : dict
        Dictionary containing recurring weekly mean radiance arrays,
        indexed by radiance_weekly_range_mean['YYYYMMDD-YYYYMMDD-DAY'].

    Example
    -------
        >>> # Store Fall 2018 data
        >>> fall_2018_weekly = store_weekly_range_mean(
        ...     radiance_daily=radiance_sept_2018_may_2020,
        ...     start_date='2018-09-01', end_date='2018-12-16')
        >>> # Display dictionary keys
        >>> for key in fall_2018_weekly.keys():
        ...     print(key)
        20180901-20181216-SUN
        20180901-20181216-MON
        20180901-20181216-TUE
        20180901-20181216-WED
        20180901-20181216-THU
        20180901-20181216-FRI
        20180901-20181216-SAT
        20180901-20181216-BUS
    """
    # Date frequencies: one recurring range per weekday, plus business days
    day_list = ["W-SUN", "W-MON", "W-TUE", "W-WED", "W-THU", "W-FRI", "W-SAT", "B"]
    # Create list of date ranges for each frequency in the day list
    date_ranges = [create_date_list(start_date, end_date, date_frequency=day)
                   for day in day_list]
    # Key suffixes: 'SUN'..'SAT' for weekly frequencies, 'BUS' for business days
    day_str = [day[-3:] if "W-" in day else f"{day}US" for day in day_list]
    # Initialize dictionary to store weekly range mean radiance arrays
    radiance_weekly_range_mean = {}
    # Pair each suffix with its date range instead of the previous
    # manually-incremented index counter
    for suffix, date_range in zip(day_str, date_ranges):
        # Create index based on date range and recurring day
        date_key = f"{start_date.replace('-', '')}-{end_date.replace('-', '')}-{suffix}"
        # Get array for each date into list
        radiance_arrays = extract_data(
            radiance=radiance_daily, dates=date_range)
        # Calculate mean of arrays
        radiance_mean = calculate_mean(radiance_arrays)
        # Add mean array to dictionary (first occurrence wins)
        if date_key not in radiance_weekly_range_mean:
            radiance_weekly_range_mean[date_key] = radiance_mean
    # Return weekly range means
    return radiance_weekly_range_mean
def unpack_dictionaries(dictionaries):
    """Merge a list of dictionaries into a single dictionary.

    When a key appears in several input dictionaries, the value from
    the dictionary that appears latest in the list wins (equivalent to
    a ChainMap built over the reversed list).

    Parameters
    ----------
    dictionaries : list
        List containing multiple dictionaries.

    Returns
    -------
    unpacked : dict
        Dictionary containing all keys/values of
        all dictionaries in the input list.

    Example
    -------
        >>> # Define dictionaries
        >>> week_1 = {'radiance-week1': 200}
        >>> week_2 = {'radiance-week2': 300}
        >>> # Create list of dictionaries
        >>> week_list = [week_1, week_2]
        >>> week_list
        [{'radiance-week1': 200}, {'radiance-week2': 300}]
        >>> # Unpack dictionaries
        >>> unpack_dictionaries(week_list)
        {'radiance-week1': 200, 'radiance-week2': 300}
    """
    unpacked = {}
    # Sequential update: later dictionaries overwrite earlier ones on
    # key collisions, and key order follows first insertion.
    for mapping in dictionaries:
        unpacked.update(mapping)
    return unpacked
def plot_values(radiance, location='Penn State Campus', title='Radiance', data_source='NASA Black Marble', difference=False):
    """Plots the values in a radiance array.

    Parameters
    ----------
    radiance : numpy array
        Array containing raw values, mean values,
        or difference values.
    location : str, optional
        Name of study area location. Included in plot
        super-title. Default value is 'Penn State Campus'.
    title : str, optional
        Plot sub-title. Default value is 'Radiance'.
        Intended for 'September 2019 Mean Radiance' or
        'Change in Mean Radiance (September 2019 vs.
        March 2020)'.
    data_source : str, optional
        Sources of data used in the plot.
        Default value is 'NASA Black Marble'.
    difference : bool, optional
        Boolean indicating if the array contains raw
        values or mean values (False) or contains
        difference values (True). Default value is False.

    Returns
    -------
    tuple
        fig : matplotlib.figure.Figure object
            The figure object associated with the plot.
        ax : matplotlib.axes._subplots.AxesSubplot object
            The axes object associated with the plot.

    Example
    -------
    >>> # Plot difference from Sept 2019 to March 2020
    >>> fig, ax = plot_values(
    ...     diff_sep_2019_march_2020,
    ...     title="Change in Mean Radiance (September 2019 vs. March 2020)",
    ...     difference=True)
    """
    # Largest absolute value in the data, so a difference plot gets a
    # color scale symmetric about zero (replaces the manual min/max
    # comparison with the builtin max()).
    plot_max = max(np.absolute(radiance.min()), np.absolute(radiance.max()))
    # Diverging scale for differences; zero-based scale otherwise
    plot_vmin = -plot_max if difference else 0
    plot_vmax = plot_max
    # Radiance units, rendered by matplotlib mathtext.
    # FIX: raw string — '\m', '\c', '\s' are invalid Python escape
    # sequences in a normal string (SyntaxWarning now, SyntaxError in
    # future CPython). The runtime value is unchanged.
    units = r"$\mathrm{nWatts \cdot cm^{−2} \cdot sr^{−1}}$"
    # Define title
    plot_title = f"{title} ({units})"
    # Diverging colormap for differences, sequential greys otherwise
    plot_cmap = 'RdBu_r' if difference else "Greys_r"
    # Use dark background
    with plt.style.context('dark_background'):
        # Create figure and axes object
        fig, ax = plt.subplots(figsize=(16, 8))
        # Adjust spacing
        plt.subplots_adjust(top=0.95)
        # Add super title
        plt.suptitle(f"{location} Cloud Free Radiance", size=24)
        # Plot array
        ep.plot_bands(
            radiance,
            scale=False,
            title=plot_title,
            vmin=plot_vmin,
            vmax=plot_vmax,
            cmap=plot_cmap,
            ax=ax)
        # Set title size
        ax.title.set_size(20)
        # Add caption
        fig.text(0.5, .15, f"Data Source: {data_source}",
                 ha='center', fontsize=16)
    # Return figure and axes object
    return fig, ax
def plot_histogram(radiance, location='Penn State Campus', title='Distribution of Radiance', xlabel='Radiance',
ylabel='Pixel Count', data_source='NASA Black Marble', difference=False):
"""Plots the distribution of values in a radiance array.
Parameters
----------
radiance : numpy array
Array containing raw values, mean values,
or difference values.
location : str, optional
Name of study area location. Included in plot
super-title. Default value is 'Penn State Campus'.
title : str, | |
<reponame>gfreewind/sonic-swss<filename>tests/test_mirror_port_span.py
# This test suite covers the functionality of mirror feature in SwSS
import pytest
@pytest.mark.usefixtures("testlog")
@pytest.mark.usefixtures('dvs_vlan_manager')
@pytest.mark.usefixtures('dvs_lag_manager')
@pytest.mark.usefixtures('dvs_mirror_manager')
@pytest.mark.usefixtures('dvs_policer_manager')
class TestMirror(object):
def check_syslog(self, dvs, marker, log, expected_cnt):
(ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" % (marker, log)])
assert out.strip() == str(expected_cnt)
def test_PortMirrorQueue(self, dvs, testlog):
"""
This test covers valid and invalid values of the queue parameter. All sessions have source & dest port.
Operation flow:
1. Create mirror session with queue 0, verify session becomes active and error not written to log.
2. Create mirror session with queue max valid value, verify session becomes active and error not written to log.
3. Create mirror session with queue max valid value + 1, verify session doesnt get created and error written to log.
Due to lag in table operations, verify_no_mirror is necessary at the end of each step, to ensure cleanup before next step.
Note that since orchagent caches max valid value during initialization, this test cannot simulate a value from SAI, e.g.
by calling setReadOnlyAttr, because orchagent has already completed initialization and would never read the simulated value.
Therefore, the default value must be used, MIRROR_SESSION_DEFAULT_NUM_TC which is defined in mirrororch.cpp as 255.
"""
session = "TEST_SESSION"
dst_port = "Ethernet16"
src_ports = "Ethernet12"
# Sub Test 1
marker = dvs.add_log_marker()
self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="BOTH", queue="0")
self.dvs_mirror.verify_session_status(session)
self.dvs_mirror.remove_mirror_session(session)
self.dvs_mirror.verify_no_mirror()
self.check_syslog(dvs, marker, "Failed to get valid queue 0", 0)
# Sub Test 2
marker = dvs.add_log_marker()
self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="RX", queue="254")
self.dvs_mirror.verify_session_status(session)
self.dvs_mirror.remove_mirror_session(session)
self.dvs_mirror.verify_no_mirror()
self.check_syslog(dvs, marker, "Failed to get valid queue 254", 0)
# Sub Test 3
marker = dvs.add_log_marker()
self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="TX", queue="255")
self.dvs_mirror.verify_session_status(session, expected=0)
self.dvs_mirror.remove_mirror_session(session)
self.dvs_mirror.verify_no_mirror()
self.check_syslog(dvs, marker, "Failed to get valid queue 255", 1)
    def test_PortMirrorAddRemove(self, dvs, testlog):
        """
        This test covers the basic SPAN mirror session creation and removal operations
        Operation flow:
        1. Create mirror session with only dst_port , verify session becomes active.
        2. Create mirror session with invalid dst_port, verify session doesnt get created.
        3. Create mirror session with invalid source port, verify session doesnt get created.
        4. Create mirror session with source port, verify session becomes active
        5. Create mirror session with Vlan as dst_port, verify session doesnt get created.
        6. Create mirror session with Vlan as source port, verify session doesnt get created.
        """
        # Port name -> SAI object id map, used to build the expected ASIC DB entry.
        pmap = dvs.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")
        pmap = dict(pmap)
        session = "TEST_SESSION"
        dst_port = "Ethernet16"
        # Sub Test 1: destination port only -> active local SPAN session.
        self.dvs_mirror.create_span_session(session, dst_port)
        self.dvs_mirror.verify_session_status(session)
        # Expected ASIC DB attributes; reused by Sub Test 4 below.
        a = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": pmap.get("Ethernet16"),
             "SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
        self.dvs_mirror.verify_session(dvs, session, asic_db=a)
        self.dvs_mirror.remove_mirror_session(session)
        # Sub Test 2: invalid destination port -> session rejected.
        self.dvs_mirror.create_span_session(session, "Invalid")
        self.dvs_mirror.verify_session_status(session, expected=0)
        # Sub Test 3: invalid source port -> session rejected.
        self.dvs_mirror.create_span_session(session, dst_port, "Invalid", "RX")
        self.dvs_mirror.verify_session_status(session, expected=0)
        # Sub Test 4
        # create mirror session with dst_port, src_port, direction
        src_ports = "Ethernet12"
        src_asic_ports = ["Ethernet12"]
        self.dvs_mirror.create_span_session(session, dst_port, src_ports, "RX")
        self.dvs_mirror.verify_session_status(session)
        self.dvs_mirror.verify_session(dvs, session, asic_db=a, src_ports=src_asic_ports, direction = "RX")
        self.dvs_mirror.remove_mirror_session(session)
        self.dvs_mirror.verify_no_mirror()
        ## Sub Test 5: a VLAN is not a valid SPAN destination.
        self.dvs_vlan.create_vlan("10")
        self.dvs_mirror.create_span_session(session, dst_port="Vlan10")
        self.dvs_mirror.verify_session_status(session, expected=0)
        ## Sub Test 6: a VLAN is not a valid SPAN source.
        self.dvs_mirror.create_span_session(session, dst_port, src_ports="Vlan10")
        self.dvs_mirror.verify_session_status(session, expected=0)
        self.dvs_mirror.remove_mirror_session(session)
        self.dvs_mirror.verify_no_mirror()
        self.dvs_vlan.remove_vlan("10")
        self.dvs_vlan.get_and_verify_vlan_ids(0)
def test_PortMirrorMultiSpanAddRemove(self, dvs, testlog):
"""
This test covers the Multiple SPAN mirror session creation and removal operations
Operation flow:
1. Create mirror session with multiple source ports, verify that session is active
2. Create mirror session with multiple source with valid,invalid ports, session doesnt get created.
3. Create mirror session with multiple source with invalid destination, session doesnt get created.
4. Create two mirror sessions with multiple source ports.
5. Verify session config in both sessions.
"""
pmap = dvs.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")
pmap = dict(pmap)
session1 = "TEST_SESSION1"
session2 = "TEST_SESSION2"
dst_port1 = "Ethernet16"
dst_oid1 = pmap.get(dst_port1)
dst_port2 = "Ethernet20"
dst_oid2 = pmap.get(dst_port2)
# Sub test 1
src_ports = "Ethernet0,Ethernet4,Ethernet8"
src_asic_ports = ["Ethernet0","Ethernet4","Ethernet8"]
self.dvs_mirror.create_span_session(session1, dst_port1, src_ports)
a = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": dst_oid1,
"SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
self.dvs_mirror.verify_session_status(session1)
self.dvs_mirror.verify_session(dvs, session1, asic_db=a, src_ports=src_asic_ports)
self.dvs_mirror.remove_mirror_session(session1)
self.dvs_mirror.verify_no_mirror()
#Subtest 2
# create mirror session with valid and invalid ports.
src_ports = "Ethernet0,Invalid,Ethernet8"
self.dvs_mirror.create_span_session(session1, dst_port1, src_ports)
self.dvs_mirror.verify_session_status(session1, expected=0)
# Subtest 3
src_ports = "Ethernet0,Ethernet4,Ethernet8"
self.dvs_mirror.create_span_session(session1, "Invalid", src_ports)
self.dvs_mirror.verify_session_status(session1, expected=0)
# create mirror session
src_ports1 = "Ethernet0,Ethernet4"
src_asic_ports1 = ["Ethernet0","Ethernet4"]
self.dvs_mirror.create_span_session(session1, dst_port1, src_ports1)
src_ports2 = "Ethernet8,Ethernet12"
src_asic_ports2 = ["Ethernet8","Ethernet12"]
self.dvs_mirror.create_span_session(session2, dst_port2, src_ports2)
a1 = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": dst_oid1,
"SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
a2 = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": dst_oid2,
"SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
self.dvs_mirror.verify_session_status(session1, expected = 2)
self.dvs_mirror.verify_session_status(session2, expected = 2)
self.dvs_mirror.verify_session(dvs, session1, dst_oid=dst_oid1, asic_db=a1, src_ports=src_asic_ports1, expected = 2)
self.dvs_mirror.verify_session(dvs, session2, dst_oid=dst_oid2, asic_db=a2, src_ports=src_asic_ports2, expected = 2)
self.dvs_mirror.remove_mirror_session(session1)
self.dvs_mirror.remove_mirror_session(session2)
self.dvs_mirror.verify_no_mirror()
def test_PortMirrorPolicerAddRemove(self, dvs, testlog):
"""
This test covers the basic SPAN mirror session creation and removal operations
Operation flow:
1. Create mirror session with only dst_port and policer , verify session becomes active
2. Create session with invalid policer, verify session doesnt get created.
2. Create mirror with policer and multiple source ports, verify session config on all ports.
"""
pmap = dvs.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")
pmap = dict(pmap)
session = "TEST_SESSION"
dst_port = "Ethernet16"
policer="POLICER"
#Sub Test 1
self.dvs_policer.create_policer(policer)
self.dvs_policer.verify_policer(policer)
self.dvs_mirror.create_span_session(session, dst_port, policer="POLICER")
self.dvs_mirror.verify_session_status(session)
a = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": pmap.get("Ethernet16"),
"SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
self.dvs_mirror.verify_session(dvs, session, asic_db=a, policer="POLICER")
self.dvs_mirror.remove_mirror_session(session)
self.dvs_policer.remove_policer("POLICER")
self.dvs_policer.verify_no_policer()
self.dvs_mirror.verify_no_mirror()
# Sub test 2
src_ports = "Ethernet0,Ethernet4,Ethernet8"
src_asic_ports = ["Ethernet0","Ethernet4","Ethernet8"]
self.dvs_mirror.create_span_session(session, dst_port, src_ports, policer="POLICER")
self.dvs_mirror.verify_session_status(session, expected=0)
# Sub test 3
self.dvs_policer.create_policer(policer)
self.dvs_policer.verify_policer(policer)
self.dvs_mirror.create_span_session(session, dst_port, src_ports, policer="POLICER")
self.dvs_mirror.verify_session_status(session)
self.dvs_mirror.verify_session(dvs, session, asic_db=a, src_ports=src_asic_ports, policer="POLICER")
self.dvs_mirror.remove_mirror_session(session)
self.dvs_policer.remove_policer("POLICER")
self.dvs_policer.verify_no_policer()
self.dvs_mirror.verify_no_mirror()
    def test_PortMultiMirrorPolicerAddRemove(self, dvs, testlog):
        """
        This test covers the basic SPAN mirror session creation and removal operations
        Operation flow:
        1. Create mirror session with multiple source with multiple policer.
        2. Verify port/policer/session config on all.
        """
        # Port name -> SAI object id map, used for the expected ASIC DB entries.
        pmap = dvs.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")
        pmap = dict(pmap)
        session1 = "TEST_SESSION1"
        session2 = "TEST_SESSION2"
        dst_port1 = "Ethernet16"
        dst_oid1 = pmap.get(dst_port1)
        dst_port2 = "Ethernet20"
        dst_oid2 = pmap.get(dst_port2)
        policer1 = "POLICER1"
        policer2 = "POLICER2"
        # Sub Test 1: two policers with different CIRs, one per session.
        self.dvs_policer.create_policer(policer1, cir="600")
        self.dvs_policer.verify_policer(policer1)
        self.dvs_policer.create_policer(policer2, cir="800")
        self.dvs_policer.verify_policer(policer2, expected = 2)
        # Two sessions with disjoint source port sets.
        src_ports1 = "Ethernet0,Ethernet4"
        src_asic_ports1 = ["Ethernet0","Ethernet4"]
        self.dvs_mirror.create_span_session(session1, dst_port1, src_ports1, policer=policer1)
        src_ports2 = "Ethernet8,Ethernet12"
        src_asic_ports2 = ["Ethernet8","Ethernet12"]
        self.dvs_mirror.create_span_session(session2, dst_port2, src_ports2, policer=policer2)
        # Expected ASIC DB attributes for each local SPAN session.
        a1 = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": dst_oid1,
              "SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
        a2 = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": dst_oid2,
              "SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
        # Both sessions must be active simultaneously (expected = 2 entries).
        self.dvs_mirror.verify_session_status(session1, expected = 2)
        self.dvs_mirror.verify_session_status(session2, expected = 2)
        self.dvs_mirror.verify_session(dvs, session1, dst_oid=dst_oid1, asic_db=a1, src_ports=src_asic_ports1, expected = 2, policer=policer1)
        self.dvs_mirror.verify_session(dvs, session2, dst_oid=dst_oid2, asic_db=a2, src_ports=src_asic_ports2, expected = 2, policer=policer2)
        # Teardown: sessions first, then policers.
        self.dvs_mirror.remove_mirror_session(session1)
        self.dvs_mirror.remove_mirror_session(session2)
        self.dvs_policer.remove_policer(policer1)
        self.dvs_policer.remove_policer(policer2)
        self.dvs_policer.verify_no_policer()
        self.dvs_mirror.verify_no_mirror()
    def test_LAGMirrorSpanAddRemove(self, dvs, testlog):
        """
        This test covers the LAG mirror session creation and removal operations
        Operation flow:
        1. Create port channel with 2 members.
        2. Create mirror session with LAG as source port.
        3. Verify that source ports have proper mirror config.
        4. Remove port from port-channel and verify mirror config is removed from the port.
        5. Remove second port and verify mirror config is removed.
        """
        dvs.setup_db()
        # Port name -> SAI object id map, used for the expected ASIC DB entry.
        pmap = dvs.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")
        pmap = dict(pmap)
        session = "TEST_SESSION"
        dst_port = "Ethernet16"
        src_port1 = "Ethernet8"
        src_port2 = "Ethernet4"
        po_src_port = "PortChannel008"
        # Source list mixes the LAG and a plain port; at the ASIC level the
        # LAG is expanded into its member ports.
        src_ports = "PortChannel008,Ethernet12"
        src_asic_ports = ["Ethernet8", "Ethernet4", "Ethernet12"]
        # create port channel; create port channel member
        self.dvs_lag.create_port_channel("008")
        self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 1)
        self.dvs_lag.create_port_channel_member("008", src_port1)
        self.dvs_lag.create_port_channel_member("008", src_port2)
        # bring up port channel and port channel member
        dvs.set_interface_status(po_src_port, "up")
        dvs.set_interface_status(src_port1, "up")
        dvs.set_interface_status(src_port2, "up")
        # Sub Test 1: session sources the LAG plus one plain port.
        self.dvs_mirror.create_span_session(session, dst_port, src_ports)
        self.dvs_mirror.verify_session_status(session)
        # verify asicdb
        # Check src_port state.
        expected_asic_db = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": pmap.get(dst_port),
                            "SAI_MIRROR_SESSION_ATTR_TYPE": "SAI_MIRROR_SESSION_TYPE_LOCAL"}
        self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db, src_ports=src_asic_ports, asic_size=2)
        # Sub Test 2: removing a LAG member must drop its mirror config.
        # remove port channel member; remove port channel
        self.dvs_lag.remove_port_channel_member("008", src_port1)
        src_asic_ports = ["Ethernet4", "Ethernet12"]
        self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db, src_ports=src_asic_ports, asic_size=2)
        self.dvs_lag.remove_port_channel_member("008", src_port2)
        self.dvs_lag.remove_port_channel("008")
        self.dvs_mirror.remove_mirror_session(session)
        self.dvs_mirror.verify_no_mirror()
def test_PortMirrorPolicerWithAcl(self, dvs, dvs_acl, testlog):
"""
This test covers the port mirroring with policer and ACL configurations.
Operation flow:
1. Create port mirror session with policer.
2. Create ACL and configure mirror
2. Verify mirror and ACL config is proper.
"""
dvs.setup_db()
session = "MIRROR_SESSION"
policer= "POLICER"
dst_port = "Ethernet16"
# create policer
self.dvs_policer.create_policer(policer)
self.dvs_policer.verify_policer(policer)
# create mirror session
self.dvs_mirror.create_span_session(session, dst_port, policer=policer)
self.dvs_mirror.verify_session_status(session)
member_ids = dvs.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION", 1)
# create acl table
bind_ports = ["Ethernet0", "Ethernet4"]
dvs_acl.create_acl_table("test", "mirror", bind_ports)
dvs_acl.verify_acl_table_count(1)
dvs_acl.verify_acl_table_groups(len(bind_ports))
config_qualifiers = {"DSCP": "8/56"}
mirror_oid = "1:" + member_ids[0]
expected_sai_qualifiers = {
"SAI_ACL_ENTRY_ATTR_FIELD_DSCP": dvs_acl.get_simple_qualifier_comparator("8&mask:0x38")
}
dvs_acl.create_mirror_acl_rule("test", "mirror_rule", config_qualifiers, session)
dvs_acl.verify_mirror_acl_rule(expected_sai_qualifiers, mirror_oid)
dvs_acl.remove_acl_rule("test", "mirror_rule")
dvs_acl.verify_no_acl_rules()
dvs_acl.remove_acl_table("test")
dvs_acl.verify_acl_table_count(0)
self.dvs_mirror.remove_mirror_session(session)
self.dvs_policer.remove_policer(policer)
self.dvs_policer.verify_no_policer()
self.dvs_mirror.verify_no_mirror()
def test_PortMirrorLAGPortSpanAddRemove(self, dvs, testlog):
"""
This test covers the LAG mirror session creation and removal | |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import ctypes
import os
import threading
import time
from datetime import date
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional
import bokeh
import panel as pn
import param
import tornado
import tornado.gen
from saklib.sak import root_cmd
from saklib.sakcmd import SakArg, SakCmd, SakCompleterArg, sak_arg_parser
from saklib.sakio import (
get_stdout_buffer_for_thread,
unregister_stderr_thread_id,
unregister_stdout_thread_id,
)
# Pandas is an optional dependency: record its availability instead of
# failing at import time, so the rest of the module still loads without it.
has_pandas = False
try:
    import pandas as pd
except Exception as pandas_import_error:
    # Deliberately broad: any failure to import pandas is treated as
    # "pandas unavailable" (best-effort behavior preserved).
    print("WARNING! Failed to import pandas", str(pandas_import_error))
else:
    has_pandas = True
# Absolute location of this script; the static web assets live in the
# sibling "web" directory.
SCRIPT_PATH = Path(__file__).resolve()
RESOURCES_PATH = SCRIPT_PATH.parent.joinpath("web")
class StopableThread(threading.Thread):
    """Thread that can be asked to stop by injecting SystemExit into it.

    The injection relies on the CPython-only API
    ``ctypes.pythonapi.PyThreadState_SetAsyncExc``.
    """

    def get_id(self) -> Optional[int]:
        """Return the id of this thread, or None if it is not running."""
        # returns id of the respective thread
        # if hasattr(self, '_thread_id'):
        #     return self._thread_id
        # threading.Thread keeps its id private, so look this thread up in
        # the interpreter's table of active threads instead.
        for thread_id, thread in threading._active.items():  # type: ignore
            if thread is self:
                return thread_id  # type: ignore
        return None

    def raise_exception(self) -> None:
        """Asynchronously raise SystemExit inside this thread (no-op if not running)."""
        print("Raise exception")
        thread_id = self.get_id()
        # FIX: guard against a thread that never started or already finished;
        # passing None to PyThreadState_SetAsyncExc would raise a ctypes
        # ArgumentError instead of failing gracefully.
        if thread_id is None:
            return
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            thread_id, ctypes.py_object(SystemExit)
        )
        if res > 1:
            # More than one thread state was modified: undo the request, as
            # the PyThreadState_SetAsyncExc documentation prescribes.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            print("Exception raise failure")
class SakWebCmdArg:
    """Adapter around a :class:`SakArg` that (a) exposes the argument as a
    JSON-friendly metadata dict for the web UI and (b) converts a request
    value back into argparse-style command-line tokens."""

    def __init__(self, arg: SakArg):
        # Wrapped argument definition (name, help, argparse vargs, ...).
        self.arg = arg
        # TODO(witt): Maybe I can populate the arg defaults with what comes from the get params?

    # TODO(witt): What is the type of request?
    def getAsDict(self, request: Optional[Any] = None) -> Dict[str, Any]:
        """Describe this argument (name/help/type/default/choices/nargs/action)
        as a plain dict for the web frontend.

        NOTE(review): `request` is assumed to carry a ``.json`` dict attribute
        (tornado/flask style) — confirm against callers.
        """
        action = self.arg.vargs.get("action", "")
        default = self.arg.vargs.get("default", None)
        choices = list(self.arg.vargs.get("choices", []))
        arg_type = self.arg.vargs.get("type", None)
        nargs = self.arg.vargs.get("nargs", None)
        # Work around for list: multi-valued nargs always means a list widget.
        if nargs is not None:
            if nargs in ["*", "+"]:
                arg_type = list
        # Infer the type from the argparse action when not given explicitly.
        if arg_type is None:
            if action in ["store_true", "store_false"]:
                arg_type = bool
            if action in ["append"] or nargs in ["*", "+"]:
                arg_type = list
        # Boolean flags get the argparse-implied default when none is set.
        if default is None:
            if action == "store_true":
                default = False
            elif action == "store_false":
                default = True
        # Python type -> frontend type-name mapping.
        type_lut: Dict[Any, str] = {
            bool: "bool",
            str: "string",
            list: "list",
            int: "int",
            float: "float",
            date: "date",
        }
        # TODO(witt): Should I override the default or give another value?
        request_default = None
        if request is not None:
            request_default = request.json.get(self.arg.name, None)
        ret: Dict[str, Any] = {
            "name": self.arg.name,
            "help": self.arg.helpmsg,
            "type": type_lut.get(arg_type, "string"),
            # NOTE(review): `or` means falsy request values (0, "", False)
            # fall back to the declared default — confirm this is intended.
            "default": request_default or default,
            "choices": choices,
            "nargs": nargs,
            "action": action,
            "orig_type": type_lut.get(self.arg.orig_type, None),
        }
        # ret.update(self.vargs)
        return ret

    def getRequestArgList(self, request: Dict[str, Any]) -> List[str]:
        """Convert the request value for this argument into a list of CLI
        tokens (e.g. ``["--name", "value"]``); empty list when the value is
        absent or should be omitted."""
        # Frontend type-name -> Python type mapping (inverse of getAsDict's).
        type_lut: Dict[str, Any] = {
            "bool": bool,
            "string": str,
            "list": list,
            "int": int,
            "float": float,
            "date": date,
        }
        cfg = self.getAsDict()
        name = cfg["name"]
        # NOTE(review): the fallback here is the literal "string", not the
        # str type, so unknown types fall through to the scalar branch below.
        arg_type = type_lut.get(cfg["orig_type"], None) or type_lut.get(
            cfg["type"], "string"
        )
        arg_action = cfg["action"]
        req_arg = request.get(name, None)
        ret = []
        if req_arg is not None:
            if arg_type is list:
                # Build "--name v1 v2 ..." but only emit it when at least one
                # value survives parsing.
                tmp_ret = []
                tmp_ret.append("--%s" % name)
                tmp_ret_value = []
                if isinstance(req_arg, list):
                    tmp_ret_value += [str(x) for x in req_arg]
                elif isinstance(req_arg, str):
                    # Accept newline- or comma-separated strings.
                    if req_arg.strip():
                        if "\n" in req_arg:
                            tmp_ret_value += req_arg.split("\n")
                        else:
                            tmp_ret_value += req_arg.split(",")
                else:
                    raise Exception("No known way of handling list parameter")
                if tmp_ret_value:
                    ret += tmp_ret
                    ret += tmp_ret_value
            elif arg_type is float:
                if req_arg:
                    ret.append("--%s" % name)
                    ret.append(str(req_arg))
            elif arg_type is int:
                if req_arg:
                    ret.append("--%s" % name)
                    ret.append(str(req_arg))
            elif arg_type is date:
                if req_arg:
                    ret.append("--%s" % name)
                    ret.append(str(req_arg))
            else:
                # Scalar branch: booleans emit only the flag; anything else
                # emits flag + raw value.
                if arg_type is bool:
                    # Emit a store_true/store_false flag only when the request
                    # value matches the flag's truthy/falsy vocabulary.
                    if "store_true" == arg_action:
                        if req_arg not in ["yes", "true", "1", True]:
                            return []
                    if "store_false" == arg_action:
                        if req_arg not in ["false", "no", "0", False]:
                            return []
                else:
                    if req_arg == "":
                        return []
                ret.append("--%s" % name)
                if arg_type is not bool:
                    if isinstance(req_arg, list):
                        ret += req_arg
                    else:
                        ret.append(req_arg)
        return ret
class CallbackObject:
    def __init__(
        self, doc: bokeh.document.document.Document, root_cmd: SakCmd, args: List[str]
    ) -> None:
        """Resolve the command addressed by `args` under `root_cmd` and build
        one panel widget per command argument, plus the Run/Abort buttons and
        output panes.

        Raises
        ------
        Exception
            With an error-payload dict when `args` does not resolve to a
            known command.
        """
        web_ret: Dict[str, Any] = {}
        self.path = "/".join(args)
        # _root_cmd = root_cmd()
        # Get only the metadata.
        ret = sak_arg_parser(root_cmd, args + ["-h"])
        if args:
            # The resolved command must match the last path component.
            if args[-1] != ret["cmd"].name:
                web_ret["error"] = True
                web_ret["error_message"] = "Could not find the path for %s" % (
                    self.path
                )
                raise Exception(web_ret)
                # return web_ret
        cmd = ret["cmd"]
        # Map argument name -> panel widget, chosen by argument type.
        params = {}
        for arg in cmd.args:
            webarg = SakWebCmdArg(arg).getAsDict()
            name = webarg["name"]
            default = webarg["default"]
            choices = webarg["choices"]
            arg_type = webarg["type"]
            # Prefer the original Python type when it is known.
            if webarg["orig_type"] is not None:
                arg_type = webarg["orig_type"]
            if arg_type in ["string"]:
                # String: Select when choices (static or from completer) are
                # available, otherwise a free-text input.
                _params = {}
                if choices:
                    if default is not None:
                        _params["value"] = str(default)
                    params[name] = pn.widgets.Select(
                        name=name, options=choices, **_params
                    )
                elif arg.completercb is not None:
                    # NOTE(review): completer is called with all-None context
                    # here — presumably it can produce a static option list.
                    completer_args = SakCompleterArg(None, None, None, None)
                    choices = arg.completercb(completer_args)
                    params[name] = pn.widgets.Select(
                        name=name, options=choices, **_params
                    )
                else:
                    if default is not None:
                        _params["value"] = str(default)
                    params[name] = pn.widgets.TextInput(name=name, **_params)
            elif arg_type in ["date"]:
                _params = {}
                if default is not None:
                    _params["value"] = default
                params[name] = pn.widgets.DatePicker(name=name, **_params)
            elif arg_type in ["int"]:
                _params = {}
                if default is not None:
                    _params["value"] = default
                params[name] = pn.widgets.IntInput(name=name, **_params)
            elif arg_type in ["float"]:
                _params = {}
                if default is not None:
                    _params["value"] = default
                params[name] = pn.widgets.FloatInput(name=name, **_params)
            elif arg_type in ["bool"]:
                params[name] = pn.widgets.Checkbox(name=name, value=default)
            elif arg_type in ["list"]:
                # List: try MultiChoice when options exist; the fall-through
                # "if name not in params" guards pick the first widget that
                # could be built.
                if name not in params:
                    _params = {}
                    if default:
                        _params["value"] = default
                    if choices:
                        _params["options"] = choices
                    if arg.completercb is not None:
                        completer_args = SakCompleterArg(None, None, None, None)
                        _params["options"] = arg.completercb(completer_args)
                    if "options" in _params:
                        params[name] = pn.widgets.MultiChoice(name=name, **_params)
                # NOTE(review): this second block repeats the same conditions,
                # so the CrossSelector branch looks unreachable whenever the
                # MultiChoice branch fired — confirm whether that is intended.
                if name not in params:
                    _params = {}
                    if default:
                        _params["value"] = default
                    if choices:
                        _params["options"] = choices
                    if arg.completercb:
                        completer_args = SakCompleterArg(None, None, None, None)
                        _params["options"] = arg.completercb(completer_args)
                    if "options" in _params:
                        params[name] = pn.widgets.CrossSelector(name=name, **_params)
                # Fallback: no options available — free-form text area, one
                # entry per line.
                if name not in params:
                    _params = {}
                    if default is not None:
                        if isinstance(default, list):
                            _params["value"] = "\n".join(default)
                        else:
                            _params["value"] = str(default)
                    params[name] = pn.widgets.TextAreaInput(name=name, **_params)
        # Bokeh document that owns the UI; all updates are scheduled on it.
        self.doc = doc
        self.params = params
        self.root_cmd = root_cmd
        self.args = args
        self.cmd = cmd
        # Pane that receives the command result.
        self.output = pn.Column(sizing_mode="stretch_both")
        self.run_button = pn.widgets.Button(
            name="Run", button_type="primary", width=250
        )
        self.abort_button = pn.widgets.Button(name="Abort", button_type="primary")
        # Pane mirroring the command's captured stdout.
        self.stdout = pn.pane.Str("", sizing_mode="stretch_both")
        # self.layout = pn.Row( self.output, sizing_mode="stretch_width")
        self.run_button.on_click(self.start_callback)
        self.abort_button.on_click(self.abort_callback)
        # Worker thread running the command; None while idle.
        self.thread: Optional[StopableThread] = None
def stdout_view(self) -> pn.pane.Str:
return self.stdout
def parameters_view(self) -> pn.Column:
mk_content = f"""
# {self.cmd.name.capitalize()}
"""
if self.cmd.helpmsg:
mk_content += f"""
## Help
{self.cmd.helpmsg}
"""
ret = pn.Column(pn.pane.Markdown(mk_content, sizing_mode="stretch_width"))
for obj in self.params.values():
ret.append(obj)
if self.cmd.callback is not None:
ret.append(self.run_button)
return ret
def view(self) -> pn.Column:
return self.output
    @tornado.gen.coroutine
    def update_doc(self, new_output: pn.pane.PaneBase, stdout_str: pn.pane.Str) -> None:
        """Replace the output pane's content and refresh the stdout pane.

        Scheduled via ``doc.add_next_tick_callback`` so it runs on the Bokeh
        document's event loop rather than on the worker thread.
        """
        # TODO(witt): This coroutine is the one that will actually update the content
        # source.stream(dict(x=[x], y=[y]))
        self.output.clear()
        self.output.append(new_output)
        # print(stdout_str)
        self.stdout.object = stdout_str
    @tornado.gen.coroutine
    def update_stdout(self, stdout_str: str) -> None:
        """Refresh only the stdout pane (scheduled on the document's event loop)."""
        # TODO(witt): This coroutine is the one that will actually update the content
        # print(stdout_str)
        self.stdout.object = stdout_str
def abort_callback(self, event: Any) -> None:
print("Raise exception!!!")
if self.thread:
self.thread.raise_exception()
self.thread = None
def start_callback(self, event: Any) -> None:
vargs = {param_name: param.value for param_name, param in self.params.items()}
# Start thread in another callback.
self.thread = StopableThread(target=self.callback, kwargs=vargs)
# self.thread = threading.Thread(target=self.callback, kwargs=vargs)
self.thread.start()
    def callback(self, **vargs: Any) -> None:
        """Worker-thread entry point: show a spinner, run the command via
        ``_callback`` while streaming its captured stdout into the UI, then
        publish the final output.

        All UI mutations are scheduled with ``doc.add_next_tick_callback``;
        only the Bokeh event loop touches the panes.
        """
        new_output = None
        # TODO: Get from my resources here
        loading = pn.indicators.LoadingSpinner(value=True, width=100, height=100)
        # Show the spinner while the command runs.
        self.doc.add_next_tick_callback(
            partial(self.update_doc, new_output=loading, stdout_str=None)
        )
        # TODO(witt): This is a work around. Try to remove.
        # Make sure will start from a clean buffer.
        unregister_stdout_thread_id()
        unregister_stderr_thread_id()
        try:
            # Start a thread to update the stdout every 1s
            # (flag is closed over by the updater and flipped off when the
            # command finishes).
            do_update_stdout = True

            def simple_update_stdout() -> None:
                UPDATE_PERIOD = 2
                # MAX_SIZE = -1
                # Only the trailing 10 KiB of output is mirrored to the UI.
                MAX_SIZE = 10 * 1024
                while do_update_stdout:
                    if self.thread is None:
                        raise Exception("Thread was not set")
                    stdout_strio = get_stdout_buffer_for_thread(self.thread.ident)
                    stdout_str = ""
                    if stdout_strio is not None:
                        stdout_str = stdout_strio.getvalue()[-MAX_SIZE:]
                    # Only schedule a UI update when the text actually changed.
                    if self.stdout.object != stdout_str:
                        # loading = pn.pane.GIF('https://upload.wikimedia.org/wikipedia/commons/b/b1/Loading_icon.gif')
                        self.doc.add_next_tick_callback(
                            partial(self.update_stdout, stdout_str=stdout_str)
                        )
                    if do_update_stdout:
                        time.sleep(UPDATE_PERIOD)

            update_thread = threading.Thread(target=simple_update_stdout)
            update_thread.start()
            # This is running in another thread.
            # Run callback code.
            new_output = self._callback(**vargs)
            # Stop the update thread
            do_update_stdout = False
            # Will not join, to avoid blocking for up to a full sleep period.
            # update_thread.join()
        finally:
            # Schedule document update into tornado
            stdout_strio = get_stdout_buffer_for_thread()
            stdout_str = ""
            if stdout_strio is not None:
                stdout_str = stdout_strio.getvalue()
            # Objects exposing .panel() render themselves.
            if (new_output is not None) and hasattr(new_output, "panel"):
                new_output = new_output.panel()
            self.doc.add_next_tick_callback(
                partial(
                    self.update_doc,
                    new_output=new_output,
                    stdout_str=stdout_str + "\nDONE!",
                )
            )
            # Clean the thread buffers.
            unregister_stdout_thread_id()
            unregister_stderr_thread_id()
            # TODO(witt): Should I do some thread cleaning?
def _callback(self, **vargs: Any) -> Any:
ret: Any = None
param_args = []
| |
9': 34, 'rock': 3, 'side l': 23, 'side r': 14, 'sidefill1': 6, 'sidefill2': 23, }
# Lighting submaster levels, keyed by cue name (or a (cue, color) tuple).
# Each value maps a channel/fixture name to an intensity from 0 to 100.
subs[('*2-01-0-dance', 'white')]={ 'b32': 43, 'b34': 30, 'cycleft': 19, 'cycright': 19, 'desk2': 24, 'hotback': 100, 'hotbox1': 42, 'hotbox2': 78, 'main 10': 100, 'main 11': 46, 'main 2': 46, 'main 3': 46, 'main 4': 46, 'main 5': 100, 'main 7': 100, 'main 8': 46, 'main 9': 46, 'red1': 100, 'red2': 100, 'red3': 100, 'red4': 100, 'upfill1': 26, 'upfill2': 46, 'upfill3': 32, 'upfill4': 26, }
subs['*2-01-01-solo']={ 'b23': 100, 'b24': 100, 'b32': 43, 'cycleft': 55, 'cycright': 55, 'desk2': 24, 'hotback': 100, 'hotbox1': 42, 'hotbox2': 78, 'main 7': 100, 'red1': 100, 'red2': 100, 'red3': 100, 'red4': 100, 'upfill1': 67, 'upfill2': 1, 'upfill4': 67, }
subs['*2-01-1-after dance']={ 'b32': 43, 'b34': 30, 'cycleft': 19, 'cycright': 19, 'desk2': 24, 'hotback': 100, 'hotbox1': 42, 'hotbox2': 78, 'main 10': 100, 'main 11': 46, 'main 2': 46, 'main 3': 46, 'main 4': 46, 'main 5': 100, 'main 7': 100, 'main 8': 46, 'main 9': 46, 'red1': 100, 'red2': 100, 'red3': 100, 'red4': 100, 'upfill1': 26, 'upfill2': 46, 'upfill3': 32, 'upfill4': 26, }
subs['*2-01-1-darker dance']={ 'b23': 93, 'b24': 93, 'b32': 4, 'b34': 9, 'cycleft': 19, 'cycright': 19, 'desk2': 10, 'hotback': 100, 'hotbox1': 42, 'hotbox2': 52, 'red1': 100, 'red2': 100, 'red3': 100, 'red4': 100, 'upfill4': 26, }
subs['*2-01-2-table']={ 'b22': 100, 'cuba1': 22, 'desk2': 58, 'hotbox2': 46, 'main 2': 62, 'main 3': 80, 'main 4': 100, 'main 5': 34, 'main 7': 10, 'main 8': 10, 'main 9': 64, 'red1': 100, 'rock': 22, 'upfill1': 76, 'upfill2': 76, 'upfill3': 76, 'upfill4': 76, }
subs['*2-01-3-small table']={ 'b22': 56, 'b25': 5, 'desk1': 47, 'desk2': 58, 'main 3': 18, 'main 9': 11, 'red1': 100, 'upfill1': 62, 'upfill4': 62, }
subs[('*2-02-0', 'white')]={ 'b23': 76, 'b24': 52, 'main 10': 53, 'main 2': 53, 'main 4': 24, 'main 5': 18, 'main 7': 42, 'main 8': 36, 'main 9': 60, 'marry2': 38, 'side r': 34, }
subs['*2-02-1-works']={'upfill2':50,'upfill3':50,'cycright':50}
subs[('*2-03-00-open dance', 'white')]={ 'b22': 11, 'blue1': 70, 'blue2': 70, 'blue3': 70, 'blue4': 92, 'cuba1': 20, 'gree1': 75, 'gree2': 75, 'gree3': 75, 'gree4': 75, 'hotback': 40, 'main 10': 40, 'main 11': 28, 'main 2': 60, 'main 4': 45, 'main 5': 20, 'main 8': 26, 'main 9': 42, 'red1': 75, 'red2': 75, 'red3': 75, 'red4': 97, 'side l': 31, 'side r': 31, 'upfill1': 27, 'upfill2': 31, 'upfill3': 26, 'upfill4': 17, }
subs['*2-03-10-dialogue']={ 'b13': 60, 'b22': 62, 'b23': 64, 'b24': 19, 'b25': 16, 'blue1': 70, 'blue2': 70, 'blue3': 70, 'blue4': 92, 'cuba1': 59, 'gree1': 75, 'gree2': 75, 'gree3': 75, 'gree4': 75, 'hotback': 40, 'main 10': 48, 'main 11': 40, 'main 2': 54, 'main 4': 45, 'main 5': 20, 'main 8': 22, 'main 9': 73, 'red1': 75, 'red2': 75, 'red3': 75, 'red4': 97, 'side l': 31, 'side r': 31, 'sidefill1': 20, 'sidefill2': 20, 'upfill1': 27, 'upfill2': 31, 'upfill3': 26, 'upfill4': 17, }
subs['*2-03-20-luckcover']={ 'b22': 20, 'b23': 20, 'b24': 20, 'b25': 20, 'blue1': 70, 'blue2': 70, 'blue3': 70, 'blue4': 92, 'cuba1': 5, 'gree1': 75, 'gree2': 75, 'gree3': 75, 'gree4': 75, 'hotback': 40, 'main 7': 100, 'main 8': 57, 'red1': 75, 'red2': 75, 'red3': 75, 'red4': 97, 'side l': 31, 'side r': 31, 'upfill1': 27, 'upfill2': 31, 'upfill3': 26, 'upfill4': 17, }
# Left / center / right specials for the "luck" moment.
subs['*2-03-20-luck-l']={ 'b22': 100, }
subs['*2-03-20-luck-c']={ 'b23': 100, 'b24': 100, }
subs['*2-03-20-luck-r']={ 'b25': 100, }
subs[('*2-04-0', 'white')]={ 'b13': 39, 'b22': 50, 'b23': 67, 'b24': 67, 'b25': 71, 'b32': 57, 'b34': 34, 'blue1': 63, 'blue2': 63, 'blue3': 63, 'blue4': 63, 'cycright': 18, 'desk1': 24, 'desk2': 26, 'hotbox2': 59, 'main 10': 5, 'main 11': 5, 'main 2': 5, 'main 3': 45, 'main 5': 56, 'main 7': 5, 'main 8': 5, 'main 9': 5, 'marry2': 50, 'rock': 20, 'side r': 34, 'upfill1': 70, 'upfill4': 70, }
subs[('*2-05-0', 'white')]={ 'b22': 100, 'b23': 100, 'b24': 100, 'b32': 14, 'cuba2': 9, 'desk1': 53, 'desk2': 65, 'hotbox1': 25, 'hotbox2': 100, 'main 10': 100, 'main 11': 100, 'main 4': 100, 'main 5': 70, 'main 7': 100, 'main 8': 100, 'main 9': 100, 'marry1': 61, 'marry2': 47, 'rock': 23, 'sidefill2': 25, 'upfill2': 6, 'upfill3': 34, }
subs['*2-05-1-dream']={ 'desk2': 42, 'dream': 100, 'main 11': 7, 'upfill2': 16, }
subs['*2-05-2-boat']={ 'b22': 100, 'b23': 100, 'b24': 100, 'b32': 52, 'cuba2': 65, 'cycright': 55, 'desk1': 44, 'desk2': 84, 'hotbox1': 21, 'hotbox2': 95, 'main 10': 84, 'main 11': 84, 'main 3': 72, 'main 4': 100, 'main 5': 83, 'main 7': 100, 'main 8': 100, 'main 9': 100, 'marry1': 75, 'marry2': 100, 'rock': 43, 'sidefill2': 43, 'upfill2': 55, 'upfill3': 31, }
subs[('*2-06-0', 'white')]={ 'b22': 14, 'b23': 100, 'b24': 100, 'b32': 23, 'b34': 30, 'cycright': 100, 'desk2': 23, 'hotbox1': 49, 'hotbox2': 43, 'main 10': 55, 'main 11': 55, 'main 2': 30, 'main 7': 30, 'main 9': 30, 'marry1': 69, 'marry2': 34, 'rock': 17, 'side r': 30, 'upfill1': 48, 'upfill4': 48, }
subs[('*2-07-0', 'white')]={ 'b22': 100, 'b23': 100, 'b24': 100, 'b25': 100, 'b34': 100, 'cycleft': 41, 'cycright': 41, 'desk2': 78, 'edge': 63, 'hotbox1': 14, 'hotbox2': 5, 'main 10': 100, 'main 11': 100, 'main 2': 100, 'main 3': 83, 'main 4': 100, 'main 5': 100, 'main 7': 100, 'main 8': 100, 'main 9': 100, 'marry1': 100, 'marry2': 100, 'phone': 62, 'side l': 100, 'side r': 100, 'sidefill1': 83, 'sidefill2': 100, 'upfill1': 56, 'upfill2': 100, 'upfill3': 69, 'upfill4': 56, }
subs['*curtain']={ 'b22': 73, 'b24': 73, 'b25': 73, 'b34': 73, 'desk2': 57, 'edge': 58, 'hotbox2': 73, 'main 10': 73, 'main 11': 73, 'main 2': 73, 'main 3': 73, 'main 4': 73, 'main 5': 73, 'main 7': 73, 'main 8': 73, 'main 9': 73, 'marry1': 73, 'marry2': 73, 'phone': 58, 'side l': 73, 'side r': 73, 'sidefill1': 23, 'sidefill2': 23, 'upfill1': 9, 'upfill2': 68, 'upfill3': 18, 'upfill4': 9, }
subs['*phone booth']={ 'phone': 100, }
subs['*spare']={ }
# Single-purpose groups (one logical area each).
subs['bank1ctr']={ 'b22': 100, 'b23': 100, 'b24': 100, 'b25': 100, }
subs['cyc']={ 'cycleft': 100, 'cycright': 100, }
subs['god']={ 'god': 100, }
subs['patio left']={ 'patio1': 100, }
subs['patio right']={ 'patio2': 100, }
subs['sidefill']={ 'sidefill1': 100, 'sidefill2': 100, }
subs['sidepost']={ 'side l': 100, 'side r': 100, }
subs['upfill sides']={ 'upfill1': 100, 'upfill4': 100, }
# NOTE(review): the empty "*interscene" entry below is immediately replaced
# by the populated one on the next statement; the first assignment is dead.
subs["*interscene"] = {}
subs["*interscene"] = { "blue1" : 49, "blue3" : 49, "blue2" : 49,
    "blue4" : 49,}
subs["*1-08-30-full"] = { "cycright" : 15, "main 11" : 38, "main 10" : 38,
    "upfill1" : 0, "sidefill2" : 38, "b25" : 100, "side l" : 38,
    "b23" : 100, "b22" : 100, "desk2" : 30, "oran3" : 82, "upfill4" : 0,
    "side r" : 38, "upfill3" : 0, "blue3" : 15, "upfill2" : 0, "gree2" : 15,
    "gree3" : 15, "cafe2" : 100, "gree1" : 15, "gree4" : 15, "marry2" : 38,
    "marry1" : 38, "cuba1" : 100, "cuba2" : 100, "red3" : 77, "red2" : 77,
    "sidefill1" : 38, "b24" : 100, "red4" : 95, "b34" : 54, "cycleft" : 15,
    "b32" : 43, "hotbox2" : 38, "hotbox1" : 38, "blue1" : 15, "oran2" : 82,
    "oran1" : 82, "blue2" : 15, "blue4" : 15, "oran4" : 82, "main 3" : 38,
    "main 2" : 38, "main 5" : 38, "main 4" : 38, "main 7" : 38, "phone" : 31,
    "main 9" : 38, "main 8" : 38, "edge" : 31, "cafe1" : 100, "red1" : 77,}
# NOTE(review): these re-assignments override the earlier definitions of
# the same cue keys above (later assignment wins).
subs["*2-03-20-luck-c"] = { "hotbox2" : 0, "b23" : 100, "b24" : 100,
    "main 5" : 52, "marry2" : 37,}
subs["*2-07-0"] = { "sidefill2" : 100, "sidefill1" : 83, "cycright" : 41,
    "main 11" : 100, "main 10" : 100, "upfill1" : 56, "b34" : 100,
    "b25" : 100, "cycleft" : 41, "b23" : 100, "b22" : 100, "side l" : 100,
    "hotbox2" : 5, "hotbox1" : 14, "upfill4" : 56, "b24" : 100, "desk2" : 78,
    "upfill3" : 69, "upfill2" : 100, "main 3" : 83, "main 2" : 100,
    "main 5" : 100, "main 4" : 100, "main 7" : 100, "phone" : 62,
    "main 9" : 100, "main 8" : 100, "edge" : 63, "marry2" : 100,
    "marry1" : 100, "xmas" : 80, "side r" : 100,}
subs["*1-01-0-sarah"] = { "sidefill2" : 100, "sidefill1" : 100,
"cycright" : 41, "upfill3" : 60, "upfill2" : 91, "upfill1" : 56,
"side l" : 100, "b25" : 100, "cycleft" : | |
<reponame>evereux/catia_python
#! /usr/bin/python3.6
# module initially auto generated using V5Automation.chm from CATIA R25
from pywintypes import com_error
from pathlib import Path
from pycatia.enumeration.enumeration_types import cat_script_language
from pycatia.exception_handling.exceptions import CATIAApplicationException
from pycatia.in_interfaces.document import Document
from pycatia.in_interfaces.documents import Documents
from pycatia.in_interfaces.file_system import FileSystem
from pycatia.in_interfaces.printer import Printer
from pycatia.in_interfaces.printers import Printers
from pycatia.in_interfaces.send_to_service import SendToService
from pycatia.in_interfaces.system_configuration import SystemConfiguration
from pycatia.in_interfaces.window import Window
from pycatia.in_interfaces.windows import Windows
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.system_interfaces.system_service import SystemService
from pycatia.in_interfaces.setting_controllers import SettingControllers
from pycatia.types.document_types import document_type
class Application(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| Application
|
| Represents the current CNext application and its frame window.
| The application is the root object for all the other objects you can use and
| access from scripts. It directly aggregates:
|
| The document collection represented by the Documents object. This
| collection contains all the documents currently opened by the
| application
| The window collection represented by the Windows object. This collection
| contains all the windows currently opened by the application, each window
| displaying one of the documents contained in the document
| collection
| The SystemService object, providing information about the system
| environment.
|
| The active document and the active window are two key objects for the
| application you can access using the ActiveDocument and ActiveWindow properties
| respectively. The active window is the window the end user is currently working
| in, and the active document is the document displayed in this active window and
| that the end user is being editing. This document sets its workshop, that is
| the available menus and toolbars that make it possible to edit it, according to
| its type.
|
| When you create or use macros for in-process access, the application is always
| referred to as CATIA.
"""
def __init__(self, com_object):
    """
    :param com_object: raw CATIA ``Application`` COM dispatch object.
    """
    super().__init__(com_object)
    # Kept locally as well; all properties below go through self.com_object.
    self.com_object = com_object
@property
def active_document(self) -> Document:
    """
    Return the active document wrapped in its type-specific Document subclass.

    CAA: ``Property ActiveDocument() As Document (Read Only)``.

    The wrapper class is looked up in ``document_type`` by the file suffix
    of the COM document's name (e.g. ``CATPart``).

    :return: Document
    :rtype: Document
    :raises CATIAApplicationException: when no document is active.
    """
    try:
        active_doc_com = self.com_object.ActiveDocument
        doc_suffix = active_doc_com.Name.split('.')[-1]
        return document_type[doc_suffix](active_doc_com)
    except com_error as caught:
        # Chain the COM error so the underlying failure is preserved in
        # the traceback instead of being silently discarded.
        raise CATIAApplicationException('Is there an active document?') from caught
@property
def active_printer(self) -> Printer:
    """
    The printer on which documents are printed.

    CAA: ``Property ActivePrinter() As Printer``.

    :return: Printer
    :rtype: Printer
    """
    return Printer(self.com_object.ActivePrinter)

@active_printer.setter
def active_printer(self, value: Printer):
    """
    Set the active printer.

    NOTE(review): the wrapper object itself is assigned to the COM
    property here — confirm the COM layer accepts it (vs. ``value.com_object``).

    :param Printer value:
    """
    self.com_object.ActivePrinter = value
@property
def active_window(self) -> Window:
    """
    The window in which the end user is currently editing the active document.

    CAA: ``Property ActiveWindow() As Window (Read Only)``.

    :return: Window
    :rtype: Window
    """
    return Window(self.com_object.ActiveWindow)
@property
def cache_size(self) -> int:
    """
    Default local cache size used by the application.

    CAA: ``Property CacheSize() As long``.

    :return: int
    :rtype: int
    """
    return self.com_object.CacheSize

@cache_size.setter
def cache_size(self, value: int):
    """
    Set the default local cache size.

    :param int value:
    """
    self.com_object.CacheSize = value
@property
def caption(self) -> str:
    """
    The application's window title, as shown in the title bar
    (typically ``CNext``).

    CAA: ``Property Caption() As CATBSTR``.

    :return: str
    :rtype: str
    """
    return self.com_object.Caption

@caption.setter
def caption(self, value: str):
    """
    Set the application's window title.

    :param str value:
    """
    self.com_object.Caption = value
@property
def display_file_alerts(self) -> bool:
    """
    Whether the application displays file alerts (default True).

    CAA: ``Property DisplayFileAlerts() As boolean``.

    A file alert is e.g. the dialog warning that a file is read-only or
    needs saving before closing. Disabling them is handy for automation,
    since a modal dialog would block a macro waiting for user input.

    :return: bool
    :rtype: bool
    """
    return self.com_object.DisplayFileAlerts

@display_file_alerts.setter
def display_file_alerts(self, value: bool):
    """
    Enable or disable file alert dialogs.

    :param bool value:
    """
    self.com_object.DisplayFileAlerts = value
@property
def documents(self) -> Documents:
    """
    Collection of all documents currently managed by the application.

    CAA: ``Property Documents() As Documents (Read Only)``.

    :return: Documents
    :rtype: Documents
    """
    return Documents(self.com_object.Documents)
@property
def file_search_order(self) -> str:
    """
    Default path concatenation used by the 'Other folders' setting of the
    Linked Documents Localization function, which resolves document links.

    CAA: ``Property FileSearchOrder() As CATBSTR``.

    Per the CAA help, this requires the CATIA - PPR xPDM Gateway 1
    Product (PX1); without it the first invocation fails.

    :return: str
    :rtype: str
    """
    return self.com_object.FileSearchOrder

@file_search_order.setter
def file_search_order(self, value: str):
    """
    Set the search-path concatenation, e.g. ``"/u/a/model:/u/b/model"``.

    :param str value:
    """
    self.com_object.FileSearchOrder = value
@property
def file_system(self) -> FileSystem:
    """
    The file system object, giving access to the computer's file system.

    CAA: ``Property FileSystem() As FileSystem (Read Only)``.

    :return: FileSystem
    :rtype: FileSystem
    """
    return FileSystem(self.com_object.FileSystem)
@property
def full_name(self) -> str:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property FullName() As CATBSTR (Read Only)
|
| Returns | |
# --------------------------------------------------------
# Sparse Steerable Convolutions
# Common utils w.r.t rotation
# Written by <NAME>
# Modified from https://github.com/tscohen/se3cnn
# --------------------------------------------------------
import torch
import os
import numpy as np
dir_path = os.path.dirname(os.path.abspath(__file__))
# Precomputed Wigner-D change-of-basis matrices, loaded once at import time;
# indexed by degree l (see wigner_D below).
_Jd = torch.load(os.path.join(dir_path, 'new_constants.pt'))
def normalize_vector(v, dim=1, return_mag=False):
    """Return v normalized along ``dim``, guarding against division by zero.

    :param v: input tensor; vectors live along dimension ``dim``.
    :param dim: dimension of the vector components (default 1, i.e. batch*n).
    :param return_mag: if True, also return the clamped magnitudes with
        ``dim`` squeezed out. This parameter previously existed but was
        ignored; the default (False) preserves the old behaviour exactly.
    """
    # L2 magnitude, keepdim so it broadcasts against v.
    v_mag = torch.sqrt(v.pow(2).sum(dim=dim, keepdim=True))
    # clamp replaces the old torch.max(..., FloatTensor([1e-8]).cuda()):
    # numerically identical, but works on CPU-only machines and avoids the
    # deprecated torch.autograd.Variable API.
    v_mag = v_mag.clamp(min=1e-8)
    normed = v / v_mag
    if return_mag:
        return normed, v_mag.squeeze(dim)
    return normed
# u, v batch*3
def cross_product(u, v):
    """Row-wise 3-D cross product u x v for tensors of shape (batch, 3).

    Returns a (batch, 3) tensor. The unused ``batch`` local of the original
    implementation was removed, and the cat-of-unsqueezes replaced by a
    single equivalent torch.stack.
    """
    i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
    j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
    k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
    return torch.stack((i, j, k), dim=1)  # batch*3
def compute_rotation_matrix_from_ortho6d(x_raw, y_raw):
    """Build a batch of rotation matrices from two raw axis vectors,
    trusting y first (Gram-Schmidt style): y is normalized, z is the
    normalized cross of x_raw and y, and x completes the frame.
    """
    y_axis = normalize_vector(y_raw)
    z_axis = normalize_vector(cross_product(x_raw, y_axis))  # batch*3
    x_axis = cross_product(y_axis, z_axis)  # batch*3
    # The three orthonormal axes become the matrix columns -> batch*3*3.
    return torch.stack((x_axis, y_axis, z_axis), dim=2)
def compute_rotation_matrix_from_ortho6d_xy(x_raw, y_raw):
    """Build a batch of rotation matrices from raw x/y axes, trusting x
    first: x is normalized, z is the normalized cross of x and y_raw,
    and y completes the right-handed frame.
    """
    x_axis = normalize_vector(x_raw)
    z_axis = normalize_vector(cross_product(x_axis, y_raw))  # batch*3
    y_axis = cross_product(z_axis, x_axis)  # batch*3
    # Axes as columns -> batch*3*3.
    return torch.stack((x_axis, y_axis, z_axis), dim=2)
def compute_rotation_matrix_from_ortho6d_xz(x_raw, z_raw):
    """Build a batch of rotation matrices from raw x/z axes, trusting x
    first: x is normalized, y is the normalized cross of z_raw and x,
    and z completes the right-handed frame.
    """
    x_axis = normalize_vector(x_raw)
    y_axis = normalize_vector(cross_product(z_raw, x_axis))  # batch*3
    z_axis = cross_product(x_axis, y_axis)  # batch*3
    # Axes as columns -> batch*3*3.
    return torch.stack((x_axis, y_axis, z_axis), dim=2)
def inverse_quaternion(q):
    r"""Inverse of a unit quaternion (its conjugate).

    Works only for unit quaternions. The input is left untouched.

    Parameters
    ----------
    q : `torch.Tensor`
        tensor of shape :math:`(..., 4)` in (w, x, y, z) order

    Returns
    -------
    `torch.Tensor`
        tensor of shape :math:`(..., 4)`
    """
    out = q.clone()
    # Negate the vector part, keep the scalar part.
    out[..., 1:] = -out[..., 1:]
    return out
def compose_quaternion(q1, q2):
    r"""Hamilton product :math:`q_1 \circ q_2` (q2 applied first).

    Parameters
    ----------
    q1 : `torch.Tensor`
        tensor of shape :math:`(..., 4)`, (applied second)
    q2 : `torch.Tensor`
        tensor of shape :math:`(..., 4)`, (applied first)

    Returns
    -------
    `torch.Tensor`
        tensor of shape :math:`(..., 4)`
    """
    q1, q2 = torch.broadcast_tensors(q1, q2)
    w1, x1, y1, z1 = q1[..., 0], q1[..., 1], q1[..., 2], q1[..., 3]
    w2, x2, y2, z2 = q2[..., 0], q2[..., 1], q2[..., 2], q2[..., 3]
    return torch.stack([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        x1 * w2 + w1 * x2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ], dim=-1)
def xyz_to_angles(xyz):
r"""convert a point :math:`\vec r = (x, y, z)` on the sphere into angles :math:`(\alpha, \beta)`
.. math::
\vec r = R(\alpha, \beta, 0) \vec e_z
Parameters
----------
xyz : `torch.Tensor`
tensor of shape :math:`(..., 3)`
Returns
-------
alpha : `torch.Tensor`
tensor of shape :math:`(...)`
beta : `torch.Tensor`
tensor of shape :math:`(...)`
"""
xyz = torch.nn.functional.normalize(xyz, p=2, dim=-1) # forward 0's instead of nan for zero-radius
xyz = xyz.clamp(-1, 1)
beta = torch.acos(xyz[..., 1])
alpha = torch.atan2(xyz[..., 0], xyz[..., 2])
return alpha, beta
def matrix_x(angle: torch.Tensor) -> torch.Tensor:
    r"""Rotation matrices about the X axis.

    Parameters
    ----------
    angle : `torch.Tensor`
        tensor of any shape :math:`(...)`

    Returns
    -------
    `torch.Tensor`
        matrices of shape :math:`(..., 3, 3)`
    """
    c, s = angle.cos(), angle.sin()
    one, zero = torch.ones_like(angle), torch.zeros_like(angle)
    rows = [
        torch.stack([one, zero, zero], dim=-1),
        torch.stack([zero, c, -s], dim=-1),
        torch.stack([zero, s, c], dim=-1),
    ]
    return torch.stack(rows, dim=-2)
def matrix_y(angle: torch.Tensor) -> torch.Tensor:
    r"""Rotation matrices about the Y axis.

    Parameters
    ----------
    angle : `torch.Tensor`
        tensor of any shape :math:`(...)`

    Returns
    -------
    `torch.Tensor`
        matrices of shape :math:`(..., 3, 3)`
    """
    c, s = angle.cos(), angle.sin()
    one, zero = torch.ones_like(angle), torch.zeros_like(angle)
    rows = [
        torch.stack([c, zero, s], dim=-1),
        torch.stack([zero, one, zero], dim=-1),
        torch.stack([-s, zero, c], dim=-1),
    ]
    return torch.stack(rows, dim=-2)
def matrix_z(angle: torch.Tensor) -> torch.Tensor:
    r"""Rotation matrices about the Z axis.

    Parameters
    ----------
    angle : `torch.Tensor`
        tensor of any shape :math:`(...)`

    Returns
    -------
    `torch.Tensor`
        matrices of shape :math:`(..., 3, 3)`
    """
    c, s = angle.cos(), angle.sin()
    one, zero = torch.ones_like(angle), torch.zeros_like(angle)
    rows = [
        torch.stack([c, -s, zero], dim=-1),
        torch.stack([s, c, zero], dim=-1),
        torch.stack([zero, zero, one], dim=-1),
    ]
    return torch.stack(rows, dim=-2)
def angles_to_matrix(alpha, beta, gamma):
    r"""Convert Y-X-Y Euler angles to a rotation matrix:
    :math:`R = Y(\alpha)\, X(\beta)\, Y(\gamma)`.

    Parameters
    ----------
    alpha, beta, gamma : `torch.Tensor`
        tensors of shape :math:`(...)`

    Returns
    -------
    `torch.Tensor`
        matrices of shape :math:`(..., 3, 3)`
    """
    alpha, beta, gamma = torch.broadcast_tensors(alpha, beta, gamma)
    rot = matrix_y(alpha)
    rot = rot @ matrix_x(beta)
    return rot @ matrix_y(gamma)
def matrix_to_angles(R):
    r"""Convert a rotation matrix to Y-X-Y Euler angles.

    Parameters
    ----------
    R : `torch.Tensor`
        matrices of shape :math:`(..., 3, 3)`; must have determinant 1

    Returns
    -------
    alpha, beta, gamma : `torch.Tensor`
        tensors of shape :math:`(...)`
    """
    assert torch.allclose(torch.det(R), R.new_tensor(1))
    # Where R sends e_y fixes alpha and beta ...
    ey_image = R @ R.new_tensor([0.0, 1.0, 0.0])
    alpha, beta = xyz_to_angles(ey_image)
    # ... and the residual rotation about Y gives gamma.
    residual = angles_to_matrix(alpha, beta, torch.zeros_like(alpha)).transpose(-1, -2) @ R
    gamma = torch.atan2(residual[..., 0, 2], residual[..., 0, 0])
    return alpha, beta, gamma
def axis_angle_to_matrix(axis, angle):
    r"""Convert an axis-angle rotation to a matrix.

    Parameters
    ----------
    axis : `torch.Tensor`
        tensor of shape :math:`(..., 3)`
    angle : `torch.Tensor`
        tensor of shape :math:`(...)`

    Returns
    -------
    `torch.Tensor`
        tensor of shape :math:`(..., 3, 3)`
    """
    axis, angle = torch.broadcast_tensors(axis, angle[..., None])
    # Conjugate a rotation about Y by the frame that maps e_y onto the axis.
    alpha, beta = xyz_to_angles(axis)
    to_axis = angles_to_matrix(alpha, beta, torch.zeros_like(beta))
    spin = matrix_y(angle[..., 0])
    return to_axis @ spin @ to_axis.transpose(-2, -1)
def quaternion_to_axis_angle(q):
    r"""Convert a quaternion to axis-angle form.

    Parameters
    ----------
    q : `torch.Tensor`
        tensor of shape :math:`(..., 4)` in (w, x, y, z) order

    Returns
    -------
    axis : `torch.Tensor`
        tensor of shape :math:`(..., 3)` (unit, or zeros for zero vector part)
    angle : `torch.Tensor`
        tensor of shape :math:`(...)`
    """
    w = q[..., 0].clamp(-1, 1)
    angle = 2 * torch.acos(w)
    axis = torch.nn.functional.normalize(q[..., 1:], dim=-1)
    return axis, angle
def quaternion_to_matrix(q):
    r"""Convert a quaternion to a rotation matrix.

    Parameters
    ----------
    q : `torch.Tensor`
        tensor of shape :math:`(..., 4)`

    Returns
    -------
    `torch.Tensor`
        tensor of shape :math:`(..., 3, 3)`
    """
    axis, angle = quaternion_to_axis_angle(q)
    return axis_angle_to_matrix(axis, angle)
def quaternion_to_angles(q):
    r"""Convert a quaternion to Y-X-Y Euler angles.

    Parameters
    ----------
    q : `torch.Tensor`
        tensor of shape :math:`(..., 4)`

    Returns
    -------
    alpha, beta, gamma : `torch.Tensor`
        tensors of shape :math:`(...)`
    """
    rot = quaternion_to_matrix(q)
    return matrix_to_angles(rot)
def _z_rot_mat(angle, l):
r"""
Create the matrix representation of a z-axis rotation by the given angle,
in the irrep l of dimension 2 * l + 1, in the basis of real centered
spherical harmonics (RC basis in rep_bases.py).
Note: this function is easy to use, but inefficient: only the entries
on the diagonal and anti-diagonal are non-zero, so explicitly constructing
this matrix is unnecessary.
"""
shape, device, dtype = angle.shape, angle.device, angle.dtype
M = angle.new_zeros((*shape, 2 * l + 1, 2 * l + 1))
inds = torch.arange(0, 2 * l + 1, 1, device=device)
reversed_inds = torch.arange(2 * l, -1, -1, device=device)
frequencies = torch.arange(l, -l - 1, -1, dtype=dtype, device=device)
M[..., inds, reversed_inds] = torch.sin(frequencies * angle[..., None])
M[..., inds, inds] = torch.cos(frequencies * angle[..., None])
return M
def wigner_D(l, alpha, beta, gamma):
    r"""Wigner D matrix representation of :math:`SO(3)`.
    It satisfies the following properties:
    * :math:`D(\text{identity rotation}) = \text{identity matrix}`
    * :math:`D(R_1 \circ R_2) = D(R_1) \circ D(R_2)`
    * :math:`D(R^{-1}) = D(R)^{-1} = D(R)^T`
    * :math:`D(\text{rotation around Y axis})` has some property that allows us to use FFT in `ToS2Grid`
    Code of this function has beed copied from `lie_learn <https://github.com/AMLab-Amsterdam/lie_learn>`_ made by <NAME>.
    Parameters
    ----------
    l : int
        :math:`l`
    alpha : `torch.Tensor`
        tensor of shape :math:`(...)`
        Rotation :math:`\alpha` around Y axis, applied third.
    beta : `torch.Tensor`
        tensor of shape :math:`(...)`
        Rotation :math:`\beta` around X axis, applied second.
    gamma : `torch.Tensor`
        tensor of shape :math:`(...)`
        Rotation :math:`\gamma` around Y axis, applied first.
    Returns
    -------
    `torch.Tensor`
        tensor :math:`D^l(\alpha, \beta, \gamma)` of shape :math:`(2l+1, 2l+1)`

    NOTE(review): ``alpha.size(0)`` below assumes the broadcast angles are
    (at least) 1-D batched tensors — scalar inputs would fail; confirm
    against callers.
    """
    # _Jd holds one precomputed J matrix per degree; degrees beyond the
    # file's contents are unsupported.
    if not l < len(_Jd):
        raise NotImplementedError(f'wigner D maximum l implemented is {len(_Jd) - 1}, send us an email to ask for more')
    alpha, beta, gamma = torch.broadcast_tensors(alpha, beta, gamma)
    batchsize = alpha.size(0)
    # Move the constant J matrix onto the input's dtype/device and batch it.
    J = _Jd[l].to(dtype=alpha.dtype, device=alpha.device)
    J = J.unsqueeze(0).expand(batchsize, 2*l+1, 2*l+1)
    # Decompose the Y-X-Y rotation into z-rotations conjugated by J.
    Xa = _z_rot_mat(alpha, l)
    Xb = _z_rot_mat(beta, l)
    Xc = _z_rot_mat(gamma, l)
    return Xa @ J @ Xb @ J @ Xc
def D_from_angles(alpha, beta, gamma, l, k=None):
r"""Matrix :math:`p^k D^l(\alpha, \beta, \gamma)`
(matrix) Representation of :math:`O(3)`. :math:`D` is the representation of :math:`SO(3)`, see `wigner_D`.
Parameters
----------
alpha : `torch.Tensor`
tensor of shape :math:`(...)`
Rotation :math:`\alpha` around Y axis, applied third.
beta : `torch.Tensor`
| |
1:
print('Reverse degrees')
return
m = zeros(deg_f - deg_g + 2, col_num)
for i in range(deg_f - deg_g + 1):
m[i, :] = rotate_r(row1, i)
m[deg_f - deg_g + 1, :] = row2
return m
def find_degree(M, deg_f):
    '''
    Finds the degree of the poly corresponding (after triangularization)
    to the _last_ row of the ``small'' matrix M, created by create_ma().
    deg_f is the degree of the divident poly.
    If _last_ row is all 0's returns None.
    '''
    deg = deg_f
    last = M.rows - 1
    # Each leading zero in the last row drops the degree by one; the first
    # non-zero entry fixes it (never below zero).
    for col in range(M.cols):
        if M[last, col] != 0:
            return deg if deg >= 0 else 0
        deg -= 1
def final_touches(s2, r, deg_g):
    """
    s2 is sylvester2, r is the row pointer in s2,
    deg_g is the degree of the poly last inserted in s2.
    After a gcd of degree > 0 has been found with Van Vleck's
    method, and was inserted into s2, if its last term is not
    in the last column of s2, then it is inserted as many
    times as needed, rotated right by one each time, until
    the condition is met.
    Returns the updated s2.
    """
    # the most recently inserted row (r points one past it)
    R = s2.row(r-1)
    # find the first non zero term
    for i in range(s2.cols):
        if R[0,i] == 0:
            continue
        else:
            break
    # missing rows until last term is in last column
    mr = s2.cols - (i + deg_g + 1)
    # insert them by replacing the existing entries in the row
    i = 0
    # stop early if we run off the bottom of s2
    while mr != 0 and r + i < s2.rows :
        s2[r + i, : ] = rotate_r(R, i + 1)
        i += 1
        mr -= 1
    return s2
def subresultants_vv(p, q, x, method = 0):
    """
    p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
    that degree(p, x) >= degree(q, x).
    Computes the subresultant prs of p, q by triangularizing,
    in Z[x] or in Q[x], all the smaller matrices encountered in the
    process of triangularizing sylvester2, Sylvester's matrix of 1853;
    see references 1 and 2 for Van Vleck's method. With each remainder,
    sylvester2 gets updated and is prepared to be printed if requested.
    If sylvester2 has small dimensions and you want to see the final,
    triangularized matrix use this version with method=1; otherwise,
    use either this version with method=0 (default) or the faster version,
    subresultants_vv_2(p, q, x), where sylvester2 is used implicitly.
    Sylvester's matrix sylvester1 is also used to compute one
    subresultant per remainder; namely, that of the leading
    coefficient, in order to obtain the correct sign and to
    force the remainder coefficients to become subresultants.
    If the subresultant prs is complete, then it coincides with the
    Euclidean sequence of the polynomials p, q.
    If the final, triangularized matrix s2 is printed, then:
    (a) if deg(p) - deg(q) > 1 or deg( gcd(p, q) ) > 0, several
    of the last rows in s2 will remain unprocessed;
    (b) if deg(p) - deg(q) == 0, p will not appear in the final matrix.
    References:
    ===========
    1. <NAME>.: ``A new method for computing polynomial greatest
    common divisors and polynomial remainder sequences.''
    Numerische MatheMatik 52, 119-127, 1988.
    2. <NAME>., <NAME> and <NAME>: ``On a Theorem
    by Van Vleck Regarding Sturm Sequences.''
    Serdica Journal of Computing, 7, No 4, 101-134, 2013.
    3. <NAME>.:``Three New Methods for Computing Subresultant
    Polynomial Remainder Sequences (PRS's).'' Serdica Journal of Computing,
    to appear.
    """
    # make sure neither p nor q is 0
    if p == 0 or q == 0:
        return [p, q]
    # make sure proper degrees
    f, g = p, q
    n = deg_f = degree(f, x)
    m = deg_g = degree(g, x)
    if n == 0 and m == 0:
        return [f, g]
    if n < m:
        n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
    if n > 0 and m == 0:
        return [f, g]
    # initialize
    s1 = sylvester(f, g, x, 1)
    s2 = sylvester(f, g, x, 2)
    sr_list = [f, g]
    col_num = 2 * n         # columns in s2
    # make two rows (row0, row1) of poly coefficients, padded to col_num
    row0 = Poly(f, x, domain = QQ).all_coeffs()
    leng0 = len(row0)
    for i in range(col_num - leng0):
        row0.append(0)
    row0 = Matrix([row0])
    row1 = Poly(g, x, domain = QQ).all_coeffs()
    leng1 = len(row1)
    for i in range(col_num - leng1):
        row1.append(0)
    row1 = Matrix([row1])
    # row pointer for deg_f - deg_g == 1; may be reset below
    r = 2
    # modify first rows of s2 matrix depending on poly degrees
    if deg_f - deg_g > 1:
        r = 1
        # replacing the existing entries in the rows of s2,
        # insert row0 (deg_f - deg_g - 1) times, rotated each time
        for i in range(deg_f - deg_g - 1):
            s2[r + i, : ] = rotate_r(row0, i + 1)
        r = r + deg_f - deg_g - 1
        # insert row1 (deg_f - deg_g) times, rotated each time
        for i in range(deg_f - deg_g):
            s2[r + i, : ] = rotate_r(row1, r + i)
        r = r + deg_f - deg_g
    if deg_f - deg_g == 0:
        r = 0
    # main loop
    while deg_g > 0:
        # create a small matrix M, and triangularize it;
        M = create_ma(deg_f, deg_g, row1, row0, col_num)
        # will need only the first and last rows of M
        for i in range(deg_f - deg_g + 1):
            M1 = pivot(M, i, i)
            M = M1[:, :]
        # treat last row of M as poly; find its degree
        d = find_degree(M, deg_f)
        # identity comparison (PEP 8) — find_degree returns None for a zero row
        if d is None:
            break
        exp_deg = deg_g - 1
        # evaluate one determinant & make coefficients subresultants
        sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
        poly = row2poly(M[M.rows - 1, :], d, x)
        temp2 = LC(poly, x)
        poly = simplify((poly / temp2) * sign_value)
        # update s2 by inserting first row of M as needed
        row0 = M[0, :]
        for i in range(deg_g - d):
            s2[r + i, :] = rotate_r(row0, r + i)
        r = r + deg_g - d
        # update s2 by inserting last row of M as needed
        row1 = rotate_l(M[M.rows - 1, :], deg_f - d)
        row1 = (row1 / temp2) * sign_value
        for i in range(deg_g - d):
            s2[r + i, :] = rotate_r(row1, r + i)
        r = r + deg_g - d
        # update degrees
        deg_f, deg_g = deg_g, d
        # append poly with subresultant coeffs
        sr_list.append(poly)
    # final touches to print the s2 matrix
    if method != 0 and s2.rows > 2:
        s2 = final_touches(s2, r, deg_g)
        pprint(s2)
    elif method != 0 and s2.rows == 2:
        s2[1, :] = rotate_r(s2.row(1), 1)
        pprint(s2)
    return sr_list
def subresultants_vv_2(p, q, x):
"""
p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p, q by triangularizing,
in Z[x] or in Q[x], all the smaller matrices encountered in the
process of triangularizing sylvester2, Sylvester's matrix of 1853;
see references 1 and 2 for Van Vleck's method.
If the sylvester2 matrix has big dimensions use this version,
where sylvester2 is used implicitly. If you want to see the final,
triangularized matrix sylvester2, then use the first version,
subresultants_vv(p, q, x, 1).
sylvester1, Sylvester's matrix of 1840, is also used to compute
one subresultant per remainder; namely, that of the leading
coefficient, in order to obtain the correct sign and to
``force'' the remainder coefficients to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References:
===========
1. <NAME>.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
Numerische MatheMatik 52, 119-127, 1988.
2. <NAME>., <NAME> and <NAME>klas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101–134, 2013.
3. <NAME>.:``Three New Methods for Computing Subresultant
Polynomial Remainder | |
import os
import logging
import copy
import numpy as np
import pandas as pd
from oemof.solph import EnergySystem, Bus, Sink, Source
import oemof.tabular.tools.postprocessing as pp
from oemof.tools.economics import annuity
from oemof_flexmex.helpers import delete_empty_subdirs, load_elements, load_scalar_input_data,\
load_yaml
from oemof_flexmex.parametrization_scalars import get_parameter_values
from oemof_flexmex.facades import TYPEMAP
# Columns that identify a component in every scalar result DataFrame
basic_columns = ['region', 'name', 'type', 'carrier', 'tech']
# Path definitions
module_path = os.path.abspath(os.path.dirname(__file__))
# Name of the model-config subdirectory (relative to results dirs)
MODEL_CONFIG = 'model_config'
# Mapping files live outside this package, next to it in the repo
PATH_MAPPINGS_REL = '../flexmex_config'
path_mappings = os.path.abspath(os.path.join(module_path, PATH_MAPPINGS_REL))
path_map_output_timeseries = os.path.join(path_mappings, 'mapping-output-timeseries.yml')
path_map_input_scalars = os.path.join(path_mappings, 'mapping-input-scalars.yml')
# Load mappings
# NOTE(review): executed at import time — importing this module requires the
# flexmex_config directory (and both YAML files) to exist.
map_output_timeseries = load_yaml(path_map_output_timeseries)
FlexMex_Parameter_Map = load_yaml(path_map_input_scalars)
def create_postprocessed_results_subdirs(postprocessed_results_dir):
    r"""
    Creates one subdirectory per mapped output time series.

    Parameters
    ----------
    postprocessed_results_dir : str
        Base directory under which the subdirectories from
        ``map_output_timeseries`` are created.
    """
    for parameters in map_output_timeseries.values():
        for subdir in parameters.values():
            path = os.path.join(postprocessed_results_dir, subdir)
            # exist_ok=True avoids the check-then-create race of the previous
            # `if not os.path.exists(path): os.makedirs(path)` pattern.
            os.makedirs(path, exist_ok=True)
def get_capacities(es):
    r"""
    Calculates the capacities of all components.

    Adapted from oemof.tabular.tools.postprocessing.write_results()

    Collects three sources of capacities and concatenates them:
    endogenous (investment) results from bus_results(), exogenous
    capacities read from node attributes, and storage capacities from
    component_results().

    Parameters
    ----------
    es : oemof.solph.EnergySystem
        EnergySystem containing the results.

    Returns
    -------
    capacities : pd.DataFrame
        DataFrame containing the capacities, indexed by
        (region, name, type, carrier, tech, var_name).
    """
    def get_facade_attr(attr):
        # Function constructor for getting a specific property from
        # the Facade object in bus_results() DataFrame columns "from" or "to"
        def fnc(flow):
            # Get property from the Storage object in "from" for the discharge device
            if isinstance(flow['from'], (TYPEMAP["storage"],
                                         TYPEMAP["asymmetric storage"])):
                return getattr(flow['from'], attr, np.nan)
            # Get property from the Storage object in "to" for the charge device
            if isinstance(flow['to'], (TYPEMAP["storage"],
                                       TYPEMAP["asymmetric storage"])):
                return getattr(flow['to'], attr, np.nan)
            # Get property from other object in "from"
            return getattr(flow['from'], attr, np.nan)
        return fnc

    def get_parameter_name(flow):
        # A storage shows up as two flows; distinguish charge and discharge
        if isinstance(flow['from'], (TYPEMAP["storage"],
                                     TYPEMAP["asymmetric storage"])):
            return "capacity_discharge_invest"
        if isinstance(flow['to'], (TYPEMAP["storage"],
                                   TYPEMAP["asymmetric storage"])):
            return "capacity_charge_invest"
        return np.nan

    try:
        flows = pp.bus_results(es, es.results, select="scalars", concat=True)
        flows.name = "var_value"
        endogenous = flows.reset_index()
        # Results already contain a column named "type". Call this "var_name" to
        # preserve its content ("invest" for now)
        endogenous.rename(columns={"type": "var_name"}, inplace=True)
        # Update "var_name" with Storage specific parameter names for charge and discharge devices
        df = pd.DataFrame({'var_name': endogenous.apply(get_parameter_name, axis=1)})
        endogenous.update(df)
        endogenous["region"] = endogenous.apply(get_facade_attr('region'), axis=1)
        endogenous["name"] = endogenous.apply(get_facade_attr('label'), axis=1)
        endogenous["type"] = endogenous.apply(get_facade_attr('type'), axis=1)
        endogenous["carrier"] = endogenous.apply(get_facade_attr('carrier'), axis=1)
        endogenous["tech"] = endogenous.apply(get_facade_attr('tech'), axis=1)
        endogenous.drop(['from', 'to'], axis=1, inplace=True)
        endogenous.set_index(
            ["region", "name", "type", "carrier", "tech", "var_name"], inplace=True
        )
    except ValueError:
        endogenous = pd.DataFrame()

    d = dict()
    for node in es.nodes:
        if not isinstance(node, (Bus, Sink, TYPEMAP["shortage"], TYPEMAP["link"])):
            # Specify which parameters to read depending on the technology
            parameters_to_read = []
            if isinstance(node, TYPEMAP["storage"]):
                # TODO for brownfield optimization
                # parameters_to_read = ['capacity', 'storage_capacity']
                # WORKAROUND Skip 'capacity' to safe some effort in aggregation and elsewhere
                # possible because storages are greenfield optimized only: 'capacity' = 0
                parameters_to_read = ['storage_capacity']
            elif isinstance(node, TYPEMAP["asymmetric storage"]):
                parameters_to_read = ['capacity_charge', 'capacity_discharge', 'storage_capacity']
            elif getattr(node, "capacity", None) is not None:
                parameters_to_read = ['capacity']
            # Update dict with values in oemof's parameter->value structure
            for p in parameters_to_read:
                key = (
                    node.region,
                    node.label,
                    # [n for n in node.outputs.keys()][0],
                    node.type,
                    node.carrier,
                    node.tech,  # tech & carrier are oemof-tabular specific
                    p
                )  # for oemof logic
                d[key] = {'var_value': getattr(node, p)}
    exogenous = pd.DataFrame.from_dict(d).T  # .dropna()
    if not exogenous.empty:
        exogenous.index = exogenous.index.set_names(
            ['region', 'name', 'type', 'carrier', 'tech', 'var_name']
        )

    # Read storage capacities (from oemof.heat)
    # only component_results() knows about 'storage_capacity'
    try:
        components = pd.concat(pp.component_results(es, es.results, select='scalars'))
        components.name = 'var_value'
        storage = components.reset_index()
        # FIX: the positional `axis` argument of DataFrame.drop was removed in
        # pandas 2.0 — pass axis=1 explicitly.
        storage.drop('level_0', axis=1, inplace=True)
        storage.columns = ['name', 'to', 'var_name', 'var_value']
        storage['region'] = [
            getattr(t, "region", np.nan) for t in components.index.get_level_values('from')
        ]
        storage['type'] = [
            getattr(t, "type", np.nan) for t in components.index.get_level_values('from')
        ]
        storage['carrier'] = [
            getattr(t, "carrier", np.nan) for t in components.index.get_level_values('from')
        ]
        storage['tech'] = [
            getattr(t, "tech", np.nan) for t in components.index.get_level_values('from')
        ]
        # Rows without a 'to' node are the storage-capacity scalars
        storage = storage.loc[storage['to'].isna()]
        storage.drop('to', axis=1, inplace=True)
        storage = storage[['region', 'name', 'type', 'carrier', 'tech', 'var_name', 'var_value']]
        # Delete unused 'init_cap' rows - parameter name misleading! (oemof issue)
        storage.drop(storage.loc[storage['var_name'] == 'init_cap'].index, axis=0, inplace=True)
        storage.replace(
            ['invest'],
            ['storage_capacity_invest'],
            inplace=True
        )
        storage.set_index(
            ['region', "name", "type", "carrier", "tech", "var_name"], inplace=True
        )
    except ValueError:
        storage = pd.DataFrame()

    capacities = pd.concat([endogenous, exogenous, storage])

    return capacities
def format_capacities(oemoflex_scalars, capacities):
    r"""
    Brings capacities into the generic scalar-results column layout.

    Parameters
    ----------
    oemoflex_scalars : pd.DataFrame
        Scalar results; only its columns are used as a template.
    capacities : pd.DataFrame
        Capacities indexed by (region, name, type, carrier, tech, var_name),
        see get_capacities().

    Returns
    -------
    df : pd.DataFrame
        Capacities with the oemoflex_scalars columns and var_unit set to 'MW'.
    """
    df = pd.DataFrame(columns=oemoflex_scalars.columns)
    # Hoist the index reset: the original recomputed capacities.reset_index()
    # once per assigned column (7 times).
    flat = capacities.reset_index()
    for col in ['name', 'tech', 'carrier', 'var_name', 'var_value', 'type', 'region']:
        df.loc[:, col] = flat.loc[:, col]
    df['var_unit'] = 'MW'
    return df
def get_sequences_by_tech(results):
    r"""
    Collects the sequences of all components, keyed by region, carrier-tech
    and variable name.

    Parameters
    ----------
    results : dict
        Dictionary containing oemof.solph.Model results
        (keys are (from_node, to_node) tuples).

    Returns
    -------
    sequences_by_tech : pd.DataFrame
        One column per flow, with a (region, carrier_tech, var_name)
        column MultiIndex. (Despite the historical name and the old
        docstring, this is a DataFrame, not a dict.)
    """
    # copy to avoid manipulating the data in es.results
    sequences = copy.deepcopy({key: value['sequences'] for key, value in results.items()})
    sequences_by_tech = []
    # Get internal busses for all 'ReservoirWithPump' and 'Bev' nodes to be ignored later
    internal_busses = get_subnodes_by_type(sequences, Bus)
    # Get inflows for all 'ReservoirWithPump' nodes
    reservoir_inflows = get_subnodes_by_type(sequences, Source)
    for key, df in sequences.items():
        # A Bus on the 'from' side means this flow goes INTO the component
        if isinstance(key[0], Bus):
            component = key[1]
            bus = key[0]
            if isinstance(component, TYPEMAP["link"]):
                # NOTE(review): if `bus` matches neither from_bus nor to_bus,
                # var_name is left unset by this branch (stale value from a
                # previous iteration or NameError) — presumably cannot happen
                # for well-formed results; verify upstream.
                if bus == component.from_bus:
                    var_name = 'flow_gross_forward'
                elif bus == component.to_bus:
                    var_name = 'flow_gross_backward'
            elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
                var_name = 'flow_fuel'
            else:
                var_name = 'flow_in'
        # A Bus on the 'to' side means this flow comes OUT of the component
        if isinstance(key[1], Bus):
            bus = key[1]
            component = key[0]
            if isinstance(component, TYPEMAP["link"]):
                if bus == component.to_bus:
                    var_name = 'flow_net_forward'
                elif bus == component.from_bus:
                    var_name = 'flow_net_backward'
            elif isinstance(component, (TYPEMAP["extraction"], TYPEMAP["backpressure"])):
                if bus == component.electricity_bus:
                    var_name = 'flow_electricity'
                elif bus == component.heat_bus:
                    var_name = 'flow_heat'
            elif component in reservoir_inflows:
                var_name = 'flow_inflow'
            else:
                var_name = 'flow_out'
        # A key with no 'to' node carries the storage filling level
        if key[1] is None:
            component = key[0]
            var_name = 'storage_content'
        # Ignore sequences FROM internal busses (concerns ReservoirWithPump, Bev)
        if bus in internal_busses and component not in reservoir_inflows:
            continue
        carrier_tech = component.carrier + '-' + component.tech
        if isinstance(component, TYPEMAP["link"]):
            # Replace AT-DE by AT_DE to be ready to be merged with DataFrames from preprocessing
            region = component.label.replace('-', '_')
        else:
            # Take AT from AT-ch4-gt, string op since sub-nodes lack of a 'region' attribute
            region = component.label.split('-')[0]
        df.columns = pd.MultiIndex.from_tuples([(region, carrier_tech, var_name)])
        df.columns.names = ['region', 'carrier_tech', 'var_name']
        sequences_by_tech.append(df)
    sequences_by_tech = pd.concat(sequences_by_tech, axis=1)
    return sequences_by_tech
def get_subnodes_by_type(sequences, cls):
    r"""
    Get all the subnodes of type 'cls' in the <to> nodes of 'sequences'

    Parameters
    ----------
    sequences : dict (special format, see get_sequences_by_tech() and before)
        key: tuple of 'to' node and 'from' node: (from, to)
        value: timeseries DataFrame

    cls : Class
        Class to check against

    Returns
    -------
    A list of all subnodes of type 'cls'
    """
    # Looking at one side of each flow is sufficient: the 'to' node, key[1]
    candidate_nodes = [key[1] for key in sequences]
    matches = []
    for node in candidate_nodes:
        if hasattr(node, 'subnodes'):
            # Keep only the subnodes of the requested class
            matches.extend(sub for sub in node.subnodes if isinstance(sub, cls))
    return matches
def get_summed_sequences(sequences_by_tech, prep_elements):
    r"""
    Sums each component's time series and joins the totals with the
    component definitions.

    Parameters
    ----------
    sequences_by_tech : pd.DataFrame
        Sequences with a (region, carrier_tech, var_name) column MultiIndex,
        see get_sequences_by_tech().
    prep_elements : dict of pd.DataFrame
        Preprocessed component definitions containing ``basic_columns``.

    Returns
    -------
    summed_sequences : pd.DataFrame
        Summed flows per component in MWh; 'storage_content' rows are
        removed because summing a state variable over time is meaningless.
    """
    # Put component definitions into one DataFrame - drops 'carrier_tech' information in the keys
    base = pd.concat(prep_elements.values())
    df = base.loc[:, basic_columns]
    # Renamed from `sum`, which shadowed the builtin
    summed = sequences_by_tech.sum()
    summed.name = 'var_value'
    sum_df = summed.reset_index()
    # Form helper column for proper merging with component definition
    df['carrier_tech'] = df['carrier'] + '-' + df['tech']
    summed_sequences = pd.merge(df, sum_df, on=['region', 'carrier_tech'])
    # Drop helper column
    summed_sequences.drop('carrier_tech', axis=1, inplace=True)
    summed_sequences = summed_sequences.loc[summed_sequences['var_name'] != 'storage_content']
    summed_sequences['var_unit'] = 'MWh'
    return summed_sequences
def get_re_generation(oemoflex_scalars):
    r"""
    Calculates renewable generation per region: solar and wind feed-in
    ('flow_out') minus electricity curtailment ('flow_in' of the
    curtailment tech).

    Parameters
    ----------
    oemoflex_scalars : pd.DataFrame
        Scalar results with at least the columns 'region', 'carrier',
        'tech', 'var_name' and 'var_value'.

    Returns
    -------
    re_generation : pd.DataFrame
        One row per region with var_name 're_generation' in MWh.
    """
    renewable_carriers = ['solar', 'wind']
    re_generation = pd.DataFrame(columns=oemoflex_scalars.columns)

    re_flow = oemoflex_scalars.loc[(oemoflex_scalars['carrier'].isin(renewable_carriers)) &
                                   (oemoflex_scalars['var_name'] == 'flow_out')]

    curtailment = oemoflex_scalars.loc[(oemoflex_scalars['carrier'] == 'electricity') &
                                       (oemoflex_scalars['tech'] == 'curtailment') &
                                       (oemoflex_scalars['var_name'] == 'flow_in')]

    # FIX: restrict the groupby-sum to 'var_value'. Summing all columns relied
    # on pandas' old numeric_only=True default and breaks on the string columns
    # with pandas >= 2.0. Also renamed from `sum`, which shadowed the builtin.
    net = (re_flow.groupby('region')[['var_value']].sum()
           - curtailment.groupby('region')[['var_value']].sum())

    re_generation['region'] = net.index
    re_generation['carrier'] = 're'
    re_generation['type'] = 'none'
    re_generation['tech'] = 'none'
    re_generation['var_name'] = 're_generation'
    # FIX: the positional `axis` argument of drop was removed in pandas 2.0
    re_generation = re_generation.drop('var_value', axis=1)
    # Merge joins on the 'region' column (left) / index level (right)
    re_generation = pd.merge(re_generation, net['var_value'], on='region')

    re_generation['var_unit'] = 'MWh'

    return re_generation
def get_transmission_losses(oemoflex_scalars):
    r"""Calculates losses_forward losses_backward for each link."""
    def loss_for(direction):
        # Difference between what enters the link and what leaves it
        def rows(var):
            return oemoflex_scalars.loc[
                oemoflex_scalars['var_name'] == var].set_index('name')

        gross = rows(f'flow_gross_{direction}')
        net = rows(f'flow_net_{direction}')

        result = gross.copy()
        result['var_name'] = f'loss_{direction}'
        result['var_value'] = gross['var_value'] - net['var_value']
        return result

    losses = pd.concat([loss_for(direction) for direction in ('forward', 'backward')])
    return losses.reset_index()
def get_storage_losses(oemoflex_scalars):
    r"""Calculates the loss of each storage unit as inflow minus outflow."""
    is_storage = oemoflex_scalars['type'].isin(['storage', 'asymmetric storage'])
    storage_rows = oemoflex_scalars.loc[is_storage]

    def rows(var_name):
        return storage_rows.loc[storage_rows['var_name'] == var_name].set_index('name')

    inflow = rows('flow_in')
    outflow = rows('flow_out')

    losses = inflow.copy()
    losses['var_name'] = 'loss'
    losses['var_value'] = inflow['var_value'] - outflow['var_value']
    return losses.reset_index()
def get_reservoir_losses(oemoflex_scalars):
reservoir_data = oemoflex_scalars.loc[
oemoflex_scalars['type'].isin(['reservoir'])
]
flow_in = reservoir_data.loc[reservoir_data['var_name'] == 'flow_in'].set_index('name')
flow_out = reservoir_data.loc[reservoir_data['var_name'] == 'flow_out'].set_index('name')
flow_inflow = reservoir_data.loc[reservoir_data['var_name'] == 'flow_inflow'].set_index('name')
losses = flow_in.copy()
losses['var_name'] = 'losses'
losses['var_value'] = flow_inflow['var_value'] - | |
to tweak (increase) parameter `tokens_in_batch`, though result is "
f"not guaranteed."
)
return batch_beginnings, batch_sizes, batch_seq_lengths
num_cut = 0
for ss in [8, 1]: # ss - split_size
old_num_batches = len(batch_sizes)
# Starting from the last batch because its size is likely to be not multiple of 8. Thus number of
# batches which size is not multiple of 8 can be reduced by 1.
original_batch_index = old_num_batches - 1
while original_batch_index >= 0 and num_cut < num_missing_batches:
bs, bb = batch_sizes[original_batch_index], batch_beginnings[original_batch_index]
rb = 0 # an index of sliced first element of sliced batch in original batch (relative beginning)
if rb < bs - ss:
while rb < bs - ss and num_cut < num_missing_batches:
batch_sizes.append(ss)
batch_beginnings.append(bb + rb)
batch_seq_lengths.append(
self.calc_batch_seq_length(input_ids[bb + rb : bb + rb + ss], length_is_multiple_of=8)
)
rb += ss
num_cut += 1
assert len(input_ids[bb + rb : bb + bs]) > 0
batch_sizes[original_batch_index] = bs - rb
batch_beginnings[original_batch_index] = bb + rb
batch_seq_lengths[original_batch_index] = self.calc_batch_seq_length(
input_ids[bb + rb : bb + bs], length_is_multiple_of=8
)
original_batch_index -= 1
# Keeping order of batches.
batch_beginnings, batch_sizes, batch_seq_lengths = map(
list, zip(*sorted(zip(batch_beginnings, batch_sizes, batch_seq_lengths), key=lambda x: x[0]))
)
assert len(batch_beginnings) % self.number_of_batches_is_multiple_of == 0
assert len(batch_sizes) % self.number_of_batches_is_multiple_of == 0
assert len(batch_seq_lengths) % self.number_of_batches_is_multiple_of == 0
return batch_beginnings, batch_sizes, batch_seq_lengths
    def _mark_up_batches(self, input_ids: List[np.ndarray]) -> Tuple[List[int], List[int], List[int]]:
        """
        Computes indices of first samples in batch, batch sizes, seq lengths for batches. ``input_ids`` has to be
        sorted by number of tokens in ascending order.

        Batches are marked up with respect to following conditions:
            - total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
            - batch size is evenly divisible by 8 (except for the last batch)
            - seq length (elements of the third returned object) is evenly divisible by 8

        If ``self.batch_mark_up_progress_queue`` is not None, then the progress in mark up is reported via
        ``self.batch_mark_up_progress_queue``. Otherwise, ``tqdm`` instance is created in this function.

        Args:
            input_ids: a list of 1D int32 arrays. Elements of ``input_ids`` have to be sorted by length in ascending
                order

        Returns:
            batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch
            batch_sizes: a list of numbers of samples in batches
            batch_seq_lengths: a list of sequence lengths after padding for every batch
        """
        batch_beginnings, batch_sizes, batch_seq_lengths = [], [], []
        current_max_length = 0
        start = 0  # index of the first sample of the batch being accumulated
        if self.batch_mark_up_progress_queue is None:
            inp_iterator = tqdm(enumerate(input_ids), total=len(input_ids), desc="Batch mark up", unit="query")
        else:
            inp_iterator = enumerate(input_ids)
            progress_made = 0
        for i, inp in inp_iterator:
            # Lengths are padded up to a multiple of 8, so track the padded maximum
            current_max_length = max(current_max_length, ceil(len(inp) / 8) * 8)
            if current_max_length * (i + 1 - start) > self.tokens_in_batch:
                # Token budget exceeded: close the batch BEFORE sample ``i``,
                # rounding the batch size down to a multiple of 8.
                batch_size = (i - start) // 8 * 8
                if batch_size == 0:
                    if i > start:
                        # Fewer than 8 samples fit — accept a non-multiple-of-8 batch
                        batch_size = i - start
                        logging.warning(
                            f"Could not create batch with multiple of 8 size. Probably, there is a too long sequence "
                            f"in the dataset or parameter `tokens_in_batch` is too small. Current length of sequences "
                            f"in batch is {current_max_length}. Batch size will be reduced to {batch_size}. "
                            f"tokens_in_batch={self.tokens_in_batch}. The batch includes sequences from "
                            f"{start} to {i - 1}."
                        )
                    else:
                        # NOTE(review): here i == start, i.e. sample ``i`` alone exceeds
                        # the budget, yet the warning references sequence ``i - 1`` and
                        # ``start = i`` does not advance past the over-long sample — it is
                        # still picked up (as a size-1 batch) on later iterations. Verify
                        # which sequence is actually intended to be skipped.
                        logging.warning(
                            f"Input sequence number {i - 1} is too long. Could not fit it into batch with "
                            f"{self.tokens_in_batch} tokens. Sequence number {i - 1} will not be added to batches."
                        )
                        start = i
                        current_max_length = ceil(len(inp) / 8) * 8
                        continue
                seq_length = self.calc_batch_seq_length(input_ids[start : start + batch_size], length_is_multiple_of=8)
                batch_beginnings.append(start)
                batch_sizes.append(batch_size)
                batch_seq_lengths.append(seq_length)
                start += batch_size
                # Re-derive the padded maximum over the samples left for the next batch
                current_max_length = self.calc_batch_seq_length(input_ids[start : i + 1], length_is_multiple_of=8)
            if self.batch_mark_up_progress_queue is not None:
                progress_made += 1
                if progress_made >= BATCH_MARK_UP_PROGRESS_REPORT_PERIOD:
                    self.batch_mark_up_progress_queue.put(progress_made)
                    progress_made = 0
        if start < len(input_ids):
            # Final batch with whatever remains (may not be a multiple of 8)
            seq_length = self.calc_batch_seq_length(input_ids[start:], length_is_multiple_of=8)
            batch_beginnings.append(start)
            batch_sizes.append(len(input_ids) - start)
            batch_seq_lengths.append(seq_length)
        if self.batch_mark_up_progress_queue is not None:
            self.batch_mark_up_progress_queue.put(progress_made)
        if len(batch_beginnings) % self.number_of_batches_is_multiple_of:
            batch_beginnings, batch_sizes, batch_seq_lengths = self._adjust_number_of_batches(
                input_ids, batch_beginnings, batch_sizes, batch_seq_lengths
            )
        # Sanity checks: batches cover all samples contiguously and the padded
        # length of every batch accommodates its longest sequence
        assert sum(batch_sizes) == len(input_ids)
        for i in range(len(batch_beginnings) - 1):
            assert batch_beginnings[i] + batch_sizes[i] == batch_beginnings[i + 1]
            assert batch_seq_lengths[i] >= max(
                [len(inp) for inp in input_ids[batch_beginnings[i] : batch_beginnings[i] + batch_sizes[i]]]
            )
        return batch_beginnings, batch_sizes, batch_seq_lengths
    def _pack_into_batches(
        self,
        input_ids: List[np.ndarray],
        subtokens_mask: List[np.ndarray],
        punct_labels: List[np.ndarray],
        capit_labels: List[np.ndarray],
    ) -> List[Dict[str, np.ndarray]]:
        """
        Shuffle input sequences, sort them by number of tokens, pad, and pack into batches which satisfy following
        conditions:
            - total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
            - batch size is evenly divisible by 8 (except for the last batch)
            - seq length (elements of the third returned object) is evenly divisible by 8

        Created batches are shuffled before returning.

        If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then ``'segment_ids'``, ``'loss_mask'``, and
        ``'input_mask'`` are added to the batch.

        If ``self.batch_building_progress_queue`` is not ``None``, then padding progress is reported to
        ``self.batch_building_progress_queue``. Otherwise, a new ``tqdm`` instance is created in ``pack_into_batches``
        method.

        Args:
            input_ids: a list of 1D int32 arrays which contain token ids of dataset source
            subtokens_mask: a list of 1D boolean arrays which elements are ``True`` if corresponding token is the
                first token in some word
            punct_labels: a list of 1D int32 arrays which contain encoded punctuation labels
            capit_labels: a list of 1D int32 arrays which contain encoded capitalization labels

        Returns:
            a list of batches. Each batch is a dictionary with items:
              - ``'input_ids'``: a ``np.int32`` numpy array;
              - ``'subtokens_mask'``: a boolean numpy array;
              - ``'punct_labels'``: a ``np.int32`` numpy array;
              - ``'capit_labels'``: a ``np.int32`` numpy array.

            If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then a batch also contain items
              - ``'segment_ids'``: a ``np.int8`` numpy array;
              - ``'input_mask'``: a boolean numpy array;
              - ``'loss_mask'``: a boolean numpy array.

            The values of a batch dictionary are numpy arrays of identical shape.
        """
        # Shuffle before sorting so that equal-length sequences end up in random order
        zipped = list(zip(input_ids, subtokens_mask, punct_labels, capit_labels))
        self.batch_shuffling_random_state.shuffle(zipped)
        # Sort by sequence length (ascending) — required by _mark_up_batches
        input_ids, subtokens_mask, punct_labels, capit_labels = zip(*sorted(zipped, key=lambda x: x[0].shape[0]))
        batch_beginnings, batch_sizes, batch_seq_lengths = self._mark_up_batches(input_ids)
        batches = []
        if self.batch_building_progress_queue is None:
            inp_iterator = tqdm(
                zip(batch_beginnings, batch_sizes, batch_seq_lengths),
                total=len(batch_beginnings),
                desc="Batch building",
                unit="batch",
            )
        else:
            # In this case we report number of queries not number of batches
            inp_iterator = zip(batch_beginnings, batch_sizes, batch_seq_lengths)
            progress_made = 0
        for start, size, length in inp_iterator:
            # Pad every array of the batch to the batch's common sequence length
            batch_input_ids = pad(input_ids[start : start + size], length, self.tokenizer.pad_id)
            batch_subtokens_mask = pad(subtokens_mask[start : start + size], length, False)
            batch = {
                "input_ids": batch_input_ids,
                "subtokens_mask": batch_subtokens_mask,
                "punct_labels": pad(
                    punct_labels[start : start + size], length, self.punct_label_ids[self.pad_label]
                ).astype(np.int64),
                "capit_labels": pad(
                    capit_labels[start : start + size], length, self.capit_label_ids[self.pad_label]
                ).astype(np.int64),
            }
            if self.add_masks_and_segment_ids_to_batch:
                batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids(
                    batch_input_ids,
                    batch_subtokens_mask,
                    self.tokenizer.pad_id,
                    self.tokenizer.cls_id,
                    self.tokenizer.sep_id,
                    self.ignore_start_end,
                    self.ignore_extra_tokens,
                )
                batch['segment_ids'] = batch_segment_ids
                batch['input_mask'] = batch_input_mask
                batch['loss_mask'] = batch_loss_mask
            batches.append(batch)
            if self.batch_building_progress_queue is not None:
                progress_made += size
                if progress_made >= BATCH_BUILDING_PROGRESS_REPORT_PERIOD:
                    self.batch_building_progress_queue.put(progress_made)
                    progress_made = 0
        if self.batch_building_progress_queue is not None:
            # Flush the remaining progress count
            self.batch_building_progress_queue.put(progress_made)
        # Undo the length ordering so training does not see batches sorted by length
        self.batch_shuffling_random_state.shuffle(batches)
        return batches
def repack_batches_with_shuffle(self) -> None:
"""A function for proper shuffling of a dataset. Pytorch data loader shuffing will only permute batches."""
logging.info("Shuffling training dataset")
self.batches = self._pack_into_batches(
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels
)
def _calculate_and_save_label_frequencies(self, all_labels: List[np.ndarray], name: str) -> Dict[str, float]:
"""Calculates and saves labels frequencies in :attr:`label_info_save_dir`."""
merged_labels = itertools.chain.from_iterable(all_labels)
if self.verbose:
logging.info('Three most popular labels')
self.label_info_save_dir.mkdir(parents=True, exist_ok=True)
_, label_frequencies, _ = get_label_stats(
merged_labels, str(self.label_info_save_dir / f'label_count_{name}.tsv')
)
return label_frequencies
def save_labels_and_get_file_paths(
self, punct_labels_file_name: str, capit_labels_file_name: str
) -> Tuple[Path, Path]:
"""
Saves label ids into files located in ``self.label_info_save_dir``. Saved label ids are usually used for
``.nemo`` checkpoint creation.
The signatures of this method and the signature of the method
:meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationTarredDataset.save_labels_and_get_file_paths`
must be identical.
Args:
punct_labels_file_name (:obj:`str`): a name of a punctuation labels file
capit_labels_file_name (:obj:`str`): a name of a capitalization labels file
Returns:
:obj:`Tuple[pathlib.Path, pathlib.Path]`: a tuple containing:
- :obj:`pathlib.Path`: a path to the saved punctuation labels file
- :obj:`pathlib.Path`: a path to the saved capitalization labels file
"""
nemo_dir = self.label_info_save_dir / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT
punct_labels_file = nemo_dir / punct_labels_file_name
capit_labels_file = nemo_dir / capit_labels_file_name
save_label_ids(self.punct_label_ids, punct_labels_file)
save_label_ids(self.capit_label_ids, capit_labels_file)
return punct_labels_file, capit_labels_file
def __len__(self) -> int:
if self.use_features:
return len(self.batches)
return len(self.input_ids)
def collate_fn(self, batches: List[Dict[str, np.ndarray]]) -> Dict[str, | |
trajectories are to be saved
model_utils: ModelUtils instance
"""
args = parse_arguments()
forecasted_trajectories = {}
for i, (_input, target, helpers) in enumerate(test_loader):
_input = _input.to(device)
batch_helpers = list(zip(*helpers))
helpers_dict = {}
for k, v in config.LSTM_HELPER_DICT_IDX.items():
helpers_dict[k] = batch_helpers[v]
# Set to eval mode
encoder.eval()
decoder.eval()
# Encoder
batch_size = _input.shape[0]
input_length = _input.shape[1]
input_shape = _input.shape[2]
# Initialize encoder hidden state
encoder_hidden = model_utils.init_hidden(
batch_size,
encoder.module.hidden_size if use_cuda else encoder.hidden_size)
# Encode observed trajectory
for ei in range(input_length):
encoder_input = _input[:, ei, :]
encoder_hidden = encoder(encoder_input, encoder_hidden)
# Initialize decoder input with last coordinate in encoder
decoder_input = encoder_input[:, :2]
# Initialize decoder hidden state as encoder hidden state
decoder_hidden = encoder_hidden
decoder_outputs = torch.zeros(
(batch_size, args.pred_len, 2)).to(device)
# Decode hidden state in future trajectory
for di in range(args.pred_len):
decoder_output, decoder_hidden = decoder(decoder_input,
decoder_hidden)
decoder_outputs[:, di, :] = decoder_output
# Use own predictions as inputs at next step
decoder_input = decoder_output
# Get absolute trajectory
abs_helpers = {}
abs_helpers["REFERENCE"] = np.array(helpers_dict["DELTA_REFERENCE"])
abs_helpers["TRANSLATION"] = np.array(helpers_dict["TRANSLATION"])
abs_helpers["ROTATION"] = np.array(helpers_dict["ROTATION"])
abs_inputs, abs_outputs = baseline_utils.get_abs_traj(
_input.clone().cpu().numpy(),
decoder_outputs.detach().clone().cpu().numpy(),
args,
abs_helpers,
)
for i in range(abs_outputs.shape[0]):
seq_id = int(helpers_dict["SEQ_PATHS"][i])
forecasted_trajectories[seq_id] = [abs_outputs[i]]
with open(os.path.join(forecasted_save_dir, f"{start_idx}.pkl"),
"wb") as f:
pkl.dump(forecasted_trajectories, f)
def infer_maml_map(
    test_loader: Any,
    support_loader: Any,
    encoder: Any,
    decoder: Any,
    start_idx: int,
    forecasted_save_dir: str,
    model_utils: ModelUtils,
    epoch: int,
    loader_len : int,
    support_loader_len: int,
):
    """Infer function for map-based LSTM baselines and save the forecasted trajectories.

    Args:
        test_loader: DataLoader for the test set
        support_loader: DataLoader providing support-task batches for MAML adaptation
        encoder: Encoder network instance
        decoder: Decoder network instance
        start_idx: start index for the current joblib batch
        forecasted_save_dir: Directory where forecasted trajectories are to be saved
        model_utils: ModelUtils instance
        epoch: Epoch at which we ended training at
        loader_len: number of batches in ``test_loader`` (drives the progress bar)
        support_loader_len: number of batches in ``support_loader``
            (NOTE(review): unused in this function)
    """
    forecasted_trajectories = {}
    args = parse_arguments()
    # Per-step loss weighting is only defined when inner-loop training steps are configured
    per_step_loss_importance_vecor = get_per_step_loss_importance_vector(args, epoch) if args.num_training_steps_per_iter > 0 else None
    criterion = nn.MSELoss()
    # NOTE(review): never appended to — dead variable kept for interface parity
    total_loss = []
    with tqdm(total=loader_len, desc='Testing on epoch: {}'.format(epoch), position=0) as pbar:
        for i, data_batch in enumerate(test_loader):
            # Support task data batch:
            support_batch = next(iter(support_loader))
            encoder.eval()
            decoder.eval()
            pbar.update(1)
            loss = 0
            (support_input_seqs, support_obs_seqs, test_input_seq, test_obs_seq, helpers) = data_batch
            batch_helpers = list(zip(*helpers))
            helpers_dict = {}
            for k, v in config.LSTM_HELPER_DICT_IDX.items():
                helpers_dict[k] = batch_helpers[v]
            batch_size = test_input_seq.shape[0]
            # NOTE(review): unused below — test_input_seq is rebuilt per candidate
            # from args.obs_len; also note length is read from dim 2 here, verify
            # the loader's output layout.
            input_length = test_input_seq.shape[2]
            with tqdm(total=batch_size, desc='Iterating over batch', position=1) as pbar2:
                for batch_idx in range(batch_size):
                    pbar2.update(1)
                    num_candidates = len(
                        helpers_dict["CANDIDATE_CENTERLINES"][batch_idx])
                    curr_centroids = helpers_dict["CENTROIDS"][batch_idx]
                    seq_id = int(helpers_dict["SEQ_PATHS"][batch_idx])
                    abs_outputs = []
                    # Predict using every centerline candidate for the current trajectory
                    for candidate_idx in range(num_candidates):
                        curr_centerline = helpers_dict["CANDIDATE_CENTERLINES"][
                            batch_idx][candidate_idx]
                        curr_nt_dist = helpers_dict["CANDIDATE_NT_DISTANCES"][
                            batch_idx][candidate_idx]
                        # Since this is test set all our inputs are gonna be None, gotta build
                        # them ourselves.
                        test_input_seq = torch.FloatTensor(
                            np.expand_dims(curr_nt_dist[:args.obs_len].astype(float),
                                           0)).to(device)
                        # Update support batch and feed to maml_forward:
                        # slot 2 of the support tuple is the query/test input
                        tempbatch = list(support_batch)
                        tempbatch[2] = test_input_seq.unsqueeze(0)
                        support_batch = tuple(tempbatch)
                        loss, preds = maml_forward(
                            args = args,
                            data_batch = support_batch,
                            epoch = epoch,
                            criterion = criterion,
                            encoder = encoder,
                            decoder = decoder,
                            model_utils = model_utils,
                            per_step_loss_importance_vecor = per_step_loss_importance_vecor,
                            second_order = False,
                            rollout_len = args.pred_len,
                            encoder_learning_rule = None,
                            decoder_learning_rule = None,
                        )
                        # Preds has been broadcasted to the shape of output, which means it has batch size,
                        # but actually it's just copied, so take one of the elements only
                        preds = preds[0,:,:].unsqueeze(0)
                        # Get absolute trajectory
                        abs_helpers = {}
                        abs_helpers["REFERENCE"] = np.expand_dims(
                            np.array(helpers_dict["CANDIDATE_DELTA_REFERENCES"]
                                     [batch_idx][candidate_idx]),
                            0,
                        )
                        abs_helpers["CENTERLINE"] = np.expand_dims(curr_centerline, 0)
                        abs_input, abs_output = baseline_utils.get_abs_traj(
                            test_input_seq.clone().cpu().numpy(),
                            preds.detach().clone().cpu().numpy(),
                            args,
                            abs_helpers,
                        )
                        # array of shape (1,30,2) to list of (30,2)
                        abs_outputs.append(abs_output[0])
                    forecasted_trajectories[seq_id] = abs_outputs
    # Persist all forecasts of this joblib shard once, after the full pass
    os.makedirs(forecasted_save_dir, exist_ok=True)
    with open(os.path.join(forecasted_save_dir, f"{start_idx}.pkl"),
              "wb") as f:
        pkl.dump(forecasted_trajectories, f)
def infer_maml_map_simplified(
    test_loader: Any,
    support_loader: Any,
    encoder: Any,
    decoder: Any,
    start_idx: int,
    forecasted_save_dir: str,
    model_utils: ModelUtils,
    epoch: int,
    loader_len : int,
    encoder_learning_rules = None,
    decoder_learning_rules = None,
):
    """Infer trajectories with a MAML-adapted map-based LSTM and save them.

    For each (support, test) batch pair, the encoder/decoder are first
    adapted on the support batch via ``maml_infer_forward``; the adapted
    parameters are then used by ``lstm_infer_forward`` to forecast one
    trajectory per centerline candidate of every test sequence. All
    forecasts for this joblib batch are pickled to
    ``forecasted_save_dir/<start_idx>.pkl``.

    Args:
        test_loader: DataLoader for the test set
        support_loader: DataLoader yielding the support (adaptation) batches
        encoder: Encoder network instance
        decoder: Decoder network instance
        start_idx: start index for the current joblib batch
        forecasted_save_dir: Directory where forecasted trajectories are to be saved
        model_utils: ModelUtils instance
        epoch: Epoch at which we ended training at
        loader_len: Loader length; kept for interface compatibility (unused here)
        encoder_learning_rules: Optional learning rules for encoder adaptation
        decoder_learning_rules: Optional learning rules for decoder adaptation
    """
    forecasted_trajectories = {}
    args = parse_arguments()
    criterion = nn.MSELoss()
    for i, (support_batch, data_batch) in enumerate(zip(support_loader, test_loader)):
        encoder.eval()
        decoder.eval()
        # Adapt the networks on the support batch; returns the fast
        # (adapted) parameter sets used for this batch's predictions.
        encoder_parameters, decoder_parameters = maml_infer_forward(
            args = args,
            data_batch = support_batch,
            epoch = epoch,
            criterion = criterion,
            encoder = encoder,
            decoder = decoder,
            model_utils = model_utils,
            encoder_learning_rules = encoder_learning_rules,
            decoder_learning_rules = decoder_learning_rules,
        )
        (_, _, _, _, helpers) = data_batch
        # Regroup per-sample helper tuples into per-field lists keyed by name.
        batch_helpers = list(zip(*helpers))
        helpers_dict = {}
        for k, v in config.LSTM_HELPER_DICT_IDX.items():
            helpers_dict[k] = batch_helpers[v]
        batch_size = data_batch[2].shape[0]
        with tqdm(total=batch_size, desc='Iterating over batch', position=1) as pbar2:
            for batch_idx in range(batch_size):
                pbar2.update(1)
                num_candidates = len(helpers_dict["CANDIDATE_CENTERLINES"][batch_idx])
                seq_id = int(helpers_dict["SEQ_PATHS"][batch_idx])
                abs_outputs = []
                # Predict using every centerline candidate for the current trajectory
                for candidate_idx in range(num_candidates):
                    curr_centerline = helpers_dict["CANDIDATE_CENTERLINES"][
                        batch_idx][candidate_idx]
                    curr_nt_dist = helpers_dict["CANDIDATE_NT_DISTANCES"][
                        batch_idx][candidate_idx]
                    # Test-set inputs are None in the batch, so build the
                    # observed sequence from the candidate's N-T distances.
                    test_input_seq = torch.FloatTensor(
                        np.expand_dims(curr_nt_dist[:args.obs_len].astype(float),
                                       0)).to(device)
                    # Dummy target: ground truth is unknown at test time.
                    # NOTE(review): allocated on CPU while the input is on
                    # `device` — confirm lstm_infer_forward handles this.
                    test_target_seq = torch.zeros(test_input_seq.shape[0], 30, 2)
                    preds = lstm_infer_forward(
                        num_layers = args.num_layers,
                        encoder = encoder,
                        decoder = decoder,
                        encoder_params = encoder_parameters,
                        decoder_params = decoder_parameters,
                        input_seq = test_input_seq,
                        target_seq = test_target_seq,
                        obs_len = args.obs_len,
                        pred_len = args.pred_len,
                        model_utils = model_utils,
                    )
                    # Convert centerline-relative coordinates back to
                    # absolute map coordinates.
                    abs_helpers = {}
                    abs_helpers["REFERENCE"] = np.expand_dims(
                        np.array(helpers_dict["CANDIDATE_DELTA_REFERENCES"]
                                 [batch_idx][candidate_idx]),
                        0,
                    )
                    abs_helpers["CENTERLINE"] = np.expand_dims(curr_centerline, 0)
                    abs_input, abs_output = baseline_utils.get_abs_traj(
                        test_input_seq.clone().cpu().numpy(),
                        preds.detach().clone().cpu().numpy(),
                        args,
                        abs_helpers,
                    )
                    # array of shape (1,30,2) to list of (30,2)
                    abs_outputs.append(abs_output[0])
                forecasted_trajectories[seq_id] = abs_outputs
    os.makedirs(forecasted_save_dir, exist_ok=True)
    with open(os.path.join(forecasted_save_dir, f"{start_idx}.pkl"),
              "wb") as f:
        pkl.dump(forecasted_trajectories, f)
def infer_map(
    test_loader: torch.utils.data.DataLoader,
    encoder: EncoderRNN,
    decoder: DecoderRNN,
    start_idx: int,
    forecasted_save_dir: str,
    model_utils: ModelUtils,
):
    """Infer function for map-based LSTM baselines and save the forecasted trajectories.

    For every sequence in every batch, one trajectory is decoded per
    centerline candidate and converted back to absolute map coordinates.
    All forecasts for this joblib batch are pickled to
    ``forecasted_save_dir/<start_idx>.pkl``.

    Args:
        test_loader: DataLoader for the test set
        encoder: Encoder network instance
        decoder: Decoder network instance
        start_idx: start index for the current joblib batch
        forecasted_save_dir: Directory where forecasted trajectories are to be saved
        model_utils: ModelUtils instance
    """
    args = parse_arguments()
    forecasted_trajectories = {}
    for i, (_input, target, helpers) in enumerate(test_loader):
        _input = _input.to(device)
        # Regroup per-sample helper tuples into per-field lists keyed by name.
        batch_helpers = list(zip(*helpers))
        helpers_dict = {}
        for k, v in config.LSTM_HELPER_DICT_IDX.items():
            helpers_dict[k] = batch_helpers[v]
        # Set to eval mode
        encoder.eval()
        decoder.eval()
        batch_size = _input.shape[0]
        # Observed length captured from the batch input; `_input` is
        # rebuilt per candidate below. NOTE(review): this assumes every
        # candidate N-T sequence has at least `input_length` (== obs_len)
        # steps — confirm upstream.
        input_length = _input.shape[1]
        # Iterate over every element in the batch
        for batch_idx in range(batch_size):
            num_candidates = len(
                helpers_dict["CANDIDATE_CENTERLINES"][batch_idx])
            seq_id = int(helpers_dict["SEQ_PATHS"][batch_idx])
            abs_outputs = []
            # Predict using every centerline candidate for the current trajectory
            for candidate_idx in range(num_candidates):
                curr_centerline = helpers_dict["CANDIDATE_CENTERLINES"][
                    batch_idx][candidate_idx]
                curr_nt_dist = helpers_dict["CANDIDATE_NT_DISTANCES"][
                    batch_idx][candidate_idx]
                # Rebuild the observed sequence for this candidate
                # (this deliberately overwrites the batch `_input`).
                _input = torch.FloatTensor(
                    np.expand_dims(curr_nt_dist[:args.obs_len].astype(float),
                                   0)).to(device)
                # Initialize encoder hidden state
                encoder_hidden = model_utils.init_hidden(
                    1, encoder.module.hidden_size
                    if use_cuda else encoder.hidden_size)
                # Encode observed trajectory
                for ei in range(input_length):
                    encoder_input = _input[:, ei, :]
                    encoder_hidden = encoder(encoder_input, encoder_hidden)
                # Initialize decoder input with last coordinate in encoder
                decoder_input = encoder_input[:, :2]
                # Initialize decoder hidden state as encoder hidden state
                decoder_hidden = encoder_hidden
                decoder_outputs = torch.zeros((1, args.pred_len, 2)).to(device)
                # Decode hidden state in future trajectory
                for di in range(args.pred_len):
                    decoder_output, decoder_hidden = decoder(
                        decoder_input, decoder_hidden)
                    decoder_outputs[:, di, :] = decoder_output
                    # Use own predictions as inputs at next step
                    decoder_input = decoder_output
                # Get absolute trajectory
                abs_helpers = {}
                abs_helpers["REFERENCE"] = np.expand_dims(
                    np.array(helpers_dict["CANDIDATE_DELTA_REFERENCES"]
                             [batch_idx][candidate_idx]),
                    0,
                )
                abs_helpers["CENTERLINE"] = np.expand_dims(curr_centerline, 0)
                abs_input, abs_output = baseline_utils.get_abs_traj(
                    _input.clone().cpu().numpy(),
                    decoder_outputs.detach().clone().cpu().numpy(),
                    args,
                    abs_helpers,
                )
                # array of shape (1,30,2) to list of (30,2)
                abs_outputs.append(abs_output[0])
            forecasted_trajectories[seq_id] = abs_outputs
    os.makedirs(forecasted_save_dir, exist_ok=True)
    with open(os.path.join(forecasted_save_dir, f"{start_idx}.pkl"),
              "wb") as f:
        pkl.dump(forecasted_trajectories, f)
def infer_helper(
curr_data_dict: Dict[str, Any],
support_data_dict: Dict[str, Any],
start_idx: int,
encoder: EncoderRNN,
decoder: DecoderRNN,
model_utils: ModelUtils,
forecasted_save_dir: str,
epoch: int,
encoder_learning_rules = None,
decoder_learning_rules = None,
):
"""Run inference on the current joblib batch.
Args:
curr_data_dict: Data dictionary for the current joblib batch
start_idx: Start idx of the current joblib batch
encoder: Encoder network instance
decoder: Decoder network instance
model_utils: ModelUtils instance
forecasted_save_dir: Directory where forecasted trajectories are to be saved
epoch: The epoch which we stopped training at
"""
args = parse_arguments()
curr_test_dataset = LSTMDataset_maml_simplified(curr_data_dict, args, "test", | |
# views.py — Flask views for the 2018 conference site
#!/usr/bin/python
# -*- coding: utf8 -*-
import os
import re
import textwrap
import requests
import unicodedata
from datetime import datetime, timedelta
from flask import Flask, g, request, render_template, abort, make_response
from flask_babel import Babel, gettext
from jinja2 import evalcontextfilter, Markup
# Flask application and i18n (Babel) setup.
app = Flask(__name__, static_url_path='/static')
app.config['BABEL_DEFAULT_LOCALE'] = 'sk'
app.jinja_options = {'extensions': ['jinja2.ext.with_', 'jinja2.ext.i18n']}
babel = Babel(app)
# Site-wide constants.
EVENT = gettext('PyCon SK 2018')
DOMAIN = 'https://2018.pycon.sk'
API_DOMAIN = 'https://api.pycon.sk'
LANGS = ('en', 'sk')  # language codes accepted in URLs (see before())
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S+00:00'
NOW = datetime.utcnow().strftime(TIME_FORMAT)  # captured once at import time
SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
LOGO_PYCON = 'logo/pycon_logo_square.svg'
# schema.org "Organization" record for SPy o. z., the organizing association.
LDJSON_SPY = {
    "@type": "Organization",
    "name": "SPy o. z.",
    "url": "https://spy.pycon.sk",
    "logo": "https://spy.pycon.sk/img/logo/spy-logo.png",
    "sameAs": [
        "https://facebook.com/pyconsk",
        "https://twitter.com/pyconsk",
        "https://www.linkedin.com/company/spy-o--z-",
        "https://github.com/pyconsk",
    ]
}
# schema.org "Event" record for the conference, embedded as LD+JSON metadata
# in rendered pages (see _get_template_variables()).
LDJSON_PYCON = {
    "@context": "http://schema.org",
    "@type": "Event",
    "name": EVENT,
    "description": gettext("PyCon will be back at Slovakia in 2018 again. PyCon SK is a community-organized conference "
                           "for the Python programming language."),
    "startDate": "2018-03-09T9:00:00+01:00",
    "endDate": "2018-03-11T18:00:00+01:00",
    "image": DOMAIN + "/static/img/logo/pycon_long_2018.png",
    "location": {
        "@type": "Place",
        # NOTE(review): "<NAME>" looks like an anonymization placeholder —
        # restore the real venue name before deploying.
        "name": "<NAME>",
        "address": {
            "@type": "PostalAddress",
            "streetAddress": "Ilkovičova 2",
            "addressLocality": "Bratislava 4",
            "postalCode": "842 16",
            "addressCountry": gettext("Slovak Republic")
        },
    },
    "url": DOMAIN,
    "workPerformed": {
        "@type": "CreativeWork",
        "name": EVENT,
        "creator": LDJSON_SPY
    }
}
# calendar settings
ICAL_LEN = 70  # length of a calendar (ical) line
ICAL_NL = '\\n\n'  # calendar newline
# Talk titles to leave out of calendar exports (presumably — confirm in
# the calendar-generation code).
IGNORE_TALKS = ['Break', 'Coffee Break']
# Human-readable labels for the two kinds of schedule entries.
TYPE = {
    'talk': gettext('Talk'),
    'workshop': gettext('Workshop'),
}
# Topic tags shown in the programme; keys match the 'flag' field of
# schedule entries below.
TAGS = {
    'ai': gettext('Machine Learning / AI'),
    'community': gettext('Community / Diversity / Social'),
    'data': gettext('Data Science'),
    'devops': 'DevOps',
    'docs': gettext('Documentation'),
    'edu': gettext('Education'),
    'generic': gettext('Python General'),
    'security': gettext('Security'),
    'softskills': gettext('Soft Skills'),
    'hardware': gettext('Hardware'),
    'web': gettext('Web Development'),
    'other': gettext('Other'),
}
# Programme data. Each *_TRACK/*_WORKSHOPS/*_HALLWAY constant is a tuple of
# schedule entries in running order; "pause" is the break (in minutes) after
# the entry. Entries may also carry explicit 'duration', 'flag' (a TAGS key),
# 'type', 'language' or 'link'. NOTE(review): entries without those fields
# are presumably matched against the talks API by title — confirm in
# generate_track().
FRIDAY_START = datetime(2018, 3, 9, hour=9)
SATURDAY_START = datetime(2018, 3, 10, hour=9)
SUNDAY_START = datetime(2018, 3, 11, hour=10, minute=15)
FRIDAY_TRACK1 = (
    {"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
    {"pause": 15, 'title': gettext("FaaS and Furious - Zero to Serverless in 60 seconds - Anywhere")},
    {"pause": 15, 'title': gettext("Docs or it didn't happen")},
    {"pause": 5, 'title': gettext("GraphQL is the new black")},
    {"pause": 60, 'title': gettext("To the Google in 80 Days")},
    {"pause": 5, 'title': gettext("Unsafe at Any Speed")},
    {"pause": 15, 'title': gettext("Protecting Privacy and Security — For Yourself and Your Community")},
    {"pause": 5, 'title': gettext("ZODB: The Graph database for Python Developers.")},
    {"pause": 15, 'title': gettext("Differentiable programming in Python and Gluon for (not only medical) image analysis")},
    {"pause": 5, 'title': gettext("Vim your Python, Python your Vim")},
)
FRIDAY_TRACK2 = (
    {"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
    {"pause": 5, 'title': gettext("Python Days in Martin and follow-up activities")},
    {"pause": 15, 'title': gettext("Python programming till graduation")},
    {"pause": 5, 'title': gettext("Open educational resources for learning Python")},
    {"pause": 60, 'title': gettext("About Ninjas and Mentors: CoderDojo in Slovakia")},
    {"pause": 5, 'title': gettext("Community based courses")},
    {"pause": 15, 'title': gettext("How do we struggle with Python in Martin?")},
    {"pause": 5, 'title': gettext("Why hardware attracts kids and adults to IT")},
    {"pause": 5, 'title': gettext("Panel discussion: Teaching IT in Slovakia - where is it heading?")},
    {"pause": 5, 'title': gettext("EDU Talks"), 'duration': 30, 'language': 'SK', 'flag': 'edu', 'type': 'talk'},
)
FRIDAY_WORKSHOPS1 = (
    {"pause": 10, 'title': gettext("How to create interactive maps in Python / R")},
    {"pause": 60, 'title': gettext("Working with XML")},
    {"pause": 5, 'title': gettext("Managing high-available applications in production")},
)
FRIDAY_WORKSHOPS2 = (
    {"pause": 40, 'title': gettext("Workshop: An Introduction to Ansible")},
    {"pause": 5, 'title': gettext("Introduction to Machine Learning with Python")},
)
FRIDAY_HALLWAY = (
    {"pause": 0, 'title': gettext("OpenPGP key-signing party"), 'duration': 30, 'link': 'https://github.com/pyconsk/2018.pycon.sk/tree/master/openpgp-key-signing-party', 'flag': 'security'},
)
SATURDAY_TRACK1 = (
    {"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
    {"pause": 5, 'title': gettext("Solutions Reviews")},
    {"pause": 15, 'title': gettext("Campaign Automation & Abusing Celery Properly")},
    {"pause": 5, 'title': gettext("The Truth about Mastering Big Data")},
    {"pause": 5, 'title': gettext("Industrial Machine Learning: Building scalable distributed machine learning pipelines with Python")},
    {"pause": 25, 'title': gettext("Programming contest Semi finale"), 'duration': 30, 'flag': 'other', 'link': 'https://app.pycon.sk'},
    {"pause": 5, 'title': gettext("Pythonic code, by example")},
    {"pause": 15, 'title': gettext("Our DevOps journey, is SRE the next stop?")},
    {"pause": 5, 'title': gettext("Implementing distributed systems with Consul")},
    {"pause": 15, 'title': gettext("Designing fast and scalable Python MicroServices with django")},
    {"pause": 5, 'title': gettext("When your wetware has too many threads - Tips from an ADHDer on how to improve your focus")},
    {"pause": 5, 'title': gettext("Programming Python as performance: live coding with FoxDot")},
    {"pause": 5, 'title': gettext("Programming Contest Grand Finale"), 'duration': 30, 'flag': 'other', 'type': 'talk', 'language': 'EN'},
    {"pause": 5, 'title': gettext("Lightning Talks"), 'duration': 45, 'flag': 'other', 'type': 'talk'},
)
SATURDAY_TRACK2 = (
    {"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
    {"pause": 5, 'title': gettext("Meteo data in Python. Effectively.")},
    {"pause": 15, 'title': gettext("Around the World in 30 minutes")},
    {"pause": 5, 'title': gettext("LOCKED SHIELDS: What a good cyber testing looks like")},
    {"pause": 60, 'title': gettext("Kiwi.com in ZOO")},
    {"pause": 5, 'title': gettext("Keynote in Kiwi.com Hall"), 'duration': 30, 'flag': 'generic', 'type': 'talk'},
    {"pause": 15, 'title': gettext("Skynet your Infrastructure with QUADS")},
    {"pause": 5, 'title': gettext("Automated network OS testing")},
    {"pause": 15, 'title': gettext("Tools to interact with Bitcoin and Ethereum")},
    {"pause": 5, 'title': gettext("7 Steps to a Clean Issue Tracker")},
    {"pause": 5, 'title': gettext("The Concierge Paradigm")},
)
SATURDAY_WORKSHOPS1 = (
    {"pause": 55, 'title': gettext("Effectively running python applications in Kubernetes/OpenShift")},
    {"pause": 5, 'title': gettext("Roboworkshop")},
)
SATURDAY_WORKSHOPS2 = (
    {"pause": 55, 'title': gettext("Microbit:Slovakia")},
    {"pause": 5, 'title': gettext("Coding in Python: A high-school programming lesson")},
)
SATURDAY_HALLWAY1 = (
    {"pause": 0, 'title': gettext("Pandas documentation sprint"), 'duration': 360, 'link': 'https://python-sprints.github.io/pandas/', 'flag': 'docs'},
)
SATURDAY_HALLWAY2 = (
    {"pause": 145, 'title': gettext("Programming contest"), 'duration': 95, 'flag': 'other', 'link': 'https://app.pycon.sk'},
    {"pause": 5, 'title': gettext("Conference organizers meetup"), 'duration': 30, 'flag': 'community'},
)
SUNDAY_TRACK1 = (
    {"pause": 5, 'title': gettext("Charon and the way out from a pickle hell")},
    {"pause": 15, 'title': gettext("Making Python Behave")},
    {"pause": 5, 'title': gettext("“Secret” information about the code we write")},
    {"pause": 60, 'title': gettext("How to connect objects with each other in different situations with Pythonic ways - association, aggregation, composition and etc.")},
    {"pause": 5, 'title': gettext("APIs: Gateway to world's data")},
    {"pause": 15, 'title': gettext("Getting started with HDF5 and PyTables")},
    {"pause": 5, 'title': gettext("Real-time personalized recommendations using embeddings")},
    {"pause": 5, 'title': gettext("Quiz"), 'duration': 30, 'flag': 'other', 'type': 'talk'},
)
SUNDAY_WORKSHOPS1 = (
    {"pause": 40, 'title': gettext("Real-time transcription and sentiment analysis of audio streams; on the phone and in the browser")},
    {"pause": 5, 'title': gettext("Learn MongoDB by modeling PyPI in a document database")},
)
SUNDAY_WORKSHOPS2 = (
    {"pause": 15, 'title': gettext("Testing Essentials for Scientists and Engineers")},
    {"pause": 5, 'title': gettext("Cython: Speed up your code without going insane")},
)
SUNDAY_WORKSHOPS3 = (
    {"pause": 15, 'title': gettext("Meet the pandas")},
    {"pause": 5, 'title': gettext("Serverless with OpenFaaS and Python")},
)
SUNDAY_WORKSHOPS4 = (
    {"pause": 5, 'title': gettext("Django Girls"), 'duration': 540, 'flag': 'web', 'type': 'workshop'},
)
SUNDAY_HALLWAY = (
    {"pause": 5, 'title': gettext("Documentation clinic/helpdesk")},
)
# Rooms: display name plus room number as shown in the venue.
AULA1 = {
    'name': gettext('Kiwi.com Hall'),
    'number': '-1.61',
}
AULA2 = {
    'name': gettext('Python Software Foundation Hall'),
    'number': '-1.65',
}
AULA3 = {
    'name': gettext('SPy - Hall A'),
    'number': '-1.57',
}
AULA4 = {
    'name': gettext('SPy - Hall B'),
    'number': '-1.57',
}
AULA5 = {
    'name': gettext('Django Girls Auditorium'),
    'number': '+1.31',
}
HALLWAY = {
    'name': gettext('Hallway'),
    'number': '',
}
def get_conference_data(url='', filters=''):
    """Connect to API and get public talks and speakers data.

    Args:
        url: API path appended to ``API_DOMAIN``, e.g. '/event/2018/talks/'.
        filters: optional query string (e.g. 'category=talk') appended to
            the URL.

    Returns:
        The decoded JSON payload returned by the API.
    """
    url = API_DOMAIN + url
    if filters:
        # Start the query string with '?' when the path has none yet;
        # the previous unconditional '&' produced invalid URLs such as
        # '.../talks/&category=talk'.
        separator = '&' if '?' in url else '?'
        url = url + separator + filters
    r = requests.get(url)
    return r.json()
# Fetched once at import time; if the API is unreachable the app fails to
# start (module import raises).
API_DATA_SPEAKERS = get_conference_data(url='/event/2018/speakers/')
API_DATA_TALKS = get_conference_data(url='/event/2018/talks/')
@app.before_request
def before():
    """Extract the language code from the URL before each request.

    Stores the code on ``g.current_lang`` for ``get_locale`` and removes it
    from the view args so view functions need no ``lang_code`` parameter.
    Unsupported languages get a 404.
    """
    if request.view_args and 'lang_code' in request.view_args:
        lang = request.view_args['lang_code']
        # Validate before touching g so g.current_lang never holds an
        # unsupported value (previously it was set first, then aborted).
        if lang not in LANGS:
            return abort(404)
        g.current_lang = lang
        request.view_args.pop('lang_code')
@babel.localeselector
def get_locale():
    """Return the locale for this request: the URL language stored on ``g``
    by ``before()``, or the app default when none was given."""
    # try to guess the language from the user accept
    # header the browser transmits. The best match wins.
    # return request.accept_languages.best_match(['de', 'sk', 'en'])
    return g.get('current_lang', app.config['BABEL_DEFAULT_LOCALE'])
@app.template_filter()
@evalcontextfilter
def linebreaks(eval_ctx, value):
    """Converts newlines into <p> and <br />s."""
    # Normalize all newline flavours to '\n' first.
    normalized = re.sub(r'\r\n|\r|\n', '\n', value)
    # Blank-line-separated chunks become paragraphs; single newlines
    # inside a paragraph become <br /> tags.
    wrapped = (
        u'<p>%s</p>' % chunk.replace('\n', '<br />')
        for chunk in re.split('\n{2,}', normalized)
    )
    return Markup(u'\n\n'.join(wrapped))
@app.template_filter()
@evalcontextfilter
def linebreaksbr(eval_ctx, value):
    """Converts newlines into <p> and <br />s."""
    # Normalize all newline flavours to '\n' first.
    normalized = re.sub(r'\r\n|\r|\n', '\n', value)
    # Unlike linebreaks(), paragraphs are not wrapped in <p> tags;
    # only the intra-paragraph newlines become <br />.
    chunks = [u'%s' % chunk.replace('\n', '<br />')
              for chunk in re.split('\n{2,}', normalized)]
    return Markup(u'\n\n'.join(chunks))
@app.template_filter()
@evalcontextfilter
def strip_accents(eval_ctx, value):
    """Strip non ASCII characters and convert them to ASCII."""
    # NFKD decomposition splits accented letters into base letter +
    # combining mark; the ASCII encode with 'ignore' then drops the marks.
    decomposed = unicodedata.normalize('NFKD', value)
    return decomposed.encode('ascii', 'ignore').decode("utf-8")
def _get_template_variables(**kwargs):
    """Collect variables for template that repeats, e.g. are in body.html template.

    Returns a dict with the event title, logo and LD+JSON structured data,
    updated with any keyword overrides from the caller.
    """
    lang = get_locale()
    # Shallow-copy the shared LD+JSON dict before customizing: mutating the
    # module-level LDJSON_PYCON leaked the previous request's language URL
    # into every later request.
    ld_json = dict(LDJSON_PYCON)
    ld_json['url'] = DOMAIN + '/' + lang + '/'
    variables = {
        'title': EVENT,
        'logo': LOGO_PYCON,  # TODO: Do we need this?
        'ld_json': ld_json,
    }
    variables.update(kwargs)
    if 'current_lang' in g:
        variables['lang_code'] = g.current_lang
    else:
        variables['lang_code'] = app.config['BABEL_DEFAULT_LOCALE']
    return variables
def generate_track(api_data, track_data, | |
print(prop_get_resp)
assert(len(prop_get_resp["properties"]) > 0)
# Update an existing Property
print("Update Property")
prop_upd = AeselProperty()
prop_upd.name = "testProperty2"
prop_upd_resp = None
try:
prop_upd_resp = transaction_client.update_property("propTestScene", prop_key, prop_upd)
except Exception as e:
print(e)
assert(False)
print(prop_upd_resp)
# Query for Properties
print("Query Properties")
prop_query = AeselProperty()
prop_query.name = "testProperty2"
prop_query_resp = None
try:
prop_query_resp = transaction_client.property_query("propTestScene", prop_query)
except Exception as e:
print(e)
assert(False)
print(prop_query_resp)
assert(len(prop_query_resp["properties"]) > 0)
# Add a Property Action
print("Add Property Action")
prop_action = AeselAction()
prop_action.name = "testPropAction"
prop_action.description = "this is a Property Action"
prop_frame_initial = AeselPropertyFrame()
prop_frame_initial.frame = 1
pfi_value = AeselPropertyValue()
pfi_value.value = 100.0
pfi_value.left_type = "test"
pfi_value.left_x = 10.0
pfi_value.left_y = 10.2
pfi_value.right_type = "test2"
pfi_value.right_x = 10.1
pfi_value.right_y = 10.3
prop_frame_initial.values = [pfi_value]
prop_action.keyframes = [prop_frame_initial]
try:
prop_action_add_resp = transaction_client.create_property_action("propTestScene", prop_key, prop_action)
except Exception as e:
print(e)
assert(False)
print(prop_action_add_resp)
assert(prop_action_add_resp["err_code"] == 100)
# Update a Property Action
print("Update Property Action")
prop_action2 = AeselAction()
prop_action2.name = "testPropAction"
prop_action2.description = "this is an updated Property Action"
try:
prop_action_upd_resp = transaction_client.update_property_action("propTestScene", prop_key, prop_action2)
except Exception as e:
print(e)
assert(False)
print(prop_action_upd_resp)
assert(prop_action_upd_resp["err_code"] == 100)
# Add a Property Frame to the Action
print("Add Property Frame to Action")
prop_frame2 = AeselPropertyFrame()
prop_frame2.frame = 10
pfi2_value = AeselPropertyValue()
pfi2_value.value = 100.0
pfi2_value.left_type = "test3"
pfi2_value.left_x = 10.0
pfi2_value.left_y = 10.2
pfi2_value.right_type = "test22"
pfi2_value.right_x = 10.1
pfi2_value.right_y = 10.3
prop_frame2.values = [pfi2_value]
try:
prop_frame_add_resp = transaction_client.create_property_frame("propTestScene", prop_key, "testPropAction", prop_frame2)
except Exception as e:
print(e)
assert(False)
print(prop_frame_add_resp)
assert(prop_frame_add_resp["err_code"] == 100)
# Update a Property Frame in the Action
print("Update Property Frame")
prop_frame3 = AeselPropertyFrame()
prop_frame3.frame = 10
pfi3_value = AeselPropertyValue()
pfi3_value.value = 110.0
pfi3_value.left_type = "test4"
pfi3_value.left_x = 10.1
pfi3_value.left_y = 10.4
pfi3_value.right_type = "test32"
pfi3_value.right_x = 12.1
pfi3_value.right_y = 13.3
prop_frame3.values = [pfi3_value]
try:
prop_frame_upd_resp = transaction_client.update_property_frame("propTestScene", prop_key, "testPropAction", prop_frame3)
except Exception as e:
print(e)
assert(False)
print(prop_frame_upd_resp)
assert(prop_frame_upd_resp["err_code"] == 100)
# Delete a Property Frame in the Action
try:
prop_frame_del_resp = transaction_client.delete_property_frame("propTestScene", prop_key, "testPropAction", 10)
except Exception as e:
print(e)
assert(False)
print(prop_frame_upd_resp)
assert(prop_frame_upd_resp["err_code"] == 100)
# Delete a Property Action
print("Delete Property Action")
try:
prop_action_del_resp = transaction_client.delete_property_action("propTestScene", prop_key, "testPropAction")
except Exception as e:
print(e)
assert(False)
print(prop_action_del_resp)
assert(prop_action_del_resp["err_code"] == 100)
# Delete a Property
print("Delete Property")
try:
transaction_client.delete_property("propTestScene", prop_key)
except Exception as e:
print(e)
assert(False)
# Execute tests on the Object API
def _append_graph_handles(handle_list, count, left_type, left_x, left_y,
                          right_type, right_x, right_y):
    """Append ``count`` AeselGraphHandle fixtures to ``handle_list``.

    Handle ``i`` carries the given left/right types and the base
    coordinates offset by ``i`` — the pattern shared by every keyframe
    fixture in test_object_api below.
    """
    for i in range(count):
        handle = AeselGraphHandle()
        handle.left_type = left_type
        handle.left_x = left_x + i
        handle.left_y = left_y + i
        handle.right_type = right_type
        handle.right_x = right_x + i
        handle.right_y = right_y + i
        handle_list.append(handle)


def test_object_api(transaction_client):
    """Exercise the Object API end to end against a live Aesel server.

    Creates a base scene, then drives one object through its lifecycle:
    create, get, update, query, action/keyframe CRUD, lock/unlock, delete.
    Each call follows the surrounding tests' style: print the response and
    assert(False) after printing any exception.
    """
    print("Testing Object API")
    # Save a base scene to store the objects in
    print("Create base scene")
    scn = AeselScene()
    scn.name = "test"
    scn.region = "US-MD"
    scn.latitude = 100.0
    scn.longitude = 100.0
    scn.tags = []
    scn.devices = []
    scn_crt_resp = None
    try:
        scn_crt_resp = transaction_client.create_scene("objTestScene", scn)
    except Exception as e:
        print(e)
        assert(False)
    print(scn_crt_resp)
    # Create a new Object
    print("Create Object")
    obj = AeselObject()
    obj.name = "testObject"
    obj.scene = "objTestScene"
    obj.type = "mesh"
    obj.subtype = "cube"
    obj.frame = 0
    obj.translation = [1, 1, 1]
    obj_crt_resp = None
    try:
        obj_crt_resp = transaction_client.create_object("objTestScene", obj)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_crt_resp)
    assert(len(obj_crt_resp["objects"]) > 0)
    assert(len(obj_crt_resp["objects"][0]["key"]) > 0)
    # The server-assigned key identifies the object in all later calls.
    obj_key = obj_crt_resp["objects"][0]["key"]
    # Get the object
    print("Get Object")
    obj_get_resp = None
    try:
        obj_get_resp = transaction_client.get_object("objTestScene", obj_key)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_get_resp)
    assert(len(obj_get_resp["objects"]) > 0)
    # Update an existing Object
    print("Update Object")
    obj_upd = AeselObject()
    obj_upd.name = "testObject2"
    obj_upd.type = "curve"
    obj_upd.subtype = "circle"
    obj_upd_resp = None
    try:
        obj_upd_resp = transaction_client.update_object("objTestScene", obj_key, obj_upd)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_upd_resp)
    assert(len(obj_upd_resp["objects"]) > 0)
    # Query for Objects
    print("Query Objects")
    obj_query = AeselObject()
    obj_query.name = "testObject2"
    obj_query.frame = 0
    obj_query_resp = None
    try:
        obj_query_resp = transaction_client.object_query("objTestScene", obj_query)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_query_resp)
    assert(len(obj_query_resp["objects"]) > 0)
    # Add an Object Action with one initial keyframe
    print("Add Object Action")
    obj_action = AeselAction()
    obj_action.name = "testObjAction"
    obj_action.description = "this is an Object Action"
    obj_frame_initial = AeselObjectFrame()
    obj_frame_initial.frame = 1
    obj_frame_initial.transform = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                                   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    # 3 translation, 4 rotation and 3 scale handles per keyframe.
    _append_graph_handles(obj_frame_initial.translation_handle, 3,
                          "test", 10.0, 10.2, "test2", 10.1, 10.3)
    _append_graph_handles(obj_frame_initial.rotation_handle, 4,
                          "test", 10.0, 10.2, "test2", 10.1, 10.3)
    _append_graph_handles(obj_frame_initial.scale_handle, 3,
                          "test", 10.0, 10.2, "test2", 10.1, 10.3)
    obj_action.keyframes = [obj_frame_initial]
    try:
        obj_action_add_resp = transaction_client.create_object_action("objTestScene", obj_key, obj_action)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_action_add_resp)
    assert(obj_action_add_resp["err_code"] == 100)
    # Update an Object Action
    print("Update Object Action")
    obj_action2 = AeselAction()
    obj_action2.name = "testObjAction"
    obj_action2.description = "this is an updated Object Action"
    try:
        obj_action_upd_resp = transaction_client.update_object_action("objTestScene", obj_key, obj_action2)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_action_upd_resp)
    assert(obj_action_upd_resp["err_code"] == 100)
    # Add an Object Frame
    print("Add Object Frame")
    obj_frame2 = AeselObjectFrame()
    obj_frame2.frame = 10
    obj_frame2.transform = [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
                            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    _append_graph_handles(obj_frame2.translation_handle, 3,
                          "test2", 11.0, 11.2, "test23", 12.1, 12.3)
    _append_graph_handles(obj_frame2.rotation_handle, 4,
                          "test4", 13.0, 13.2, "test25", 14.1, 14.3)
    _append_graph_handles(obj_frame2.scale_handle, 3,
                          "test6", 15.0, 15.2, "test27", 16.1, 16.3)
    try:
        obj_frame_add_resp = transaction_client.create_object_frame("objTestScene", obj_key, "testObjAction", obj_frame2)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_frame_add_resp)
    assert(obj_frame_add_resp["err_code"] == 100)
    # Update an Object Frame
    print("Update Object Frame")
    obj_frame3 = AeselObjectFrame()
    obj_frame3.frame = 10
    obj_frame3.transform = [1.0, 0.0, 1.0, 0.0, 2.0, 1.0, 0.0, 1.0,
                            1.0, 1.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0]
    _append_graph_handles(obj_frame3.translation_handle, 3,
                          "test23", 12.0, 12.2, "test234", 13.1, 13.3)
    _append_graph_handles(obj_frame3.rotation_handle, 4,
                          "test45", 14.0, 14.2, "test256", 15.1, 15.3)
    _append_graph_handles(obj_frame3.scale_handle, 3,
                          "test67", 16.0, 16.2, "test278", 17.1, 17.3)
    try:
        obj_frame_upd_resp = transaction_client.update_object_frame("objTestScene", obj_key, "testObjAction", obj_frame3)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_frame_upd_resp)
    assert(obj_frame_upd_resp["err_code"] == 100)
    # Delete an Object Frame
    try:
        obj_frame_del_resp = transaction_client.delete_object_frame("objTestScene", obj_key, "testObjAction", 10)
    except Exception as e:
        print(e)
        assert(False)
    print(obj_frame_del_resp)
    assert(obj_frame_del_resp["err_code"] == 100)
    # Delete an Object Action
    print("Delete Object Action")
    try:
        obj_action_del_resp = transaction_client.delete_object_action("objTestScene", obj_key, "testObjAction")
    except Exception as e:
        print(e)
        assert(False)
    print(obj_action_del_resp)
    assert(obj_action_del_resp["err_code"] == 100)
    # Lock an Object
    print("Lock Object")
    try:
        transaction_client.lock_object("objTestScene", obj_key, "testDevice")
    except Exception as e:
        print(e)
        assert(False)
    # Unlock an Object
    print("Unlock Object")
    try:
        transaction_client.unlock_object("objTestScene", obj_key, "testDevice")
    except Exception as e:
        print(e)
        assert(False)
    # Delete an Object
    print("Delete Object")
    try:
        transaction_client.delete_object("objTestScene", obj_key)
    except Exception as e:
        print(e)
        assert(False)
# Execute tests on the Asset API
def test_asset_api(transaction_client):
print("Testing Asset API")
# Save a new file with metadata
print("Create Asset")
metadata = AeselAssetMetadata()
metadata.file_type = "json"
metadata.asset_type = "test"
relationship = AeselAssetRelationship()
relationship.type = "scene"
relationship.related = "12345"
new_key = None
try:
new_key = transaction_client.create_asset("test/resources/testupload.txt", metadata, relationship)
except Exception as e:
print(e)
assert(False)
assert(len(new_key) > 0)
# Pull down the file and validate the contents
print("Asset Get")
file_contents = None
try:
file_contents = transaction_client.get_asset(new_key)
except Exception as e:
print(e)
assert(False)
print(file_contents)
assert(file_contents == b"""{"test": 1}\n""")
# Query for the asset by metadata
print("Asset Metadata Query")
metadata_query = AeselAssetMetadata()
metadata_query.file_type = "json"
mquery_return = | |
# Text normalization helpers (tool-facebook)
# -*- coding: utf-8 -*-
import re
import requests
import modules.make_up.miscellaneous.normalize.convention as convention
from modules.make_up.miscellaneous.normalize.Price import Price
# Parallel lookup strings for Vietnamese accent stripping: the accented
# character at index i of `s1` maps to the plain-ASCII character at the
# same index of `s0`.
s1 = "ÀÁÂÃÈÉÊÌÍÒÓÔÕÙÚÝàáâãèéêìíòóôõùúýĂăĐđĨĩŨũƠơƯưẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊịỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹ"
s0 = "AAAAEEEIIOOOOUUYaaaaeeeiioooouuyAaDdIiUuOoUuAaAaAaAaAaAaAaAaAaAaAaAaEeEeEeEeEeEeEeEeIiIiOoOoOoOoOoOoOoOoOoOoOoOoUuUuUuUuUuUuUuYyYyYyYy"
# Codepoint translation table built once at import time; str.translate
# passes characters missing from the table through unchanged.
_ACCENT_TABLE = {ord(accented, ): plain for accented, plain in zip(s1, s0)}
def remove_accents(input_str):
    """Return ``input_str`` lower-cased with Vietnamese accents removed.

    Every character present in ``s1`` is replaced by its ASCII counterpart
    from ``s0``; all other characters are kept as-is.
    """
    # One C-level translate pass replaces the previous per-character
    # `s1.index` scan (O(len(s1)) per char) and quadratic `+=` building.
    return input_str.translate(_ACCENT_TABLE).lower()
def compound2unicode(text):
    """Convert decomposed Vietnamese characters to their precomposed form.

    The input may contain a base letter followed by a combining tone mark
    (hook above U+0309, acute U+0301, grave U+0300, dot below U+0323,
    tilde U+0303).  Each such two-character sequence is replaced by the
    single precomposed Vietnamese code point; all other text is unchanged.
    """
    # Combining marks in a fixed order; each table entry below lists the
    # precomposed characters for one base letter in that same order:
    # (hook above, acute, grave, dot below, tilde).
    marks = "\u0309\u0301\u0300\u0323\u0303"
    composed = {
        "\u0065": "\u1EBB\u00E9\u00E8\u1EB9\u1EBD",  # e
        "\u00EA": "\u1EC3\u1EBF\u1EC1\u1EC7\u1EC5",  # ê
        "\u0079": "\u1EF7\u00FD\u1EF3\u1EF5\u1EF9",  # y
        "\u0075": "\u1EE7\u00FA\u00F9\u1EE5\u0169",  # u
        "\u01B0": "\u1EED\u1EE9\u1EEB\u1EF1\u1EEF",  # ư
        "\u0069": "\u1EC9\u00ED\u00EC\u1ECB\u0129",  # i
        "\u006F": "\u1ECF\u00F3\u00F2\u1ECD\u00F5",  # o
        "\u01A1": "\u1EDF\u1EDB\u1EDD\u1EE3\u1EE1",  # ơ
        "\u00F4": "\u1ED5\u1ED1\u1ED3\u1ED9\u1ED7",  # ô
        "\u0061": "\u1EA3\u00E1\u00E0\u1EA1\u00E3",  # a
        "\u0103": "\u1EB3\u1EAF\u1EB1\u1EB7\u1EB5",  # ă
        "\u00E2": "\u1EA9\u1EA5\u1EA7\u1EAD\u1EAB",  # â
        "\u0045": "\u1EBA\u00C9\u00C8\u1EB8\u1EBC",  # E
        "\u00CA": "\u1EC2\u1EBE\u1EC0\u1EC6\u1EC4",  # Ê
        "\u0059": "\u1EF6\u00DD\u1EF2\u1EF4\u1EF8",  # Y
        "\u0055": "\u1EE6\u00DA\u00D9\u1EE4\u0168",  # U
        "\u01AF": "\u1EEC\u1EE8\u1EEA\u1EF0\u1EEE",  # Ư
        "\u0049": "\u1EC8\u00CD\u00CC\u1ECA\u0128",  # I
        "\u004F": "\u1ECE\u00D3\u00D2\u1ECC\u00D5",  # O
        "\u01A0": "\u1EDE\u1EDA\u1EDC\u1EE2\u1EE0",  # Ơ
        "\u00D4": "\u1ED4\u1ED0\u1ED2\u1ED8\u1ED6",  # Ô
        "\u0041": "\u1EA2\u00C1\u00C0\u1EA0\u00C3",  # A
        "\u0102": "\u1EB2\u1EAE\u1EB0\u1EB6\u1EB4",  # Ă
        "\u00C2": "\u1EA8\u1EA4\u1EA6\u1EAC\u1EAA",  # Â
    }
    # The (base, mark) pairs are mutually disjoint, so replacement order
    # does not matter: this is behaviour-identical to the original
    # 120-statement replace chain, just table-driven.
    for base, precomposed in composed.items():
        for mark, ch in zip(marks, precomposed):
            text = text.replace(base + mark, ch)
    return text
# Captures the leading house-number/street portion of an address; the
# final class excludes whitespace, lowercase letters and non-word chars.
re_addr1 = r"(\d*.*\d*[^\sa-z\W])"
re_addr2 = r"\d{5,8}"  # search for the mobile number
def normalize_address(str_addr):
    """Normalize a raw address string.

    Returns None for empty input or for strings containing a 5-8 digit
    run (treated as a phone number per ``re_addr2``).  Otherwise strips
    accents and returns the first ``re_addr1`` capture when it matches,
    else the accent-stripped string itself.
    """
    if str_addr is None or str_addr == "" or re.search(re_addr2, str_addr):
        return None
    str_addr = remove_accents(str_addr)
    # NOTE(review): a leftover debug print() of the intermediate value was
    # removed here; use logging if it is still needed.
    matches = re.findall(re_addr1, str_addr)
    if matches:
        return matches[0]
    return str_addr
def normalize_street(str_street):
    """Expand common street-name abbreviations and drop long digit runs.

    Returns None for empty input; otherwise applies the substitutions
    below in order and returns the rewritten string.
    """
    if str_street is None or str_street == "":
        return None
    # (pattern, replacement) pairs, applied in this exact order.
    expansions = (
        (r"ql", "quốc lộ"),
        (r"dt|đt", "đường tỉnh"),
        (r"tl", "tỉnh lộ"),
        (r"tnhau", "thoại ngọc hầu"),
        (r"hl", "hương lộ"),
        (r"\d{4,}", ""),  # strip phone-number-like digit runs
    )
    for pattern, replacement in expansions:
        str_street = re.sub(pattern, replacement, str_street)
    return str_street
# Matches a numbered district: "q"/"Q" followed (possibly after other
# non-digit characters) by a 1-2 digit number, e.g. "q.2", "Quận 10".
# NOTE: the original pattern was r"Q|q\D*(\d{1,2})"; the unparenthesised
# alternation made a bare "Q" anywhere in the string match regardless of
# whether any digits followed it.
re_district_1 = r"[Qq]\D*(\d{1,2})"
# A literal "q . " prefix left over from tokenisation, to be stripped.
re_district_2 = r"q \. "
def normalize_district(str_district):
    """Normalize a district string to "quận <n>" or a cleaned-up name.

    Returns None for empty input.  If the string looks like a numbered
    district ("q 2", "Quận 10", ...) the canonical "quận <n>" form is
    returned; otherwise known abbreviations and level prefixes are
    stripped.
    """
    if str_district is None or str_district == "":
        return None
    if re.search(re_district_1, str_district) and re.search(r"\d+", str_district):
        return "quận " + re.search(r"\d+", str_district).group()
    str_district = re.sub(re_district_2, "", str_district)
    str_district = re.sub(r'pn', "phú nhuận", str_district)
    # Drop administrative-level prefixes: tp (thành phố), tx (thị xã),
    # "huy.n" catches both "huyện" and the unaccented "huyen".
    str_district = re.sub(r'tp', "", str_district)
    str_district = re.sub(r'tx', "", str_district)
    # The original repeated this substitution twice; re.sub already
    # replaces every occurrence, so the duplicate was a no-op.
    str_district = re.sub(r'huy.n', "", str_district)
    return str_district
# Matches a numbered ward: p/P/f/F followed (after optional non-digits)
# by a 1-2 digit number, e.g. "p5", "F.12".
# NOTE: the original pattern was r"p|P|f|F\D*(\d{1,2})"; the
# unparenthesised alternation made any lone "p", "P" or "f" match.
re_ward_1 = r"[pPfF]\D*(\d{1,2})"
# A literal "f . " / "p . " prefix left over from tokenisation.
# NOTE: the original r"f|p \. " also deleted every bare "f" in the text.
re_ward_2 = r"[fp] \. "
def normalize_ward(str_ward):
    """Normalize a ward string to "phường <n>" or a cleaned-up name.

    Returns None for empty input.  Input is lower-cased first; numbered
    wards ("p5", "f.12", ...) become the canonical "phường <n>" form,
    otherwise known abbreviations/prefixes are expanded or stripped.
    """
    if str_ward is None or str_ward == "":
        return None
    str_ward = str_ward.lower()
    if re.search(re_ward_1, str_ward) and re.search(r"\d+", str_ward):
        return "phường " + re.search(r"\d+", str_ward).group()
    str_ward = re.sub(re_ward_2, "", str_ward)
    str_ward = re.sub(r"hbp", "hiệp bình phước", str_ward)
    str_ward = re.sub(r"xã\s?", "", str_ward)
    # The original repeated this substitution twice; re.sub already
    # replaces every occurrence, so the duplicate was a no-op.
    str_ward = re.sub(r"btd", "bình trưng đông", str_ward)
    return str_ward
def normalize_city(str_city):
    """Return the canonical tag for a city name.

    Strips a leading "tp"/"tp." (thành phố) prefix, lower-cases, and
    matches the accent-stripped name against the alias patterns in
    ``convention.CITIES``.  Falls back to the cleaned (still accented)
    name when no alias matches; returns None for empty input.
    """
    if str_city is None or str_city == "":
        return None
    cleaned = re.sub(r"tp [\. ]?", "", str_city).lower()
    unaccented = remove_accents(cleaned)
    for tag, info in convention.CITIES.items():
        if any(re.search(alias, unaccented) for alias in info['alias']):
            return tag
    return cleaned
def normalize_position(str_position):
    """Classify a position string as "mặt tiền" (street front), "hẻm" (alley) or "khác".

    Returns None for empty input; matching is done on the
    accent-stripped text.
    """
    if str_position is None or str_position == "":
        return None
    unaccented = remove_accents(str_position)
    if re.search(r"mat|mt", unaccented):
        return "mặt tiền"
    if re.search(r"hem|ngo|hxh", unaccented):
        return "hẻm"
    return "khác"
def normalize_transaction_type(str_transaction_type):
    """Return the standardized transaction type tag.

    Matches the accent-stripped input against the alias patterns in
    ``convention.TRANSACTION_TYPE``; returns "khác" when nothing matches
    and None for empty input.
    """
    if str_transaction_type is None or str_transaction_type == "":
        return None
    # The original kept an unused `tmp` copy of the input; removed.
    str_transaction_type = remove_accents(str_transaction_type)
    for tag, value in convention.TRANSACTION_TYPE.items():
        for alias in value['aliases']:
            if re.search(alias, str_transaction_type):
                return tag
    return "khác"
def normalize_realestate_type(str_realestate_type):
    """Return the standardized real-estate type tag.

    Matches the accent-stripped input against the alias patterns in
    ``convention.REALESTATE_TYPE``; returns "khác" when nothing matches
    and None for empty input.
    """
    if str_realestate_type is None or str_realestate_type == "":
        return None
    # The original kept an unused `tmp` copy of the input; removed.
    str_realestate_type = remove_accents(str_realestate_type)
    for tag, value in convention.REALESTATE_TYPE.items():
        for alias in value['aliases']:
            if re.search(alias, str_realestate_type):
                return tag
    return "khác"
def normalize_legal(str_legal):
    """Classify legal-status text as "sổ hồng", "sổ đỏ" or "khác".

    Returns None for empty input; matching is done on the
    accent-stripped text ("sh"/"hong" -> pink book, "sd"/"do" -> red book).
    """
    if str_legal is None or str_legal == "":
        return None
    unaccented = remove_accents(str_legal)
    if re.search(r"hong|sh", unaccented):
        return "sổ hồng"
    if re.search(r"do|sd", unaccented):
        return "sổ đỏ"
    return "khác"
def normalize_price(str_price):
'''Extract price_min, price_max
May detect that price is of price per meter square or price of the whole real estate
May detect area
:Args:
str_price - string of price extracted by NLP API
:Returns:
a 5-element tuple of: (price_min, price_max, price_min_m2, price_min_m2, area), None value may be one of 5 elements in tuple
'''
print("Price: ", str_price)
# basic processing
str_price = compound2unicode(str_price)
str_price = re.sub('Mười','10', str_price)
str_price = re.sub('mười','10', str_price)
str_price = re.sub('tỏi', 'ty', str_price)
str_price = remove_accents(str_price)
| |
<filename>tools/gcmole/gcmole.py
#!/usr/bin/env python3
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is main driver for gcmole tool. See README for more details.
# Usage: CLANG_BIN=clang-bin-dir python tools/gcmole/gcmole.py [arm|arm64|ia32|x64]
# for py2/py3 compatibility
from __future__ import print_function
from multiprocessing import cpu_count
import collections
import difflib
import json
import optparse
import os
import re
import subprocess
import sys
import threading
if sys.version_info.major > 2:
    from pathlib import Path
    import queue
else:
    import Queue as queue

    # Python 2 fallbacks: make the builtin open() accept Path-like
    # objects, and emulate the subset of pathlib.Path this script uses.
    default_open = open

    def open(path, *args, **kwargs):
        # Coerce Path objects to str for the Python 2 builtin open().
        return default_open(str(path), *args, **kwargs)

    class Path(object):
        """Minimal pathlib.Path substitute for Python 2."""

        def __init__(self, path, *args):
            if args:
                self._path = os.path.join(str(path), *args)
            else:
                self._path = str(path)

        def __div__(self, other):
            # Python 2 "/" operator: pathlib-style joining.
            return Path(self._path, str(other))

        def __str__(self):
            return self._path

        def resolve(self):
            return Path(os.path.abspath(self._path))

        @property
        def parent(self):
            return Path(os.path.dirname(self._path))

        @property
        def parents(self):
            # All ancestors, nearest first, stopping at "" or "/".
            current = self
            parents = []
            while current._path != "" and current._path != "/":
                current = current.parent
                parents.append(current)
            return parents

        def is_file(self):
            return os.path.isfile(self._path)

        def is_dir(self):
            return os.path.isdir(self._path)

        def exists(self):
            return os.path.exists(self._path)

        def mkdir(self, parents=False, exist_ok=False):
            # Mirrors pathlib.Path.mkdir's keyword behaviour.
            if parents and not self.parent.exists():
                self.parent.mkdir(parents=True, exist_ok=True)
            if exist_ok and self.exists():
                return
            os.mkdir(self._path)
# Per-architecture clang configuration: logical name, GN cpu name,
# clang target triple, V8 arch define and extra compiler options.
ArchCfg = collections.namedtuple(
    "ArchCfg", ["name", "cpu", "triple", "arch_define", "arch_options"])

# TODO(cbruni): use gn desc by default for platform-specific settings
OPTIONS_64BIT = [
    "-DV8_COMPRESS_POINTERS",
    "-DV8_COMPRESS_POINTERS_IN_SHARED_CAGE",
    "-DV8_EXTERNAL_CODE_SPACE",
    "-DV8_SHORT_BUILTIN_CALLS",
    "-DV8_SHARED_RO_HEAP",
]

# Supported --v8-target-cpu values and their clang settings.
ARCHITECTURES = {
    "ia32":
        ArchCfg(
            name="ia32",
            cpu="x86",
            triple="i586-unknown-linux",
            arch_define="V8_TARGET_ARCH_IA32",
            arch_options=["-m32"],
        ),
    "arm":
        ArchCfg(
            name="arm",
            cpu="arm",
            triple="i586-unknown-linux",
            arch_define="V8_TARGET_ARCH_ARM",
            arch_options=["-m32"],
        ),
    # TODO(cbruni): Use detailed settings:
    #   arch_options = OPTIONS_64BIT + [ "-DV8_WIN64_UNWINDING_INFO" ]
    "x64":
        ArchCfg(
            name="x64",
            cpu="x64",
            triple="x86_64-unknown-linux",
            arch_define="V8_TARGET_ARCH_X64",
            arch_options=[]),
    "arm64":
        ArchCfg(
            name="arm64",
            cpu="arm64",
            triple="x86_64-unknown-linux",
            arch_define="V8_TARGET_ARCH_ARM64",
            arch_options=[],
        ),
}
# "x86" is accepted as an alias for "ia32".
ARCHITECTURES['x86'] = ARCHITECTURES['ia32']
def log(format, *args, **kwargs):
    """Print a formatted message prefixed with a level marker.

    ``level`` (keyword, 0-3, default 0) selects the two-character
    marker: "##", "==", "--" or "..".  Positional args are stringified
    and substituted into *format* via str.format().
    """
    markers = ("#", "=", "-", ".")
    prefix = markers[kwargs.get("level", 0)] * 2
    message = str(format).format(*[str(a) for a in args])
    print(prefix, message)
def fatal(format, *args):
    """Log a formatted error message and terminate with exit code 1.

    Accepts the same positional format arguments as log().  The original
    single-parameter signature made call sites such as
    fatal("failed to parse condition: {}", cond) raise TypeError instead
    of reporting the error; *args restores those call sites while staying
    backward-compatible with single-argument callers.
    """
    log(format, *args)
    sys.exit(1)
# -----------------------------------------------------------------------------
# Clang invocation
def make_clang_command_line(plugin, plugin_args, options):
    """Assemble the clang command line that loads the gcmole plugin.

    plugin: plugin name registered by libgcmole.so (e.g. "dump-callees"
      or "find-problems").
    plugin_args: extra arguments forwarded to the plugin; each is
      tunnelled through the driver as -Xclang -plugin-arg-<plugin>.
    options: parsed command-line options (clang dirs, v8 dirs, target cpu).

    Returns the argument list (a mix of str and Path objects) without
    the input file name, which the caller appends per invocation.
    """
    arch_cfg = ARCHITECTURES[options.v8_target_cpu]
    prefixed_plugin_args = []
    if plugin_args:
        for arg in plugin_args:
            # Plugin arguments must each be wrapped in -Xclang pairs so
            # the clang driver forwards them to the cc1 frontend.
            prefixed_plugin_args += [
                "-Xclang",
                "-plugin-arg-" + plugin,
                "-Xclang",
                arg,
            ]
    log("Using generated files in {}", options.v8_build_dir / 'gen')
    icu_src_dir = options.v8_root_dir / 'third_party/icu/source'
    return ([
        options.clang_bin_dir / "clang++",
        "-std=c++17",
        "-c",
        "-Xclang",
        "-load",
        "-Xclang",
        options.clang_plugins_dir / "libgcmole.so",
        "-Xclang",
        "-plugin",
        "-Xclang",
        plugin,
    ] + prefixed_plugin_args + [
        "-Xclang",
        "-triple",
        "-Xclang",
        arch_cfg.triple,
        "-fno-exceptions",
        "-Wno-everything",
        "-D",
        arch_cfg.arch_define,
        "-DENABLE_DEBUGGER_SUPPORT",
        "-DV8_ENABLE_WEBASSEMBLY",
        "-DV8_GC_MOLE",
        "-DV8_INTL_SUPPORT",
        "-I{}".format(options.v8_root_dir),
        "-I{}".format(options.v8_root_dir / 'include'),
        "-I{}".format(options.v8_build_dir / 'gen'),
        "-I{}".format(icu_src_dir / 'common'),
        "-I{}".format(icu_src_dir / 'i18n'),
    ] + arch_cfg.arch_options)
def invoke_clang_plugin_for_file(filename, cmd_line, verbose):
    """Run one clang invocation (cmd_line + filename) to completion.

    Returns a (returncode, stdout, stderr) tuple with both streams
    decoded as UTF-8.
    """
    args = [str(a) for a in cmd_line] + [str(filename)]
    if verbose:
        print("popen ", " ".join(args))
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return proc.returncode, out.decode("utf-8"), err.decode("utf-8")
def invoke_clang_plugin_for_files_in_queue(i, input_queue, output_queue,
                                           cancel_event, cmd_line, verbose):
    """Worker-thread body: drain *input_queue* and run clang on each file.

    Each result is pushed to *output_queue* as a
    (filename, returncode, stdout, stderr) tuple.  A trailing bool is
    always pushed: True when the queue was fully drained (queue.Empty),
    False when the worker stopped early due to a non-zero exit status,
    cancellation or KeyboardInterrupt.
    """
    success = False
    try:
        while not cancel_event.is_set():
            filename = input_queue.get_nowait()
            ret, stdout, stderr = invoke_clang_plugin_for_file(
                filename, cmd_line, verbose)
            output_queue.put_nowait((filename, ret, stdout, stderr))
            if ret != 0:
                # First clang failure stops this worker; the consumer
                # aborts the whole run.
                break
    except KeyboardInterrupt:
        log("[{}] Interrupting", i, level=1)
    except queue.Empty:
        # Input exhausted: normal termination.
        success = True
    finally:
        # Emit a success bool so that the reader knows that there was either an
        # error or all files were processed.
        output_queue.put_nowait(success)
def invoke_clang_plugin_for_each_file(filenames, plugin, plugin_args, options):
    """Generator yielding (filename, stdout, stderr) per processed file.

    Runs the clang plugin either sequentially (options.sequential) or on
    up to cpu_count() worker threads.  Exits the process on the first
    clang failure, forwarding that invocation's stderr.
    """
    cmd_line = make_clang_command_line(plugin, plugin_args, options)
    verbose = options.verbose
    if options.sequential:
        log("Sequential execution.")
        for filename in filenames:
            log(filename, level=1)
            returncode, stdout, stderr = invoke_clang_plugin_for_file(
                filename, cmd_line, verbose)
            if returncode != 0:
                sys.stderr.write(stderr)
                sys.exit(returncode)
            yield filename, stdout, stderr
    else:
        log("Parallel execution.")
        cpus = cpu_count()
        input_queue = queue.Queue()
        output_queue = queue.Queue()
        threads = []
        try:
            for filename in filenames:
                input_queue.put(filename)
            cancel_event = threading.Event()
            for i in range(min(len(filenames), cpus)):
                threads.append(
                    threading.Thread(
                        target=invoke_clang_plugin_for_files_in_queue,
                        args=(i, input_queue, output_queue, cancel_event, cmd_line,
                              verbose)))
            for t in threads:
                t.start()
            num_finished = 0
            # Each worker posts a trailing bool (see
            # invoke_clang_plugin_for_files_in_queue); count them to know
            # when every worker has finished.
            while num_finished < len(threads):
                output = output_queue.get()
                if type(output) == bool:
                    if output:
                        num_finished += 1
                        continue
                    else:
                        # A worker reported failure: stop consuming; the
                        # failing result was already handled below.
                        break
                filename, returncode, stdout, stderr = output
                log(filename, level=2)
                if returncode != 0:
                    sys.stderr.write(stderr)
                    sys.exit(returncode)
                yield filename, stdout, stderr
        finally:
            # Also runs when the consumer abandons this generator early:
            # cancel outstanding work and join all workers.
            cancel_event.set()
            for t in threads:
                t.join()
# -----------------------------------------------------------------------------
def parse_gn_file(options, for_test):
    """Extract gcmole-annotated source lists from the GN build files.

    Returns a dict mapping a condition string (e.g. "all" or
    "arch:x64") to a list of source-file Paths.  In test mode only the
    gcmole test file itself is returned.

    Fix: the regex literals are now raw strings; the originals contained
    "\\." and "\\(" in plain string literals, which are invalid escape
    sequences (SyntaxWarning on modern Python).
    """
    if for_test:
        return {"all": [options.v8_root_dir / "tools/gcmole/gcmole-test.cc"]}
    result = {}
    # (gn file, pattern matching one source entry, path prefix to prepend)
    gn_files = [
        ("BUILD.gn", re.compile(r'"([^"]*?\.cc)"'), ""),
        ("test/cctest/BUILD.gn", re.compile(r'"(test-[^"]*?\.cc)"'),
         Path("test/cctest/")),
    ]
    for filename, pattern, prefix in gn_files:
        path = options.v8_root_dir / filename
        with open(path) as gn_file:
            gn = gn_file.read()
        # Source lists live between "### gcmole(condition) ###" and "]".
        for condition, sources in re.findall(r"### gcmole\((.*?)\) ###(.*?)\]",
                                             gn, re.MULTILINE | re.DOTALL):
            if condition not in result:
                result[condition] = []
            for file in pattern.findall(sources):
                result[condition].append(options.v8_root_dir / prefix / file)
    return result
def evaluate_condition(cond, props):
    """Evaluate a gcmole condition against the configuration properties.

    "all" is always true; otherwise the condition has the form
    "prop:value" and is true when props[prop] == value.  Malformed
    conditions or unknown property names abort via fatal().
    """
    if cond == "all":
        return True
    # Raw string: \w is a regex escape, not a Python string escape.
    m = re.match(r"(\w+):(\w+)", cond)
    if m is None:
        fatal("failed to parse condition: {}", cond)
    p, v = m.groups()
    if p not in props:
        fatal("undefined configuration property: {}", p)
    return props[p] == v
def build_file_list(options, for_test):
    """Collect the source files whose gcmole conditions hold for this run."""
    sources = parse_gn_file(options, for_test)
    # Fixed configuration except for the target cpu; gcmole only runs on
    # linux debug builds.
    props = {
        "os": "linux",
        "arch": options.v8_target_cpu,
        "mode": "debug",
        "simulator": ""
    }
    selected = []
    for condition, files in sources.items():
        if evaluate_condition(condition, props):
            selected.extend(files)
    return selected
# -----------------------------------------------------------------------------
# GCSuspects Generation
# Note that the gcsuspects file lists functions in the form:
# mangled_name,unmangled_function_name
#
# This means that we can match just the function name by matching only
# after a comma.
# Patterns matched against "mangled_name,unmangled_name" entries for
# functions that must never be treated as GC suspects.
ALLOWLIST = [
    # The following functions call CEntryStub which is always present.
    "MacroAssembler.*,CallRuntime",
    "CompileCallLoadPropertyWithInterceptor",
    "CallIC.*,GenerateMiss",
    # DirectCEntryStub is a special stub used on ARM.
    # It is pinned and always present.
    "DirectCEntryStub.*,GenerateCall",
    # TODO GCMole currently is sensitive enough to understand that certain
    # functions only cause GC and return Failure simulataneously.
    # Callsites of such functions are safe as long as they are properly
    # check return value and propagate the Failure to the caller.
    # It should be possible to extend GCMole to understand this.
    "Heap.*,TryEvacuateObject",
    # Ignore all StateTag methods.
    "StateTag",
    # Ignore printing of elements transition.
    "PrintElementsTransition",
    # CodeCreateEvent receives AbstractCode (a raw ptr) as an argument.
    "CodeCreateEvent",
    "WriteField",
]

# A function is a direct GC suspect when its "mangled,name" entry matches
# one of these (combined into named groups by merge_regexp below).
GC_PATTERN = ",.*Collect.*Garbage"
SAFEPOINT_PATTERN = ",SafepointSlowPath"
ALLOWLIST_PATTERN = "|".join("(?:{})".format(p) for p in ALLOWLIST)
def merge_regexp(pattern_dict):
    """Combine named patterns into a single alternation regex.

    Each dict key becomes a named group, so a successful match reveals
    which pattern fired via m.group(name).  (The redundant list() wrapper
    around dict.items() — a Python 2 leftover — was removed.)
    """
    return re.compile("|".join("(?P<{}>{})".format(key, value)
                               for key, value in pattern_dict.items()))
# Classifier regexes: a match tells (via the named group that fired)
# whether a function is a GC trigger, a safepoint, or allowlisted.
IS_SPECIAL_WITHOUT_ALLOW_LIST = merge_regexp({
    "gc": GC_PATTERN,
    "safepoint": SAFEPOINT_PATTERN
})
IS_SPECIAL_WITH_ALLOW_LIST = merge_regexp({
    "gc": GC_PATTERN,
    "safepoint": SAFEPOINT_PATTERN,
    "allow": ALLOWLIST_PATTERN
})
class GCSuspectsCollector:
    """Builds the caller graph from dump-callees output and marks every
    function that can (transitively) trigger a GC.

    Attributes:
      gc: func -> bool; True when the function may cause a GC, False when
        it is explicitly allowlisted.
      gc_caused: func -> set of direct reasons it was marked ("<GC>",
        "<Safepoint>" or the name of a GC-causing callee).
      funcs: func -> set of its callers (reverse call graph).
    """

    def __init__(self, options):
        self.gc = {}
        # defaultdict(set) is equivalent to defaultdict(lambda: set()).
        self.gc_caused = collections.defaultdict(set)
        self.funcs = {}
        self.current_caller = None
        self.allowlist = options.allowlist
        self.is_special = IS_SPECIAL_WITH_ALLOW_LIST if self.allowlist else IS_SPECIAL_WITHOUT_ALLOW_LIST

    def add_cause(self, name, cause):
        self.gc_caused[name].add(cause)

    def parse(self, lines):
        """Consume dump-callees output: a caller line followed by
        tab-indented callee lines."""
        for funcname in lines:
            if not funcname:
                continue
            if funcname[0] != "\t":
                # A new caller record starts.
                self.resolve(funcname)
                self.current_caller = funcname
            else:
                # A callee of current_caller: register the caller edge.
                name = funcname[1:]
                callers_for_name = self.resolve(name)
                callers_for_name.add(self.current_caller)

    def resolve(self, name):
        """Ensure *name* is known, classifying it on first sight.

        Returns the (mutable) set of callers of *name*.
        """
        if name not in self.funcs:
            self.funcs[name] = set()
            m = self.is_special.search(name)
            if m:
                if m.group("gc"):
                    self.gc[name] = True
                    self.add_cause(name, "<GC>")
                elif m.group("safepoint"):
                    self.gc[name] = True
                    self.add_cause(name, "<Safepoint>")
                elif m.group("allow"):
                    # Allowlisted: pinned as not GC-causing.
                    self.gc[name] = False
        return self.funcs[name]

    def propagate(self):
        """Propagate the GC property from callees up to all transitive
        callers (depth-first over the reverse call graph)."""
        log("Propagating GC information")

        def mark(funcname, callers):
            for caller in callers:
                if caller not in self.gc:
                    self.gc[caller] = True
                    mark(caller, self.funcs[caller])
                self.add_cause(caller, funcname)

        for funcname, callers in list(self.funcs.items()):
            if self.gc.get(funcname, False):
                mark(funcname, callers)
def generate_gc_suspects(files, options):
    """Run the dump-callees plugin over *files* and write GC-suspect data.

    Results are written both to the v8 root dir (legacy location read by
    gcmole.cc) and to options.out_dir.
    """
    # Reset the global state.
    collector = GCSuspectsCollector(options)
    log("Building GC Suspects for {}", options.v8_target_cpu)
    for _, stdout, _ in invoke_clang_plugin_for_each_file(files, "dump-callees",
                                                          [], options):
        collector.parse(stdout.splitlines())
    collector.propagate()
    # TODO(cbruni): remove once gcmole.cc is migrated
    write_gcmole_results(collector, options, options.v8_root_dir)
    write_gcmole_results(collector, options, options.out_dir)
def write_gcmole_results(collector, options, dst):
    """Write the gcsuspects and gccauses files into directory *dst*.

    gcsuspects: one "mangled_name,name" line per function that may
    (directly or indirectly) trigger a GC.

    gccauses: for every recorded function, the direct reasons it was
    marked, framed by "start,nested" / "end,nested" marker lines — a
    format chosen for simple deserialization in gcmole.cc, e.g.:

        _ZN2v8...CreateHeapObjectsEv,CreateHeapObjects
        start,nested
        _ZN2v8...CreateApiObjectsEv,CreateApiObjects
        end,nested
    """
    suspect_lines = [name + "\n" for name, is_gc in collector.gc.items() if is_gc]
    with open(dst / "gcsuspects", "w") as out:
        out.writelines(suspect_lines)
    with open(dst / "gccauses", "w") as out:
        for name, causes in collector.gc_caused.items():
            out.write("{}\n".format(name))
            out.write("start,nested\n")
            for cause in causes:
                out.write("{}\n".format(cause))
            out.write("end,nested\n")
    log("GCSuspects and gccauses generated for {} in '{}'", options.v8_target_cpu,
        dst)
# ------------------------------------------------------------------------------
# Analysis
def check_correctness_for_arch(options, for_test):
files = build_file_list(options, for_test)
if not options.reuse_gcsuspects:
generate_gc_suspects(files, options)
else:
log("Reusing GCSuspects for {}", options.v8_target_cpu)
processed_files = 0
errors_found = False
output = ""
log("Searching for evaluation order problems " +
(' and dead variables' if options.dead_vars else '') + "for" +
options.v8_target_cpu)
plugin_args = []
if options.dead_vars:
plugin_args.append("--dead-vars")
if options.verbose:
plugin_args.append("--verbose")
if options.verbose_trace:
plugin_args.append("--verbose-trace")
for _, _, stderr in invoke_clang_plugin_for_each_file(files, "find-problems",
plugin_args, options):
processed_files = processed_files + 1
| |
<reponame>jart/rules_closure
# Copyright 2016 The Closure Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External dependencies for Closure Rules."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("//closure/private:java_import_external.bzl", "java_import_external")
load("//closure/private:platform_http_file.bzl", "platform_http_file")
load("//closure:filegroup_external.bzl", "filegroup_external")
def closure_repositories(
        omit_aopalliance=False,
        omit_args4j=False,
        omit_clang=False,
        omit_com_google_auto_common=False,
        omit_com_google_auto_factory=False,
        omit_com_google_auto_value=False,
        omit_com_google_auto_value_annotations=False,
        omit_com_google_closure_stylesheets=False,
        omit_com_google_code_findbugs_jsr305=False,
        omit_com_google_code_gson=False,
        omit_com_google_common_html_types=False,
        omit_com_google_common_html_types_html_proto=False,
        omit_com_google_dagger=False,
        omit_com_google_dagger_compiler=False,
        omit_com_google_dagger_producers=False,
        omit_com_google_dagger_spi=False,
        omit_com_google_errorprone_error_prone_annotations=False,
        omit_com_google_errorprone_javac_shaded=False,
        omit_com_google_guava=False,
        omit_com_google_inject_extensions_guice_assistedinject=False,
        omit_com_google_inject_extensions_guice_multibindings=False,
        omit_com_google_inject_guice=False,
        omit_com_google_java_format=False,
        omit_com_google_javascript_closure_compiler=False,
        omit_com_google_javascript_closure_library=False,
        omit_com_google_jsinterop_annotations=False,
        omit_com_google_protobuf=False,
        omit_com_google_protobuf_java=False,
        omit_com_google_protobuf_js=False,
        omit_com_google_template_soy=False,
        omit_com_google_template_soy_jssrc=False,
        omit_com_ibm_icu_icu4j=False,
        omit_com_squareup_javapoet=False,
        omit_fonts_noto_hinted_deb=False,
        omit_fonts_noto_mono_deb=False,
        omit_javax_annotation_jsr250_api=False,
        omit_javax_inject=False,
        omit_libexpat_amd64_deb=False,
        omit_libfontconfig_amd64_deb=False,
        omit_libfreetype_amd64_deb=False,
        omit_libpng_amd64_deb=False,
        omit_org_json=False,
        omit_org_jsoup=False,
        omit_org_ow2_asm=False,
        omit_org_ow2_asm_analysis=False,
        omit_org_ow2_asm_commons=False,
        omit_org_ow2_asm_tree=False,
        omit_org_ow2_asm_util=False,
        omit_phantomjs=False):
    """Imports dependencies for Closure Rules.

    Each omit_<repo> flag, when True, skips declaring that external
    repository so the consuming workspace can supply its own version.
    """
    # omit_com_google_protobuf_java is kept in the signature only to give
    # old callers a clear error message.
    if omit_com_google_protobuf_java:
        fail("omit_com_google_protobuf_java no longer supported and must be not be passed to closure_repositories()")
    if not omit_aopalliance:
        aopalliance()
    if not omit_args4j:
        args4j()
    if not omit_clang:
        clang()
    if not omit_com_google_auto_common:
        com_google_auto_common()
    if not omit_com_google_auto_factory:
        com_google_auto_factory()
    if not omit_com_google_auto_value:
        com_google_auto_value()
    if not omit_com_google_auto_value_annotations:
        com_google_auto_value_annotations()
    if not omit_com_google_closure_stylesheets:
        com_google_closure_stylesheets()
    if not omit_com_google_code_findbugs_jsr305:
        com_google_code_findbugs_jsr305()
    if not omit_com_google_code_gson:
        com_google_code_gson()
    if not omit_com_google_common_html_types:
        com_google_common_html_types()
    if not omit_com_google_common_html_types_html_proto:
        com_google_common_html_types_html_proto()
    if not omit_com_google_dagger:
        com_google_dagger()
    if not omit_com_google_dagger_compiler:
        com_google_dagger_compiler()
    if not omit_com_google_dagger_producers:
        com_google_dagger_producers()
    if not omit_com_google_dagger_spi:
        com_google_dagger_spi()
    if not omit_com_google_errorprone_error_prone_annotations:
        com_google_errorprone_error_prone_annotations()
    if not omit_com_google_errorprone_javac_shaded:
        com_google_errorprone_javac_shaded()
    if not omit_com_google_guava:
        com_google_guava()
    if not omit_com_google_inject_extensions_guice_assistedinject:
        com_google_inject_extensions_guice_assistedinject()
    if not omit_com_google_inject_extensions_guice_multibindings:
        com_google_inject_extensions_guice_multibindings()
    if not omit_com_google_inject_guice:
        com_google_inject_guice()
    if not omit_com_google_java_format:
        com_google_java_format()
    if not omit_com_google_javascript_closure_compiler:
        com_google_javascript_closure_compiler()
    if not omit_com_google_javascript_closure_library:
        com_google_javascript_closure_library()
    if not omit_com_google_jsinterop_annotations:
        com_google_jsinterop_annotations()
    if not omit_com_google_protobuf:
        com_google_protobuf()
    if not omit_com_google_protobuf_js:
        com_google_protobuf_js()
    if not omit_com_google_template_soy:
        com_google_template_soy()
    if not omit_com_google_template_soy_jssrc:
        com_google_template_soy_jssrc()
    if not omit_com_ibm_icu_icu4j:
        com_ibm_icu_icu4j()
    if not omit_com_squareup_javapoet:
        com_squareup_javapoet()
    if not omit_fonts_noto_hinted_deb:
        fonts_noto_hinted_deb()
    if not omit_fonts_noto_mono_deb:
        fonts_noto_mono_deb()
    if not omit_javax_annotation_jsr250_api:
        javax_annotation_jsr250_api()
    if not omit_javax_inject:
        javax_inject()
    if not omit_libexpat_amd64_deb:
        libexpat_amd64_deb()
    if not omit_libfontconfig_amd64_deb:
        libfontconfig_amd64_deb()
    if not omit_libfreetype_amd64_deb:
        libfreetype_amd64_deb()
    if not omit_libpng_amd64_deb:
        libpng_amd64_deb()
    if not omit_org_json:
        org_json()
    if not omit_org_jsoup:
        org_jsoup()
    if not omit_org_ow2_asm:
        org_ow2_asm()
    if not omit_org_ow2_asm_analysis:
        org_ow2_asm_analysis()
    if not omit_org_ow2_asm_commons:
        org_ow2_asm_commons()
    if not omit_org_ow2_asm_tree:
        org_ow2_asm_tree()
    if not omit_org_ow2_asm_util:
        org_ow2_asm_util()
    if not omit_phantomjs:
        phantomjs()
# BEGIN_DECLARATIONS
def aopalliance():
    """Declares the @aopalliance repository (aopalliance 1.0 jar)."""
    java_import_external(
        name = "aopalliance",
        jar_sha256 = "0addec670fedcd3f113c5c8091d783280d23f75e3acb841b61a9cdb079376a08",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.jar",
            "https://repo1.maven.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.jar",
            "http://maven.ibiblio.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.jar",
        ],
        licenses = ["unencumbered"],  # public domain
    )
def args4j():
    """Declares the @args4j repository (args4j 2.0.26 jar)."""
    java_import_external(
        name = "args4j",
        jar_sha256 = "989bda2321ea073a03686e9d4437ea4928c72c99f993f9ca6fab24615f0771a4",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/args4j/args4j/2.0.26/args4j-2.0.26.jar",
            "https://repo1.maven.org/maven2/args4j/args4j/2.0.26/args4j-2.0.26.jar",
        ],
        licenses = ["notice"],  # MIT License
    )
def clang():
    """Declares the @clang repository (Clang/LLVM 3.8.0 binaries for
    Linux amd64 and macOS, selected per host platform)."""
    platform_http_file(
        name = "clang",
        amd64_urls = [
            "https://mirror.bazel.build/llvm.org/releases/3.8.0/clang+llvm-3.8.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz",
            "http://llvm.org/releases/3.8.0/clang+llvm-3.8.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz",
        ],
        amd64_sha256 = "3120c3055ea78bbbb6848510a2af70c68538b990cb0545bac8dad01df8ff69d7",
        macos_urls = [
            "https://mirror.bazel.build/llvm.org/releases/3.8.0/clang+llvm-3.8.0-x86_64-apple-darwin.tar.xz",
            "http://llvm.org/releases/3.8.0/clang+llvm-3.8.0-x86_64-apple-darwin.tar.xz",
        ],
        macos_sha256 = "e5a961e04b0e1738bbb5b824886a34932dc13b0af699d1fe16519d814d7b776f",
    )
def com_google_auto_common():
    """Declares the @com_google_auto_common repository (auto-common 0.7
    jar); depends on @com_google_guava and is visible to
    @com_google_auto_factory."""
    java_import_external(
        name = "com_google_auto_common",
        jar_sha256 = "eee75e0d1b1b8f31584dcbe25e7c30752545001b46673d007d468d75cf6b2c52",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/auto-common/0.7/auto-common-0.7.jar",
            "https://repo1.maven.org/maven2/com/google/auto/auto-common/0.7/auto-common-0.7.jar",
            "http://maven.ibiblio.org/maven2/com/google/auto/auto-common/0.7/auto-common-0.7.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        deps = ["@com_google_guava"],
        default_visibility = ["@com_google_auto_factory//:__pkg__"],
    )
def com_google_auto_factory():
    """Workspace macro: imports AutoFactory 1.0-beta5 (Apache 2.0).

    The generated BUILD file adds a :processor library, the
    :AutoFactoryProcessor java_plugin, and a :com_google_auto_factory
    library that exports the plugin to consumers.
    """
    java_import_external(
        name = "com_google_auto_factory",
        licenses = ["notice"],  # Apache 2.0
        jar_sha256 = "e6bed6aaa879f568449d735561a6a26a5a06f7662ed96ca88d27d2200a8dc6cf",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/factory/auto-factory/1.0-beta5/auto-factory-1.0-beta5.jar",
            "https://repo1.maven.org/maven2/com/google/auto/factory/auto-factory/1.0-beta5/auto-factory-1.0-beta5.jar",
        ],
        # Auto Factory ships its annotations, runtime, and processor in the same
        # jar. The generated code must link against this jar at runtime. So our
        # goal is to introduce as little bloat as possible.The only class we need
        # at runtime is com.google.auto.factory.internal.Preconditions. So we're
        # not going to specify the deps of this jar as part of the java_import().
        generated_rule_name = "jar",
        extra_build_file_content = "\n".join([
            "java_library(",
            " name = \"processor\",",
            " exports = [\":jar\"],",
            " runtime_deps = [",
            " \"@com_google_auto_common\",",
            " \"@com_google_auto_value\",",
            " \"@com_google_guava\",",
            " \"@com_google_java_format\",",
            " \"@com_squareup_javapoet\",",
            " \"@javax_inject\",",
            " ],",
            ")",
            "",
            "java_plugin(",
            " name = \"AutoFactoryProcessor\",",
            " output_licenses = [\"unencumbered\"],",
            " processor_class = \"com.google.auto.factory.processor.AutoFactoryProcessor\",",
            " generates_api = 1,",
            " tags = [\"annotation=com.google.auto.factory.AutoFactory;genclass=${package}.${outerclasses}@{className|${classname}Factory}\"],",
            " deps = [\":processor\"],",
            ")",
            "",
            "java_library(",
            " name = \"com_google_auto_factory\",",
            " exported_plugins = [\":AutoFactoryProcessor\"],",
            " exports = [",
            " \":jar\",",
            " \"@com_google_code_findbugs_jsr305\",",
            " \"@javax_annotation_jsr250_api\",",
            " \"@javax_inject\",",
            " ],",
            ")",
        ]),
    )
def com_google_auto_value():
    """Workspace macro: imports AutoValue 1.6 (Apache 2.0).

    The generated BUILD file wires up the AutoAnnotation, AutoOneOf, and
    AutoValue annotation processors and exports them from a single
    :com_google_auto_value library.
    """
    # AutoValue 1.6+ shades Guava, Auto Common, and JavaPoet. That's OK
    # because none of these jars become runtime dependencies.
    java_import_external(
        name = "com_google_auto_value",
        jar_sha256 = "fd811b92bb59ae8a4cf7eb9dedd208300f4ea2b6275d726e4df52d8334aaae9d",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/value/auto-value/1.6/auto-value-1.6.jar",
            "https://repo1.maven.org/maven2/com/google/auto/value/auto-value/1.6/auto-value-1.6.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        generated_rule_name = "processor",
        exports = ["@com_google_auto_value_annotations"],
        extra_build_file_content = "\n".join([
            "java_plugin(",
            " name = \"AutoAnnotationProcessor\",",
            " output_licenses = [\"unencumbered\"],",
            " processor_class = \"com.google.auto.value.processor.AutoAnnotationProcessor\",",
            " tags = [\"annotation=com.google.auto.value.AutoAnnotation;genclass=${package}.AutoAnnotation_${outerclasses}${classname}_${methodname}\"],",
            " deps = [\":processor\"],",
            ")",
            "",
            "java_plugin(",
            " name = \"AutoOneOfProcessor\",",
            " output_licenses = [\"unencumbered\"],",
            " processor_class = \"com.google.auto.value.processor.AutoOneOfProcessor\",",
            " tags = [\"annotation=com.google.auto.value.AutoValue;genclass=${package}.AutoOneOf_${outerclasses}${classname}\"],",
            " deps = [\":processor\"],",
            ")",
            "",
            "java_plugin(",
            " name = \"AutoValueProcessor\",",
            " output_licenses = [\"unencumbered\"],",
            " processor_class = \"com.google.auto.value.processor.AutoValueProcessor\",",
            " tags = [\"annotation=com.google.auto.value.AutoValue;genclass=${package}.AutoValue_${outerclasses}${classname}\"],",
            " deps = [\":processor\"],",
            ")",
            "",
            "java_library(",
            " name = \"com_google_auto_value\",",
            " exported_plugins = [",
            " \":AutoAnnotationProcessor\",",
            " \":AutoOneOfProcessor\",",
            " \":AutoValueProcessor\",",
            " ],",
            " exports = [\"@com_google_auto_value_annotations\"],",
            ")",
        ]),
    )
def com_google_auto_value_annotations():
    """Workspace macro: imports the AutoValue 1.6 annotations-only jar (compile-time only, neverlink)."""
    # It should be sufficient to simply depend on @com_google_auto_value.
    java_import_external(
        name = "com_google_auto_value_annotations",
        jar_sha256 = "d095936c432f2afc671beaab67433e7cef50bba4a861b77b9c46561b801fae69",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.6/auto-value-annotations-1.6.jar",
            "https://repo1.maven.org/maven2/com/google/auto/value/auto-value-annotations/1.6/auto-value-annotations-1.6.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        neverlink = True,
        default_visibility = ["@com_google_auto_value//:__pkg__"],
    )
def com_google_closure_stylesheets():
    """Workspace macro: imports Closure Stylesheets 1.5.0 (Apache 2.0) plus a
    :ClosureCommandLineCompiler java_binary wrapper."""
    java_import_external(
        name = "com_google_closure_stylesheets",
        licenses = ["notice"],  # Apache 2.0
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/closure-stylesheets/closure-stylesheets/1.5.0/closure-stylesheets-1.5.0.jar",
            "https://repo1.maven.org/maven2/com/google/closure-stylesheets/closure-stylesheets/1.5.0/closure-stylesheets-1.5.0.jar",
        ],
        jar_sha256 = "fef768d4f7cead3c0c0783891118e7d3d6ecf17a3093557891f583d842362e2b",
        deps = [
            "@args4j",
            "@com_google_javascript_closure_compiler",
            "@com_google_code_gson",
            "@com_google_guava",
            "@com_google_code_findbugs_jsr305",
        ],
        extra_build_file_content = "\n".join([
            "java_binary(",
            " name = \"ClosureCommandLineCompiler\",",
            " main_class = \"com.google.common.css.compiler.commandline.ClosureCommandLineCompiler\",",
            " output_licenses = [\"unencumbered\"],",
            " runtime_deps = [\":com_google_closure_stylesheets\"],",
            ")",
        ]),
    )
def com_google_code_findbugs_jsr305():
    """Workspace macro: imports the JSR-305 annotations jar 2.0.3 (BSD 3-clause)."""
    java_import_external(
        name = "com_google_code_findbugs_jsr305",
        licenses = ["notice"],  # BSD 3-clause
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/code/findbugs/jsr305/2.0.3/jsr305-2.0.3.jar",
            "https://repo1.maven.org/maven2/com/google/code/findbugs/jsr305/2.0.3/jsr305-2.0.3.jar",
            "http://maven.ibiblio.org/maven2/com/google/code/findbugs/jsr305/2.0.3/jsr305-2.0.3.jar",
        ],
        jar_sha256 = "bec0b24dcb23f9670172724826584802b80ae6cbdaba03bdebdef9327b962f6a",
    )
def com_google_code_gson():
    """Workspace macro: imports Gson 2.7 (Apache 2.0)."""
    java_import_external(
        name = "com_google_code_gson",
        licenses = ["notice"],  # Apache 2.0
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/code/gson/gson/2.7/gson-2.7.jar",
            "https://repo1.maven.org/maven2/com/google/code/gson/gson/2.7/gson-2.7.jar",
            "http://maven.ibiblio.org/maven2/com/google/code/gson/gson/2.7/gson-2.7.jar",
        ],
        jar_sha256 = "2d43eb5ea9e133d2ee2405cc14f5ee08951b8361302fdd93494a3a997b508d32",
        deps = ["@com_google_code_findbugs_jsr305"],
    )
def com_google_common_html_types():
    """Workspace macro: imports Google safe-html-types 1.0.7 (Apache 2.0)."""
    java_import_external(
        name = "com_google_common_html_types",
        licenses = ["notice"],  # Apache 2.0
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/common/html/types/types/1.0.7/types-1.0.7.jar",
            "https://repo1.maven.org/maven2/com/google/common/html/types/types/1.0.7/types-1.0.7.jar",
        ],
        jar_sha256 = "78b6baa2ecc56435dc0ae88c57f442bd2d07127cb50424d400441ddccc45ea24",
        deps = [
            "@com_google_code_findbugs_jsr305",
            "@com_google_errorprone_error_prone_annotations",
            "@com_google_guava",
            "@com_google_jsinterop_annotations",
            "@com_google_protobuf//:protobuf_java",
            "@javax_annotation_jsr250_api",
        ],
    )
def com_google_common_html_types_html_proto():
    """Workspace macro: fetches the safe-html-types html.proto source file (release 1.0.5)."""
    http_file(
        name = "com_google_common_html_types_html_proto",
        sha256 = "6ece202f11574e37d0c31d9cf2e9e11a0dbc9218766d50d211059ebd495b49c3",
        urls = [
            "https://mirror.bazel.build/raw.githubusercontent.com/google/safe-html-types/release-1.0.5/proto/src/main/protobuf/webutil/html/types/proto/html.proto",
            "https://raw.githubusercontent.com/google/safe-html-types/release-1.0.5/proto/src/main/protobuf/webutil/html/types/proto/html.proto",
        ],
    )
def com_google_dagger():
    """Workspace macro: imports Dagger 2.14.1 runtime (Apache 2.0) and wraps it in a
    :com_google_dagger library that auto-applies the ComponentProcessor plugin."""
    java_import_external(
        name = "com_google_dagger",
        jar_sha256 = "374cfee26c9c93f44caa1946583c9edc135bb9a42838476522551ec46aa55c7c",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/dagger/dagger/2.14.1/dagger-2.14.1.jar",
            "https://repo1.maven.org/maven2/com/google/dagger/dagger/2.14.1/dagger-2.14.1.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        deps = ["@javax_inject"],
        generated_rule_name = "runtime",
        extra_build_file_content = "\n".join([
            "java_library(",
            " name = \"com_google_dagger\",",
            " exported_plugins = [\"@com_google_dagger_compiler//:ComponentProcessor\"],",
            " exports = [",
            " \":runtime\",",
            " \"@javax_inject\",",
            " ],",
            ")",
        ]),
    )
def com_google_dagger_compiler():
    """Workspace macro: imports the Dagger 2.14.1 annotation-processor jar (Apache 2.0)
    and defines the :ComponentProcessor java_plugin."""
    java_import_external(
        name = "com_google_dagger_compiler",
        jar_sha256 = "ff16d55273e375349537fc82292b00de04d8a2caca2d4aa6c642692b1a68194d",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/dagger/dagger-compiler/2.14.1/dagger-compiler-2.14.1.jar",
            "https://repo1.maven.org/maven2/com/google/dagger/dagger-compiler/2.14.1/dagger-compiler-2.14.1.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        deps = [
            "@com_google_code_findbugs_jsr305",
            "@com_google_dagger//:runtime",
            "@com_google_dagger_producers//:runtime",
            "@com_google_dagger_spi",
            "@com_google_guava",
            "@com_google_java_format",
            "@com_squareup_javapoet",
        ],
        extra_build_file_content = "\n".join([
            "java_plugin(",
            " name = \"ComponentProcessor\",",
            " output_licenses = [\"unencumbered\"],",
            " processor_class = \"dagger.internal.codegen.ComponentProcessor\",",
            " generates_api = 1,",
            " tags = [",
            " \"annotation=dagger.Component;genclass=${package}.Dagger${outerclasses}${classname}\",",
            " \"annotation=dagger.producers.ProductionComponent;genclass=${package}.Dagger${outerclasses}${classname}\",",
            " ],",
            " deps = [\":com_google_dagger_compiler\"],",
            ")",
        ]),
    )
def com_google_dagger_producers():
    """Workspace macro: imports Dagger Producers 2.14.1 (Apache 2.0) with a wrapper
    library that auto-applies the ComponentProcessor plugin."""
    java_import_external(
        name = "com_google_dagger_producers",
        jar_sha256 = "96f950bc4b94d013b0c538632a4bc630f33eda8b01f63ae752b76c5e48783859",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/dagger/dagger-producers/2.14.1/dagger-producers-2.14.1.jar",
            "https://repo1.maven.org/maven2/com/google/dagger/dagger-producers/2.14.1/dagger-producers-2.14.1.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        deps = [
            "@com_google_dagger//:runtime",
            "@com_google_guava",
        ],
        generated_rule_name = "runtime",
        extra_build_file_content = "\n".join([
            "java_library(",
            " name = \"com_google_dagger_producers\",",
            " exported_plugins = [\"@com_google_dagger_compiler//:ComponentProcessor\"],",
            " exports = [",
            " \":runtime\",",
            " \"@com_google_dagger//:runtime\",",
            " \"@javax_inject\",",
            " ],",
            ")",
        ]),
    )
def com_google_dagger_spi():
    """Workspace macro: imports the Dagger SPI 2.14.1 jar (Apache 2.0)."""
    java_import_external(
        name = "com_google_dagger_spi",
        jar_sha256 = "6a20d6c6620fefe50747e9e910e0d0c178cf39d76b67ccffb505ac9a167302cb",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/dagger/dagger-spi/2.14.1/dagger-spi-2.14.1.jar",
            "https://repo1.maven.org/maven2/com/google/dagger/dagger-spi/2.14.1/dagger-spi-2.14.1.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
    )
def com_google_errorprone_error_prone_annotations():
    """Workspace macro: imports Error Prone annotations 2.1.3 (Apache 2.0)."""
    java_import_external(
        name = "com_google_errorprone_error_prone_annotations",
        licenses = ["notice"],  # Apache 2.0
        jar_sha256 = "03d0329547c13da9e17c634d1049ea2ead093925e290567e1a364fd6b1fc7ff8",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.1.3/error_prone_annotations-2.1.3.jar",
            "https://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.1.3/error_prone_annotations-2.1.3.jar",
        ],
    )
def com_google_errorprone_javac_shaded():
    """Workspace macro: imports the shaded javac jar used by Error Prone (GPLv2 + Classpath Exception)."""
    # Please note that, while this is GPL, the output of programs that use
    # this library, e.g. annotation processors, should be unencumbered.
    java_import_external(
        name = "com_google_errorprone_javac_shaded",
        # GNU General Public License, version 2, with the Classpath Exception
        # http://openjdk.java.net/legal/gplv2+ce.html
        licenses = ["restricted"],
        jar_sha256 = "65bfccf60986c47fbc17c9ebab0be626afc41741e0a6ec7109e0768817a36f30",
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/errorprone/javac-shaded/9-dev-r4023-3/javac-shaded-9-dev-r4023-3.jar",
            "https://repo1.maven.org/maven2/com/google/errorprone/javac-shaded/9-dev-r4023-3/javac-shaded-9-dev-r4023-3.jar",
        ],
    )
def com_google_guava():
    """Workspace macro: imports Guava 24.1-jre (Apache 2.0); re-exports JSR-305 and
    Error Prone annotations so Guava's API signatures resolve for consumers."""
    java_import_external(
        name = "com_google_guava",
        licenses = ["notice"],  # Apache 2.0
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/guava/guava/24.1-jre/guava-24.1-jre.jar",
            "https://repo1.maven.org/maven2/com/google/guava/guava/24.1-jre/guava-24.1-jre.jar",
        ],
        jar_sha256 = "31bfe27bdf9cba00cb4f3691136d3bc7847dfc87bfe772ca7a9eb68ff31d79f5",
        exports = [
            "@com_google_code_findbugs_jsr305",
            "@com_google_errorprone_error_prone_annotations",
        ],
    )
def com_google_inject_extensions_guice_assistedinject():
    """Workspace macro: imports the Guice AssistedInject extension 4.1.0 (Apache 2.0)."""
    java_import_external(
        name = "com_google_inject_extensions_guice_assistedinject",
        licenses = ["notice"],  # Apache 2.0
        jar_urls = [
            "https://mirror.bazel.build/repo1.maven.org/maven2/com/google/inject/extensions/guice-assistedinject/4.1.0/guice-assistedinject-4.1.0.jar",
            "https://repo1.maven.org/maven2/com/google/inject/extensions/guice-assistedinject/4.1.0/guice-assistedinject-4.1.0.jar",
            "http://maven.ibiblio.org/maven2/com/google/inject/extensions/guice-assistedinject/4.1.0/guice-assistedinject-4.1.0.jar",
        ],
        jar_sha256 = "663728123fb9a6b79ea39ae289e5d56b4113e1b8e9413eb792f91e53a6dd5868",
        deps = [
            "@com_google_guava",
            "@com_google_inject_guice",
            "@javax_inject",
        ],
    )
def com_google_inject_extensions_guice_multibindings():
java_import_external(
name = "com_google_inject_extensions_guice_multibindings",
| |
# -*- coding: utf-8 -*-
'''
###############################################################################
#
# This code is used to parcellate the cortical surface from dMRI information
# (tractograms in nii.gz files) using the Mutual Nearest Neighbor Condition
#
###############################################################################
# <NAME>
# Version 1.0
# Inria Sophia Antipolis
# University of Nice Sophia Antipolis
# <EMAIL>
# <EMAIL>
# If you use this code, please acknowledge <NAME>.
# The best single reference is:
# <NAME>, <NAME> and <NAME>, “Cortical Surface
# Parcellation via dMRI Using Mutual Nearest Neighbor Condition”, International
# Symposium on Biomedical Imaging: From Nano to Macro, Prague, Czech Republic.
# pp. 903-906, April 2016.
# Author: <NAME> 2015
# Any questions, please contact <EMAIL>
###############################################################################
'''
from . import Region_preparation as RP
from . import Prepare_tractogram as PT
from . import Similarity_Measures
import numpy as np
from .Python2Vtk import WritePython2Vtk
from copy import deepcopy
import os
from scipy.stats import variation as cv
import time
from .util import mat2cond_index
def Add_void(Parc, Reg, Regions, Excluded_seeds, Label_non_excluded):
    """Compact region labels to 0..N-1 and re-insert void/excluded seeds.

    Relabels ``Regions`` in place so the region labels form a contiguous
    range, then (when needed) maps the labels back onto void tractograms
    (shifting labels to {1..N}) and onto excluded seeds.

    Args:
        Parc: parcellation-data object; ``Parc.zero_tracto`` lists the void
            tractograms.
        Reg: region-processing object (unused here; kept for the caller's
            signature).
        Regions (numpy.ndarray): per-seed region labels, modified in place.
        Excluded_seeds: seeds excluded from the parcellation.
        Label_non_excluded: indices of the seeds that were kept.

    Returns:
        tuple: (labels for all seeds, number of regions).
    """
    region_labels = np.unique(Regions)
    NBR_REGIONS = len(region_labels)
    SizeRegion = np.zeros(NBR_REGIONS)
    # np.unique returns labels in ascending order, so region_labels[idx] >= idx
    # and the in-place relabeling below never clobbers a not-yet-seen label.
    for idx, label in enumerate(region_labels):
        members = np.where(np.array(Regions) == label)[0]
        SizeRegion[idx] = len(members)
        Regions[np.array(members)] = idx
    if len(Parc.zero_tracto) > 0:
        # propagate labels onto the void tractograms, then shift to {1..N}
        RX = RP.Add_zero_tracto_label(Parc, Regions)
        RX = RX + 1
    else:
        RX = Regions
    if len(Excluded_seeds) > 0:
        RX = RP.Excluded_label(Excluded_seeds, RX, Label_non_excluded)
    return RX, NBR_REGIONS
def Merge_till_R(Parc, SimilarityMeasures, Reg, SizeRegion, Regions, mesh,
                 R_coef):
    """Merge the smallest region into its most similar neighbor, repeatedly,
    until only ``R_coef`` regions remain.

    Args:
        Parc: parcellation-data object passed through to the similarity measure.
        SimilarityMeasures: callable(Parc, inside_seeds, outside_seeds) -> float.
        Reg: region-processing object (unused in this function).
        SizeRegion (numpy.ndarray): size of each region; recomputed each pass.
        Regions: per-seed region labels.
        mesh: surface mesh used to find neighboring regions.
        R_coef (int): target number of regions.

    Returns:
        numpy.ndarray: per-seed labels after merging, relabeled to 0..R-1.
    """
    Un = np.unique(Regions)
    # the unique labels
    RegionsX = np.array(Regions)
    while len(Un) > R_coef:
        # pick the currently smallest region as the merge source
        Reg_small = SizeRegion.argmin()
        # sth = best similarity so far; X = best merge target
        # (defaults to self-merge when no better neighbor is found)
        sth, X = 0, Reg_small
        insideregion, connected_regions = RP.Neighbor_region(RegionsX,
                                                             Reg_small, mesh)
        # get the neighbors and seeds of the small region
        for j in range(len(connected_regions)):
            # loop over all neighboring regions
            # NOTE(review): label 0 is skipped here — presumably a
            # background/void label; confirm against RP.Neighbor_region.
            if connected_regions[j] != 0:
                outeregion = np.where(RegionsX == connected_regions[j])[0]
                S_mean = SimilarityMeasures(Parc, insideregion, outeregion)
                if (S_mean > sth):
                    # neighbor j is the most similar region seen so far
                    sth = S_mean
                    X = connected_regions[j]
        # merge the small region into the chosen neighbor X
        RegionsX2 = np.array(RegionsX)
        RegionsX2[np.array(insideregion)] = X
        Un = np.unique(RegionsX2)  # new unique labels
        nbr_r = len(Un)  # new number of regions
        # recompute region sizes and relabel compactly to 0..nbr_r-1
        SizeRegion = np.zeros(nbr_r)
        RegionX_ = np.zeros(len(RegionsX2))
        for i in range(nbr_r):  # get the size of the new regions
            ind = np.where(RegionsX2 == Un[i])[0]
            SizeRegion[i] = len(ind)
            RegionX_[np.array(ind)] = i
        RegionsX = RegionX_
    return RegionsX  # label of seeds after merging small regions with big one
def Read_from_SM(Parc, ind):
    """Read pairwise similarity values for the seeds in ``ind``.

    Values come from the condensed similarity vector
    ``Parc.Similarity_Matrix`` of length n*(n-1)/2. Each unordered pair is
    read twice — once as (i, j) and once as (j, i).

    Returns:
        numpy.ndarray: the collected similarity values.
    """
    values = [
        Parc.Similarity_Matrix[mat2cond_index(Parc.nbr_seeds, a, b)]
        for a in ind
        for b in ind
        if a != b
    ]
    return np.array(values)
def Statistics_SM(Parc, Regions):
    """Gather the similarity values of all seed pairs inside each region.

    Args:
        Parc: parcellation-data object holding the condensed similarity vector.
        Regions (numpy.ndarray): per-seed region labels.

    Returns:
        numpy.ndarray: concatenated pairwise similarity values, region by region.
    """
    Reg_SM = []
    for label in np.unique(Regions):
        ind = np.array(np.where(Regions == label)[0])
        pair_values = Read_from_SM(Parc, ind)
        if len(pair_values):
            Reg_SM.extend(pair_values)
    return np.array(Reg_SM)
class Parcellation():
# main class to parcellate the cortical surface
    def __init__(self, path_tractogram, Prefix_name, save_path, nodif_mask,
                 VERBOSE=False, merge=0, write_data=True):
        """Initialize the parcellation run: store paths and bookkeeping state.

        Args:
            path_tractogram: folder containing the tractograms (.nii.gz).
            Prefix_name: tractogram file prefix (Prefix_name_x_y_z.nii.gz).
            save_path: folder that will contain the results.
            nodif_mask: path to the brain mask extracted from the b0 image.
            VERBOSE: enable terminal display of intermediate results.
            merge: type of post-processing applied after the MNN parcellation.
            write_data: if True, result files are written to disk.
        """
        # initialize; prepare the paths
        self.path_tractogram = path_tractogram
        # path tractogram's location
        self.Prefix_name = Prefix_name
        # prefix Prefix_name_x_y_z.nii.gz
        self.save_path = save_path
        # folder that will contain the results
        self.nodif_mask = nodif_mask
        # path to mask of the brain from the b0 image
        self.Time = []
        # array contains the execution time
        self.save_results_path = '_'
        # folder of each execution
        self.verbose = VERBOSE
        # enable terminal display of the results
        self.merge = merge
        # type of postprocessing (after the MNN parcellation)
        self.Labels = []
        self.write_data = write_data
        # NOTE(review): presumably a coefficient-of-variation threshold;
        # np.Inf disables it by default — confirm where cvth is consumed.
        self.cvth = np.Inf
def PrintResults(self, Data):
# print the different results in the terminal
if self.verbose: # The result is saved in a dictionary
for i in Data.keys():
print(i, ' = ', Data[i]) # print the dictionary
def Write2file_results(self, Similarity_Measure, nbr_r, t, mean_v,
std_v, R):
# writesome results of the regions
# path to save, Similarity measure, nbr of regions, time of execution,
# mean values of SM, std of SM, stopping condition R.
if self.write_data:
resultfile = open(self.save_results_path + '/results.txt', 'w')
resultfile.write('Similarity Measure:\t' + Similarity_Measure +
'\t R_th \t' + str(self.region_th) +
'\t R:=' + str(R)+'\n')
hd = 'nbr i \t nbr R \t t(min) \t mean SM \t STD SM: \n'
resultfile.write(hd)
for i in range(len(nbr_r)):
nbr = "%03d" % nbr_r[i]
resultfile.write(str(i + 1) + '\t' + nbr + '\t' +
str(t[i]) + '\t' + str(mean_v[i]) +
'\t'+str(std_v[i]) + '\n')
resultfile.close()
def Write2file_zero_tracto(self):
''' writesome results of the regions
path to save, Similarity measure, nbr of regions, time of execution,
mean values of SM, std of SM, stopping condition R.'''
if self.write_data:
if len(self.Parc.zero_tracto) > 0:
resultfile = open(self.save_path + '/zero_tractogram.txt', 'w')
resultfile.write('index_zero_tracto\t' +
'index_replacement'+'\n')
zero_t, repla_c = self.Parc.zero_tracto, self.Parc.replacement
for i in range(len(zero_t)):
st = str(zero_t[i]) + '\t' + str(repla_c[zero_t[i]]) + '\n'
resultfile.write(st)
resultfile.close()
    def PrepareData(self, coordinate, Connectivity, Excluded_seeds):
        """Build the mesh, drop excluded seeds, and load the tractograms.

        Args:
            coordinate (numpy.ndarray): seed coordinates, one row per vertex.
            Connectivity (numpy.ndarray): tess (mesh) connectivity matrix.
            Excluded_seeds: seed indices to exclude from the parcellation.
        """
        all_Seeds = np.array([i for i in range(len(coordinate[:, 0]))])
        # they will be removed from the coordinates
        self.nbr_seedsX = len(all_Seeds)  # number of vertices
        self.Label_non_excluded = list(set(all_Seeds))
        # Mesh_back_up = RP.Mesh(coordinate,[],[], Connectivity)
        if len(Excluded_seeds) > 0:
            # if some seeds are excluded from the parcellation, drop them
            # from the coordinates and the tess connectivity matrix
            self.Label_non_excluded = list(set(all_Seeds) -
                                           set(Excluded_seeds))
            self.Label_non_excluded = np.array(self.Label_non_excluded)
            Coord_non_excluded = coordinate[self.Label_non_excluded, :]
            Connectivity = Connectivity[self.Label_non_excluded, :]
            Connectivity = Connectivity[:, self.Label_non_excluded]
            coordinate = Coord_non_excluded
        self.nbr = len(coordinate[:, 0])
        # number of vertices (after exclusion)
        self.Connectivity_X = deepcopy(Connectivity)
        # this mesh connectivity will not be modified; used at the end of the
        # code to merge the small regions
        self.mesh = RP.Mesh(coordinate, [], [], Connectivity)
        # create an object containing the coordinate and mesh connectivity
        # Prepare the parcellation by setting the different paths
        printData = {}
        printData['Loading tractograms '] = str(self.nbr_seedsX)
        self.PrintResults(printData)
        self.Parc = PT.Parcellation_data(self.path_tractogram,
                                         self.Prefix_name, self.mesh,
                                         self.nodif_mask)
        # Parc.Repeated_Coordinate(coordinate)
        self.Parc.Detect_void_tracto()
        # detect zero tracto (tractogram that has sum < 3*max(tractogram))
        if len(self.Parc.zero_tracto) > 0:  # if there are void tractograms
            self.Parc.Replace_void_tracto()
            # replace void tractograms by the nearest neighbor non void
            self.mesh.Remove_void_tracto(np.array(self.Parc.zero_tracto),
                                         np.array(self.Parc.nonzero_tracto))
def data2bprinted(self, Excluded_seeds, nbr_seeds):
# This function is used to disp the input info
# This dictionary is used to save the different results
printData = {}
printData['# Excluded seeds:'] = len(Excluded_seeds)
printData['Path to tractogram:'] = self.path_tractogram
printData['Prefix name:'] = self.Prefix_name
printData['Path to nodif mask:'] = self.nodif_mask
printData['Save path:'] = self.save_path
n_zero_t = len(self.Parc.zero_tracto)
printData['# Tracto, # Void tracto'] = nbr_seeds, n_zero_t
return printData
def result2bprinted(self, R, SM, nbr_iteration):
# This function is used to print info of the parcellation
printData = {}
printData[' # Region '] = R
printData['Similarity Measure'] = SM
printData['# Iterations'] = nbr_iteration
printData['Stop merging at'] = self.region_th
return printData
    def find_mergingcondidate(self, NBR_REGIONS, Regions, SimilarityMeasure):
        """For each region, find its most-similar neighboring region(s).

        Args:
            NBR_REGIONS (int): number of regions.
            Regions: per-seed region labels.
            SimilarityMeasure: callable(Parc, inside_seeds, outside_seeds) -> float.

        Returns:
            list[list[int]]: for region i, the labels of the neighbor(s) with
            the maximal similarity (all ties are kept); [i] itself when the
            region has no neighbors.
        """
        # dictionary used to display results
        Merg_Condidates = []
        # vector contains the Mutual nearest N condidates
        # between all pairs of regions
        for i in range(NBR_REGIONS):  # loop over the regions
            insideregion, connected_regions = RP.Neighbor_region(Regions, i,
                                                                 self.mesh)
            nbr_connected = len(connected_regions)
            if nbr_connected > 0:
                S = np.zeros(nbr_connected)
                for l in range(nbr_connected):
                    # similarity between region i and its l-th neighbor
                    outeregion = np.where(np.array(Regions) ==
                                          connected_regions[l])[0]
                    S[l] = SimilarityMeasure(self.Parc, insideregion,
                                             outeregion)
                # keep every neighbor achieving the maximal similarity
                Reg_cond = list(np.where(S == S.max())[0])
                Reg_list = [connected_regions[u] for u in Reg_cond]
                Merg_Condidates.append(Reg_list)
            else:
                # if no neighbor, region i is merged with itself.
                Merg_Condidates.append([i])
        return Merg_Condidates
def merging_step(self, region_labels, Merg_Condidates, Regions):
# this function is used to merge condiate regions
RegionsX = np.array(Regions)
for i in range(len(region_labels)):
# check if the mutual nearest neighbor is valid
Reg_cond = Merg_Condidates[i]
# candidates of merging to region i
a = np.where(np.array(Regions) == region_labels[i])[0]
# get seeds with label region_labels[i]
if len(a) < self.region_th:
for u in Reg_cond:
Reg_list = Merg_Condidates[u]
if i in Reg_list:
# if region i is | |
y x x)
"""
with h5py.File(path_form + str(file_idx) + ".hdf5","r") as input_file:
if to_8bit:
trench_array_list = np.empty_like(input_file[key], dtype=np.uint8)
for tr in range(trench_array_list.shape[0]):
for t in range(trench_array_list.shape[1]):
trench_array_list[tr,t,:,:] = self.to_8bit(input_file[key][tr,t,:,:])
else:
trench_array_list = input_file[key][:]
return trench_array_list
def save_masks_to_hdf(self, file_idx, final_masks_future):
"""Save segmented data to hdf5 archives.
Args:
file_idx (int): file index of the hdf5 kymograph
final_masks_future (numpy.ndarray, int): (tr x t x y x x) stack of segmented trenches
Returns:
"Done"
"""
with h5py.File(self.phasesegmentationpath + "/segmentation_" + str(file_idx) + ".hdf5", "w") as h5pyfile:
hdf5_dataset = h5pyfile.create_dataset("data", data=final_masks_future, dtype=np.uint8)
return "Done"
def generate_trench_loading(self, file_idx):
"""Measure trench loading for all trenches in file.
Args:
file_idx (int): file index of the hdf5 kymograph
Returns:
trench_output (numpy.ndarray): (tr x t) array of trench laoding
"""
# Load file
with h5py.File(self.kymographpath + "/kymograph_" + str(file_idx) + ".hdf5","r") as input_file:
input_data = input_file[self.seg_channel]
trench_output = []
# Measure loading for each trench
for trench_idx in range(input_data.shape[0]):
trench_array = input_data[trench_idx]
trench_loading_array = self.measure_trench_loading(trench_array)
trench_output.append(trench_loading_array[np.newaxis])
trench_output = np.concatenate(trench_output,axis=0)
return trench_output
    def dask_segment(self, dask_controller, file_list=None, overwrite=True):
        """Segment kymographs in parallel using Dask.

        Submits a six-stage pipeline per kymograph file:
        load -> trench masks/median filter -> watershed mask ->
        watershed maxima -> connected components -> save masks.

        Args:
            dask_controller (trenchripper.dask_controller): Helper object to handle dask jobs
            file_list (list, int): Subset kymograph files
            overwrite (bool): Whether to overwrite the output directory
        Returns:
            None
        """
        # Make/overwrite output directory
        writedir(self.phasesegmentationpath, overwrite=overwrite)
        dask_controller.futures = {}
        if file_list is None:
            file_list = self.meta_handle.read_df("kymograph", read_metadata=True)["File Index"].unique().tolist()
        num_file_jobs = len(file_list)
        # Send dask jobs with increasing priority (to reduce memory usage)
        # NOTE(review): priorities are random draws scaled by a per-stage
        # factor (0.1, 0.4, 1.6, 6.4, 25.6, 51.2), so downstream stages tend
        # to run first and intermediates get drained — confirm intent.
        random_priorities = np.random.uniform(size=(num_file_jobs, 6))
        for k, file_idx in enumerate(file_list):
            # Load trenches (converted to 8-bit)
            future = dask_controller.daskclient.submit(self.load_trench_array_list, self.kymographpath + "/kymograph_", file_idx, self.seg_channel, True, retries=1, priority=random_priorities[k, 0]*0.1)
            # Find trench masks and median filter images
            future = dask_controller.daskclient.submit(self.find_trench_masks_and_median_filtered_list, future, retries=1, priority=random_priorities[k, 1]*0.4)
            # Get cell regions
            future = dask_controller.daskclient.submit(self.find_watershed_mask_list, future, retries=1, priority=random_priorities[k, 2]*1.6)
            # Get watershed seeds
            future = dask_controller.daskclient.submit(self.find_watershed_maxima_list, future, retries=1, priority=random_priorities[k, 3]*6.4)
            # Get connected components
            future = dask_controller.daskclient.submit(self.find_conn_comp_list, future, retries=1, priority=random_priorities[k, 4]*25.6)
            # Save to file
            future = dask_controller.daskclient.submit(self.save_masks_to_hdf, file_idx, future, retries=1, priority=random_priorities[k, 5]*51.2)
            dask_controller.futures["Segmentation: " + str(file_idx)] = future
    def dask_characterize_trench_loading(self, dask_controller, file_list=None):
        """Measure trench loading for the whole dataset in parallel.

        Args:
            dask_controller (trenchripper.dask_controller): Helper object to handle dask jobs
            file_list (list, int): Subset kymograph files
        Returns:
            None
        """
        dask_controller.futures = {}
        dask_controller.futures["Trench Loading"] = []
        if file_list is None:
            file_list = self.meta_handle.read_df("kymograph", read_metadata=True)["File Index"].unique().tolist()
        num_file_jobs = len(file_list)
        random_priorities = np.random.uniform(size=(num_file_jobs, 2))
        for k, file_idx in enumerate(file_list):
            # Load data
            future = dask_controller.daskclient.submit(self.load_trench_array_list, self.kymographpath + "/kymograph_", file_idx, self.seg_channel, True, retries=1, priority=random_priorities[k, 0]*0.1)
            # Measure loading
            # NOTE(review): this reuses random_priorities[k, 0] although
            # column 1 exists, and it calls measure_trench_loading on the
            # whole (tr x t x y x x) stack while generate_trench_loading
            # applies it per trench — confirm both are intended.
            future = dask_controller.daskclient.submit(self.measure_trench_loading, future, retries=1, priority=random_priorities[k, 0]*0.8)
            # Save to futures
            dask_controller.futures["Trench Loading"].append(future)
    def dask_postprocess_trench_loading(self, dask_controller):
        """Add trench loading to metadata.

        Gathers the futures produced by dask_characterize_trench_loading,
        concatenates them into one array, and writes it into the kymograph
        metadata frame as a "Trench Loading" column.

        Args:
            dask_controller (trenchripper.dask_controller): Helper object to handle dask jobs
        Returns:
            None
        """
        # Concatenate future results (order follows the submitted file list)
        trench_loadings = np.concatenate(dask_controller.daskclient.gather(dask_controller.futures["Trench Loading"]), axis=0)
        # Add to metadata frame
        kymodf = self.meta_handle.read_df("kymograph", read_metadata=True)
        kymodf["Trench Loading"] = trench_loadings
        # Save
        self.meta_handle.write_df("kymograph", kymodf, metadata=kymodf.metadata)
def props_to_dict(self, regionprops, props_to_grab):
"""Select properties from skimage regionprops object and turn into
dictionary.
Args:
regionprops (skimage.regionprops): regionprops objects for each cell
props_to_grab(list, str): metrics to extract from regionprops data
Returns:
props_dict(dict): dictionary of morphology metrics
"""
props_dict = {}
for prop in props_to_grab:
props_dict[prop] = list(map(lambda x: x[prop], regionprops))
del regionprops
return props_dict
    def dask_extract_cell_data(self, dask_controller, props_to_grab, file_list=None, overwrite=True):
        """Extract cell morphology measurements.

        Args:
            dask_controller (trenchripper.dask_controller): Helper object to handle dask jobs
            props_to_grab(list, str): metrics to extract from regionprops data
            file_list (list, int): Subset kymograph files
            overwrite (bool): Whether to overwrite the output directory
        Returns:
            None
        """
        dask_controller.futures = {}
        # write directory
        writedir(self.phasedatapath, overwrite=overwrite)
        # load metadata
        kymodf = self.meta_handle.read_df("kymograph", read_metadata=True)
        metadata = kymodf.metadata
        globaldf = self.meta_handle.read_df("global", read_metadata=True)
        # get pixel scaling so that measurements are in micrometers
        # NOTE(review): pixel_microns is read from the kymograph metadata here,
        # but dask_extract_cell_data_mask reads it from the global metadata —
        # confirm the two sources agree.
        pixel_scaling = metadata["pixel_microns"]
        if file_list is None:
            file_list = kymodf["File Index"].unique().tolist()
        num_file_jobs = len(file_list)
        # index according to how the final cell data should be organized
        kymodf = kymodf.reset_index()
        kymodf = kymodf.set_index(["File Index", "File Trench Index", "timepoints"])
        random_priorities = np.random.uniform(size=(num_file_jobs, 2))
        for k, file_idx in enumerate(file_list):
            segmented_masks_file = "segmentation_" + str(file_idx) + ".hdf5"
            # only submit a job when the segmentation output for this file exists
            if segmented_masks_file in os.listdir(self.phasesegmentationpath):
                times = kymodf.loc[file_idx, "time (s)"]
                global_trench_indices = kymodf.loc[file_idx, "trenchid"]
                trench_loadings = kymodf.loc[file_idx, "Trench Loading"]
                fov_idx = kymodf.loc[file_idx, "fov"]
                dask_controller.futures["Cell Props %d: " % file_idx] = dask_controller.daskclient.submit(self.extract_cell_data, file_idx, fov_idx, times, global_trench_indices, trench_loadings, props_to_grab, pixel_scaling, metadata, priority=random_priorities[k, 1]*8)
def dask_extract_cell_data_mask(self, dask_controller, channels, props_to_grab, file_list=None, overwrite=False):
"""Extract cell fluorescence properties using phase segmentation graph.
Args:
dask_controller (trenchripper.dask_controller): Helper object to handle dask jobs
props_to_grab(list, str): metrics to extract from regionprops data
file_list (list, int): Subset kymograph files
overwrite (bool): Whether to overwrite the output directory
Returns:
None
"""
dask_controller.futures = {}
# write directory
writedir(self.phasedatapath,overwrite=overwrite)
# load metadata
kymodf = self.meta_handle.read_df("kymograph",read_metadata=True)
metadata = kymodf.metadata
globaldf = self.meta_handle.read_df("global", read_metadata=True)
# get pixel scaling so that measurements are in micrometers
pixel_scaling = globaldf.metadata["pixel_microns"]
if file_list is None:
file_list = kymodf["File Index"].unique().tolist()
num_file_jobs = len(file_list)
num_channels = len(channels)
# index according to how the final cell data should be organized
kymodf = kymodf.reset_index()
kymodf = kymodf.set_index(["File Index", "File Trench Index", "timepoints"])
random_priorities = np.random.uniform(size=(num_file_jobs,num_channels))
for k,file_idx in enumerate(file_list):
for k2, channel in enumerate(channels):
segmented_masks_file = "segmentation_" + str(file_idx) + ".hdf5"
if segmented_masks_file in os.listdir(self.phasesegmentationpath):
times = kymodf.loc[file_idx, "time (s)"]
global_trench_indices = kymodf.loc[file_idx, "trenchid"]
trench_loadings = kymodf.loc[file_idx, "Trench Loading"]
fov_idx = kymodf.loc[file_idx, "fov"]
dask_controller.futures["Cell Props %d: " % file_idx] = dask_controller.daskclient.submit(self.extract_cell_data_mask, file_idx, fov_idx, channel, times, global_trench_indices, trench_loadings, props_to_grab, pixel_scaling, metadata, priority=random_priorities[k, k2]*8)
    def extract_cell_data(self, file_idx, fov_idx, times, global_trench_indices, trench_loadings, props_to_grab, pixel_scaling, metadata=None):
        """Get cell morphology data from segmented trenches and write it to
        ``data_<file_idx>.h5`` under the "metrics" key.

        Args:
            file_idx (int): hdf5 file index
            fov_idx: Field of view values, indexed by (trench, timepoint) via .loc
            times: acquisition times in seconds, indexed by (trench, timepoint) via .loc
            global_trench_indices: Trench IDs, indexed by (trench, timepoint) via .loc
            trench_loadings: Trench Loading values, indexed by (trench, timepoint) via .loc
            props_to_grab (list, str): list of properties to grab
            pixel_scaling (float): microns/pixel
            metadata (pandas.Dataframe): metadata dataframe, attached as an
                attribute of the stored "metrics" table

        Returns:
            str: "Done"
        """
        # Get segmented masks: (trench, time, y, x) label stack for this file
        segmented_mask_array = self.load_trench_array_list(self.phasesegmentationpath + "/segmentation_", file_idx, "data", False)
        # Load output file
        with HDFStore(os.path.join(self.phasedatapath, "data_%d.h5" % file_idx)) as store:
            # drop any previous result so the table is rebuilt from scratch
            if "/metrics" in store.keys():
                store.remove("/metrics")
            trench_time_dataframes = {}
            first_dict_flag = True
            # Iterate through trenches and times
            for trench_idx in range(segmented_mask_array.shape[0]):
                for time_idx in range(segmented_mask_array.shape[1]):
                    # get regionprops
                    seg_props = sk.measure.regionprops(segmented_mask_array[trench_idx, time_idx,:,:],cache=False)
                    if len(seg_props) > 0:
                        # Convert to dict
                        seg_props = self.props_to_dict(seg_props, props_to_grab)
                        # Add metadata: one entry per detected label in this frame
                        seg_props["trenchid"] = [global_trench_indices.loc[trench_idx,time_idx]]*len(seg_props["label"])
                        seg_props["file_trench_index"] = [trench_idx]*len(seg_props["label"])
                        seg_props["time_s"] = [times.loc[trench_idx, time_idx]]*len(seg_props["label"])
                        seg_props["trench_loadings"] = [trench_loadings.loc[trench_idx,time_idx]]*len(seg_props["label"])
                        seg_props["fov"] = [fov_idx.loc[trench_idx,time_idx]]*len(seg_props["label"])
                        # accumulate everything into one dict-of-lists
                        if first_dict_flag:
                            trench_time_dataframes = seg_props
                            first_dict_flag = False
                        else:
                            for prop in trench_time_dataframes.keys():
                                trench_time_dataframes[prop].extend(seg_props[prop])
                    del seg_props
            del segmented_mask_array
            # Convert to dataframe
            # NOTE(review): if no cells were found in any frame, the dict is empty
            # and the "label" access below raises KeyError -- confirm upstream
            # guarantees at least one segmented cell per file.
            seg_props = pd.DataFrame(trench_time_dataframes)
            # Rename label to trench index
            seg_props["trench_cell_index"] = seg_props["label"]
            seg_props = seg_props.drop("label", axis=1)
            # Convert bounding box to multiple columns
            if "bbox" in props_to_grab:
                seg_props[['min_row', 'min_col', 'max_row', 'max_col']] = pd.DataFrame(seg_props['bbox'].tolist(), index=seg_props.index)
                seg_props = seg_props.drop("bbox", axis=1)
            # Convert centroid to multiple columns
            if "centroid" in props_to_grab:
                seg_props[['centy', 'centx']] = pd.DataFrame(seg_props['centroid'].tolist(), index=seg_props.index)
                seg_props = seg_props.drop("centroid", axis=1)
            # Convert lengths from pixels to micrometers (and area to square microns)
            length_scale_measurements = set(["major_axis_length", "equivalent_diameter", "minor_axis_length", "perimeter"])
            for prop in props_to_grab:
                if prop in length_scale_measurements:
                    seg_props[prop] = seg_props[prop]*pixel_scaling
            if "area" in props_to_grab:
                seg_props["area"] = seg_props["area"]*(pixel_scaling)**2
            # Index
            seg_props = seg_props.set_index(["file_trench_index", "time_s", "trench_cell_index"])
            # Save file
            store.put("metrics", seg_props, data_columns=True)
            if metadata is not None:
                store.get_storer("metrics").attrs.metadata = metadata
            del seg_props
            del trench_time_dataframes
        return "Done"
def extract_cell_data_mask(self, file_idx, fov_idx, channel, times, global_trench_indices, trench_loadings, props_to_grab, pixel_scaling, metadata=None):
"""Get cell morphology data from segmented trenches.
Args:
file_idx (int): hdf5 file index
fov_idx (int): Field of view in the original data
channel (str): channel to measure intensities on
times: (list, float): Time indices to look at
global_trench_indices (list, int): Trench IDs
props_to_grab (list, str): list of properties to grab
pixel_scaling (float): microns/pixel
metadata (pandas.Dataframe): metdata dataframe
Returns:
"Done"
"""
# Get segmented masks
segmented_mask_array = self.load_trench_array_list(self.phasesegmentationpath + "/segmentation_", file_idx, "data", False)
# Get kymographs for other channel
kymograph = self.load_trench_array_list(self.kymographpath + "/kymograph_", file_idx, self.seg_channel, False)
# Load output file
with HDFStore(os.path.join(self.phasedatapath, "data_%d.h5" % file_idx)) as store:
if "/metrics_%s" % channel in store.keys():
store.remove("/metrics_%s" % channel)
trench_time_dataframes = {}
first_dict_flag = True
# Iterate through trenches and times
for trench_idx in range(segmented_mask_array.shape[0]):
for time_idx in range(segmented_mask_array.shape[1]):
# get regionprops
seg_props = sk.measure.regionprops(segmented_mask_array[trench_idx, time_idx,:,:], intensity_image=kymograph[trench_idx, time_idx,:,:], cache=False)
if len(seg_props) > 0:
# Convert to dict
seg_props = self.props_to_dict(seg_props, props_to_grab)
# Add metadata
seg_props["trenchid"] = [global_trench_indices.loc[trench_idx,time_idx]]*len(seg_props["label"])
seg_props["file_trench_index"] = [trench_idx]*len(seg_props["label"])
seg_props["time_s"] = [times.loc[trench_idx, time_idx]]*len(seg_props["label"])
seg_props["trench_loadings"] = [trench_loadings.loc[trench_idx,time_idx]]*len(seg_props["label"])
seg_props["fov"] = | |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import streamlit as st
import time
import seaborn as sns
import squarify
import pycountry
import geopandas
import plotly.graph_objects as go
from PIL import Image
from wordcloud import WordCloud, STOPWORDS,ImageColorGenerator
st.title("Nobelevo4ka")
st.image('1.jpg')
# NOTE: the bare string literals below are streamlit "magic" -- they are rendered
# on the page, so their (Russian) text is user-facing output, not comments.
"В этой проге я постараюсь визаулизировать данные по Нобелевской премии 1901-2019. " \
"Данные взяты с Kagle (https://www.kaggle.com/imdevskp/nobel-prize)"
"В датасете содержится информация о литературе/медицине/премии мира/физике/химии и экономике (основана в 1969)"
"Стоит также упомянуть, что в период 1940-1942 Нобелевская премия не вручалась(("
with st.echo(code_location='below'):
    # Load the Kaggle dump sorted chronologically; drop the stale index column.
    data = pd.read_csv('complete.csv', delimiter=',').sort_values("awardYear").reset_index()
    del data["index"]
with st.echo(code_location='below'):
    # Organizations have no gender in the dataset; give them their own category.
    data["gender"] = data["gender"].fillna("Organization")
    # One row per award year; 1940-1942 are skipped (no prizes during WWII).
    data_year = pd.DataFrame({"Year": range(1901, 1940), "male": 0, "female": 0, "Organization": 0})
    data_year2 = pd.DataFrame({"Year": range(1943, 2020), "male": 0, "female": 0, "Organization": 0})
    data_year = pd.concat([data_year, data_year2], ignore_index=True)
    # Count laureates per year and gender.
    for j in ["male", "female", "Organization"]:
        # was range(1901, 1939): the year 1939 was silently never filled in
        for i in range(1901, 1940):
            # use .loc[row, col] -- chained .loc[row][col] = ... assigns to a copy
            data_year.loc[i - 1901, j] = int(data[data["awardYear"] == int(i)]["gender"].to_list().count(j))
        # rows after the 3-year wartime gap are offset by 3 (hence i - 1904)
        for i in range(1943, 2020):
            data_year.loc[i - 1904, j] = int(data[data["awardYear"] == int(i)]["gender"].to_list().count(j))
with st.echo(code_location='below'):
    # Grand totals per gender: columns 1..3 of data_year (male, female, Organization).
    number = data_year.sum()[1:4].to_list()
    genders = ["male", "female", "Organization"]
    # NOTE(review): "a, b" are really figure and axes; b is used as the pie Axes below.
    a, b = plt.subplots()
    ### FROM https://stackoverflow.com/questions/6170246/how-do-i-use-matplotlib-autopct
def make_autopct(values):
def my_autopct(pct):
total = sum(values)
val = int(round(pct * total / 100.0))
return '{p:.1f}% ({v:d})'.format(p=pct, v=val)
return my_autopct
### END FROM https://stackoverflow.com/questions/6170246/how-do-i-use-matplotlib-autopct
b.pie(number, (0, 0, 0.1), genders, ['#60b3ff', '#ff9999', '#99ff99'], autopct=make_autopct(number),
shadow=True, startangle=11)
b.axis('equal')
plt.tight_layout()
plt.legend(title='Гендерное равенство?! Ну типа', bbox_to_anchor=(1, 1), loc='upper center')
''''Получился нелепый мужской пакмэн, проглотивший остальных. Действительно, пока что нет никаких новостей,
с куммулятивным гендерным распределением всё и так был понятно.'''
st.pyplot(plt)
with st.echo(code_location='below'):
    # Per-category totals and female counts; data_year also gains one column per category.
    b = ["Chemistry", "Literature", "Physiology or Medicine", "Peace", "Physics", "Economic Sciences"]
    subj = pd.DataFrame({"Category": b, "Total": 0, "Female": 0})
    for j in b:
        data_year[j] = 0
        for i in data_year["Year"]:
            # Row offset: years after the 1940-1942 gap are shifted down by 3.
            # Use .loc[row, col] -- chained .loc[row][col] = ... writes to a copy.
            data_year.loc[i - 1901 - 3 * int(i / 1943), j] = int(
                data[data["awardYear"] == int(i)]["category"].to_list().count(j))
        subj.iloc[b.index(j), 1] = data_year[j].sum()
        subj.iloc[b.index(j), 2] = data[data["category"] == j]["gender"].to_list().count("female")
# Interactive lookup: the user picks a category and a year and the app shows the
# laureate(s) with motivation, birth date and birth country.
"Стой!!! Совсем забыл сказать, ты можешь воспользоваться уникальной поисковой системой! Она бесполезная, но вдруг тебе пригодится..." \
"Код я спрятал, потому что он большой и некрасивый"
cat = st.selectbox('Выберите интересующую вас область:',
                   ["Literature", "Chemistry", "Physiology or Medicine", "Physics", "Economic Sciences"])
year = st.selectbox('Выберите интересующий вас год:', range(1901, 2020))
# 1940-1942: no prizes were awarded at all.
if year > 1939 and year < 1943:
    "В этом году нобелевскую премию по данному предмету никто не получал. Да и по другим претметам тоже. " \
    "Война всё-так дело серьезное"
else:
    # NOTE(review): the repeated pattern data[mask1][mask2] filters a frame with a
    # mask built from the *unfiltered* frame; pandas aligns on the index so it
    # works here, but a single combined mask via .loc would be cleaner.
    if len(data.loc[data["awardYear"] == year].loc[data["category"] == cat]) == 0:
        # empty selection only happens for Economic Sciences before 1969
        "В этом году по Экономике никто не получал премию. Знаете почему? Её тогда ещё не было)) " \
        "Она появилась в 1969."
    elif len(data.loc[data["awardYear"] == year].loc[data["category"] == cat]) == 1:
        # exactly one laureate: show them directly
        "В этом году Нобелевскую премию по " + str(cat) + " была вручена " + \
        data.loc[data["awardYear"] == year].loc[data["category"] == cat]["name"].iloc[0]
        "За что получил? Тут всё очев: " + str(data.loc[data["awardYear"] == year].loc[data["category"] == cat]["motivation"].iloc[0])
        if data.loc[data["awardYear"] == year].loc[data["category"] == cat]["birth_date"].iloc[0] == "":
            st.write("Датафрейм не знает, когда этот человек родился, значит и нам не положено")
        else:
            st.write("Дата рождения " + str(data.loc[data["awardYear"] == year].loc[data["category"] == cat]["name"].iloc[0])
                     + " - " + \
                     data[data["awardYear"] == year].loc[data["category"] == cat]["birth_date"].iloc[0])
        if data.loc[data["awardYear"] == year].loc[data["category"] == cat]["birth_countryNow"].iloc[0] == "":
            st.write("Датафрейм не знает, где она родилась, значит и нам не положено")
        else:
            st.write("Место рождения " + str(data.loc[data["awardYear"] == year].loc[data["category"] == cat]["name"].iloc[0])
                     + " - " + \
                     data.loc[data["awardYear"] == year].loc[data["category"] == cat]["birth_countryNow"].iloc[0])
    else:
        # shared prize: let the user pick one of the co-laureates
        st.write("В "+str(year)+" году нобелевской премией по "+str(cat)+ " было награждено сразу несколько человек!!")
        chel = st.selectbox(
            "Выберите, кто именно вас интересует: ", data[data["awardYear"] == year][data["category"] == cat]["name"].to_list())
        "За что получил? Тут всё очев: " + str(
            data[data["awardYear"] == year][data["category"] == cat][data["name"] == chel]["motivation"].iloc[0])
        if data[data["awardYear"] == year][data["category"] == cat][data["name"] == chel]["birth_date"].iloc[0] == "":
            st.write("Датафрейм не знает, когда этот человек родился, значит и нам не положено")
        else:
            st.write("Дата рождения " + str(data[data["awardYear"] == year][data["category"] == cat][data["name"] == chel]["name"].iloc[0])
                     + " - " + \
                     data[data["awardYear"] == year][data["category"] == cat][data["name"] == chel]["birth_date"].iloc[0])
        if data[data["awardYear"] == year][data["category"] == cat][data["name"] == chel]["birth_countryNow"].iloc[0] == "":
            st.write("Датафрейм не знает, где она родилась, значит и нам не положено")
        else:
            st.write("Место рождения " + str(data[data["awardYear"] == year][data["category"] == cat][data["name"] == chel]["name"].iloc[0])
                     + " - " + \
                     data[data["awardYear"] == year][data["category"] == cat]["birth_countryNow"][data["name"] == chel].iloc[0])
with st.echo(code_location='below'):
    # Horizontal bars: total prizes per category with the female share overlaid.
    sns.set_theme(style="whitegrid")
    f, ax = plt.subplots(figsize=(4, 4))
    subjects = subj.sort_values("Total", ascending=False)
    sns.barplot(x="Total", y="Category", data=subjects, label="Total", color="#60b3ff")
    sns.set_color_codes("muted")
    # drawn on top of the totals, so each bar visually shows female / total
    sns.barplot(x="Female", y="Category", data=subjects, label="Female", color="#ff9999")
    ax.legend(ncol=1, loc="lower right", frameon=True)
    ax.set(xlim=(0, 250), ylabel="", xlabel="Number of prizes")
    plt.title("Female distribution per categories")
    # sns.despine() returns None, so this is effectively st.pyplot() on the global
    # figure; the deprecation warning for that usage is silenced above.
    st.set_option('deprecation.showPyplotGlobalUse', False)
    st.pyplot(sns.despine(left=True, bottom=True))
"Как мы видим, процентные соотношения женщин в каждой из категорий очень разнятся. Наибольшую долю они составляют " \
"в премии Мира и премии по литературе, откуда можно сделать вывод о том, что женщины лучше преуспевают в гуманитарных науках (литература)" \
"и в социальной активности/иницитивности (премия Мира), нежели в естественных науках ('преуспевают' в данном контексте относится именно" \
"к Нобелевской премии)"
"Хмммммм. А вы заметили, что во всех категориях количество врученных премий значительно превышает временной промежуток, на протяжении " \
"которого эти премии присуждались (раз в год)??? Омагадддд, получается, одну премию могут получать сразу несколько человек!!!"
with st.echo(code_location='below'):
    # Laureates per (year, category), drawn as a heatmap.
    y = data[["awardYear", "category"]].copy()
    y["1"] = 1
    y = y.groupby(["awardYear", "category"]).sum("1")
    # NOTE(review): y is computed but never used below -- candidate for removal.
    x = data_year[["Year", "Chemistry", "Literature", "Physiology or Medicine", "Peace",
                   "Physics", "Economic Sciences"]].pivot_table(["Chemistry", "Literature",
                                                                 "Physiology or Medicine", "Peace", "Physics",
                                                                 "Economic Sciences"], "Year")
    # transpose to categories-as-rows / years-as-columns for the heatmap
    x = pd.pivot_table(x, values=["Chemistry", "Literature", "Physiology or Medicine", "Peace",
                                  "Physics", "Economic Sciences"], columns="Year")
    sns.set_theme()
    f, ax = plt.subplots(figsize=(10, 3))
    sns.heatmap(x, annot=False, fmt="d", linewidths=0.05, ax=ax)
    """Цветовая палитра показывает, сколько человек в конкретный год взяли Нобеля в конкретной категории. По литературе,
    например, почти во все года был награждён один человек, что достаточно логично. Действительно интересный момент,
    который мы видим - в естественных науках с момента появления премии количество дуэтов/трио постепенно росло и в
    последние десятителия стало модно брать нобеля не одному, а со своими корешами."""
    st.pyplot()
with st.echo(code_location='below'):
    # Treemap: one tile per category, sized by the total number of prizes.
    quant = subj[["Category", "Total"]]
    # one Spectral-colormap color per category; reused by later charts
    colors = [plt.cm.Spectral(i / float(len(quant["Category"]))) for i in range(len(quant["Category"]))]
    plt.figure(figsize=(15, 8), dpi=80)
    squarify.plot(sizes=quant["Total"], label=quant["Category"], color=colors, alpha=0.8, value=quant["Total"])
    plt.title('Treemap of the number of the Nobel prizes per category')
    plt.axis('off')
    st.pyplot()
with st.echo(code_location='below'):
    # Stacked area chart: laureates per year, split by category.
    x = data_year[["Chemistry", "Literature", "Physiology or Medicine", "Peace", "Physics", "Economic Sciences"]].copy()
    x.index = data_year["Year"]
    # "colors" is reused from the treemap block above
    x.plot.area(color=colors)
    st.pyplot()
"А вот тут можно сломать мозг, но всё на самом деле проще. По оси Y отложено количество человек, которые взяли нобеля " \
"в каждый год. Верхняя огибающая - тотал, а если мы рассмотрим закрашенные зоны, то поймём, разбиение на предметы внутри " \
"этого тотала. В отличии от прошлого графика, этот ещё выресовываает общую тенденцию в виде роста количества премий с теч времени"
with st.echo(code_location='below'):
    # Laureate counts per (modern) country of birth, one column per country.
    strany = data_year[["Year", "male"]].copy()
    # Organizations have no birth country; fall back to the founding country.
    data["birth_countryNow"] = data["birth_countryNow"].fillna(data["org_founded_countryNow"])
    for i in set(data["birth_countryNow"].to_list()):
        strany[i] = 0
    strany = strany.drop(strany.columns[1], axis=1)  # drop the "male" placeholder column
    # TODO(review): [1:-1] skips the last country column -- confirm this is intentional.
    for j in strany.columns.to_list()[1:-1]:
        for i in data_year["Year"]:
            # Row offset: years after the 1940-1942 gap are shifted down by 3.
            # Use .loc[row, col] -- chained .loc[row][col] = ... writes to a copy.
            strany.loc[i - 1901 - 3 * int(i / 1943), j] = int(
                data[data["awardYear"] == int(i)]["birth_countryNow"].to_list().count(j))
    po_strane = pd.DataFrame(strany[strany.columns[1:]].sum().sort_values(ascending=False))
    po_strane = po_strane.iloc[0:25, :]  # keep the top 25 countries
    plt.figure(figsize=(9, 9))
    ax = plt.subplot(111, polar=True)
    plt.axis('off')
    lowerLimit = 30
    labelPadding = 4
    # renamed from "max": it shadowed the builtin; also extract a plain scalar
    # instead of the deprecated int(Series) conversion
    max_count = int(po_strane.iloc[:, 0].max())
    # Idea taken FROM https://www.python-graph-gallery.com/circular-barplot-basic
    slope = (max_count - lowerLimit) / max_count
    heights = slope * po_strane.iloc[:, 0] + lowerLimit
    width = 2 * np.pi / len(po_strane.index)
    indexes = list(range(1, len(po_strane.index) + 1))
    angles = [element * width for element in indexes]
    bars = ax.bar(angles, height=heights, width=width, bottom=lowerLimit, linewidth=2, edgecolor="white", color=colors)
    for bar, angle, height, label in zip(bars, angles, heights, po_strane.index.to_list()):
        rotation = np.rad2deg(angle)
        # flip labels on the left half of the circle so they read outward
        if angle >= np.pi / 2 and angle < 3 * np.pi / 2:
            alignment = "right"
            rotation = rotation + 180
        else:
            alignment = "left"
        ax.text(x=angle, y=lowerLimit + bar.get_height() + labelPadding,
                s=str(label) + " " + str(int(po_strane[po_strane.index == label].iloc[0, 0])),
                ha=alignment, va='center', rotation=rotation, rotation_mode="anchor")
    # END FROM https://www.python-graph-gallery.com/circular-barplot-basic
    "тут и так всё понятно"
    "Btw, я бы на вашем месте не проверял числа, потому что они не сойдутся, во всём виноват датафрейм((( Я честно пытался ручками сделать его лучше, но он всё ещё дефектный парень"
    st.pyplot()
with st.echo(code_location='below'):
country = pd.DataFrame(strany[strany.columns[2:]].sum())
country = country.reset_index(level=0, drop=False)
# FROM https://melaniesoek0120.medium.com/data-visualization-how-to-plot-a-map-with-geopandas-in-python-73b10dcd4b4b
# Аббревиатура страны по названию
def alpha3code(column):
CODE = []
for country in column:
try:
code = pycountry.countries.get(name=country).alpha_3
# .alpha_3 means 3-letter country code
# .alpha_2 means 2-letter country code
CODE.append(code)
except:
CODE.append('None')
return CODE
# END FROM https://melaniesoek0120.medium.com/data-visualization-how-to-plot-a-map-with-geopandas-in-python-73b10dcd4b4b
# Плохой датасет, некоторое пришлось прописывать ручками
country['CODE'] = alpha3code(country["index"])
country.loc[country['index'] == "Czech Republic", 'CODE'] = "CZE"
country.loc[country['index'] == "United Kingdom", 0] = 105
| |
# build/lib/thoryvos/main_ui.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Main.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PySide2 import QtCore, QtGui, QtWidgets
from .GUI_helper import DragDropWidget
from .fonts import *
import os
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
self.fontDB = QtGui.QFontDatabase()
self.fontDB.addApplicationFont(f":/fonts/fonts/CabinSketch-Bold.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/CabinSketch-Regular.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/CFNightofTerrorPERSONAL-Reg.ttf")
self.fontDB.addApplicationFont(f":/fonts/fonts/Courgette-Regular.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/DEADLY KILLERS.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/FEASFBI_.ttf")
self.fontDB.addApplicationFont(f":/fonts/fonts/FEASFBI.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/FEASFBRG.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/Lemon-Regular.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/Martyric_PersonalUse.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/monbaiti.ttf")
self.fontDB.addApplicationFont(
f":/fonts/fonts/PirataOne-Regular.ttf")
self.fontDB.addApplicationFont(
":/fonts/fonts/RW-creepsville.ttf")
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1080, 400)
MainWindow.setMinimumSize(QtCore.QSize(1080, 400))
MainWindow.setMaximumSize(QtCore.QSize(1080, 500))
MainWindow.setStyleSheet("background-color: rgb(45, 45, 45);")
MainWindow.setIconSize(QtCore.QSize(80, 80))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.TopBar = QtWidgets.QFrame(self.centralwidget)
self.TopBar.setMaximumSize(QtCore.QSize(16777215, 45))
self.TopBar.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0.542, y1:0.557, x2:1, y2:0, stop:0 rgba(180, 35, 22, 255), stop:0.631841 rgba(211, 75, 21, 254));")
self.TopBar.setFrameShape(QtWidgets.QFrame.NoFrame)
self.TopBar.setFrameShadow(QtWidgets.QFrame.Raised)
self.TopBar.setObjectName("TopBar")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.TopBar)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.Menu = QtWidgets.QFrame(self.TopBar)
self.Menu.setMaximumSize(QtCore.QSize(80, 45))
self.Menu.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0.447473, y1:0.836, x2:0.537313, y2:0, stop:0 rgba(180, 35, 22, 255), stop:0.631841 rgba(211, 75, 21, 254));\n"
"selection-background-color: rgb(159, 8, 23);")
self.Menu.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Menu.setFrameShadow(QtWidgets.QFrame.Raised)
self.Menu.setLineWidth(0)
self.Menu.setObjectName("Menu")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.Menu)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.ToggleMenu = QtWidgets.QPushButton(self.Menu)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ToggleMenu.sizePolicy().hasHeightForWidth())
self.ToggleMenu.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Pirata One")
font.setPointSize(18)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.ToggleMenu.setFont(font)
self.ToggleMenu.setStyleSheet("QPushButton {\n"
" color: qlineargradient(spread:pad, x1:0.498, y1:0, x2:0.492, y2:0.903, stop:0 rgba(159, 8, 23, 255), stop:0.393035 rgba(123, 37, 61, 255));\n"
"border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" color: rgba(255, 255, 255, 180);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.184, y1:0.705, x2:0.923851, y2:0.182, stop:0.0298507 rgba(183, 39, 22, 255), stop:0.701493 rgba(45, 45, 45, 246));\n"
"}")
self.ToggleMenu.setObjectName("ToggleMenu")
self.verticalLayout_2.addWidget(self.ToggleMenu)
self.horizontalLayout.addWidget(self.Menu)
self.Spacer = QtWidgets.QFrame(self.TopBar)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Spacer.sizePolicy().hasHeightForWidth())
self.Spacer.setSizePolicy(sizePolicy)
self.Spacer.setMaximumSize(QtCore.QSize(16777215, 45))
self.Spacer.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0.507955, y1:1, x2:0.497299, y2:0, stop:0.0199005 rgba(45, 45, 45, 255), stop:0.880597 rgba(0, 0, 0, 255));")
self.Spacer.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Spacer.setFrameShadow(QtWidgets.QFrame.Raised)
self.Spacer.setObjectName("Spacer")
self.AppName = QtWidgets.QLabel(self.Spacer)
self.AppName.setGeometry(QtCore.QRect(0, 0, 1001, 43))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.AppName.sizePolicy().hasHeightForWidth())
self.AppName.setSizePolicy(sizePolicy)
self.AppName.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("Creepsville")
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
font.setUnderline(False)
font.setWeight(75)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.AppName.setFont(font)
self.AppName.setStyleSheet("color: rgb(159, 8, 23);\n"
"background-color: rgba(0,0,0,0);")
self.AppName.setAlignment(QtCore.Qt.AlignCenter)
self.AppName.setObjectName("AppName")
self.CreatedBy = QtWidgets.QLabel(self.Spacer)
self.CreatedBy.setGeometry(QtCore.QRect(14, 20, 971, 20))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.CreatedBy.sizePolicy().hasHeightForWidth())
self.CreatedBy.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(8)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.CreatedBy.setFont(font)
self.CreatedBy.setStyleSheet("color: rgb(61, 57, 63);\n"
"border-radius: 100px;\n"
"background-color: rgba(0, 0, 0, 0);\n"
"")
self.CreatedBy.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing)
self.CreatedBy.setObjectName("CreatedBy")
self.horizontalLayout.addWidget(self.Spacer)
self.verticalLayout.addWidget(self.TopBar)
self.Content = QtWidgets.QFrame(self.centralwidget)
self.Content.setMinimumSize(QtCore.QSize(0, 0))
self.Content.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Content.setFrameShadow(QtWidgets.QFrame.Raised)
self.Content.setObjectName("Content")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.Content)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.LeftMenu = QtWidgets.QFrame(self.Content)
self.LeftMenu.setMinimumSize(QtCore.QSize(45, 0))
self.LeftMenu.setMaximumSize(QtCore.QSize(80, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.LeftMenu.setFont(font)
self.LeftMenu.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0.472, y1:0, x2:0.4669, y2:0.813, stop:0.0298507 rgba(180, 35, 22, 255), stop:0.706468 rgba(74, 74, 74, 100));\n"
"selection-background-color: qlineargradient(spread:pad, x1:0.507955, y1:1, x2:0.497299, y2:0, stop:0.0199005 rgba(45, 45, 45, 255), stop:0.880597 rgba(0, 0, 0, 255));")
self.LeftMenu.setFrameShape(QtWidgets.QFrame.NoFrame)
self.LeftMenu.setFrameShadow(QtWidgets.QFrame.Raised)
self.LeftMenu.setObjectName("LeftMenu")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.LeftMenu)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.Buttons = QtWidgets.QFrame(self.LeftMenu)
self.Buttons.setMinimumSize(QtCore.QSize(80, 0))
font = QtGui.QFont()
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.Buttons.setFont(font)
self.Buttons.setLayoutDirection(QtCore.Qt.LeftToRight)
self.Buttons.setStyleSheet("")
self.Buttons.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Buttons.setFrameShadow(QtWidgets.QFrame.Raised)
self.Buttons.setObjectName("Buttons")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.Buttons)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.Home = QtWidgets.QPushButton(self.Buttons)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Home.sizePolicy().hasHeightForWidth())
self.Home.setSizePolicy(sizePolicy)
self.Home.setMinimumSize(QtCore.QSize(45, 0))
self.Home.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("Martyric Personal Use Only")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.Home.setFont(font)
self.Home.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.472, y1:0, x2:0.4669, y2:0.813, stop:0.0298507 rgba(180, 35, 22, 255), stop:0.706468 rgba(74, 74, 74, 255));\n"
"}")
self.Home.setObjectName("Home")
self.verticalLayout_4.addWidget(self.Home)
self.Crypto = QtWidgets.QPushButton(self.Buttons)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Crypto.sizePolicy().hasHeightForWidth())
self.Crypto.setSizePolicy(sizePolicy)
self.Crypto.setMinimumSize(QtCore.QSize(45, 0))
self.Crypto.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("Martyric Personal Use Only")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.Crypto.setFont(font)
self.Crypto.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.472, y1:0, x2:0.4669, y2:0.813, stop:0.0298507 rgba(180, 35, 22, 255), stop:0.706468 rgba(74, 74, 74, 255));\n"
"}")
self.Crypto.setObjectName("Crypto")
self.verticalLayout_4.addWidget(self.Crypto)
self.FileShare = QtWidgets.QPushButton(self.Buttons)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.FileShare.sizePolicy().hasHeightForWidth())
self.FileShare.setSizePolicy(sizePolicy)
self.FileShare.setMinimumSize(QtCore.QSize(45, 0))
self.FileShare.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("Martyric Personal Use Only")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.FileShare.setFont(font)
self.FileShare.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.472, y1:0, x2:0.4669, y2:0.813, stop:0.0298507 rgba(180, 35, 22, 255), stop:0.706468 rgba(74, 74, 74, 255));\n"
"}")
self.FileShare.setObjectName("FileShare")
self.verticalLayout_4.addWidget(self.FileShare)
self.Stego = QtWidgets.QPushButton(self.Buttons)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Stego.sizePolicy().hasHeightForWidth())
self.Stego.setSizePolicy(sizePolicy)
self.Stego.setMinimumSize(QtCore.QSize(45, 0))
self.Stego.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("Martyric Personal Use Only")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.Stego.setFont(font)
self.Stego.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.526726, y1:0.966, x2:0.487, y2:0, stop:0.0845771 rgba(78, 59, 58, 255), stop:0.567164 rgba(74, 74, 74, 255));\n"
"}")
self.Stego.setObjectName("Stego")
self.verticalLayout_4.addWidget(self.Stego)
self.MarosMenuButton = QtWidgets.QPushButton(self.Buttons)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MarosMenuButton.sizePolicy().hasHeightForWidth())
self.MarosMenuButton.setSizePolicy(sizePolicy)
self.MarosMenuButton.setMinimumSize(QtCore.QSize(45, 0))
self.MarosMenuButton.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setFamily("Martyric Personal Use Only")
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.MarosMenuButton.setFont(font)
self.MarosMenuButton.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.526726, y1:0.966, x2:0.487, y2:0, stop:0.0845771 rgba(78, 59, 58, 255), stop:0.567164 rgba(74, 74, 74, 255));\n"
"}")
self.MarosMenuButton.setObjectName("MarosMenuButton")
self.verticalLayout_4.addWidget(self.MarosMenuButton)
self.verticalLayout_3.addWidget(self.Buttons)
self.horizontalLayout_2.addWidget(self.LeftMenu)
self.Pages = QtWidgets.QFrame(self.Content)
self.Pages.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Pages.setFrameShadow(QtWidgets.QFrame.Raised)
self.Pages.setObjectName("Pages")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.Pages)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.Stack = QtWidgets.QStackedWidget(self.Pages)
self.Stack.setObjectName("Stack")
self.HomePage = QtWidgets.QWidget()
self.HomePage.setObjectName("HomePage")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.HomePage)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setSpacing(10)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.HomePageDecorations = QtWidgets.QFrame(self.HomePage)
self.HomePageDecorations.setMinimumSize(QtCore.QSize(0, 175))
self.HomePageDecorations.setMaximumSize(QtCore.QSize(16777215, 175))
self.HomePageDecorations.setStyleSheet("background-color: rgba(0,0,0,0);")
self.HomePageDecorations.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.HomePageDecorations.setFrameShadow(QtWidgets.QFrame.Raised)
self.HomePageDecorations.setObjectName("HomePageDecorations")
self.AppNameBig = QtWidgets.QLabel(self.HomePageDecorations)
self.AppNameBig.setGeometry(QtCore.QRect(0, 0, 981, 111))
font = QtGui.QFont()
font.setFamily("Creepsville")
font.setPointSize(45)
font.setBold(True)
font.setItalic(True)
font.setUnderline(False)
font.setWeight(75)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.AppNameBig.setFont(font)
self.AppNameBig.setStyleSheet("color: rgb(159, 8, 23);\n"
"background-color: rgba(0, 0, 0, 0)")
self.AppNameBig.setAlignment(QtCore.Qt.AlignCenter)
self.AppNameBig.setWordWrap(False)
self.AppNameBig.setObjectName("AppNameBig")
self.AppDesc = QtWidgets.QLabel(self.HomePageDecorations)
self.AppDesc.setGeometry(QtCore.QRect(0, 100, 981, 41))
font = QtGui.QFont()
font.setFamily("CF Night of Terror PERSONAL")
font.setPointSize(14)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.AppDesc.setFont(font)
self.AppDesc.setStyleSheet("color: rgba(135, 42, 70, 220);")
self.AppDesc.setAlignment(QtCore.Qt.AlignCenter)
self.AppDesc.setObjectName("AppDesc")
self.verticalLayout_6.addWidget(self.HomePageDecorations)
self.PageLinks = QtWidgets.QFrame(self.HomePage)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PageLinks.sizePolicy().hasHeightForWidth())
self.PageLinks.setSizePolicy(sizePolicy)
self.PageLinks.setStyleSheet("background-color: rgba(0,0,0,0);")
self.PageLinks.setFrameShape(QtWidgets.QFrame.NoFrame)
self.PageLinks.setFrameShadow(QtWidgets.QFrame.Raised)
self.PageLinks.setObjectName("PageLinks")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.PageLinks)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.CryptoPageButton = QtWidgets.QPushButton(self.PageLinks)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.CryptoPageButton.sizePolicy().hasHeightForWidth())
self.CryptoPageButton.setSizePolicy(sizePolicy)
self.CryptoPageButton.setMinimumSize(QtCore.QSize(250, 0))
font = QtGui.QFont()
font.setFamily("Feast of Flesh BB")
font.setPointSize(18)
self.CryptoPageButton.setFont(font)
self.CryptoPageButton.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(78, 59, 58, 231), stop:0.567164 rgba(74, 74, 74, 234));\n"
"}")
self.CryptoPageButton.setDefault(False)
self.CryptoPageButton.setObjectName("CryptoPageButton")
self.horizontalLayout_3.addWidget(self.CryptoPageButton)
self.TransferPageButton = QtWidgets.QPushButton(self.PageLinks)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TransferPageButton.sizePolicy().hasHeightForWidth())
self.TransferPageButton.setSizePolicy(sizePolicy)
self.TransferPageButton.setMinimumSize(QtCore.QSize(250, 0))
font = QtGui.QFont()
font.setFamily("Feast of Flesh BB")
font.setPointSize(18)
self.TransferPageButton.setFont(font)
self.TransferPageButton.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(78, 59, 58, 231), stop:0.567164 rgba(74, 74, 74, 234));\n"
"}")
self.TransferPageButton.setObjectName("TransferPageButton")
self.horizontalLayout_3.addWidget(self.TransferPageButton)
self.StegoPageButton = QtWidgets.QPushButton(self.PageLinks)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StegoPageButton.sizePolicy().hasHeightForWidth())
self.StegoPageButton.setSizePolicy(sizePolicy)
self.StegoPageButton.setMinimumSize(QtCore.QSize(250, 0))
font = QtGui.QFont()
font.setFamily("Feast of Flesh BB")
font.setPointSize(18)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.StegoPageButton.setFont(font)
self.StegoPageButton.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(78, 59, 58, 231), stop:0.567164 rgba(74, 74, 74, 234));\n"
"}")
self.StegoPageButton.setObjectName("StegoPageButton")
self.horizontalLayout_3.addWidget(self.StegoPageButton)
self.MacroPageButton = QtWidgets.QPushButton(self.PageLinks)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MacroPageButton.sizePolicy().hasHeightForWidth())
self.MacroPageButton.setSizePolicy(sizePolicy)
self.MacroPageButton.setMinimumSize(QtCore.QSize(250, 0))
font = QtGui.QFont()
font.setFamily("Feast of Flesh BB")
font.setPointSize(18)
self.MacroPageButton.setFont(font)
self.MacroPageButton.setStyleSheet("QPushButton {\n"
" color: rgba(255, 255, 255, 150);\n"
" background-color: rgba(0, 0, 0, 0);\n"
" border: 0px solid;\n"
"}\n"
"\n"
"QPushButton::hover {\n"
" \n"
" color: rgba(255, 255, 255, 195);\n"
" border: 0px solid;\n"
" background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(78, 59, 58, 231), stop:0.567164 rgba(74, 74, 74, 234));\n"
"}")
self.MacroPageButton.setObjectName("MacroPageButton")
self.horizontalLayout_3.addWidget(self.MacroPageButton)
self.verticalLayout_6.addWidget(self.PageLinks)
self.Stack.addWidget(self.HomePage)
self.CryptoPage = QtWidgets.QWidget()
self.CryptoPage.setObjectName("CryptoPage")
self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.CryptoPage)
self.verticalLayout_15.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_15.setSpacing(0)
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.CryptoDragDropFrame = QtWidgets.QFrame(self.CryptoPage)
self.CryptoDragDropFrame.setMinimumSize(QtCore.QSize(1000, 125))
self.CryptoDragDropFrame.setMaximumSize(QtCore.QSize(1000, 16777215))
self.CryptoDragDropFrame.setAcceptDrops(True)
self.CryptoDragDropFrame.setStyleSheet("background-color: rgba(0, 0, 0, 0);")
self.CryptoDragDropFrame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.CryptoDragDropFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.CryptoDragDropFrame.setObjectName("CryptoDragDropFrame")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.CryptoDragDropFrame)
self.horizontalLayout_8.setContentsMargins(2, 2, 2, 0)
self.horizontalLayout_8.setSpacing(0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.CryptoDragDrop = DragDropWidget(self.CryptoDragDropFrame)
self.CryptoDragDrop.setAcceptDrops(True)
self.CryptoDragDrop.setStyleSheet("border: 2px dashed #aaa;\n"
"color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(158, 7, 23, 180), stop:1 rgba(255, 130, 20, 200));\n"
"background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(74, 74, 74, 42), | |
= syn_tyme.hour*10000 + syn_tyme.minute*100 + syn_tyme.second
if filename is None:
#filename = '%s/%s.obs.%d_%02dz.ods'%(dir,expid,nymd,nhms/10000)
filename = '%s/%s.obs.%d.ods'%(dir,expid,nymd)
# Interval for this synoptic time
# -------------------------------
dt = timedelta(seconds=60*60*int(24/nsyn)/2) # usually 3 hours
t1 = syn_tyme - dt
t2 = syn_tyme + dt
I = (self.tyme>=t1)&(self.tyme<t2)
# Create and populated ODS object
# -------------------------------
lon = self.lon[I]
nobs = len(lon)
ods = ODS(nobs=nobs, kx=KX, kt=KT['AOD'])
ods.ks[:] = range(1,1+nobs)
ods.lat[:] = self.lat[I]
ods.lon[:] = self.lon[I]
ods.qch[:] = zeros(nobs).astype('int')
ods.qcx[:] = zeros(nobs).astype('int')
ods.time[:] = zeros(nobs).astype('int') # fix this if needed
if doNNR:
ods.lev[:] = 550. * ones(nobs)
ods.obs[:] = self.tau_550[I]
ods.xvec[:] = self.ref_860[I]
ods.xm[:] = self.ref_630[I]
else:
ods.lev[:] = 630. * ones(nobs)
ods.obs[:] = self.tau_630[I]
ods.xvec[:] = self.tau_860[I]
ods.xm[:] = self.ref_630[I]
if self.verb:
print "[w] Writing file <"+filename+"> with %d observations at %dZ"%\
(ods.nobs,nhms/10000)
ods.write(filename,nymd,nhms,nsyn=nsyn)
#---
    def writeGridded(self, syn_tyme,
                     filename=None,dir='.',expid='avhrr',refine=8,res=None,
                     nsyn=8,doNNR=False,doFilter=True):
        """
        Writes gridded AVHRR AOD to a GFIO file.

        syn_tyme -- synoptic time (datetime) this grid is valid for
        filename -- output file; default '%s/%s.aod.%d.nc4'%(dir,expid,nymd)
        dir      -- output directory (used only when filename is None)
        expid    -- experiment id used in the default filename
        nsyn     -- number of synoptic times per day (sets time window/timinc)
        doNNR    -- also grid the NNR 550nm AOD (tau_550)
        doFilter -- restrict obs to the +/- half synoptic interval around
                    syn_tyme; otherwise grid everything loaded

        refine -- refinement level for a base 4x5 GEOS-5 grid
                  refine=1 produces a 4 x 5 grid
                  refine=2 produces a 2 x2.50 grid
                  refine=4 produces a 1 x1,25 grid
                  refine=8 produces a 0.50x0.625 grid
                  refine=16 produces a 0.25x0.3125 grid
        Alternatively, one can specify the grid resolution with a
        single letter:
        res -- single letter denoting GEOS-5 resolution,
               res='a' produces a 4 x 5 grid
               res='b' produces a 2 x2.50 grid
               res='c' produces a 1 x1,25 grid
               res='d' produces a 0.50x0.625 grid
               res='e' produces a 0.25x0.3125 grid
        NOTE: *res*, if specified, supersedes *refine*.
        """
        # NOTE(review): binobs3d is imported but not used in this method
        from binObs_ import binobs2d, binobs3d
        from gfio import GFIO
        # Interval for this synoptic time
        # -------------------------------
        if doFilter:
            dt = timedelta(seconds=60*60*int(24/nsyn)/2) # usually 3/2 hours
            t1 = syn_tyme - dt
            t2 = syn_tyme + dt
            I = (self.tyme>=t1)&(self.tyme<t2)
        else:
            I = ones(self.lon.shape).astype('bool') # all that comes in
        lon = self.lon[I]
        nobs = len(lon)
        # Stop here if no good obs available
        # ----------------------------------
        if nobs == 0:
            return # no data to work with
        # Output grid resolution
        # ----------------------
        if res is not None:
            if res=='a': refine = 1
            if res=='b': refine = 2
            if res=='c': refine = 4
            if res=='d': refine = 8
            if res=='e': refine = 16
        # Lat lon grid
        # ------------
        dx = 5. / refine
        dy = 4. / refine
        im = int(360. / dx)
        jm = int(180. / dy + 1)
        glon = linspace(-180.,180.,im,endpoint=False)
        glat = linspace(-90.,90.,jm)
        # Encode date/time as GFIO integers (YYYYMMDD, HHMMSS)
        nymd = syn_tyme.year*10000 + syn_tyme.month*100 + syn_tyme.day
        nhms = syn_tyme.hour*10000 + syn_tyme.minute*100 + syn_tyme.second
        vtitle = [ 'AVHRR Aerosol Optical Depth at 630nm (NOAA CDR)',]
        vname = ['tau_630', ]
        vunits = [ '1', ]
        kmvar = [ 0 , ]
        levs = array([630.,])
        if doNNR:
            vtitle += [ 'AVHRR Aerosol Optical Depth at 550nm (NASA NNR)',]
            vname += ['tau_550', ]
            vunits += [ '1', ]
            kmvar += [ 0 , ]
            # NOTE(review): this *replaces* the 630nm level rather than
            # appending -- presumably fine since both variables are 2-D
            # (kmvar=0); confirm against GFIO.create semantics
            levs = array([550.,])
        title = 'Gridded AVHRR Aerosol Retrievals'
        source = 'NASA/GSFC GEOS-5 Aerosol Group'
        contact = '<EMAIL>'
        if filename is None:
            #filename = '%s/%s.sfc.%d_%02dz.nc4'%(dir,expid,nymd,nhms/10000)
            filename = '%s/%s.aod.%d.nc4'%(dir,expid,nymd)
        # Create the file (append to an existing one, else create with
        # the full metadata set)
        # ---------------
        if os.path.exists(filename):
            f = GFIO(filename,mode='w')
        else:
            f = GFIO()
            timinc = (24/nsyn) * 10000
            f.create(filename, vname, nymd, nhms, timinc=timinc,
                     lon=glon, lat=glat, levs=levs, levunits='nm',
                     vtitle=vtitle, vunits=vunits,kmvar=kmvar,amiss=MISSING,
                     title=title, source=source, contact=contact)
        # Grid variable and write to file
        # -------------------------------
        f.write('tau_630', nymd, nhms,
                binobs2d(self.lon[I],self.lat[I],self.tau_630[I],im,jm,MISSING) )
        if doNNR:
            f.write('tau_550', nymd, nhms,
                    binobs2d(self.lon[I],self.lat[I],self.tau_550[I],im,jm,MISSING) )
        if self.verb:
            print "[w] Wrote file "+filename
#---
def reduce(self,I):
"""
Reduce observations according to index I.
"""
Nicknames = ALIAS.values()
for name in self.__dict__:
if name in Nicknames:
continue # alias do not get reduced
q = self.__dict__[name]
if type(q) is type(self.lon):
if len(q) == self.nobs:
# print "{} Reducing "+name
self.__dict__[name] = q[I]
Alias = ALIAS.keys()
for sds in self.Names:
if sds in Alias:
self.__dict__[ALIAS[sds]] = self.__dict__[sds] # redefine aliases
self.nobs = len(self.lon)
#---
def speciate(self,aer_x,FineMode=False):
"""
Use GAAS to derive fractional composition.
"""
from gfio import GFIOHandle
self.sampleFile(aer_x,onlyVars=('TOTEXTTAU',
'DUEXTTAU',
'SSEXTTAU',
'BCEXTTAU',
'OCEXTTAU',
'SUEXTTAU',
))
s = self.sample
I = (s.TOTEXTTAU<=0)
s.TOTEXTTAU[I] = 1.E30
self.fdu = s.DUEXTTAU / s.TOTEXTTAU
self.fss = s.SSEXTTAU / s.TOTEXTTAU
self.fbc = s.BCEXTTAU / s.TOTEXTTAU
self.foc = s.OCEXTTAU / s.TOTEXTTAU
self.fcc = self.fbc + self.foc
self.fsu = s.SUEXTTAU / s.TOTEXTTAU
if FineMode:
TOTEXTTAU = s.TOTEXTTAU[:]
self.sampleFile(aer_x,onlyVars=('DUEXTTFM','SSEXTTFM'))
self.fduf = s.DUEXTTFM / TOTEXTTAU
self.fssf = s.SSEXTTFM / TOTEXTTAU
del self.sample
#---
def sampleG5(self,gas_x=None,avk_x=None,int_x=None,slv_x=None,ext_Nc=None):
"""
Sample key parameters from GAAS files.
"""
from gfio import GFIOHandle
if gas_x is not None:
self.sampleFile(gas_x,onlyVars=('AODANA',))
self.tau_550 = self.sample.AODANA[:]
if avk_x is not None:
tyme = self.tyme[:]
self.tyme = getSyn(tyme)
self.sampleFile(avk_x,onlyVars=('AOD',))
self.avk = self.sample.AOD[:]
self.tyme[:] = tyme[:]
if int_x is not None:
try:
self.sampleFile(int_x,onlyVars=('TQV',)) # As in file spec
self.tpw = self.sample.TQV[:]
except:
self.sampleFile(int_x,onlyVars=('TPW',)) # Larry's name
self.tpw = self.sample.TPW[:]
if slv_x is not None:
self.sampleFile(slv_x,onlyVars=('U10M','V10M'))
self.wind = sqrt(self.sample.U10M[:]**2 + self.sample.V10M[:]**2)
if ext_Nc is not None:
self.sampleFile(ext_Nc,onlyVars=('taod',))
self.tau_660 = self.sample.taod[:,5] # 660
del self.sample
#---
def sampleFile(self, inFile, npzFile=None, onlyVars=None, Verbose=False):
"""
Interpolates all variables of inFile and optionally
save them to file *npzFile*
"""
from gfio import GFIO, GFIOctl, GFIOHandle
# Instantiate grads and open file
# -------------------------------
name, ext = os.path.splitext(inFile)
if ext in ( '.nc4', '.nc', '.hdf'):
fh = GFIO(inFile) # open single file
if fh.lm == 1:
timeInterp = False # no time interpolation in this case
else:
raise ValueError, "cannot handle files with more tha 1 time, use ctl instead"
else:
fh = GFIOctl(inFile) # open timeseries
timeInterp = True # perform time interpolation
self.sample = GFIOHandle(inFile)
if onlyVars is None:
onlyVars = fh.vname
nt = self.lon.shape
tymes = self.tyme
lons = self.lon
lats = self.lat
# Loop over variables on file
# ---------------------------
for v in onlyVars:
if Verbose:
print "<> Sampling ", v
if timeInterp:
var = fh.sample(v,lons,lats,tymes,Verbose=Verbose)
else:
var = fh.interp(v,lons,lats)
if len(var.shape) == 1:
self.sample.__dict__[v] = var
elif len(var.shape) == 2:
var = var.T # shape should be (nobs,nz)
self.sample.__dict__[v] = var
else:
raise IndexError, 'variable <%s> has rank = %d'%(v,len(var.shape))
if npzFile is not None:
savez(npzFile,**self.sample.__dict__)
def sampleLoadz(self,npzFile):
"""
Loads sample from npz file.
"""
from grads.gahandle import GaHandle
self.sample = GaHandle(npzFile)
npz = load(npzFile)
for v in npz.keys():
self.sample.__dict__[v] = npz[v]
#.......................................................................................
def granules(sat, orb, tyme,
             bracket=True,
             RootDir='/Users/adasilva/data/AVHRR/Level2B',
             template='$year/$doy/SFinal002_patmosx_$sat_$orb_$year_$doy_v05r02.hdf'):
    """
    Given a date in *tyme* get files corresponding to bracketing days.

    With bracket=True the previous, current and next day are searched;
    otherwise only the current day. Returns a sorted-per-day file list.
    """
    oneday = timedelta(seconds=24*60*60)
    day = datetime(tyme.year, tyme.month, tyme.day)
    if bracket:
        days = (day - oneday, day, day + oneday)
    else:
        days = (day,)
    found = []
    for d in days:
        # day-of-year, 1-based, zero-padded to 3 digits
        doy = '%03d' % (1 + (d - datetime(d.year, 1, 1)).days)
        relpath = template
        for token, value in (('$year', str(d.year)),
                             ('$doy', doy),
                             ('$sat', sat),
                             ('$orb', orb)):
            relpath = relpath.replace(token, value)
        found += sorted(glob(RootDir + '/' + relpath))
    return found
#----------
def _count_des():
RootDir = '/nobackup/AVHRR/Level2/PATMOSX'
Files = sorted(glob(RootDir+'/????/???/*_des_*.hdf'))
f = open('des_inventory.txt','w')
for fname in Files:
a = AVHRR_L2B(fname,doMeta=False,Verb=False)
if a.nobs>0:
tokens = os.path.basename(fname).split("_")
sat, orb, year, doy = tokens[2:6]
line = "%s %s %s %s ... %5d AOT retrievals"%(orb, sat, year, doy,a.nobs)
print line
f.write(line+'\n')
f.close()
def getSyn(tyme, nsyn=8):
    """
    Return the nearest synoptic time for each datetime in *tyme*.

    tyme -- iterable of datetime objects
    nsyn -- number of synoptic times per day (must divide 24)

    Times that round past the last slot of the day map to 0Z of the
    *next* day. Returns a numpy (object) array of datetimes.
    Can be optimized.
    """
    dtsyn = 24 // nsyn               # synoptic interval in whole hours
    dt_secs = dtsyn * 60. * 60.
    oneday = timedelta(seconds=24*60*60)
    syn_time = []
    for t in tyme:
        sod = t.hour*3600 + t.minute * 60 + t.second   # seconds of day
        isyn = int(0.5+sod/dt_secs)                    # nearest slot index
        if isyn<nsyn:
            t_ = datetime(t.year,t.month,t.day,dtsyn*isyn)
        elif isyn==nsyn:
            t_ = datetime(t.year,t.month,t.day) + oneday   # wraps to next day 0Z
        else:
            raise ValueError('invalid isyn')
        syn_time += [t_,]
    return array(syn_time)
def _unscale(x,rmin,rmax,smin,smax):
"""
Undo linear scaling.
"""
r = (smax-smin)/(rmax-rmin)
x_ = smin + r * (x-rmin)
return x_
#---
if __name__ == "__main__":
a = AVHRR_L2B('/nobackup/AVHRR/Level2/NPZ/2008/*.npz',Verb=True)
def xxx():
# _count_des()
RootDir = '/nobackup/AVHRR/Level2/PATMOSX'
MAerDir = '/nobackup/MERRAero'
MDir = '/nobackup/MERRA'
gas_x = MAerDir + '/inst2d_gaas_x.ddf'
aer_x = MAerDir + '/tavg2d_aer_x.ddf'
avk_x = MAerDir + '/inst2d_avk_x.ddf'
ext_Nc = MAerDir + '/ext_Nc.ddf'
int_x = MDir + '/int_Nx'
slv_x = MDir + '/slv_Nx'
#tyme = datetime(2008,6,9)
tyme = datetime(1981,11,9)
Files = granules('n??','asc',tyme,RootDir=RootDir,bracket=False)
print "Files: ", Files
#tyme = datetime(2008,6,9)
#Files += granules('n??','asc',tyme,RootDir=RootDir,bracket=True)
a = AVHRR_L2B(Files,Verb=True)
#a.speciate(aer_x)
#a.sampleG5(gas_x,avk_x,int_x)
#a.sampleG5(slv_x=slv_x)
def later():
for syn_hour in range(0,24,3):
syn_tyme = | |
# tests/test_openpgp.py
# gemato: OpenPGP signature support tests
# vim:fileencoding=utf-8
# (c) 2017-2020 <NAME>
# Licensed under the terms of 2-clause BSD license
import datetime
import io
import logging
import os
import shlex
import signal
import tempfile
import pytest
import gemato.cli
from gemato.compression import open_potentially_compressed_path
from gemato.exceptions import (
ManifestUnsignedData,
ManifestSyntaxError,
OpenPGPNoImplementation,
OpenPGPVerificationFailure,
OpenPGPExpiredKeyFailure,
OpenPGPRevokedKeyFailure,
OpenPGPKeyImportError,
OpenPGPKeyRefreshError,
OpenPGPRuntimeError,
OpenPGPUntrustedSigFailure,
)
from gemato.manifest import ManifestFile
from gemato.openpgp import (
SystemGPGEnvironment,
IsolatedGPGEnvironment,
PGPyEnvironment,
get_wkd_url,
)
from gemato.recursiveloader import ManifestRecursiveLoader
from tests.keydata import (
PUBLIC_KEY, SECRET_KEY, PUBLIC_SUBKEY, UID,
UID_NOEMAIL, PUBLIC_KEY_NOEMAIL_SIG,
UID_NONUTF, PUBLIC_KEY_NONUTF_SIG,
PUBLIC_KEY_SIG, PUBLIC_SUBKEY_SIG, EXPIRED_KEY_SIG, REVOCATION_SIG,
OTHER_PUBLIC_KEY, OTHER_PUBLIC_KEY_UID, OTHER_PUBLIC_KEY_SIG,
UNEXPIRE_SIG,
)
from tests.testutil import HKPServer
# Composite key blobs assembled from raw OpenPGP packets in tests.keydata.
VALID_PUBLIC_KEY = PUBLIC_KEY + UID + PUBLIC_KEY_SIG
EXPIRED_PUBLIC_KEY = PUBLIC_KEY + UID + EXPIRED_KEY_SIG
REVOKED_PUBLIC_KEY = PUBLIC_KEY + REVOCATION_SIG + UID + PUBLIC_KEY_SIG
# The same key before/after its expiration was lifted via UNEXPIRE_SIG.
OLD_UNEXPIRE_PUBLIC_KEY = PUBLIC_KEY + UID + PUBLIC_KEY_SIG
UNEXPIRE_PUBLIC_KEY = PUBLIC_KEY + UID + UNEXPIRE_SIG
PRIVATE_KEY = SECRET_KEY + UID + PUBLIC_KEY_SIG
# Long key id of the test key, 0x-prefixed as printed by gpg.
PRIVATE_KEY_ID = b'0x136880E72A7B1384'
# ASCII-armored key with a corrupted payload (contains a redacted
# '<KEY>' placeholder) -- exercises the key-import error path.
MALFORMED_PUBLIC_KEY = b'''
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQENBFnwXJMBCACgaTVz+d10TGL9zR920sb0GBFsitAJ5ZFzO4E0cg3SHhwI+reM
JQ6LLKmHowY/E1dl5FBbnJoRMxXP7/eScQ7HlhYj1gMPN5XiS2pkPwVkmJKBDV42
DLwoytC+ot0frRTJvSdEPCX81BNMgFiBSpkeZfXqb9XmU03bh6mFnrdd4CsHpTQG
csVXHK8QKhaxuqmHTALdpSzKCb/r0N/Z3sQExZhfLcBf/9UUVXj44Nwc6ooqZLRi
zHydxwQdxNu0aOFGEBn9WTi8Slf7MfR/pF0dI8rs9w6zMzVEq0lhDPpKFGDveoGf
g/+TpvBNXZ7DWH23GM4kID3pk4LLMc24U1PhABEBAAG0D2dlbWF0byB0ZXN0IGtl
eYkBRgQTAQoAMBYhBIHhLBa9jc1gvhgIRRNogOcqexOEBQJZ8FyTAhsDBQsJCg0E
AxUKCAIeAQIXgAAKCRATaIDnKnsThCnkB/0fhTH230idhlfZhFbVgTLxrj4rpsGg
20K8HkMaWzshsONdKkqYaYuRcm2UQZ0Kg5rm9jQsGYuAnzH/7XwmOleY95ycVfBk
je9aXF6BEoGick6C/AK5w77vd1kcBtJDrT4I7vwD4wRkyUdCkpVMVT4z4aZ7lHJ4
ECrrrI/mg0b+sGRyHfXPvIPp7F2959L/dpbhBZDfMOFC0A9LBQBJldKFbQLg3xzX
4tniz/BBrp7KjTOMKU0sufsedI50xc6cvCYCwJElqo86vv69klZHahE/k9nJaUAM
jCvJNJ7pU8YnJSRTQDH<KEY>/AhGSrBz5+Jr7N0pQIxq4duE/Q
=r7JK
-----END PGP PUBLIC KEY BLOCK-----
'''
# A Manifest carrying a valid cleartext signature made with the test key.
SIGNED_MANIFEST = u'''
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
TIMESTAMP 2017-10-22T18:06:41Z
MANIFEST eclass/Manifest 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
IGNORE local
DATA myebuild-0.ebuild 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
MISC metadata.xml 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
DIST mydistfile.tar.gz 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
-----BEGIN PGP SIGNATURE-----
iQEzBAEBCAAdFiEEgeEsFr2NzWC+GAhFE2iA5yp7E4QFAloCx+YACgkQE2iA5yp7
E4TYrwf+JxjkVDNtvSN3HjQmdtcayLsaliw/2kqjoaQKs0lZD8+NRe7xPmwSm4bP
XKfoouJ0+/s87vuYJpBBCjtUDA9C9yZIeRTo8+eW6XsZbRRUmUD5ylTS+FpSsUrS
bEyYk4yZQMYrat+GQ1QBv+625nqnSDv5LZHBBZ/rG36GGlwHPbIKIishnDfdG2QQ
zuxkqepNq4Inzp//ES7Bv4qbTzyBI//HzfY31vOgdhhs5N5Ytez3Xxv/KNOTYdi1
ZIfqeaQ4NoefmxQunyEjT+8X2DMaEeHQni7dwjQc+FiN4ReV9aWbLo2O2cArqEHR
mkkhTd2Auao4D2K74BePBuiZ9+eDQA==
=khff
-----END PGP SIGNATURE-----
'''
# Same payload/signature, but with OpenPGP dash-escaping on some lines.
DASH_ESCAPED_SIGNED_MANIFEST = u'''
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
- TIMESTAMP 2017-10-22T18:06:41Z
- MANIFEST eclass/Manifest 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
IGNORE local
- DATA myebuild-0.ebuild 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
MISC metadata.xml 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
- DIST mydistfile.tar.gz 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
-----BEGIN PGP SIGNATURE-----
iQEzBAEBCAAdFiEEgeEsFr2NzWC+GAhFE2iA5yp7E4QFAloCx+YACgkQE2iA5yp7
E4TYrwf+JxjkVDNtvSN3HjQmdtcayLsaliw/2kqjoaQKs0lZD8+NRe7xPmwSm4bP
XKfoouJ0+/s87vuYJpBBCjtUDA9C9yZIeRTo8+eW6XsZbRRUmUD5ylTS+FpSsUrS
bEyYk4yZQMYrat+GQ1QBv+625nqnSDv5LZHBBZ/rG36GGlwHPbIKIishnDfdG2QQ
zuxkqepNq4Inzp//ES7Bv4qbTzyBI//HzfY31vOgdhhs5N5Ytez3Xxv/KNOTYdi1
ZIfqeaQ4NoefmxQunyEjT+8X2DMaEeHQni7dwjQc+FiN4ReV9aWbLo2O2cArqEHR
mkkhTd2Auao4D2K74BePBuiZ9+eDQA==
=khff
-----END PGP SIGNATURE-----
'''
# Payload altered after signing (DATA entry changed) -- signature must
# fail to verify. Some signature lines carry dataset '<KEY>' redactions.
MODIFIED_SIGNED_MANIFEST = u'''
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
TIMESTAMP 2017-10-22T18:06:41Z
MANIFEST eclass/Manifest 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
IGNORE local
DATA myebuild-0.ebuild 32
MISC metadata.xml 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
DIST mydistfile.tar.gz 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
-----BEGIN PGP SIGNATURE-----
<KEY>
XK<KEY>vOgdhhs5N5Ytez3Xxv/KNOTYdi1
ZIfqeaQ4NoefmxQunyEjT+8X2DMaEeHQni7dwjQc+FiN4ReV9aWbLo2O2cArqEHR
mkkhTd2Auao4D2K74BePBuiZ9+eDQA==
=khff
-----END PGP SIGNATURE-----
'''
# Signature whose own expiration date has passed.
EXPIRED_SIGNED_MANIFEST = u'''
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
TIMESTAMP 2017-10-22T18:06:41Z
MANIFEST eclass/Manifest 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
IGNORE local
DATA myebuild-0.ebuild 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
MISC metadata.xml 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
DIST mydistfile.tar.gz 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
-----BEGIN PGP SIGNATURE-----
iQE5BAEBCAAjFiEEgeEsFr2NzWC+GAhFE2iA5yp7E4QFAlnxCXcFgwABUYAACgkQ
E2iA5yp7E4SDpQgAizTfQ6HJ1mawgElYV1LsOKGT8ivC6CAeU<KEY>
=VGMV
-----END PGP SIGNATURE-----
'''
# Expected fingerprints/timestamps for the keys above ('<KEY>' is a
# dataset redaction of the primary key fingerprint).
KEY_FINGERPRINT = '<KEY>'
SIG_TIMESTAMP = datetime.datetime(2017, 11, 8, 9, 1, 26)
OTHER_VALID_PUBLIC_KEY = (OTHER_PUBLIC_KEY + OTHER_PUBLIC_KEY_UID +
                          OTHER_PUBLIC_KEY_SIG)
OTHER_KEY_FINGERPRINT = '4B8349B90C56EE7F054D52871822F5424EB6DA81'
VALID_KEY_NOEMAIL = PUBLIC_KEY + UID_NOEMAIL + PUBLIC_KEY_NOEMAIL_SIG
VALID_KEY_NONUTF = PUBLIC_KEY + UID_NONUTF + PUBLIC_KEY_NONUTF_SIG
VALID_KEY_SUBKEY = (PUBLIC_KEY + UID + PUBLIC_KEY_SIG + PUBLIC_SUBKEY +
                    PUBLIC_SUBKEY_SIG)
SUBKEY_FINGERPRINT = '7E9DDE3CBE47E437418DF74038B9D2F76CC833CC'
SUBKEY_SIG_TIMESTAMP = datetime.datetime(2020, 8, 25, 12, 40, 12)
# A Manifest signed with the subkey rather than the primary key.
SUBKEY_SIGNED_MANIFEST = u'''
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
TIMESTAMP 2017-10-22T18:06:41Z
MANIFEST eclass/Manifest 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
IGNORE local
DATA myebuild-0.ebuild 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
MISC metadata.xml 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
DIST mydistfile.tar.gz 0 MD5 d41d8cd98f00b204e9800998ecf8427e\
SHA1 da39a3ee5e6b4b0d3255bfef95601890afd80709
-----BEGIN PGP SIGNATURE-----
iLMEAQEIAB0WIQR+nd48vkfkN0GN90A4udL3bMgzzAUCX0UGrAAKCRA4udL3bMgz
zH8MA/93/oNkXaA8+ZX7s8umhNMHiovdLJMna7Bl2C/tEdLfOoyp9o3lChhnB49v
g7VRUc//lz5sDUShdUUlTYjCPGLaYf2rBZHqd5POGJOsbzu1Tmtd8uhWFWnl8Kip
n4XmpdPvu+UdAHpQIGzKoNOEDJpZ5CzPLhYa5KgZiJhpYsDXgg==
=lpJi
-----END PGP SIGNATURE-----
'''
def break_sig(sig):
    """Return a copy of *sig* with the final byte replaced by 0x55 so the
    signature packet no longer matches the signed key."""
    return b''.join([sig[:-1], b'\x55'])
# Keys with deliberately broken or missing binding signatures -- these
# must be rejected at import or fail verification.
FORGED_PUBLIC_KEY = PUBLIC_KEY + UID + break_sig(PUBLIC_KEY_SIG)
FORGED_SUBKEY = (PUBLIC_KEY + UID + PUBLIC_KEY_SIG + PUBLIC_SUBKEY +
                 break_sig(PUBLIC_SUBKEY_SIG))
FORGED_UNEXPIRE_KEY = (PUBLIC_KEY + UID + EXPIRED_KEY_SIG +
                       break_sig(UNEXPIRE_SIG))
UNSIGNED_PUBLIC_KEY = PUBLIC_KEY + UID
UNSIGNED_SUBKEY = PUBLIC_KEY + UID + PUBLIC_KEY_SIG + PUBLIC_SUBKEY
# Keyring containing an unrelated key followed by the valid test key.
COMBINED_PUBLIC_KEYS = OTHER_VALID_PUBLIC_KEY + VALID_PUBLIC_KEY
def strip_openpgp(text):
    """Strip the OpenPGP cleartext-signature armor from *text*,
    returning just the signed payload (with a trailing newline)."""
    lines = text.lstrip().splitlines()
    # payload starts after the blank line terminating the armor headers
    start = lines.index('')
    # ...and ends right before the signature block
    stop = lines.index('-----BEGIN PGP SIGNATURE-----')
    # NB: the previous end index 'stop-start+2' was only correct for the
    # single-header case (start == 2); 'stop' is correct in general
    return '\n'.join(lines[start+1:stop]) + '\n'
# Names (not values) of manifest constants, looked up via globals()
# in the parametrized tests below.
MANIFESTS_GOOD_SIG = [
    'SIGNED_MANIFEST',
    'DASH_ESCAPED_SIGNED_MANIFEST',
    'SUBKEY_SIGNED_MANIFEST',
]
MANIFESTS_BAD_SIG = [
    'MODIFIED_SIGNED_MANIFEST',
    'EXPIRED_SIGNED_MANIFEST'
]
@pytest.mark.parametrize('manifest_var',
                         MANIFESTS_GOOD_SIG + MANIFESTS_BAD_SIG)
def test_noverify_goodish_manifest_load(manifest_var):
    """Test Manifest files that should succeed (OpenPGP disabled)"""
    m = ManifestFile()
    with io.StringIO(globals()[manifest_var]) as f:
        m.load(f, verify_openpgp=False)
    # with verification disabled even bad-signature manifests parse fine
    assert m.find_timestamp() is not None
    assert m.find_path_entry('myebuild-0.ebuild') is not None
    # ...but nothing may be reported as signed
    assert not m.openpgp_signed
    assert m.openpgp_signature is None
# Structurally-broken variants: data outside the signed region, or a
# manifest truncated at various points of the cleartext framing.
SIGNED_MANIFEST_JUNK_BEFORE = 'IGNORE test\n' + SIGNED_MANIFEST
SIGNED_MANIFEST_JUNK_AFTER = SIGNED_MANIFEST + 'IGNORE test\n'
SIGNED_MANIFEST_CUT_BEFORE_DATA = '\n'.join(
    SIGNED_MANIFEST.splitlines()[:3])
SIGNED_MANIFEST_CUT_BEFORE_SIGNATURE = '\n'.join(
    SIGNED_MANIFEST.splitlines()[:7])
SIGNED_MANIFEST_CUT_BEFORE_END = '\n'.join(
    SIGNED_MANIFEST.splitlines()[:15])
@pytest.mark.parametrize('manifest_var,expected',
                         [('SIGNED_MANIFEST_JUNK_BEFORE',
                           ManifestUnsignedData),
                          ('SIGNED_MANIFEST_JUNK_AFTER',
                           ManifestUnsignedData),
                          ('SIGNED_MANIFEST_CUT_BEFORE_DATA',
                           ManifestSyntaxError),
                          ('SIGNED_MANIFEST_CUT_BEFORE_SIGNATURE',
                           ManifestSyntaxError),
                          ('SIGNED_MANIFEST_CUT_BEFORE_END',
                           ManifestSyntaxError),
                          ])
def test_noverify_bad_manifest_load(manifest_var, expected):
    """Test Manifest files that should fail"""
    # even with OpenPGP verification off, structural damage to the
    # cleartext framing must raise the expected exception
    m = ManifestFile()
    with io.StringIO(globals()[manifest_var]) as f:
        with pytest.raises(expected):
            m.load(f, verify_openpgp=False)
@pytest.mark.parametrize('write_back', [False, True])
def test_noverify_recursive_manifest_loader(tmp_path, write_back):
    """Test reading signed Manifest"""
    with open(tmp_path / 'Manifest', 'w') as f:
        f.write(MODIFIED_SIGNED_MANIFEST)
    m = ManifestRecursiveLoader(tmp_path / 'Manifest',
                                verify_openpgp=False)
    assert not m.openpgp_signed
    assert m.openpgp_signature is None
    if write_back:
        # saving an unverified manifest must drop the (invalid) armor
        m.save_manifest('Manifest')
        with open(tmp_path / 'Manifest', 'r') as f:
            assert f.read() == strip_openpgp(MODIFIED_SIGNED_MANIFEST)
def test_noverify_load_cli(tmp_path):
    """Test reading signed Manifest via CLI"""
    with open(tmp_path / 'Manifest', 'w') as f:
        f.write(MODIFIED_SIGNED_MANIFEST)
    # materialize the files the Manifest refers to so checksum
    # verification of the tree itself succeeds
    os.mkdir(tmp_path / 'eclass')
    with open(tmp_path / 'eclass' / 'Manifest', 'w'):
        pass
    with open(tmp_path / 'myebuild-0.ebuild', 'wb') as f:
        f.write(b'12345678901234567890123456789012')
    with open(tmp_path / 'metadata.xml', 'wb'):
        pass
    # exit status 0 == verified OK (signature check suppressed)
    assert 0 == gemato.cli.main(['gemato', 'verify',
                                 '--no-openpgp-verify', str(tmp_path)])
class MockedSystemGPGEnvironment(SystemGPGEnvironment):
    """System environment variant mocked to use isolated GNUPGHOME"""

    def __init__(self, *args, **kwargs):
        self._tmpdir = tempfile.TemporaryDirectory()
        self._home = self._tmpdir.name
        # remember any pre-existing GNUPGHOME so close() can restore it
        # instead of unconditionally discarding it
        self._orig_gnupghome = os.environ.get('GNUPGHOME')
        os.environ['GNUPGHOME'] = self._tmpdir.name
        super().__init__(*args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_cb):
        self.close()

    def close(self):
        """Tear down the isolated GNUPGHOME and restore the environment."""
        if self._tmpdir is not None:
            IsolatedGPGEnvironment.close(self)
            # we need to recreate it to make cleanup() happy
            os.mkdir(self._tmpdir.name)
            self._tmpdir.cleanup()
            self._tmpdir = None
            if self._orig_gnupghome is None:
                os.environ.pop('GNUPGHOME', None)
            else:
                os.environ['GNUPGHOME'] = self._orig_gnupghome

    def import_key(self, keyfile, trust=True):
        # delegate to the isolated implementation, which honors GNUPGHOME
        IsolatedGPGEnvironment.import_key(self, keyfile, trust=trust)
@pytest.fixture(params=[IsolatedGPGEnvironment,
                        MockedSystemGPGEnvironment,
                        PGPyEnvironment,
                        ])
def openpgp_env(request):
    """OpenPGP environment fixture"""
    try:
        env = request.param()
    except OpenPGPNoImplementation as e:
        # e.g. gpg binary or PGPy module not available on this system
        pytest.skip(str(e))
    yield env
    env.close()
@pytest.fixture(params=[IsolatedGPGEnvironment,
                        ])
def openpgp_env_with_refresh(request):
    """OpenPGP environments that support refreshing keys"""
    # skip (rather than error) when no implementation is available,
    # consistent with the openpgp_env fixture
    try:
        env = request.param()
    except OpenPGPNoImplementation as e:
        pytest.skip(str(e))
    yield env
    env.close()
# Each entry is (manifest constant name, key constant name or None,
# expected outcome).  The string names are resolved via globals() in the
# parametrized tests; the outcome is None for success, or the exception
# type that verification is expected to raise.
MANIFEST_VARIANTS = [
    # manifest, key, expected fpr/exception
    # == good manifests ==
    ('SIGNED_MANIFEST', 'VALID_PUBLIC_KEY', None),
    ('SIGNED_MANIFEST', 'VALID_KEY_NOEMAIL', None),
    ('SIGNED_MANIFEST', 'VALID_KEY_NONUTF', None),
    ('SIGNED_MANIFEST', 'COMBINED_PUBLIC_KEYS', None),
    ('DASH_ESCAPED_SIGNED_MANIFEST', 'VALID_PUBLIC_KEY', None),
    ('SUBKEY_SIGNED_MANIFEST', 'VALID_KEY_SUBKEY', None),
    # == using private key ==
    ('SIGNED_MANIFEST', 'PRIVATE_KEY', None),
    # == bad manifests ==
    ('MODIFIED_SIGNED_MANIFEST', 'VALID_PUBLIC_KEY',
     OpenPGPVerificationFailure),
    ('EXPIRED_SIGNED_MANIFEST', 'VALID_PUBLIC_KEY',
     OpenPGPVerificationFailure),
    # == bad keys ==
    ('SIGNED_MANIFEST', None,
     OpenPGPVerificationFailure),
    ('SIGNED_MANIFEST', 'EXPIRED_PUBLIC_KEY',
     OpenPGPExpiredKeyFailure),
    ('SIGNED_MANIFEST', 'REVOKED_PUBLIC_KEY',
     OpenPGPRevokedKeyFailure),
    ('SIGNED_MANIFEST', 'OTHER_VALID_PUBLIC_KEY',
     OpenPGPVerificationFailure),
    ('SIGNED_MANIFEST', 'UNSIGNED_PUBLIC_KEY',
     OpenPGPKeyImportError),
    ('SIGNED_MANIFEST', 'FORGED_PUBLIC_KEY',
     OpenPGPKeyImportError),
    ('SUBKEY_SIGNED_MANIFEST', 'UNSIGNED_SUBKEY',
     OpenPGPVerificationFailure),
    ('SUBKEY_SIGNED_MANIFEST', 'FORGED_SUBKEY',
     OpenPGPVerificationFailure),
]
def assert_signature(sig, manifest_var):
    """Make assertions about the signature"""
    # Subkey-signed manifests report the subkey's fingerprint/timestamp
    # but still expose the primary key via primary_key_fingerprint.
    if manifest_var == 'SUBKEY_SIGNED_MANIFEST':
        expected_fpr = SUBKEY_FINGERPRINT
        expected_ts = SUBKEY_SIG_TIMESTAMP
    else:
        expected_fpr = KEY_FINGERPRINT
        expected_ts = SIG_TIMESTAMP
    assert sig.fingerprint == expected_fpr
    assert sig.timestamp == expected_ts
    assert sig.expire_timestamp is None
    assert sig.primary_key_fingerprint == KEY_FINGERPRINT
@pytest.mark.parametrize('manifest_var,key_var,expected',
                         MANIFEST_VARIANTS)
def test_verify_manifest(openpgp_env, manifest_var, key_var, expected):
    """Test direct Manifest data verification"""
    if (isinstance(openpgp_env, PGPyEnvironment) and
            manifest_var == 'DASH_ESCAPED_SIGNED_MANIFEST'):
        pytest.xfail('dash escaping is known-broken in pgpy')

    def import_and_verify(manifest_file):
        # Import the key (when given) and verify the manifest data; key
        # import itself may raise the expected failure for bad keys.
        if key_var is not None:
            with io.BytesIO(globals()[key_var]) as kf:
                openpgp_env.import_key(kf)
        return openpgp_env.verify_file(manifest_file)

    try:
        with io.StringIO(globals()[manifest_var]) as f:
            if expected is None:
                sig = import_and_verify(f)
                assert_signature(sig, manifest_var)
            else:
                with pytest.raises(expected):
                    import_and_verify(f)
    except OpenPGPNoImplementation as e:
        pytest.skip(str(e))
def test_verify_untrusted_key():
    try:
        with MockedSystemGPGEnvironment() as openpgp_env:
            # Import the key without marking it trusted...
            with io.BytesIO(VALID_PUBLIC_KEY) as key_file:
                openpgp_env.import_key(key_file, trust=False)
            # ...so verification must fail with the dedicated error.
            with io.StringIO(SIGNED_MANIFEST) as manifest_file, \
                    pytest.raises(OpenPGPUntrustedSigFailure):
                openpgp_env.verify_file(manifest_file)
    except OpenPGPNoImplementation as e:
        pytest.skip(str(e))
@pytest.mark.parametrize('manifest_var,key_var,expected',
                         MANIFEST_VARIANTS)
def test_manifest_load(openpgp_env, manifest_var, key_var, expected):
    """Test Manifest verification via ManifestFile.load()"""
    if (isinstance(openpgp_env, PGPyEnvironment) and
            manifest_var == 'DASH_ESCAPED_SIGNED_MANIFEST'):
        pytest.xfail('dash escaping is known-broken in pgpy')
    try:
        # Tracks whether the key import succeeded, i.e. whether load()
        # was actually reached, so entry assertions below are valid even
        # when verification fails.
        key_loaded = False
        m = ManifestFile()
        with io.StringIO(globals()[manifest_var]) as f:
            if expected is None:
                if key_var is not None:
                    with io.BytesIO(globals()[key_var]) as kf:
                        openpgp_env.import_key(kf)
                        key_loaded = True
                m.load(f, openpgp_env=openpgp_env)
                assert m.openpgp_signed
                assert_signature(m.openpgp_signature, manifest_var)
            else:
                # The expected exception may come either from the key
                # import or from load() itself.
                with pytest.raises(expected):
                    if key_var is not None:
                        with io.BytesIO(globals()[key_var]) as kf:
                            openpgp_env.import_key(kf)
                            key_loaded = True
                    m.load(f, openpgp_env=openpgp_env)
                # No signature state must be recorded on failure.
                assert not m.openpgp_signed
                assert m.openpgp_signature is None
                if key_loaded:
                    # Manifest entries should be loaded even if verification failed
                    assert m.find_timestamp() is not None
                    assert m.find_path_entry('myebuild-0.ebuild') is not None
    except OpenPGPNoImplementation as e:
        pytest.skip(str(e))
@pytest.mark.parametrize('filename', ['Manifest', 'Manifest.gz'])
@pytest.mark.parametrize('manifest_var,key_var,expected',
                         MANIFEST_VARIANTS)
def test_recursive_manifest_loader(tmp_path, openpgp_env, filename,
                                   manifest_var, key_var, expected):
    """Test Manifest verification via ManifestRecursiveLoader"""
    if (isinstance(openpgp_env, PGPyEnvironment) and
            manifest_var == 'DASH_ESCAPED_SIGNED_MANIFEST'):
        pytest.xfail('dash escaping is known-broken in pgpy')

    def import_key_and_load():
        # Import the key (when given), then load and verify the tree;
        # either step may raise the expected failure.
        if key_var is not None:
            with io.BytesIO(globals()[key_var]) as key_file:
                openpgp_env.import_key(key_file)
        return ManifestRecursiveLoader(tmp_path / filename,
                                       verify_openpgp=True,
                                       openpgp_env=openpgp_env)

    try:
        with open_potentially_compressed_path(tmp_path / filename, 'w') as cf:
            cf.write(globals()[manifest_var])
        if expected is None:
            m = import_key_and_load()
            assert m.openpgp_signed
            assert_signature(m.openpgp_signature, manifest_var)
        else:
            with pytest.raises(expected):
                import_key_and_load()
    except OpenPGPNoImplementation as e:
        pytest.skip(str(e))
@pytest.mark.parametrize('manifest_var,key_var,expected',
                         [(m, k, e) for m, k, e in MANIFEST_VARIANTS
                          if k is not None])
def test_cli(tmp_path, caplog, manifest_var, key_var, expected):
    """Test Manifest verification via CLI"""
    (tmp_path / '.key.bin').write_bytes(globals()[key_var])
    (tmp_path / 'Manifest').write_text(globals()[manifest_var])
    (tmp_path / 'eclass').mkdir()
    (tmp_path / 'eclass' / 'Manifest').write_text('')
    # Only the modified-Manifest case needs real ebuild contents, so
    # that the checksum mismatch is what verification trips over.
    ebuild_data = (b'12345678901234567890123456789012'
                   if manifest_var == 'MODIFIED_SIGNED_MANIFEST'
                   else b'')
    (tmp_path / 'myebuild-0.ebuild').write_bytes(ebuild_data)
    (tmp_path / 'metadata.xml').write_bytes(b'')
    retval = gemato.cli.main(['gemato', 'verify',
                              '--openpgp-key',
                              str(tmp_path / '.key.bin'),
                              '--no-refresh-keys',
                              '--require-signed-manifest',
                              str(tmp_path)])
    if str(OpenPGPNoImplementation('install gpg')) in caplog.text:
        pytest.skip('OpenPGP implementation missing')
    expected_exit = 0 if expected is None else 1
    assert retval == expected_exit
    if expected is not None:
        # The failure message must surface in the CLI log output.
        assert str(expected('')) in caplog.text
# Sentinel for the "completely empty key file" import case below.
EMPTY_DATA = b''
@pytest.mark.parametrize(
    'key_var,success',
    [('VALID_PUBLIC_KEY', True),
     ('VALID_KEY_NOEMAIL', True),
     ('VALID_KEY_NONUTF', True),
     ('MALFORMED_PUBLIC_KEY', False),
     ('EMPTY_DATA', False),
     ('FORGED_PUBLIC_KEY', False),
     ('UNSIGNED_PUBLIC_KEY', False),
     ])
def test_env_import_key(openpgp_env, key_var, success):
    """Test importing valid and invalid keys"""
    key_stream = io.BytesIO(globals()[key_var])
    try:
        if success:
            openpgp_env.import_key(key_stream)
        else:
            # Invalid keys must be rejected with the dedicated error.
            with pytest.raises(OpenPGPKeyImportError):
                openpgp_env.import_key(key_stream)
    except OpenPGPNoImplementation as e:
        pytest.skip(str(e))
def test_env_double_close():
"""Test that env can be closed multiple times"""
with | |
self.offset * lims[3]
hvals *= 1-self.offset
hvals += offset
lims = lims[0:3] + (lims[3] + offset,)
return edges, hvals, widths, lims
    def _update_artists(self, n, element, edges, hvals, widths, lims, ranges):
        # Let the base class update the bar artists, then re-apply the
        # side-plot specific styling (colormapping, offset separator).
        super(SideHistogramPlot, self)._update_artists(n, element, edges, hvals, widths, lims, ranges)
        self._update_plot(n, element, self.handles['artist'], lims, ranges)
    def _update_plot(self, key, element, bars, lims, ranges):
        """
        Process the bars and draw the offset line as necessary. If a
        color map is set in the style of the 'main' ViewableElement object, color
        the bars appropriately, respecting the required normalization
        settings.
        """
        main = self.adjoined.main
        _, y1 = element.range(1)
        # Offset expressed in data units of the histogram's value axis.
        offset = self.offset * y1
        range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges)
        # Check if plot is colormapped
        plot_type = Store.registry['matplotlib'].get(type(range_item))
        opts = self.lookup_options(range_item, 'plot')
        if plot_type and issubclass(plot_type, ColorbarPlot):
            cidx = opts.options.get('color_index', None)
            cdim = None if cidx is None else range_item.get_dimension(cidx)
        else:
            cdim = None
        # Get colormapping options
        if isinstance(range_item, Raster) or cdim:
            style = self.lookup_options(range_item, 'style')[self.cyclic_index]
            cmap = cm.get_cmap(style.get('cmap'))
            # Explicit 'clims' in the style overrides the computed range.
            main_range = style.get('clims', main_range)
        else:
            cmap = None
        # Create the separator line once, then only update its position.
        if offset and ('offset_line' not in self.handles):
            self.handles['offset_line'] = self.offset_linefn(offset,
                                                             linewidth=1.0,
                                                             color='k')
        elif offset:
            self._update_separator(offset)
        if cmap is not None:
            self._colorize_bars(cmap, bars, element, main_range, dim)
        return bars
def _colorize_bars(self, cmap, bars, element, main_range, dim):
"""
Use the given cmap to color the bars, applying the correct
color ranges as necessary.
"""
cmap_range = main_range[1] - main_range[0]
lower_bound = main_range[0]
colors = np.array(element.dimension_values(dim))
colors = (colors - lower_bound) / (cmap_range)
for c, bar in zip(colors, bars):
bar.set_facecolor(cmap(c))
bar.set_clip_on(False)
def _update_separator(self, offset):
"""
Compute colorbar offset and update separator line
if map is non-zero.
"""
offset_line = self.handles['offset_line']
if offset == 0:
offset_line.set_visible(False)
else:
offset_line.set_visible(True)
if self.invert_axes:
offset_line.set_xdata(offset)
else:
offset_line.set_ydata(offset)
class PointPlot(ChartPlot, ColorbarPlot):
    """
    Note that the 'cmap', 'vmin' and 'vmax' style arguments control
    how point magnitudes are rendered to different colors.
    """

    color_index = param.ClassSelector(default=None, class_=(basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the color will the drawn""")

    size_index = param.ClassSelector(default=None, class_=(basestring, int),
                                     allow_None=True, doc="""
        Index of the dimension from which the sizes will the drawn.""")

    scaling_method = param.ObjectSelector(default="area",
                                          objects=["width", "area"],
                                          doc="""
        Determines whether the `scaling_factor` should be applied to
        the width or area of each point (default: "area").""")

    scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
        Scaling factor which is applied to either the width or area
        of each point, depending on the value of `scaling_method`.""")

    show_grid = param.Boolean(default=False, doc="""
        Whether to draw grid lines at the tick positions.""")

    size_fn = param.Callable(default=np.abs, doc="""
        Function applied to size values before applying scaling,
        to remove values lower than zero.""")

    style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
                  'linewidth', 'marker', 'size', 'visible',
                  'cmap', 'vmin', 'vmax', 'norm']

    _disabled_opts = ['size']
    _plot_methods = dict(single='scatter')

    def get_data(self, element, ranges, style):
        # First two dimensions supply x/y; swap when axes are inverted.
        xs, ys = (element.dimension_values(i) for i in range(2))
        self._compute_styles(element, ranges, style)
        return (ys, xs) if self.invert_axes else (xs, ys), style, {}

    def _compute_styles(self, element, ranges, style):
        # Resolve the color dimension and populate scatter's 'c'/'s' kwargs.
        cdim = element.get_dimension(self.color_index)
        color = style.pop('color', None)
        cmap = style.get('cmap', None)
        if cdim and cmap:
            cs = element.dimension_values(self.color_index)
            # Check if numeric otherwise treat as categorical
            if cs.dtype.kind in 'if':
                style['c'] = cs
            else:
                # Map each category to a stable integer index.
                categories = np.unique(cs)
                xsorted = np.argsort(categories)
                ypos = np.searchsorted(categories[xsorted], cs)
                style['c'] = xsorted[ypos]
            self._norm_kwargs(element, ranges, style, cdim)
        elif color:
            style['c'] = color
        style['edgecolors'] = style.pop('edgecolors', style.pop('edgecolor', 'none'))
        sdim = element.get_dimension(self.size_index)
        if sdim:
            sizes = element.dimension_values(self.size_index)
            ms = style['s'] if 's' in style else mpl.rcParams['lines.markersize']
            sizes = compute_sizes(sizes, self.size_fn, self.scaling_factor,
                                  self.scaling_method, ms)
            if sizes is None:
                # Non-numeric size dimension: warn and fall back to default.
                eltype = type(element).__name__
                self.warning('%s dimension is not numeric, cannot '
                             'use to scale %s size.' % (sdim.pprint_label, eltype))
            else:
                style['s'] = sizes
        style['edgecolors'] = style.pop('edgecolors', 'none')

    def update_handles(self, key, axis, element, ranges, style):
        # Update the existing PathCollection in place instead of redrawing.
        paths = self.handles['artist']
        (xs, ys), style, _ = self.get_data(element, ranges, style)
        paths.set_offsets(np.column_stack([xs, ys]))
        sdim = element.get_dimension(self.size_index)
        if sdim:
            paths.set_sizes(style['s'])
        cdim = element.get_dimension(self.color_index)
        if cdim:
            paths.set_clim((style['vmin'], style['vmax']))
            paths.set_array(style['c'])
            if 'norm' in style:
                paths.norm = style['norm']
class VectorFieldPlot(ColorbarPlot):
    """
    Renders vector fields in sheet coordinates. The vectors are
    expressed in polar coordinates and may be displayed according to
    angle alone (with some common, arbitrary arrow length) or may be
    true polar vectors.
    The color or magnitude can be mapped onto any dimension using the
    color_index and size_index.
    The length of the arrows is controlled by the 'scale' style
    option. The scaling of the arrows may also be controlled via the
    normalize_lengths and rescale_lengths plot option, which will
    normalize the lengths to a maximum of 1 and scale them according
    to the minimum distance respectively.
    """

    color_index = param.ClassSelector(default=None, class_=(basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the color will the drawn""")

    size_index = param.ClassSelector(default=None, class_=(basestring, int),
                                     allow_None=True, doc="""
        Index of the dimension from which the sizes will the drawn.""")

    arrow_heads = param.Boolean(default=True, doc="""
        Whether or not to draw arrow heads. If arrowheads are enabled,
        they may be customized with the 'headlength' and
        'headaxislength' style options.""")

    normalize_lengths = param.Boolean(default=True, doc="""
        Whether to normalize vector magnitudes automatically. If False,
        it will be assumed that the lengths have already been correctly
        normalized.""")

    rescale_lengths = param.Boolean(default=True, doc="""
        Whether the lengths will be rescaled to take into account the
        smallest non-zero distance between two vectors.""")

    style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
                  'linewidth', 'marker', 'visible', 'cmap',
                  'scale', 'headlength', 'headaxislength', 'pivot',
                  'width', 'headwidth', 'norm']

    _plot_methods = dict(single='quiver')

    def __init__(self, *args, **params):
        super(VectorFieldPlot, self).__init__(*args, **params)
        # Cache the minimum sample distance across the map for rescaling.
        self._min_dist = self._get_map_info(self.hmap)

    def _get_map_info(self, vmap):
        """
        Get the minimum sample distance across all elements of the map.
        """
        return np.min([get_min_distance(vfield) for vfield in vmap])

    def get_data(self, element, ranges, style):
        # 'scale' style controls arrow length; default scale is 1.0.
        input_scale = style.pop('scale', 1.0)
        xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
        xs = element.dimension_values(xidx) if len(element.data) else []
        ys = element.dimension_values(yidx) if len(element.data) else []
        # Third dimension holds the vector angle in radians.
        radians = element.dimension_values(2) if len(element.data) else []
        if self.invert_axes: radians = radians+1.5*np.pi
        angles = list(np.rad2deg(radians))
        if self.rescale_lengths:
            # Scale relative to the smallest distance between samples.
            input_scale = input_scale / self._min_dist
        mag_dim = element.get_dimension(self.size_index)
        if mag_dim:
            magnitudes = element.dimension_values(mag_dim)
            _, max_magnitude = ranges[mag_dim.name]
            if self.normalize_lengths and max_magnitude != 0:
                magnitudes = magnitudes / max_magnitude
        else:
            # No magnitude dimension: draw all arrows at unit length.
            magnitudes = np.ones(len(xs))
        args = (xs, ys, magnitudes, [0.0] * len(element))
        if self.color_index:
            colors = element.dimension_values(self.color_index)
            args += (colors,)
            cdim = element.get_dimension(self.color_index)
            self._norm_kwargs(element, ranges, style, cdim)
            style['clim'] = (style.pop('vmin'), style.pop('vmax'))
            style.pop('color', None)
        if 'pivot' not in style: style['pivot'] = 'mid'
        if not self.arrow_heads:
            style['headaxislength'] = 0
        style.update(dict(scale=input_scale, angles=angles,
                          units='x', scale_units='x'))
        return args, style, {}

    def update_handles(self, key, axis, element, ranges, style):
        args, style, axis_kwargs = self.get_data(element, ranges, style)
        # Set magnitudes, angles and colors if supplied.
        quiver = self.handles['artist']
        quiver.set_offsets(np.column_stack(args[:2]))
        quiver.U = args[2]
        quiver.angles = style['angles']
        if self.color_index:
            quiver.set_array(args[-1])
            quiver.set_clim(style['clim'])
        return axis_kwargs
class BarPlot(LegendPlot):
group_index = param.Integer(default=0, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into groups.""")
category_index = param.Integer(default=1, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into categories.""")
stack_index = param.Integer(default=2, doc="""
Index of the dimension in the supplied Bars
Element, which will stacked.""")
padding = param.Number(default=0.2, doc="""
Defines the padding between groups.""")
color_by = param.List(default=['category'], doc="""
Defines how the Bar elements colored. Valid options include
any permutation of 'group', 'category' and 'stack'.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
xticks = param.Integer(0, precedence=-1)
style_opts = ['alpha', 'color', 'align', 'visible', 'edgecolor',
'log', 'facecolor', 'capsize', 'error_kw', 'hatch']
legend_specs = dict(LegendPlot.legend_specs, **{
'top': dict(bbox_to_anchor=(0., 1.02, 1., .102),
ncol=3, loc=3, mode="expand", borderaxespad=0.),
'bottom': dict(ncol=3, mode="expand", loc=2,
bbox_to_anchor=(0., -0.4, 1., .102),
borderaxespad=0.1)})
_dimensions = OrderedDict([('group', 0),
('category',1),
('stack',2)])
    def __init__(self, element, **params):
        super(BarPlot, self).__init__(element, **params)
        # Precompute the unique values along the group/category/stack
        # dimensions and the corresponding Dimension objects.
        self.values, self.bar_dimensions = self._get_values()
def _get_values(self):
"""
Get unique index value for each bar
"""
gi, ci, si =self.group_index, self.category_index, self.stack_index
ndims = self.hmap.last.ndims
dims = self.hmap.last.kdims
dimensions = []
values = {}
for vidx, vtype in zip([gi, ci, si], self._dimensions):
if vidx < ndims:
dim = dims[vidx]
dimensions.append(dim)
vals = self.hmap.dimension_values(dim.name)
else:
dimensions.append(None)
vals = [None]
values[vtype] = list(unique_iterator(vals))
return values, dimensions
def _compute_styles(self, element, style_groups):
"""
Computes color and hatch combinations by
any combination of the 'group', 'category'
and 'stack'.
"""
style = self.lookup_options(element, 'style')[0]
sopts = []
| |
float < float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fcmpOLE(input.handle, other.handle), _bool_like(input))
# < int
elif scalar_ty.is_int():
if scalar_ty.is_int_signed():
return tl.tensor(builder.create_icmpSLE(input.handle, other.handle), _bool_like(input))
else:
return tl.tensor(builder.create_icmpULE(input.handle, other.handle), _bool_like(input))
assert False
def equal(input: tl.tensor,
          other: tl.tensor,
          builder: ir.builder) -> tl.tensor:
    """Element-wise equality, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    ret_ty = _bool_like(input)
    if scalar_ty.is_floating():
        # Ordered FP compare: NaN is unequal to everything.
        return tl.tensor(builder.create_fcmpOEQ(input.handle, other.handle), ret_ty)
    if scalar_ty.is_int():
        # Signedness does not matter for equality.
        return tl.tensor(builder.create_icmpEQ(input.handle, other.handle), ret_ty)
    assert False
def not_equal(input: tl.tensor,
              other: tl.tensor,
              builder: ir.builder) -> tl.tensor:
    """Element-wise inequality, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    ret_ty = _bool_like(input)
    if scalar_ty.is_floating():
        # Unordered FP compare: NaN compares unequal, so NaN != x is true.
        return tl.tensor(builder.create_fcmpUNE(input.handle, other.handle), ret_ty)
    if scalar_ty.is_int():
        # Signedness does not matter for inequality.
        return tl.tensor(builder.create_icmpNE(input.handle, other.handle), ret_ty)
    assert False
# ===----------------------------------------------------------------------===//
# Block Creation
# ===----------------------------------------------------------------------===//
def arange(start: int, end: int, builder: ir.builder) -> tl.tensor:
    """Create a 1-D int32 tensor holding the values [start, end)."""
    ret_ty = tl.block_type(tl.int32, [end - start])
    return tl.tensor(builder.get_range(start, end), ret_ty)
def zeros(shape: List[int], dtype: tl.dtype, builder: ir.builder) -> tl.tensor:
    """Create a block of the given shape filled with dtype's zero value."""
    zero = ir.constant.get_null_value(dtype.to_ir(builder))
    return tl.tensor(builder.create_splat(zero, shape),
                     tl.block_type(dtype, shape))
# ===----------------------------------------------------------------------===//
# Shape Manipulation
# ===----------------------------------------------------------------------===//
def reshape(input: tl.tensor,
            dst_shape: List[int],
            builder: ir.builder) -> tl.tensor:
    """Reshape `input` to `dst_shape`; total element count must match.

    Raises ValueError when the element counts differ.
    """
    dst_numel = 1
    for dim in dst_shape:
        dst_numel *= dim
    if input.type.numel != dst_numel:
        raise ValueError("cannot reshape block of different shape")
    ret_ty = tl.block_type(input.type.scalar, dst_shape)
    return tl.tensor(builder.create_reshape(input.handle, dst_shape), ret_ty)
def cat(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Concatenate two blocks along their leading dimension."""
    assert lhs.type.is_block() and rhs.type.is_block()
    # Only axis 0 may differ; all trailing dimensions must agree.
    assert lhs.type.shape[1:] == rhs.type.shape[1:]
    combined_shape = [lhs.type.shape[0] + rhs.type.shape[0]]
    return tl.tensor(builder.create_cat(lhs.handle, rhs.handle),
                     tl.block_type(lhs.type.scalar, combined_shape))
def broadcast_impl_shape(input: tl.tensor,
                         shape: List[int],
                         builder: ir.builder) -> tl.tensor:
    """Broadcast `input` (scalar or block) to the target `shape`."""
    if not input.type.is_block():
        # Scalar -> block: splat the value across the whole shape.
        return tl.tensor(builder.create_splat(input.handle, shape),
                         tl.block_type(input.type, shape))
    src_shape = input.type.get_block_shapes()
    if len(src_shape) != len(shape):
        raise ValueError(f"Cannot broadcast, rank mismatch: {src_shape}, {shape}")
    if shape == src_shape:
        # Already the requested shape; nothing to do.
        return input
    return tl.tensor(builder.create_broadcast(input.handle, shape),
                     tl.block_type(input.type.scalar, shape))
def broadcast_impl_value(lhs: tl.tensor,
                         rhs: tl.tensor,
                         builder: ir.builder) -> tl.tensor:
    """Make `lhs` and `rhs` shape-compatible, broadcasting as needed.

    Returns the (possibly broadcast) pair; two scalars pass through
    unchanged.
    """
    lhs_ty = lhs.type
    rhs_ty = rhs.type
    if lhs_ty.is_block() and not rhs_ty.is_block():
        # (block, scalar): splat the scalar to the block's shape.
        rhs_ty = tl.block_type(rhs_ty.scalar, lhs_ty.shape)
        rhs = tl.tensor(builder.create_splat(rhs.handle, lhs_ty.get_block_shapes()), rhs_ty)
    elif not lhs_ty.is_block() and rhs_ty.is_block():
        # (scalar, block): symmetric case.
        lhs_ty = tl.block_type(lhs_ty.scalar, rhs_ty.shape)
        lhs = tl.tensor(builder.create_splat(lhs.handle, rhs_ty.get_block_shapes()), lhs_ty)
    elif lhs_ty.is_block() and rhs_ty.is_block():
        # (block, block): numpy-style broadcasting, dimension by dimension.
        lhs_shape = lhs_ty.get_block_shapes()
        rhs_shape = rhs_ty.get_block_shapes()
        if len(lhs_shape) != len(rhs_shape):
            raise ValueError("Cannot make_shape_compatible: blocks must have the same rank")
        ret_shape = []
        for i, (left, right) in enumerate(zip(lhs_shape, rhs_shape)):
            if left == 1:
                ret_shape.append(right)
            elif right == 1:
                ret_shape.append(left)
            elif left == right:
                ret_shape.append(left)
            else:
                raise ValueError("Cannot make_shape_compatible: incompatible dimensions "
                                 "at index " + str(i) + ": " + str(left) + " and " + str(right))
        if lhs_shape != ret_shape:
            lhs = tl.tensor(builder.create_broadcast(lhs.handle, ret_shape),
                            tl.block_type(lhs_ty.scalar, ret_shape))
        if rhs_shape != ret_shape:
            rhs = tl.tensor(builder.create_broadcast(rhs.handle, ret_shape),
                            tl.block_type(rhs_ty.scalar, ret_shape))
    # (scalar, scalar) => returns original blocks
    return lhs, rhs
#######
# cast
#######
def bitcast(input: tl.tensor,
            dst_ty: tl.dtype,
            builder: ir.builder) -> tl.tensor:
    """Reinterpret the bits of `input` as `dst_ty` without conversion.

    Pointer casts are delegated to cast(); otherwise the source and
    destination scalar types must have identical bit widths.

    Raises ValueError on a bit-width mismatch.
    """
    src_ty = input.type
    if src_ty.is_block():
        # Lift the scalar destination dtype to a matching block type.
        dst_ty = tl.block_type(dst_ty, input.type.get_block_shapes())
    if src_ty == dst_ty:
        return input
    src_sca_ty = src_ty.scalar
    dst_sca_ty = dst_ty.scalar
    if src_sca_ty.is_ptr() or dst_sca_ty.is_ptr():
        # Pointer bitcasts go through the generic cast logic.
        return cast(input, dst_ty, builder)
    # Bitcast requires identical bit widths.
    src_bits = src_sca_ty.primitive_bitwidth
    dst_bits = dst_sca_ty.primitive_bitwidth
    if src_bits != dst_bits:
        # Fixed: the original message was missing a space ("size 32to data-type").
        raise ValueError("Cannot bitcast data-type of size " + str(src_bits) +
                         " to data-type of size " + str(dst_bits))
    return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)),
                     dst_ty)
def cast(input: tl.tensor,
         dst_ty: tl.dtype,
         builder: ir.builder) -> tl.tensor:
    """Cast `input` to `dst_ty`, dispatching to the appropriate
    conversion: fp truncation/extension, int resize, fp<->int,
    ptr<->int, ptr<->ptr, or comparison-to-bool."""
    src_ty = input.type
    if src_ty.is_block() and not dst_ty.is_block():
        # Lift a scalar destination dtype to a block type matching the input.
        dst_ty = tl.block_type(dst_ty, input.type.get_block_shapes())
    if src_ty == dst_ty:
        return input
    src_sca_ty = src_ty.scalar
    dst_sca_ty = dst_ty.scalar
    # bf16 <=> (not fp32)
    # bf16 converts directly only to/from fp32; any other conversion is
    # routed through fp32.
    if (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()) or \
       (dst_sca_ty.is_bf16() and not src_sca_ty.is_fp32()):
        return cast(cast(input, tl.float32, builder), dst_sca_ty, builder)
    # FP Truncation
    truncate_fp = src_sca_ty.is_floating() and \
        dst_sca_ty.is_floating() and \
        src_sca_ty.fp_mantissa_width > dst_sca_ty.fp_mantissa_width
    if truncate_fp:
        return tl.tensor(builder.create_fp_trunc(input.handle,
                                                 dst_ty.to_ir(builder)),
                         dst_ty)
    # FP Extension
    ext_fp = src_sca_ty.is_floating() and \
        dst_sca_ty.is_floating() and \
        src_sca_ty.fp_mantissa_width < dst_sca_ty.fp_mantissa_width
    if ext_fp:
        return tl.tensor(builder.create_fp_ext(input.handle,
                                               dst_ty.to_ir(builder)),
                         dst_ty)
    # Int cast (bit-width and/or signedness change)
    if src_sca_ty.is_int() and dst_sca_ty.is_int() and \
       (src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness):
        # bool is never sign-extended.
        sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool()
        return tl.tensor(builder.create_int_cast(input.handle,
                                                 dst_ty.to_ir(builder), sign_extend),
                         dst_ty)
    # Float to Int
    if src_sca_ty.is_floating() and dst_sca_ty.is_int():
        # TODO: is this correct?
        if dst_sca_ty.is_bool():
            return tl.tensor(builder.create_fp_to_ui(input.handle,
                                                     dst_ty.to_ir(builder)),
                             dst_ty)
        else:
            return tl.tensor(builder.create_fp_to_si(input.handle,
                                                     dst_ty.to_ir(builder)),
                             dst_ty)
    # int => float
    if src_sca_ty.is_int() and dst_sca_ty.is_floating():
        if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed():
            return tl.tensor(builder.create_ui_to_fp(input.handle,
                                                     dst_ty.to_ir(builder)),
                             dst_ty)
        else:
            return tl.tensor(builder.create_si_to_fp(input.handle,
                                                     dst_ty.to_ir(builder)),
                             dst_ty)
    # ptr => int
    if src_sca_ty.is_ptr() and dst_sca_ty.is_int():
        bitwidth = dst_sca_ty.int_bitwidth
        if bitwidth == 64:
            return tl.tensor(builder.create_cast(ir.PtrToInt, input.handle, dst_ty.to_ir(builder)),
                             dst_ty)
        if bitwidth == 1:
            # ptr -> bool means "pointer is non-null".
            return not_equal(cast(input, tl.int64, builder),
                             tl.tensor(builder.get_int64(0), tl.int64),
                             builder)
    # int => ptr
    if not src_sca_ty.is_ptr() and dst_sca_ty.is_ptr():
        return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty)
    # Ptr . Ptr
    if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr():
        return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty)
    # * . Bool
    if dst_sca_ty.is_bool():
        if src_sca_ty.is_ptr():
            input = cast(input, tl.int64, builder)
        other = builder.get_int64(0)
        if src_ty.is_bool():
            other = builder.create_splat(other, src_ty.get_block_shapes())
        # Truthiness: compare against zero.
        return tl.tensor(builder.create_icmpNE(input.handle, other), dst_ty)
    assert False, f'cannot cast {input} to {dst_ty}'
# ===----------------------------------------------------------------------===//
# Memory Operators
# ===----------------------------------------------------------------------===//
def load(ptr: tl.tensor,
         mask: Optional[tl.tensor],
         other: Optional[tl.tensor],
         cache_modifier: str,
         eviction_policy: str,
         is_volatile: bool,
         builder: ir.builder) -> tl.tensor:
    """Load values through `ptr`, optionally masked with a fallback
    `other` value, honoring cache/eviction hints.

    Raises ValueError for a non-pointer argument, an unknown cache
    modifier or eviction policy, or `other` supplied without `mask`."""
    if not ptr.type.scalar.is_ptr():
        raise ValueError("Pointer argument of load instruction is " + ptr.type.__repr__())
    if ptr.type.is_block():
        # Broadcast mask/other up to the pointer block shape.
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
        if other:
            other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder)
    if other:
        # The fallback must have the pointee's element type.
        other = cast(other, ptr.type.scalar.element_ty, builder)
    ptr_ty = ptr.type.scalar
    elt_ty = ptr_ty.element_ty
    # treat bool* as tl.int8*
    if elt_ty == tl.int1:
        elt_ty = tl.int8
        ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space)
        ptr = cast(ptr, ptr_ty, builder)
    # cache modifier
    cache = ir.CACHE_MODIFIER.NONE  # default
    if cache_modifier:
        if cache_modifier == ".ca":
            cache = ir.CACHE_MODIFIER.CA
        elif cache_modifier == ".cg":
            cache = ir.CACHE_MODIFIER.CG
        else:
            raise ValueError(f"Cache modifier {cache_modifier} not supported")
    # eviction policy
    eviction = ir.EVICTION_POLICY.NORMAL  # default
    if eviction_policy:
        if eviction_policy == "evict_last":
            eviction = ir.EVICTION_POLICY.EVICT_LAST
        elif eviction_policy == "evict_first":
            eviction = ir.EVICTION_POLICY.EVICT_FIRST
        else:
            raise ValueError(f"Eviction policy {eviction_policy} not supported")
    # Result type mirrors the pointer's block shape (if any).
    if ptr.type.is_block():
        shape = ptr.type.get_block_shapes()
        dst_ty = tl.block_type(elt_ty, shape)
    else:
        dst_ty = elt_ty
    if not mask and not other:
        # Unconditional load.
        return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile),
                         dst_ty)
    if not mask:
        raise ValueError("`other` cannot be provided without `mask`")
    if not other:
        # Masked-off lanes get an undefined value when no fallback given.
        other_ir = ir.undef.get(elt_ty.to_ir(builder))
        if ptr.type.is_block():
            other_ir = builder.create_splat(other_ir, ptr.type.get_block_shapes())
        other = tl.tensor(other_ir, dst_ty)
    return tl.tensor(builder.create_masked_load(ptr.handle,
                                                mask.handle,
                                                other.handle,
                                                cache, eviction, is_volatile),
                     dst_ty)
def store(ptr: tl.tensor,
          val: tl.tensor,
          mask: Optional[tl.tensor],
          builder: ir.builder) -> tl.tensor:
    """Store `val` through `ptr`, optionally masked; returns a void tensor.

    Raises ValueError for a non-pointer argument or a non-boolean mask."""
    if not ptr.type.scalar.is_ptr():
        raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__())
    if ptr.type.is_block():
        # Broadcast the value (and mask) up to the pointer block shape
        # *before* any element-type casting below.
        val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder)
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
    ptr_ty = ptr.type.scalar
    elt_ty = ptr_ty.element_ty
    # treat bool* as tl.int8*
    if elt_ty == tl.int1:
        elt_ty = tl.int8
        ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space)
        ptr = cast(ptr, ptr_ty, builder)
    # cast to target data-type
    val = cast(val, elt_ty, builder)
    if not mask:
        return tl.tensor(builder.create_store(ptr.handle, val.handle), tl.void)
    if not mask.type.scalar.is_bool():
        raise ValueError("Mask must have boolean scalar type")
    return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle), tl.void)
#########
# atomic
#########
def atomic_cas(ptr: tl.tensor,
               cmp: tl.tensor,
               val: tl.tensor,
               builder: ir.builder) -> tl.tensor:
    """Atomic compare-and-swap: store `val` at `ptr` where the current
    value equals `cmp`; the result has `val`'s type."""
    # TODO: type checking
    return tl.tensor(builder.create_atomic_cas(ptr.handle, cmp.handle, val.handle), val.type)
def atom_red_typechecking_impl(ptr: tl.tensor,
                               val: tl.tensor,
                               mask: tl.tensor,
                               builder: ir.builder) -> Tuple[tl.tensor, tl.tensor, tl.tensor]:
    """Shared pre-processing for atomic RMW ops: validate the pointer,
    broadcast val/mask to the pointer's block shape, cast val to the
    pointee type, and materialize an all-true mask when none is given."""
    if not ptr.type.scalar.is_ptr():
        raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__())
    if ptr.type.is_block():
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
        if val:
            val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder)
    val = cast(val, ptr.type.scalar.element_ty, builder)
    if not mask:
        # Default mask: every lane enabled.
        mask_ir = builder.get_int1(True)
        mask_ty = tl.int1
        if ptr.type.is_block():
            mask_ir = builder.create_splat(mask_ir, ptr.type.get_block_shapes())
            mask_ty = tl.block_type(tl.int1, ptr.type.get_block_shapes())
        mask = tl.tensor(mask_ir, mask_ty)
    return ptr, val, mask
def atomic_max(ptr: tl.tensor,
               val: tl.tensor,
               mask: tl.tensor,
               builder: ir.builder) -> tl.tensor:
    """Atomic max of `val` into `ptr`, returning the previous values."""
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, builder)
    sca_ty = val.type.scalar
    # direct call to atomic_max for integers
    if sca_ty.is_int():
        if sca_ty.is_int_signed():
            return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX,
                                                       ptr.handle,
                                                       val.handle,
                                                       mask.handle),
                             val.type)
        else:
            return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX,
                                                       ptr.handle,
                                                       val.handle,
                                                       mask.handle),
                             val.type)
    # for float
    # return atomic_smax(i_ptr, i_val) if val >= 0
    # return atomic_umin(i_ptr, i_val) if val < 0
    # Floats are handled by reinterpreting their bits as integers: for
    # non-negative IEEE-754 values signed integer max orders correctly,
    # for negative values unsigned integer min does.
    # NOTE(review): the bitcast targets int32 unconditionally — this
    # presumably assumes fp32 values; confirm behavior for fp64 inputs.
    i_val = bitcast(val, tl.int32, builder)
    i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder)
    pos = greater_equal(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder)
    neg = less_than(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder)
    pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle, and_(mask, pos, builder).handle), i_val.type)
    neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle, and_(mask, neg, builder).handle), i_val.type)
    # Merge the two partial results back per-lane.
    return where(pos, pos_ret, neg_ret, builder)
def atomic_min(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, builder)
sca_ty = val.type.scalar
# direct call to atomic_min | |
# Repository: BXuan694/SOLO-pytorch
import os.path as osp
import numpy as np
import pycocotools.mask as maskUtils
from collections.abc import Sequence
import torch
from .data_container import DataContainer as DC
from .compose import Compose
import cv2
from .imgutils import rescale_size, imresize, imrescale, imflip, impad, impad_to_multiple
class LoadImageFromFile(object):
    """Load an image from disk into ``results['img']``.

    Args:
        to_float32 (bool): if True, cast the loaded image to ``np.float32``.
        color_type (str): how to decode the file; one of ``'color'``,
            ``'grayscale'`` or ``'unchanged'``, mapped to the matching
            ``cv2.IMREAD_*`` flag.  Previously this option was stored (and
            shown in ``__repr__``) but silently ignored -- images were
            always read as color.
    """

    def __init__(self, to_float32=False, color_type='color'):
        self.to_float32 = to_float32
        self.color_type = color_type

    def __call__(self, results):
        """Read the image named by ``results['img_info']['filename']``.

        Adds the keys ``filename``, ``img``, ``img_shape`` and
        ``ori_shape`` to ``results`` and returns it.
        """
        if results['img_prefix'] is not None:
            filename = osp.join(results['img_prefix'],
                                results['img_info']['filename'])
        else:
            filename = results['img_info']['filename']
        # Fix: honor ``color_type`` instead of always forcing IMREAD_COLOR.
        # Unknown values fall back to the old behavior (color) so existing
        # callers are unaffected.
        imread_flag = {
            'color': cv2.IMREAD_COLOR,
            'grayscale': cv2.IMREAD_GRAYSCALE,
            'unchanged': cv2.IMREAD_UNCHANGED,
        }.get(self.color_type, cv2.IMREAD_COLOR)
        img = cv2.imread(filename, imread_flag)
        if self.to_float32:
            img = img.astype(np.float32)
        results['filename'] = filename
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results

    def __repr__(self):
        return '{} (to_float32={}, color_type={})'.format(
            self.__class__.__name__, self.to_float32, self.color_type)
class LoadAnnotations(object):
    """Load bbox, label and mask annotations into the results dict.

    Reads from ``results['ann_info']`` and writes the corresponding
    ``gt_*`` keys, registering them in ``bbox_fields`` / ``mask_fields``.
    """

    def __init__(self,
                 with_bbox=True,
                 with_label=True,
                 with_mask=False,
                 poly2mask=True):
        # Which annotation kinds to load.
        self.with_bbox = with_bbox
        self.with_label = with_label
        self.with_mask = with_mask
        # Whether polygon / uncompressed-RLE masks are decoded to arrays.
        self.poly2mask = poly2mask

    def _load_bboxes(self, results):
        """Copy gt bboxes (and optional ignore bboxes) out of ann_info."""
        ann = results['ann_info']
        results['gt_bboxes'] = ann['bboxes']
        ignore = ann.get('bboxes_ignore', None)
        if ignore is not None:
            results['gt_bboxes_ignore'] = ignore
            results['bbox_fields'].append('gt_bboxes_ignore')
        results['bbox_fields'].append('gt_bboxes')
        return results

    def _load_labels(self, results):
        """Copy gt labels out of ann_info."""
        results['gt_labels'] = results['ann_info']['labels']
        return results

    def _poly2mask(self, mask_ann, img_h, img_w):
        """Decode a polygon / RLE annotation into a binary mask array."""
        if isinstance(mask_ann, list):
            # Polygon: one object may consist of several parts; merge the
            # per-part RLEs into a single one.
            rle = maskUtils.merge(maskUtils.frPyObjects(mask_ann, img_h, img_w))
        elif isinstance(mask_ann['counts'], list):
            # Uncompressed RLE: compress it first.
            rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
        else:
            # Already a compressed RLE.
            rle = mask_ann
        return maskUtils.decode(rle)

    def _load_masks(self, results):
        """Load gt masks, optionally decoding them to binary arrays."""
        img_h = results['img_info']['height']
        img_w = results['img_info']['width']
        masks = results['ann_info']['masks']
        if self.poly2mask:
            masks = [self._poly2mask(m, img_h, img_w) for m in masks]
        results['gt_masks'] = masks
        results['mask_fields'].append('gt_masks')
        return results

    def __call__(self, results):
        if self.with_bbox:
            results = self._load_bboxes(results)
            if results is None:
                return None
        if self.with_label:
            results = self._load_labels(results)
        if self.with_mask:
            results = self._load_masks(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += '(with_bbox={}, with_label={}, with_mask={})'.format(self.with_bbox, self.with_label, self.with_mask)
        return repr_str
class Normalize(object):
    """Normalize the image and record the normalization config.

    Adds the key "img_norm_cfg" to the results dict.

    Args:
        mean (sequence): Per-channel mean values.
        std (sequence): Per-channel std values.
        to_rgb (bool): Convert the image from BGR to RGB before
            normalizing; defaults to True.
    """

    def __init__(self, mean, std, to_rgb=True):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Normalize every image field of ``results``.

        Args:
            results (dict): Result dict from the loading pipeline.

        Returns:
            dict: ``results`` with normalized images and an added
            'img_norm_cfg' entry.
        """
        for field in results.get('img_fields', ['img']):
            results[field] = self.imnormalize(
                results[field], self.mean, self.std, self.to_rgb)
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        return (self.__class__.__name__
                + f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})')

    def imnormalize(self, img, mean, std, to_rgb=True):
        """Return a normalized float32 copy of ``img``.

        Args:
            img (ndarray): Image to be normalized.
            mean (ndarray): Per-channel mean.
            std (ndarray): Per-channel std.
            to_rgb (bool): Convert BGR -> RGB first.

        Returns:
            ndarray: A new, normalized image.
        """
        copied = img.copy().astype(np.float32)
        return self.imnormalize_(copied, mean, std, to_rgb)

    def imnormalize_(self, img, mean, std, to_rgb=True):
        """Normalize ``img`` in place and return it.

        Args:
            img (ndarray): Image to be normalized (modified in place).
            mean (ndarray): Per-channel mean.
            std (ndarray): Per-channel std.
            to_rgb (bool): Convert BGR -> RGB first.

        Returns:
            ndarray: The same (now normalized) array.
        """
        # cv2's in-place routines reject uint8 buffers.
        assert img.dtype != np.uint8
        row_mean = np.float64(mean.reshape(1, -1))
        inv_std = 1 / np.float64(std.reshape(1, -1))
        if to_rgb:
            cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace channel swap
        cv2.subtract(img, row_mean, img)  # inplace
        cv2.multiply(img, inv_std, img)  # inplace
        return img
class Resize(object):
    """Resize images & bbox & mask.

    This transform resizes the input image to some scale. Bboxes and masks
    are then resized with the same scale factor. If the input dict contains
    the key "scale", then the scale in the input dict is used, otherwise the
    specified scale in the init method is used.

    `img_scale` can either be a tuple (single-scale) or a list of tuple
    (multi-scale). There are 3 multiscale modes:
    - `ratio_range` is not None: randomly sample a ratio from the ratio range
      and multiply it with the image scale.
    - `ratio_range` is None and `multiscale_mode` == "range": randomly sample
      a scale from a range.
    - `ratio_range` is None and `multiscale_mode` == "value": randomly sample
      a scale from multiple scales.

    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
    """

    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True):
        if img_scale is None:
            self.img_scale = None
        else:
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                # A single scale (usually a tuple) is wrapped in a list.
                self.img_scale = [img_scale]
            assert isinstance(self.img_scale, list)
        if ratio_range is not None:
            # mode 1: given a single scale and a range of image ratios
            assert len(self.img_scale) == 1
        else:
            # mode 2/3: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']
        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio

    @staticmethod
    def random_select(img_scales):
        """Pick one scale uniformly at random from ``img_scales``.

        Returns:
            tuple: (chosen scale, its index in ``img_scales``).
        """
        assert isinstance(img_scales, list)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx

    @staticmethod
    def random_sample(img_scales):
        """Sample a scale between the two extreme scales in ``img_scales``.

        The long and short edges are sampled independently from the ranges
        spanned by the two given scales.

        Returns:
            tuple: (sampled (long, short) scale, None).
        """
        assert isinstance(img_scales, list) and len(img_scales) == 2
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None

    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Multiply ``img_scale`` by a ratio sampled from ``ratio_range``.

        Returns:
            tuple: (scaled (w, h) as ints, None).
        """
        # Bug fix: a single scale passed to __init__ is typically a tuple
        # (e.g. (1333, 800)); the previous ``isinstance(img_scale, list)``
        # assertion rejected tuples and made the ratio_range mode fail with
        # an AssertionError.  Accept both sequence types.
        assert isinstance(img_scale, (tuple, list)) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None

    def _random_scale(self, results):
        """Choose a target scale for this sample and store it in results."""
        if self.ratio_range is not None:
            scale, scale_idx = self.random_sample_ratio(
                self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError
        results['scale'] = scale
        results['scale_idx'] = scale_idx

    def _resize_img(self, results):
        """Resize results['img'] and record shape/scale bookkeeping keys."""
        if self.keep_ratio:
            img, scale_factor = imrescale(
                results['img'], results['scale'], return_scale=True)
        else:
            img, w_scale, h_scale = imresize(
                results['img'], results['scale'], return_scale=True)
            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                    dtype=np.float32)
        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape  # in case that there is no padding
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio

    def _resize_bboxes(self, results):
        """Scale every bbox field and clip boxes to the new image bounds."""
        img_shape = results['img_shape']
        for key in results.get('bbox_fields', []):
            bboxes = results[key] * results['scale_factor']
            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
            results[key] = bboxes

    def _resize_masks(self, results):
        """Resize every mask field with the same scale as the image."""
        for key in results.get('mask_fields', []):
            if results[key] is None:
                continue
            if self.keep_ratio:
                masks = [
                    imrescale(
                        mask, results['scale_factor'], interpolation='nearest')
                    for mask in results[key]
                ]
            else:
                mask_size = (results['img_shape'][1], results['img_shape'][0])
                masks = [
                    imresize(mask, mask_size, interpolation='nearest')
                    for mask in results[key]
                ]
            # NOTE: np.stack raises on an empty mask list; non-None fields
            # are expected to contain at least one mask.
            results[key] = np.stack(masks)

    def __call__(self, results):
        if 'scale' not in results:
            self._random_scale(results)
        self._resize_img(results)
        self._resize_bboxes(results)
        self._resize_masks(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += ('(img_scale={}, multiscale_mode={}, ratio_range={}, '
                     'keep_ratio={})').format(self.img_scale,
                                              self.multiscale_mode,
                                              self.ratio_range,
                                              self.keep_ratio)
        return repr_str
class RandomFlip(object):
"""Flip the image & bbox & mask.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
Args:
flip_ratio (float, optional): The flipping probability.
"""
def __init__(self, flip_ratio=None, direction='horizontal'):
self.flip_ratio = flip_ratio
self.direction = direction
if flip_ratio is not None:
assert flip_ratio >= 0 and flip_ratio <= 1
assert direction in ['horizontal', 'vertical']
def bbox_flip(self, bboxes, img_shape, direction):
"""Flip bboxes horizontally.
Args:
bboxes(ndarray): shape (..., 4*k)
img_shape(tuple): (height, width)
"""
assert bboxes.shape[-1] % 4 == 0
flipped = bboxes.copy()
if direction == 'horizontal':
w = img_shape[1]
flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
elif direction == 'vertical':
h = img_shape[0]
flipped[..., 1::4] = h - bboxes[..., 3::4] - 1
flipped[..., 3::4] = h - bboxes[..., 1::4] - 1
else:
raise ValueError(
'Invalid flipping direction "{}"'.format(direction))
return flipped
def __call__(self, results):
if 'flip' not in results:
flip = True if np.random.rand() < self.flip_ratio else False
results['flip'] = flip
if 'flip_direction' not in results:
results['flip_direction'] = self.direction
if results['flip']:
# flip image
results['img'] = imflip(
results['img'], direction=results['flip_direction'])
# flip bboxes
for key in results.get('bbox_fields', []):
results[key] | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class BudgetsOperations(object):
"""BudgetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. The current version is 2018-01-31. Constant value: "2018-01-31".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Store the service client, configuration and (de)serializers."""
        # Pipeline client used to build and send every request.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed API version sent with every operation in this group.
        self.api_version = "2018-01-31"
        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Lists all budgets for a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Budget
        :rtype:
         ~azure.mgmt.consumption.models.BudgetPaged[~azure.mgmt.consumption.models.Budget]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.consumption.models.ErrorResponseException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page of budgets: the first page builds the URL from
            # the subscription id; subsequent pages follow the server-provided
            # next_link verbatim (it already carries its own query string).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Consumption/budgets'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for service-side request tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)
            return response
        # Deserialize response: BudgetPaged calls internal_paging lazily as
        # the caller iterates over pages.
        deserialized = models.BudgetPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.BudgetPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_by_resource_group_name(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists all budgets for a resource group under a subscription.

        :param resource_group_name: Azure Resource Group Name.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Budget
        :rtype:
         ~azure.mgmt.consumption.models.BudgetPaged[~azure.mgmt.consumption.models.Budget]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.consumption.models.ErrorResponseException>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page: the first page builds the resource-group URL;
            # later pages follow the server-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Consumption/budgets'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for service-side request tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)
            return response
        # Deserialize response: BudgetPaged drives internal_paging lazily.
        deserialized = models.BudgetPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.BudgetPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def get(
            self, budget_name, custom_headers=None, raw=False, **operation_config):
        """Gets the budget for a subscription by budget name.

        :param budget_name: Budget Name.
        :type budget_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Budget or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.consumption.models.Budget or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.consumption.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Consumption/budgets/{budgetName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'budgetName': self._serialize.url("budget_name", budget_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for service-side request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            # Body only present on success; map it onto the Budget model.
            deserialized = self._deserialize('Budget', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, budget_name, parameters, custom_headers=None, raw=False, **operation_config):
        """The operation to create or update a budget. Update operation requires
        latest eTag to be set in the request mandatorily. You may obtain the
        latest eTag by performing a get operation. Create operation does not
        require eTag.

        :param budget_name: Budget Name.
        :type budget_name: str
        :param parameters: Parameters supplied to the Create Budget operation.
        :type parameters: ~azure.mgmt.consumption.models.Budget
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Budget or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.consumption.models.Budget or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.consumption.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Consumption/budgets/{budgetName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'budgetName': self._serialize.url("budget_name", budget_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for service-side request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'Budget')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        # 200 = existing budget updated, 201 = new budget created; both
        # return the resulting Budget entity in the body.
        if response.status_code == 200:
            deserialized = self._deserialize('Budget', response)
        if response.status_code == 201:
            deserialized = self._deserialize('Budget', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def delete(
self, budget_name, custom_headers=None, raw=False, **operation_config):
"""The operation to delete a budget.
:param budget_name: Budget Name.
:type budget_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.consumption.models.ErrorResponseException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Consumption/budgets/{budgetName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'budgetName': self._serialize.url("budget_name", budget_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
    def get_by_resource_group_name(
            self, resource_group_name, budget_name, custom_headers=None, raw=False, **operation_config):
        """Gets the budget for a resource group under a subscription by budget
        name.

        :param resource_group_name: Azure Resource Group Name.
        :type resource_group_name: str
        :param budget_name: Budget Name.
        :type budget_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Budget or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.consumption.models.Budget or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.consumption.models.ErrorResponseException>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Consumption/budgets/{budgetName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'budgetName': self._serialize.url("budget_name", budget_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for service-side request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            # Body only present on success; map it onto the Budget model.
            deserialized = self._deserialize('Budget', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def create_or_update_by_resource_group_name(
self, resource_group_name, budget_name, parameters, custom_headers=None, raw=False, **operation_config):
"""The operation to create or update a budget. Update operation requires
latest eTag to be set in the request mandatorily. You may obtain the
latest eTag by performing a get operation. Create operation does not
require eTag.
:param resource_group_name: Azure Resource Group Name.
:type resource_group_name: str
:param budget_name: Budget Name.
:type budget_name: str
:param parameters: Parameters supplied | |
{
"cell_type": "code",
"metadata": {
"id": "TE8DC7VJi_zO",
"outputId": "0596c486-9a80-4d93-8238-4c4cf94e0c94",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"df_ratings.shape"
],
"execution_count": 10,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(1149780, 3)"
]
},
"metadata": {
"tags": []
},
"execution_count": 10
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "2h9vkU0xhIvQ",
"outputId": "92c9bdb3-4beb-4a64-e19e-f986973a544b",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 121
}
},
"source": [
"ratings = df_ratings['user'].value_counts()\n",
"ratings.sort_values(ascending=False).head()"
],
"execution_count": 11,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"11676 13602\n",
"198711 7550\n",
"153662 6109\n",
"98391 5891\n",
"35859 5850\n",
"Name: user, dtype: int64"
]
},
"metadata": {
"tags": []
},
"execution_count": 11
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "07yE5rUyjM_T",
"outputId": "e716315a-375c-4b79-8923-89bee494f680",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"len(ratings[ratings < 200])"
],
"execution_count": 12,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"104378"
]
},
"metadata": {
"tags": []
},
"execution_count": 12
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "m5qCNGD3mx9R",
"outputId": "8f1c5fec-86db-47ef-e30a-01c72d7cf9bb",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"df_ratings['user'].isin(ratings[ratings < 200].index).sum()"
],
"execution_count": 13,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"622224"
]
},
"metadata": {
"tags": []
},
"execution_count": 13
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "cknVbnKri6_c",
"outputId": "f442b6a1-46ce-4675-fd2e-4984b9137b12",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"df_ratings_rm = df_ratings[\n",
" ~df_ratings['user'].isin(ratings[ratings < 200].index)\n",
"]\n",
"df_ratings_rm.shape"
],
"execution_count": 14,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(527556, 3)"
]
},
"metadata": {
"tags": []
},
"execution_count": 14
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "JcAof3kQn2NV"
},
"source": [
"## Remove books with less than 100 ratings"
]
},
{
"cell_type": "code",
"metadata": {
"id": "3ucTVN7CevEN",
"outputId": "e67d1138-1fe0-4cfa-9df1-0be277b25da5",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 121
}
},
"source": [
"ratings = df_ratings['isbn'].value_counts() # we have to use the original df_ratings to pass the challenge\n",
"ratings.sort_values(ascending=False).head()"
],
"execution_count": 15,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"0971880107 2502\n",
"0316666343 1295\n",
"0385504209 883\n",
"0060928336 732\n",
"0312195516 723\n",
"Name: isbn, dtype: int64"
]
},
"metadata": {
"tags": []
},
"execution_count": 15
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "85aIP2kph2q0",
"outputId": "c83195f0-e16e-4291-f1cd-4cf7463182d1",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"len(ratings[ratings < 100])"
],
"execution_count": 16,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"339825"
]
},
"metadata": {
"tags": []
},
"execution_count": 16
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "oW68SnJQqU4E",
"outputId": "f105035e-e6eb-411b-fcac-1004fe4a67fa",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"df_books['isbn'].isin(ratings[ratings < 100].index).sum()"
],
"execution_count": 17,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"269442"
]
},
"metadata": {
"tags": []
},
"execution_count": 17
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "8WMIqkWNfVDd",
"outputId": "e2a67008-2d74-4b41-c62c-68fb15ef89d6",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
}
},
"source": [
"df_ratings_rm = df_ratings_rm[\n",
" ~df_ratings_rm['isbn'].isin(ratings[ratings < 100].index)\n",
"]\n",
"df_ratings_rm.shape"
],
"execution_count": 18,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"(49781, 3)"
]
},
"metadata": {
"tags": []
},
"execution_count": 18
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "16CsPkMrpnQ1",
"outputId": "8a311083-ec37-4952-b3bb-12fd54147b5a",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 104
}
},
"source": [
"# These should exist\n",
"books = [\"Where the Heart Is (Oprah's Book Club (Paperback))\",\n",
" \"I'll Be Seeing You\",\n",
" \"The Weight of Water\",\n",
" \"The Surgeon\",\n",
" \"I Know This Much Is True\"]\n",
"\n",
"for book in books:\n",
" print(df_ratings_rm.isbn.isin(df_books[df_books.title == book].isbn).sum())"
],
"execution_count": 19,
"outputs": [
{
"output_type": "stream",
"text": [
"183\n",
"75\n",
"49\n",
"57\n",
"77\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Bd8nZD08QIvR"
},
"source": [
"## Prepare dataset for KNN"
]
},
{
"cell_type": "code",
"metadata": {
"id": "39VgsA7MNiiA",
"outputId": "bf221282-0017-46de-85bc-c621204f35e7",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 287
}
},
"source": [
"df = df_ratings_rm.pivot_table(index=['user'],columns=['isbn'],values='rating').fillna(0).T\n",
"df.head()"
],
"execution_count": 20,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th>user</th>\n",
" <th>254</th>\n",
" <th>2276</th>\n",
" <th>2766</th>\n",
" <th>2977</th>\n",
" <th>3363</th>\n",
" <th>4017</th>\n",
" <th>4385</th>\n",
" <th>6242</th>\n",
" <th>6251</th>\n",
" <th>6323</th>\n",
" <th>6543</th>\n",
" <th>6563</th>\n",
" <th>6575</th>\n",
" <th>7158</th>\n",
" <th>7286</th>\n",
" <th>7346</th>\n",
" <th>7915</th>\n",
" <th>8067</th>\n",
" <th>8245</th>\n",
" <th>8681</th>\n",
" <th>8936</th>\n",
" <th>9856</th>\n",
" <th>10447</th>\n",
" <th>10819</th>\n",
" <th>11601</th>\n",
" <th>11676</th>\n",
" <th>11993</th>\n",
" <th>12538</th>\n",
" <th>12824</th>\n",
" <th>12982</th>\n",
" <th>13082</th>\n",
" <th>13273</th>\n",
" <th>13552</th>\n",
" <th>13850</th>\n",
" <th>14422</th>\n",
" <th>14521</th>\n",
" <th>15408</th>\n",
" <th>15418</th>\n",
" <th>15957</th>\n",
" <th>16106</th>\n",
" <th>...</th>\n",
" <th>264317</th>\n",
" <th>264321</th>\n",
" <th>264637</th>\n",
" <th>265115</th>\n",
" <th>265313</th>\n",
" <th>265595</th>\n",
" <th>265889</th>\n",
" <th>266056</th>\n",
" <th>266226</th>\n",
" <th>266753</th>\n",
" <th>266865</th>\n",
" <th>266866</th>\n",
" <th>267635</th>\n",
" <th>268030</th>\n",
" <th>268032</th>\n",
" <th>268110</th>\n",
" <th>268330</th>\n",
" <th>268622</th>\n",
" <th>268932</th>\n",
" <th>269566</th>\n",
" <th>269719</th>\n",
" <th>269728</th>\n",
" <th>269890</th>\n",
" <th>270713</th>\n",
" <th>270820</th>\n",
" <th>271195</th>\n",
" <th>271284</th>\n",
" <th>271448</th>\n",
" <th>271705</th>\n",
" <th>273979</th>\n",
" <th>274004</th>\n",
" <th>274061</th>\n",
" <th>274301</th>\n",
" <th>274308</th>\n",
" <th>274808</th>\n",
" <th>275970</th>\n",
" <th>277427</th>\n",
" <th>277478</th>\n",
" <th>277639</th>\n",
" <th>278418</th>\n",
" </tr>\n",
" <tr>\n",
" <th>isbn</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>002542730X</th>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>6.0</td>\n",
" <td>0.0</td>\n",
" <td>10.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>...</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>7.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>10.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>0060008032</th>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>8.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>...</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>0.0</td>\n",
" <td>6.0</td>\n",
" <td>0.0</td>\n",
| |
-467, "ranXe": -468,
"orveX": 469, "leriX": -470, "maXis": -471,
"Xamag": 472, "aS iX": 473, "a uXa": 474,
"anXin": -475, "Xerel": -476, "rt aX": 477,
"Xalig": -478, "ediXi": -479, "t ilX": 480,
"myonX": -481, "Xetes": 482, "retiX": -483,
"Xanli": -484, " Xesa": -485, "bovaX": -486,
"pis X": -487, "Xecen": 488, "UreXe": -489,
"U miX": -490, "urmaX": -491, "Xceg": 492,
"vereX": -493, "rdUnX": -494, "acatX": 495,
"ndy X": -496, "lerXe": -497, "layaX": -498,
"laXag": -499, "mumXi": 500, " aCaX": -501,
"n Xik": 502, "Xesit": 503, "Xerek": 504,
"a Xik": 505, "SXeg": -506, "tavX": 507,
"amXl": 508, "dstX": -509, "Xolw": -510,
"zkUX": 511, "erIX": -512, "Xrec": 513,
"Xsit": 514, "yodX": 515, "tXeg": -516,
"bseX": -517, "goXn": 518, "Xlah": -519,
"Xolb": -520, "hXur": -521, "edoX": -522,
"umXe": 523, "geXu": 524, "usuX": -525,
"rXee": 526, "etcX": 527, "Xiok": 528,
"Xogh": -529, "iXma": -530, "koXr": 531,
"hefX": -532, "fXad": -533, "moXl": -534,
"gliX": -535, "skUX": 536, "tiyX": 537,
"vyaX": 538, "C X ": 539, "koXc": 540,
"vniX": 541, "Xaeb": 542, "IlXu": 543,
"CelX": -544, "SXig": -545, "piXl": 546,
" Xoi": -547, "uicX": 548, " UXc": 549,
"girX": -550, "Xoct": -551, "keXc": 552,
"ruXi": -553, "pUrX": 554, "pIrX": -555,
"nkXl": -556, "CiaX": 557, "yaIX": -558,
" UlX": 559, "iy X": 560, "eetX": -561,
"blIX": -562, "aveX": -563, "rnuX": -564,
"Xtif": -565, "feXl": 566, "oXev": -567,
"ljuX": 568, "Xorf": -569, "tabX": -570,
"geaX": 571, "kUCX": 572, "kUXc": 573,
"OnXi": 574, "iXet": -575, "tXas": -576,
"mlaX": -577, "wo X": -578, "apiX": 579,
"hiXd": 580, "Xagr": 581, "uy X": -582,
"Xebl": 583, "rilX": -584, "Xelv": -585,
"y Xk": 586, "Xsu ": -587, "peXk": -588,
"ukuX": 589, " uzX": 590, "x Xo": -591,
"yaXm": -592, "IXaa": -593, "OzaX": 594,
"Xech": -595, "OrtX": -596, "oXko": -597,
"kXah": 598, "uliX": -599, "lilX": -600,
"CliX": -601, "Xipt": -602, "Xal1": 603,
"oyXa": 604, "CiXc": 605, "rokX": -606,
"lasX": 607, "iXoc": 608, "morX": -609,
"boXo": -610, "ouaX": -611, "Xamt": 612,
"dIrX": -613, "buCX": 614, "nXov": 615,
"nXs ": 616, "Xirn": -617, "onX ": -618,
"e Xg": -619, "Xatr": -620, "ondX": 621,
"nXez": 622, "odiX": -623, "Xork": -624,
"Xoum": 625, "inkX": -626, "utuX": -627,
"uXcu": 628, "yuhX": -629, "Xnae": 630,
"Xao ": -631, "umcX": 632, "Xve ": 633,
"uXea": -634, "Xayy": 635, "kiXd": 636,
"udoX": -637, " gXu": 638, "gneX": 639,
"Xesz": -640, " Xcs": 641, "ruXr": 642,
"Xozy": -643, "lviX": -644, "Xibr": -645,
"Xlur": -646, "ulXi": 647, " nIX": -648,
"sezX": 649, " isX": -650, "raXd": 651,
"isaX": -652, "srtX": -653, "leXa": 654,
"pelX": -655, "tXem": 656, "I Xg": -657,
"zilX": -658, "vasX": -659, "4 yX": -660,
"uzUX": 661, "gelX": -662, "Xuan": 663,
"nviX": -664, "lguX": -665, "Xcag": 666,
"ureX": -667, "agaX": -668, "imXa": 669,
"muXa": -670, "guXu": -671, "tXce": 672,
"Xucg": 673, "nXvi": 674, "veXk": -675,
"Xavk": -676, "liXl": 677, "Xadu": 678,
"my X": -679, "Xdis": 680, "pmeX": -681,
"lseX": -682, "oduX": -683, "uSuX": -684,
"iXev": 685, "Xulh": 686, "Xiol": -687,
"paSX": 688, "UziX": 689, "Xot ": -690,
"uXit": -691, "ikiX": -692, "kiXa": 693,
" zUX": -694, "omeX": -695, "kkeX": 696,
"aXif": -697, "I dX": 698, "fuXk": -699,
"Xirv": -703, "eXig": -704, "punX": -705,
"msiX": -706, "syaX": -707, "aIrX": -708,
"Xinh": 709, "oXea": -710, "ierX": -711,
"usaX": -712, " gIX": -713, "dluX": -714,
" aXc": -715, "fliX": -716, "suXo": 717,
"ssoX": -718, "puXu": -719, "fXan": -720,
"fInX": -721, "heXe": -722, "nCIX": -723,
"Xin1": 724, "a Xb": -725, "mXak": 726,
"halX": -727, "Xesc": -728, "gIXa": -729,
"umaX": -730, "breX": -731, "Xatf": -732,
"Xevt": -733, "iXig": -734, "Xuls": 735,
"vaXt": 736, "Xlaf": -737, " yeX": 738,
"elaX": -739, "reXo": -740, "rlaX": -741,
"danX": -742, "nXur": -743, "ibiX": -744,
" UXk": 745, "USXe": -746, "uziX": -747,
"nduX": -748, "zXes": -749, "Xaz ": -750,
" aSX": 751, "Xoco": -752, "proX": -753,
"plIX": -754, "Xenn": -755, "lXam": 756,
"betX": 757, "Xagn": -758, "Xigd": 759,
"Xesm": 760, "IkIX": -761, " bIX": 762,
"asaX": -763, "hiiX": 764, "Xabu": 765,
"ekeX": -766, "rliX": -767, "rkIX": -768,
"Xmil": -769, "UnUX": 770, "SUnX": -771,
"ImaX": -772, "Xade": -773, "Xatc": -774,
"Xilm": 775, "marX": -776, "rXus": -777,
"porX": -778, "Xtik": 779, "kaXt": 780,
"enXi": -781, "eteX": -782, "eXeg": -783,
"silX": -784, "yrIX": -785, "Xozu": 786,
" iXi": 787, "UXn": 788, "yXp": 789,
"Xbc": -790, "aXj": 791, "Xgw": -792,
"tXp": 793, "IeX": -794, "Xcg": 795,
"Xzo": 796, "hXp": -797, "Xsc": -798,
"Xo2": -799, "kXp": -800, " X1": -801,
"aXf": -802, "weX": -803, "kcX": 804,
"dXg": 805, "wrX": -806, "znX": -807,
"jnX": -808, "ueX": -809, "Xth": -810,
"iXp": -811, "Xzy": -812, "Xej": -813,
"tXb": -814, "SCX": -815, "SXc": 816,
"Xeu": -817, "uXv": 818, "UbX": -819,
"sOX": 820, "chX": -821, "Xix": -822,
"voX": -823, "lcX": 824, " SX": -825,
" lX": -826, "tCX": -827, "UXv": 828,
" hX": -829, " fX": -830, "euX": -831,
"jeX": -832, "4aX": -833, "eXg": -834,
"llX": -835, "ylX": -836, "rzX": 837,
"GuX": -838, "thX": -839, "yXb": 840,
"rXh": -841, "jlX": 842, "iXm": 843,
"IXt": 844, "Xd ": -845, "iuX": -846,
"Xlo": -847, "Xah": -848, "yIX": -849,
"Xks": -850, "Xok": 851, "qX": -852,
"Xx": -853, "X2": 854, "X5": 855,
"X7": -856, "abilme gUXune": 857,
"imin sonuXud": 858,
"0 stankoviX": -859, "i smeonoviX": -860,
"n mijatoviX": 861, "I silah gUX": 862,
"layan saraX": 863, "k halilagiX": 864,
"zlar polisX": -865, "i halilagiX": 866,
" elindeki X": 867, "nlarInIn Xo": 868,
"n de aXikt": -869, "a ivanoviX": -870,
"Iyla baliX": 871, "ar ve Xam ": 872,
"i raftingX": 873, "k markoviX": -874,
"rIlmayan X": 875, "asInda Xop": -876,
"i bir Xam ": 877, "nin Xanlar": 878,
"i sonuXudu": 879, "r dolara X": 880,
"a aXisini": -881, "I Xanlar ": 882,
"kan a aXi": -883, "u da Xaba": -884,
"a aXiyord": -885, "i de Xaba": -886,
"e vlaoviX": 887, "i adI geX": 888,
"ne ve Xog": -889, "an aXilar": -890,
"na geXeli": 891, "e keynesX": -892,
"ka bir aX": 893, "lara aXiy": -894,
"zeki Xab": -895, "e armutX": -896,
"a aXisiy": -897, "unda Xos": 898,
"r aXisid": -899, " aXisin ": -900,
"men Xop ": -901, "okanoviX": -902,
"n genXel": 903, "Cek kauX": -904,
"son haXi": 905, "bi dostX": -906,
"an mahXu": -907, " bu Xos": 908,
"ulu Xam ": 909, "im aXim ": -910,
"bin Xam ": 911, "tU saraX": 912, "I kuruXa": 913,
"nce Xamd": 914, "vaS aXis": -915, "e pen X": -916,
"n Xiler ": 917, "ek aXili": 918, "emiS Xin": -919,
"isini Xo": 920, "eki Xim ": 921, " de Xam ": 922,
"zasIna X": 923, " Xandik": 924, "joroviX": -925,
"n kukoX": 926, "Xandarl": 927, "nobil X": 928,
"nuna iX": -929, "in Xad ": 930, " barXa": 931,
"a avdiX": -932, "kada X ": 933, "ecep Xi": -934,
"ir haX ": 935, "aXiklis": -936, "panoviX": -937,
" tarXin": 938, "oXanind": 939, "t Xilar": 940,
"er Xayi": 941, "lI saX ": -942, " ta Xam": 943,
"lit Xam": 944, "ri Xaml": 945, "batIXim": 946,
"lsa Xam": 947, "an nahX": -948, "ok aXiy": -949,
" paXin ": 950, " Xinlik": -951, "ri Xat ": 952,
"m saraX": 953, " Xurumd": -954, "a arXa ": 955,
"lan akX": 956, "un akXa": 957, "ol Xin ": -958,
"usam aX": 959, "ma Xaml": 960, "da faXa": 961,
"aS aXi ": -962, "trafI X": 963, "bu Xin ": -964,
"ze aXiy": -965, " un aX ": -966, "t Xarsi": 967,
"i o gUX": 968, "niGde X": 969, "tekli X": 970,
"hce maX": -971, "t urumX": 972, "dada Xa": 973,
"in aXiy": -974, "Um aXis": -975, "lis Xe ": 976,
"ve aXim": -977, "ce Xop ": -978, " Xamla": 979,
"le Xosu": 980, "et genX": 981, "S aXil ": -982,
"ni aXis": 983, " Sen X ": 984, "rlIk Xa": 985,
"Xarcal": -986, "kut Xu": 987, "d maX ": -988,
"urem X": 989, "8 Xarm": -990, "afa aX": -991,
"kentiX": 992, "Xollez": -993, "I Xor ": -994,
"si Xop": -995, | |
<gh_stars>100-1000
import asyncio
import os
import uuid
import json
import sqlite3
import dask
from dask.utils import tmpfile
from dask_cloudprovider.generic.vmcluster import (
VMCluster,
VMInterface,
SchedulerMixin,
)
from dask_cloudprovider.gcp.utils import build_request, is_inside_gce
from distributed.core import Status
try:
import googleapiclient.discovery
from googleapiclient.errors import HttpError
except ImportError as e:
msg = (
"Dask Cloud Provider GCP requirements are not installed.\n\n"
"Please either conda or pip install as follows:\n\n"
" conda install -c conda-forge dask-cloudprovider # either conda install\n"
' pip install "dask-cloudprovider[gcp]" --upgrade # or python -m pip install'
)
raise ImportError(msg) from e
class GCPCredentialsError(Exception):
    """Raised when GCP credentials are missing"""

    # Default guidance shown when the caller gives no message of their own.
    _DEFAULT_MESSAGE = (
        "GCP Credentials have not been provided. Either set the following environment variable: "
        "export GOOGLE_APPLICATION_CREDENTIALS=<Path-To-GCP-JSON-Credentials> "
        "or authenticate with "
        "gcloud auth login"
    )

    def __init__(self, message=None):
        # Fall back to the canned instructions only when no message was given.
        super().__init__(self._DEFAULT_MESSAGE if message is None else message)
class GCPInstance(VMInterface):
    """A single GCP Compute Engine VM managed by a Dask VM cluster.

    Every configuration keyword falls back to the corresponding key of
    ``config`` (the cluster's Dask configuration mapping) when not passed
    explicitly. Subclasses provide the scheduler/worker specific startup
    command via the cluster's cloud-init rendering.
    """

    def __init__(
        self,
        cluster,
        config=None,
        zone=None,
        projectid=None,
        machine_type=None,
        filesystem_size=None,
        disk_type=None,
        on_host_maintenance=None,
        source_image=None,
        docker_image=None,
        network=None,
        env_vars=None,
        ngpus=None,
        gpu_type=None,
        bootstrap=None,
        gpu_instance=None,
        auto_shutdown=None,
        preemptible=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.cluster = cluster
        self.config = config
        self.on_host_maintenance = on_host_maintenance or self.config.get(
            "on_host_maintenance"
        )
        self.projectid = projectid or self.config.get("projectid")
        self.zone = zone or self.config.get("zone")
        self.machine_type = machine_type or self.config.get("machine_type")
        # Normalize short image names / full URIs to the partial-URI form the
        # instances API expects (see expand_source_image).
        self.source_image = self.expand_source_image(
            source_image or self.config.get("source_image")
        )
        self.docker_image = docker_image or self.config.get("docker_image")
        self.env_vars = env_vars
        self.filesystem_size = filesystem_size or self.config.get("filesystem_size")
        self.disk_type = disk_type or self.config.get("disk_type")
        self.ngpus = ngpus or self.config.get("ngpus")
        self.network = network or self.config.get("network")
        self.gpu_type = gpu_type or self.config.get("gpu_type")
        self.gpu_instance = gpu_instance
        self.bootstrap = bootstrap
        self.auto_shutdown = auto_shutdown
        self.preemptible = preemptible
        # Region is the zone minus its final letter suffix: us-east1-c -> us-east1
        self.general_zone = "-".join(self.zone.split("-")[:2])

    def create_gcp_config(self):
        """Build the ``instances.insert`` request body for this VM.

        Returns
        -------
        dict
            A GCP instance resource description (boot disk, network,
            metadata/cloud-init, scheduling, optional GPUs).
        """
        config = {
            "name": self.name,
            "machineType": f"zones/{self.zone}/machineTypes/{self.machine_type}",
            "displayDevice": {"enableDisplay": "false"},
            "tags": {"items": ["http-server", "https-server"]},
            # Specify the boot disk and the image to use as a source.
            "disks": [
                {
                    "kind": "compute#attachedDisk",
                    "type": "PERSISTENT",
                    "boot": "true",
                    "mode": "READ_WRITE",
                    "autoDelete": "true",
                    "deviceName": self.name,
                    "initializeParams": {
                        "sourceImage": self.source_image,
                        "diskType": f"projects/{self.projectid}/zones/{self.zone}/diskTypes/{self.disk_type}",
                        "diskSizeGb": f"{self.filesystem_size}",  # nvidia-gpu-cloud cannot be smaller than 32 GB
                        "labels": {},
                        # "source": "projects/nv-ai-infra/zones/us-east1-c/disks/ngc-gpu-dask-rapids-docker-experiment",
                    },
                    "diskEncryptionKey": {},
                }
            ],
            "canIpForward": "false",
            "networkInterfaces": [
                {
                    "kind": "compute#networkInterface",
                    "subnetwork": f"projects/{self.projectid}/regions/{self.general_zone}/subnetworks/{self.network}",
                    "aliasIpRanges": [],
                }
            ],
            # Allow the instance to access cloud storage and logging.
            "serviceAccounts": [
                {
                    "email": "default",
                    "scopes": [
                        "https://www.googleapis.com/auth/devstorage.read_write",
                        "https://www.googleapis.com/auth/logging.write",
                        "https://www.googleapis.com/auth/monitoring.write",
                    ],
                }
            ],
            # Metadata is readable from the instance and allows you to
            # pass configuration from deployment scripts to instances.
            "metadata": {
                "items": [
                    {
                        # Startup script is automatically executed by the
                        # instance upon startup.
                        "key": "google-logging-enabled",
                        "value": "true",
                    },
                    {"key": "user-data", "value": self.cloud_init},
                ]
            },
            "labels": {"container-vm": "dask-cloudprovider"},
            "scheduling": {
                "preemptible": ("true" if self.preemptible else "false"),
                "onHostMaintenance": self.on_host_maintenance.upper(),
                # Preemptible VMs cannot be configured to restart automatically.
                "automaticRestart": ("false" if self.preemptible else "true"),
                "nodeAffinities": [],
            },
            "shieldedInstanceConfig": {
                "enableSecureBoot": "false",
                "enableVtpm": "true",
                "enableIntegrityMonitoring": "true",
            },
            "deletionProtection": "false",
            "reservationAffinity": {"consumeReservationType": "ANY_RESERVATION"},
        }
        if self.config.get("public_ingress", True):
            # Attach an external NAT address so the VM is reachable publicly.
            config["networkInterfaces"][0]["accessConfigs"] = [
                {
                    "kind": "compute#accessConfig",
                    "name": "External NAT",
                    "type": "ONE_TO_ONE_NAT",
                    "networkTier": "PREMIUM",
                }
            ]
        if self.ngpus:
            config["guestAccelerators"] = [
                {
                    "acceleratorCount": self.ngpus,
                    "acceleratorType": f"projects/{self.projectid}/zones/{self.zone}/acceleratorTypes/{self.gpu_type}",
                }
            ]
        return config

    async def create_vm(self):
        """Create the VM via the GCP API and block until it is RUNNING.

        Returns
        -------
        tuple
            ``(internal_ip, external_ip)``; ``external_ip`` is ``None`` when
            ``public_ingress`` is disabled in the config.
        """
        self.cloud_init = self.cluster.render_process_cloud_init(self)
        self.gcp_config = self.create_gcp_config()
        try:
            inst = await self.cluster.call_async(
                self.cluster.compute.instances()
                .insert(project=self.projectid, zone=self.zone, body=self.gcp_config)
                .execute
            )
            self.gcp_inst = inst
            self.id = self.gcp_inst["id"]
        except HttpError as e:
            # Surface the GCP error, keeping the original exception chained
            # so the underlying API failure is visible in the traceback.
            print(str(e))
            raise Exception(str(e)) from e
        # Poll until the instance reports RUNNING.
        while await self.update_status() != "RUNNING":
            await asyncio.sleep(0.5)
        self.internal_ip = await self.get_internal_ip()
        if self.config.get("public_ingress", True):
            self.external_ip = await self.get_external_ip()
        else:
            self.external_ip = None
        self.cluster._log(
            f"{self.name}\n\tInternal IP: {self.internal_ip}\n\tExternal IP: {self.external_ip}"
        )
        return self.internal_ip, self.external_ip

    async def get_internal_ip(self):
        """Return the VM's internal (VPC) IP address from the instances API."""
        return (
            await self.cluster.call_async(
                self.cluster.compute.instances()
                .list(
                    project=self.projectid, zone=self.zone, filter=f"name={self.name}"
                )
                .execute
            )
        )["items"][0]["networkInterfaces"][0]["networkIP"]

    async def get_external_ip(self):
        """Return the VM's external NAT IP address from the instances API."""
        return (
            await self.cluster.call_async(
                self.cluster.compute.instances()
                .list(
                    project=self.projectid, zone=self.zone, filter=f"name={self.name}"
                )
                .execute
            )
        )["items"][0]["networkInterfaces"][0]["accessConfigs"][0]["natIP"]

    async def update_status(self):
        """Refresh ``self.gcp_inst`` and return the instance status string.

        Raises
        ------
        Exception
            If no instance with this name is found in the zone.
        """
        d = await self.cluster.call_async(
            self.cluster.compute.instances()
            .list(project=self.projectid, zone=self.zone, filter=f"name={self.name}")
            .execute
        )
        self.gcp_inst = d
        if not d.get("items", None):
            self.cluster._log("Failed to find running VMI...")
            self.cluster._log(self.gcp_inst)
            raise Exception(f"Missing Instance {self.name}")
        return d["items"][0]["status"]

    def expand_source_image(self, source_image):
        """Normalize an image name/URI to the partial-URI form the API expects.

        Accepts a short image name (assumed to live in this project), a full
        ``https://www.googleapis.com/compute/v1/...`` URI, or an already
        partial ``projects/...`` path, and returns the partial form.
        """
        if "/" not in source_image:
            return f"projects/{self.projectid}/global/images/{source_image}"
        if source_image.startswith("https://www.googleapis.com/compute/v1/"):
            return source_image.replace("https://www.googleapis.com/compute/v1/", "")
        return source_image

    async def close(self):
        """Delete this VM instance via the GCP API."""
        self.cluster._log(f"Closing Instance: {self.name}")
        await self.cluster.call_async(
            self.cluster.compute.instances()
            .delete(project=self.projectid, zone=self.zone, instance=self.name)
            .execute
        )
class GCPScheduler(SchedulerMixin, GCPInstance):
    """Scheduler running in a GCP instance."""

    def __init__(self, *args, **kwargs):
        kwargs.pop("preemptible", None)  # scheduler instances are not preemptible
        super().__init__(*args, **kwargs)

    async def start(self):
        """Boot the VM, launch the scheduler, and mark this instance running."""
        await self.start_scheduler()
        self.status = Status.running

    async def start_scheduler(self):
        """Create the scheduler VM and set ``self.address`` for clients.

        Also records the scheduler's internal/external IPs on the cluster so
        workers can connect over the VPC-internal address.
        """
        self.cluster._log(
            f"Launching cluster with the following configuration: "
            # "Filesytsem" typo fixed in the log message below.
            f"\n Source Image: {self.source_image} "
            f"\n Docker Image: {self.docker_image} "
            f"\n Machine Type: {self.machine_type} "
            f"\n Filesystem Size: {self.filesystem_size} "
            f"\n Disk Type: {self.disk_type} "
            f"\n N-GPU Type: {self.ngpus} {self.gpu_type}"
            f"\n Zone: {self.zone} "
        )
        self.cluster._log("Creating scheduler instance")
        self.internal_ip, self.external_ip = await self.create_vm()
        if self.config.get("public_ingress", True) and not is_inside_gce():
            # scheduler must be publicly available, and firewall
            # needs to be in place to allow access to 8786 on
            # the external IP
            self.address = f"{self.cluster.protocol}://{self.external_ip}:8786"
        else:
            # if the client is running inside GCE environment
            # it's better to use internal IP, which doesn't
            # require firewall setup
            self.address = f"{self.cluster.protocol}://{self.internal_ip}:8786"
        await self.wait_for_scheduler()
        # need to reserve internal IP for workers
        # gcp docker containers can't see resolve ip address
        self.cluster.scheduler_internal_ip = self.internal_ip
        self.cluster.scheduler_external_ip = self.external_ip
class GCPWorker(GCPInstance):
    """Worker running in a GCP instance."""

    def __init__(
        self,
        scheduler: str,
        *args,
        worker_class: str = "distributed.cli.dask_worker",
        worker_options: dict = None,  # None -> {}; avoids a shared mutable default
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.scheduler = scheduler
        self.worker_class = worker_class
        # Unique worker name scoped to this cluster instance.
        self.name = f"dask-{self.cluster.uuid}-worker-{str(uuid.uuid4())[:8]}"
        # Workers always dial the scheduler over its VPC-internal address.
        internal_scheduler = (
            f"{self.cluster.protocol}://{self.cluster.scheduler_internal_ip}:8786"
        )
        self.command = " ".join(
            [
                self.set_env,
                "python",
                "-m",
                "distributed.cli.dask_spec",
                internal_scheduler,
                "--spec",
                "''%s''"  # in yaml double single quotes escape the single quote
                % json.dumps(
                    {
                        "cls": self.worker_class,
                        "opts": {
                            **(worker_options or {}),
                            "name": self.name,
                        },
                    }
                ),
            ]
        )

    async def start(self):
        """Boot the VM, then start the Dask worker process on it."""
        await super().start()
        await self.start_worker()

    async def start_worker(self):
        """Create the worker VM and record the address it is reachable at."""
        self.cluster._log("Creating worker instance")
        self.internal_ip, self.external_ip = await self.create_vm()
        if self.config.get("public_ingress", True):
            # scheduler is publicly available
            self.address = self.external_ip
        else:
            self.address = self.internal_ip
class GCPCluster(VMCluster):
"""Cluster running on GCP VM Instances.
    This cluster manager constructs a Dask cluster running on Google Cloud Platform VMs.
When configuring your cluster you may find it useful to install the ``gcloud`` tool for querying the
GCP API for available options.
https://cloud.google.com/sdk/gcloud
Parameters
----------
projectid: str
Your GCP project ID. This must be set either here or in your Dask config.
https://cloud.google.com/resource-manager/docs/creating-managing-projects
See the GCP docs page for more info.
https://cloudprovider.dask.org/en/latest/gcp.html#project-id
zone: str
        The GCP zone to launch your cluster in. A full list can be obtained with ``gcloud compute zones list``.
network: str
The GCP VPC network/subnetwork to use. The default is `default`. If using firewall rules,
        please ensure the following accesses are configured:
- egress 0.0.0.0/0 on all ports for downloading docker images and general data access
- ingress 10.0.0.0/8 on all ports for internal communication of workers
- ingress 0.0.0.0/0 on 8786-8787 for external accessibility of the dashboard/scheduler
        - (optional) ingress 0.0.0.0/0 on 22 for ssh access
machine_type: str
The VM machine_type. You can get a full list with ``gcloud compute machine-types list``.
The default is ``n1-standard-1`` which is 3.75GB RAM and 1 vCPU
source_image: str
        The OS image to use for the VM. Dask Cloudprovider will bootstrap Ubuntu based images automatically.
Other images require Docker and for GPUs the NVIDIA Drivers and NVIDIA Docker.
A list of available images can be found with ``gcloud compute images list``
Valid values are:
- The short image name provided it is in ``projectid``.
- The full image name ``projects/<projectid>/global/images/<source_image>``.
- The full image URI such as those listed in ``gcloud compute images list --uri``.
The default is ``projects/ubuntu-os-cloud/global/images/ubuntu-minimal-1804-bionic-v20201014``.
docker_image: string (optional)
The Docker image to run on all instances.
This image must have a valid Python environment and have ``dask`` installed in order for the
``dask-scheduler`` and ``dask-worker`` commands to be available. It is recommended the Python
environment matches your local environment where ``EC2Cluster`` is being created from.
        For GPU instance types the Docker image must have NVIDIA drivers and ``dask-cuda`` installed.
By default the ``daskdev/dask:latest`` image will be used.
docker_args: string (optional)
Extra command line arguments to pass to Docker.
ngpus: int (optional)
        The number of GPUs to attach to the instance.
Default is ``0``.
gpu_type: str (optional)
The name of the GPU to use. This must be set if ``ngpus>0``.
You can see a | |
always round fractional pixel locations in such a way
as to make the images bigger.
This setting may be useful if pixel rounding errors are causing
images to have a gap between them, when they should appear flush.
""")
# TODO: (bev) support anchor property for ImageRGBA
# ref: https://github.com/bokeh/bokeh/issues/1763
class ImageURL(Glyph):
    """ Render images loaded from given URLs.
    Example
    -------
    .. bokeh-plot:: ../tests/glyphs/ImageURL.py
        :source-position: none
    *source:* `tests/glyphs/ImageURL.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/ImageURL.py>`_
    """
    # Image fetching and decoding happens client-side (in the browser),
    # not in Python — see the note on ``url`` below.
    url = DataSpec("url", help="""
    The URLs to retrieve images from.
    .. note::
        The actual retrieving and loading of the images happens on
        the client.
    """)
    x = DataSpec("x", help="""
    The x-coordinates to locate the image anchors.
    """)
    y = DataSpec("y", help="""
    The y-coordinates to locate the image anchors.
    """)
    # TODO: (bev) rename to "dw" for consistency
    w = DataSpec("w", help="""
    The widths of the plot regions that the images will occupy.
    .. note::
        This is not the number of pixels that an image is wide.
        That number is fixed by the image itself.
    .. note::
        This may be renamed to "dw" in the future.
    """)
    # TODO: (bev) rename to "dh" for consistency
    h = DataSpec("h", help="""
    The height of the plot region that the image will occupy.
    .. note::
        This is not the number of pixels that an image is tall.
        That number is fixed by the image itself.
    .. note::
        This may be renamed to "dh" in the future.
    """)
    angle = DataSpec(default=0, help="""
    The angles to rotate the images, in radians as measured from the
    horizontal.
    """)
    dilate = Bool(False, help="""
    Whether to always round fractional pixel locations in such a way
    as to make the images bigger.
    This setting may be useful if pixel rounding errors are causing
    images to have a gap between them, when they should appear flush.
    """)
    anchor = Enum(Anchor, help="""
    What position of the image should be anchored at the `x`, `y`
    coordinates.
    """)
class Line(Glyph):
    """ Render a single line.
    .. note::
        The ``Line`` glyph is different from most other glyphs in that
        the vector of values only produces one glyph on the Plot.
    Example
    -------
    .. bokeh-plot:: ../tests/glyphs/Line.py
        :source-position: none
    *source:* `tests/glyphs/Line.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Line.py>`_
    """
    # x and y are parallel vectors of vertices forming one connected path.
    x = DataSpec("x", help="""
    The x-coordinates for the points of the line.
    """)
    y = DataSpec("y", help="""
    The y-coordinates for the points of the line.
    """)
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the line.
    """)
class MultiLine(Glyph):
    """ Render several lines.
    .. note::
        The data for the ``MultiLine`` glyph is different in that the
        vector of values is not a vector of scalars. Rather, it is a
        "list of lists".
    Example
    -------
    .. bokeh-plot:: ../tests/glyphs/MultiLine.py
        :source-position: none
    *source:* `tests/glyphs/MultiLine.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/MultiLine.py>`_
    """
    xs = DataSpec("xs", help="""
    The x-coordinates for all the lines, given as a "list of lists".
    """)
    # Fixed copy/paste error: the help text previously described x-coordinates.
    ys = DataSpec("ys", help="""
    The y-coordinates for all the lines, given as a "list of lists".
    """)
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the lines.
    """)
class Oval(Glyph):
    u""" Render ovals.
    .. note::
        This glyph renders ovals using Bézier curves, which are similar,
        but not identical to ellipses.
    Example
    -------
    .. bokeh-plot:: ../tests/glyphs/Oval.py
        :source-position: none
    *source:* `tests/glyphs/Oval.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Oval.py>`_
    """
    # Ovals are specified by center point, overall size, and rotation.
    x = DataSpec("x", help="""
    The x-coordinates of the centers of the ovals.
    """)
    y = DataSpec("y", help="""
    The y-coordinates of the centers of the ovals.
    """)
    width = DataSpec("width", help="""
    The overall widths of each oval.
    """)
    height = DataSpec("height", help="""
    The overall height of each oval.
    """)
    angle = DataSpec("angle", help="""
    The angle the ovals are rotated from horizontal. [rad]
    """)
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the ovals.
    """)
    fill_props = Include(FillProps, use_prefix=False, help="""
    The %s values for the ovals.
    """)
class Patch(Glyph):
    """ Render a single patch.
    .. note::
        The ``Patch`` glyph is different from most other glyphs in that
        the vector of values only produces one glyph on the Plot.
    Example
    -------
    .. bokeh-plot:: ../tests/glyphs/Patch.py
        :source-position: none
    *source:* `tests/glyphs/Patch.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Patch.py>`_
    """
    # NaN values in x/y act as separators between sub-polygons of the patch.
    x = DataSpec("x", help="""
    The x-coordinates for the points of the patch.
    .. note::
        A patch may comprise multiple polygons. In this case the
        x-coordinates for each polygon should be separated by NaN
        values in the sequence.
    """)
    y = DataSpec("y", help="""
    The y-coordinates for the points of the patch.
    .. note::
        A patch may comprise multiple polygons. In this case the
        y-coordinates for each polygon should be separated by NaN
        values in the sequence.
    """)
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the patch.
    """)
    fill_props = Include(FillProps, use_prefix=False, help="""
    The %s values for the patch.
    """)
class Patches(Glyph):
    """ Render several patches.

    .. note::
        The data for the ``Patches`` glyph is different in that the
        vector of values is not a vector of scalars. Rather, it is a
        "list of lists".

    Example
    -------

    .. bokeh-plot:: ../tests/glyphs/Patches.py
        :source-position: none

    *source:* `tests/glyphs/Patches.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Patches.py>`_

    """

    # Note the plural field names: each entry is itself a sequence of
    # coordinates (one sublist per patch).
    xs = DataSpec("xs", help="""
    The x-coordinates for all the patches, given as a "list of lists".

    .. note::
        Individual patches may comprise multiple polygons. In this case
        the x-coordinates for each polygon should be separated by NaN
        values in the sublists.
    """)

    ys = DataSpec("ys", help="""
    The y-coordinates for all the patches, given as a "list of lists".

    .. note::
        Individual patches may comprise multiple polygons. In this case
        the y-coordinates for each polygon should be separated by NaN
        values in the sublists.
    """)

    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the patches.
    """)

    fill_props = Include(FillProps, use_prefix=False, help="""
    The %s values for the patches.
    """)
class Quad(Glyph):
    """ Render axis-aligned quads.

    Example
    -------

    .. bokeh-plot:: ../tests/glyphs/Quad.py
        :source-position: none

    *source:* `tests/glyphs/Quad.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Quad.py>`_

    """

    # A quad is specified by its four edges rather than center+size.
    left = DataSpec("left", help="""
    The x-coordinates of the left edges.
    """)

    right = DataSpec("right", help="""
    The x-coordinates of the right edges.
    """)

    bottom = DataSpec("bottom", help="""
    The y-coordinates of the bottom edges.
    """)

    top = DataSpec("top", help="""
    The y-coordinates of the top edges.
    """)

    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the quads.
    """)

    fill_props = Include(FillProps, use_prefix=False, help="""
    The %s values for the quads.
    """)
class Quadratic(Glyph):
    """ Render parabolas.

    Example
    -------

    .. bokeh-plot:: ../tests/glyphs/Quadratic.py
        :source-position: none

    *source:* `tests/glyphs/Quadratic.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Quadratic.py>`_

    """

    # Quadratic Bézier segment: endpoints (x0, y0) -> (x1, y1) with a
    # single control point (cx, cy).
    x0 = DataSpec("x0", help="""
    The x-coordinates of the starting points.
    """)

    y0 = DataSpec("y0", help="""
    The y-coordinates of the starting points.
    """)

    x1 = DataSpec("x1", help="""
    The x-coordinates of the ending points.
    """)

    y1 = DataSpec("y1", help="""
    The y-coordinates of the ending points.
    """)

    cx = DataSpec("cx", help="""
    The x-coordinates of the control points.
    """)

    cy = DataSpec("cy", help="""
    The y-coordinates of the control points.
    """)

    # Curves have no interior, so only line properties are included.
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the parabolas.
    """)
class Ray(Glyph):
    """ Render rays.

    Example
    -------

    .. bokeh-plot:: ../tests/glyphs/Ray.py
        :source-position: none

    *source:* `tests/glyphs/Ray.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Ray.py>`_

    """

    x = DataSpec("x", help="""
    The x-coordinates to start the rays.
    """)

    y = DataSpec("y", help="""
    The y-coordinates to start the rays.
    """)

    angle = DataSpec("angle", help="""
    The angles in radians to extend the rays, as measured from the
    horizontal.
    """)

    # TODO: (bev) should default to "length" field?
    # Unlike the other specs here, length has no field name and uses
    # screen units by default.
    length = DataSpec(units="screen", help="""
    The length to extend the ray. Note that this ``length`` defaults
    to screen units.
    """)

    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the rays.
    """)
class Rect(Glyph):
""" Render rectangles.
Example
-------
.. bokeh-plot:: ../tests/glyphs/Rect.py
:source-position: none
*source:* `tests/glyphs/Rect.py <https://github.com/bokeh/bokeh/tree/master/tests/glyphs/Rect.py>`_
"""
x = DataSpec("x", help="""
The x-coordinates of the centers of the rectangles.
""")
y = DataSpec("y", help="""
The y-coordinates of the centers of the rectangles.
""")
width = DataSpec("width", help="""
The overall widths of the rectangles.
""")
height = DataSpec("height", help="""
The overall heights of the rectangles.
""")
angle = DataSpec("angle", help="""
The angles to rotate the rectangles, in radians, as measured from
the horizontal.
""")
dilate = Bool(False, help="""
Whether to always round fractional pixel locations in such a way
as to make the rectangles bigger.
This setting may be useful if pixel rounding errors are causing
rectangles | |
FAKE_RESPONSE_HEADERS,
{'services': [{'binary': binary,
'host': host,
'zone': 'nova',
'status': 'enabled',
'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2),
'id': service_id_1},
{'binary': binary,
'host': host,
'zone': 'nova',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'id': service_id_2},
]})
def put_os_services_enable(self, body, **kwargs):
    """Fake enabling a service: echo host/binary with status 'enabled'."""
    service = {
        'host': body['host'],
        'binary': body['binary'],
        'status': 'enabled',
    }
    return (200, FAKE_RESPONSE_HEADERS, {'service': service})
def put_os_services_disable(self, body, **kwargs):
    """Fake disabling a service: echo host/binary with status 'disabled'."""
    service = {
        'host': body['host'],
        'binary': body['binary'],
        'status': 'disabled',
    }
    return (200, FAKE_RESPONSE_HEADERS, {'service': service})
def put_os_services_disable_log_reason(self, body, **kwargs):
    """Fake disabling a service with a logged reason echoed back."""
    service = {
        'host': body['host'],
        'binary': body['binary'],
        'status': 'disabled',
        'disabled_reason': body['disabled_reason'],
    }
    return (200, FAKE_RESPONSE_HEADERS, {'service': service})
def put_os_services_75e9eabc_ed3b_4f11_8bba_add1e7e7e2de(
        self, body, **kwargs):
    """This should only be called with microversion >= 2.53."""
    # Fixed host/binary; status, reason and forced_down mirror the
    # request body, with sensible defaults when absent.
    service = {
        'host': 'host1',
        'binary': 'nova-compute',
        'status': body.get('status', 'enabled'),
        'disabled_reason': body.get('disabled_reason'),
        'forced_down': body.get('forced_down', False),
    }
    return (200, FAKE_RESPONSE_HEADERS, {'service': service})
def delete_os_services_1(self, **kwargs):
    """Acknowledge deleting service 1: empty 204 reply."""
    return (204, FAKE_RESPONSE_HEADERS, None)
def delete_os_services_75e9eabc_ed3b_4f11_8bba_add1e7e7e2de(self, **kwargs):
    """Acknowledge deleting the UUID-identified service: empty 204 reply."""
    return (204, FAKE_RESPONSE_HEADERS, None)
def put_os_services_force_down(self, body, **kwargs):
    """Echo host/binary from *body*; the fake always reports forced_down False."""
    service = {
        'host': body['host'],
        'binary': body['binary'],
        'forced_down': False,
    }
    return (200, FAKE_RESPONSE_HEADERS, {'service': service})
#
# Hosts
#
def get_os_hosts(self, **kwargs):
    """List two fake services on host1, both placed in the requested zone."""
    zone = kwargs.get('zone', 'nova1')
    listing = [
        {'host': 'host1', 'service': svc, 'zone': zone}
        for svc in ('nova-compute', 'nova-cert')
    ]
    return (200, {}, {'hosts': listing})
def put_os_hosts_sample_host_1(self, body, **kwargs):
    """Fake host update: report sample-host_1 as enabled."""
    payload = {'host': 'sample-host_1', 'status': 'enabled'}
    return (200, {}, payload)
def put_os_hosts_sample_host_2(self, body, **kwargs):
    """Fake host update: report sample-host_2 in maintenance mode."""
    payload = {'host': 'sample-host_2', 'maintenance_mode': 'on_maintenance'}
    return (200, {}, payload)
def put_os_hosts_sample_host_3(self, body, **kwargs):
    """Fake host update: sample-host_3 both enabled and in maintenance."""
    payload = {
        'host': 'sample-host_3',
        'status': 'enabled',
        'maintenance_mode': 'on_maintenance',
    }
    return (200, {}, payload)
def get_os_hosts_sample_host_reboot(self, **kwargs):
    """Report a 'reboot' power action for sample_host."""
    payload = {'host': 'sample_host', 'power_action': 'reboot'}
    return (200, {}, payload)
def get_os_hosts_sample_host_startup(self, **kwargs):
    """Report a 'startup' power action for sample_host."""
    payload = {'host': 'sample_host', 'power_action': 'startup'}
    return (200, {}, payload)
def get_os_hosts_sample_host_shutdown(self, **kwargs):
    """Report a 'shutdown' power action for sample_host."""
    payload = {'host': 'sample_host', 'power_action': 'shutdown'}
    return (200, {}, payload)
def get_os_hypervisors(self, **kwargs):
    """List the two fake hypervisors (ids 1234 and 5678)."""
    listing = [
        {'id': 1234, 'hypervisor_hostname': 'hyper1'},
        {'id': 5678, 'hypervisor_hostname': 'hyper2'},
    ]
    return (200, {}, {'hypervisors': listing})
def get_os_hypervisors_statistics(self, **kwargs):
    """Return aggregate statistics across the two fake hypervisors."""
    stats = {
        'count': 2,
        'vcpus': 8,
        'memory_mb': 20 * 1024,
        'local_gb': 500,
        'vcpus_used': 4,
        'memory_mb_used': 10 * 1024,
        'local_gb_used': 250,
        'free_ram_mb': 10 * 1024,
        'free_disk_gb': 250,
        'current_workload': 4,
        'running_vms': 4,
        'disk_available_least': 200,
    }
    return (200, {}, {'hypervisor_statistics': stats})
def get_os_hypervisors_hyper1(self, **kw):
    """Return canned detail for hypervisor 'hyper1' (integer id 1234)."""
    return (200, {}, {
        'hypervisor':
            {'id': 1234,
             'service': {'id': 1, 'host': 'compute1'},
             'vcpus': 4,
             'memory_mb': 10 * 1024,
             'local_gb': 250,
             'vcpus_used': 2,
             'memory_mb_used': 5 * 1024,
             'local_gb_used': 125,
             'hypervisor_type': "xen",
             'hypervisor_version': 3,
             'hypervisor_hostname': "hyper1",
             'free_ram_mb': 5 * 1024,
             'free_disk_gb': 125,
             'current_workload': 2,
             'running_vms': 2,
             'cpu_info': 'cpu_info',
             'disk_available_least': 100}})
def get_os_hypervisors_region_child_1(self, **kw):
    """Same canned detail as hyper1, but with a cells-style string id.

    The 'region!child@1' id exercises handling of non-integer
    hypervisor identifiers.
    """
    return (200, {}, {
        'hypervisor':
            {'id': 'region!child@1',
             'service': {'id': 1, 'host': 'compute1'},
             'vcpus': 4,
             'memory_mb': 10 * 1024,
             'local_gb': 250,
             'vcpus_used': 2,
             'memory_mb_used': 5 * 1024,
             'local_gb_used': 125,
             'hypervisor_type': "xen",
             'hypervisor_version': 3,
             'hypervisor_hostname': "hyper1",
             'free_ram_mb': 5 * 1024,
             'free_disk_gb': 125,
             'current_workload': 2,
             'running_vms': 2,
             'cpu_info': 'cpu_info',
             'disk_available_least': 100}})
def get_os_hypervisors_hyper_search(self, **kwargs):
    """A hostname search for 'hyper' matches both fake hypervisors."""
    matches = [
        {'id': 1234, 'hypervisor_hostname': 'hyper1'},
        {'id': 5678, 'hypervisor_hostname': 'hyper2'},
    ]
    return (200, {}, {'hypervisors': matches})
def get_os_hypervisors_hyper_servers(self, **kwargs):
    """List both fake hypervisors together with the servers they host."""
    def hv(hv_id, hostname, servers):
        # Build one hypervisor entry with its server name/uuid pairs.
        return {'id': hv_id,
                'hypervisor_hostname': hostname,
                'servers': [{'name': n, 'uuid': u} for n, u in servers]}

    listing = [
        hv(1234, 'hyper1', [('inst1', 'uuid1'), ('inst2', 'uuid2')]),
        hv(5678, 'hyper2', [('inst3', 'uuid3'), ('inst4', 'uuid4')]),
    ]
    return (200, {}, {'hypervisors': listing})
def get_os_hypervisors_hyper1_servers(self, **kwargs):
    """Servers hosted by hyper1 only (inst1 and inst2)."""
    entry = {
        'id': 1234,
        'hypervisor_hostname': 'hyper1',
        'servers': [{'name': 'inst1', 'uuid': 'uuid1'},
                    {'name': 'inst2', 'uuid': 'uuid2'}],
    }
    return (200, {}, {'hypervisors': [entry]})
def get_os_hypervisors_hyper2_servers(self, **kwargs):
    """Servers hosted by hyper2 only (inst3 and inst4)."""
    entry = {
        'id': 5678,
        'hypervisor_hostname': 'hyper2',
        'servers': [{'name': 'inst3', 'uuid': 'uuid3'},
                    {'name': 'inst4', 'uuid': 'uuid4'}],
    }
    return (200, {}, {'hypervisors': [entry]})
def get_os_hypervisors_hyper_no_servers_servers(self, **kwargs):
    """Server listing whose single match carries no 'servers' key."""
    match = {'id': 1234, 'hypervisor_hostname': 'hyper1'}
    return (200, {}, {'hypervisors': [match]})
def get_os_hypervisors_1234(self, **kw):
    """Hypervisor detail fetched by numeric id; same payload as hyper1."""
    return (200, {}, {
        'hypervisor':
            {'id': 1234,
             'service': {'id': 1, 'host': 'compute1'},
             'vcpus': 4,
             'memory_mb': 10 * 1024,
             'local_gb': 250,
             'vcpus_used': 2,
             'memory_mb_used': 5 * 1024,
             'local_gb_used': 125,
             'hypervisor_type': "xen",
             'hypervisor_version': 3,
             'hypervisor_hostname': "hyper1",
             'free_ram_mb': 5 * 1024,
             'free_disk_gb': 125,
             'current_workload': 2,
             'running_vms': 2,
             'cpu_info': 'cpu_info',
             'disk_available_least': 100}})
def get_os_hypervisors_1234_uptime(self, **kwargs):
    """Report a canned uptime string for hypervisor 1234."""
    info = {'id': 1234,
            'hypervisor_hostname': 'hyper1',
            'uptime': 'fake uptime'}
    return (200, {}, {'hypervisor': info})
def get_os_hypervisors_region_child_1_uptime(self, **kwargs):
    """Uptime for the string-id (cells-style) hypervisor variant."""
    info = {'id': 'region!child@1',
            'hypervisor_hostname': 'hyper1',
            'uptime': 'fake uptime'}
    return (200, {}, {'hypervisor': info})
def get_v2_0_networks(self, **kwargs):
    """Return neutron proxied networks.

    We establish a few different possible networks that we can get
    by name, which we can then call in tests. The only usage of
    this API should be for name -> id translation, however a full
    valid neutron block is provided for the private network to see
    the kinds of things that will be in that payload.
    """
    private_net = {
        "status": "ACTIVE",
        "router:external": False,
        "availability_zone_hints": [],
        "availability_zones": ["nova"],
        "description": "",
        "name": "private",
        "subnets": ["64706c26-336c-4048-ab3c-23e3283bca2c",
                    "18512740-c760-4d5f-921f-668105c9ee44"],
        "shared": False,
        "tenant_id": "abd42f270bca43ea80fe4a166bc3536c",
        "created_at": "2016-08-15T17:34:49",
        "tags": [],
        "ipv6_address_scope": None,
        "updated_at": "2016-08-15T17:34:49",
        "admin_state_up": True,
        "ipv4_address_scope": None,
        "port_security_enabled": True,
        "mtu": 1450,
        "id": "e43a56c7-11d4-45c9-8681-ddc8171b5850",
        "revision": 2,
    }
    # Two ACTIVE networks sharing a name, to exercise ambiguous lookups.
    duplicates = [
        {"status": "ACTIVE", "id": "e43a56c7-11d4-45c9-8681-ddc8171b5850"},
        {"status": "ACTIVE", "id": "f43a56c7-11d4-45c9-8681-ddc8171b5850"},
    ]
    catalogue = {
        'private': [private_net],
        'duplicate': duplicates,
        'blank': [],
    }
    requested = kwargs.get('name', "blank")
    return (200, {}, {"networks": catalogue[requested]})
def get_os_availability_zone_detail(self, **kw):
    """Detailed AZ listing: one normal zone, one internal, one unavailable.

    'internal' holds the non-compute services; zone-2 has no hosts at all.
    """
    return (200, {}, {
        "availabilityZoneInfo": [
            {"zoneName": "zone-1",
             "zoneState": {"available": True},
             "hosts": {
                 "fake_host-1": {
                     "nova-compute": {
                         "active": True,
                         "available": True,
                         "updated_at": datetime.datetime(
                             2012, 12, 26, 14, 45, 25, 0)}}}},
            {"zoneName": "internal",
             "zoneState": {"available": True},
             "hosts": {
                 "fake_host-1": {
                     "nova-sched": {
                         "active": True,
                         "available": True,
                         "updated_at": datetime.datetime(
                             2012, 12, 26, 14, 45, 25, 0)}},
                 "fake_host-2": {
                     "nova-network": {
                         "active": True,
                         "available": False,
                         "updated_at": datetime.datetime(
                             2012, 12, 26, 14, 45, 24, 0)}}}},
            {"zoneName": "zone-2",
             "zoneState": {"available": False},
             "hosts": None}]})
def get_servers_1234_os_interface(self, **kw):
    """List interface attachments for server 1234.

    NOTE(review): both entries share port-id-1 and the same MAC --
    presumably intentional for the fake; confirm against the tests
    that consume it.
    """
    return (200, {}, {
        "interfaceAttachments": [
            {"port_state": "ACTIVE",
             "net_id": "net-id-1",
             "port_id": "port-id-1",
             "mac_address": "aa:bb:cc:dd:ee:ff",
             "fixed_ips": [{"ip_address": "192.168.127.12"}],
             },
            {"port_state": "ACTIVE",
             "net_id": "net-id-1",
             "port_id": "port-id-1",
             "mac_address": "aa:bb:cc:dd:ee:ff",
             "fixed_ips": [{"ip_address": "192.168.127.12"}],
             }]
    })
def post_servers_1234_os_interface(self, **kwargs):
    """Acknowledge an interface attach with an empty attachment record."""
    return (200, {}, {'interfaceAttachment': {}})
def delete_servers_1234_os_interface_port_id(self, **kwargs):
    """Acknowledge an interface detach; no response body."""
    return (200, {}, None)
def post_servers_1234_os_volume_attachments(self, **kwargs):
    """Report volume 2 freshly attached at /dev/vdb."""
    attachment = {'device': '/dev/vdb', 'volumeId': 2}
    return (200, FAKE_RESPONSE_HEADERS, {'volumeAttachment': attachment})
def put_servers_1234_os_volume_attachments_Work(self, **kwargs):
    """Acknowledge updating the 'Work' attachment to volume 2."""
    return (200, FAKE_RESPONSE_HEADERS, {'volumeAttachment': {'volumeId': 2}})
def get_servers_1234_os_volume_attachments(self, **kw):
    """List the single 'Work' volume attachment for server 1234."""
    return (200, FAKE_RESPONSE_HEADERS, {
        "volumeAttachments": [
            {"display_name": "Work",
             "display_description": "volume for work",
             "status": "ATTACHED",
             "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983",
             "created_at": "2011-09-09T00:00:00Z",
             "attached": "2011-11-11T00:00:00Z",
             "size": 1024,
             "attachments": [{"id": "3333", "links": ''}],
             "metadata": {}}]})
def get_servers_1234_os_volume_attachments_Work(self, **kw):
    """Single-attachment detail view for the 'Work' volume."""
    return (200, FAKE_RESPONSE_HEADERS, {
        "volumeAttachment":
            {"display_name": "Work",
             "display_description": "volume for work",
             "status": "ATTACHED",
             "id": "15e59938-07d5-11e1-90e3-e3dffe0c5983",
             "created_at": "2011-09-09T00:00:00Z",
             "attached": "2011-11-11T00:00:00Z",
             "size": 1024,
             "attachments": [{"id": "3333", "links": ''}],
             "metadata": {}}})
def delete_servers_1234_os_volume_attachments_Work(self, **kwargs):
    """Acknowledge detaching the 'Work' volume with an empty body."""
    return (200, FAKE_RESPONSE_HEADERS, {})
def get_servers_1234_os_instance_actions(self, **kw):
    """List instance actions for server 1234: one 'create' action."""
    return (200, FAKE_RESPONSE_HEADERS, {
        "instanceActions":
            [{"instance_uuid": "1234",
              "user_id": "b968c25e04ab405f9fe4e6ca54cce9a5",
              "start_time": "2013-03-25T13:45:09.000000",
              "request_id": "req-abcde12345",
              "action": "create",
              "message": None,
              "project_id": "04019601fe3648c0abd4f4abfb9e6106"}]})
def get_servers_1234_os_instance_actions_req_abcde12345(self, **kw):
    """Detail for a single instance action, keyed by its request id."""
    return (200, FAKE_RESPONSE_HEADERS, {
        "instanceAction":
            {"instance_uuid": "1234",
             "user_id": "b968c25e04ab405f9fe4e6ca54cce9a5",
             "start_time": "2013-03-25T13:45:09.000000",
             "request_id": "req-abcde12345",
             "action": "create",
             "message": None,
             "project_id": "04019601fe3648c0abd4f4abfb9e6106"}})
def post_servers_uuid1_action(self, **kwargs):
    """Accept (202) any action posted to server uuid1."""
    return (202, {}, {})
def post_servers_uuid2_action(self, **kwargs):
    """Accept (202) any action posted to server uuid2."""
    return (202, {}, {})
def post_servers_uuid3_action(self, **kwargs):
    """Accept (202) any action posted to server uuid3."""
    return (202, {}, {})
def post_servers_uuid4_action(self, **kwargs):
    """Accept (202) any action posted to server uuid4."""
    return (202, {}, {})
def post_servers_uuid5_action(self, **kwargs):
    """Accept (202) any action posted to server uuid5."""
    return (202, {}, {})
def post_servers_uuid6_action(self, **kwargs):
    """Accept (202) any action posted to server uuid6."""
    return (202, {}, {})
def get_os_cells_child_cell(self, **kw):
    """Detail for the child cell 'cell1' (top-level keys duplicated in 'info')."""
    cell = {'cell': {
        'username': 'cell1_user',
        'name': 'cell1',
        'rpc_host': '10.0.1.10',
        'info': {
            'username': 'cell1_user',
            'rpc_host': '10.0.1.10',
            'type': 'child',
            'name': 'cell1',
            'rpc_port': 5673},
        'type': 'child',
        'rpc_port': 5673,
        'loaded': True
    }}
    return (200, FAKE_RESPONSE_HEADERS, cell)
def get_os_cells_capacities(self, **kw):
    """Global cell capacities: RAM/disk free, bucketed by unit size (MB)."""
    cell_capacities_response = {"cell": {"capacities": {"ram_free": {
        "units_by_mb": {"8192": 0, "512": 13, "4096": 1, "2048": 3,
                        "16384": 0}, "total_mb": 7680}, "disk_free": {
        "units_by_mb": {"81920": 11, "20480": 46, "40960": 23, "163840": 5,
                        "0": 0}, "total_mb": 1052672}}}}
    return (200, FAKE_RESPONSE_HEADERS, cell_capacities_response)
def get_os_cells_child_cell_capacities(self, **kwargs):
    """Child-cell capacities are identical to the global cell capacities."""
    return self.get_os_cells_capacities()
def get_os_migrations(self, **kw):
    """List fake migrations, optionally filtered by ``instance_uuid``.

    Returns both canned migrations when no filter is given, or just the
    matching one when ``instance_uuid`` is supplied.
    """
    migration1 = {
        "created_at": "2012-10-29T13:42:02.000000",
        "dest_compute": "compute2",
        "dest_host": "1.2.3.4",
        "dest_node": "node2",
        "id": '1234',
        "instance_uuid": "instance_id_123",
        "new_instance_type_id": 2,
        "old_instance_type_id": 1,
        "source_compute": "compute1",
        "source_node": "node1",
        "status": "Done",
        "updated_at": "2012-10-29T13:42:02.000000"
    }
    # NOTE(review): shares id '1234' with migration1 and carries an
    # impossible date ("2013-11-50") -- presumably deliberate fixture
    # quirks; confirm before "fixing".
    migration2 = {
        "created_at": "2012-10-29T13:42:02.000000",
        "dest_compute": "compute2",
        "dest_host": "1.2.3.4",
        "dest_node": "node2",
        "id": '1234',
        "instance_uuid": "instance_id_456",
        "new_instance_type_id": 2,
        "old_instance_type_id": 1,
        "source_compute": "compute1",
        "source_node": "node1",
        "status": "Done",
        "updated_at": "2013-11-50T13:42:02.000000"
    }
    # The migration_type field only exists from microversion 2.23 on.
    if self.api_version >= api_versions.APIVersion("2.23"):
        migration1.update({"migration_type": "live-migration"})
        migration2.update({"migration_type": "live-migration"})

    migration_list = []
    instance_uuid = kw.get('instance_uuid', None)
    if instance_uuid == migration1['instance_uuid']:
        migration_list.append(migration1)
    elif instance_uuid == migration2['instance_uuid']:
        migration_list.append(migration2)
    elif instance_uuid is None:
        migration_list.extend([migration1, migration2])

    migrations = {'migrations': migration_list}
    return (200, FAKE_RESPONSE_HEADERS, migrations)
#
# Server Groups
#
def get_os_server_groups(self, **kw):
server_groups = [
{"members": [], "metadata": {},
"id": "2cbd51f4-fafe-4cdb-801b-cf913a6f288b",
"policies": [], "name": "ig1"},
{"members": [], "metadata": {},
"id": "4473bb03-4370-4bfb-80d3-dc8cffc47d94",
"policies": ["anti-affinity"], "name": "ig2"},
{"members": [], "metadata": {"key": "value"},
"id": "31ab9bdb-55e1-4ac3-b094-97eeb1b65cc4",
"policies": [], "name": "ig3"},
{"members": ["2dccb4a1-02b9-482a-aa23-5799490d6f5d"],
"metadata": {},
"id": "4890bb03-7070-45fb-8453-d34556c87d94",
"policies": ["anti-affinity"], "name": "ig2"}]
other_project_server_groups = [
{"members": [], "metadata": {},
"id": "11111111-1111-1111-1111-111111111111",
"policies": [], "name": "ig4"},
{"members": [], "metadata": | |
"port-id-value"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId.PortIdValue, ['port_id_format', 'port_id_string', 'port_id_mac', 'port_id_raw'], name, value)
class ReplyEgress(Entity):
    """
    Reply egress TLV

    .. attribute:: port_id

        Port ID

        **type**\: :py:class:`PortId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId>`

    .. attribute:: action

        Reply egress action

        **type**\: :py:class:`CfmPmEgressAction <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmEgressAction>`

    .. attribute:: mac_address

        MAC address

        **type**\: str

        **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

    """

    # NOTE(review): this looks like machine-generated YANG binding code
    # (ydk-gen style); prefer regenerating the model over hand-editing.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress, self).__init__()

        self.yang_name = "reply-egress"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # One child container (port-id) plus two scalar leafs.
        self._child_classes = OrderedDict([("port-id", ("port_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId))])
        self._leafs = OrderedDict([
            ('action', (YLeaf(YType.enumeration, 'action'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmEgressAction', '')])),
            ('mac_address', (YLeaf(YType.str, 'mac-address'), ['str'])),
        ])
        self.action = None
        self.mac_address = None

        self.port_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId()
        self.port_id.parent = self
        self._children_name_map["port_id"] = "port-id"
        self._segment_path = lambda: "reply-egress"
        # After freezing, attribute writes go through _perform_setattr below.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress, ['action', 'mac_address'], name, value)


    class PortId(Entity):
        """
        Port ID

        .. attribute:: port_id_value

            Port ID (Current)

            **type**\: :py:class:`PortIdValue <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId.PortIdValue>`

        .. attribute:: port_id_type

            Port ID type

            **type**\: :py:class:`CfmPmPortIdFmt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmPortIdFmt>`

        .. attribute:: port_id_type_value

            Port ID type value

            **type**\: int

            **range:** 0..255

        .. attribute:: port_id

            Port ID (Deprecated)

            **type**\: str

            **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?

        """

        _prefix = 'ethernet-cfm-oper'
        _revision = '2017-10-06'

        def __init__(self):
            super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId, self).__init__()

            self.yang_name = "port-id"
            self.yang_parent_name = "reply-egress"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("port-id-value", ("port_id_value", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId.PortIdValue))])
            self._leafs = OrderedDict([
                ('port_id_type', (YLeaf(YType.enumeration, 'port-id-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmPortIdFmt', '')])),
                ('port_id_type_value', (YLeaf(YType.uint8, 'port-id-type-value'), ['int'])),
                ('port_id', (YLeaf(YType.str, 'port-id'), ['str'])),
            ])
            self.port_id_type = None
            self.port_id_type_value = None
            # Deprecated string form; the structured value lives in
            # port_id_value below.
            self.port_id = None

            self.port_id_value = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId.PortIdValue()
            self.port_id_value.parent = self
            self._children_name_map["port_id_value"] = "port-id-value"
            self._segment_path = lambda: "port-id"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId, ['port_id_type', 'port_id_type_value', 'port_id'], name, value)


        class PortIdValue(Entity):
            """
            Port ID (Current)

            .. attribute:: port_id_format

                PortIDFormat

                **type**\: :py:class:`CfmPmIdFmt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmIdFmt>`

            .. attribute:: port_id_string

                Port ID String

                **type**\: str

            .. attribute:: port_id_mac

                Port ID MAC Address

                **type**\: str

                **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

            .. attribute:: port_id_raw

                Raw Port ID

                **type**\: str

                **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?

            """

            _prefix = 'ethernet-cfm-oper'
            _revision = '2017-10-06'

            def __init__(self):
                super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId.PortIdValue, self).__init__()

                self.yang_name = "port-id-value"
                self.yang_parent_name = "port-id"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                # Leaf-only node: no child containers.
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('port_id_format', (YLeaf(YType.enumeration, 'port-id-format'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmIdFmt', '')])),
                    ('port_id_string', (YLeaf(YType.str, 'port-id-string'), ['str'])),
                    ('port_id_mac', (YLeaf(YType.str, 'port-id-mac'), ['str'])),
                    ('port_id_raw', (YLeaf(YType.str, 'port-id-raw'), ['str'])),
                ])
                self.port_id_format = None
                self.port_id_string = None
                self.port_id_mac = None
                self.port_id_raw = None
                self._segment_path = lambda: "port-id-value"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyEgress.PortId.PortIdValue, ['port_id_format', 'port_id_string', 'port_id_mac', 'port_id_raw'], name, value)
class LastHop(Entity):
    """
    Last hop ID

    .. attribute:: egress_id

        Egress ID

        **type**\: :py:class:`EgressId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop.EgressId>`

    .. attribute:: last_hop_format

        LastHopFormat

        **type**\: :py:class:`CfmPmLastHopFmt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmLastHopFmt>`

    .. attribute:: host_name

        Hostname

        **type**\: str

    """

    # NOTE(review): machine-generated YANG binding (ydk-gen style).
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop, self).__init__()

        self.yang_name = "last-hop"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("egress-id", ("egress_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop.EgressId))])
        self._leafs = OrderedDict([
            ('last_hop_format', (YLeaf(YType.enumeration, 'last-hop-format'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmLastHopFmt', '')])),
            ('host_name', (YLeaf(YType.str, 'host-name'), ['str'])),
        ])
        self.last_hop_format = None
        self.host_name = None

        self.egress_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop.EgressId()
        self.egress_id.parent = self
        self._children_name_map["egress_id"] = "egress-id"
        self._segment_path = lambda: "last-hop"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop, ['last_hop_format', 'host_name'], name, value)


    class EgressId(Entity):
        """
        Egress ID

        .. attribute:: unique_id

            Unique ID

            **type**\: int

            **range:** 0..65535

        .. attribute:: mac_address

            MAC address

            **type**\: str

            **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

        """

        _prefix = 'ethernet-cfm-oper'
        _revision = '2017-10-06'

        def __init__(self):
            super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop.EgressId, self).__init__()

            self.yang_name = "egress-id"
            self.yang_parent_name = "last-hop"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            # Leaf-only node: no child containers.
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('unique_id', (YLeaf(YType.uint16, 'unique-id'), ['int'])),
                ('mac_address', (YLeaf(YType.str, 'mac-address'), ['str'])),
            ])
            self.unique_id = None
            self.mac_address = None
            self._segment_path = lambda: "egress-id"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.LastHop.EgressId, ['unique_id', 'mac_address'], name, value)
class OrganizationSpecificTlv(Entity):
    """
    Organizational\-specific TLVs

    .. attribute:: oui

        Organizationally\-unique ID

        **type**\: str

        **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?

    .. attribute:: subtype

        Subtype of TLV

        **type**\: int

        **range:** 0..255

    .. attribute:: value

        Binary data in TLV

        **type**\: str

        **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?

    """

    # NOTE(review): machine-generated YANG binding (ydk-gen style).
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.OrganizationSpecificTlv, self).__init__()

        self.yang_name = "organization-specific-tlv"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Leaf-only node: no child containers.
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('oui', (YLeaf(YType.str, 'oui'), ['str'])),
            ('subtype', (YLeaf(YType.uint8, 'subtype'), ['int'])),
            ('value', (YLeaf(YType.str, 'value'), ['str'])),
        ])
        self.oui = None
        self.subtype = None
        self.value = None
        self._segment_path = lambda: "organization-specific-tlv"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.OrganizationSpecificTlv, ['oui', 'subtype', 'value'], name, value)
class UnknownTlv(Entity):
    """
    Unknown TLVs

    .. attribute:: typecode

        Type code of TLV

        **type**\: int

        **range:** 0..255

    .. attribute:: value

        Binary data in TLV

        **type**\: str

        **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?

    """

    # NOTE(review): machine-generated YANG binding (ydk-gen style).
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.UnknownTlv, self).__init__()

        self.yang_name = "unknown-tlv"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Leaf-only node: no child containers.
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('typecode', (YLeaf(YType.uint8, 'typecode'), ['int'])),
            ('value', (YLeaf(YType.str, 'value'), ['str'])),
        ])
        self.typecode = None
        self.value = None
        self._segment_path = lambda: "unknown-tlv"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.UnknownTlv, ['typecode', 'value'], name, value)
class ExploratoryLinktraceReply(Entity):
"""
Received exploratory linktrace replies
.. attribute:: header
Frame header
**type**\: :py:class:`Header <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.Header>`
.. attribute:: sender_id
Sender ID TLV
**type**\: :py:class:`SenderId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.SenderId>`
.. attribute:: reply_ingress
Reply ingress TLV
**type**\: :py:class:`ReplyIngress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.ReplyIngress>`
.. attribute:: reply_egress
Reply egress TLV
**type**\: :py:class:`ReplyEgress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.ReplyEgress>`
.. attribute:: last_hop
Last hop ID
**type**\: :py:class:`LastHop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.LastHop>`
.. attribute:: raw_data
Undecoded frame
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: organization_specific_tlv
Organizational\-specific TLVs
**type**\: list of :py:class:`OrganizationSpecificTlv <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.OrganizationSpecificTlv>`
.. attribute:: unknown_tlv
Unknown TLVs
**type**\: list of :py:class:`UnknownTlv <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.UnknownTlv>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply, self).__init__()
self.yang_name = "exploratory-linktrace-reply"
self.yang_parent_name = "traceroute-cache"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("header", ("header", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.Header)), ("sender-id", ("sender_id", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.SenderId)), ("reply-ingress", ("reply_ingress", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.ReplyIngress)), ("reply-egress", ("reply_egress", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.ReplyEgress)), ("last-hop", ("last_hop", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.LastHop)), ("organization-specific-tlv", ("organization_specific_tlv", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.OrganizationSpecificTlv)), ("unknown-tlv", ("unknown_tlv", Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.UnknownTlv))])
self._leafs = OrderedDict([
('raw_data', (YLeaf(YType.str, 'raw-data'), ['str'])),
])
self.raw_data = None
self.header = Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.Header()
self.header.parent = self
self._children_name_map["header"] = "header"
self.sender_id = Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.SenderId()
self.sender_id.parent = self
self._children_name_map["sender_id"] = "sender-id"
self.reply_ingress = Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.ReplyIngress()
self.reply_ingress.parent = self
self._children_name_map["reply_ingress"] = "reply-ingress"
self.reply_egress = Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.ReplyEgress()
self.reply_egress.parent = self
self._children_name_map["reply_egress"] = "reply-egress"
self.last_hop = Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.LastHop()
self.last_hop.parent = self
self._children_name_map["last_hop"] = "last-hop"
self.organization_specific_tlv = YList(self)
self.unknown_tlv = YList(self)
self._segment_path = lambda: "exploratory-linktrace-reply"
self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK validation; only 'raw_data' is a settable leaf on this entity."""
    self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply, ['raw_data'], name, value)
class Header(Entity):
"""
Frame header
.. attribute:: level
MD level
**type**\: :py:class:`CfmBagMdLevel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagMdLevel>`
.. attribute:: version
Version
**type**\: int
**range:** 0..255
.. attribute:: forwarded
ELR was forwarded
**type**\: bool
.. attribute:: terminal_mep
Terminal MEP reached
**type**\: bool
.. attribute:: reply_filter_unknown
Reply Filter unrecognized
**type**\: bool
.. attribute:: transaction_id
Transaction ID
**type**\: int
**range:** 0..4294967295
.. attribute:: ttl
TTL
**type**\: int
**range:** 0..255
.. attribute:: relay_action
Relay action
**type**\: :py:class:`CfmPmElrRelayAction <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmElrRelayAction>`
.. attribute:: next_hop_timeout
Next Hop Timeout, in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: delay_model
Delay Model
**type**\: :py:class:`CfmPmEltDelayModel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmEltDelayModel>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.ExploratoryLinktraceReply.Header, self).__init__()
self.yang_name = "header"
self.yang_parent_name = "exploratory-linktrace-reply"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])),
('version', (YLeaf(YType.uint8, 'version'), ['int'])),
('forwarded', (YLeaf(YType.boolean, 'forwarded'), ['bool'])),
('terminal_mep', (YLeaf(YType.boolean, 'terminal-mep'), ['bool'])),
('reply_filter_unknown', (YLeaf(YType.boolean, 'reply-filter-unknown'), ['bool'])),
('transaction_id', (YLeaf(YType.uint32, 'transaction-id'), ['int'])),
('ttl', (YLeaf(YType.uint8, 'ttl'), ['int'])),
('relay_action', (YLeaf(YType.enumeration, 'relay-action'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmElrRelayAction', '')])),
('next_hop_timeout', (YLeaf(YType.uint32, 'next-hop-timeout'), ['int'])),
('delay_model', (YLeaf(YType.enumeration, 'delay-model'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmEltDelayModel', '')])),
])
self.level = None
self.version = None
self.forwarded = None
self.terminal_mep = None
self.reply_filter_unknown = None
self.transaction_id = None
self.ttl = None
self.relay_action = None
self.next_hop_timeout = None
self.delay_model | |
y: concat(x, y)[3:] = y[3:]
return Extract(x.args[1], i, j)
elif j >= x.args[1].width:
# 4-bit x, y: concat(x, y)[:5] = x[:1]
offset = x.args[1].width
return Extract(x.args[0], i - offset, j - offset)
elif isinstance(x, (BvShl, RotateLeft)) and \
isinstance(x.args[1], (int, core.Constant)) and int(x.args[1]) <= j:
# (x << 1)[:2] = x[n-2: 1]
offset = int(x.args[1])
return Extract(x.args[0], i - offset, j - offset)
elif isinstance(x, (BvLshr, RotateRight)) and \
isinstance(x.args[1], (int, core.Constant)) and i < x.width - int(x.args[1]):
# (x >> 1)[n-3:] = x[n-2: 1]
offset = int(x.args[1])
return Extract(x.args[0], i + offset, j + offset)
class Concat(PrimaryOperation):
    """Concatenation operation.

    Given the bit-vectors :math:`(x_{n-1}, \dots, x_0)` and
    :math:`(y_{m-1}, \dots, y_0)`, ``Concat(x, y)`` returns the bit-vector
    :math:`(x_{n-1}, \dots, x_0, y_{m-1}, \dots, y_0)`.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import Concat
    >>> Concat(Constant(0x12, 8), Constant(0x345, 12))
    0x12345
    >>> Concat(Variable("x", 8), Variable("y", 8))
    x :: y
    """

    arity = [2, 0]
    is_symmetric = False
    infix_symbol = "::"

    @classmethod
    def output_width(cls, x, y):
        # the result carries every bit of both operands
        return x.width + y.width

    @classmethod
    def eval(cls, x, y):
        # Returns a simplified term, or (implicitly) None so the symbolic
        # Concat node is built instead.
        def doit(x, y):
            """Concatenation when both operands are int."""
            # x.bin() keeps its "0b" prefix; y's prefix is stripped before joining
            return int(x.bin() + y.bin()[2:], 2)

        if isinstance(x, core.Constant) and isinstance(y, core.Constant):
            return core.Constant(doit(x, y), cls.output_width(x, y))
        elif isinstance(x, core.Constant) and isinstance(y, Concat) and \
                isinstance(y.args[0], core.Constant):
            # fold adjacent constants: x :: (c :: t)  ->  (x :: c) :: t
            return Concat(Concat(x, y.args[0]), y.args[1])
        elif isinstance(y, core.Constant) and isinstance(x, Concat) and \
                isinstance(x.args[1], core.Constant):
            # fold adjacent constants: (t :: c) :: y  ->  t :: (c :: y)
            return Concat(x.args[0], Concat(x.args[1], y))
        elif isinstance(x, Extract) and isinstance(y, Extract):
            # x[5:4] concat x[3:2] = x[5:2]
            # adjacent extracts of the same base term merge into one Extract
            if x.args[0] == y.args[0] and x.args[2] == y.args[1] + 1:
                return Extract(x.args[0], x.args[1], y.args[2])
# Arithmetic operators
class BvNeg(PrimaryOperation):
    """Unary minus (two's complement negation) operation.

    Overrides the unary operator -; see `PrimaryOperation` for details.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvNeg
    >>> BvNeg(Constant(1, 8))
    0xff
    >>> -Constant(1, 8)
    0xff
    >>> BvNeg(Variable("x", 8))
    -x
    """

    arity = [1, 0]
    is_symmetric = False
    unary_symbol = "-"

    @classmethod
    def output_width(cls, x):
        # negation preserves the operand's width
        return x.width

    @classmethod
    def eval(cls, x):
        # Returns a simplified term, or None so the symbolic node is built.
        if isinstance(x, core.Constant):
            # two's complement: (2^w - x) mod 2^w
            modulus = 2 ** x.width
            return core.Constant((modulus - int(x)) % modulus, x.width)
        if isinstance(x, BvNeg):
            # double negation cancels: -(-t) == t
            return x.args[0]
        # # disabled (all op equal precedence)
        # elif isinstance(x, BvAdd):
        #     return BvAdd(BvNeg(x.args[0]), BvNeg(x.args[1]))
        # elif isinstance(x, (BvMul, BvDiv, BvMod)):
        #     return x.func(BvNeg(x.args[0]), x.args[1])
class BvAdd(PrimaryOperation):
    """Modular addition operation.

    It overrides the operator + and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvAdd
    >>> BvAdd(Constant(1, 8), Constant(2, 8))
    0x03
    >>> BvAdd(Constant(1, 8), 2)
    0x03
    >>> Constant(1, 8) + 2
    0x03
    >>> Variable("x", 8) + Variable("y", 8)
    x + y
    """

    arity = [2, 0]
    is_symmetric = True
    is_simple = True
    infix_symbol = "+"

    @classmethod
    def condition(cls, x, y):
        # both operands must share the same bit-width
        return x.width == y.width

    @classmethod
    def output_width(cls, x, y):
        return x.width

    @classmethod
    def eval(cls, x, y):
        # Returns a simplified term, or (implicitly) None so the symbolic
        # BvAdd node is built instead.
        def doit(x, y, width):
            """Modular addition when both operands are integers."""
            return (x + y) % (2 ** width)

        if isinstance(x, core.Constant) and isinstance(y, core.Constant):
            return core.Constant(doit(int(x), int(y), x.width), x.width)

        zero = core.Constant(0, x.width)
        # NOTE: elif chain — only the first structurally-matching rule is
        # tried; if its inner condition fails, no simplification is applied.
        if x == zero:
            return y
        elif y == zero:
            return x
        elif x == BvNeg(y):
            # x + (-x) = 0
            return zero
        elif isinstance(x, BvSub):  # (x0 - x1=y) + y
            if x.args[1] == y:
                return x.args[0]
        elif isinstance(y, BvSub):  # x + (y0 - y1=x)
            if y.args[1] == x:
                return y.args[0]
        elif isinstance(x, BvNeg):  # (-x) + y = y - x
            return y - x.args[0]
        elif isinstance(y, BvNeg):  # x + (-y) = x - y
            return x - y.args[0]

    def _simplify(self, *args, **kwargs):
        # simplify if x and BvNeg(x) appear in a flattened addition
        compatible_terms = [
            lambda x: BvNeg(x)
        ]
        return self._binary_symmetric_simplification(compatible_terms)
class BvSub(PrimaryOperation):
    """Modular subtraction operation.

    It overrides the operator - and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvSub
    >>> BvSub(Constant(1, 8), Constant(2, 8))
    0xff
    >>> BvSub(Constant(1, 8), 2)
    0xff
    >>> Constant(1, 8) - 2
    0xff
    >>> Variable("x", 8) - Variable("y", 8)
    x - y
    """

    arity = [2, 0]
    is_symmetric = False
    is_simple = True
    infix_symbol = "-"

    @classmethod
    def condition(cls, x, y):
        # both operands must share the same bit-width
        return x.width == y.width

    @classmethod
    def output_width(cls, x, y):
        return x.width

    @classmethod
    def eval(cls, x, y):
        # Returns a simplified term, or (implicitly) None so the symbolic
        # BvSub node is built instead.
        def doit(x, y, width):
            """Modular subtraction when both operands are integers."""
            return (x - y) % (2 ** width)

        if isinstance(x, core.Constant) and isinstance(y, core.Constant):
            return core.Constant(doit(int(x), int(y), x.width), x.width)

        zero = core.Constant(0, x.width)
        # NOTE: elif chain — only the first structurally-matching rule is
        # tried; if its inner condition fails, no simplification is applied.
        if x == zero:
            return BvNeg(y)
        elif y == zero:
            return x
        elif x == y:
            return zero
        elif isinstance(x, BvAdd):  # (x0 + x1) - y, y in [x0, x1]
            if x.args[0] == y:
                return x.args[1]
            elif x.args[1] == y:
                return x.args[0]
        elif isinstance(y, BvAdd):  # x - (y0 + y1), x in [y0, y1]
            if y.args[0] == x:
                return BvNeg(y.args[1])
            elif y.args[1] == x:
                return BvNeg(y.args[0])
        elif isinstance(y, BvNeg):  # x - (-y) = x + y
            return x + y.args[0]
class BvMul(PrimaryOperation):
    """Modular multiplication operation.

    Overrides the operator * and supports Automatic Constant Conversion;
    see `PrimaryOperation` for details.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvMul
    >>> BvMul(Constant(4, 8), Constant(3, 8))
    0x0c
    >>> BvMul(Constant(4, 8), 3)
    0x0c
    >>> Constant(4, 8) * 3
    0x0c
    >>> Variable("x", 8) * Variable("y", 8)
    x * y
    """

    arity = [2, 0]
    is_symmetric = True
    is_simple = True
    infix_symbol = "*"

    @classmethod
    def condition(cls, x, y):
        # operands must have matching widths
        return x.width == y.width

    @classmethod
    def output_width(cls, x, y):
        return x.width

    @classmethod
    def eval(cls, x, y):
        # Returns a simplified term, or None so the symbolic node is built.
        if isinstance(x, core.Constant) and isinstance(y, core.Constant):
            # multiply and reduce modulo 2^width
            product = (int(x) * int(y)) % (2 ** x.width)
            return core.Constant(product, x.width)

        zero = core.Constant(0, x.width)
        one = core.Constant(1, x.width)
        # annihilator and identity-element simplifications
        if x == zero or y == zero:
            return zero
        if x == one:
            return y
        if y == one:
            return x
class BvUdiv(PrimaryOperation):
    """Unsigned and truncated division operation.

    Overrides the operator / and supports Automatic Constant Conversion;
    see `PrimaryOperation` for details.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUdiv
    >>> BvUdiv(Constant(0x0c, 8), Constant(3, 8))
    0x04
    >>> BvUdiv(Constant(0x0c, 8), 3)
    0x04
    >>> Constant(0x0c, 8) / 3
    0x04
    >>> Variable("x", 8) / Variable("y", 8)
    x / y
    """

    arity = [2, 0]
    is_symmetric = False
    is_simple = True
    infix_symbol = "/"

    @classmethod
    def condition(cls, x, y):
        # operands must have matching widths
        return x.width == y.width

    @classmethod
    def output_width(cls, x, y):
        return x.width

    @classmethod
    def eval(cls, x, y):
        # Returns a simplified term, or None so the symbolic node is built.
        zero = core.Constant(0, x.width)
        one = core.Constant(1, x.width)
        # division by zero is not defined
        assert y != zero

        if isinstance(x, core.Constant) and isinstance(y, core.Constant):
            divisor = int(y)
            assert divisor != 0
            # truncated (floor) division of the unsigned values
            return core.Constant(int(x) // divisor, x.width)
        if x == y:
            return one
        if x == zero:
            return zero
        if y == one:
            return x
class BvUrem(PrimaryOperation):
    """Unsigned remainder (modulus) operation.

    Usage:

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUrem
    >>> BvUrem(Constant(0x0d, 8), Constant(3, 8))
    0x01
    >>> BvUrem(Constant(0x0d, 8), 3)
    0x01
    >>> Constant(0x0d, 8) % 3
    0x01
    >>> Variable("x", 8) % Variable("y", 8)
    x % y
    """

    arity = [2, 0]
    is_symmetric = False
    is_simple = True
    infix_symbol = "%"

    @classmethod
    def condition(cls, x, y):
        # operands must have matching widths
        return x.width == y.width

    @classmethod
    def output_width(cls, x, y):
        return x.width

    @classmethod
    def eval(cls, x, y):
        # Returns a simplified term, or None so the symbolic node is built.
        zero = core.Constant(0, x.width)
        one = core.Constant(1, x.width)
        # remainder by zero is not defined
        assert y != zero

        if isinstance(x, core.Constant) and isinstance(y, core.Constant):
            divisor = int(y)
            assert divisor != 0
            return core.Constant(int(x) % divisor, x.width)
        # x % x == 0, 0 % y == 0, x % 1 == 0
        if x == y or x == zero or y == one:
            return zero
class BvIdentity(PrimaryOperation):
"""The identity operation.
Return the same value when the input is constant and
a `BvIdentity` object when the input is symbolic:
>>> from cascada.bitvector.core import Constant, Variable
>>> from cascada.bitvector.operation import BvIdentity
>>> BvIdentity(Constant(0x1, 4))
0x1
>>> BvIdentity(Variable("x", 8))
Id(x)
"""
| |
# common/xrd-ui-tests-python/tests/xroad_ss_backup_configuration/ss_management.py
# coding=utf-8
from selenium.webdriver.common.by import By
from view_models import sidebar, popups, ss_system_parameters, messages, log_constants, keys_and_certificates_table
from helpers import auditchecker, ssh_client, xroad
import time
import os
from shutil import copyfile
def test_ss_backup_conf(case, ssh_host=None, ssh_username=None, ssh_password=None):
    '''
    UC SS_14: Back Up Configuration

    First forces the backup script to fail (by planting an unreadable file on the
    server over SSH), verifies the error message and audit logging, deletes the
    failed backup, then re-runs the backup and verifies the success path.

    :param case: MainController object
    :param ssh_host: str|None - if set, Central Server SSH host for checking the audit.log; if None, no log check
    :param ssh_username: str|None SSH username
    :param ssh_password: str|None SSH password
    :return: function - the test body executed by the framework
    '''
    self = case

    def backup_conf():
        self.logdata = []
        if ssh_host is not None:
            # FIX: was "password=<PASSWORD>" (a redaction artifact that is not
            # valid Python); use the ssh_password parameter, consistent with
            # test_ss_backup_delete/test_ss_upload_backup in this module.
            log_checker = auditchecker.AuditChecker(host=ssh_host, username=ssh_username, password=ssh_password)
            current_log_lines = log_checker.get_line_count()
        '''Add "Back up configuration failed" to comparision variable'''
        self.logdata.append(log_constants.SS_BACKUP_CONFIGURATION_FAILED)
        '''Write file to ss2 to cause error message'''
        create_empty_file = 'sh -c "echo \'\' > {0}"'.format(ss_system_parameters.BACKUP_CORRUPTION_FILE)
        self.log('Connecting ssh client')
        # FIX: was "password=<PASSWORD>"; use the ssh_password parameter.
        self.ssh_client = ssh_client.SSHClient(host=ssh_host, username=ssh_username, password=ssh_password)
        self.ssh_client.exec_command(command=create_empty_file, sudo=True)
        # Make the planted file unreadable so the backup script fails.
        change_file_permissions = 'chmod o-r {0}'.format(ss_system_parameters.BACKUP_CORRUPTION_FILE)
        self.ssh_client.exec_command(command=change_file_permissions, sudo=True)
        '''Click "Back Up and Restore" button'''
        self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.BACKUP_AND_RESTORE_BTN_CSS).click()
        self.wait_jquery()
        '''Click on "Back up configuration" button'''
        self.log('UC SS_14 1.SS administrator selects to back up the security server configuration.')
        self.wait_until_visible(self.by_id(ss_system_parameters.BACKUP_CONFIGURATION_BUTTON_ID)).click()
        self.wait_jquery()
        self.log('System displays the error message.')
        '''Save message error message'''
        error_message = self.wait_until_visible(type=By.CSS_SELECTOR, element=messages.ERROR_MESSAGE_CSS).text
        self.log('UC SS_14 3a.1. System displays the error message “Error making configuration backup, script exited with status code X and the output of the backup script.')
        '''Verify error message'''
        self.is_true(error_message == messages.SS_CONFIGURATION_BACKUP_ERROR,
                     msg='Wrong error message')
        '''Remove empty file for making successful test'''
        remove_file = 'rm {0}'.format(ss_system_parameters.BACKUP_CORRUPTION_FILE)
        self.ssh_client.exec_command(command=remove_file, sudo=True)
        '''Close Backup Script backup popup'''
        popups.close_console_output_dialog(self)
        '''Check audit log'''
        if ssh_host is not None:
            # Check logs for entries
            self.log(
                'UC SS_14 3a.2 Backing up the security server configuration failed.')
            logs_found = log_checker.check_log(self.logdata, from_line=current_log_lines + 1)
            self.is_true(logs_found,
                         msg='Some log entries were missing. Expected: "{0}", found: "{1}"'.format(self.logdata,
                                                                                                   log_checker.found_lines))
        '''Reload page and wait until additional data is loaded using jQuery'''
        self.driver.refresh()
        self.wait_jquery()
        '''Click "Delete"'''
        self.by_xpath(ss_system_parameters.DELETE).click()
        '''Confirm delete'''
        popups.confirm_dialog_click(self)
        '''Save delete message'''
        success_deletion = messages.get_notice_message(self)
        '''Verify successful delete'''
        self.is_true(success_deletion == messages.SS_SUCCESSFUL_DELETE,
                     msg='Wrong message for deleted backup')
        # Second pass: a clean backup run that must succeed.
        self.logdata = []
        if ssh_host is not None:
            log_checker = auditchecker.AuditChecker(host=ssh_host, username=ssh_username, password=ssh_password)
            current_log_lines = log_checker.get_line_count()
        '''Click "Back Up and Restore" button'''
        self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.BACKUP_AND_RESTORE_BTN_CSS).click()
        self.wait_jquery()
        self.log('UC SS_14 1.SS administrator selects to back up the security server configuration.')
        '''Click on "Back up configuration" button'''
        self.wait_until_visible(self.by_id(ss_system_parameters.BACKUP_CONFIGURATION_BUTTON_ID)).click()
        self.wait_jquery()
        '''Save message "Configuration backup created"'''
        success_notice = messages.get_notice_message(self)
        # FIX: removed a stale copy-paste assertion that re-checked the old
        # error_message here; the success notice is verified below against
        # log_constants.SS_BACKUP_SCRIPT_CREATED.
        self.logdata.append(log_constants.SS_BACKUP_CONFIGURATION)
        '''Save "Backup Script Output" message'''
        backup_script_output = messages.get_console_output(self)
        self.log('UC SS_14 2a.System runs the backup script that creates a dump file of the database to the location /var/lib/xroad/dbdump.dat, that contains the contents of the security server database')
        '''Verify dump file location /var/lib/xroad/dbdump.dat '''
        self.is_true(ss_system_parameters.BACKUP_DUMP_LOCATION in backup_script_output,
                     msg='Dump /var/lib/xroad/dbdump.dat not found')
        self.log('UC SS_14 2b.System runs the backup script that creates the backup file containing the database dump file')
        '''Verify dump file location /etc/xroad/ '''
        self.is_true(ss_system_parameters.BACKUP_DUMP_DIR1 in backup_script_output,
                     msg='Dump directory /etc/xroad/ not found')
        '''Verify dump file location /etc/nginx/sites-enabled/ '''
        self.is_true(ss_system_parameters.BACKUP_DUMP_DIR2 in backup_script_output,
                     msg='Dump directory /etc/nginx/sites-enabled/ not found')
        '''Verify dump file label information '''
        self.is_true(ss_system_parameters.BACKUP_DUMP_VER in backup_script_output,
                     msg='Dump contains different label information')
        self.log('UC SS_14 2c.System runs the backup script that saves the created backup file to /var/lib/xroad/backup.')
        '''Verify created backup '''
        self.is_true(ss_system_parameters.BACKUP_CREATED in backup_script_output,
                     msg='Dump contains different label information')
        self.log('UC SS_14 3.System displays the message “Configuration backup created” and the backup script output to the SS administrator.')
        '''Verify "Configuration backup created"'''
        self.is_true(success_notice == log_constants.SS_BACKUP_SCRIPT_CREATED,
                     msg='Wrong error message')
        if ssh_host is not None:
            # Check logs for entries
            self.log(
                'UC SS_14 4.System logs the event “Back up configuration” to the audit log.')
            logs_found = log_checker.check_log(self.logdata, from_line=current_log_lines + 1)
            self.is_true(logs_found,
                         msg='Some log entries were missing. Expected: "{0}", found: "{1}"'.format(self.logdata,
                                                                                                   log_checker.found_lines))
        '''Close Backup Script backup popup'''
        popups.close_console_output_dialog(self)

    return backup_conf
def test_ss_backup_download(case):
    '''
    UC SS 16: download a configuration backup file and verify that it
    appears in the local download directory.
    :param case: MainController object
    :return: function - the test body executed by the framework
    '''
    self = case

    def download_backup():
        # Open the "Back Up and Restore" view.
        self.log('Click "Back Up and Restore" button"')
        self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.BACKUP_AND_RESTORE_BTN_CSS).click()
        self.wait_jquery()
        # Remember which backup file is about to be downloaded.
        file_name = self.by_css(ss_system_parameters.BACKUP_FILE_NAME).text
        self.log('SS_16: 1. SS administrator selects to download a backup file.')
        self.log('SS_16: 2. System prompts the file for downloading..')
        self.log('Click "Download"')
        self.by_xpath(ss_system_parameters.DOWNLOAD).click()
        self.wait_jquery()
        # Give the browser time to finish writing the file to disk.
        time.sleep(7)
        target_path = self.get_download_path(file_name)
        self.log('SS_16 3. SS administrator saves the file to the local file system.')
        self.log('Verify that backup file is downloaded')
        if not os.path.isfile(target_path):
            raise RuntimeWarning('Backup file not found: {0}'.format(target_path))

    return download_backup
def test_ss_backup_delete(case, ssh_host=None, ssh_username=None, ssh_password=None):
    '''
    UC SS 17 Try to delete backup file and verify deletion from audit log
    :param case: MainController object
    :param ssh_host: str|None - if set, Central Server SSH host for checking the audit.log; if None, no log check
    :param ssh_username: str|None SSH username
    :param ssh_password: str|None SSH password
    :return:
    '''
    self = case

    def backup_delete():
        self.logdata = []
        if ssh_host is not None:
            log_checker = auditchecker.AuditChecker(host=ssh_host, username=ssh_username, password=ssh_password)
            current_log_lines = log_checker.get_line_count()
        '''Click "Back Up and Restore" button" '''
        self.log('Click "Back Up and Restore" button"')
        self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.BACKUP_AND_RESTORE_BTN_CSS).click()
        self.wait_jquery()
        '''Name of backup file'''
        # NOTE(review): backup_conf_file_name is captured but never used in this
        # test body — presumably kept for debugging; confirm before removing.
        backup_conf_file_name = self.by_css(ss_system_parameters.BACKUP_FILE_NAME).text
        self.log('UC SS_17 1.SS administrator selects to delete a backup file.')
        '''Click "Delete"'''
        self.log('Click "Delete"')
        self.by_xpath(ss_system_parameters.DELETE).click()
        self.wait_jquery()
        self.log('UC SS_17 2. System prompts for confirmation.')
        self.log('UC SS_17 3.a. SS administrator cancels the deleting of the backup file.')
        '''Confirm delete, click cancel button on popup'''
        self.by_xpath(popups.CONFIRM_POPUP_CANCEL_BTN_XPATH).click()
        # Performs the actual (confirmed) deletion; helper defined elsewhere in this module.
        delete_backup_conf(self)
        '''Check audit log'''
        if ssh_host is not None:
            # Check logs for entries
            self.log(
                'UC SS_17 5. System logs the event “Delete backup file” to the audit log.')
            logs_found = log_checker.check_log(self.logdata, from_line=current_log_lines + 1)
            self.is_true(logs_found,
                         msg='Some log entries were missing. Expected: "{0}", found: "{1}"'.format(self.logdata,
                                                                                                   log_checker.found_lines))
        else:
            # NOTE(review): unlike the docstring ("if None, no log check"), this
            # test treats a missing ssh_host as a hard failure — confirm intent.
            raise RuntimeError('Not able to check audit log!')

    return backup_delete
def test_ss_upload_backup(case, ssh_host=None, ssh_username=None, ssh_password=None):
    '''
    UC SS 18 Try to upload backup file and verify successful upload from audit log
    :param case: MainController object
    :param ssh_host: str|None - if set, Central Server SSH host for checking the audit.log; if None, no log check
    :param ssh_username: str|None SSH username
    :param ssh_password: str|None SSH password
    :return:
    '''
    self = case

    def backup_conf():
        self.logdata = []
        if ssh_host is not None:
            log_checker = auditchecker.AuditChecker(host=ssh_host, username=ssh_username, password=ssh_password)
            current_log_lines = log_checker.get_line_count()
        '''Click "Back Up and Restore" button'''
        self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.BACKUP_AND_RESTORE_BTN_CSS).click()
        self.wait_jquery()
        '''Name of backup file'''
        backup_conf_file_name = self.by_css(ss_system_parameters.BACKUP_FILE_NAME).text
        '''Click "Download"'''
        self.log('Click "Download"')
        self.by_xpath(ss_system_parameters.DOWNLOAD).click()
        self.wait_jquery()
        '''Wait for donwload to be completed'''
        time.sleep(7)
        '''Path of downloaded file'''
        local_path = self.get_download_path(backup_conf_file_name)
        '''Verify that backup file is downloaded'''
        self.log('Verify that backup file is downloaded')
        if not os.path.isfile(local_path):
            raise RuntimeWarning('Backup file not found: {0}'.format(local_path))
        '''Create invalid character path'''
        # NOTE(review): '\\' separators assume a Windows test-runner host —
        # confirm; os.path.join would be portable.
        invalid_char_dst = self.download_dir + '\\' + ss_system_parameters.INVALID_CHARACTER_FILE
        '''Create copy of real backup file and name it as invalid character file'''
        copyfile(local_path, invalid_char_dst)
        '''Path of invalid extension file'''
        invalid_extension_dst = os.path.join(self.download_dir + '\\' + ss_system_parameters.EMPTY_ZIP_FILE)
        # os.rename(local_path, invalid_extension_dst)
        copyfile(local_path, invalid_extension_dst)
        # Create an empty .tar file to exercise the invalid-format upload case.
        open(self.download_dir + '/' "test.tar", "w+")
        '''Path of invalid .tar file'''
        invalid_format = self.get_download_path(ss_system_parameters.EMPTY_TAR_FILE)
        # Negative upload attempts, then re-upload of an existing file, then a
        # clean upload; helpers defined elsewhere in this module.
        check_inputs(self, invalid_char_dst, invalid_extension_dst, invalid_format)
        successful_reupload(self, local_path, backup_conf_file_name)
        successful_upload(self, local_path, backup_conf_file_name)
        if ssh_host is not None:
            # Check logs for entries
            self.log(
                'UC SS_18 8.System logs the event “Upload backup file” to the audit log.')
            logs_found = log_checker.check_log(self.logdata, from_line=current_log_lines + 1)
            self.is_true(logs_found,
                         msg='Some log entries were missing. Expected: "{0}", found: "{1}"'.format(self.logdata,
                                                                                                   log_checker.found_lines))

    return backup_conf
def test_view_list_backup_files(case):
"""
SS 13 View the List of Configuration Backup Files
:param case: MainController object
:return:
"""
self = case
def test_case():
'''UC SS_13 step 1. SS administrator selects to view the list of configuration backup files.'''
self.log('''UC SS_13 step 1. SS administrator selects to view the list of configuration backup files.''')
self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.BACKUP_AND_RESTORE_BTN_CSS).click()
self.wait_jquery()
'''UC SS_13 step 2. System displays the list of backup files. For each file, the following information
is displayed:'''
self.log('''UC SS_13 step 2. System displays the list of backup files. For each file, the following information
is displayed:''')
'''Find all backup file names from the table'''
list_of_backup_files = self.wait_until_visible(type=By.XPATH,
element=ss_system_parameters.BACKUP_FILE_NAME_ROW, multiple=True)
'''UC SS_13 2.1 The file name of the backup file'''
'''Loop through backup file names'''
backup_file_name_displayed = False
for file_name in list_of_backup_files:
file_name = file_name.text
'''Verify that backup file name is | |
WLOK * WLOK
GB = 9.784 * (1.0 - 0.0026 * math.cos(PHI + PHI) - 0.00000028 * HMOK)
if OPTIC:
A = (287.604 + (1.6288 + 0.0136 / WLSQ) / WLSQ) * 273.15e-6 / 1013.25
else:
A = 77.624e-6
GAMAL = (GB * DMD) / GCR
GAMMA = GAMAL / ALPHA
GAMM2 = GAMMA - 2.0
DELM2 = DELTA - 2.0
TDC = TDKOK - 273.15
PSAT = 10.0**((0.7859 + 0.03477 * TDC) / (1.0 + 0.00412 * TDC))
PSAT *= (1.0 + PMBOK * (4.5e-6 + 6e-10 * TDC * TDC))
if (PMBOK > 0.0):
PWO = RHOK * PSAT / (1.0 - (1.0 - RHOK) * PSAT / PMBOK)
else:
PWO = 0.0
W = PWO * (1.0 - DMW / DMD) * GAMMA / (DELTA - GAMMA)
C1 = A * (PMBOK + W) / TDKOK
if OPTIC:
C2 = (A * W + 11.2684e-6 * PWO) / TDKOK
else:
C2 = (A * W + 12.92e-6 * PWO) / TDKOK
C3 = (GAMMA - 1.0) * ALPHA * C1 / TDKOK
C4 = (DELTA - 1.0) * ALPHA * C2 / TDKOK
if OPTIC:
C5 = 0.0
C6 = 0.0
else:
C5 = 371897e-6 * PWO / TDKOK
C6 = C5 * DELM2 * ALPHA / (TDKOK * TDKOK)
# Conditions at the observer.
R0 = S + HMOK
TEMPO, DN0, RDNDR0 = atmt(R0, TDKOK, ALPHA, GAMM2, DELM2,
C1, C2, C3, C4, C5, C6, R0)
SK0 = DN0 * R0 * math.sin(ZOBS2)
F0 = refi(R0, DN0, RDNDR0)
# Conditions in the troposphere at the tropopause.
RT = S + HT
TT, DNT, RDNDRT = atmt(R0, TDKOK, ALPHA, GAMM2, DELM2,
C1, C2, C3, C4, C5, C6, RT)
SINE = SK0 / (RT * DNT)
ZT = math.atan2(SINE, math.sqrt(max(1.0 - SINE * SINE, 0.0)))
FT = refi(RT, DNT, RDNDRT)
# Conditions in the stratosphere at the tropopause.
DNTS, RDNDRP = atms(RT, TT, DNT, GAMAL, RT)
SINE = SK0 / (RT * DNTS)
ZTS = math.atan2(SINE, math.sqrt(max(1.0 - SINE * SINE,0.0)))
FTS = refi(RT, DNTS, RDNDRP)
# Conditions at the stratosphere limit.
RS = S + HS
DNS, RDNDRS = atms(RT, TT, DNT, GAMAL, RS)
SINE = SK0 / (RS * DNS)
ZS = math.atan2(SINE, math.sqrt(max(1.0 - SINE * SINE, 0.0)))
FS = refi(RS, DNS, RDNDRS)
# Integrate the refraction integral in two parts; first in the
# troposphere (K=1), then in the stratosphere (K=2).
# Initialize previous refraction to ensure at least two iterations.
REFOLD = 1.0e6
# Start off with 8 strips for the troposphere integration, and then
# use the final troposphere value for the stratosphere integration,
# which tends to need more strips.
IS = 8
# Troposphere then stratosphere.
for K in [1,2]:
# Start Z, Z range, and start and end values.
if K == 1:
Z0 = ZOBS2
ZRANGE = ZT - Z0
FB = F0
FF = FT
else:
Z0 = ZTS
ZRANGE = ZS - Z0
FB = FTS
FF = FS
# Sums of odd and even values.
FO = 0.0
FE = 0.0
# First time through the loop we have to do every point.
N = 1
# Start of iteration loop (terminates at specified precision).
LOOP = True
while LOOP:
# Strip width.
H = ZRANGE / float(IS)
# Initialize distance from Earth centre for quadrature pass.
if K == 1:
R = R0
else:
R = RT
# One pass (no need to compute evens after first time).
for I in range(1, IS, N):
# Sine of observed zenith distance.
SZ = math.sin(Z0 + H * float(I))
# Find R (to the nearest metre, maximum four iterations).
if SZ > 1e-20:
W = SK0 / SZ
RG = R
DR = 1e6
J = 0
while (abs(DR) > 1.0) and (J < 4):
J = J + 1
if K == 1:
TG, DN, RDNDR = atmt(R0, TDKOK, ALPHA, GAMM2, DELM2,
C1, C2, C3, C4, C5, C6, RG)
else:
DN, RDNDR = atms(RT, TT, DNT, GAMAL, RG)
DR = (RG * DN - W) / (DN + RDNDR)
RG = RG - DR
R = RG
# Find the refractive index and integrand at R.
if K == 1:
T, DN, RDNDR = atmt(R0, TDKOK, ALPHA, GAMM2, DELM2,
C1, C2, C3, C4, C5, C6, R)
else:
DN,RDNDR = atms(RT, TT, DNT, GAMAL, R)
F = refi(R, DN, RDNDR)
# Accumulate odd and (first time only) even values.
if (N == 1) and ((I % 2) == 0):
FE = FE + F
else:
FO = FO + F
# Evaluate the integrand using Simpson's Rule.
REFP = H * (FB + 4.0 * FO + 2.0 * FE + FF) / 3.0
# Has the required precision been achieved?
if (abs(REFP - REFOLD) > TOL):
# No: prepare for next iteration.
# Save current value for convergence test.
REFOLD = REFP
# Double the number of strips.
IS = IS + IS
# Sum of all current values = sum of next pass's even values.
FE = FE + FO
# Prepare for new odd values.
FO = 0.0
# Skip even values next time.
N = 2
else:
# Yes: save troposphere component and terminate the loop.
if (K == 1):
REFT = REFP
LOOP = False
# END IF
# END FOR
# END WHILE
# Result.
REF = REFT + REFP
if (ZOBS1 < 0.0):
REF = -REF
return REF
def atmt(R0, T0, ALPHA, GAMM2, DELM2, C1, C2, C3, C4, C5, C6, R):
    """
    Internal routine used by REFRO.

    Refractive index and derivative with respect to height for the
    troposphere.

    Given:
      R0      height of observer from centre of the Earth (metre)
      T0      temperature at the observer (deg K)
      ALPHA   alpha               )
      GAMM2   gamma minus 2       )  see HMNAO paper
      DELM2   delta minus 2       )
      C1..C6  precomputed terms (see source of slRFRO)
      R       current distance from the centre of the Earth (metre)

    Returned:
      T       temperature at R (deg K)
      DN      refractive index at R
      RDNDR   R * rate the refractive index is changing at R

    Note that in the optical case C5 and C6 are zero.

    P.T.Wallace Starlink 30 May 1997
    Copyright (C) 1997 Rutherford Appleton Laboratory
    Copyright (C) 1995 Association of Universities for Research in Astronomy Inc.
    """
    # Linear lapse-rate temperature profile, clamped to [100 K, 320 K].
    T = min(max(T0 - ALPHA * (R - R0), 100.0), 320.0)
    ratio = T / T0
    pow_gm2 = ratio ** GAMM2
    pow_dm2 = ratio ** DELM2
    DN = 1.0 + (C1 * pow_gm2 - (C2 - C5 / T) * pow_dm2) * ratio
    RDNDR = R * (-C3 * pow_gm2 + (C4 - C6 / ratio) * pow_dm2)
    return T, DN, RDNDR
def atms(RT, TT, DNT, GAMAL, R):
"""
Internal routine used by REFRO
Refractive index and derivative with respect to height for the
stratosphere.
Given:
RT d height of tropopause from centre of the Earth (metre)
TT d temperature at the tropopause (deg K)
DNT d refractive index at the tropopause
GAMAL d constant of the atmospheric model = G MD/R
R d current distance from the centre of the Earth (metre)
Returned:
DN d refractive index at R
RDNDR d R rate the refractive index is changing at R
P.T.Wallace Starlink 14 July 1995
Copyright (C) 1995 Rutherford Appleton Laboratory
Copyright (C) 1995 Association of Universities for Research in Astronomy Inc.
"""
B = GAMAL / TT
W = (DNT - 1.0) * math.exp(-B * (R - RT))
DN = 1.0 + W
RDNDR = -R | |
beta-test code")
self.native = mavnative.NativeConnection(MAVLink_message, mavlink_map)
else:
self.native = None
if native_testing:
self.test_buf = bytearray()
self.mav20_unpacker = struct.Struct('<cBBBBBBHB')
self.mav10_unpacker = struct.Struct('<cBBBBB')
self.mav20_h3_unpacker = struct.Struct('BBB')
self.mav_csum_unpacker = struct.Struct('<H')
self.mav_sign_unpacker = struct.Struct('<IH')
def set_callback(self, callback, *args, **kwargs):
    '''register a function (plus extra arguments) invoked for each parsed message'''
    self.callback, self.callback_args, self.callback_kwargs = callback, args, kwargs
def set_send_callback(self, callback, *args, **kwargs):
    """Register *callback* to be invoked after every outgoing message.

    Any extra positional and keyword arguments are stored and forwarded
    to *callback* on each invocation (after the message itself).
    """
    self.send_callback, self.send_callback_args, self.send_callback_kwargs = (
        callback,
        args,
        kwargs,
    )
def send(self, mavmsg, force_mavlink1=False):
    """Serialize *mavmsg* and write it to the underlying file object.

    Updates the outgoing sequence number and the packet/byte counters,
    then invokes the registered send callback (if any) with the message
    and the stored extra arguments.
    """
    packed = mavmsg.pack(self, force_mavlink1=force_mavlink1)
    self.file.write(packed)
    # The sequence number is a single byte on the wire, so wrap at 256.
    self.seq = (self.seq + 1) % 256
    self.total_packets_sent += 1
    self.total_bytes_sent += len(packed)
    if self.send_callback:
        self.send_callback(mavmsg, *self.send_callback_args, **self.send_callback_kwargs)
def buf_len(self):
    """Return the number of not-yet-consumed bytes in the receive buffer."""
    remaining = len(self.buf) - self.buf_index
    return remaining
def bytes_needed(self):
    """Return how many more bytes are required for the next parsing stage.

    Always returns at least 1, so callers keep reading even when the
    buffer already holds a full frame candidate.
    """
    if self.native:
        expected = self.native.expected_length
    else:
        expected = self.expected_length
    needed = expected - self.buf_len()
    return needed if needed > 0 else 1
def __parse_char_native(self, c):
    '''this method exists only to see in profiling results'''
    # Delegate one parse step to the accelerated native parser; kept as a
    # separate method so time spent in native parsing shows up under its
    # own name in profiler output.
    m = self.native.parse_chars(c)
    return m
def __callbacks(self, msg):
    '''this method exists only to make profiling results easier to read'''
    # Fan a successfully parsed message out to the user-registered callback
    # (installed via set_callback), forwarding the stored extra arguments.
    if self.callback:
        self.callback(msg, *self.callback_args, **self.callback_kwargs)
def parse_char(self, c):
    '''input some data bytes, possibly returning a new message

    Appends *c* to the receive buffer and attempts one parse step.
    Returns the decoded message (or a MAVLink_bad_data instance in
    robust-parsing mode), or None if more bytes are needed.
    '''
    self.buf.extend(c)
    self.total_bytes_received += len(c)
    if self.native:
        if native_testing:
            # Cross-check mode: run the native and the pure-Python parser
            # side by side; any disagreement is a bug in the native code.
            self.test_buf.extend(c)
            m = self.__parse_char_native(self.test_buf)
            m2 = self.__parse_char_legacy()
            if m2 != m:
                print("Native: %s\nLegacy: %s\n" % (m, m2))
                raise Exception('Native vs. Legacy mismatch')
        else:
            m = self.__parse_char_native(self.buf)
    else:
        m = self.__parse_char_legacy()
    if m is not None:
        self.total_packets_received += 1
        self.__callbacks(m)
    else:
        # XXX The idea here is if we've read something and there's nothing left in
        # the buffer, reset it to 0 which frees the memory
        if self.buf_len() == 0 and self.buf_index != 0:
            self.buf = bytearray()
            self.buf_index = 0
    return m
def __parse_char_legacy(self):
    '''input some data bytes, possibly returning a new message (uses no native code)

    Pure-Python parse step: scans the receive buffer for a v1/v2 frame
    marker, computes the expected frame length from the 3-byte prefix,
    and decodes one complete frame when enough bytes have arrived.
    Returns a message, a MAVLink_bad_data (robust mode), or None.
    '''
    # Assume a MAVLink1 header unless the first pending byte is the v2 marker.
    header_len = HEADER_LEN_V1
    if self.buf_len() >= 1 and self.buf[self.buf_index] == PROTOCOL_MARKER_V2:
        header_len = HEADER_LEN_V2
    if self.buf_len() >= 1 and self.buf[self.buf_index] != PROTOCOL_MARKER_V1 and self.buf[self.buf_index] != PROTOCOL_MARKER_V2:
        # Byte is not a valid frame marker: consume it and report/raise.
        magic = self.buf[self.buf_index]
        self.buf_index += 1
        if self.robust_parsing:
            m = MAVLink_bad_data(bytearray([magic]), 'Bad prefix')
            self.expected_length = header_len+2
            self.total_receive_errors += 1
            return m
        if self.have_prefix_error:
            # Only raise once per run of bad prefix bytes.
            return None
        self.have_prefix_error = True
        self.total_receive_errors += 1
        raise MAVError("invalid MAVLink prefix '%s'" % magic)
    self.have_prefix_error = False
    if self.buf_len() >= 3:
        # Unpack the first three header bytes: marker, payload length,
        # incompat_flags (the latter is meaningful for MAVLink2 only).
        sbuf = self.buf[self.buf_index:3+self.buf_index]
        if sys.version_info.major < 3:
            sbuf = str(sbuf)
        (magic, self.expected_length, incompat_flags) = self.mav20_h3_unpacker.unpack(sbuf)
        if magic == PROTOCOL_MARKER_V2 and (incompat_flags & MAVLINK_IFLAG_SIGNED):
            # Signed v2 frames carry an extra signature block.
            self.expected_length += MAVLINK_SIGNATURE_BLOCK_LEN
        # Total frame size = payload + header + 2 CRC bytes.
        self.expected_length += header_len + 2
    if self.expected_length >= (header_len+2) and self.buf_len() >= self.expected_length:
        # A complete frame is buffered: consume and decode it.
        mbuf = array.array('B', self.buf[self.buf_index:self.buf_index+self.expected_length])
        self.buf_index += self.expected_length
        self.expected_length = header_len+2
        if self.robust_parsing:
            try:
                if magic == PROTOCOL_MARKER_V2 and (incompat_flags & ~MAVLINK_IFLAG_SIGNED) != 0:
                    raise MAVError('invalid incompat_flags 0x%x 0x%x %u' % (incompat_flags, magic, self.expected_length))
                m = self.decode(mbuf)
            except MAVError as reason:
                # In robust mode decode failures become bad-data messages
                # instead of exceptions.
                m = MAVLink_bad_data(mbuf, reason.message)
                self.total_receive_errors += 1
        else:
            if magic == PROTOCOL_MARKER_V2 and (incompat_flags & ~MAVLINK_IFLAG_SIGNED) != 0:
                raise MAVError('invalid incompat_flags 0x%x 0x%x %u' % (incompat_flags, magic, self.expected_length))
            m = self.decode(mbuf)
        return m
    return None
def parse_buffer(self, s):
    """Feed the byte string *s* to the parser and drain all complete messages.

    Returns a list of decoded messages, or None if *s* did not complete
    any message (partial data stays buffered for the next call).
    """
    m = self.parse_char(s)
    if m is None:
        return None
    ret = [m]
    # Keep pumping the parser with no new input: each call may release one
    # more message that is already sitting in the receive buffer.
    # (The original had an unreachable `return ret` after this loop; the
    # loop itself can only exit via the return below.)
    while True:
        m = self.parse_char("")
        if m is None:
            return ret
        ret.append(m)
def check_signature(self, msgbuf, srcSystem, srcComponent):
    '''check signature on incoming message

    The last 13 bytes of a signed MAVLink2 frame are:
    link_id (1 byte) + timestamp (6 bytes, little-endian, 10 us units)
    + signature (6 bytes = first 6 bytes of SHA-256 over the secret key
    followed by the frame excluding the signature itself).

    Returns True when the timestamp is acceptable for the
    (link, system, component) stream and the signature matches.
    '''
    if isinstance(msgbuf, array.array):
        # BUGFIX: array.tostring() was removed in Python 3.9; tobytes()
        # has been the identical replacement since Python 3.2.
        msgbuf = msgbuf.tobytes()
    timestamp_buf = msgbuf[-12:-6]
    link_id = msgbuf[-13]
    (tlow, thigh) = self.mav_sign_unpacker.unpack(timestamp_buf)
    timestamp = tlow + (thigh<<32)
    # see if the timestamp is acceptable
    stream_key = (link_id, srcSystem, srcComponent)
    if stream_key in self.signing.stream_timestamps:
        if timestamp <= self.signing.stream_timestamps[stream_key]:
            # reject old timestamp (replay protection)
            # print('old timestamp')
            return False
    else:
        # a new stream has appeared. Accept the timestamp if it is at most
        # one minute behind our current timestamp (6000*1000 * 10us = 60 s)
        if timestamp + 6000*1000 < self.signing.timestamp:
            # print('bad new stream')
            return False
        self.signing.stream_timestamps[stream_key] = timestamp
        # print('new stream')
    h = hashlib.new('sha256')
    h.update(self.signing.secret_key)
    h.update(msgbuf[:-6])
    # BUGFIX: compare the raw 48-bit signatures. The previous
    # str(h.digest())[:6] vs str(msgbuf)[-6:] compared Python 3 text
    # representations ("b'\\x..") and could never match.
    sig1 = h.digest()[:6]
    sig2 = bytes(msgbuf[-6:])
    if sig1 != sig2:
        # print('sig mismatch')
        return False
    # the timestamp we next send with is the max of the received timestamp and
    # our current timestamp
    self.signing.timestamp = max(self.signing.timestamp, timestamp)
    return True
def decode(self, msgbuf):
    '''decode a buffer as a MAVLink message

    Validates marker, length, CRC (and signature, when signing is
    configured), unpacks the payload, undoes the wire field ordering,
    and instantiates the message class from mavlink_map.
    Raises MAVError on any validation or unpack failure.
    '''
    # decode the header: first byte selects MAVLink2 (10-byte header,
    # 24-bit message id) vs MAVLink1 (6-byte header, 8-bit message id).
    if msgbuf[0] != PROTOCOL_MARKER_V1:
        headerlen = 10
        try:
            magic, mlen, incompat_flags, compat_flags, seq, srcSystem, srcComponent, msgIdlow, msgIdhigh = self.mav20_unpacker.unpack(msgbuf[:headerlen])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
        # 24-bit message id = low 16 bits | high 8 bits.
        msgId = msgIdlow | (msgIdhigh<<16)
        mapkey = msgId
    else:
        headerlen = 6
        try:
            magic, mlen, seq, srcSystem, srcComponent, msgId = self.mav10_unpacker.unpack(msgbuf[:headerlen])
            # MAVLink1 frames carry no flag bytes.
            incompat_flags = 0
            compat_flags = 0
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
        mapkey = msgId
    # A signed frame carries an extra trailing signature block.
    if (incompat_flags & MAVLINK_IFLAG_SIGNED) != 0:
        signature_len = MAVLINK_SIGNATURE_BLOCK_LEN
    else:
        signature_len = 0
    if ord(magic) != PROTOCOL_MARKER_V1 and ord(magic) != PROTOCOL_MARKER_V2:
        raise MAVError("invalid MAVLink prefix '%s'" % magic)
    # Declared payload length must match what is actually in the buffer.
    if mlen != len(msgbuf)-(headerlen+2+signature_len):
        raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u headerlen=%u' % (len(msgbuf)-(headerlen+2+signature_len), mlen, msgId, headerlen))
    if not mapkey in mavlink_map:
        raise MAVError('unknown MAVLink message ID %s' % str(mapkey))
    # decode the payload
    # NOTE: `type` (the message class) shadows the builtin for the rest
    # of this method; kept as-is to match the generated code.
    type = mavlink_map[mapkey]
    fmt = type.format
    order_map = type.orders
    len_map = type.lengths
    crc_extra = type.crc_extra
    # decode the checksum (2 bytes immediately before any signature block)
    try:
        crc, = self.mav_csum_unpacker.unpack(msgbuf[-(2+signature_len):][:2])
    except struct.error as emsg:
        raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
    # CRC covers everything after the marker, up to the CRC itself.
    crcbuf = msgbuf[1:-(2+signature_len)]
    if True: # using CRC extra
        # Per-message crc_extra byte is folded into the checksum.
        crcbuf.append(crc_extra)
    crc2 = x25crc(crcbuf)
    if crc != crc2.crc:
        raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))
    sig_ok = False
    if signature_len == MAVLINK_SIGNATURE_BLOCK_LEN:
        self.signing.sig_count += 1
    if self.signing.secret_key is not None:
        # Signing configured: decide whether to accept this frame.
        accept_signature = False
        if signature_len == MAVLINK_SIGNATURE_BLOCK_LEN:
            sig_ok = self.check_signature(msgbuf, srcSystem, srcComponent)
            accept_signature = sig_ok
            if sig_ok:
                self.signing.goodsig_count += 1
            else:
                self.signing.badsig_count += 1
            if not accept_signature and self.signing.allow_unsigned_callback is not None:
                # Bad signature: give the application a veto via callback.
                accept_signature = self.signing.allow_unsigned_callback(self, msgId)
                if accept_signature:
                    self.signing.unsigned_count += 1
                else:
                    self.signing.reject_count += 1
        elif self.signing.allow_unsigned_callback is not None:
            # Unsigned frame while signing is active: ask the callback.
            accept_signature = self.signing.allow_unsigned_callback(self, msgId)
            if accept_signature:
                self.signing.unsigned_count += 1
            else:
                self.signing.reject_count += 1
        if not accept_signature:
            raise MAVError('Invalid signature')
    csize = type.unpacker.size
    mbuf = msgbuf[headerlen:-(2+signature_len)]
    if len(mbuf) < csize:
        # zero pad to give right size
        mbuf.extend([0]*(csize - len(mbuf)))
    if len(mbuf) < csize:
        raise MAVError('Bad message of type %s length %u needs %s' % (
            type, len(mbuf), csize))
    mbuf = mbuf[:csize]
    try:
        t = type.unpacker.unpack(mbuf)
    except struct.error as emsg:
        raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
            type, fmt, len(mbuf), emsg))
    tlist = list(t)
    # handle sorted fields: undo the wire ordering using order_map/len_map
    if True:
        t = tlist[:]
        if sum(len_map) == len(len_map):
            # message has no arrays in it
            for i in range(0, len(tlist)):
                tlist[i] = t[order_map[i]]
        else:
            # message has some arrays
            tlist = []
            for i in range(0, len(order_map)):
                order = order_map[i]
                L = len_map[order]
                tip = sum(len_map[:order])
                field = t[tip]
                if L == 1 or isinstance(field, str):
                    tlist.append(field)
                else:
                    # Array field: take a slice of L consecutive elements.
                    tlist.append(t[tip:(tip + L)])
    # terminate any strings
    for i in range(0, len(tlist)):
        if type.fieldtypes[i] == 'char':
            if sys.version_info.major >= 3:
                tlist[i] = tlist[i].decode('utf-8')
            tlist[i] = str(MAVString(tlist[i]))
    t = tuple(tlist)
    # construct the message object
    try:
        m = type(*t)
    except Exception as emsg:
        raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
    m._signed = sig_ok
    if m._signed:
        # link_id lives 13 bytes from the end of a signed frame.
        m._link_id = msgbuf[-13]
    m._msgbuf = msgbuf
    m._payload = msgbuf[6:-(2+signature_len)]
    m._crc = crc
    m._header = MAVLink_header(msgId, incompat_flags, compat_flags, mlen, seq, srcSystem, srcComponent)
    return m
def icarous_heartbeat_encode(self, status):
    '''
    Build (but do not send) an ICAROUS heartbeat message.

    status : See the FMS_STATE enum. (type:uint8_t, values:ICAROUS_FMS_STATE)
    '''
    msg = MAVLink_icarous_heartbeat_message(status)
    return msg
def icarous_heartbeat_send(self, status, force_mavlink1=False):
    '''
    Encode and immediately send an ICAROUS heartbeat message.

    status : See the FMS_STATE enum. (type:uint8_t, values:ICAROUS_FMS_STATE)
    '''
    msg = self.icarous_heartbeat_encode(status)
    return self.send(msg, force_mavlink1=force_mavlink1)
def icarous_kinematic_bands_encode(self, numBands, type1, min1, max1, type2, min2, max2, type3, min3, max3, type4, min4, max4, type5, min5, max5):
'''
Kinematic multi bands (track) output from Daidalus
numBands : Number of track bands (type:int8_t)
type1 : See the TRACK_BAND_TYPES enum. (type:uint8_t, values:ICAROUS_TRACK_BAND_TYPES)
min1 : min angle (degrees) [deg] (type:float)
max1 : max angle (degrees) [deg] (type:float)
type2 : See the TRACK_BAND_TYPES enum. (type:uint8_t, values:ICAROUS_TRACK_BAND_TYPES)
min2 : min angle (degrees) [deg] (type:float)
max2 | |
# Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/envs/test.py
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
import logging
from collections import OrderedDict
from uuid import uuid4
import openid.oidutil
from django.utils.translation import ugettext_lazy
from edx_django_utils.plugins import add_plugins
from importlib.metadata import version
from path import Path as path
from openedx.core.djangoapps.plugins.constants import ProjectType, SettingsType
from openedx.core.lib.derived import derive_settings
from openedx.core.lib.tempdir import mkdtemp_clean
from xmodule.modulestore.modulestore_settings import update_module_store_settings
from .common import *
from common.djangoapps.util.testing import patch_sessions, patch_testcase # pylint: disable=wrong-import-order
# This patch disables the commit_on_success decorator during tests
# in TestCase subclasses.
patch_testcase()
patch_sessions()
# Allow all hosts during tests, we use a lot of different ones all over the codebase.
ALLOWED_HOSTS = [
'*'
]
# Silence noisy logs to make troubleshooting easier when tests fail.
LOG_OVERRIDES = [
('factory.generate', logging.ERROR),
('factory.containers', logging.ERROR),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# mongo connection settings
MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))
MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost')
THIS_UUID = uuid4().hex[:5]
FEATURES['DISABLE_SET_JWT_COOKIES_FOR_TESTS'] = True
# can't test start dates with this True, but on the other hand,
# can test everything else :)
FEATURES['DISABLE_START_DATES'] = True
# Most tests don't use the discussion service, so we turn it off to speed them up.
# Tests that do can enable this flag, but must use the UrlResetMixin class to force urls.py
# to reload. For consistency in user-experience, keep the value of this setting in sync with
# the one in cms/envs/test.py
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
FEATURES['ENABLE_SERVICE_STATUS'] = True
FEATURES['ENABLE_VERIFIED_CERTIFICATES'] = True
# Toggles embargo on for testing
FEATURES['EMBARGO'] = True
# Enable the milestones app in tests to be consistent with it being enabled in production
FEATURES['MILESTONES_APP'] = True
FEATURES['ENABLE_ENROLLMENT_TRACK_USER_PARTITION'] = True
FEATURES['ENABLE_BULK_ENROLLMENT_VIEW'] = True
FEATURES['ENABLE_BULK_USER_RETIREMENT'] = True
DEFAULT_MOBILE_AVAILABLE = True
# Need wiki for courseware views to work. TODO (vshnayder): shouldn't need it.
WIKI_ENABLED = True
# Enable a parental consent age limit for testing
PARENTAL_CONSENT_AGE_LIMIT = 13
# Local Directories
TEST_ROOT = path("test_root")
# Want static files in the same dir for running on jenkins.
STATIC_ROOT = TEST_ROOT / "staticfiles"
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = STATIC_ROOT / "webpack-stats.json"
STATUS_MESSAGE_PATH = TEST_ROOT / "status_message.json"
COURSES_ROOT = TEST_ROOT / "data"
DATA_DIR = COURSES_ROOT
COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data"
# Where the content data is checked out. This may not exist on jenkins.
GITHUB_REPO_ROOT = ENV_ROOT / "data"
USE_I18N = True
LANGUAGE_CODE = 'en' # tests assume they will get English.
XQUEUE_INTERFACE = {
"url": "http://sandbox-xqueue.edx.org",
"django_auth": {
"username": "lms",
"password": "***<PASSWORD>***"
},
"basic_auth": ('anant', 'agarwal'),
}
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
# Don't rely on a real staff grading backend
MOCK_STAFF_GRADING = True
MOCK_PEER_GRADING = True
COMMENTS_SERVICE_URL = 'http://localhost:4567'
DJFS = {
'type': 'osfs',
'directory_root': f'{DATA_DIR}/django-pyfs/static/django-pyfs',
'url_root': '/static/django-pyfs',
}
############################ STATIC FILES #############################
# TODO (cpennington): We need to figure out how envs/test.py can inject things
# into common.py so that we don't have to repeat this sort of thing
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
STATICFILES_DIRS += [
(course_dir, COMMON_TEST_DATA_ROOT / course_dir)
for course_dir in os.listdir(COMMON_TEST_DATA_ROOT)
if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir)
]
# Avoid having to run collectstatic before the unit test suite
# If we don't add these settings, then Django templates that can't
# find pipelined assets will raise a ValueError.
# http://stackoverflow.com/questions/12816941/unit-testing-with-django-pipeline
STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage'
# Don't use compression during tests
PIPELINE['JS_COMPRESSOR'] = None
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': TEST_ROOT / "data",
},
xml_store_options={
'data_dir': mkdtemp_clean(dir=TEST_ROOT), # never inadvertently load all the XML courses
},
doc_store_settings={
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': f'test_xmodule_{THIS_UUID}',
'collection': 'test_modulestore',
},
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'db': f'test_xcontent_{THIS_UUID}',
'port': MONGO_PORT_NUM,
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'ATOMIC_REQUESTS': True,
},
'student_module_history': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
CACHES = {
# This is the cache used for most things.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'course_structure_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
# Blockstore caching tests require a cache that actually works:
'blockstore': {
'KEY_PREFIX': 'blockstore',
'KEY_FUNCTION': 'common.djangoapps.util.memcache.safe_key',
'LOCATION': 'edx_loc_mem_cache',
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
############################### BLOCKSTORE #####################################
# Blockstore tests
RUN_BLOCKSTORE_TESTS = os.environ.get('EDXAPP_RUN_BLOCKSTORE_TESTS', 'no').lower() in ('true', 'yes', '1')
BLOCKSTORE_API_URL = os.environ.get('EDXAPP_BLOCKSTORE_API_URL', "http://edx.devstack.blockstore-test:18251/api/v1/")
BLOCKSTORE_API_AUTH_TOKEN = os.environ.get('EDXAPP_BLOCKSTORE_API_AUTH_TOKEN', 'edxapp-test-key')
XBLOCK_RUNTIME_V2_EPHEMERAL_DATA_CACHE = 'blockstore' # This must be set to a working cache for the tests to pass
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
############################# SECURITY SETTINGS ################################
# Default to advanced security in common.py, so tests can reset here to use
# a simpler security model
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
######### Third-party auth ##########
FEATURES['ENABLE_THIRD_PARTY_AUTH'] = True
AUTHENTICATION_BACKENDS = [
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.linkedin.LinkedinOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.azuread.AzureADOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'common.djangoapps.third_party_auth.identityserver3.IdentityServer3',
'common.djangoapps.third_party_auth.dummy.DummyBackend',
'common.djangoapps.third_party_auth.saml.SAMLAuthBackend',
'common.djangoapps.third_party_auth.lti.LTIAuthBackend',
] + AUTHENTICATION_BACKENDS
THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = {
'custom1': {
'secret_key': 'opensesame',
'url': '/misc/my-custom-registration-form',
'error_url': '/misc/my-custom-sso-error-page'
},
}
############################## OAUTH2 Provider ################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
OAUTH_ENFORCE_SECURE = False
########################### External REST APIs #################################
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'django-cache'
CLEAR_REQUEST_CACHE_ON_TASK_COMPLETION = False
######################### MARKETING SITE ###############################
MKTG_URL_LINK_MAP = {
'ABOUT': 'about',
'CONTACT': 'contact',
'HELP_CENTER': 'help-center',
'COURSES': 'courses',
'ROOT': 'root',
'TOS': 'tos',
'HONOR': 'honor',
'PRIVACY': 'privacy',
'CAREERS': 'careers',
'NEWS': 'news',
'PRESS': 'press',
'BLOG': 'blog',
'DONATE': 'donate',
'SITEMAP.XML': 'sitemap_xml',
# Verified Certificates
'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
}
SUPPORT_SITE_LINK = 'https://example.support.edx.org'
PASSWORD_RESET_SUPPORT_LINK = 'https://support.example.com/password-reset-help.html'
ACTIVATION_EMAIL_SUPPORT_LINK = 'https://support.example.com/activation-email-help.html'
LOGIN_ISSUE_SUPPORT_LINK = 'https://support.example.com/login-issue-help.html'
ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS = OrderedDict([
("utm_campaign", "edX.org Referral"),
("utm_source", "edX.org"),
("utm_medium", "Footer"),
])
############################ STATIC FILES #############################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = TEST_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
STATICFILES_DIRS.append(("uploads", MEDIA_ROOT))
_NEW_STATICFILES_DIRS = []
# Strip out any static files that aren't in the repository root
# so that the tests can run with only the edx-platform directory checked out
for static_dir in STATICFILES_DIRS:
# Handle both tuples and non-tuple directory definitions
try:
_, data_dir = static_dir
except ValueError:
data_dir = static_dir
if data_dir.startswith(REPO_ROOT):
_NEW_STATICFILES_DIRS.append(static_dir)
STATICFILES_DIRS = _NEW_STATICFILES_DIRS
FILE_UPLOAD_TEMP_DIR = TEST_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
BLOCK_STRUCTURES_SETTINGS['PRUNING_ACTIVE'] = True
########################### Server Ports ###################################
# These ports are carefully chosen so that if the browser needs to
# access them, they will be available through the SauceLabs SSH tunnel
XQUEUE_PORT = 8040
YOUTUBE_PORT = 8031
LTI_PORT = 8765
VIDEO_SOURCE_PORT = 8777
FEATURES['PREVIEW_LMS_BASE'] = "preview.localhost"
############### Module Store Items ##########
PREVIEW_DOMAIN = FEATURES['PREVIEW_LMS_BASE'].split(':')[0]
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = {
PREVIEW_DOMAIN: 'draft-preferred'
}
################### Make tests faster
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
]
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = True
################### Make tests quieter
# OpenID spews messages like this to stderr, we don't need to see them:
# Generated checkid_setup request to http://testserver/openid/provider/login/
# With assocication {HMAC-SHA1}{51d49995}{s/kRmA==}
openid.oidutil.log = lambda message, level=0: None
# Include a non-ascii character in PLATFORM_NAME and PLATFORM_DESCRIPTION to uncover possible
# UnicodeEncodeErrors in tests. Also use lazy text to reveal possible json dumps errors
PLATFORM_NAME = ugettext_lazy("édX")
PLATFORM_DESCRIPTION = ugettext_lazy("Open édX Platform")
SITE_NAME = "edx.org"
TEST_THEME = COMMON_ROOT / "test" / "test-theme"
# add extra template directory for test-only templates
MAKO_TEMPLATE_DIRS_BASE.extend([
COMMON_ROOT / 'test' / 'templates',
COMMON_ROOT / 'test' / 'test_sites',
REPO_ROOT / 'openedx' / 'core' / 'djangolib' / 'tests' / 'templates',
])
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "<KEY>",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
VIDEO_CDN_URL = {
'CN': 'http://api.xuetangx.com/edx/video?s3_url='
}
######### dashboard git log settings #########
MONGODB_LOG = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'user': '',
'password': '',
'db': 'xlog',
}
NOTES_DISABLED_TABS = []
# Enable EdxNotes for tests.
FEATURES['ENABLE_EDXNOTES'] = True
# Enable courseware search for tests
FEATURES['ENABLE_COURSEWARE_SEARCH'] = True
# Enable dashboard search for tests
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
FACEBOOK_APP_SECRET = "Test"
FACEBOOK_APP_ID = "Test"
FACEBOOK_API_VERSION = "v2.8"
####################### ELASTICSEARCH TESTS #######################
# Enable this when testing elasticsearch-based code which couldn't be tested using the mock engine
ENABLE_ELASTICSEARCH_FOR_TESTS = os.environ.get(
'EDXAPP_ENABLE_ELASTICSEARCH_FOR_TESTS', 'no').lower() in ('true', 'yes', '1')
TEST_ELASTICSEARCH_USE_SSL = os.environ.get(
'EDXAPP_TEST_ELASTICSEARCH_USE_SSL', 'no').lower() in ('true', 'yes', '1')
TEST_ELASTICSEARCH_HOST = os.environ.get('EDXAPP_TEST_ELASTICSEARCH_HOST', 'edx.devstack.elasticsearch710')
TEST_ELASTICSEARCH_PORT = int(os.environ.get('EDXAPP_TEST_ELASTICSEARCH_PORT', '9200'))
######### custom courses #########
INSTALLED_APPS += ['lms.djangoapps.ccx', 'openedx.core.djangoapps.ccxcon.apps.CCXConnectorConfig']
FEATURES['CUSTOM_COURSES_EDX'] = True
# Set dummy values for profile image settings.
PROFILE_IMAGE_BACKEND = {
'class': 'openedx.core.storage.OverwriteStorage',
'options': {
'location': MEDIA_ROOT,
'base_url': 'http://example-storage.com/profile-images/',
},
}
PROFILE_IMAGE_DEFAULT_FILENAME = 'default'
PROFILE_IMAGE_DEFAULT_FILE_EXTENSION = 'png'
PROFILE_IMAGE_HASH_SEED = 'secret'
PROFILE_IMAGE_MAX_BYTES = 1024 * 1024
PROFILE_IMAGE_MIN_BYTES = 100
# Enable the LTI provider feature for testing
FEATURES['ENABLE_LTI_PROVIDER'] = True
INSTALLED_APPS.append('lms.djangoapps.lti_provider.apps.LtiProviderConfig')
AUTHENTICATION_BACKENDS.append('lms.djangoapps.lti_provider.users.LtiBackend')
# Financial assistance page
FEATURES['ENABLE_FINANCIAL_ASSISTANCE_FORM'] = True
COURSE_BLOCKS_API_EXTRA_FIELDS = [
('course', 'course_visibility'),
('course', 'other_course_settings'),
]
COURSE_CATALOG_URL_ROOT = 'https://catalog.example.com'
COURSE_CATALOG_API_URL = f'{COURSE_CATALOG_URL_ROOT}/api/v1'
COMPREHENSIVE_THEME_DIRS = [REPO_ROOT / "themes", REPO_ROOT / "common/test"]
COMPREHENSIVE_THEME_LOCALE_PATHS = [REPO_ROOT / "themes/conf/locale", ]
ENABLE_COMPREHENSIVE_THEMING = True
LMS_ROOT_URL = "http://localhost:8000"
# | |
this kind of news analysis can also prove useful for individual stocks. News articles can often explain \
why a stock is outpacing the growth of the market or lagging behind. In the \"Try it Yourself!\" section below you can do your own \
research. One interesting thread is to input TSLA as your stock ticker. You will quickly notice that TSLA had an incredibly \
large drop (-21.1%) on September 8th, 2020. Clicking through the link for this date will show you that this was the result of Tesla \
being snubbed by the S&P 500 and kept off of that index [3].")
st.write("")
tryout = st.beta_expander("Try it yourself! Analyze Trading Data.")
with tryout:
st.write("Input a stock of your own choosing and view personalized versions of Figures 1 and 2, \
as well as search results for each day.")
user_choice = st.text_input('Input Ticker:', '')
#Verify it's a real ticker.
if(user_choice):
df3 = yf.download(user_choice, start = start_date, end = end_date, progress = False)
if(df3.empty):
st.write("Invalid ticker, please enter a different one.")
else:
#Calculate A bunch of Stuff
source2 = pd.DataFrame({
'Date': df3.index,
'Price (USD)': df3["Adj Close"]
})
source2['DateStr'] = source2['Date'].dt.strftime('%Y%m%d')
dateList2 = source2['DateStr'].tolist()
valueList2 = source2['Price (USD)'].tolist()
urlList2 = []
valueChange2 = []
prev_date2 = dateList2[0]
for date in dateList2:
#urlList2.append('https://www.nytimes.com/search?dropmab=true&endDate='+date+'&query=COVID&sort=best&startDate='+prev_date)
urlList2.append('https://www.marketwatch.com/search?q='+user_choice+'&m=Ticker&rpp=15&mp=2005&bd=true&bd=false&bdv='\
+date[4:6]+'%2F'+date[6:8]+'%2F'+date[0:4]+'&rs=true')
prev_date2 = date
source2['URLs'] = urlList2
#Derivative Data for Bar Chart
old_value2 = valueList2[0]
for value in valueList2:
valueChange2.append(((value-old_value2)/old_value2)*100)
old_value2 = value
source2['Percent_Change'] = valueChange2
#Make charts
chart3 = alt.Chart(source2,title="Value of " +user_choice+" During COVID19 Pandemic").mark_point().encode(
x='Date',
y='Price (USD)',
tooltip=['Date','Price (USD)'],
href='URLs'
).interactive().properties(
width=550,
height=400)
st.write(chart3)
chart4 = alt.Chart(source2,title="Day to Day Percent Change of "+user_choice).mark_bar().encode(
x="Date",
y="Percent_Change",
color=alt.condition(
alt.datum.Percent_Change > 0,
alt.value("black"), # The positive color
alt.value("red") # The negative color
),
tooltip=['Date','Percent_Change'],
href='URLs'
).properties(width=550).interactive()
st.write(chart4)
st.write("")
st.title("Open Source Information")
st.write("Beyond mediums like news articles or broadcasts, there are many different avenues through which information \
relevant to the market could be found. \"Open source information\" can be defined as any information that is publicly available, \
and can be obtained by request, purchase, or observation [4]. Although this kind of information is publicly available, it could be difficult \
or expensive to obtain. For many machine learning applications, open source information is drawn from large companies like Facebook, Google, \
and Twitter. Although data obtained through these company's APIs has some metadata tagging, it still needs to be further cleaned and analyzed.")
st.write("Just like articles from news media can have an impact on the market on large time scales, it follows that smaller events that are much more immediate \
and time sensitive could impact the stock market in real time. For example, on May 1st 2020, <NAME> tweeted \"Tesla stock price is too high imo,\" \
which sent the company's stock value into a -10% nosedive almost immediately [5]. In this case, only the savviest and quickest investors would have \
been able to see this tweet and sell prior to the inevitable drop." )
st.image("https://static.toiimg.com/photo/imgsize-11092,msid-75579881/75579881.jpg",caption="The Tweet Heard \'Round the World!")
st.write("Right now it is hard to imagine how one might automatically incorporate this type of information into any model about the stock market. \
Although humans can easily read and interpret a tweet such as the one from <NAME>, writing an algorithm or model to do this is \
no easy task. In order to apply a machine learning aspect to this problem, we pursued an approach based on analyzing the sentiment of tweets \
about a chosen keyword.")
st.write("For our sentiment analysis we used TextBlob, which is a natural language processing (NLP) package for Python [6]. TextBlob is able to perform \
many language processing tasks, including word lemmatization and inflection, noun-phrase extraction, and part of speech tagging. The feature in \
particular that we care about is sentiment classification. TextBlob is able to classify the polarity of text (positive or negative) as well as \
the subjectivity of a given text (fact or opinion) [7].")
st.write("")
tryout2 = st.beta_expander("Try it yourself! Sentiment Classification.")
with tryout2:
st.write("Input a sentence or phrase below to try out sentiment classification for yourself:")
myPhrase = st.text_input('Input Phrase:', '')
phrase_blob = TextBlob(myPhrase)
if(myPhrase):
if(phrase_blob.sentiment.polarity>0):
st.write("Classified as positive.")
if(phrase_blob.sentiment.polarity==0):
st.write("Classified as neutral.")
if(phrase_blob.sentiment.polarity<0):
st.write("Classified as negative.")
st.write("")
st.write("With the sentiment classification aspect sorted, the next step is to gather data from Twitter. We did this by applying for Twitter API access credentials \
and then using that API in order to make queries for tweets about a specific keyword. Although the Twitter API is quite useful (and free!) there are \
certain limitations that one has to work around while using it. The first major limitation of the API is that query size is restricted to 100 tweets. \
This means that any keyword request made through the API can at most contain 100 responses. Because of this limitation and the bursty nature of tweeting, \
queries can often end up with many duplicate tweets because of many users retweeting a particularly popular post in a short period of time. A second limitation \
of the API is that the default behavior is to provide the 100 most recent tweets matching a keyword as the query response. There are workarounds to look for specific \
time periods, however this is still limited to a 7 day window prior to the current time. As a result, it is difficult to use the API approach in order to research things \
in the past like the market at the beginning of the COVID period. We designed our data collection to work around these limitations by writing a program to gather market \
data and batches of tweets simultaenously throughout a trading day. Using this script, we collected data for a few search terms, including AAPL, TSLA, BTC, and COVID19.")
tryout3 = st.beta_expander("Try it yourself! Twitter Keyword Search and Classification.")
with tryout3:
st.write("Input a keyword search to fetch up to three tweets.")
myPhrase2 = st.text_input('Search Term:', '')
if(myPhrase2):
api = TwitterClient()
tweets = api.get_tweets(query = myPhrase2, count = 3)
for tweet in tweets:
st.write("Text: "+tweet['text'])
st.write("Classification: "+tweet['sentiment'])
st.markdown("---")
st.markdown("---")
st.write("Following from the idea that particularly consequential tweets could result in major swings \
in a company’s value, we also wanted to visualize how tweets from a particularly important individual \
(<NAME> in this case), could impact share price over a longer period of time. In the chart below, \
the average sentiment of Musk’s tweets on a daily basis are layered with a plot of TSLA’s share price \
over the same time period. Each tweet was classified using the TextBlob method described above. After \
classification, tweets were binned by date to compute an average sentiment for the day. Inspecting this \
graph closely, you can see that very negatively and very positively polarized tweeting days are often nearby \
days with large deltas in share price. Although this does not allow us to draw any conclusions about \
causality or the extent to which these two phenomenon are intertwined, it does give us pause to investigate further. ")
st.image("https://i.ibb.co/RzYDpPQ/Screen-Shot-2020-12-09-at-10-25-06-PM.png",width=700)
st.title("Sentiment Analysis - Day Scale")
st.write("In this section, we examine visualizations for Bitcoin, Tesla, and the SPDR S&P 500 ETF. \
These data were collected over a single trading day using a Tweet Miner, as described above. \
Three encodings are used to represent the data: color for strength of the correlation \
between the price of the asset and the Twitter sentiment; size to denote the net sentiment \
calculated as the difference between the rate of positive tweets and negative tweets about the asset; \
and shape to denote if the sign of the derivatives of the price and sentiment data are in agreement \
or not.")
chart_b_1 = alt.Chart(BTC_df).mark_point().encode(
alt.X('Date',
scale=alt.Scale(zero=False)
),
y = alt.Y('Price', scale=alt.Scale(zero=False)),
color = 'Correlation',
fill = 'Correlation',
size = 'Net Sentiment',
shape = 'Sign of Change',
tooltip=['Date','Price','Net Sentiment','Correlation']
).properties(width=700, height = 400, title | |
"""
Tests for the particle array module.
"""
# standard imports
import unittest
import numpy
# local imports
from pysph.base import particle_array
from pysph.base import utils
from pyzoltan.core.carray import LongArray, IntArray, DoubleArray
import pickle
import pytest
from pysph.cpy.config import get_config
def check_array(x, y):
    """Return True when *x* and *y* agree element-wise to within an
    absolute tolerance of 1e-16 (no relative tolerance)."""
    tolerance = 1e-16
    return numpy.allclose(x, y, atol=tolerance, rtol=0)
###############################################################################
# `ParticleArrayTest` class.
###############################################################################
class ParticleArrayTest(object):
"""
Tests for the particle array class.
"""
def test_constructor(self):
# Default constructor test.
p = particle_array.ParticleArray(name='test_particle_array',
backend=self.backend)
self.assertEqual(p.name, 'test_particle_array')
self.assertEqual('tag' in p.properties, True)
self.assertEqual(p.properties['tag'].length, 0)
# Constructor with some properties.
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
p = particle_array.ParticleArray(x={'data': x},
y={'data': y},
z={'data': z},
m={'data': m},
h={'data': h},
backend=self.backend)
self.assertEqual(p.name, '')
self.assertEqual('x' in p.properties, True)
self.assertEqual('y' in p.properties, True)
self.assertEqual('z' in p.properties, True)
self.assertEqual('m' in p.properties, True)
self.assertEqual('h' in p.properties, True)
# get the properties are check if they are the same
xarr = p.properties['x'].get_npy_array()
self.assertEqual(check_array(xarr, x), True)
yarr = p.properties['y'].get_npy_array()
self.assertEqual(check_array(yarr, y), True)
zarr = p.properties['z'].get_npy_array()
self.assertEqual(check_array(zarr, z), True)
marr = p.properties['m'].get_npy_array()
self.assertEqual(check_array(marr, m), True)
harr = p.properties['h'].get_npy_array()
self.assertEqual(check_array(harr, h), True)
# check if the 'tag' array was added.
self.assertEqual('tag' in p.properties, True)
self.assertEqual(list(p.properties.values())[0].length == len(x), True)
# Constructor with tags
tags = [0, 1, 0, 1]
p = particle_array.ParticleArray(x={'data': x}, y={'data': y},
z={'data': z},
tag={'data': tags, 'type': 'int'},
backend=self.backend)
self.assertEqual(check_array(p.get('tag', only_real_particles=False),
[0, 0, 1, 1]), True)
self.assertEqual(check_array(p.get('x', only_real_particles=False),
[1, 3, 2, 4]), True)
self.assertEqual(check_array(p.get('y', only_real_particles=False),
[0, 2, 1, 3]), True)
self.assertEqual(check_array(p.get('z', only_real_particles=False),
[0, 0, 0, 0]), True)
# trying to create particle array without any values but some
# properties.
p = particle_array.ParticleArray(x={}, y={}, z={}, h={},
backend=self.backend)
self.assertEqual(p.get_number_of_particles(), 0)
self.assertEqual('x' in p.properties, True)
self.assertEqual('y' in p.properties, True)
self.assertEqual('z' in p.properties, True)
self.assertEqual('tag' in p.properties, True)
# now trying to supply some properties with values and others without
p = particle_array.ParticleArray(
x={'default': 10.0}, y={'data': [1.0, 2.0]},
z={}, h={'data': [0.1, 0.1]}, backend=self.backend
)
self.assertEqual(p.get_number_of_particles(), 2)
self.assertEqual(check_array(p.x, [10., 10.]), True)
self.assertEqual(check_array(p.y, [1., 2.]), True)
self.assertEqual(check_array(p.z, [0, 0]), True)
self.assertEqual(check_array(p.h, [0.1, 0.1]), True)
def test_constructor_works_with_strides(self):
# Given
x = [1, 2, 3, 4.]
rho = 10.0
data = numpy.arange(8)
# When
p = particle_array.ParticleArray(
x=x, rho=rho, data={'data': data, 'stride': 2}, name='fluid',
backend=self.backend
)
# Then
self.assertEqual(p.name, 'fluid')
self.assertEqual('x' in p.properties, True)
self.assertEqual('rho' in p.properties, True)
self.assertEqual('data' in p.properties, True)
self.assertEqual('tag' in p.properties, True)
self.assertEqual('pid' in p.properties, True)
self.assertEqual('gid' in p.properties, True)
# get the properties are check if they are the same
self.assertEqual(check_array(p.x, x), True)
self.assertEqual(check_array(p.rho, numpy.ones(4) * rho), True)
self.assertEqual(check_array(p.data, numpy.ravel(data)), True)
def test_constructor_works_with_simple_props(self):
# Given
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
rho = 10.0
data = numpy.diag((2, 2))
# When
p = particle_array.ParticleArray(
x=x, y=y, rho=rho, data=data, name='fluid',
backend=self.backend
)
# Then
self.assertEqual(p.name, 'fluid')
self.assertEqual('x' in p.properties, True)
self.assertEqual('y' in p.properties, True)
self.assertEqual('rho' in p.properties, True)
self.assertEqual('data' in p.properties, True)
self.assertEqual('tag' in p.properties, True)
self.assertEqual('pid' in p.properties, True)
self.assertEqual('gid' in p.properties, True)
# get the properties are check if they are the same
self.assertEqual(check_array(p.x, x), True)
self.assertEqual(check_array(p.y, y), True)
self.assertEqual(check_array(p.rho, numpy.ones(4) * rho), True)
self.assertEqual(check_array(p.data, numpy.ravel(data)), True)
def test_get_number_of_particles(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
A = numpy.arange(12)
p = particle_array.ParticleArray(
x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h}, A={'data': A, 'stride': 3},
backend=self.backend
)
self.assertEqual(p.get_number_of_particles(), 4)
def test_get(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
A = numpy.arange(12)
p = particle_array.ParticleArray(
x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h}, A={'data': A, 'stride': 3},
backend=self.backend
)
self.assertEqual(check_array(x, p.get('x')), True)
self.assertEqual(check_array(y, p.get('y')), True)
self.assertEqual(check_array(z, p.get('z')), True)
self.assertEqual(check_array(m, p.get('m')), True)
self.assertEqual(check_array(h, p.get('h')), True)
self.assertEqual(check_array(A.ravel(), p.get('A')), True)
def test_clear(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
p = particle_array.ParticleArray(x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h}, backend=self.backend)
p.clear()
self.assertEqual(len(p.properties), 3)
self.assertEqual('tag' in p.properties, True)
self.assertEqual(p.properties['tag'].length, 0)
self.assertEqual('pid' in p.properties, True)
self.assertEqual(p.properties['pid'].length, 0)
self.assertEqual('gid' in p.properties, True)
self.assertEqual(p.properties['gid'].length, 0)
def test_getattr(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
A = numpy.arange(12)
p = particle_array.ParticleArray(
x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h}, A={'data': A, 'stride': 3},
backend=self.backend
)
self.assertEqual(check_array(x, p.x), True)
self.assertEqual(check_array(y, p.y), True)
self.assertEqual(check_array(z, p.z), True)
self.assertEqual(check_array(m, p.m), True)
self.assertEqual(check_array(h, p.h), True)
self.assertEqual(check_array(A.ravel(), p.get('A')), True)
# try getting an non-existant attribute
self.assertRaises(AttributeError, p.__getattr__, 'a')
def test_setattr(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
A = numpy.arange(12)
p = particle_array.ParticleArray(
x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h}, A={'data': A, 'stride': 3},
backend=self.backend
)
p.x = p.x * 2.0
self.assertEqual(check_array(p.get('x'), [2., 4, 6, 8]), True)
p.x = p.x + 3.0 * p.x
self.assertEqual(check_array(p.get('x'), [8., 16., 24., 32.]), True)
p.A = p.A*2
self.assertEqual(check_array(p.get('A').ravel(), A*2), True)
def test_remove_particles(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
A = numpy.arange(12)
p = particle_array.ParticleArray(
x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h}, A={'data': A, 'stride': 3},
backend=self.backend
)
remove_arr = LongArray(0)
remove_arr.append(0)
remove_arr.append(1)
p.remove_particles(remove_arr)
self.pull(p)
self.assertEqual(p.get_number_of_particles(), 2)
self.assertEqual(check_array(p.x, [3., 4.]), True)
self.assertEqual(check_array(p.y, [2., 3.]), True)
self.assertEqual(check_array(p.z, [0., 0.]), True)
self.assertEqual(check_array(p.m, [1., 1.]), True)
self.assertEqual(check_array(p.h, [.1, .1]), True)
self.assertEqual(check_array(p.A, numpy.arange(6, 12)), True)
# now try invalid operations to make sure errors are raised.
remove_arr.resize(10)
self.assertRaises(ValueError, p.remove_particles, remove_arr)
# now try to remove a particle with index more that particle
# length.
remove_arr = [2]
p.remove_particles(remove_arr)
self.pull(p)
# make sure no change occurred.
self.assertEqual(p.get_number_of_particles(), 2)
self.assertEqual(check_array(p.x, [3., 4.]), True)
self.assertEqual(check_array(p.y, [2., 3.]), True)
self.assertEqual(check_array(p.z, [0., 0.]), True)
self.assertEqual(check_array(p.m, [1., 1.]), True)
self.assertEqual(check_array(p.h, [.1, .1]), True)
self.assertEqual(check_array(p.A, numpy.arange(6, 12)), True)
def test_add_particles(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, .1, .1, .1]
A = numpy.arange(12)
p = particle_array.ParticleArray(
x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h}, A=dict(data=A, stride=3),
backend=self.backend
)
new_particles = {}
new_particles['x'] = numpy.array([5., 6, 7], dtype=numpy.float32)
new_particles['y'] = numpy.array([4., 5, 6], dtype=numpy.float32)
new_particles['z'] = numpy.array([0., 0, 0], dtype=numpy.float32)
p.add_particles(**new_particles)
self.pull(p)
self.assertEqual(p.get_number_of_particles(), 7)
self.assertEqual(check_array(p.x, [1., 2, 3, 4, 5, 6, 7]), True)
self.assertEqual(check_array(p.y, [0., 1, 2, 3, 4, 5, 6]), True)
self.assertEqual(check_array(p.z, [0., 0, 0, 0, 0, 0, 0]), True)
expect = numpy.zeros(21, dtype=A.dtype)
expect[:12] = A
numpy.testing.assert_array_equal(p.A, expect)
# make sure the other arrays were resized
self.assertEqual(len(p.h), 7)
self.assertEqual(len(p.m), 7)
# try adding an empty particle list
p.add_particles(**{})
self.pull(p)
self.assertEqual(p.get_number_of_particles(), 7)
self.assertEqual(check_array(p.x, [1., 2, 3, 4, 5, 6, 7]), True)
self.assertEqual(check_array(p.y, [0., 1, 2, 3, 4, 5, 6]), True)
self.assertEqual(check_array(p.z, [0., 0, 0, 0, 0, 0, 0]), True)
self.assertEqual(check_array(p.A, expect), True)
# make sure the other arrays were resized
self.assertEqual(len(p.h), 7)
self.assertEqual(len(p.m), 7)
# adding particles with tags
p = particle_array.ParticleArray(x={'data': x}, y={'data': y},
z={'data': z}, m={'data': m},
h={'data': h},
backend=self.backend)
p.add_particles(x=[5, 6, 7, 8], tag=[1, 1, 0, 0])
self.pull(p)
self.assertEqual(p.get_number_of_particles(), 8)
self.assertEqual(check_array(p.x, [1, 2, 3, 4, 7, 8]), True)
self.assertEqual(check_array(p.y, [0, 1, 2, 3, 0, 0]), True)
self.assertEqual(check_array(p.z, [0, 0, 0, 0, 0, 0]), True)
def test_remove_tagged_particles(self):
x = [1, 2, 3, 4.]
y = [0., 1., 2., 3.]
z = [0., 0., 0., 0.]
m = [1., 1., 1., 1.]
h = [.1, | |
frames of a dot moving across an image
def build_frames(size, timeStep=0):
    """Generate a labelled sequence of (size x size) frames tracing four
    hand-wash gesture trajectories, separated by blank frames.

    Bug fixes relative to the original: ``np.randint`` and ``np.random()``
    are not numpy APIs — they raise AttributeError/TypeError.  They are
    replaced with ``np.random.randint`` / ``np.random.random`` as used by
    the Gesture* functions elsewhere in this file.  The unused ``labelB``
    and ``labelC`` accumulators and dead commented-out code were removed.

    Parameters
    ----------
    size : int
        Frame width/height; also the number of steps per gesture segment.
    timeStep : int
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    (frames, labels)
        frames : list of numpy arrays, one per animation step.
        labels : list of str, one label per gesture segment.
    """
    frames = list()
    labelA = list()

    # create the first frame with the dot on the left or right edge
    frame = np.zeros((size, size))
    step = np.random.randint(0, size - 1)
    # decide if we are heading left or right
    right = 1 if np.random.random() < 0.5 else 0
    col = 0 if right else size - 1
    # NOTE(review): writing 0 into a zero frame leaves it blank; verify that
    # 1 was not intended here (kept as-is to preserve behaviour).
    frame[step, col] = 0
    frames.append(frame)

    # segment 1: sine trajectory, labelled 'NailWashLeft'
    amplify = np.random.randint(5, 10) / 10.0
    xratio = np.random.randint(1, 4)
    yratio = np.random.randint(1, 4)
    labelA.append('NailWashLeft')
    for i in range(1, size):
        i = i / float(size)
        column, row = generate_sin(1, i, amplify=amplify)
        frame, step = next_frameSin(int(row * size / xratio), frame, int(column * size / yratio))
        frames.append(frame)

    # blank separator frame
    frame = np.zeros((size, size))
    frames.append(frame)

    # segment 2: damped sine trajectory, labelled 'NailWashRight'
    amplify = np.random.randint(5, 20) / 10.0
    xratio = np.random.randint(1, 4)
    yratio = np.random.randint(1, 4)
    labelA.append('NailWashRight')
    for i in range(1, size):
        i = i / float(size)
        column, row = generate_DampedSin(0.5, i, 3, amplify=amplify)
        frame, step = next_frameDampedSin(int(row * size / xratio), frame, int(column * size / yratio))
        frames.append(frame)

    # blank separator frame
    frame = np.zeros((size, size))
    frames.append(frame)

    # segment 3: circular trajectory, labelled 'ThumbFingureWash'
    radius = np.random.randint(5, 7) / 10
    xratio = np.random.randint(1, 3)
    yratio = np.random.randint(1, 3)
    x0 = np.random.randint(2, 3) / 10
    y0 = np.random.randint(2, 3) / 10
    labelA.append('ThumbFingureWash')
    for i in range(1, size):
        i = float(i) / float(size)
        column, row = generate_circle(1, i, 0.5, radius=radius, x0=x0, y0=y0)
        frame, step = next_frameDampedCircle(int(row * size / xratio), frame, int(column * size / yratio))
        frames.append(frame)

    # blank separator frame
    frame = np.zeros((size, size))
    frames.append(frame)

    # segment 4: heart-shaped trajectory, labelled 'ForeFingureWash'
    # (radius is drawn but unused by generate_Heart; kept so the random
    # draw sequence matches the original implementation)
    radius = np.random.randint(5, 7) / 10
    xratio = np.random.randint(1, 3)
    yratio = np.random.randint(1, 3)
    labelA.append('ForeFingureWash')
    for i in range(1, size):
        i = float(i) / float(size)
        column, row = generate_Heart(1, i, 0.5)
        frame, step = next_frameDampedHeart(int(row * size / xratio), frame, int(column * size / yratio))
        frames.append(frame)

    return frames, labelA
def GestureA(size, period=100, type=0):
    """Render gesture 'A' — one full sine period — as a list of one-hot
    (size x size) integer frames.

    For ``type`` 0 the geometry is fixed; for type 1 or 2 the horizontal
    sampling step, vertical scale and vertical offset are randomised.

    Returns (frames, labels) where labels is always ['GestureA'].
    """
    frames = list()
    labels = list()
    amplify = np.random.randint(5, 10) / 10.0  # drawn for RNG parity; unused
    sampling = 2                     # horizontal sampling step, range(1, 5)
    vscale = 1                       # vertical scale, range(0.1, 1, 0.1)
    voffset = size - vscale * size   # vertical offset, range(0, size - vscale*size)
    if type == 1 or type == 2:
        sampling = np.random.randint(3, 5)
        vscale = np.random.randint(1, 10) / 10.0
        voffset = np.random.randint(0, size - vscale * size)
    labels.append('GestureA')

    # one sine period sampled at integer steps in a 0-100 coordinate box
    xs = list(range(period))
    ys = [50 + 50 * np.sin(2 * pi * t / period) for t in range(period)]

    # subsample and map into pixel coordinates
    picked = range(0, period, sampling)
    cols = [xs[t] / 100 * (size - 1) for t in picked]
    rows = [ys[t] / 100 * (size - 1) * vscale + voffset for t in picked]

    # one frame per sampled point; the final point is dropped, matching the
    # original implementation
    for cc, rr in zip(cols[:-1], rows[:-1]):
        frame = np.zeros((size, size), dtype=int)
        frame[math.floor(rr), math.floor(cc)] = 1
        frames.append(frame)
    return frames, labels
def GestureB(size, period=100, type=0):
    """Render gesture 'B' — an exponentially damped, double-frequency sine
    — as a list of one-hot (size x size) frames.

    For ``type`` 0 the geometry is fixed; for type 1 or 2 the horizontal
    sampling step, vertical scale and vertical offset are randomised.

    Returns (frames, labels) where labels is always ['GestureB'].
    """
    frames = list()
    labels = list()
    amplify = np.random.randint(5, 10) / 10.0  # drawn for RNG parity; unused
    sampling = 2                     # horizontal sampling step, range(2, 5)
    vscale = 0.5                     # vertical scale, range(0.1, 1, 0.1)
    voffset = size - vscale * size   # vertical offset, range(0, size - vscale*size)
    if type == 1 or type == 2:
        sampling = np.random.randint(3, 5)
        vscale = np.random.randint(1, 10) / 10.0
        voffset = np.random.randint(0, size - vscale * size)
    decay = 0.03
    labels.append('GestureB')

    # damped sine: two full periods over the window, decaying exponentially
    ys = [50 + 50 * np.sin(2 * pi * t / (period / 2)) * np.exp(-decay * t)
          for t in range(period)]

    # subsample and map into pixel coordinates
    picked = range(0, period, sampling)
    cols = [t / 100 * (size - 1) for t in picked]
    rows = [ys[t] / 100 * (size - 1) * vscale + voffset for t in picked]

    # one frame per sampled point
    for cc, rr in zip(cols, rows):
        frame = np.zeros((size, size))
        frame[math.floor(rr), math.floor(cc)] = 1
        frames.append(frame)
    return frames, labels
def GestureC(size, period=100, type=0):
    """Render gesture 'C' — a full circle of radius R centred at (R, R) —
    as a list of one-hot (size x size) frames.

    For ``type`` 0 the geometry is fixed; for type 1 or 2 the horizontal
    sampling step, vertical scale, vertical offset and radius are
    randomised.

    Returns (frames, labels) where labels is always ['GestureC'].
    """
    frames = list()
    labels = list()
    amplify = np.random.randint(5, 10) / 10.0  # drawn for RNG parity; unused
    sampling = 2
    vscale = 1
    R = 50
    voffset = size - vscale * size   # vertical offset, range(0, size - vscale*size)
    if type == 1 or type == 2:
        sampling = np.random.randint(3, 5)
        vscale = np.random.randint(1, 10) / 10.0
        voffset = np.random.randint(0, size - vscale * size)
        R = np.random.randint(40, 50)
    labels.append('GestureC')

    # circle of radius R in the 0-100 coordinate box
    pts = [(R * np.cos(2 * pi * t / period) + R,
            R * np.sin(2 * pi * t / period) + R)
           for t in range(period)]

    # subsample, map into pixel coordinates and emit one frame per point
    for t in range(0, period, sampling):
        px, py = pts[t]
        cc = px / 100 * (size - 1)
        rr = py / 100 * (size - 1) * vscale + voffset
        frame = np.zeros((size, size))
        frame[math.floor(rr), math.floor(cc)] = 1
        frames.append(frame)
    return frames, labels
def GestureD(size, period=100, type=0):
    """Render gesture 'D' — a triangle (zig-zag) wave of amplitude A and
    half-period P — as a list of one-hot (size x size) frames.

    For ``type`` 0 the geometry is fixed; for type 1 or 2 the horizontal
    sampling step, vertical scale and vertical offset are randomised.

    Returns (frames, labels) where labels is always ['GestureD'].
    """
    frames = list()
    labels = list()
    amplify = np.random.randint(5, 10) / 10.0  # drawn for RNG parity; unused
    sampling = 2                     # horizontal sampling step, range(1, 5)
    vscale = 1                       # vertical scale, range(0.1, 1, 0.1)
    A = 100
    P = 25
    voffset = size - vscale * size   # vertical offset, range(0, size - vscale*size)
    if type == 1 or type == 2:
        sampling = np.random.randint(3, 5)
        vscale = np.random.randint(1, 10) / 10.0
        voffset = np.random.randint(0, size - vscale * size)
    labels.append('GestureD')

    # triangle wave: rises to A and falls back every 2*P steps
    ys = [(A / P) * (P - abs(t % (2 * P) - P)) for t in range(period)]

    # subsample, map into pixel coordinates and emit one frame per point
    for t in range(0, period, sampling):
        cc = t / 100 * (size - 1)
        rr = ys[t] / 100 * (size - 1) * vscale + voffset
        # indices are set swapped and the transpose is appended, which lands
        # the dot at [row, column] exactly like the other gestures
        frame = np.zeros((size, size))
        frame[math.floor(cc), math.floor(rr)] = 1
        frames.append(frame.T)
    return frames, labels
def GestureBackground(size, period=5, type=0):
    """Emit ``period`` blank (size x size) frames labelled 'Background'.

    ``type`` is accepted for signature compatibility with the other
    Gesture* generators but has no effect.
    """
    frames = [np.zeros((size, size)).T for _ in range(period)]
    return frames, ['Background']
'''
def GenNailLeftDuration(size):
frames = list()
labelA = list()
frame = np.zeros((size, size))
step = np.randint(0, size - 1)
# decide if we are heading left or right
right = 1 if np.random() < 0.5 else 0
col = 0 if right else size - 1
frame[step, col] | |
in gRNAs: # for every gRNA candidate within recoded region,
if g.index[0] >= startRecode-frame and g.index[1] <= endRecode+frame2: # if grna is inside recoded region
gOnSeq = g.seq; # get original gRNA sequence
wholeRecSeq = nonRecodedStart + recodedSeq; # add initial bases
gOffSeq = "";
anchor = -1; # will store index of gRNA bp most to the left (whichever strand). Default to -1 to indicate excision
if geneGB.checkInExon(g.index[0]) or geneGB.checkInExon(g.index[1]): # if the gRNA hasn't been completely excised,
if pamType == "NGG" and g.comp or pamType == "TTTV" and not g.comp: # if PAM is to the left of the rest of the gRNA sequence (on whichever strand),
anchor = g.index[0]-startRecode-frame; # stores index of gRNA bp most to the left (whichever strand)
for intron in intronIndices: # for every intron,
if g.index[0] > intron[1]: # if anchor after end of intron,
anchor -= intron[1]-intron[0]; # substract intron length from anchor index
elif intron[0] >= g.index[0] >= intron[1]: # if anchor inside intron,
anchor -= g.index[0] - intron[0]; # substract distance between intron start and anchor from anchor
gOffSeq = wholeRecSeq[anchor:anchor+len(g.seq)]; # get recoded sequence that used to be gRNA
if g.comp: # if on comp strand
gOffSeq = revComp(gOffSeq); # save as reverse complement
else: # if PAM is to the right,
anchor = g.index[1]-startRecode-frame; # stores index of gRNA bp most to the right (whichever strand)
for intron in intronIndices: # for every intron,
if g.index[1] > intron[1]: # if anchor after end of intron,
anchor -= intron[1]-intron[0]; # substract intron length from anchor index
elif intron[0] >= g.index[1] >= intron[1]: # if anchor inside intron,
anchor -= g.index[1] - intron[0]; # substract distance between intron start and anchor from anchor
gOffSeq = wholeRecSeq[anchor-len(g.seq):anchor]; # get recoded sequence that used to be gRNA
if g.comp: # if on comp strand
gOffSeq = revComp(gOffSeq); # save as reverse complement
gNewPAM = ""; # will store new PAM sequence
if pamType == "NGG" and anchor > -1: # if using NGG PAM and gRNA not completely excised,
if (g.index[1]+3 >= endRecode and not g.comp) or (g.index[0]-3 >= startRecode and g.comp): # if PAM is within recoded region,
if not g.comp: # if on positive strand,
gNewPAM = wholeRecSeq[anchor+len(g.seq):anchor+len(g.seq)+3]; # retrieve PAM downstream of gRNA sequence
else: # if on negative strand,
gNewPAM = revComp(wholeRecSeq[anchor+len(g.seq)-3:anchor+len(g.seq)]); # retrieve PAM upstream of gRNA sequence, on comp strand
else: # if outside recoded region,
if g.comp: # if on comp strand,
gNewPAM = geneGB.origin[g.index[1]:g.index[1]+3]; # will store new PAM sequence
else: # if on positive strand,
gNewPAM = revComp(geneGB.origin[g.index[0]-3:g.index[0]]); # will store new PAM sequence
elif pamType == "TTTV" and anchor > -1: # if using TTTV PAM and gRNA not completely excised,
if (g.index[1]+4 >= endRecode and g.comp) or (g.index[0]-4 >= startRecode and not g.comp): # if PAM is inside recoded region,
if not g.comp: # if on positive strand,
gNewPAM = wholeRecSeq[anchor+len(g.seq)-4:anchor+len(g.seq)]; # retrieve PAM upstream of gRNA sequence
else: # if on negative strand,
gNewPAM = revComp(wholeRecSeq[anchor+len(g.seq):anchor+len(g.seq)+4]); # retrieve PAM downstream of gRNA sequence, on comp strand
else: # if outside recoded region,
if g.comp: # if on comp strand,
gNewPAM = geneGB.origin[g.index[1]:g.index[1]+4]; # will store new PAM sequence
else: # if on positive strand,
gNewPAM = revComp(geneGB.origin[g.index[0]-4:g.index[0]]); # will store new PAM sequence
newOffScore = 0; # Assume gRNA was excised
if offTargetMethod == "cfd" and len(gOffSeq) > 22: # if using cfd and gRNA not completely excised,
newOffScore = pairScoreCFD(gOnSeq,gOffSeq,gNewPAM,pamType); # calculate pairwise off-target score
elif offTargetMethod == "hsu" and len(gOffSeq) > 22: # if using hsu and gRNA not completely excised,
newOffScore = pairScoreHsu(gOnSeq,gOffSeq,gNewPAM,pamType); # calculate pairwise off-target score
offScore = max(offScore,newOffScore); # set offscore for next iteration
for g in gRNATable: # find this gRNA in table
if "gRNAs not evaluated" not in gRNATableString and g[14] == gOnSeq: # if there is a gRNA table (no table if using custom gRNA) and gRNA found,
g[15] = gOffSeq; # store recoded sequence
g[16] = str(newOffScore); # store recoded sequence's pair score
else: # if gRNA is not entirely contained,
offScore = max(offScore,0); # assume recoded
for site in cutSeqs: # for every cut site being filtered,
cutCheck = cutCheck * ( findFirst(recodedSeq,site) < 0 ); # Find cut site, register in cutCheck
cutCheck = cutCheck * ( findFirst(recodedSeq,revComp(site)) < 0 ); # Find cut site in comp strand, register in cutCheck
if gcContent(recodedSeq[0:40]) < minGCEnd: # if the first bases don't have enough gc content
badStart = True;
trickyCount = 1
trickyLimit = 1000
tricky = isTricky(recodedSeq); # check if tricky to synthesize
bestRecodedSeq = recodedSeq if bestRecodedSeq==recodeSeq else bestRecodedSeq; # store this sequence if no recoded sequence has been stored as best
if offScore <= offScoreThreshold and cutCheck: # if parameters other than badStart are ok and this sequence has better start than previous best,
if not candidateFound or isTricky(bestRecodedSeq) > -1: # if no candidate found until now or current best is already tricky,
while tricky > -1 and tricky < len(recodedSeq)-9 and trickyCount < trickyLimit: # targeted recoding of problematic fragments
recodedSeq = recodedSeq[0:tricky-tricky%3] + optimizeCodons(recodedSeq[tricky-tricky%3:tricky-tricky%3+9]) + recodedSeq[tricky-tricky%3+9:]; # optimize codons.
new_tricky = isTricky(recodedSeq)
tricky = max(tricky,new_tricky) if new_tricky > -1 else new_tricky; # check if tricky to synthesize (only downstream to avoid going back to fix newly repeated sequences)
trickyCount += 1
if trickyCount % 10 == 0: # shuffle everything every 100 targeted recodings
recodedSeq = recodedSeq[0:tricky-tricky%3] + optimizeCodons(recodedSeq[tricky-tricky%3:]); # optimize codons of remainder
new_tricky = isTricky(recodedSeq)
tricky = max(tricky,new_tricky) if new_tricky > -1 else new_tricky; # check if tricky to synthesize (only downstream to avoid going back to fix newly repeated sequences)
bestRecodedSeq = recodedSeq; # make this new best
elif not tricky > -1 and gcContent(recodedSeq[0:40]) > gcContent(bestRecodedSeq[0:40]):
bestRecodedSeq = recodedSeq; # make this new best
if not tricky > -1:
candidateFound = True; # signal possible candidate found
count += 1; # advances iteration counter
if count > 200 or trickyCount >= trickyLimit: # if out of iteration limit,
if not candidateFound: # if no candidate without cut sequences found,
if tricky > -1:
log = log + "Warning: Recoded region for gene " + gene.label + " could not reshuffle enough to avoid repeated sequences or low-complexity regions.\n\n"; # log warning
else:
log = log + "Warning: Recoded region for gene " + gene.label + " could not reshuffle enough to fulfill the maximum off-target sgRNA score threshold, or avoid all the following cut sequences: \n" + str(cutSeqs) + "\n\n"; # log warning
break; # escape loop
#print [gOnSeq+"NGG",gOffSeq+gNewPAM,pairScoreCFD(gOnSeq,gOffSeq,gNewPAM,pamType),pairScoreHsu(gOnSeq,gOffSeq,gNewPAM,pamType)]
recodedSeq = nonRecodedStart + bestRecodedSeq + nonRecodedEnd; # adds initial bases from reading frame adjustment to best candidate
annRecoded = GenBankAnn(gene.label + " Recoded", "misc_feature", recodedSeq, False, [startRecode-frame,endRecode+frame2], annColors['recodedRegionColor']); # creates var to store finished recodedSeq as annotation
log = log + "Recoded region with size " + str(len(recodedSeq)) + " for gene " + gene.label + " selected.\n\n"; # logs this process finished
else: # if no recoded region necessary,
log = log + "Recoded region not deemed necessary for gene " + gene.label + ".\n\n"; # logs this process finished
if "gRNAs not evaluated" not in gRNATableString:
gRNATableString = "\n".join([",".join(g) for g in gRNATable]); # Creates string from grna array
gRNATableString = gRNATableString.replace(">=threshold",">="+str(offScoreThreshold)); # adds pairwise recoded threshold values
return {"out":annRecoded, "log":log, "gRNATable":gRNATableString}; # returns recoded region GenBankAnn object
"""
Chooses the region to be recoded to avoid gRNA targeting in already transfected
regions. Returns GenBankAnn object with recoded sequence and indexes between
which it should go. GenBank object given as argument should contain one gene
with geneName included in its label, and at least one annotation with "LHR" in
its label. Also needs all gRNAs to be annotated in the file. Returns empty
region if LHR end is at or downstream of gene | |
<filename>dev/testingUncertProp.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 20:35:03 2017
test functions that propagate uncertanty
@author: sebalander
"""
# %%
#import time
#import timeit
import numpy as np
import numdifftools as ndf
from calibration import calibrator as cl
import matplotlib.pyplot as plt
from dev import bayesLib as bl
from importlib import reload
# %% LOAD DATA
# input
plotCorners = False
# camera can be one of ['vca', 'vcaWide', 'ptz'] — the available datasets
camera = 'vcaWide'
# distortion model: one of ['rational', 'fisheye', 'poly']
modelos = ['poly', 'rational', 'fisheye']
model = modelos[2]
imagesFolder = "/home/sebalander/Desktop/Code/sebaPhD/resources/intrinsicCalib/" + camera + "/"
cornersFile = imagesFolder + camera + "Corners.npy"
patternFile = imagesFolder + camera + "ChessPattern.npy"
imgShapeFile = imagesFolder + camera + "Shape.npy"
# model data files (previously saved calibration results for this model)
distCoeffsFile = imagesFolder + camera + model + "DistCoeffs.npy"
linearCoeffsFile = imagesFolder + camera + model + "LinearCoeffs.npy"
tVecsFile = imagesFolder + camera + model + "Tvecs.npy"
rVecsFile = imagesFolder + camera + model + "Rvecs.npy"
# %% load data
imagePoints = np.load(cornersFile)
n = len(imagePoints) # number of images
#indexes = np.arange(n)
#
#np.random.shuffle(indexes)
#indexes = indexes
#
#imagePoints = imagePoints[indexes]
#n = len(imagePoints) # number of images
chessboardModel = np.load(patternFile)
imgSize = tuple(np.load(imgShapeFile))
#images = glob.glob(imagesFolder+'*.png')
# calibration input/output parameters
objpoints = np.array([chessboardModel]*n)
m = chessboardModel.shape[1] # number of points per chessboard
# load model specific data
distCoeffs = np.load(distCoeffsFile)
cameraMatrix = np.load(linearCoeffsFile)
rVecs = np.load(rVecsFile)#[indexes]
tVecs = np.load(tVecsFile)#[indexes]
#
#
## %% simplest test
#j = 0
#
#plt.figure()
#plt.scatter(chessboardModel[0,:,0], chessboardModel[0,:,1],
# marker='+', c='k', s=100)
#
#for j in range(0,n):
# xm, ym, Cm = cl.inverse(imagePoints[j,0], rVecs[j], tVecs[j],
# cameraMatrix, distCoeffs, model)
#
# plt.scatter(xm, ym, marker='x', c='b')
#
#
## %%
#ep = 0.01 # realtive standard deviation in parameters
#
## matriz de incerteza de deteccion en pixeles
#Cccd = np.repeat([np.eye(2,2)*0.1**2],imagePoints[j,0].shape[0], axis=0)
#Cf = np.diag(cameraMatrix[[0,1,0,1],[0,1,2,2]] * ep)**2
#Ck = np.diag((distCoeffs.reshape(-1) * ep )**2)
#Cr = np.diag((rVecs[j].reshape(-1) * ep )**2)
#Ct = np.diag((tVecs[j].reshape(-1) * ep )**2)
#
#
#Crt = [Cr, Ct]
#
#
## %%
#xpp, ypp, Cpp = cl.ccd2hom(imagePoints[j,0], cameraMatrix, Cccd, Cf)
#
#
## %% undistort
#xp, yp, Cp = cl.homDist2homUndist(xpp, ypp, distCoeffs, model, Cpp, Ck)
#
#
## %% project to plane z=0 from homogenous
#xm, ym, Cm = cl.xypToZplane(xp, yp, rVecs[j], tVecs[j], Cp, Crt)
#
#Caux = Cm
## %%
#xm, ym, Cm = cl.inverse(imagePoints[j,0], rVecs[j], tVecs[j],
# cameraMatrix, distCoeffs, model,
# Cccd, Cf, Ck, Crt)
#
#fig = plt.figure()
#ax = fig.gca()
#ax.scatter(chessboardModel[0,:,0], chessboardModel[0,:,1])
#cl.plotPointsUncert(ax, Cm, xm, ym, 'k')
#
#
#
#er = [xm, ym] - chessboardModel[0,:,:2].T
##
## %%
#statement1 = '''
#p1 = np.tensordot(er, Cm, axes=(0,1))[range(54),range(54)]
#p2 = p1.dot(er)[range(54),range(54)]
#'''
#
## %%
#statement2 = '''
#Er = np.empty_like(xp)
#for i in range(len(xp)):
# Er[i] = er[:,i].dot(Cm[i]).dot(er[:,i])
#'''
#
## %%
#statement3 = '''
#q1 = [np.sum(Cm[:,:,0]*er.T,1), np.sum(Cm[:,:,1]*er.T,1)];
#q2 = np.sum(q1*er,0)
#'''
## %%
#
#t1 = timeit.timeit(statement1, globals=globals(), number=10000) / 1e4
#
#t2 = timeit.timeit(statement2, globals=globals(), number=10000) / 1e4
#
#t3 = timeit.timeit(statement3, globals=globals(), number=10000) / 1e4
#
#print(t1/t3, t2/t3)
## %%
#ep = 0.0001 # relative standard deviation in parameters
#
## matriz de incerteza de deteccion en pixeles
#Cccd = np.repeat([np.eye(2,2)*0.1**2],imagePoints[j,0].shape[0], axis=0)
#Cf = np.diag(cameraMatrix[[0,1,0,1],[0,1,2,2]] * ep)**2
#Ck = np.diag((distCoeffs.reshape(-1) * ep )**2)
#
#
#fig = plt.figure()
#ax = fig.gca()
#
#ax.scatter(chessboardModel[0,:,0], chessboardModel[0,:,1],
# marker='+', c='k', s=100)
#
#for j in range(0,n,3):
# Cr = np.diag((rVecs[j].reshape(-1) * ep )**2)
# Ct = np.diag((tVecs[j].reshape(-1) * ep )**2)
#
# Crt = [Cr, Ct]
#
# xm, ym, Cm = cl.inverse(imagePoints[j,0], rVecs[j], tVecs[j],
# cameraMatrix, distCoeffs, model,
# Cccd, Cf, Ck, Crt)
#
# cl.plotPointsUncert(ax, Cm, xm, ym, 'k')
# %% poniendo ruido
def sacoParmams(pars):
    """Unpack a stacked parameter vector into (rvec, tvec, K, distCoeffs).

    `pars` layout along the first axis: [fx, fy, u0, v0, r0..r2, t0..t2, k...]
    (4 intrinsics, 3 rotation, 3 translation, reduced distortion coeffs).
    When `pars` is 2-D the second axis indexes Monte Carlo samples and every
    output gains that axis (hence the .T on the returns).
    """
    r = pars[4:7]
    t = pars[7:10]
    d = pars[10:]
    # BUG FIX: the original tested `model is 'poly' or model is 'rational'`.
    # `is` compares object identity, not string equality, and only worked by
    # accident of CPython string interning; use a membership test instead.
    if model in ('poly', 'rational'):
        # re-insert the distortion coefficients zeroed out by `sacaCeros`
        # (assumes d holds [k1, k2, k3] here — TODO confirm layout)
        d = np.concatenate([d[:2], np.zeros_like(d[:2]), d[2].reshape((1, -1))])
    if len(pars.shape) > 1:
        # vectorized case: one camera matrix per Monte Carlo sample
        k = np.zeros((3, 3, pars.shape[1]), dtype=float)
    else:
        k = np.zeros((3, 3), dtype=float)
    k[[0, 1, 2, 2], [0, 1, 0, 1]] = pars[:4]
    k[2, 2] = 1
    return r.T, t.T, k.T, d.T
def sdt2covs(covAll):
    """Split a flat vector of variances into three diagonal covariance blocks.

    Returns (Cf, Ck, Crt): intrinsics (first 4), distortion (from index 10 on)
    and extrinsics (indices 4..9), each as a diagonal matrix.
    """
    intrinsic_var = covAll[:4]
    extrinsic_var = covAll[4:10]
    distortion_var = covAll[10:]
    return np.diag(intrinsic_var), np.diag(distortion_var), np.diag(extrinsic_var)
j = 0  # pick one image to work with
import glob
imagenFile = glob.glob(imagesFolder+'*.png')[j]
plt.figure()
plt.imshow(plt.imread(imagenFile), origin='lower')
plt.scatter(imagePoints[j,0,:,0], imagePoints[j,0,:,1])
nSampl = int(3e4)  # number of Monte Carlo samples
ep = 1e-5  # relative standard deviation in parameters
stdIm = 1e-2  # detection noise in pixels (standard deviation)
# indices of the distortion coefficients that are non-zero for each model
sacaCeros = {'poly' : [0,1,4],
             'rational' : [0,1,4,5,6],
             'fisheye' : range(4)}
# stack all the parameters together: intrinsics, rotation, translation, distortion
parsAll = np.hstack([cameraMatrix[[0,1,0,1],[0,1,2,2]],
                     rVecs[j].reshape(-1),
                     tVecs[j].reshape(-1),
                     distCoeffs.reshape(-1)[sacaCeros[model]]])
# standard deviations
sAll = ep * parsAll
Cccd = np.repeat([np.eye(2) * stdIm**2], imagePoints[j,0].shape[0], axis=0)
Cf, Ck, Crt = sdt2covs(sAll**2)
# generate all the noise up front
noiseParam = np.random.randn(nSampl, sAll.shape[0])
noisePos = np.random.randn(nSampl, imagePoints[j,0].shape[0], imagePoints[j,0].shape[1])
# leave the perturbed values ready to use
parsGen = parsAll.reshape((1, -1)) + noiseParam * sAll.reshape((1, -1))
posIgen = imagePoints[j,0].reshape((1,-1,2)) + noisePos * stdIm
posMap = np.zeros_like(posIgen)
rG, tG, kG, dG = sacoParmams(parsGen.T)
# %% TEST EACH STEP. STEP 1: CCD TO DISTORTED HOMOGENOUS ===================
#### JACOBIAN COMPARISON ####
# ANALYTIC part: jacobians
Jd_i, Jd_k = cl.ccd2homJacobian(imagePoints[j,0], cameraMatrix)
# NUMERIC jacobians part
def step1vsX(X, cameraMatrix):
    """Step-1 mapping (CCD -> distorted homogeneous) as a function of the
    image points only, shaped for numeric differentiation."""
    pts = X.T
    xpp, ypp, _ = cl.ccd2hom(pts, cameraMatrix)
    return np.array([xpp, ypp])
def step1vsParams(params, imagePoints):
    """Step-1 mapping as a function of the 4 linear intrinsics
    [fx, fy, u0, v0], shaped for numeric differentiation."""
    K = np.zeros((3, 3))
    K[[0, 1, 0, 1], [0, 1, 2, 2]] = params
    K[2, 2] = 1
    xpp, ypp, _ = cl.ccd2hom(imagePoints, K)
    return np.array([xpp, ypp])
# compute with numeric differentiation (ndf is presumably numdifftools — confirm)
Jd_inumeric = ndf.Jacobian(step1vsX, order=4)(imagePoints[j,0].T, cameraMatrix).T
Jd_knumeric = ndf.Jacobian(step1vsParams, order=4,
                           method='central')(cameraMatrix[[0 , 1, 0, 1],
                                             [0, 1, 2, 2]],
                                             imagePoints[j,0]).transpose((2,0,1))
# indices of the jacobian entries that are not structurally zero
indJacNon0 = [[0,1,2,3], [0,1,0,1]]
jkanal = Jd_k.T[indJacNon0].reshape(-1)
jknumDif = np.abs(Jd_knumeric.T[indJacNon0].reshape(-1) - jkanal)
jianal = np.diag(Jd_i)
jinum = np.abs(Jd_inumeric[:,[0,1],[0,1]] - jianal)
# COMPARE JACOBIANS (analytic vs numeric, relative error)
plt.figure()
plt.title('Jacobianos relative error')
plt.plot(jkanal, jknumDif / np.abs(jkanal), '+', label='params')
plt.plot(jianal.reshape((-1,1)), (jinum / jianal).T, 'xk', label='posicion')
plt.yscale('log')
plt.legend(loc=0)
#### COVARIANCE COMPARISON ####
# %% monte carlo vs analytic: compare covariances over all images
mahalanobis = np.zeros([n, m])
for j in range(n):
    # ANALYTIC part: uncertainty propagation
    xpp, ypp, Cpp = cl.ccd2hom(imagePoints[j,0], cameraMatrix, Cccd=Cccd, Cf=Cf)
    posIgen = imagePoints[j,0].reshape((1,-1,2)) + noisePos * stdIm
    xyp = np.zeros((nSampl,m,2))
    # MONTE CARLO part
    for i in range(nSampl):
        # posMap[i,:,0], posMap[i,:,1], _ = cl.ccd2hom(posIgen[i], cameraMatrix)
        xyp[i,:,0], xyp[i,:,1], _ = cl.ccd2hom(posIgen[i], kG[i])
    # # COMPARE VARIANCES
    # # means and variances of the point clouds
    # posIMean = np.mean(posIgen, axis=0)
    # dif = (posIgen - posIMean).T
    # posIVar = np.sum([dif[0] * dif, dif[1] * dif], axis=-1) / (nSampl - 1)
    # posIVar = posIVar.transpose((2,0,1))
    xypMean = np.mean(xyp, axis=0)
    dif = (xyp - xypMean).T
    xypVar = np.sum([dif[0] * dif, dif[1] * dif], axis=-1).T / (nSampl - 1)
    # posMapVar = posMapVar.transpose((2,0,1))
    # mahalanobis-style distance between sampled and predicted covariance
    mahalanobis[j] = [bl.varMahal(xypVar[i], nSampl, Cpp[i]) for i in range(m)]
plt.figure()
# BUG FIX: the `normed` kwarg was deprecated in matplotlib 2.1 and removed in
# 3.1; `density=True` is the drop-in replacement for a normalized histogram.
nh, bins, patches = plt.hist(np.reshape(mahalanobis, -1), 100, density=True)
# overlay the chi-squared(3) pdf the distances should follow
ch2pdf = bl.chi2.pdf(bins, 3)
plt.plot(bins, ch2pdf)
# %%
#fig = plt.figure()
#ax = fig.gca()
#ax.set_title('pos generadas')
#ax.scatter(posIgen[:,:,0], posIgen[:,:,1], marker='.', c='b', s=1)
#cl.plotPointsUncert(ax, Cccd, imagePoints[j,0,:,0], imagePoints[j,0,:,1], 'k')
#cl.plotPointsUncert(ax, posIVar, posIMean[:,0], posIMean[:,1], 'b')
# compare analytic vs sampled covariance entries (uses the last loop's j)
cAnal = Cpp[:, [0,1,0],[0,1,1]]
cDif = np.abs(xypVar[:, [0,1,0],[0,1,1]] - cAnal)
plt.plot(cAnal.T, cDif.T / np.linalg.norm(Cpp, axis=(1,2)), '+b')
fig = plt.figure()
ax = fig.gca()
ax.set_title('propagacion')
ax.scatter(posMap[:,:,0], posMap[:,:,1], marker='.', c='b', s=1)
cl.plotPointsUncert(ax, Cpp, xpp, ypp, 'k')
cl.plotPointsUncert(ax, xypVar, xypMean[:,0], xypMean[:,1], 'b')
# quick sanity ratios (interactive inspection only; results are discarded)
xypVar / Cpp
xypMean[:,0] / xpp
xypMean[:,1] / ypp
# %% TEST EACH STEP. STEP 2: HOMOGENOUS UNDISTORTION =======================
reload(cl)  # NOTE(review): bare reload() is Python 2; py3 needs importlib.reload
# ANALYTIC part: uncertainty propagation
xp, yp, Cp = cl.homDist2homUndist(xpp, ypp, distCoeffs, model, Cpp=Cpp, Ck=Ck)
# ANALYTIC part: jacobians
_, _, Jh_d, Jh_k = cl.homDist2homUndist_ratioJacobians(xpp, ypp, distCoeffs, model)
# MONTE CARLO part
# generate the values to use
# matrix that rotates/scales unit noise into each point's covariance ellipse
convEllip2 = np.array([cl.unit2CovTransf(c) for c in Cpp])
# apply the rotation/scaling
xypertub2 = (convEllip2.reshape((1,-1,2,2)) *
             noisePos.reshape((nSampl,-1,1,2))).sum(-1)
# apply the perturbation
xPPgen = xpp.reshape((1, -1)) + xypertub2[:,:,0]
yPPgen = ypp.reshape((1, -1)) + xypertub2[:,:,1]
xP = np.zeros_like(xPPgen)
yP = np.zeros_like(yPPgen)
# run all the Monte Carlo mappings, one iteration per sample
for i in range(nSampl):
    xP[i], yP[i], _ = cl.homDist2homUndist(xPPgen[i], yPPgen[i], dG[i], model)
# NUMERIC jacobians part
def step2vsX(X, distCoeffs, model):
    """Step-2 mapping (undistortion) as a function of the distorted
    homogeneous coordinates, shaped for numeric differentiation."""
    u, v = X
    xp, yp, _ = cl.homDist2homUndist(u, v, distCoeffs, model)
    return np.array([xp, yp])
def step2vsParams(distCoeffs, X, model):
    """Step-2 mapping as a function of the distortion coefficients,
    shaped for numeric differentiation."""
    u, v = X
    xp, yp, _ = cl.homDist2homUndist(u, v, distCoeffs, model)
    return np.array([xp, yp])
X = np.array([xpp, ypp])
Jh_dnumeric = ndf.Jacobian(step2vsX)(X, distCoeffs, model).T
Jh_knumeric = ndf.Jacobian(step2vsParams)(distCoeffs, X, model).transpose((2,0,1))
# COMPARE JACOBIANS
plt.figure()
plt.title('Jacobianos')
plt.plot(Jh_d.flat, np.abs(Jh_dnumeric - Jh_d).reshape(-1), '+', label='posicion')
# NOTE(review): the line below plots the STEP-1 parameter jacobians (Jd_k*)
# in the step-2 figure; presumably Jh_k / Jh_knumeric were intended — confirm.
plt.plot(Jd_knumeric.flat, np.abs(Jd_knumeric - Jd_k).reshape(-1), 'x', label='params')
plt.legend(loc=0)
plt.yscale('log')
#plt.figure()
#plt.title('Jacobianos 2')
#plt.plot(Jh_knumeric.flat, Jh_k.flat, '+', label='posicion')
# COMPARE VARIANCES
# take the mean and covariance of each point cloud
def mediaCovars(x, y):
    """Sample mean and covariance of paired coordinate clouds.

    x, y have shape (nSamples, nPoints); the first axis indexes samples.
    Returns (xMean, yMean, C) where C has shape (nPoints, 2, 2) and holds the
    per-point 2x2 covariance (normalized by nSamples, not nSamples - 1).
    """
    xMean = np.mean(x, axis=0)
    yMean = np.mean(y, axis=0)
    dx = x - xMean
    dy = y - yMean
    cross = dx * dy
    cov = np.array([[dx * dx, cross], [cross, dy * dy]]).mean(2).T
    return xMean, yMean, cov
xPPm, yPPm, varPP = mediaCovars(xPPgen, yPPgen)
xPm, yPm, varP = mediaCovars(xP, yP)
#fig = plt.figure()
#ax = fig.gca()
#ax.set_title('pos generadas')
#ax.plot(xPPgen, yPPgen, '.b')
#cl.plotPointsUncert(ax, Cpp, xpp, ypp, 'k')
#cl.plotPointsUncert(ax, varPP, xPPm, yPPm, 'b')
fig = plt.figure()
ax = fig.gca()
ax.set_title('propagacion')
ax.scatter(xP, yP, marker='.', c='b', s=1)
cl.plotPointsUncert(ax, Cp, xp, yp, 'k')
cl.plotPointsUncert(ax, varP, xPm, yPm, 'b')
# %% TEST EACH STEP. STEP 3: PROJECT TO MAP, UNDO ROTOTRASLATION
# ANALYTIC part: uncertainty propagation
xm, ym, Cm = cl.xypToZplane(xp, yp, rVecs[j], tVecs[j], Cp=Cp, Crt=Crt)
# ANALYTIC part: jacobians
JXm_Xp, JXm_rtV = cl.jacobianosHom2Map(xp, yp, rVecs[j], tVecs[j])
# MONTE CARLO part
# leave the perturbed values ready to use
# matrix that rotates/scales unit noise into each point's covariance ellipse
convEllip3 = np.array([cl.unit2CovTransf(c) for c in Cp])
# apply the rotation/scaling
xypertub3 = (convEllip3.reshape((1,-1,2,2)) *
             noisePos.reshape((nSampl,-1,1,2))).sum(-1)
xPgen = xp.reshape((1, -1)) + xypertub3[:,:,0]
yPgen = yp.reshape((1, -1)) + xypertub3[:,:,1]
xM = np.zeros_like(xPgen)
yM = np.zeros_like(yPgen)
# run all the Monte Carlo mappings, one (comment truncated in source)
<filename>roll/ast.py
import functools
import logging
import random
from abc import ABC, abstractmethod
from copy import copy
from enum import Enum, auto

import roll.exceptions as rollerr
# import * imports all tokens, operators, the Assignment class, and the root Program class
__all__ = [
    "TokenNumber",
    "TokenString",
    "TokenRoll",
    "TokenVariable",
    "TokenLet",
    "TokenFunction",
    "TokenApplication",
    "TokenOperator",
    "TokenTernary",
    "TokenCase",
    "Program",
    "Assignment",
    "Operator",
]
# Hard cap on the number of dice rolled by one roll token (see TokenRoll.reduce).
MAX_ROLLS = 1000
def isfunction(token):
    """Return True when *token* is a TokenFunction instance."""
    return isinstance(token, TokenFunction)
class Environment:
    """Evaluation environment: the root program, a closure mapping variable
    identifiers to bound expressions, and a trace of tokens being reduced."""

    def __init__(self, root):
        self.root = root
        self.closure = {}  # variable identifier -> bound expression
        self.trace = []    # stack of tokens, used for error reporting

    def copy(self):
        """Shallow copy: the closure dict is duplicated, the trace is shared."""
        duplicate = Environment(self.root)
        duplicate.closure = dict(self.closure)
        duplicate.trace = self.trace
        return duplicate
class HashCounter:
    """A mutating object that records the variable IDs to be used in general
    hashing and eta-reduction hashing.

    Ordinary variable IDs count up from 0; scope IDs count down from -1, so
    the two ranges can never collide.
    """

    def __init__(self):
        self.__next_id = 0
        self.__next_scope_id = -1
        self.trace = []  # stack of tokens, used for error reporting

    @property
    def next_id(self):
        """The ID that the next pop_id() call will hand out."""
        return self.__next_id

    def pop_id(self):
        """Return the current variable ID and advance the counter."""
        current = self.__next_id
        self.__next_id += 1
        return current

    @property
    def next_scope_id(self):
        """The ID that the next pop_scope_id() call will hand out."""
        return self.__next_scope_id

    def pop_scope_id(self):
        """Return the current scope ID and advance (decrement) the counter."""
        current = self.__next_scope_id
        self.__next_scope_id -= 1
        return current
def trace(func):
    """A decorator that updates the environment/counter to know its position
    in the current expression.

    args[0] is the token object (self); args[1] is the Environment or
    HashCounter whose ``trace`` list acts as a stack of tokens being
    processed.  The token is deliberately NOT popped when ``func`` raises:
    the roll.exceptions errors capture ``trace`` to report where in the
    expression the failure happened.
    """

    # BUG FIX: without functools.wraps the wrapper hid the wrapped function's
    # metadata (__name__, __doc__) and — because wraps also copies __dict__ —
    # dropped the __isabstractmethod__ flag set by @abstractmethod when
    # @trace is stacked on top of it (as in IToken), silently disabling ABC
    # enforcement for those methods.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        args[1].trace.append(args[0])
        out = func(*args, **kwargs)
        args[1].trace.pop()
        return out

    return wrapper
class IToken(ABC):
    """Abstract base class for every AST token of the roll language."""

    @trace
    @abstractmethod
    def reduce(self, env, counter):
        """Returns a fully reduced version of the token"""
        raise NotImplementedError

    @abstractmethod
    def substitute(self, old_to_new):
        """Returns a deep copy of the token where each variable with an ID in the map is replaced by a copy with the new ID"""
        raise NotImplementedError

    @abstractmethod
    def __str__(self):
        """Recursively constructs a string representation of the token"""
        raise NotImplementedError

    def dereference(self, env):
        """Attempts to deference the token if it is a pointer"""
        # Default: most tokens are not pointers, so they resolve to themselves.
        return self

    @trace
    @abstractmethod
    def hash_vars(self, counter, map):
        """Recursively sets the IDs of variables to be unique"""
        raise NotImplementedError
class IPure(ABC):
    """Mixin for tokens whose reduced value maps onto a Python primitive."""

    @property
    @abstractmethod
    def pure(self):
        """Returns the value of a reduced token in Python primitives"""
        raise NotImplementedError
class TokenNumber(IToken, IPure):
    """A literal numeric value; already fully reduced."""

    def __init__(self, value):
        self.__value = value
        # BUG FIX: logging.debug(msg, *args) %-formats msg with args; the
        # original passed `self` as a format argument with no placeholder in
        # the message, which raises a formatting error whenever DEBUG-level
        # logging is actually enabled.
        logging.debug("%s %s", self.__class__.__name__, self)

    @trace
    def reduce(self, env, counter):
        # Numbers are values: nothing to reduce.
        return self

    def substitute(self, _):
        return TokenNumber(self.__value)

    @trace
    def hash_vars(self, counter, map):
        pass

    def __str__(self):
        return str(self.__value)

    @property
    def pure(self):
        return self.__value

    def __eq__(self, other):
        return isinstance(other, TokenNumber) and self.__value == other.__value

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None (unhashable); restore
        # hashability consistent with value equality.
        return hash(self.__value)
class TokenString(IToken, IPure):
    """A literal string value; already fully reduced."""

    def __init__(self, value):
        self.__value = value
        # BUG FIX: logging.debug %-formats its first argument with the rest;
        # the original passed `self` without a matching placeholder, which
        # breaks log emission whenever DEBUG-level logging is enabled.
        logging.debug("%s %s", self.__class__.__name__, self)

    @trace
    def reduce(self, env, counter):
        # Strings are values: nothing to reduce.
        return self

    def substitute(self, _):
        return TokenString(self.__value)

    @trace
    def hash_vars(self, counter, map):
        pass

    def __str__(self):
        # Escape embedded double quotes so the rendered form can be re-parsed.
        unescaped = '"'
        escaped = r"\""
        return f'"{self.__value.replace(unescaped, escaped)}"'

    @property
    def pure(self):
        return self.__value

    def __eq__(self, other):
        return isinstance(other, TokenString) and self.__value == other.__value

    def __hash__(self):
        # Keep hashability consistent with the custom __eq__.
        return hash(self.__value)
class TokenRoll(IToken):
    """A dice roll: `count` dice with `sides` sides each (e.g. 3d6)."""

    def __init__(self, count, sides):
        self.count = count  # token reducing to the number of dice
        self.sides = sides  # token reducing to the number of sides
        # BUG FIX: logging.debug %-formats its first argument with the rest;
        # the original passed `self` without a placeholder, which breaks log
        # emission whenever DEBUG-level logging is enabled.
        logging.debug("%s %s", self.__class__.__name__, self)

    @trace
    def reduce(self, env, counter):
        """Validate sides and count, then roll and return a TokenNumber."""
        sides = self.sides.reduce(env, counter).pure
        if sides == 0:
            raise rollerr.ZeroDiceSidesError(env.trace)
        if sides < 0:
            raise rollerr.NegativeDiceSidesError(env.trace, sides)
        if int(sides) != sides:
            raise rollerr.FloatingPointDiceSidesError(env.trace, sides)
        count = self.count.reduce(env, counter).pure
        if count == 0:
            raise rollerr.ZeroDiceCountError(env.trace)
        if count < 0:
            raise rollerr.NegativeDiceCountError(env.trace, count)
        if int(count) != count:
            raise rollerr.FloatingPointDiceCountError(env.trace, count)
        # BUG FIX: the validation above accepts integral floats (e.g. 6.0),
        # but range() and random.choices(k=...) require real ints and raised
        # TypeError for them — normalize after validation.
        sides = int(sides)
        count = int(count)
        if sides == 1:
            # one-sided dice always total `count`; no randomness needed
            return TokenNumber(count)
        if count > MAX_ROLLS:
            raise rollerr.ExcessiveDiceRollsError(env.trace)
        return TokenNumber(sum(random.choices(range(1, sides + 1), k=count)))

    def substitute(self, old_to_new):
        return TokenRoll(
            self.count.substitute(old_to_new), self.sides.substitute(old_to_new)
        )

    @trace
    def hash_vars(self, counter, map):
        self.count.hash_vars(counter, map)
        self.sides.hash_vars(counter, map)

    def __str__(self):
        # Parenthesize non-literal operands so the output re-parses correctly.
        count = str(self.count)
        sides = str(self.sides)
        if not isinstance(self.count, TokenNumber):
            count = f"({count})"
        if not isinstance(self.sides, TokenNumber):
            sides = f"({sides})"
        return f"{count}d{sides}"
class TokenVariable(IToken):
    """A named variable; reduction resolves its identifier via the closure."""

    def __init__(self, name, id=None):
        self.name = name
        # Before hash_vars assigns real IDs, fall back to a name-based one.
        self.identifier = hash(name) if id is None else id
        logging.debug(self.__class__.__name__, self)

    @trace
    def reduce(self, env, counter):
        return self.dereference(env).reduce(env, counter)

    def substitute(self, old_to_new):
        replacement = old_to_new.get(self.identifier, self.identifier)
        return TokenVariable(self.name, replacement)

    @trace
    def hash_vars(self, counter, map):
        try:
            self.identifier = map[self.name]
        except KeyError:
            raise rollerr.UndefinedIdentifierError(counter.trace, self.name)

    def __str__(self):
        return f"{self.name}"  # _{self.identifier}"

    def dereference(self, env):
        """Follow the closure binding (recursively, for chained variables)."""
        try:
            return env.closure[self.identifier].dereference(env)
        except KeyError:
            raise rollerr.UndefinedIdentifierError(env.trace, self.name)
class TokenLet(IToken):
    """declarations = [Assignment*]

    A let-binding: reduces `expression` in an environment extended with each
    declaration's identifier bound to its (unreduced) expression.
    """

    def __init__(self, declarations, expression):
        self.declarations = declarations
        self.expression = expression
        logging.debug(self.__class__.__name__, self)

    @trace
    def reduce(self, env, counter):
        new_env = self.update_env(env)
        return self.expression.reduce(new_env, counter)

    def substitute(self, old_to_new):
        # Remove variables from the mapping that are hidden by let declarations
        old_to_new = old_to_new.copy()
        for decl in self.declarations:
            old_to_new.pop(decl.identifier, None)
        # Copy the declarations
        new_decls = []
        for decl in self.declarations:
            new_decls.append(
                Assignment(
                    decl.name, decl.expression.substitute(old_to_new), decl.identifier
                )
            )
        # Copy the let body
        new_expr = self.expression.substitute(old_to_new)
        # Return reconstructed let statement
        return TokenLet(new_decls, new_expr)

    @trace
    def hash_vars(self, counter, map):
        new_map = map.copy()
        # First pass: give every declared name an ID up front so declaration
        # bodies can reference each other (mutual recursion).
        for i in range(len(self.declarations)):
            decl = self.declarations[i]
            new_map[decl.name] = counter.next_id
            self.declarations[i].identifier = counter.next_id
            counter.pop_id()
        # Second pass: hash the declaration bodies and the let body against
        # the fully populated scope.
        for decl in self.declarations:
            decl.expression.hash_vars(counter, new_map)
        self.expression.hash_vars(counter, new_map)

    def update_env(self, env):
        # Bind each declaration's (unreduced) expression in a copied closure.
        new_env = env.copy()
        for decl in self.declarations:
            new_env.closure[decl.identifier] = decl.expression
        return new_env

    def __str__(self):
        return f"^{', '.join([f'{d.name}={d.expression}' for d in self.declarations])}${self.expression}"
class TokenFunction(IToken):
    """A single-argument function literal; functions are values until applied,
    so reduction is a no-op."""

    def __init__(self, arg_name, expression, arg_id=None):
        self.arg_name = arg_name
        self.arg_id = arg_id if arg_id is not None else hash(arg_name)
        self.expression = expression
        logging.debug(self.__class__.__name__, self)

    @trace
    def reduce(self, env, counter):
        # A function is already a value.
        return self

    def substitute(self, old_to_new):
        # The function argument shadows any outer variable with the same ID,
        # so drop that entry from the mapping before descending into the body.
        if self.arg_id in old_to_new:
            old_to_new = {key: val for key, val in old_to_new.items()
                          if key != self.arg_id}
        body = self.expression.substitute(old_to_new)
        return TokenFunction(self.arg_name, body, self.arg_id)

    @trace
    def hash_vars(self, counter, map):
        scoped = dict(map)
        scoped[self.arg_name] = counter.next_id
        self.arg_id = counter.next_id
        counter.pop_id()
        self.expression.hash_vars(counter, scoped)

    def __str__(self):
        return f"({self.arg_name}->{self.expression})"
class TokenApplication(IToken):
    """Application of a function expression (lhs) to argument expressions (rhs)."""

    def __init__(self, lhs, rhs):
        self.lhs = lhs
        self.rhs = rhs  # list of argument expressions
        logging.debug(self.__class__.__name__, self)

    @trace
    def reduce(self, env, counter):
        # Apply as many arguments as possible (assumes the application is valid)
        # May result in a partially-applied function
        # Create declarations from lhs function(s)
        out = self.lhs.dereference(env)
        decls = []
        for expr in self.rhs:
            decls.append(Assignment(out.arg_name, expr, id=out.arg_id))
        # NOTE(review): `out` advances past only one function level here
        # regardless of len(rhs); confirm whether multi-argument application
        # relies on this or whether the advance belongs inside the loop.
        out = (
            out.expression
        )  # Note: this used to attempt to .dereference() - I can't remember why but it seems to work without it
        # Substitute variables for scoped variables (allows recursion)
        substitutions = {}
        env = env.copy()
        for decl in decls:
            substitutions[decl.identifier] = counter.next_scope_id
            env.closure[counter.next_scope_id] = decl.expression
            counter.pop_scope_id()
        out = out.substitute(substitutions)
        # Check if the application was partial
        # If it was, extract the rest of the function arguments and put the Let token at the bottom
        if isfunction(out):
            func_path = [out]
            out = out.expression.dereference(env)
            while isfunction(out):
                func_path.append(out)
                out = out.expression.dereference(env)
            # NOTE(review): the `arg_id - 1000` offsets presumably keep the
            # rebuilt argument IDs from colliding with the originals — confirm.
            func_path[-1] = TokenFunction(
                func_path[-1].arg_name,
                TokenLet(decls, out),
                func_path[-1].arg_id - 1000,
            )
            for i in range(len(func_path) - 1, 0, -1):
                func_path[i - 1] = TokenFunction(
                    func_path[i - 1].arg_name,
                    func_path[i],
                    func_path[i - 1].arg_id - 1000,
                )
            result = func_path[0]
        # If it wasn't, return the fully applied function expression
        else:
            result = TokenLet(decls, out)
        return result.reduce(env, counter)

    def substitute(self, old_to_new):
        lhs = self.lhs.substitute(old_to_new)
        rhs = [expr.substitute(old_to_new) for expr in self.rhs]
        return TokenApplication(lhs, rhs)

    @trace
    def hash_vars(self, counter, map):
        self.lhs.hash_vars(counter, map)
        for expr in self.rhs:
            expr.hash_vars(counter, map)

    def __str__(self):
        return f"{self.lhs} {' '.join([str(expr) for expr in self.rhs])}"
class Operator(Enum):
    """Every unary and binary operator of the roll language."""

    EQ = auto()
    NE = auto()
    GT = auto()
    GE = auto()
    LT = auto()
    LE = auto()
    AND = auto()
    OR = auto()
    ADD = auto()
    SUB = auto()
    MUL = auto()
    DIV = auto()
    POW = auto()
    NOT = auto()
    NEG = auto()

    def __str__(self):
        """The operator's surface syntax (note SUB and NEG both render '-')."""
        symbols = {
            Operator.EQ: "==",
            Operator.NE: "!=",
            Operator.GE: ">=",
            Operator.GT: ">",
            Operator.LE: "<=",
            Operator.LT: "<",
            Operator.AND: "&",
            Operator.OR: "|",
            Operator.ADD: "+",
            Operator.SUB: "-",
            Operator.MUL: "*",
            Operator.DIV: "/",
            Operator.POW: "^",
            Operator.NOT: "!",
            Operator.NEG: "-",
        }
        return symbols[self]
class TokenOperator(IToken):
mapping = {
Operator.EQ: lambda xs: int(xs[0] == xs[1]),
Operator.NE: lambda xs: int(xs[0] != xs[1]),
Operator.GE: lambda xs: int(xs[0] >= xs[1]),
Operator.GT: lambda xs: int(xs[0] > xs[1]),
Operator.LE: lambda xs: int(xs[0] <= xs[1]),
Operator.LT: lambda xs: int(xs[0] < xs[1]),
Operator.AND: lambda xs: int(xs[0] and xs[1]),
Operator.OR: lambda xs: int(xs[0] or xs[1]),
Operator.ADD: lambda xs: xs[0] + xs[1],
Operator.SUB: lambda xs: xs[0] - xs[1],
Operator.MUL: lambda xs: xs[0] * xs[1],
Operator.DIV: lambda xs: xs[0] / xs[1],
Operator.POW: lambda xs: xs[0] ** xs[1],
Operator.NOT: lambda xs: 0 if xs[0] else 1,
Operator.NEG: lambda xs: -xs[0],
}
def __init__(self, op, args):
self.op = op
self.args = args
logging.debug(self.__class__.__name__, self)
@trace
def reduce(self, env, counter):
try:
value = TokenOperator.mapping[self.op](
[a.reduce(env, counter).pure for a in self.args]
)
except ZeroDivisionError:
raise rollerr.ZeroDivisionError(env.trace)
if | |
import os
import sys
import time
import numpy as np
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric
from torch_geometric.nn import (NNConv, GMMConv, GraphConv, Set2Set)
from torch_geometric.nn import (SplineConv, graclus, max_pool, max_pool_x, global_mean_pool)
#from neuralnet_pytorch.metrics import chamfer_loss
import trimesh
from visualization_utils import plot_mesh_3d
from models import *
from sklearn.neighbors import KDTree
def compute_traingle_area(a, b, c):
    """Area of the triangle (a, b, c) via Heron's formula.

    a, b, c: 1-D coordinate tensors.  (The function name keeps the original
    spelling because callers reference it.)
    """
    side1 = torch.sum((a - b) ** 2) ** (1/2)
    side2 = torch.sum((a - c) ** 2) ** (1/2)
    side3 = torch.sum((c - b) ** 2) ** (1/2)
    p = (side1 + side2 + side3) / 2
    squared = p * (p - side1) * (p - side2) * (p - side3)
    # ROBUSTNESS FIX: floating-point rounding can push the Heron product
    # slightly below zero for degenerate/thin triangles, which made the
    # square root return NaN; clamp so such triangles report zero area.
    return torch.clamp(squared, min=0) ** (1/2)
def compute_normal(a, b, c, mc):
    """Normal of the triangle (a, b, c), oriented so that it does NOT face
    the mass center `mc` (flipped when its dot with mc - a is positive)."""
    candidate = torch.cross(a - b, a - c)
    faces_center = torch.dot(mc - a, candidate) > 0
    return -candidate if faces_center else candidate
def compute_lift(data_instance, answers):
    """Approximate lift by summing pressure times the z-component of
    area-weighted pseudo-triangle normals built from nearest neighbours.

    answers: per-vertex predictions; column 0 is treated as pressure.
    NOTE(review): the multiplier tensor is hard-coded to 'cuda:0', so this
    helper requires a CUDA device — confirm before reusing on CPU.
    """
    fld_tree = KDTree(data_instance.x.cpu().detach())
    mass_center = torch.mean(data_instance.x, axis=0)
    # k=4 neighbours: the point itself plus 3 others forming a pseudo-triangle
    distances, indeces = fld_tree.query(data_instance.x.cpu().detach(), k=4)
    areas = [compute_traingle_area(data_instance.x[a], data_instance.x[b], data_instance.x[c])
             for a, b, c in indeces[:, 1:]]
    normals = [compute_normal(data_instance.x[a], data_instance.x[b], data_instance.x[c], mass_center)
               for a, b, c in indeces[:, 1:]]
    # z-component (index 2) of each area-weighted normal
    mult = torch.tensor([a * n[2] for a, n in zip(areas, normals)]).to('cuda:0')
    lift = answers[:, 0] * mult
    # degenerate neighbour triangles can yield NaNs; they are ignored here
    return torch.sum(lift[~torch.isnan(lift)])


# areas = [torch.mean(torch.tensor([compute_traingle_area(data_instance.x[data_instance.faces[f][0]],
#                                                         data_instance.x[data_instance.faces[f][1]],
#                                                         data_instance.x[data_instance.faces[f][2]])
#                                   for f in faces if f > 0])) for faces in data_instance.vertex_faces[0] ]
def compute_lift_faces(data_instance, answers, axis=0):
    """Integrate predicted pressure over mesh faces to get the force
    component along `axis`.

    answers: per-vertex predictions; column 0 is pressure.  A face's pressure
    is the mean of its three vertex pressures.
    """
    mesh = trimesh.Trimesh(vertices=data_instance.x.cpu().detach(), faces=data_instance.face.t().cpu().detach())
    #mass_center = torch.mean(data_instance.x, axis=0)
    pressures = torch.mean(answers[data_instance.face, 0], axis=0)
    # PORTABILITY FIX: the multiplier was placed on a hard-coded 'cuda:0',
    # which crashes on CPU-only hosts and multi-GPU setups; follow the device
    # of the predictions instead.
    mult = torch.tensor( - mesh.area_faces * mesh.face_normals[:, axis], dtype=torch.float).to(answers.device)
    lift = torch.mul(pressures, mult)
    # degenerate faces can yield NaNs; ignore them
    return torch.sum(lift[~torch.isnan(lift)])
def compute_lift_faces_signs(data_instance, answers, axis=0):
    """Like compute_lift_faces, but each face's contribution is flipped by
    the sign of (summed face vertices · face normal) to compensate for
    inconsistent face winding."""
    mesh = trimesh.Trimesh(vertices=data_instance.x.cpu().detach(), faces=data_instance.face.t().cpu().detach())
    #mass_center = torch.mean(data_instance.x, axis=0)
    pressures = torch.mean(answers[data_instance.face, 0], axis=0)
    # sign of (vertex-sum · normal) per face; summing instead of averaging
    # the vertices only scales the dot product, so the sign is unchanged
    signs = np.sign(np.sum(np.sum(mesh.vertices[mesh.faces], axis=1) * mesh.face_normals, axis=1))
    # PORTABILITY FIX: hard-coded 'cuda:0' replaced with the predictions'
    # device so the helper also works on CPU-only hosts.
    mult = torch.tensor( - mesh.area_faces * mesh.face_normals[:, axis] * signs, dtype=torch.float).to(answers.device)
    lift = torch.mul(pressures, mult)
    return torch.sum(lift[~torch.isnan(lift)])
def compute_lift_faces_diff(data_instance, answers, axis=0):
    """Differentiable face-based lift: the area-scaled face normal comes from
    a cross product of edge vectors, so gradients flow to both the pressures
    and the vertex positions."""
    pressures = torch.mean(answers[data_instance.face, 0], axis=0)
    # TODO: change to x if needed (original note kept)
    pos = data_instance.x
    edge1 = pos[data_instance.face[1]] - pos[data_instance.face[0]]
    edge2 = pos[data_instance.face[2]] - pos[data_instance.face[0]]
    # BUG FIX: Tensor.cross without an explicit dim picks the FIRST axis of
    # size 3, so a mesh with exactly 3 faces was crossed over the face axis
    # instead of the xyz axis; pin dim=1 (the coordinate axis).
    cross_prod = torch.cross(edge1, edge2, dim=1)
    # |cross|/2 is the face area; component `axis` is the projected area
    mult = -cross_prod[:, axis] / 2
    lift = torch.mul(pressures, mult)
    return torch.sum(lift[~torch.isnan(lift)])
def compute_lift_faces_diff_signs(data_instance, answers, axis=0):
    """Differentiable face-based lift with per-face orientation signs taken
    from sign(v0 · cross), compensating for inconsistent face winding."""
    pressures = torch.mean(answers[data_instance.face, 0], axis=0)
    # TODO: change to x if needed (original note kept)
    pos = data_instance.x
    edge1 = pos[data_instance.face[1]] - pos[data_instance.face[0]]
    edge2 = pos[data_instance.face[2]] - pos[data_instance.face[0]]
    # BUG FIX: pin the cross-product axis; without dim=, a mesh with exactly
    # 3 faces is crossed over the wrong axis (first axis of size 3).
    cross_prod = torch.cross(edge1, edge2, dim=1)
    signs = torch.sign(torch.sum(pos[data_instance.face[0]] * cross_prod, axis=1))
    mult = - cross_prod[:, axis] * signs / 2
    lift = torch.mul(pressures, mult)
    return torch.sum(lift[~torch.isnan(lift)])
def compute_signs_for_loss(ply_mesh):
    """Per-face orientation signs for a plyfile-style mesh: +1 when the face
    winding (edge cross product) agrees with the averaged vertex normals,
    -1 when flipped, 0 when orthogonal."""
    faces = ply_mesh['face']['vertex_indices']
    pos = np.hstack((ply_mesh['vertex']['x'][:, None],
                     ply_mesh['vertex']['y'][:, None],
                     ply_mesh['vertex']['z'][:, None]))
    normals = np.hstack((ply_mesh['normals']['x'][:, None],
                         ply_mesh['normals']['y'][:, None],
                         ply_mesh['normals']['z'][:, None]))
    winding_normal = np.cross(pos[faces[:, 1]] - pos[faces[:, 0]],
                              pos[faces[:, 2]] - pos[faces[:, 0]])
    averaged_normal = np.mean(normals[faces], axis=1)
    return np.sign(np.sum(averaged_normal * winding_normal, axis=1))
def compute_lift_faces_diff_mem_signs(data_instance, answers, signs, axis=0):
    """Differentiable face-based lift using precomputed per-face `signs`
    (e.g. from compute_signs_for_loss).

    NOTE(review): unlike the sibling helpers this multiplies by +cross/2,
    not -cross/2; presumably the sign convention is folded into `signs` —
    confirm against the caller.
    """
    pressures = torch.mean(answers[data_instance.face, 0], axis=0)
    # TODO: change to x if needed (original note kept)
    pos = data_instance.x
    edge1 = pos[data_instance.face[1]] - pos[data_instance.face[0]]
    edge2 = pos[data_instance.face[2]] - pos[data_instance.face[0]]
    # BUG FIX: pin the cross-product axis; without dim=, a mesh with exactly
    # 3 faces is crossed over the wrong axis (first axis of size 3).
    cross_prod = torch.cross(edge1, edge2, dim=1)
    mult = cross_prod[:, axis] * signs / 2
    lift = torch.mul(pressures, mult)
    return torch.sum(lift[~torch.isnan(lift)])
def make_data_instance_from_stl(fld_path, replace_inf=10e5, batch_norm=False, normilize=True) -> torch_geometric.data.Data:
    ''' Takes a path to generated fld file with following colomns: x,y,z,p,k,omega,nut
    and converts it into a geometric data instance.

    Graph nodes are the vertices of the matching .stl mesh; per-vertex
    targets are copied from the nearest fld sample point.
    NOTE(review): `replace_inf` and `batch_norm` are currently unused — the
    threshold is hard-coded below and the global stats branch is commented out.
    '''
    fld = np.genfromtxt(fld_path, delimiter=',', skip_header=1)
    np.random.shuffle(fld)
    # drop rows containing sentinel/overflow values
    fld[fld > 10e5] = np.nan
    fld = fld[~np.isnan(fld).any(axis=1)]
    answers = fld[:, 3:]
    # if (batch_norm):
    #     mean_values = answers.mean(axis=0)
    #     std_values = answers.std(axis=0)
    # else:
    # dataset-wide statistics, kept for reference (per-file stats used below)
    mean_values = [-1.15994242e+01, 9.01274307e-01, 1.83840398e+03, 6.36532838e-05]
    std_values = [4.78920149e+01, 3.70121534e-01, 7.36068558e+02, 2.35466637e-05]
    if normilize:
        for f in range(answers.shape[1]):
            #answers[:, f] = (answers[:, f] - mean_values[f]) / std_values[f]
            answers[:, f] = (answers[:, f] - np.mean(answers[:, f])) / np.std(answers[:, f])
    # matching mesh path: fld dir -> stl dir, and the fld filename's last
    # 9 characters are replaced by '.stl' (presumably a '_xxxx.fld' suffix —
    # TODO confirm the naming scheme)
    stl_path = fld_path.replace('fld', 'stl', 1)[:-9] + '.stl'
    mesh = trimesh.load(stl_path)
    # nearest fld sample for every mesh vertex
    fld_tree = KDTree(fld[:, :3])
    distances, indeces = fld_tree.query(mesh.vertices, k=1)
    interpolations = answers[indeces].squeeze()
    # relative displacement of each mesh edge's endpoints
    edge_attr = [mesh.vertices[a] - mesh.vertices[b] for a, b in mesh.edges]
    data = torch_geometric.data.Data(x = torch.tensor(mesh.vertices, dtype=torch.float),
                                     pos= torch.tensor(mesh.vertices, dtype=torch.float),
                                     face = torch.tensor(mesh.faces, dtype=torch.long).t(),
                                     y = torch.tensor(interpolations, dtype=torch.float),
                                     edge_attr = torch.tensor(edge_attr, dtype=torch.float),
                                     edge_index= torch.tensor(mesh.edges, dtype=torch.long).t().contiguous())
    # matching simulation-scalars file: fld dir -> scr dir, .fld -> .json
    scr_path = fld_path.replace('fld', 'scr', 1).replace('fld', 'json')
    with open(scr_path) as scr_file:
        scr_data = json.load(scr_file)
    # dataset-wide statistics for the 12 global force/moment components
    global_mean = np.array([ 7.15702765e-01, 9.76291022e-03, -1.97037022e-04,
                             4.33680848e-02, 2.71446501e-03, 2.42610894e-05,
                             -1.63100377e-05, -1.20658604e-03, 2.01998814e-01,
                             -2.94244062e-06, 1.35224581e-05, -6.22179022e-04] )
    global_std = np.array([ 3.12011511e-01, 2.76790047e-01, 4.93472812e-02,
                            7.02184919e-03, 1.78783928e-03, 8.31190054e-04,
                            4.32590171e-03, 1.71780821e-02, 1.01220579e-01,
                            1.13513395e-04, 2.45400068e-04, 1.05765967e-03] )
    data.pressure_drag = torch.tensor((scr_data['pressure_drag'] - global_mean[None, :3]) /
                                      global_std[None, :3], dtype=torch.float)
    data.viscous_drag = torch.tensor((scr_data['viscous_drag'] - global_mean[3:6]) /
                                     global_std[None, 3:6], dtype=torch.float)
    data.pressure_moment = torch.tensor((scr_data['pressure_moment'] - global_mean[6:9]) /
                                        global_std[None, 6:9], dtype=torch.float)
    data.viscous_moment = torch.tensor((scr_data['viscous_moment'] - global_mean[9:]) /
                                       global_std[None, 9:], dtype=torch.float)
    data.path = fld_path
    return data
def make_data_instance_from_fld(fld_path, k=10, replace_inf=10e5, data_step=1) -> torch_geometric.data.Data:
    ''' Takes a path to generated fld file with following colomns: x,y,z,p,k,omega,nut
    and converts it into a geometric data instance.

    k: number of nearest neighbours used to build graph edges (k == 0 yields
       a degenerate single self-loop graph).
    replace_inf: values above this threshold are treated as invalid and the
       corresponding rows are dropped.
    data_step: keep every data_step-th point (after shuffling).
    '''
    fld = np.genfromtxt(fld_path, delimiter=',', skip_header=1)
    np.random.shuffle(fld)
    fld = fld[::data_step]
    # BUG FIX: the threshold was hard-coded to 10e5, silently ignoring the
    # replace_inf parameter (same default, so default behavior is unchanged).
    fld[fld > replace_inf] = np.nan
    fld = fld[~np.isnan(fld).any(axis=1)]
    answers = fld[:, 3:]
    # dataset-wide statistics for z-score normalization of p, k, omega, nut
    mean_values = [-1.90357614e+00, 9.55119907e-02, 2.05472217e+02, 5.53618263e-05]
    std_values = [3.71674873e+00, 4.93675056e-02, 1.10871494e+02, 2.63155496e-05]
    for f in range(answers.shape[1]):
        answers[:, f] = (answers[:, f] - mean_values[f]) / std_values[f]
    if k > 0:
        # find correspondences
        fld_tree = KDTree(fld[:, :3])
        # BUG FIX: the query was hard-coded to k=10, ignoring the `k`
        # parameter — asking for k > 10 neighbours then indexing range(k)
        # raised IndexError, and k < 10 silently queried too many.
        distances, indeces = fld_tree.query(fld[:, :3], k=k)
        # create edge indicies: each point connects to its k neighbours
        edge_indices = np.array([[(idx, indeces[idx][i]) for i in range(k)] for idx in range(len(fld))])
        edge_indices = edge_indices.reshape( k * len(fld), 2)
        # edge features are the relative displacements to each neighbour
        edge_attr = np.array([[fld[idx, :3] - fld[indeces[idx][i]][:3] for i in range(k)]
                              for idx in range(len(fld))])
        edge_attr = edge_attr.reshape( k * len(fld), 3)
    else:
        # degenerate self-loop so downstream code always has edge tensors
        edge_indices = np.array([[0,0]])
        edge_attr = np.array([0])
    data = torch_geometric.data.Data(x = torch.tensor(fld[:, :3], dtype=torch.float),
                                     pos= torch.tensor(fld[:, :3], dtype=torch.float),
                                     y = torch.tensor(fld[:, 3:], dtype=torch.float),
                                     edge_attr = torch.tensor(edge_attr, dtype=torch.float),
                                     edge_index= torch.tensor(edge_indices, dtype=torch.long).t().contiguous())
    # matching simulation-scalars file: fld dir -> scr dir, .fld -> .json
    scr_path = fld_path.replace('fld', 'scr', 1).replace('fld', 'json')
    with open(scr_path) as scr_file:
        scr_data = json.load(scr_file)
    # dataset-wide statistics for the 12 global force/moment components
    global_mean = np.array([ 7.15702765e-01, 9.76291022e-03, -1.97037022e-04,
                             4.33680848e-02, 2.71446501e-03, 2.42610894e-05,
                             -1.63100377e-05, -1.20658604e-03, 2.01998814e-01,
                             -2.94244062e-06, 1.35224581e-05, -6.22179022e-04] )
    global_std = np.array([ 3.12011511e-01, 2.76790047e-01, 4.93472812e-02,
                            7.02184919e-03, 1.78783928e-03, 8.31190054e-04,
                            4.32590171e-03, 1.71780821e-02, 1.01220579e-01,
                            1.13513395e-04, 2.45400068e-04, 1.05765967e-03] )
    data.pressure_drag = torch.tensor((scr_data['pressure_drag'] - global_mean[None, :3]) /
                                      global_std[None, :3], dtype=torch.float)
    data.viscous_drag = torch.tensor((scr_data['viscous_drag'] - global_mean[3:6]) /
                                     global_std[None, 3:6], dtype=torch.float)
    data.pressure_moment = torch.tensor((scr_data['pressure_moment'] - global_mean[6:9]) /
                                        global_std[None, 6:9], dtype=torch.float)
    data.viscous_moment = torch.tensor((scr_data['viscous_moment'] - global_mean[9:]) /
                                       global_std[None, 9:], dtype=torch.float)
    data.path = fld_path
    return data
def generateNamesMapping(root):
    """Map each scenario's original `stl_id` to the basename of its .json
    descriptor found (recursively) under <root>/scr."""
    mapping = {}
    descriptors = []
    for dirpath, _dirnames, filenames in os.walk(os.path.join(root, 'scr')):
        for filename in filenames:
            if filename.endswith('.json') and not filename.startswith('.'):
                descriptors.append((os.path.join(dirpath, filename), filename[:-5]))
    for path, name in descriptors:
        with open(path, 'r') as json_data:
            origId = json.load(json_data)['stl_id']
        mapping[origId] = name
    return mapping
class CDFDataset(torch_geometric.data.Dataset):
    """On-disk dataset of CFD meshes: walks `root` for .fld files and builds
    one torch_geometric Data instance per file on access.

    train / delimetr: the file list is split at fraction `delimetr` into a
    train prefix and a test suffix.
    split: optional path to a ShapeNet-style split json listing object names.
    """

    def __init__(self, root, transform=None, pre_transform=None,
                 train=True, delimetr=0.9, connectivity=10, data_step=1, split=None):
        super(CDFDataset, self).__init__(root, transform, pre_transform)
        self.objects = list()
        if split is not None:
            with open(split, 'r') as json_data:
                obj_names = json.load(json_data)['ShapeNetV2']['02958343']
            # BUG FIX: the original referenced an undefined name (`objName`,
            # NameError) and never iterated the loaded list; build one .fld
            # path per listed object instead.
            self.objects += [os.path.join(root, obj_name + '.fld')
                             for obj_name in obj_names]
        else:
            for (dirpath, dirnames, filenames) in os.walk(root):
                self.objects += [os.path.join(dirpath, file) for file in filenames if file[-4:] == '.fld']
        delimetr = int(delimetr * len(self.objects))
        if train:
            self.objects = self.objects[:delimetr]
        else:
            self.objects = self.objects[delimetr:]
        self.connectivity = connectivity
        self.data_step = data_step

    @property
    def raw_file_names(self):
        # no raw/processed caching: instances are built on the fly in get()
        return []

    @property
    def processed_file_names(self):
        return []

    def __len__(self):
        return len(self.objects)

    def get(self, idx):
        #return make_data_instance_from_fld(self.objects[idx], self.connectivity, data_step=self.data_step)
        # BUG FIX: make_data_instance_from_stl takes only the path (plus
        # normalization flags); forwarding the fld-loader's arguments, as the
        # original did, raised TypeError (unexpected keyword 'data_step').
        return make_data_instance_from_stl(self.objects[idx])
class CDFDatasetInMemory(torch_geometric.data.InMemoryDataset):
    def __init__(self, root, transform=None, pre_transform=None,
                 train=True, delimetr=0.95):
        # Split configuration must be stored BEFORE the base-class __init__:
        # InMemoryDataset.__init__ may trigger process(), which reads
        # self.delimetr and self.train.
        self.delimetr = delimetr
        self.train = train
        super(CDFDatasetInMemory, self).__init__(root, transform, pre_transform)
        # Load the collated tensors previously produced by process().
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def processed_file_names(self):
        # Cache file for the collated dataset; train and test splits are
        # persisted to separate files so both can coexist under processed/.
        if self.train:
            return ['data15_train_full_normalized.pt']
        else:
            return ['data15_test_full_normalized.pt']
def process(self):
# Get list of meshes
# if self.split is not None:
# mapping = generateNamesMapping(self.root)
# with open(self.split, 'r') as json_data:
# objNames = json.load(json_data)['ShapeNetV2']['02958343']
# self.objects = [os.path.join(self.root, 'fld/' + mapping[name] + '.fld') for name in objNames if name in mapping.keys()]
# print('Taken ' + str(len(self.objects)) + ' out of ' + str(len(objNames)))
self.objects = list()
for (dirpath, dirnames, filenames) in os.walk(self.root):
self.objects += [os.path.join(dirpath, file) for file in filenames if file[-4:] == '.fld']
delimetr = int(self.delimetr * len(self.objects))
if self.train:
self.objects = self.objects[:delimetr]
else:
self.objects = self.objects[delimetr:]
print(len(self.objects))
data_list = [ make_data_instance_from_stl(obj) for obj in self.objects]
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
| |
<reponame>rickyHong/parallel-wave-vocoder-repl<gh_stars>100-1000
# -*- coding: utf-8 -*-
# !/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.signal import stft
def causal_conv(value, filter_, dilation, name='causal_conv'):
    '''Dilated causal 1-D convolution via the time<->batch reshuffle trick.

    Args:
        value: Input tensor, shape [batch, time, channels].
        filter_: Convolution kernel, shape [filter_width, in_ch, out_ch].
        dilation: Python int dilation factor; 1 means a plain causal conv.
        name: Variable scope for the ops.

    Returns:
        Tensor of shape [batch, time, out_ch]. Left-side padding keeps the
        convolution causal: output at time t depends only on inputs <= t.
    '''
    def time_to_batch(value, dilation):
        # Fold every `dilation`-th timestep into the batch axis so that a
        # stride-1 conv on the result equals a dilated conv on the original.
        shape = tf.shape(value)
        # Pad time up to a multiple of `dilation` so the reshape is exact.
        pad_elements = dilation - 1 - (shape[1] + dilation - 1) % dilation
        padded = tf.pad(value, [[0, 0], [0, pad_elements], [0, 0]])
        reshaped = tf.reshape(padded, [-1, dilation, shape[2]])
        transposed = tf.transpose(reshaped, perm=[1, 0, 2])
        return tf.reshape(transposed, [shape[0] * dilation, -1, shape[2]])

    def batch_to_time(value, dilation):
        # Inverse of time_to_batch: interleave the dilation groups back into
        # the time axis.
        shape = tf.shape(value)
        prepared = tf.reshape(value, [dilation, -1, shape[2]])
        transposed = tf.transpose(prepared, perm=[1, 0, 2])
        return tf.reshape(transposed,
                          [tf.div(shape[0], dilation), -1, shape[2]])

    with tf.variable_scope(name):
        filter_width = tf.shape(filter_)[0]
        if dilation > 1:
            transformed = time_to_batch(value, dilation)
            # for left-side padding because tf.nn.conv1d do not support left-side padding with padding='SAME'
            padded = tf.pad(transformed, [[0, 0], [filter_width - 1, 0], [0, 0]])
            conv = tf.nn.conv1d(padded, filter_, stride=1, padding='VALID')
            restored = batch_to_time(conv, dilation)
            # Remove excess elements at the end caused by padding in time_to_batch.
            result = tf.slice(restored,
                              [0, 0, 0],
                              [-1, tf.shape(value)[1], -1])
        else:
            # No dilation: just left-pad and run a VALID conv.
            padded = tf.pad(value, [[0, 0], [filter_width - 1, 0], [0, 0]])
            result = tf.nn.conv1d(padded, filter_, stride=1, padding='VALID')
        return result
class LinearIAFLayer(object):
    '''Affine flow layer: y = x * scale(x, c) + shift(x, c).

    `scaler` and `shifter` are callables taking (input, condition) and
    returning tensors broadcastable against `input`.
    '''

    def __init__(self, batch_size, scaler, shifter):
        self.batch_size = batch_size
        self.scaler = scaler
        self.shifter = shifter

    # network
    def __call__(self, input, condition=None):
        '''
        input = (n, t, h), condition = (n, t, h)
        '''
        return input * self.scaler(input, condition) + self.shifter(input, condition)
# This WaveNet code is renovated based on https://github.com/ibab/tensorflow-wavenet
class WaveNet(object):
    '''Implements the WaveNet network for generative audio.

    Usage (with the architecture as in the DeepMind paper):
        dilations = [2**i for i in range(N)] * M
        filter_width = 2  # Convolutions just use 2 samples.
        residual_channels = 16  # Not specified in the paper.
        dilation_channels = 32  # Not specified in the paper.
        skip_channels = 16  # Not specified in the paper.
        net = WaveNetModel(batch_size, dilations, filter_width,
                           residual_channels, dilation_channels,
                           skip_channels)
    '''

    def __init__(self,
                 batch_size,
                 dilations,
                 filter_width,
                 residual_channels,
                 dilation_channels,
                 skip_channels,
                 quantization_channels=2 ** 8,
                 use_biases=False,
                 condition_channels=None,
                 use_skip_connection=True,
                 normalize=None,
                 is_training=True,
                 name='wavenet'):
        '''Initializes the WaveNet model.

        Args:
            batch_size: How many audio files are supplied per batch
                (recommended: 1).
            dilations: A list with the dilation factor for each layer.
            filter_width: The samples that are included in each convolution,
                after dilating.
            residual_channels: How many filters to learn for the residual.
            dilation_channels: How many filters to learn for the dilated
                convolution.
            skip_channels: How many filters to learn that contribute to the
                quantized softmax output.
            quantization_channels: How many amplitude values to use for audio
                quantization and the corresponding one-hot encoding.
                Default: 256 (8-bit quantization).
            use_biases: Whether to add a bias layer to each convolution.
                Default: False.
            condition_channels: Number of channels in (embedding
                size) of global conditioning vector. None indicates there is
                no global conditioning.
            use_skip_connection: If True, postprocessing sums the skip outputs
                of every dilation layer; if False it uses only the last
                layer's output.
            normalize: Normalization method name forwarded to the
                module-level `normalize` helper ('bn' or 'in'); None/falsy
                disables normalization.
            is_training: Forwarded to normalization (controls batch-norm
                train/inference mode).
            name: tf variable scope that owns all weights of this network.
        '''
        self.batch_size = batch_size
        self.dilations = dilations
        self.filter_width = filter_width
        self.residual_channels = residual_channels
        self.dilation_channels = dilation_channels
        self.quantization_channels = quantization_channels
        self.use_biases = use_biases
        self.skip_channels = skip_channels
        self.condition_channels = condition_channels
        self.use_skip_connection = use_skip_connection
        self.normalize = normalize
        self.is_training = is_training
        self.name = name

    # network
    def __call__(self, input_batch, condition_batch=None):
        # Builds the graph: causal layer -> dilated stack -> postprocessing.
        # Returns per-timestep logits with `quantization_channels` channels.
        with tf.variable_scope(self.name):
            '''Construct the WaveNet network.'''
            outputs = []
            with tf.variable_scope('causal_layer'):
                current_layer = self._create_causal_layer(input_batch)

            # Add all defined dilation layers.
            with tf.variable_scope('dilated_stack'):
                for layer_index, dilation in enumerate(self.dilations):
                    with tf.variable_scope('layer{}'.format(layer_index)):
                        output, current_layer = self._create_dilation_layer(
                            current_layer, dilation, condition_batch)
                        outputs.append(output)

            # Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to postprocess the output.
            with tf.variable_scope('postprocessing'):
                # We skip connections from the outputs of each layer, adding them all up here.
                total = sum(outputs) if self.use_skip_connection else outputs[-1]
                transformed1 = tf.nn.relu(total)
                if self.normalize:
                    transformed1 = normalize(transformed1, method=self.normalize, is_training=self.is_training,
                                             name='normalize_postprocess1')
                w1 = tf.get_variable('postprocess1', [1, self.skip_channels, self.skip_channels])
                conv1 = tf.nn.conv1d(transformed1, w1, stride=1, padding="SAME")
                if self.use_biases:
                    b1 = tf.get_variable('postprocess1_bias', [self.skip_channels], initializer=tf.zeros_initializer)
                    conv1 = tf.add(conv1, b1)
                transformed2 = tf.nn.relu(conv1)
                if self.normalize:
                    transformed2 = normalize(transformed2, method=self.normalize, is_training=self.is_training,
                                             name='normalize_postprocess2')
                w2 = tf.get_variable('postprocess2', [1, self.skip_channels, self.quantization_channels])
                conv2 = tf.nn.conv1d(transformed2, w2, stride=1, padding="SAME")
                if self.use_biases:
                    b2 = tf.get_variable('postprocess2_bias', [self.quantization_channels], initializer=tf.zeros_initializer)
                    conv2 = tf.add(conv2, b2)
        return conv2

    # @staticmethod
    # def calculate_receptive_field(filter_width, dilations):
    #     receptive_field = (filter_width - 1) * sum(dilations) + 1
    #     receptive_field += filter_width - 1
    #     return receptive_field

    def _create_causal_layer(self, input_batch):
        '''Creates a single causal convolution layer.

        The layer can change the number of channels.
        '''
        # Projects from quantization channels down to residual channels.
        weights_filter = tf.get_variable('filter', [self.filter_width, self.quantization_channels, self.residual_channels])
        layer = causal_conv(input_batch, weights_filter, 1)
        if self.normalize:
            layer = normalize(layer, method=self.normalize, is_training=self.is_training)
        return layer

    def _create_dilation_layer(self, input_batch, dilation, local_condition):
        '''Creates a single causal dilated convolution layer.

        Args:
            input_batch: Input to the dilation layer.
            dilation: Integer specifying the dilation size.
            local_condition: The data which each timestep is to be conditioned on.
                Shape: [batch size, n_timesteps, channels].

        The layer contains a gated filter that connects to dense output
        and to a skip connection:

               |-> [gate]   -|        |-> 1x1 conv -> skip output
               |             |-> (*) -|
        input -|-> [filter] -|        |-> 1x1 conv -|
               |                                    |-> (+) -> dense output
               |------------------------------------|

        Where `[gate]` and `[filter]` are causal convolutions with a
        non-linear activation at the output. Biases and conditioning
        are omitted due to the limits of ASCII art.

        Returns:
            (skip_output, dense_output): the skip contribution and the
            residual-connected input to the next layer.
        '''
        weights_filter = tf.get_variable('filter', [self.filter_width, self.residual_channels, self.dilation_channels])
        weights_gate = tf.get_variable('gate', [self.filter_width, self.residual_channels, self.dilation_channels])
        conv_filter = causal_conv(input_batch, weights_filter, dilation)
        conv_gate = causal_conv(input_batch, weights_gate, dilation)
        if local_condition is not None:
            # Add a 1x1-convolved projection of the conditioning signal to
            # both the filter and the gate paths.
            weights_cond_filter = tf.get_variable('gc_filter', [1, self.condition_channels, self.dilation_channels])
            conv_filter = conv_filter + tf.nn.conv1d(local_condition, weights_cond_filter, stride=1, padding="SAME",
                                                     name="gc_filter")
            weights_cond_gate = tf.get_variable('gc_gate', [1, self.condition_channels, self.dilation_channels])
            conv_gate = conv_gate + tf.nn.conv1d(local_condition, weights_cond_gate, stride=1, padding="SAME",
                                                 name="gc_gate")
        if self.use_biases:
            filter_bias = tf.get_variable('filter_bias', [self.dilation_channels], initializer=tf.zeros_initializer)
            gate_bias = tf.get_variable('gate_bias', [self.dilation_channels], initializer=tf.zeros_initializer)
            conv_filter = tf.add(conv_filter, filter_bias)
            conv_gate = tf.add(conv_gate, gate_bias)
        if self.normalize:
            conv_filter = normalize(conv_filter, method=self.normalize, is_training=self.is_training,
                                    name='normalize_filter')
            conv_gate = normalize(conv_gate, method=self.normalize, is_training=self.is_training,
                                  name='normalize_gate')
        # Gated activation unit: tanh(filter) * sigmoid(gate).
        out = tf.tanh(conv_filter) * tf.sigmoid(conv_gate)

        # The 1x1 conv to produce the residual output
        weights_dense = tf.get_variable('dense', [1, self.dilation_channels, self.residual_channels])
        transformed = tf.nn.conv1d(out, weights_dense, stride=1, padding="SAME", name="dense")

        # The 1x1 conv to produce the skip output
        weights_skip = tf.get_variable('skip', [1, self.dilation_channels, self.skip_channels])
        skip_output = tf.nn.conv1d(out, weights_skip, stride=1, padding="SAME", name="skip")

        if self.use_biases:
            dense_bias = tf.get_variable('dense_bias', [self.residual_channels], initializer=tf.zeros_initializer)
            skip_bias = tf.get_variable('skip_bias', [self.skip_channels], initializer=tf.zeros_initializer)
            transformed = transformed + dense_bias
            skip_output = skip_output + skip_bias

        # Residual connection: eases optimization of deep dilation stacks.
        dense_output = input_batch + transformed
        if self.normalize:
            skip_output = normalize(skip_output, method=self.normalize, is_training=self.is_training,
                                    name='normalize_skip_output')
            dense_output = normalize(dense_output, method=self.normalize, is_training=self.is_training,
                                     name='normalize_dense_output')
        return skip_output, dense_output
# TODO refactoring
def normalize(input, is_training, method='bn', name='normalize'):
    """Apply the selected normalization under a variable scope.

    method: 'bn' -> batch normalization, 'in' -> instance normalization;
    anything else (e.g. the unimplemented 'wn') passes the input through.
    """
    with tf.variable_scope(name):
        if method == 'bn':
            return tf.layers.batch_normalization(input, training=is_training)
        if method == 'in':
            return instance_normalization(input)
        # 'wn' (weight normalization) is not implemented; fall through.
        return input
# TODO generalization
def instance_normalization(input, epsilon=1e-8):
    """Instance normalization: standardize each channel over the time axis
    (axis 1), then apply a learned per-channel gain (gamma) and bias (beta)."""
    params_shape = input.get_shape()[-1:]
    # Moments over the time dimension only, kept for broadcasting.
    mean, variance = tf.nn.moments(input, [1], keep_dims=True)
    beta = tf.get_variable("beta", shape=params_shape, initializer=tf.zeros_initializer)
    gamma = tf.get_variable("gamma", shape=params_shape, initializer=tf.ones_initializer)
    return gamma * ((input - mean) / ((variance + epsilon) ** (.5))) + beta
# def get_var_maybe_avg(var_name, ema, **kwargs):
# ''' utility for retrieving polyak averaged params '''
# v = tf.get_variable(var_name, **kwargs)
# if ema is not None:
# v = ema.average(v)
# return v
#
# def get_vars_maybe_avg(var_names, ema, **kwargs):
# ''' utility for retrieving polyak averaged params '''
# vars = []
# for vn in var_names:
# vars.append(get_var_maybe_avg(vn, ema, **kwargs))
# return vars
#
# def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
# ''' fully connected layer '''
# if init:
# # data based initialization of parameters
# V = tf.get_variable('V', [int(x.get_shape()[1]),num_units], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
# V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
# x_init = tf.matmul(x, V_norm)
# m_init, v_init = tf.nn.moments(x_init, [0])
# scale_init = init_scale/tf.sqrt(v_init + 1e-10)
# g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
# b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
# x_init = tf.reshape(scale_init,[1,num_units])*(x_init-tf.reshape(m_init,[1,num_units]))
# if nonlinearity is not None:
# x_init = nonlinearity(x_init)
# return x_init
#
# else:
# V,g,b = get_vars_maybe_avg(['V','g','b'], ema)
# tf.assert_variables_initialized([V,g,b])
#
# # use weight normalization (Salimans & Kingma, 2016)
# x = tf.matmul(x, V)
# scaler = g/tf.sqrt(tf.reduce_sum(tf.square(V),[0]))
# x = tf.reshape(scaler,[1,num_units])*x + tf.reshape(b,[1,num_units])
#
# # apply nonlinearity
# if nonlinearity is not None:
# x = nonlinearity(x)
# return x
#
# def weight_normalization(input, init_scale=1.):
# scale_init = init_scale / tf.sqrt(v_init + 1e-10)
# g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
#
# V = tf.get_variable('V', [int(input.get_shape()[1]), num_units], tf.float32,
# tf.random_normal_initializer(0, 0.05), trainable=True)
# V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
# | |
the no_ext argument when pulling/downloading')
@click.option('-o', '--overwrite', flag_value=True, help='Overwrite previously added file if the file has been modified')
@click.option('-s', '--srx', type=click.Path(exists=True), help='srx file')
@click.option('-si', '--srx_id', help='srx id')
@click.option('-i', '--its', type=click.Path(exists=True), help='its file')
@click.option('-ii', '--its_id', help='its id')
@click.option('-c', '--charset', help='File encoding')
@click.option('-ff', '--fprm', type=click.Path(exists=True), help='fprm file')
@click.option('-fi', '--fprm_id', help='fprm id')
@click.option('-fs', '--fprm_subfilter', type=click.Path(exists=True), help='fprm subfilter file')
@click.option('-fsi', '--fprm_subfilter_id', help='fprm subfilter id')
@click.option('-v', '--vault_id', help='Save-to TM vault id')
@click.option('-e', '--external_url', help='Source url')
@click.option('--due_date', help='Due date (as Unix timestamp, in milliseconds)')
@click.option('--due_reason', help='Reason for due date')
@click.option('-m', '--metadata', flag_value=True, help="Launches the metadata wizard")
# # Metadata - optional parameters
# @click.option('--author_email', help='Author email')
# @click.option('--author_name', help='Author name')
# @click.option('--business_division', help='Business division')
# @click.option('--business_unit', help='Business unit')
# @click.option('--campaign_id', help='Campaign ID')
# @click.option('--campaign_rating', help='Campaign rating')
# @click.option('--channel', help='Channel')
# @click.option('--contact_email', help='Contact email')
# @click.option('--contact_name', help='Contact name')
# @click.option('--content_description', help='Content description')
# @click.option('--content_type', help='Content type')
# @click.option('--domain', help='Domain')
# @click.option('--external_application_id', help='External application ID')
# @click.option('--external_document_id', help='External document ID')
# @click.option('--external_style_id', help='External style ID')
# @click.option('--job_id', help='Job ID')
# @click.option('--purchase_order', help='Purchase Order')
# @click.option('--reference_url', help='Reference URL')
# @click.option('--region', help='Region')
# @click.option('--require_review', help='Require review')
# @click.option('--category_id', help='Category ID')
# @click.option('--note', help='Note')
# Metadata - optional parameters
@click.option('--author_email', hidden=True)
@click.option('--author_name', hidden=True)
@click.option('--business_division', hidden=True)
@click.option('--business_unit', hidden=True)
@click.option('--campaign_id', hidden=True)
@click.option('--campaign_rating', hidden=True)
@click.option('--channel', hidden=True)
@click.option('--contact_email', hidden=True)
@click.option('--contact_name', hidden=True)
@click.option('--content_description', hidden=True)
@click.option('--content_type', hidden=True)
@click.option('--domain', hidden=True)
@click.option('--external_application_id', hidden=True)
@click.option('--external_document_id', hidden=True)
@click.option('--external_style_id', hidden=True)
@click.option('--job_id', hidden=True)
@click.option('--purchase_order', hidden=True)
@click.option('--reference_url', hidden=True)
@click.option('--region', hidden=True)
@click.option('--require_review', hidden=True)
@click.option('--category_id', hidden=True)
@click.option('--note', hidden=True)
def add(file_names, **kwargs):
    """ Add files and folders for upload to Lingotek. Fileglobs (e.g. *.txt) can be used to add all matching files and/or folders. Added folders will automatically add the new files added or created inside of them.
    Metadata can be added by launching the metadata wizard with the -m flag or by using flags for specific metadata. The metadata flags are --author_email, --author_name, --business_division, --business_unit, --campaign_id, --campaign_rating, --channel, --contact_email, --contact_name, --content_description, --content_type, --domain, --external_application_id, --external_document_id, --external_style_id, --job_id, --purchase_order, --reference_url, --region, --require_review, --category_id, and --note """
    try:
        action = add_action.AddAction(os.getcwd())
        init_logger(action.path)
        # Strip PowerShell quoting artifacts from every supplied value.
        file_names = remove_powershell_formatting(file_names)
        for key, value in kwargs.items():
            if value:
                kwargs[key] = remove_powershell_formatting(value)
        action.add_action(file_names, **kwargs)
    except (UninitializedError, RequestFailedError, ResourceNotFound, AlreadyExistsError) as err:
        print_log(err)
        logger.error(err)
        return
@ltk.command(short_help="Sends updated content to Lingotek for documents that have been added; defaults to the entire project.")
@click.option('-n', '--test', 'test', flag_value=True, help='Shows which files will be added or updated without actually uploading any content')
@click.option('-t', '--title', 'title', flag_value=True, help='Display document titles rather than file paths')
@click.argument('files', type=click.Path(exists=True), required=False, nargs=-1)
@click.option('--due_date', help='Due date (as Unix timestamp, in milliseconds)')
@click.option('--due_reason', help='Reason for due date')
@click.option('-m', '--metadata', flag_value=True, help="Launches the metadata wizard")
@click.option('-o', '--metadata-only', 'metadata_only', flag_value=True, help="Only updates the metadata and due date/due reason and does not update the document contents")
# # Metadata - optional parameters
# @click.option('--author_email', help='Author email')
# @click.option('--author_name', help='Author name')
# @click.option('--business_division', help='Business division')
# @click.option('--business_unit', help='Business unit')
# @click.option('--campaign_id', help='Campaign ID')
# @click.option('--campaign_rating', help='Campaign rating')
# @click.option('--channel', help='Channel')
# @click.option('--contact_email', help='Contact email')
# @click.option('--contact_name', help='Contact name')
# @click.option('--content_description', help='Content description')
# @click.option('--content_type', help='Content type')
# @click.option('--domain', help='Domain')
# @click.option('--external_application_id', help='External application ID')
# @click.option('--external_document_id', help='External document ID')
# @click.option('--external_style_id', help='External style ID')
# @click.option('--job_id', help='Job ID')
# @click.option('--purchase_order', help='Purchase Order')
# @click.option('--reference_url', help='Reference URL')
# @click.option('--region', help='Region')
# @click.option('--require_review', help='Require review')
# @click.option('--category_id', help='Category ID')
# @click.option('--note', help='Note')
# Metadata - optional parameters
@click.option('--author_email', hidden=True)
@click.option('--author_name', hidden=True)
@click.option('--business_division', hidden=True)
@click.option('--business_unit', hidden=True)
@click.option('--campaign_id', hidden=True)
@click.option('--campaign_rating', hidden=True)
@click.option('--channel', hidden=True)
@click.option('--contact_email', hidden=True)
@click.option('--contact_name', hidden=True)
@click.option('--content_description', hidden=True)
@click.option('--content_type', hidden=True)
@click.option('--domain', hidden=True)
@click.option('--external_application_id', hidden=True)
@click.option('--external_document_id', hidden=True)
@click.option('--external_style_id', hidden=True)
@click.option('--job_id', hidden=True)
@click.option('--purchase_order', hidden=True)
@click.option('--reference_url', hidden=True)
@click.option('--region', hidden=True)
@click.option('--require_review', hidden=True)
@click.option('--category_id', hidden=True)
@click.option('--note', hidden=True)
def push(test, title, files, metadata, metadata_only, **kwargs):
    """ Sends updated content to Lingotek for documents that have been added. Fileglobs (e.g. *.txt) can be used to push all matching files
    Metadata can be updated by launching the metadata wizard with the -m flag or by using flags for specific metadata. The metadata flags are --author_email, --author_name, --business_division, --business_unit, --campaign_id, --campaign_rating, --channel, --contact_email, --contact_name, --content_description, --content_type, --domain, --external_application_id, --external_document_id, --external_style_id, --job_id, --purchase_order, --reference_url, --region, --require_review, --category_id, and --note """
    try:
        # Construct the action for the current working directory and push.
        pusher = push_action.PushAction(os.getcwd(), test, title)
        init_logger(pusher.path)
        pusher.push_action(files=files, set_metadata=metadata, metadata_only=metadata_only, **kwargs)
    except UninitializedError as err:
        print_log(err)
        logger.error(err)
        return
@ltk.command(short_help="Add targets to document(s) to start translation; defaults to the entire project. Use ltk list -l to see possible locales")
@click.option('-n', '--doc_name', help='The name of the document for which to request target locale(s)')
@click.option('-p', '--path', type=click.Path(exists=True), help='The file name or directory for which to request target locale(s)')
@click.option('-c', '--cancel', 'to_cancel', flag_value=True, help='Cancels a specified target locale')
@click.option('-d', '--delete', 'to_delete', flag_value=True, help='Deletes a specified target locale')
@click.option('--due_date', help='The due date of the translation')
@click.option('-w', '--workflow', help='The workflow of the translation (Use "ltk list -w" to see available workflows)')
@click.argument('locales', required=False, nargs=-1) # can have unlimited number of locales
def request(doc_name, path, locales, to_cancel, to_delete, due_date, workflow):
    """ Add targets to document(s) to start translation; defaults to the entire project. If no locales are specified, Filesystem Connector
    will look for target watch locales set in ltk config. Use ltk list -l to see possible locales. """
    try:
        # Bug fix: normalize the inputs BEFORE constructing the action.
        # Previously the PowerShell-formatting cleanup and the single-locale
        # list conversion ran after RequestAction had already captured the
        # raw values, so the cleaned values were silently discarded.
        if locales and isinstance(locales, str):
            locales = [locales]
        doc_name = remove_powershell_formatting(doc_name)
        path = remove_powershell_formatting(path)
        action = request_action.RequestAction(os.getcwd(), doc_name, path, locales, to_cancel, to_delete, due_date, workflow)
        init_logger(action.path)
        action.target_action()
    except (UninitializedError, ResourceNotFound, RequestFailedError) as e:
        print_log(e)
        logger.error(e)
        return
# todo add a --all option to see all document ids once only show relative to cwd is implemented
@ltk.command(name='list', short_help='Shows docs (default), workflows, locales, formats, or filters')
@click.option('-t', '--title', 'title', flag_value=True, help='List document titles and folder paths from project root instead of relative file paths')
@click.option('-c', '--hide_docs', 'hide_docs', flag_value=True, help='Collapse down to list only added directories instead of both directories and documents.')
@click.option('-w', '--workflows', 'id_type', flag_value='workflow', help='List available workflows')
@click.option('-l', '--locales', 'id_type', flag_value='locale', help='List supported locale codes')
@click.option('-f', '--formats', 'id_type', flag_value='format', help='List supported formats')
@click.option('-r', '--remote', 'id_type', flag_value='remote', help='List all project documents on Lingotek Cloud')
@click.option('--filters', 'id_type', flag_value='filter', help='List default and custom filters')
@click.option('-d', '--download_folder', 'show_dests', flag_value=True, help="Show target download folders for files that have had them set")
def list(**kwargs):
    """ Shows docs, workflows, locales, formats, or filters. By default lists added folders and docs. """
    try:
        # Delegate entirely to ListAction; kwargs carry the click flags.
        lister = list_action.ListAction(os.getcwd())
        init_logger(lister.path)
        lister.list_action(**kwargs)
    except (UninitializedError, RequestFailedError) as err:
        print_log(err)
        logger.error(err)
        return
@ltk.command(short_help="Gets the status of a specific document or all documents")
@click.option('-n', '--doc_name', help='Specific document name to get status of')
@click.option('-d', '--detailed', flag_value=True, help='Detailed status of each locale for the document')
@click.option('-a', '--all', flag_value=True, help='List all project documents on Lingotek Cloud')
def status(**kwargs):
    """ Gets the status of a specific document or all documents """
    try:
        action = status_action.StatusAction(os.getcwd())
        init_logger(action.path)
        # Strip PowerShell quoting artifacts from every supplied value.
        for key, value in kwargs.items():
            if value:
                kwargs[key] = remove_powershell_formatting(value)
        action.get_status(**kwargs)
    except (UninitializedError, ResourceNotFound) as err:
        print_log(err)
        logger.error(err)
        return
@ltk.command(short_help='Download specified translations')
@click.option('-a', '--auto_format', flag_value=True, help='Flag to auto apply formatting during download')
@click.option('-l', '--locales', help="Specify locales to download (defaults to all target locales for the document). For multiple locales give a list separated by commas and no spaces (ex: en_US,en_GB).")
@click.option('-e', '--locale_ext', flag_value=True, help="Specifies to add the name of the locale as an extension to the file name (ex: doc1.fr_FR.docx). This is the default unless the clone download option is active.")
@click.option('-n', '--no_ext', flag_value=True, help="Specifies to not add the name of the locale as an extension to the file name. This is the default if the clone download option is active.")
@click.option('-x', '--xliff', flag_value=True, help="Download xliff version of the specified translation")
@click.argument('file_names', type=click.Path(exists=True), required=True, nargs=-1)
def download(auto_format, locales, locale_ext, no_ext, xliff, file_names):
    """ Downloads translated content specified by filename for specified locales, or all locales if none are specified. Change download options and folders using ltk config."""
    try:
        action = download_action.DownloadAction(os.getcwd())
        init_logger(action.path)
        for name in file_names:
            action.download_by_path(name, locales, locale_ext, no_ext, auto_format, xliff)
            # Blank line between files' download output for readability.
            print("\n")
    except (UninitializedError, ResourceNotFound, RequestFailedError) as e:
        print_log(e)
        logger.error(e)
        return
@ltk.command(short_help='Pulls translations for all added documents for all locales or by specified locales')
@click.option('-a', '--auto_format', flag_value=True, help='Flag to auto apply formatting during download')
@click.option('-e', '--locale_ext', flag_value=True, help="Specifies to add the name of the locale as an extension to the file name (ex: doc1.fr_FR.docx). This is the default unless the clone download option is active.")
@click.option('-n', '--no_ext', flag_value=True, help="Specifies to not add the name of the locale as an extension to the file name. This is the default if the clone download option is active.")
@click.argument('locales', nargs=-1)
def pull(auto_format, locale_ext, no_ext, locales):
""" Pulls translations for all added documents for all locales or by specified locales """
try:
download = download_action.DownloadAction(os.getcwd())
action = pull_action.PullAction(os.getcwd(), download)
init_logger(action.path)
| |
from collections import namedtuple
from copy import copy
from datetime import *
from geojson import Feature, Point, FeatureCollection, LineString
from geojson.mapping import to_mapping
from typing import List, Dict
import functools
import os
import struct
from KMALL import kmall
from hyo2.mate.lib.scan import Scan, A_NONE, A_PARTIAL, A_FULL, A_FAIL, A_PASS
from hyo2.mate.lib.scan import ScanState, ScanResult
class ScanKMALL(Scan):
'''
A Scan object that contains check information on the contents of a Kongsberg .all file
:param file_path: The file path to the .all file
:type file_path: str
'''
    def __init__(self, file_path):
        # Open two independent readers over the same file: a raw byte handle
        # and the KMALL datagram parser.
        # NOTE(review): neither handle is explicitly closed in this class's
        # visible code; presumably cleanup happens elsewhere — confirm.
        Scan.__init__(self, file_path)
        self.reader = open(self.file_path, 'rb')
        self.kmall_reader = kmall(self.file_path)
def scan_datagram(self, progress_callback=None):
'''scan data to extract basic information for each type of datagram'''
# summary type information stored in plain dict
self.scan_result = {}
# datagram objects
self.datagrams = {}
while not self.kmall_reader.eof:
# update progress
if self.kmall_reader.FID is not None:
self.progress = 1.0 - ((self.kmall_reader.file_size - self.kmall_reader.FID.tell()) / self.kmall_reader.file_size)
if progress_callback is not None:
progress_callback(self.progress)
# read datagram information
self.kmall_reader.decode_datagram()
dg_type = self.kmall_reader.datagram_ident
# Large amounts of data in MRZ packets in only reading
# header, common and ping data as full data not required
if dg_type == 'MRZ':
dg = {}
start = self.kmall_reader.FID.tell()
dg['header'] = self.kmall_reader.read_EMdgmHeader()
dg['partition'] = self.kmall_reader.read_EMdgmMpartition()
dg['cmnPart'] = self.kmall_reader.read_EMdgmMbody()
dg['pingInfo'] = self.kmall_reader.read_EMdgmMRZ_pingInfo()
numBytesDgm, dgmType, dgmVersion, dgm_version, systemID, \
dgtime, dgdatetime = dg['header'].values()
self.kmall_reader.FID.seek(start + numBytesDgm, 0)
else:
self.kmall_reader.read_datagram()
numBytesDgm, dgmType, dgmVersion, dgm_version, systemID, \
dgtime, dgdatetime = self.kmall_reader.datagram_data['header'].values()
if dg_type not in self.scan_result.keys():
self.scan_result[dg_type] = copy(self.default_info)
self.scan_result[dg_type]['_seqNo'] = None
self.scan_result[dg_type]['byteCount'] += numBytesDgm
self.scan_result[dg_type]['recordCount'] += 1
if self.scan_result[dg_type]['startTime'] is None:
self.scan_result[dg_type]['startTime'] = dgdatetime
self.scan_result[dg_type]['stopTime'] = dgdatetime
if dgmType == b'#IIP':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#IOP':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#IBE':
self._push_datagram(dg_type, self.kmall_reader.datagram_data['BISTText'])
if dgmType == b'#IBR':
self._push_datagram(dg_type, self.kmall_reader.datagram_data['BISTText'])
if dgmType == b'#IBS':
self._push_datagram(dg_type, self.kmall_reader.datagram_data['BISTText'])
if dgmType == b'#MRZ':
self._push_datagram(dg_type, dg)
if dgmType == b'#MWC':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#SPO':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#SKM':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#SVP':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#SVT':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#SCL':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#SDE':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#SHI':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#CPO':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#CHE':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
if dgmType == b'#FCF':
self._push_datagram(dg_type, self.kmall_reader.datagram_data)
filename, totalpings, NpingsMissed, MissingMRZCount = self.kmall_reader.check_ping_count()
self.scan_result['MRZ']['missedPings'] = NpingsMissed
self.scan_result['MRZ']['pingCount'] = totalpings
self.scan_result['MRZ']['missingPackets'] = MissingMRZCount
return
def get_installation_parameters(self):
'''
Gets the decoded contents of the IIP datagram (installation parameters)
as a Python dict. If multiple IIP datagrams are included only decoded
parameters from the first will be included.
Will return None if datagram not present in *.kmall file
'''
if 'IIP' not in self.datagrams:
return None
installationParametersDatagrams = self.datagrams['IIP']
for installationParametersDatagram in installationParametersDatagrams:
# skip datagrams with no params
if len(installationParametersDatagram['install_txt']) == 0:
continue
return installationParametersDatagram['install_txt']
return None
def installation_parameters(self) -> ScanResult:
'''
Gets the installation parameters in a ScanResult format
'''
ip = self.get_installation_parameters()
if ip is None:
return ScanResult(
state=ScanState.FAIL,
messages=(
"Installation Parameters datagram not found in file"),
data={})
data = {}
for ip_param_name, ip_param_value in ip.items():
# todo: in future we'll need to perform some translations between
# the raw field names and a standardised version across all
# file formats
data[ip_param_name] = ip_param_value
return ScanResult(
state=ScanState.PASS,
messages=[],
data=data
)
def get_active_sensors(self):
installationParameters = self.get_installation_parameters()
active = []
for key, value in installationParameters.items():
if value == 'ACTIVE':
active.append(key)
for sensor in active:
if 'position' in sensor:
activeSensors = {'Position': sensor[:-15]}
if 'motion' in sensor:
activeSensors['RollPitch'] = sensor[:-15]
activeSensors['Heave'] = sensor[:-15]
activeSensors['Heading'] = sensor[:-15]
activeSensors['Attvel'] = sensor[:-15]
return activeSensors
def filename_changed(self) -> ScanResult:
'''
Check if the filename is different from what recorded
in the datagram. Requires I - Installation Parameters datagram
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
return ScanResult(
state=ScanState.WARNING,
messages=["Check not implemented for KMALL format"]
)
def date_match(self) -> ScanResult:
'''
Compare the date as in the IIP datagram and the date as written
in the filename
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
if 'IIP' not in self.datagrams:
# then there's no way to check, so fail test
return ScanResult(
state=ScanState.FAIL,
messages=[
"'IIP' datagram not found, cannot extract startTime"]
)
base_fn = os.path.basename(self.file_path)
installationParametersDatagrams = self.datagrams['IIP']
# assume we just use the first one we find
rec = installationParametersDatagrams[0]['header']
found = rec['dgdatetime'].strftime('%Y%m%d') in base_fn
if found:
return ScanResult(state=ScanState.PASS)
else:
msg = (
"Could not find record date {} in filename"
.format(rec['dgdatetime'])
)
return ScanResult(
state=ScanState.FAIL,
messages=[msg]
)
def bathymetry_availability(self) -> ScanResult:
'''
Checks the contents of a Kongsberg .kmall file for all required datagrams when bathymetry processing
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
# define a named tuple for bathy datagrams. Provides a means to define
# then reference this info.
Datagram = namedtuple(
'Datagram',
'id critical error_message alternatives'
)
bathy_datagrams = [
Datagram(
'IIP',
False,
"Warning: installation parameters are missing please ensure that you have your lever arms and vessel frame parameters collected elsewhere.",
[]
),
Datagram(
'IOP',
False,
"Warning: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
[]
),
Datagram(
'SKM',
True,
"Critical: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
[]
),
Datagram(
'SPO',
True,
"Critical: position data missing, you will not be able to process this data without ensuring this data is being collected.",
[]
),
Datagram(
'SVT',
False,
"Warning: surface sound velocity data is missing, ensure your sensor is working or collect as many profiles as possible to attempt to compensate.",
[]
),
Datagram(
'SVP',
False,
"Warning: no sound velocity profile data has been collected in the raw file, please ensure you are collecting this data elsewhere.",
[]
),
Datagram(
'MRZ',
False,
"Warning: neither datagram 'D' or 'X' were found, processed depth information is missing.",
[]
)
]
return self.__result_from_datagram_presence(bathy_datagrams)
def backscatter_availability(self) -> ScanResult:
'''
Checks the contents of a Kongsberg .kmall file for all required datagrams when backscatter processing
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
# define a named tuple for datagrams. Provides a means to define
# then reference this info.
Datagram = namedtuple(
'Datagram',
'id critical error_message alternatives'
)
bs_datagrams = [
Datagram(
'MRZ',
True,
"Critical: backscatter information is missing ('S' or 'Y' datagram). You will not be able to process backscatter without seabed image data. If you intend processing backscatter check your setup.",
[]
)
]
return self.__result_from_datagram_presence(bs_datagrams)
def ray_tracing_availability(self) -> ScanResult:
'''
Checks the contents of a Kongsberg .kmall file for all required datagrams when recalculating ray tracing
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
# define a named tuple for datagrams. Provides a means to define
# then reference this info.
Datagram = namedtuple(
'Datagram',
'id critical error_message alternatives'
)
rt_datagrams = [
Datagram(
'IIP',
False,
"Warning: installation parameters are missing please ensure that you have your lever arms and vessel frame parameters collected elsewhere.",
[]
),
Datagram(
'IOP',
False,
"Warning: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
[]
),
Datagram(
'SKM',
True,
"Critical: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
[]
),
Datagram(
'SPO',
True,
"Critical: position data missing, you will not be able to process this data without ensuring this data is being collected.",
[]
),
Datagram(
'SVT',
False,
"Warning: surface sound velocity data is missing, ensure your sensor is working or collect as many profiles as possible to attempt to compensate.",
[]
),
Datagram(
'SVP',
False,
"Warning: no sound velocity profile data has been collected in the raw file, please ensure you are collecting this data elsewhere.",
[]
),
Datagram(
'MRZ',
True,
"Critical: neither datagram 'F', 'f' or 'N' were found. Critical range and angle data missing, you are not collecting the data required for post processing. If you are collecting processed depths it is possible to back process however it is not desirable and is a complex process.",
[]
)
]
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.