text stringlengths 957 885k |
|---|
import os
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from models.models_flax import SIREN
from train.standard import fit_image
def multi_tone_fitting(
    model,
    num_samples,
    k_values=None,
    learning_rate=1e-4,
    iters=2000,
    test_train_ratio=2,
    rand_state=0,
):
    """Fit a 1D multi-tone signal with ``model`` and return magnitude spectra.

    A ground-truth signal built from unit-amplitude sine components at the
    frequencies in ``k_values`` is sampled on ``num_samples`` points of
    [0, 1).  The model is trained on every ``test_train_ratio``-th sample
    and evaluated on the full grid; spectra of reconstruction and ground
    truth are returned for both grids.

    Parameters
    ----------
    model : flax module (e.g. SIREN)
        The implicit neural representation to fit.
    num_samples : int
        Number of samples of the signal on [0, 1).
    k_values : list of int, optional
        Sine-component frequencies of the ground-truth signal.
        Defaults to an empty list (zero signal).
    learning_rate : float
        Optimizer learning rate.
    iters : int
        Number of training iterations.
    test_train_ratio : int
        Subsampling stride used to build the train grid from the test grid.
    rand_state : int
        Random seed forwarded to the training routine.

    Returns
    -------
    tuple
        ``(gt_train_ft, rec_train_ft, train_freqs, gt_test_ft,
        rec_test_ft, test_freqs)`` where the ``*_ft`` entries are magnitude
        spectra in dB and the ``*_freqs`` entries are the matching
        fft-shifted frequency axes.
    """
    # Avoid the mutable-default-argument pitfall: [] as a default would be
    # shared across calls.
    if k_values is None:
        k_values = []

    coords = np.linspace(0, 1, num_samples, endpoint=False)
    x_test = np.expand_dims(coords, axis=-1)

    # Ground-truth signal: unit-amplitude sine components at ``k_values``.
    fun_k = generate_signal_with_components_equal_amplitude(
        comp_cos=[], comp_sin=k_values, num_samples=num_samples
    )

    test_data = [x_test, np.expand_dims(fun_k, axis=-1)]
    train_data = [
        x_test[::test_train_ratio, :],
        np.expand_dims(fun_k, axis=-1)[::test_train_ratio, :],
    ]

    @jax.jit
    def model_pred(params, x):
        return model.apply(params, x)

    outputs, get_params = fit_image(
        model,
        train_data,
        test_data,
        "adam",
        batch_size=None,
        learning_rate=learning_rate,
        iters=iters,
        rand_state=rand_state,
        input_dim=1,
    )

    # Spectrum of the final reconstruction on the full (test) grid.
    rec = outputs["pred_imgs"][-1].flatten()
    freq = jnp.fft.fftfreq(len(rec), d=1 / len(rec))
    fft_rec = 20 * jnp.log10(jnp.abs(jnp.fft.fft(rec)))
    rec_test_ft = jnp.fft.fftshift(fft_rec)
    test_freqs = jnp.fft.fftshift(freq)
    gt_test_ft = 20 * jnp.log10(np.fft.fftshift(jnp.abs(jnp.fft.fft(fun_k))))

    # Spectrum of the reconstruction restricted to the train grid.
    rec = model_pred(get_params(outputs["opt_state"]), train_data[0])
    rec = rec.flatten()
    freq = jnp.fft.fftfreq(len(rec), d=1 / len(rec))
    fft_rec = 20 * jnp.log10(jnp.abs(jnp.fft.fft(rec)))
    train_freqs = jnp.fft.fftshift(freq)
    rec_train_ft = jnp.fft.fftshift(fft_rec)
    gt_train_ft = 20 * jnp.log10(
        np.fft.fftshift(jnp.abs(jnp.fft.fft(fun_k[::test_train_ratio].flatten())))
    )

    return gt_train_ft, rec_train_ft, train_freqs, gt_test_ft, rec_test_ft, test_freqs
def generate_signal_with_components_equal_amplitude(
    comp_cos, comp_sin=None, num_samples=256, random_phase=True, random_amplitude=False
):
    """Build a 1D signal as a sum of cosine and sine components.

    The signal is sampled on ``num_samples`` evenly spaced points of
    [0, 1) (endpoint excluded), so an integer frequency ``k`` completes
    exactly ``k`` periods over the sampled interval.

    Parameters
    ----------
    comp_cos : list of int
        Frequencies of the cosine components.
    comp_sin : list of int, optional
        Frequencies of the sine components.  Defaults to an empty list.
    num_samples : int
        Number of samples of the signal.
    random_phase : bool
        If True, each component gets a uniform random phase in [0, 2*pi);
        otherwise the phase is 0.
    random_amplitude : bool
        If True, each component gets a uniform random amplitude in [0, 1);
        otherwise the amplitude is 1.

    Returns
    -------
    numpy.ndarray
        The sampled signal of shape ``(num_samples,)``.
    """
    # Avoid the mutable-default-argument pitfall ([] shared across calls).
    if comp_sin is None:
        comp_sin = []

    coords = np.linspace(0, 1, num_samples, endpoint=False)
    fun_k = np.zeros(num_samples)

    # Cosines first, then sines, matching the original draw order of the
    # random amplitude/phase values.  np.cos/np.sin are used throughout so
    # the signal is computed in float64 instead of being silently truncated
    # to float32 by a jax dispatch.
    for components, basis in ((comp_cos, np.cos), (comp_sin, np.sin)):
        for k in components:
            a_k = np.random.uniform(0, 1) if random_amplitude else 1
            phase = np.random.uniform(0, 2 * np.pi) if random_phase else 0
            fun_k += a_k * basis(2 * np.pi * k * coords + phase)

    return fun_k
if __name__ == "__main__":
    # Experiment setup: a single 23 Hz sine tone, a two-hidden-layer
    # network, and 256 samples on [0, 1).
    K_COMPONENTS = [23]
    NETWORK_SIZE = [128, 128, 1]
    NUM_SAMPLES = 256
    ITERS = 2000
    LEARNING_RATE = 1e-4
    # All figures are written to ./figures/figure_3
    outdir = os.path.join(os.getcwd(), "figures", "figure_3")
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Part I
    # Fit the tone with a SIREN whose first-layer frequency is omega_0 = 30.
    print("Fitting sinusoid with SIREN-30")
    (
        gt_train_ft,
        rec_train_ft,
        train_freqs,
        gt_test_ft,
        rec_test_ft,
        test_freqs,
    ) = multi_tone_fitting(
        SIREN(features=NETWORK_SIZE, first_omega_0=30, hidden_omega_0=30, input_dim=1),
        num_samples=NUM_SAMPLES,
        k_values=K_COMPONENTS,
        iters=ITERS,
        learning_rate=LEARNING_RATE,
        test_train_ratio=2,
    )
    # Train-grid spectrum: reconstruction vs ground truth (SIREN-30).
    sns.set_context("paper", font_scale=3)
    fig, ax = plt.subplots()
    ax.plot(train_freqs, rec_train_ft, label="Rec", linewidth=3)
    ax.plot(train_freqs, gt_train_ft, label="GT", linestyle="dashed", linewidth=3)
    sns.despine()
    # Axis labels are placed with annotate (in axes-fraction coordinates)
    # rather than set_xlabel/set_ylabel to control their exact position.
    ax.annotate(
        "$f$ (Hz)",
        xy=(1.01, 0.03),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    ax.annotate(
        "Magnitude Spectrum (dB)",
        xy=(-0.25, 1.1),
        xytext=(-15, 2),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    # Mark the two spectral peaks of the real-valued 23 Hz tone.
    ax.annotate(
        "$f=23$",
        xy=(0.60, 0.87),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    ax.annotate(
        r"$f=\!\!-\!23$",
        xy=(0.25, 0.87),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    plt.ylim([-130, 90])
    plt.savefig(outdir + "/train_grid_merged_30.pdf", bbox_inches="tight")
    # Test-grid spectrum: reconstruction vs ground truth (SIREN-30).
    sns.set_context("paper", font_scale=3)
    fig, ax = plt.subplots()
    ax.plot(test_freqs, rec_test_ft, label="$f_{\\theta}(r)$", linewidth=3)
    ax.plot(test_freqs, gt_test_ft, label="$g(r)$", linestyle="dashed", linewidth=3)
    plt.ylim(-20, 60)
    ax.annotate(
        "$f$ (Hz)",
        xy=(1.01, 0.03),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    ax.annotate(
        "Magnitude Spectrum (dB)",
        xy=(-0.25, 1.1),
        xytext=(-15, 2),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    ax.annotate(
        "$f=23$",
        xy=(0.55, 0.92),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    ax.annotate(
        r"$f=\!\!-\!23$",
        xy=(0.23, 0.92),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    sns.despine()
    ax.set_yticks([-30, 0, 30])
    plt.legend(loc=(0.8, 0.7))
    plt.savefig(outdir + "/test_grid_merged_30.pdf", bbox_inches="tight")
    # Part II
    # Same experiment with a much larger first-layer frequency
    # (omega_0 = 300), which introduces spurious spectral content.
    print("Fitting sinusoid with SIREN-300")
    (
        gt_train_ft,
        rec_train_ft,
        train_freqs,
        gt_test_ft,
        rec_test_ft,
        test_freqs,
    ) = multi_tone_fitting(
        SIREN(features=NETWORK_SIZE, first_omega_0=300, hidden_omega_0=30, input_dim=1),
        num_samples=NUM_SAMPLES,
        k_values=K_COMPONENTS,
        iters=ITERS,
        learning_rate=LEARNING_RATE,
        test_train_ratio=2,
    )
    # Test-grid spectrum (SIREN-300), highlighting the artifact at +/-105 Hz.
    sns.set_context("paper", font_scale=3)
    fig, ax = plt.subplots()
    ax.plot(test_freqs, rec_test_ft, label="$f_{\\theta}(r)$", linewidth=3)
    ax.plot(test_freqs, gt_test_ft, label="$g(r)$", linestyle="dashed", linewidth=3)
    plt.vlines(105, -20, 35, linestyles="dashed", color="yellowgreen", linewidth=2)
    plt.vlines(-105, -20, 35, linestyles="dashed", color="yellowgreen", linewidth=2)
    plt.ylim(-50, 70)
    ax.annotate(
        "$f$ (Hz)",
        xy=(1.01, 0.03),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    ax.annotate(
        "Magnitude Spectrum (dB)",
        xy=(-0.25, 1.1),
        xytext=(-15, 2),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    ax.annotate(
        "$f=23$",
        xy=(0.55, 0.9),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    ax.annotate(
        r"$f=\!\!-\!23$",
        xy=(0.2, 0.9),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    ax.annotate(
        "$f=105$",
        xy=(0.80, 0.75),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    ax.annotate(
        r"$f=\!\!-\!105$",
        xy=(0.05, 0.75),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    sns.despine()
    ax.set_yticks([0, 20, 40])
    plt.savefig(outdir + "/test_grid_merged_128_300.pdf", bbox_inches="tight")
    # Train-grid spectrum (SIREN-300).
    sns.set_context("paper", font_scale=3)
    fig, ax = plt.subplots()
    ax.plot(train_freqs, rec_train_ft, label="Rec", linewidth=3)
    ax.plot(train_freqs, gt_train_ft, label="GT", linestyle="dashed", linewidth=3)
    sns.despine()
    ax.annotate(
        "$f$ (Hz)",
        xy=(1.01, 0.03),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    ax.annotate(
        "Magnitude Spectrum (dB)",
        xy=(-0.25, 1.1),
        xytext=(-15, 2),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        fontsize=25,
    )
    plt.ylim(-80, 80)
    ax.annotate(
        "$f=23$",
        xy=(0.6, 0.85),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    ax.annotate(
        r"$f=\!\!-\!23$",
        xy=(0.25, 0.85),
        ha="left",
        va="top",
        xycoords="axes fraction",
        textcoords="offset points",
        color="darkgreen",
        fontsize=25,
    )
    plt.savefig(outdir + "/train_grid_merged_128_300.pdf", bbox_inches="tight")
|
"""
This module handles the topological elements of force fields.
"""
from simtk import unit
class TopologyElement(object):
    """
    A wrapper for any topological element.

    Subclasses are expected to define ``_name`` (a human-readable label)
    and ``_writable_attrs`` (the ordered list of attribute names that are
    written to file and exposed through iteration).
    """

    _name = None
    _writable_attrs = []

    class TopologyIterator(object):
        """
        An iterator for topological elements that iterates over their
        attributes in an ordered way.

        It is useful when writing topological elements to file.
        """

        def __init__(self, top_el):
            """
            It initiates a TopologyIterator object.

            Parameters
            ----------
            top_el : a TopologyElement object
                The topology element to iterate on.
            """
            self._index = 0
            self._top_el = top_el

        def __iter__(self):
            """
            It returns itself, as required by the iterator protocol.

            Returns
            -------
            iterator : a TopologyIterator
                This TopologyIterator object
            """
            # Without this method an explicit iterator obtained with
            # iter(element) could not itself be used in a for loop.
            return self

        def __next__(self):
            """
            It returns the next item for the iteration.

            Returns
            -------
            attr_name : str
                The name of the attribute
            attr_value : float
                The value of the attribute
            """
            if self._index == len(self._top_el._writable_attrs):
                raise StopIteration
            attr_name = self._top_el._writable_attrs[self._index]
            attr_value = getattr(self._top_el, attr_name)
            self._index += 1
            return attr_name, attr_value

    @property
    def name(self):
        """
        The name that this topological element has.

        Returns
        -------
        name : str
            The name of the topological element
        """
        return self._name

    @property
    def n_writable_attrs(self):
        """
        The number of writable attributes this topological element has.

        Returns
        -------
        n_writable_attrs : int
            The number of writable attributes
        """
        return len(self._writable_attrs)

    def __iter__(self):
        """
        It returns an instance of the TopologyIterator.

        Returns
        -------
        iterator : a TopologyIterator
            The TopologyIterator object
        """
        return self.TopologyIterator(self)

    def __repr__(self):
        """
        It returns the representation string of this topological element.

        Returns
        -------
        repr_string : str
            The representation string
        """
        # Joining handles the empty-attribute case gracefully; the previous
        # implementation raised an IndexError on ``attrs[-1]`` whenever
        # ``_writable_attrs`` was empty (as it is for the base class).
        attrs = list(self)
        body = ', '.join('{}={}'.format(attr_name, value)
                         for attr_name, value in attrs)
        return '{}({})'.format(self._name, body)

    def __str__(self):
        """
        It returns the readable representation string of this topological
        element.

        Returns
        -------
        str_string : str
            The readable representation string
        """
        return self.__repr__()
class Bond(TopologyElement):
    """
    It represents a bond in the topology.

    A Bond links two atoms through a harmonic potential described by a
    spring constant and an equilibrium distance.
    """

    _name = 'Bond'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'spring_constant', 'eq_dist']

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 spring_constant=None, eq_dist=None):
        """
        It initiates a Bond object.

        Parameters
        ----------
        index : int
            The index of this Bond object
        atom1_idx : int
            The index of the first atom involved in this Bond
        atom2_idx : int
            The index of the second atom involved in this Bond
        spring_constant : simtk.unit.Quantity
            The spring constant of this Bond
        eq_dist : simtk.unit.Quantity
            The equilibrium distance of this Bond
        """
        self._index = index
        self._atom1_idx = atom1_idx
        self._atom2_idx = atom2_idx
        self._spring_constant = spring_constant
        self._eq_dist = eq_dist

    @property
    def index(self):
        """int: The index of this Bond object."""
        return self._index

    @property
    def atom1_idx(self):
        """int: The index of the first atom involved in this Bond."""
        return self._atom1_idx

    @property
    def atom2_idx(self):
        """int: The index of the second atom involved in this Bond."""
        return self._atom2_idx

    @property
    def spring_constant(self):
        """simtk.unit.Quantity: The spring constant of this Bond."""
        return self._spring_constant

    @property
    def eq_dist(self):
        """simtk.unit.Quantity: The equilibrium distance of this Bond."""
        return self._eq_dist

    def set_atom1_idx(self, index):
        """
        It sets atom1's index.

        Parameters
        ----------
        index : int
            The new index of the first atom involved in this Bond
        """
        self._atom1_idx = index

    def set_atom2_idx(self, index):
        """
        It sets atom2's index.

        Parameters
        ----------
        index : int
            The new index of the second atom involved in this Bond
        """
        self._atom2_idx = index
class Angle(TopologyElement):
    """
    It represents an angle in the topology.
    """

    _name = 'Angle'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'atom3_idx',
                       'spring_constant', 'eq_angle']

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 atom3_idx=None, spring_constant=None, eq_angle=None):
        """
        It initiates an Angle object.

        Parameters
        ----------
        index : int
            The index of this Angle object
        atom1_idx : int
            The index of the first atom involved in this Angle
        atom2_idx : int
            The index of the second atom involved in this Angle
        atom3_idx : int
            The index of the third atom involved in this Angle
        spring_constant : simtk.unit.Quantity
            The spring constant of this Angle
        eq_angle : simtk.unit.Quantity
            The equilibrium angle of this Angle
        """
        self._index = index
        self._atom1_idx = atom1_idx
        self._atom2_idx = atom2_idx
        self._atom3_idx = atom3_idx
        self._spring_constant = spring_constant
        self._eq_angle = eq_angle

    def set_atom1_idx(self, index):
        """
        It sets atom1's index.

        Parameters
        ----------
        index : int
            The index of the first atom involved in this Angle
        """
        self._atom1_idx = index

    def set_atom2_idx(self, index):
        """
        It sets atom2's index.

        Parameters
        ----------
        index : int
            The index of the second atom involved in this Angle
        """
        self._atom2_idx = index

    def set_atom3_idx(self, index):
        """
        It sets atom3's index.

        Parameters
        ----------
        index : int
            The index of the third atom involved in this Angle
        """
        self._atom3_idx = index

    @property
    def index(self):
        """
        Angle's index.

        Returns
        -------
        index : int
            The index of this Angle object
        """
        return self._index

    @property
    def atom1_idx(self):
        """
        Angle's atom1 index.

        Returns
        -------
        atom1_idx : int
            The index of the first atom involved in this Angle object
        """
        return self._atom1_idx

    @property
    def atom2_idx(self):
        """
        Angle's atom2 index.

        Returns
        -------
        atom2_idx : int
            The index of the second atom involved in this Angle object
        """
        return self._atom2_idx

    @property
    def atom3_idx(self):
        """
        Angle's atom3 index.

        Returns
        -------
        atom3_idx : int
            The index of the third atom involved in this Angle object
        """
        return self._atom3_idx

    @property
    def spring_constant(self):
        """
        Angle's spring constant.

        Returns
        -------
        spring_constant : simtk.unit.Quantity
            The spring constant of this Angle object
        """
        return self._spring_constant

    @property
    def eq_angle(self):
        """
        Angle's equilibrium angle.

        Returns
        -------
        eq_angle : simtk.unit.Quantity
            The equilibrium angle of this Angle object
        """
        return self._eq_angle
class Dihedral(TopologyElement):
    """
    It represents a dihedral in the topology.

    It can be a proper or an improper dihedral.
    """

    _name = 'Dihedral'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'atom3_idx', 'atom4_idx',
                       'constant', 'prefactor', 'periodicity']

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 atom3_idx=None, atom4_idx=None, periodicity=None,
                 prefactor=None, constant=None):
        """
        It initiates an Dihedral object.

        Parameters
        ----------
        index : int
            The index of this Dihedral object
        atom1_idx : int
            The index of the first atom involved in this Dihedral
        atom2_idx : int
            The index of the second atom involved in this Dihedral
        atom3_idx : int
            The index of the third atom involved in this Dihedral
        atom4_idx : int
            The index of the fourth atom involved in this Dihedral
        periodicity : int
            The periodicity of this Dihedral
        prefactor : int
            The prefactor of this Dihedral
        constant : simtk.unit.Quantity
            The constant of this Dihedral
        """
        self._index = index
        self._atom1_idx = atom1_idx
        self._atom2_idx = atom2_idx
        self._atom3_idx = atom3_idx
        self._atom4_idx = atom4_idx
        self._periodicity = periodicity
        self._prefactor = prefactor
        self._constant = constant

    def set_atom1_idx(self, index):
        """
        It sets atom1's index.

        Parameters
        ----------
        index : int
            The index of the first atom involved in this Dihedral
        """
        self._atom1_idx = index

    def set_atom2_idx(self, index):
        """
        It sets atom2's index.

        Parameters
        ----------
        index : int
            The index of the second atom involved in this Dihedral
        """
        self._atom2_idx = index

    def set_atom3_idx(self, index):
        """
        It sets atom3's index.

        Parameters
        ----------
        index : int
            The index of the third atom involved in this Dihedral
        """
        self._atom3_idx = index

    def set_atom4_idx(self, index):
        """
        It sets atom4's index.

        Parameters
        ----------
        index : int
            The index of the fourth atom involved in this Dihedral
        """
        self._atom4_idx = index

    def plot(self):
        """
        It plots this Dihedral as a function of phi angle.
        """
        # Imported locally so plotting remains an optional capability.
        from matplotlib import pyplot
        import numpy as np
        x = unit.Quantity(np.arange(0, np.pi, 0.1), unit=unit.radians)
        # Torsion energy: constant * (1 + prefactor * cos(periodicity * phi))
        pyplot.plot(x, self.constant * (1 + self.prefactor
                                        * np.cos(self.periodicity * x)),
                    'r--')
        pyplot.show()

    @property
    def index(self):
        """
        Dihedral's index.

        Returns
        -------
        index : int
            The index of this Dihedral object
        """
        return self._index

    @property
    def atom1_idx(self):
        """
        Dihedral's atom1 index.

        Returns
        -------
        atom1_idx : int
            The index of the first atom involved in this Dihedral object
        """
        return self._atom1_idx

    @property
    def atom2_idx(self):
        """
        Dihedral's atom2 index.

        Returns
        -------
        atom2_idx : int
            The index of the second atom involved in this Dihedral object
        """
        return self._atom2_idx

    @property
    def atom3_idx(self):
        """
        Dihedral's atom3 index.

        Returns
        -------
        atom3_idx : int
            The index of the third atom involved in this Dihedral object
        """
        return self._atom3_idx

    @property
    def atom4_idx(self):
        """
        Dihedral's atom4 index.

        Returns
        -------
        atom4_idx : int
            The index of the fourth atom involved in this Dihedral object
        """
        return self._atom4_idx

    @property
    def periodicity(self):
        """
        Dihedral's periodicity.

        Returns
        -------
        periodicity : int
            The periodicity this Dihedral object
        """
        return self._periodicity

    @property
    def prefactor(self):
        """
        Dihedral's prefactor.

        Returns
        -------
        prefactor : int
            The prefactor this Dihedral object
        """
        return self._prefactor

    @property
    def constant(self):
        """
        Dihedral's constant.

        Returns
        -------
        constant : simtk.unit.Quantity
            The constant this Dihedral object
        """
        return self._constant
class Proper(Dihedral):
    """
    It represents a proper dihedral in the topology.
    """

    _name = 'Proper'
    # When True, this proper dihedral must be left out of PELE's 1-4 list.
    exclude = False

    def exclude_from_14_list(self):
        """
        It marks this proper dihedral to be excluded from PELE's 1-4 list.

        Note that this method only raises the ``exclude`` flag; the
        convention of writing the third atom's index as negative is
        presumably applied later by the topology writer — confirm.
        """
        self.exclude = True
class Improper(Dihedral):
    """
    It represents an improper dihedral in the topology.

    It shares all attributes and behavior with Dihedral; only the name
    label differs.
    """

    _name = 'Improper'
class OFFDihedral(TopologyElement):
    """
    It represents a dihedral in the Open Force Field's topology.
    """

    _name = 'OFFDihedral'
    _writable_attrs = ['atom1_idx', 'atom2_idx', 'atom3_idx', 'atom4_idx',
                       'periodicity', 'phase', 'k', 'idivf']
    # Target class used by to_PELE(); subclasses override it.
    _to_PELE_class = Dihedral

    def __init__(self, index=-1, atom1_idx=None, atom2_idx=None,
                 atom3_idx=None, atom4_idx=None, periodicity=None,
                 phase=None, k=None, idivf=None):
        """
        It initiates an OFFDihedral object.

        Parameters
        ----------
        index : int
            The index of this Dihedral object
        atom1_idx : int
            The index of the first atom involved in this Dihedral
        atom2_idx : int
            The index of the second atom involved in this Dihedral
        atom3_idx : int
            The index of the third atom involved in this Dihedral
        atom4_idx : int
            The index of the fourth atom involved in this Dihedral
        periodicity : int
            The periodicity of this Dihedral
        phase : simtk.unit.Quantity
            The phase of this Dihedral
        k : simtk.unit.Quantity
            The constant of this Dihedral
        idivf : int
            The idivf of this Dihedral
        """
        self.index = index
        self.atom1_idx = atom1_idx
        self.atom2_idx = atom2_idx
        self.atom3_idx = atom3_idx
        self.atom4_idx = atom4_idx
        self.periodicity = periodicity
        self.phase = phase
        self.k = k
        self.idivf = idivf

    def to_PELE(self):
        """
        It converts this Open Force Field Dihedral object into a
        PELE-compatible one.

        Returns None when any of periodicity, phase, k or idivf is unset.

        .. todo ::

           * Review doublecheck idivf term in OFF's torsion equation

        Returns
        -------
        PELE_dihedral : a Dihedral
            The PELE-compatible Dihedral object
        """
        if (self.periodicity is None or self.phase is None
                or self.k is None or self.idivf is None):
            return None

        # NOTE(review): assert-based validation is stripped when Python runs
        # with -O; consider raising explicit exceptions instead.
        assert self.periodicity in (1, 2, 3, 4, 6), 'Expected values for ' \
            'periodicity are 1, 2, 3, 4 or 6, obtained ' \
            '{}'.format(self.periodicity)
        assert self.phase.value_in_unit(unit.degree) in (0, 180), \
            'Expected values for phase are 0 or 180, obtained ' \
            '{}'.format(self.phase)
        # idivf can take values other than 1 in case of impropers
        # proper's idivfs must always be 1
        # assert self.idivf == 1, 'The expected value for idivf is 1, ' \
        #     'obtained {}'.format(self.idivf)

        # A 180-degree phase flips the sign of the cosine term in PELE's form.
        if self.phase.value_in_unit(unit.degree) == 180:
            PELE_prefactor = -1
        else:
            PELE_prefactor = 1

        PELE_constant = self.k / self.idivf

        PELE_dihedral_kwargs = {'index': self.index,
                                'atom1_idx': self.atom1_idx,
                                'atom2_idx': self.atom2_idx,
                                'atom3_idx': self.atom3_idx,
                                'atom4_idx': self.atom4_idx,
                                'periodicity': self.periodicity,
                                'prefactor': PELE_prefactor,
                                'constant': PELE_constant}

        return self._to_PELE_class(**PELE_dihedral_kwargs)

    def plot(self):
        """
        It plots this Dihedral as a function of phi angle.
        """
        # Imported locally so plotting remains an optional capability.
        from matplotlib import pyplot
        import numpy as np
        x = unit.Quantity(np.arange(0, np.pi, 0.1), unit=unit.radians)
        # OFF torsion energy: k * (1 + cos(periodicity * phi - phase))
        pyplot.plot(x,
                    self.k * (1 + np.cos(self.periodicity * x - self.phase)),
                    'r--')
        pyplot.show()
class OFFProper(OFFDihedral):
    """
    It represents a proper dihedral in the Open Force Field's topology.

    to_PELE() converts it into a PELE-compatible Proper.
    """

    _name = 'OFFProper'
    _to_PELE_class = Proper
class OFFImproper(OFFDihedral):
    """
    It represents an improper dihedral in the Open Force Field's topology.

    to_PELE() converts it into a PELE-compatible Improper.
    """

    _name = 'OFFImproper'
    _to_PELE_class = Improper
|
<gh_stars>0
from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
import KratosMultiphysics as KM
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.CoSimulationApplication.coupling_interface_data import CouplingInterfaceData
from KratosMultiphysics.CoSimulationApplication.co_simulation_tools import UsingPyKratos
# Whether the tests run under pyKratos (which lacks some features, e.g.
# Conditions) instead of the full Kratos build.
using_pykratos = UsingPyKratos()

# The expected definitions are here to make the handling of the
# multiline-strings easier (no need to deal with indentation)
# NOTE(review): any indentation originally embedded in this expected string
# may have been lost in this copy — verify against the output of
# CouplingInterfaceData.__str__.
coupling_interface_data_str = '''CouplingInterfaceData:
Name: "default"
SolverWrapper: "default_solver"
ModelPart: "mp_4_test"
IsDistributed: False
Variable: "DISPLACEMENT" (Vector with dimension: 2)
Location: "node_historical"
Size: 10
'''

# Reference values the tests write to / read back from the ModelPart.
model_part_scalar_value = -61.225
model_part_vector_value = [123.5, 54.9, -92.4]
model_part_scalar_value_2 = 9745.34
model_part_vector_value_2 = [-556.3, -334.2, 65.9]
class TestCouplingInterfaceData(KratosUnittest.TestCase):
def setUp(self):
self.model = KM.Model()
self.mp = self.model.CreateModelPart("mp_4_test", 2)
self.mp.AddNodalSolutionStepVariable(KM.PRESSURE)
self.mp.AddNodalSolutionStepVariable(KM.DISPLACEMENT)
self.mp.ProcessInfo[KM.DOMAIN_SIZE] = 2
num_nodes = 5
num_elems = 7
num_conds = 3
props = self.mp.CreateNewProperties(2)
for i in range(num_nodes):
node_id = i+1
node = self.mp.CreateNewNode(node_id, 0.0, 0.0, i+1)
node.SetSolutionStepValue(KM.PRESSURE, 0, NodeScalarHistValueCurrent(node_id))
node.SetSolutionStepValue(KM.DISPLACEMENT, 0, NodeVectorHistValueCurrent(node_id))
node.SetSolutionStepValue(KM.PRESSURE, 1, NodeScalarHistValuePrevious(node_id))
node.SetSolutionStepValue(KM.DISPLACEMENT, 1, NodeVectorHistValuePrevious(node_id))
node.SetValue(KM.TEMPERATURE, NodeScalarNonHistValue(node_id))
node.SetValue(KM.VELOCITY, NodeVectorNonHistValue(node_id))
for i in range(num_elems):
elem_id = i+1
elem = self.mp.CreateNewElement("Element2D2N", elem_id, [1,2], props)
elem.SetValue(KM.DENSITY, ElementScalarValue(elem_id))
elem.SetValue(KM.FORCE, ElementVectorValue(elem_id))
if not using_pykratos: # pyKratos does not have Conditions
for i in range(num_conds):
cond_id = i+1
cond = self.mp.CreateNewCondition("LineCondition2D2N", cond_id, [1,2], props)
cond.SetValue(KM.YOUNG_MODULUS, ConditionScalarValue(cond_id))
cond.SetValue(KM.ROTATION, ConditionVectorValue(cond_id))
self.mp[KM.NODAL_MASS] = model_part_scalar_value
self.mp[KM.TORQUE] = model_part_vector_value
def test_basics(self):
settings_scal_hist = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "PRESSURE"
}""")
settings_vec_elem = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "DISPLACEMENT",
"location" : "element",
"dimension" : 2
}""")
coupling_data_scal = CouplingInterfaceData(settings_scal_hist, self.model)
coupling_data_scal.Initialize()
coupling_data_vec = CouplingInterfaceData(settings_vec_elem, self.model)
coupling_data_vec.Initialize()
self.assertEqual(coupling_data_scal.GetModelPart().Name, "mp_4_test")
self.assertEqual(coupling_data_vec.GetModelPart().Name, "mp_4_test")
self.assertFalse(coupling_data_scal.GetModelPart().IsDistributed())
self.assertFalse(coupling_data_vec.GetModelPart().IsDistributed())
self.assertEqual(coupling_data_scal.Size(), 5) # 5 nodes and scalar var
self.assertEqual(coupling_data_vec.Size(), 14) # 7 elements and vector var with dim==2
self.assertEqual(coupling_data_scal.GetBufferSize(), 2)
self.assertEqual(coupling_data_vec.GetBufferSize(), 1)
def test_printing(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "DISPLACEMENT",
"dimension" : 2
}""")
coupling_data = CouplingInterfaceData(settings, self.model)
coupling_data.Initialize()
self.assertMultiLineEqual(str(coupling_data), coupling_interface_data_str)
def test_without_initialization(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "DISPLACEMENT",
"dimension" : 2
}""")
coupling_data = CouplingInterfaceData(settings, self.model)
# coupling_data.Initialize() # intentially commented to raise error
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
self.assertMultiLineEqual(str(coupling_data), coupling_interface_data_str)
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
coupling_data.PrintInfo()
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
coupling_data.GetModelPart()
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
coupling_data.IsDistributed()
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
coupling_data.Size()
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
coupling_data.GetBufferSize()
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
coupling_data.GetData()
with self.assertRaisesRegex(Exception, ' can onyl be called after initializing the CouplingInterfaceData!'):
coupling_data.SetData([])
def test_unallowed_names(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "PRESSURE"
}""")
with self.assertRaisesRegex(Exception, 'The name cannot be empty, contain whitespaces or "."!'):
CouplingInterfaceData(settings, self.model, "")
with self.assertRaisesRegex(Exception, 'The name cannot be empty, contain whitespaces or "."!'):
CouplingInterfaceData(settings, self.model, "aaa.bbbb")
with self.assertRaisesRegex(Exception, 'The name cannot be empty, contain whitespaces or "."!'):
CouplingInterfaceData(settings, self.model, "aaa bbb")
def test_var_does_not_exist(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "var_that_hopefully_none_will_ever_create_otherwise_this_test_will_be_wrong"
}""")
with self.assertRaisesRegex(Exception, 'does not exist!'):
CouplingInterfaceData(settings, self.model)
def test_wrong_input_dim_scalar(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "PRESSURE",
"dimension" : 2
}""")
coupling_data = CouplingInterfaceData(settings, self.model)
with self.assertRaisesRegex(Exception, '"dimension" cannot be specifed for scalar variables!'):
coupling_data.Initialize()
def test_wrong_input_no_dim_vector(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "DISPLACEMENT"
}""")
coupling_data = CouplingInterfaceData(settings, self.model)
with self.assertRaisesRegex(Exception, '"dimension" has to be specifed for vector variables!'):
coupling_data.Initialize()
def test_wrong_input_variable_type(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "EXTERNAL_FORCES_VECTOR"
}""")
exp_error = 'The input for "variable" "EXTERNAL_FORCES_VECTOR" is of variable type "Vector" which is not allowed, only the following variable types are allowed:\nBool, Integer, Unsigned Integer, Double, Array'
with self.assertRaisesRegex(Exception, exp_error):
CouplingInterfaceData(settings, self.model)
def test_wrong_input_location(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "PRESSURE",
"location" : "dummy"
}""")
exp_error = '"dummy" is not allowed as "location", only the following options are possible:\nnode_historical, node_non_historical, element, condition, model_part'
with self.assertRaisesRegex(Exception, exp_error):
CouplingInterfaceData(settings, self.model)
def test_wrong_input_set_data(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "PRESSURE"
}""")
settings_model_part = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "PRESSURE",
"location" : "model_part"
}""")
coupling_data = CouplingInterfaceData(settings, self.model)
coupling_data.Initialize()
coupling_data_mp = CouplingInterfaceData(settings_model_part, self.model)
coupling_data_mp.Initialize()
wrong_data = [1,2,3]
correct_data = [1,2,3,4,5]
with self.assertRaisesRegex(Exception, "The sizes of the data are not matching, got: 3, expected: 5"):
coupling_data.SetData(wrong_data)
with self.assertRaisesRegex(Exception, "The buffer-size is not large enough: current buffer size: 2 | requested solution_step_index: 3"):
coupling_data.SetData(correct_data, 2)
with self.assertRaisesRegex(Exception, "accessing data from previous steps is only possible with historical nodal data!"):
coupling_data_mp.SetData(correct_data, 2)
def test_wrong_input_dim_array(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "DISPLACEMENT",
"dimension" : 4
}""")
exp_error = '"dimension" can only be 1,2,3 when using variables of type "Array"'
coupling_data = CouplingInterfaceData(settings, self.model)
with self.assertRaisesRegex(Exception, exp_error):
coupling_data.Initialize()
def test_wrong_input_missing_solutionstepvar(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "FORCE",
"dimension" : 2
}""")
exp_error = '"FORCE" is missing as SolutionStepVariable in ModelPart "mp_4_test"'
coupling_data = CouplingInterfaceData(settings, self.model)
with self.assertRaisesRegex(Exception, exp_error):
coupling_data.Initialize()
def test_wrong_input_missing_solutionstepvar_component(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "FORCE_X"
}""")
exp_error = '"FORCE_X" is missing as SolutionStepVariable in ModelPart "mp_4_test"'
coupling_data = CouplingInterfaceData(settings, self.model)
with self.assertRaisesRegex(Exception, exp_error):
coupling_data.Initialize()
def test_wrong_input_missing_solutionstepvar_double(self):
settings = KM.Parameters("""{
"model_part_name" : "mp_4_test",
"variable_name" : "TEMPERATURE"
}""")
exp_error = '"TEMPERATURE" is missing as SolutionStepVariable in ModelPart "mp_4_test"'
coupling_data = CouplingInterfaceData(settings, self.model)
with self.assertRaisesRegex(Exception, exp_error):
coupling_data.Initialize()
def test_non_existing_model_part(self):
settings = KM.Parameters("""{
"model_part_name" : "something",
"variable_name" : "PRESSURE",
"location" : "node_non_historical"
}""")
coupling_data = CouplingInterfaceData(settings, self.model)
with self.assertRaisesRegex(Exception, "The specified ModelPart is not in the Model, only the following ModelParts are available:"):
coupling_data.Initialize()
def test_GetHistoricalVariableDict(self):
    """Historical data exposes its variable in the dict; non-historical data does not."""
    params = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "PRESSURE"
    }""")
    hist_data = CouplingInterfaceData(params, self.model)
    expected = {"mp_4_test" : KM.PRESSURE}
    result = hist_data.GetHistoricalVariableDict()
    self.assertEqual(len(expected), len(result))
    self.assertEqual(expected["mp_4_test"].Name(), result["mp_4_test"].Name())

    # this should not give anything since there are no historical variables in this case
    params_elem = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "PRESSURE",
        "location" : "element"
    }""")
    elem_data = CouplingInterfaceData(params_elem, self.model)
    self.assertEqual(0, len(elem_data.GetHistoricalVariableDict()))
def test_GetSetNodalHistoricalData(self):
    """GetData/SetData round-trip on nodal historical data, current and previous step."""
    params_scal = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "PRESSURE"
    }""")
    params_vec = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "DISPLACEMENT",
        "dimension" : 2
    }""")

    data_scal = CouplingInterfaceData(params_scal, self.model)
    data_vec = CouplingInterfaceData(params_vec, self.model)
    data_scal.Initialize()
    data_vec.Initialize()

    # 1. check the initial values
    self.__CheckData([NodeScalarHistValueCurrent(node.Id) for node in self.mp.Nodes], data_scal.GetData())
    self.__CheckData(GetVectorValues(self.mp.Nodes, NodeVectorHistValueCurrent, 2), data_vec.GetData())
    self.__CheckData([NodeScalarHistValuePrevious(node.Id) for node in self.mp.Nodes], data_scal.GetData(1))
    self.__CheckData(GetVectorValues(self.mp.Nodes, NodeVectorHistValuePrevious, 2), data_vec.GetData(1))

    # 2. check setting and getting works
    self.__CheckSetGetData([ElementScalarValue(node.Id) for node in self.mp.Nodes], data_scal)
    self.__CheckSetGetData(GetVectorValues(self.mp.Nodes, ElementVectorValue, 2), data_vec)
    self.__CheckSetGetData([ConditionScalarValue(node.Id) for node in self.mp.Nodes], data_scal, 1)
    self.__CheckSetGetData(GetVectorValues(self.mp.Nodes, ConditionVectorValue, 2), data_vec, 1)
def test_GetSetNodalNonHistoricalData(self):
    """GetData/SetData round-trip on nodal non-historical data."""
    params_scal = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "location" : "node_non_historical",
        "variable_name" : "TEMPERATURE"
    }""")
    params_vec = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "VELOCITY",
        "location" : "node_non_historical",
        "dimension" : 3
    }""")

    self.mp.ProcessInfo[KM.DOMAIN_SIZE] = 3

    data_scal = CouplingInterfaceData(params_scal, self.model)
    data_vec = CouplingInterfaceData(params_vec, self.model)
    data_scal.Initialize()
    data_vec.Initialize()

    # 1. check the initial values
    self.__CheckData([NodeScalarNonHistValue(node.Id) for node in self.mp.Nodes], data_scal.GetData())
    self.__CheckData(GetVectorValues(self.mp.Nodes, NodeVectorNonHistValue, 3), data_vec.GetData())

    # 2. check setting and getting works
    self.__CheckSetGetData([ConditionScalarValue(node.Id) for node in self.mp.Nodes], data_scal)
    self.__CheckSetGetData(GetVectorValues(self.mp.Nodes, ConditionVectorValue, 3), data_vec)
def test_GetSetElementalData(self):
    """GetData/SetData round-trip on elemental data."""
    params_scal = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "location" : "element",
        "variable_name" : "DENSITY"
    }""")
    params_vec = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "FORCE",
        "location" : "element",
        "dimension" : 2
    }""")

    data_scal = CouplingInterfaceData(params_scal, self.model)
    data_vec = CouplingInterfaceData(params_vec, self.model)
    data_scal.Initialize()
    data_vec.Initialize()

    # 1. check the initial values
    self.__CheckData([ElementScalarValue(elem.Id) for elem in self.mp.Elements], data_scal.GetData())
    self.__CheckData(GetVectorValues(self.mp.Elements, ElementVectorValue, 2), data_vec.GetData())

    # 2. check setting and getting works
    self.__CheckSetGetData([NodeScalarNonHistValue(elem.Id) for elem in self.mp.Elements], data_scal)
    self.__CheckSetGetData(GetVectorValues(self.mp.Elements, NodeVectorNonHistValue, 2), data_vec)
def test_GetSetConditionalData(self):
    """GetData/SetData round-trip on conditional data (not supported by pyKratos)."""
    if using_pykratos:
        self.skipTest("This test cannot be run with pyKratos!")

    params_scal = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "location" : "condition",
        "variable_name" : "YOUNG_MODULUS"
    }""")
    params_vec = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "ROTATION",
        "location" : "condition",
        "dimension" : 2
    }""")

    data_scal = CouplingInterfaceData(params_scal, self.model)
    data_vec = CouplingInterfaceData(params_vec, self.model)
    data_scal.Initialize()
    data_vec.Initialize()

    # 1. check the initial values
    self.__CheckData([ConditionScalarValue(cond.Id) for cond in self.mp.Conditions], data_scal.GetData())
    self.__CheckData(GetVectorValues(self.mp.Conditions, ConditionVectorValue, 2), data_vec.GetData())

    # 2. check setting and getting works
    self.__CheckSetGetData([NodeScalarHistValuePrevious(cond.Id) for cond in self.mp.Conditions], data_scal)
    self.__CheckSetGetData(GetVectorValues(self.mp.Conditions, NodeVectorHistValuePrevious, 2), data_vec)
def test_GetSetModelPartData(self):
    """GetData/SetData round-trip on ModelPart-level data."""
    params_scal = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "location" : "model_part",
        "variable_name" : "NODAL_MASS"
    }""")
    params_vec = KM.Parameters("""{
        "model_part_name" : "mp_4_test",
        "variable_name" : "TORQUE",
        "location" : "model_part",
        "dimension" : 1
    }""")

    self.mp.ProcessInfo[KM.DOMAIN_SIZE] = 1

    data_scal = CouplingInterfaceData(params_scal, self.model)
    data_vec = CouplingInterfaceData(params_vec, self.model)
    data_scal.Initialize()
    data_vec.Initialize()

    # 1. check the initial values
    self.__CheckData([model_part_scalar_value], data_scal.GetData())
    self.__CheckData([model_part_vector_value[0]], data_vec.GetData())

    # 2. check setting and getting works
    self.__CheckSetGetData([model_part_scalar_value_2], data_scal)
    self.__CheckSetGetData([model_part_vector_value_2[0]], data_vec)
def __CheckData(self, exp_data, data):
    """Compare two sequences entry-by-entry (with floating point tolerance)."""
    self.assertEqual(len(exp_data), len(data))
    for expected, actual in zip(exp_data, data):
        self.assertAlmostEqual(expected, actual)
def __CheckSetGetData(self, the_data, coupling_data, solution_step_index=0):
    """Set data on the CouplingInterfaceData, read it back and compare.

    Deliberately exercises both call signatures (with and without the
    solution step index).
    """
    if solution_step_index > 0:
        coupling_data.SetData(the_data, solution_step_index)
        read_back = coupling_data.GetData(solution_step_index)
    else:
        coupling_data.SetData(the_data)
        read_back = coupling_data.GetData()
    self.__CheckData(the_data, read_back)
def GetVectorValues(container, fct_ptr, dim):
    """Flatten the first *dim* components of fct_ptr(entity.Id) over *container*."""
    return [fct_ptr(entity.Id)[i] for entity in container for i in range(dim)]
def NodeScalarHistValueCurrent(the_id):
    """Deterministic current-step historical scalar value for entity *the_id*."""
    return 1.5 * the_id
def NodeScalarHistValuePrevious(the_id):
    """Deterministic previous-step historical scalar value for entity *the_id*."""
    return 123.7 * the_id
def NodeVectorHistValueCurrent(the_id):
    """Deterministic current-step historical vector value for entity *the_id*."""
    base = the_id * 1.1
    return [base, base + 2.5, the_id * 2.3]
def NodeVectorHistValuePrevious(the_id):
    """Deterministic previous-step historical vector value for entity *the_id*."""
    return [the_id * 1.4 - 0.4,
            the_id * 10.4 + 2.5,
            the_id * 5.3]
def NodeScalarNonHistValue(the_id):
    """Deterministic non-historical scalar value for entity *the_id*."""
    return 6.2 + 2.1 * the_id
def NodeVectorNonHistValue(the_id):
    """Deterministic non-historical vector value for entity *the_id*."""
    return [the_id * 14.7,
            the_id * 19.2 - 10.5,
            the_id * 303.9]
def ElementScalarValue(the_id):
    """Deterministic elemental scalar value for entity *the_id*."""
    return 1.1 + 6.4 * the_id
def ElementVectorValue(the_id):
    """Deterministic elemental vector value for entity *the_id*."""
    return [the_id * 11.7,
            the_id * 12.2 - 120.5,
            the_id * 3.9]
def ConditionScalarValue(the_id):
    """Deterministic conditional scalar value for entity *the_id*."""
    return 15.8 + 14.5 * the_id
def ConditionVectorValue(the_id):
    """Deterministic conditional vector value for entity *the_id*."""
    return [the_id * 65.7,
            the_id * 12.1,
            the_id + 3.9]
# Allow running this test file directly (outside of the test-suite runner).
if __name__ == '__main__':
    KratosUnittest.main()
|
import requests
import json
from requests.auth import HTTPBasicAuth
class FodyError(Exception):
    """Base class for all errors raised by this module."""
class UnknownHandler(FodyError):
    """Raised when a query targets a handler that fody does not know."""
class HTTPError(FodyError):
    """Raised when fody answers with a non-200 HTTP status."""
class UnexpectedParameter(FodyError):
    """Raised when a caller-supplied parameter is malformed."""
class IMQFody:
    """Thin client for the fody REST API (contactdb / events / tickets / checkticket)."""

    def __init__(self, url, username, password, sslverify=True):
        """
        :param url: base url of the fody instance (trailing slashes are stripped)
        :param username: login user name
        :param password: login password
        :param sslverify: whether to verify the TLS certificate (default True)
        """
        self._url = url.rstrip('/')
        self._session = requests.session()
        self._session.verify = sslverify
        self._credentials = username, password
        self._login()

    def __enter__(self):
        # FIX: __exit__ existed without __enter__, so the object could not
        # actually be used in a "with" statement.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._session.close()

    def _login(self):
        """Implement new token based auth."""
        response = self._session.post(
            f"{self._url}/api/login", data={
                "username": self._credentials[0],
                # FIX: was a corrupted "<PASSWORD>[1]" placeholder (a syntax
                # error); send the password from the stored credentials tuple.
                "password": self._credentials[1]
            }
        )
        if response.status_code != 200:
            raise HTTPError(f"Fody returned invalid HTTP response: {response.status_code} - {response.text}")
        response_data = response.json()
        if "login_token" not in response_data:
            # FIX: no placeholders, so no f-string needed
            raise KeyError("Fody API should have returned a login token, but hasn't.")
        self._session.headers.update({"Authorization": response_data["login_token"]})

    def _search(self, handler, endpoint, query):
        """Generic search method to build queries.

        :param handler: Handler on fody side: [contactdb, events, tickets, checkticket]
        :param endpoint: Specific endpoint; e.g. searchasn, searchorg etc.
        :param query: Input used for querying fody.
        :raises UnknownHandler: if *handler* is not a known fody handler
        :raises HTTPError: if fody answers with a non-200 status
        """
        if handler not in ['contactdb', 'events', 'tickets', 'checkticket']:
            raise UnknownHandler('Handler must be one of [contactdb, events, tickets, checkticket].')
        # NOTE(review): this sends the query in the GET request *body*
        # (data=), which is what the fody API apparently expects — confirm,
        # as most APIs would take URL parameters (params=) instead.
        response = self._session.get('{}/api/{}/{}'.format(self._url, handler, endpoint), data=query)
        if response.status_code != 200:
            raise HTTPError(f"Fody returned {response.status_code}: {response.text}")
        dict_response = json.loads(response.text)
        response.close()
        return dict_response

    def _get_contacts_from_id_list(self, ids):
        """
        Get organisations by their ids, iterate over auto and manual contacts.
        :param ids: dictionary containing auto and manual ids
        :return: list of contacts
        """
        contacts = []
        for manual in ids['manual']:
            contacts.append(self._search('contactdb', 'org/manual/{}'.format(manual), {}))
        for auto in ids['auto']:
            contacts.append(self._search('contactdb', 'org/auto/{}'.format(auto), {}))
        return contacts

    def get_api_documentation(self):
        """Querying the base url returns the documentation"""
        # FIX: json.loads() cannot parse a requests.Response object;
        # parse the response body text instead.
        return json.loads(self._session.get(self._url).text)

    # #################
    # ContactDB queries
    def ping(self):
        """
        Ping contactdb
        :return: dict
        """
        return self._search('contactdb', 'ping', {})

    def search_asn(self, asn):
        """
        Search in contactdb using an asn number
        :param asn: asn number as string
        :return: dict
        """
        result = self._search('contactdb', 'searchasn', {'asn': asn})
        return self._get_contacts_from_id_list(result)

    def search_org(self, name):
        """
        Search in contactdb using an organisation name
        :param name: organisation name to search for
        :return: dict
        """
        result = self._search('contactdb', 'searchorg', {'name': name})
        return self._get_contacts_from_id_list(result)

    def search_email(self, email):
        """
        Search in contactdb for an email
        :param email: email as string
        :return: dict
        """
        result = self._search('contactdb', 'searchcontact', {'email': email})
        return self._get_contacts_from_id_list(result)

    def search_cidr(self, cidr):
        """
        Search in contactdb using the cidr
        :param cidr: cidr as string
        :return: dict
        """
        result = self._search('contactdb', 'searchcidr', {'address': cidr})
        return self._get_contacts_from_id_list(result)

    def search_ip(self, ip):
        """
        Wrapper for search_cidr
        :param ip: ip as str
        :return: dict
        """
        return self.search_cidr(ip)

    def search_fqdn(self, fqdn):
        """
        Search in the contactdb for an fqdn.
        :param fqdn: fqdn as str
        :return: dict
        """
        result = self._search('contactdb', 'searchfqdn', {'domain': fqdn})
        return self._get_contacts_from_id_list(result)

    def search_national(self, cc):
        """
        Search through contactdb using a 2-3 letter country code
        :param cc: 2 to 3 letter Country code
        :return: dict
        :raises UnexpectedParameter: if *cc* has the wrong length
        """
        if len(cc) < 2 or len(cc) > 3:
            raise UnexpectedParameter('Country code should be 2 or 3 letters long.')
        # FIX: was a set literal {'countrycode', cc}; the query must be a dict.
        result = self._search('contactdb', 'searchnational', {'countrycode': cc})
        return self._get_contacts_from_id_list(result)

    # #############
    # Event queries
    def get_event(self, id):
        """
        Retrieve event by id
        :param id: event id int
        :return: dict
        """
        response = self._session.get('{}/api/events'.format(self._url), data={'id': id})
        if response.status_code == 200:
            return response.json()[0]
        raise HTTPError('Statuscode {} while getting event by id.'.format(response.status_code))

    def get_event_subqueries(self):
        """
        Return dictionary of event subqueries
        :return: dict
        """
        return self._search('events', 'subqueries', {})

    def search_event(self, subquery):
        """
        Search for events by a subquery.
        :param subquery: dict subquery
        :return: dict
        """
        return self._search('events', 'search', subquery)

    def get_event_stats(self, subquery):
        """
        Returns distribution of events for a given subquery
        :param subquery: dict subquery
        :return: dict
        """
        return self._search('events', 'stats', subquery)

    def export_events(self, subquery):
        """
        Exports events matching the subquery
        :param subquery: dict subquery
        :return: dict
        """
        return self._search('events', 'export', subquery)

    # ##############
    # Ticket queries
    def get_ticket(self, id):
        """
        Get ticket by id
        :param id: ticket id
        :return: dict
        """
        response = self._session.get('{}/api/tickets'.format(self._url), data={'id': id})
        if response.status_code == 200:
            return response.json()
        raise HTTPError('Statuscode {} while getting ticket by id.'.format(response.status_code))

    def get_ticket_subqueries(self):
        """
        Returns a dict of subqueries.
        :return: dict
        """
        return self._search('tickets', 'subqueries', {})

    def search_ticket(self, subquery):
        """
        Search for tickets matching the subquery
        :param subquery: dict subquery
        :return: dict
        """
        return self._search('tickets', 'search', subquery)

    def get_ticket_stats(self, subquery):
        """
        Get a statistic tickets matching the subquery.
        :param subquery: dict subquery
        :return: dict
        """
        return self._search('tickets', 'stats', subquery)

    def get_ticket_recipient(self, ticket_number):
        """
        Get the recipient for a given ticket.
        :param ticket_number: ticket number
        :return: dict
        """
        return self._search('tickets', 'getRecipient', {'ticketnumber': ticket_number})

    def get_ticket_event_ids(self, ticket_number):
        """
        Get eventIds for a ticket.
        :param ticket_number: ticket number
        :return: dict
        """
        return self._search('checkticket', 'getEventIDsForTicket', {'ticket': ticket_number})

    def get_ticket_events(self, ticket_number, limit=0):
        """
        Get events for a ticket
        :param ticket_number: ticket number
        :param limit: limits the output to [limit] events, default 0
        :return: dict
        """
        # FIX: the result of the search was computed but never returned.
        return self._search('checkticket', 'getEventsForTicket', {'ticket': ticket_number, 'limit': limit})

    def get_last_ticket_number(self):
        """
        Returns the last ticket number
        :return: dict
        """
        return self._search('checkticket', 'getLastTicketNumber', {})
|
# muddery/server/combat/combat_runner/base_combat.py
"""
Combat handler.
The life of a combat:
1. create: create a combat.
2. set_combat: set teams in the combat and the end time if available, then calls the start_combat.
3. start_combat: start the combat. Characters in the combat are allowed to use skills.
4. prepare_skill: characters call the prepare_skill to use skills in the combat. It casts a skill and check if the
combat is finished.
5. can_finish: Check if the combat is finished. A combat finishes when only one or zero team has alive characters, or
the combat is timeout. If a combat can finish calls the finish method.
6. finish: send combat results to all characters.
7. leave_combat: each character notifies the combat when it leaves.
8. stop: if all characters left, remove the combat.
"""
from enum import Enum
import time
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from django.conf import settings
from evennia.utils import logger
from muddery.server.utils import defines
from muddery.server.database.worlddata.worlddata import WorldData
from muddery.server.mappings.element_set import ELEMENT, ELEMENT_SET
from muddery.server.utils import utils
class CStatus(Enum):
    """
    Character's combat status.
    """
    JOINED = 1     # added to the combat, combat not started yet
    ACTIVE = 2     # combat running; the character may cast skills
    FINISHED = 3   # combat finished and results calculated
    ESCAPED = 4    # the character escaped before the combat finished
    LEFT = 5       # the character has left the combat
class BaseCombat(object):
    """
    This implements the combat handler.

    properties:
        characters: {
            "char_id": {
                "char": character's object,
                "team": team's id,
                "status": character's combat status,
            }
        }
    """
    # set initial values
    def __init__(self):
        self.characters = {}

        # if battle is finished
        self.finished = False
        self.winners = {}
        self.losers = {}

        # combat rewards
        self.rewards = {}

        self.timeout = 0
        self.scheduler = None

    def __del__(self):
        # When the combat object is destroyed, make sure the timeout job
        # does not keep running.
        if self.scheduler:
            # FIX: APScheduler schedulers have shutdown(), not stop();
            # calling stop() raised AttributeError. wait=False so garbage
            # collection is not blocked on the scheduler's worker thread.
            self.scheduler.shutdown(wait=False)

    def at_timeout(self):
        """
        Combat timeout.

        Returns:
            None.
        """
        if self.finished:
            return

        self.set_combat_draw()

    def set_combat(self, handler, combat_id, combat_type, teams, desc, timeout):
        """
        Add combatant to handler

        Args:
            handler: (object) the combat handler that manages this combat
            combat_id: (int) combat's id
            combat_type: (string) combat's type
            teams: (dict) {<team id>: [<characters>]}
            desc: (string) combat's description
            timeout: (int) Total combat time in seconds. Zero means no limit.
        """
        self.handler = handler
        self.combat_id = combat_id
        self.combat_type = combat_type
        self.desc = desc
        self.timeout = timeout

        # Add teams.
        for team in teams:
            for character in teams[team]:
                self.characters[character.get_id()] = {
                    "char": character,
                    "team": team,
                    "status": CStatus.JOINED,
                }

        # Set combat to characters.
        for char in self.characters.values():
            character = char["char"]

            # add the combat handler
            character.join_combat(combat_id)

            # Only player characters get the combat UI.
            if utils.is_player(character):
                self.show_combat(character)

    def start(self):
        """
        Start a combat, make all NPCs to cast skills automatically.
        """
        if self.timeout:
            # Schedule the timeout callback at the combat's finish time.
            finish_time = datetime.datetime.fromtimestamp(time.time() + self.timeout)
            self.scheduler = BackgroundScheduler()
            self.scheduler.add_job(self.at_timeout, "date", run_date=finish_time)
            self.scheduler.start()

        for char in self.characters.values():
            char["status"] = CStatus.ACTIVE

    def stop(self):
        """
        Stop this combat and remove it from the combat handler.
        """
        self.handler.remove_combat(self.combat_id)

    def show_combat(self, character):
        """
        Show combat information to a character.

        Args:
            character: (object) character

        Returns:
            None
        """
        # Show combat information to the player.
        character.msg({"joined_combat": True})

        # send messages in order
        character.msg({"combat_info": self.get_appearance()})

    def prepare_skill(self, skill_key, caller, target_id):
        """
        Cast a skill.

        :arg
            skill_key: (string) skill's key
            caller: (obj) the skill's caller's object
            target_id: (int) target's id
        """
        if self.finished:
            return

        # get target's object
        target = None
        if target_id and target_id in self.characters:
            target = self.characters[target_id]["char"]

        if caller:
            caller.cast_skill(skill_key, target)

            if self.can_finish():
                # if there is only one team left, kill this handler
                self.finish()

    def can_finish(self):
        """
        Check if can finish this combat. The combat finishes when a team's members
        are all dead.

        Return True or False
        """
        if not len(self.characters):
            return False

        # Collect the teams that still have alive, active characters.
        teams = set()
        for char in self.characters.values():
            if char["status"] == CStatus.ACTIVE:
                character = char["char"]
                if character.is_alive():
                    teams.add(char["team"])
                    if len(teams) > 1:
                        # More than one team has alive characters.
                        return False

        return True

    def finish(self):
        """
        Finish a combat. Send results to players, and kill all failed characters.
        """
        self.finished = True

        if self.scheduler:
            # FIX: APScheduler schedulers have shutdown(), not stop().
            self.scheduler.shutdown(wait=False)
            self.scheduler = None

        # get winners and losers
        self.winners, self.losers = self.calc_winners()

        for char in self.characters.values():
            char["status"] = CStatus.FINISHED

        # calculate combat rewards
        self.rewards = self.calc_combat_rewards(self.winners, self.losers)

        self.notify_combat_results(self.winners, self.losers)

    def escape_combat(self, caller):
        """
        Character escaped.

        Args:
            caller: (object) the caller of the escape skill.

        Returns:
            None
        """
        # NOTE(review): set_combat keys characters by get_id() but this looks
        # up caller.id -- confirm both return the same value.
        if caller and caller.id in self.characters:
            self.characters[caller.id]["status"] = CStatus.ESCAPED
            caller.combat_result(self.combat_type, defines.COMBAT_ESCAPED)

    def leave_combat(self, character):
        """
        Remove combatant from handler.

        :param character: character object
        """
        if character.id in self.characters:
            if self.characters[character.id]["status"] == CStatus.LEFT:
                return
            self.characters[character.id]["status"] = CStatus.LEFT

        # If no player character remains, remove everyone and stop the combat.
        all_player_left = True
        for char in self.characters.values():
            if char["status"] != CStatus.LEFT and\
                    char["char"].is_element(settings.PLAYER_CHARACTER_ELEMENT_TYPE):
                all_player_left = False
                break

        if all_player_left:
            # There is no player character in combat.
            for char in self.characters.values():
                if char["status"] != CStatus.LEFT:
                    char["status"] = CStatus.LEFT
                    try:
                        char["char"].remove_from_combat()
                    except Exception as e:
                        logger.log_err("Leave combat error: %s" % e)

            self.stop()

    def msg_all(self, message):
        "Send message to all combatants"
        for char in self.characters.values():
            char["char"].msg(message)

    def set_combat_draw(self):
        """
        Called when the combat ended in a draw.

        Returns:
            None.
        """
        for char in self.characters.values():
            char["char"].combat_result(self.combat_type, defines.COMBAT_DRAW)

    def calc_winners(self):
        """
        Calculate combat winners and losers.
        """
        # The first team found with an alive, active character is the winner.
        winner_team = None
        for char in self.characters.values():
            if char["status"] == CStatus.ACTIVE and char["char"].is_alive():
                winner_team = char["team"]
                break

        # winners and losers do not include escaped characters.
        winners = {char_id: char["char"] for char_id, char in self.characters.items()
                   if char["status"] == CStatus.ACTIVE and char["team"] == winner_team}
        losers = {char_id: char["char"] for char_id, char in self.characters.items()
                  if char["status"] == CStatus.ACTIVE and char["team"] != winner_team}

        return winners, losers

    def calc_combat_rewards(self, winners, losers):
        """
        Called when the character wins the combat.

        Args:
            winners: (dict) all combat winners.
            losers: (dict) all combat losers.

        Returns:
            (dict) reward dict, keyed by winner id
        """
        rewards = {}
        for winner_id, winner_char in winners.items():
            # Each winner gets exp and loot from every loser.
            exp = 0
            loots = []
            for loser in losers:
                loser_char = self.characters[loser]["char"]
                exp += loser_char.provide_exp(self)

                obj_list = loser_char.loot_handler.get_obj_list(winner_char)
                if obj_list:
                    loots.extend(obj_list)

            # Resolve looted object keys to display data (name/icon).
            obj_list = []
            if loots:
                common_models = ELEMENT("COMMON_OBJECT").get_models()
                for obj_info in loots:
                    try:
                        table_data = WorldData.get_tables_data(common_models, key=obj_info["object_key"])
                        table_data = table_data[0]

                        obj_list.append({
                            "object_key": obj_info["object_key"],
                            "level": obj_info["level"],
                            "number": obj_info["number"],
                            "name": table_data.name,
                            "icon": table_data.icon,
                            "reject": "",
                        })
                    except Exception as e:
                        # Skip objects that cannot be resolved; keep the rest.
                        logger.log_errmsg("Can not loot object %s: %s." % (obj_info["object_key"], e))

            rewards[winner_id] = {
                "exp": exp,
                "loots": obj_list,
            }

        return rewards

    def get_combat_rewards(self, char_id):
        """
        Get a character's combat rewards.
        """
        return self.rewards.get(char_id, None)

    def notify_combat_results(self, winners, losers):
        """
        Called when the combat has finished; send results to all participants.

        Args:
            winners: (dict) all combat winners, keyed by character id.
            losers: (dict) all combat losers, keyed by character id.

        Returns:
            None
        """
        for char_id, char in winners.items():
            char.combat_result(self.combat_type, defines.COMBAT_WIN, losers.values(), self.get_combat_rewards(char_id))

        for char_id, char in losers.items():
            char.combat_result(self.combat_type, defines.COMBAT_LOSE, winners.values())

    def get_appearance(self):
        """
        Get the combat appearance.
        """
        appearance = {"desc": self.desc,
                      "timeout": self.timeout,
                      "characters": []}

        for char in self.characters.values():
            character = char["char"]
            info = character.get_appearance(self)
            info["team"] = char["team"]

            appearance["characters"].append(info)

        return appearance

    def get_combat_characters(self):
        """
        Get all characters in combat.
        """
        return self.characters.values()

    def get_opponents(self, character_id):
        """
        Get a character' opponents.

        :param character_id:
        :return:
        """
        if character_id not in self.characters:
            return []

        team = self.characters[character_id]["team"]
        opponents = [c["char"] for c in self.characters.values()
                     if c["status"] == CStatus.ACTIVE and c["team"] != team]
        return opponents

    def is_finished(self):
        """
        :return: combat finished or not.
        """
        return self.finished

    def get_combat_type(self):
        """
        Get the combat's type.
        """
        return self.combat_type

    def get_combat_result(self, char_id):
        """
        Get a character's combat result.

        :param char_id: character's db id
        :return: (result, opponents, rewards) or None if not applicable
        """
        if not self.finished:
            return

        if char_id not in self.characters:
            return

        status = self.characters[char_id]["status"]
        if status == CStatus.ESCAPED:
            return defines.COMBAT_ESCAPED, None, None
        elif status == CStatus.FINISHED or status == CStatus.LEFT:
            if char_id in self.winners:
                return defines.COMBAT_WIN, self.losers.values(), self.get_combat_rewards(char_id)
            elif char_id in self.losers:
                return defines.COMBAT_LOSE, self.winners.values(), None
            else:
                return defines.COMBAT_DRAW, None, None
        else:
            return defines.COMBAT_DRAW, None, None
|
# commcare-hq
import json
from decimal import Decimal
from casexml.apps.stock.utils import months_of_stock_remaining, stock_category
from corehq.apps.consumption.const import DAYS_IN_MONTH
from corehq.apps.domain.models import Domain
from dimagi.utils import parsing as dateparse
from datetime import datetime, timedelta
from casexml.apps.stock import const
from memoized import memoized
def DEFAULT_CONSUMPTION_FUNCTION(case_id, product_id):
    """Fallback consumption function: no default consumption is known.

    Replaces a lambda assignment (PEP 8 E731) with an equivalent def;
    the module-level name and call signature are unchanged.
    """
    return None
class ConsumptionHelper(object):
    """
    Helper object for dealing with consumption at the individual
    domain/case/entry level.
    """

    def __init__(self, domain, case_id, section_id, entry_id, daily_consumption, balance, sql_location):
        self.domain = domain
        self.case_id = case_id
        self.section_id = section_id
        self.entry_id = entry_id
        self.daily_consumption = daily_consumption
        self.balance = balance
        self.sql_location = sql_location

    @property
    def domain_obj(self):
        """The Domain document for this helper's domain name."""
        return Domain.get_by_name(self.domain)

    def get_default_monthly_consumption(self):
        """Statically-configured monthly consumption for this case/entry."""
        return get_default_monthly_consumption_for_case_and_entry(
            self.domain_obj, self.case_id, self.entry_id
        )

    @memoized
    def get_daily_consumption(self):
        """Computed daily consumption, falling back to the configured monthly default."""
        if self.daily_consumption is not None:
            return self.daily_consumption
        monthly = self.get_default_monthly_consumption()
        if monthly is None:
            return None
        return Decimal(monthly) / Decimal(DAYS_IN_MONTH)

    def get_monthly_consumption(self):
        """Monthly consumption derived from the daily rate, or the default."""
        if self.daily_consumption is None:
            return self.get_default_monthly_consumption()
        return Decimal(self.daily_consumption) * Decimal(DAYS_IN_MONTH)

    def get_months_remaining(self):
        """Months of stock left at the current consumption rate."""
        return months_of_stock_remaining(
            self.balance,
            self.get_daily_consumption()
        )

    def get_resupply_quantity_needed(self):
        """Quantity needed to reach the location's overstock threshold, or None."""
        monthly = self.get_monthly_consumption()
        if monthly is None or self.sql_location is None:
            return None
        overstock = self.sql_location.location_type.overstock_threshold
        needed = int(monthly * overstock)
        return int(max(needed - self.balance, 0))

    def get_stock_category(self):
        """Stock category label for the current balance ('nodata' without a location)."""
        if not self.sql_location:
            return 'nodata'
        loc_type = self.sql_location.location_type
        return stock_category(
            self.balance,
            self.get_daily_consumption(),
            loc_type.understock_threshold,
            loc_type.overstock_threshold,
        )
class ConsumptionConfiguration(object):
    """Tunable parameters controlling how consumption rates are computed."""

    DEFAULT_MIN_PERIODS = 2
    DEFAULT_MIN_WINDOW = 10
    DEFAULT_MAX_WINDOW = 60

    def __init__(self, min_periods=None, min_window=None, max_window=None,
                 default_monthly_consumption_function=None, exclude_invalid_periods=False):
        # the minimum number of consumption periods to include in a calculation
        # periods are intervals between stock reports
        self.min_periods = self.DEFAULT_MIN_PERIODS if min_periods is None else min_periods

        # the minimum total time of consumption data to include (in days);
        # consumption should resort to static defaults if less than this
        # amount of data is available
        self.min_window = self.DEFAULT_MIN_WINDOW if min_window is None else min_window

        # the maximum time to look backwards for consumption data (in days);
        # data before this period will not be included in the calculation
        self.max_window = self.DEFAULT_MAX_WINDOW if max_window is None else max_window

        if default_monthly_consumption_function is None:
            default_monthly_consumption_function = DEFAULT_CONSUMPTION_FUNCTION
        self.default_monthly_consumption_function = default_monthly_consumption_function

        self.exclude_invalid_periods = exclude_invalid_periods

    @classmethod
    def test_config(cls):
        """Configuration with thresholds disabled, for use in tests."""
        return cls(0, 0, 60)

    def __repr__(self):
        return json.dumps({
            'min_periods': self.min_periods,
            'min_window': self.min_window,
            'max_window': self.max_window,
            'has_default_monthly_consumption_function': bool(self.default_monthly_consumption_function),
            'exclude_invalid_periods': self.exclude_invalid_periods
        }, indent=2)
def from_ts(dt):
    """Coerce *dt* (a datetime or an ISO timestamp string) to a naive datetime."""
    if isinstance(dt, datetime):
        return dt.replace(tzinfo=None)
    # deal with invalid timestamps carrying a trailing 'Z'
    # (where are these coming from?)
    if dt.endswith('Z') and len(dt) > 20:
        dt = dt[:-1]
    return dateparse.string_to_datetime(dt).replace(tzinfo=None)
# Serializer counterpart of from_ts: datetime -> canonical JSON timestamp string.
to_ts = dateparse.json_format_datetime
def span_days(start, end):
    """Return the length of the interval [start, end] in fractional days.

    FIX: the old ``span.days + span.seconds / 86400.`` silently dropped the
    microseconds component of the timedelta; total_seconds() keeps full
    precision (identical results for whole-second inputs).
    """
    return (end - start).total_seconds() / 86400.0
def compute_daily_consumption(
        domain, case_id, product_id, window_end,
        section_id=const.SECTION_TYPE_STOCK, configuration=None):
    """
    Computes the consumption for a product at a supply point.

    Can optionally pass a section_id, but by default the 'stock'
    value is used for computation.

    Returns None if there is insufficient history.
    """
    from corehq.form_processor.interfaces.dbaccessors import LedgerAccessors

    configuration = configuration or ConsumptionConfiguration()
    window_start = window_end - timedelta(days=configuration.max_window)
    ledger_transactions = LedgerAccessors(domain).get_transactions_for_consumption(
        case_id,
        product_id,
        section_id,
        window_start,
        window_end,
    )
    return compute_daily_consumption_from_transactions(
        ledger_transactions, window_start, configuration)
def get_default_monthly_consumption_for_case_and_entry(domain, case_id, entry_id):
    """Look up the statically-configured monthly consumption for a case/entry."""
    config = None
    if domain and domain.commtrack_settings:
        config = domain.commtrack_settings.get_consumption_config()
    return compute_default_monthly_consumption(case_id, entry_id, config)
def compute_default_monthly_consumption(case_id, product_id, configuration):
    """Delegate to the configuration's default monthly consumption function."""
    default_fn = configuration.default_monthly_consumption_function
    return default_fn(case_id, product_id)
def compute_daily_consumption_from_transactions(transactions, window_start, configuration=None):
    """Compute an average daily consumption rate from a transaction stream.

    Transactions are split into "periods" delimited by checkpoint (stock
    report) transactions; consumption within each period is summed, lengths
    are clipped to the averaging window, and the rate is total consumption
    over total length.  Returns None when the data does not meet the
    configuration's minimum-period / minimum-window thresholds.

    ``transactions`` are assumed to be in chronological order — TODO confirm
    against callers.
    """
    configuration = configuration or ConsumptionConfiguration()

    class ConsumptionPeriod(object):
        # One interval between two consecutive checkpoint transactions.
        def __init__(self, tx):
            self.start = from_ts(tx.received_on)
            self.start_soh = tx.normalized_value  # stock on hand at period start
            self.end_soh = None
            self.end = None
            self.consumption = 0
            self.receipts = 0

        def add(self, tx):
            # Accumulate a consumption transaction into this period.
            self.consumption += tx.normalized_value

        def receipt(self, receipt):
            # Accumulate a receipt quantity (used only for validity checking).
            self.receipts += receipt

        def close_out(self, tx):
            # Close the period at the next checkpoint transaction.
            self.end = from_ts(tx.received_on)
            self.end_soh = tx.normalized_value

        def is_valid(self):
            # A period is plausible when ending stock does not exceed
            # starting stock plus everything received in between.
            return self.start_soh + Decimal(self.receipts) >= self.end_soh

        @property
        def length(self):
            # Full period length in fractional days.
            return span_days(self.start, self.end)

        @property
        def normalized_length(self):
            # Length clipped to the averaging window (zero if entirely before it).
            return span_days(max(self.start, window_start), max(self.end, window_start))

        @property
        def normalized_consumption(self):
            # Consumption scaled down to the in-window fraction of the period.
            return float(self.consumption) * self.normalized_length / self.length

    def split_periods(transactions):
        # Walk the transaction stream and yield closed, (optionally) valid periods.
        period = None
        for tx in transactions:
            if tx.is_checkpoint:
                if period:
                    period.close_out(tx)
                    if not configuration.exclude_invalid_periods or period.is_valid():
                        yield period
                # every checkpoint starts a new period
                period = ConsumptionPeriod(tx)
            elif tx.is_stockout:
                if period:
                    # throw out current period
                    period = None
            elif tx.type == const.TRANSACTION_TYPE_CONSUMPTION:
                # TODO in the future it's possible we'll want to break this out by action_type, in order to track
                # different kinds of consumption: normal vs losses, etc.
                if period:
                    period.add(tx)
            elif configuration.exclude_invalid_periods and tx.type == const.TRANSACTION_TYPE_RECEIPTS:
                # receipts only matter for the validity check above
                if period and period.start:
                    period.receipt(tx.normalized_value)

    periods = list(split_periods(transactions))

    # exclude periods that occur entirely before the averaging window
    periods = [period for period in periods if period.normalized_length]
    total_consumption = sum(period.normalized_consumption for period in periods)
    total_length = sum(period.normalized_length for period in periods)

    # check minimum statistical significance thresholds
    if len(periods) < configuration.min_periods or total_length < configuration.min_window:
        return None

    return total_consumption / float(total_length) if total_length else None
|
<gh_stars>1-10
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import List, Tuple, Set
import click
from packaging.requirements import Requirement
from packaging.version import Version
import pkg_resources
def _get_package_requirements(package_name: str) -> List[Requirement]:
    """
    Get a list of all requirements and extras declared by this package.
    The package must already be installed in the environment.
    Args:
        package_name (str): The name of the package.
    Returns:
        List[Requirement]: A list of package requirements and extras,
        as ``packaging.requirements.Requirement`` objects (converted from
        the ``pkg_resources`` requirements via their string form).
    """
    dist = pkg_resources.get_distribution(package_name)
    # Include every declared extra so optional dependencies are returned too.
    extras = tuple(dist.extras)
    requirements = [Requirement(str(r)) for r in dist.requires(extras=extras)]
    return requirements
def _parse_requirements_file(requirements_file: str) -> List[Requirement]:
    """
    Parse a requirements file into Requirement objects.
    Blank lines and comment lines (starting with '#') are skipped.
    Args:
        requirements_file (str): Path to a requirements file.
    Returns:
        List[Requirement]: A list of requirements.
    """
    with Path(requirements_file).open() as handle:
        stripped = (raw.strip() for raw in handle)
        return [
            Requirement(entry)
            for entry in stripped
            if entry and not entry.startswith("#")
        ]
def _get_pinned_versions(
    ctx: click.Context, requirements: List[Requirement]
) -> Set[Tuple[str, Version]]:
    """Turn a list of requirements into (package name, Version) tuples.
    Every requirement must pin exactly one version with '=='; anything else
    aborts the command via ``ctx.fail``.
    Example result:
    {("requests", Version("1.25.0"), ("google-auth", Version("1.0.0")}
    Args:
        ctx (click.Context): The current click context.
        requirements (List[Requirement]): A list of requirements.
    Returns:
        Set[Tuple[str, Version]]: Tuples of the package name and Version.
    """
    pinned = set()
    invalid_requirements = []
    for req in requirements:
        specs = list(req.specifier)
        # A valid pin is a single '==' specifier; everything else is invalid.
        if len(specs) == 1 and specs[0].operator == "==":
            pinned.add((req.name, Version(specs[0].version)))
        else:
            invalid_requirements.append(req.name)
    if invalid_requirements:
        ctx.fail(
            f"These requirements are not pinned to one version: {invalid_requirements}"
        )
    return pinned
class IndeterminableLowerBound(Exception):
    """Raised when a requirement's specifier set has no recognizable lower bound."""
    pass
def _lower_bound(requirement: Requirement) -> str:
    """
    Determine the lowest version that fulfills the requirement.
    Only these specifier shapes are supported:
    foo==1.2.0
    foo>=1.2.0
    foo>=1.2.0, <2.0.0dev
    foo<2.0.0dev, >=1.2.0
    Args:
        requirement (Requirement): A requirement to parse
    Returns:
        str: The lower bound for the requirement.
    Raises:
        IndeterminableLowerBound: For any other specifier shape.
    """
    # Sorting by operator string puts '<' before '>=', giving a canonical order.
    specs = sorted(requirement.specifier, key=lambda s: s.operator)
    if len(specs) == 1 and specs[0].operator in ("==", ">="):
        # foo==1.2.0 or foo>=1.2.0: the single specifier is the bound.
        return specs[0].version
    if len(specs) == 2 and specs[0].operator == "<" and specs[1].operator == ">=":
        # foo>=1.2.0, <2.0.0 (either written order): the '>=' side is the bound.
        return specs[1].version
    raise IndeterminableLowerBound(
        f"Lower bound could not be determined for {requirement.name}"
    )
def _get_package_lower_bounds(
    ctx: click.Context, requirements: List[Requirement]
) -> Set[Tuple[str, Version]]:
    """Get a set of (package_name, Version) lower-bound tuples.
    Aborts the command via ``ctx.fail`` if any requirement has no
    determinable lower bound.
    Args:
        ctx (click.Context): The current click context.
        requirements (List[Requirement]): A list of requirements.
    Returns:
        Set[Tuple[str, Version]]: A set of (package_name, lower_bound)
        tuples.
    """
    lower_bounds = set()
    undeterminable = []
    for req in requirements:
        try:
            lower_bounds.add((req.name, Version(_lower_bound(req))))
        except IndeterminableLowerBound:
            undeterminable.append(req.name)
    if undeterminable:
        ctx.fail(
            f"setup.py is missing explicit lower bounds for the following packages: {str(undeterminable)}"
        )
        return None
    return lower_bounds
@click.group()
def main():
    # CLI entry point; subcommands are attached below via @main.command().
    # NOTE: intentionally no docstring — click would surface it as help text.
    pass
@main.command()
@click.option("--package-name", required=True, help="Name of the package.")
@click.option("--constraints-file", required=True, help="Path to constraints file.")
@click.pass_context
def update(ctx: click.Context, package_name: str, constraints_file: str) -> None:
    """Create a constraints file with lower bounds for package-name.
    If the constraints file already exists the contents will be overwritten.
    """
    # Sort requirements by name so output ordering is deterministic.
    reqs = sorted(_get_package_requirements(package_name), key=lambda r: r.name)
    bounds = sorted(_get_package_lower_bounds(ctx, reqs), key=lambda pair: pair[0])
    pins = [f"{name}=={version}" for name, version in bounds]
    Path(constraints_file).write_text("\n".join(pins))
@main.command()
@click.option("--package-name", required=True, help="Name of the package.")
@click.option("--constraints-file", required=True, help="Path to constraints file.")
@click.pass_context
def check(ctx: click.Context, package_name: str, constraints_file: str):
    """Check that the constraints-file pins to the lower bound specified in package-name's
    setup.py for each requirement.
    Requirements:
    1. The setup.py pins every requirement in one of the following formats:
    * foo==1.2.0
    * foo>=1.2.0
    * foo>=1.2.0, <2.0.0dev
    * foo<2.0.0dev, >=1.2.0
    2. The constraints file pins every requirement to a single version:
    * foo==1.2.0
    3. package-name is already installed in the environment.
    """
    package_requirements = _get_package_requirements(package_name)
    constraints = _parse_requirements_file(constraints_file)
    package_lower_bounds = _get_package_lower_bounds(ctx, package_requirements)
    constraints_file_versions = _get_pinned_versions(ctx, constraints)
    # Look for dependencies in setup.py that are missing from constraints.txt
    package_names = {x[0] for x in package_lower_bounds}
    constraint_names = {x[0] for x in constraints_file_versions}
    missing_from_constraints = package_names - constraint_names
    if missing_from_constraints:
        ctx.fail(
            (
                # BUG FIX: the two adjacent f-strings used to concatenate as
                # "...extrain setup.py..." — a trailing space was missing.
                f"The following packages are declared as a requirement or extra "
                f"in setup.py but were not found in {constraints_file}: {str(missing_from_constraints)}"
            )
        )
    # We use .issuperset() instead of == because there may be additional entries
    # in constraints.txt (e.g., test only requirements)
    if not constraints_file_versions.issuperset(package_lower_bounds):
        # BUG FIX: message previously read "...different versions {pkg}'s setup.py..."
        # (missing the word "in").
        first_line = f"The following packages have different versions in {package_name}'s setup.py and {constraints_file}"
        error_msg = [first_line, "-" * (7 + len(first_line))]
        difference = package_lower_bounds - constraints_file_versions
        constraints_dict = dict(constraints_file_versions)
        for req, setup_py_version in difference:
            error_msg.append(
                f"'{req}' lower bound is {setup_py_version} in setup.py but constraints file has {constraints_dict[req]}"
            )
        ctx.fail("\n".join(error_msg))
    click.secho("All good!", fg="green")
if __name__ == "__main__":
main()
|
<reponame>vatsalag99/mapping_self-harm_risk_twitter
# coding: utf-8
# In[1]:
import warnings
warnings.filterwarnings("ignore")
import ftfy
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
import re
import time
from math import exp
from numpy import sign
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from gensim.models import KeyedVectors
from nltk.corpus import stopwords
from nltk import PorterStemmer
from keras.models import Model, Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Conv1D, Dense, Input, LSTM, Embedding, Dropout, Activation, MaxPooling1D
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# In[69]:
EMBEDDING_FILE = 'GoogleNews-vectors-negative300.bin.gz'
print("Loading embedding file..")
word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True)
# BUG FIX: the CSV load was wrapped in `while(True):` with no break, which
# re-read the file forever and made every statement after the loop
# unreachable. Load the data once instead.
df = pd.read_csv("twitter2.csv")
df = df[['tweet', 'location']]
df
# In[70]:
df = df.dropna()
df
# In[118]:
import re
# Expand Contraction
# Lookup table mapping English contractions to their expanded forms; used by
# expandContractions() via the compiled alternation pattern c_re below.
cList = {
    "ain't": "am not",
    "aren't": "are not",
    "can't": "cannot",
    "can't've": "cannot have",
    "'cause": "because",
    "could've": "could have",
    "couldn't": "could not",
    "couldn't've": "could not have",
    "didn't": "did not",
    "doesn't": "does not",
    "don't": "do not",
    "hadn't": "had not",
    "hadn't've": "had not have",
    "hasn't": "has not",
    "haven't": "have not",
    "he'd": "he would",
    "he'd've": "he would have",
    "he'll": "he will",
    "he'll've": "he will have",
    "he's": "he is",
    "how'd": "how did",
    "how'd'y": "how do you",
    "how'll": "how will",
    "how's": "how is",
    "I'd": "I would",
    "I'd've": "I would have",
    "I'll": "I will",
    "I'll've": "I will have",
    "I'm": "I am",
    "I've": "I have",
    "isn't": "is not",
    "it'd": "it had",
    "it'd've": "it would have",
    "it'll": "it will",
    "it'll've": "it will have",
    "it's": "it is",
    "let's": "let us",
    "ma'am": "madam",
    "mayn't": "may not",
    "might've": "might have",
    "mightn't": "might not",
    "mightn't've": "might not have",
    "must've": "must have",
    "mustn't": "must not",
    "mustn't've": "must not have",
    "needn't": "need not",
    "needn't've": "need not have",
    "o'clock": "of the clock",
    "oughtn't": "ought not",
    "oughtn't've": "ought not have",
    "shan't": "shall not",
    "sha'n't": "shall not",
    "shan't've": "shall not have",
    "she'd": "she would",
    "she'd've": "she would have",
    "she'll": "she will",
    "she'll've": "she will have",
    "she's": "she is",
    "should've": "should have",
    "shouldn't": "should not",
    "shouldn't've": "should not have",
    "so've": "so have",
    "so's": "so is",
    "that'd": "that would",
    "that'd've": "that would have",
    "that's": "that is",
    "there'd": "there had",
    "there'd've": "there would have",
    "there's": "there is",
    "they'd": "they would",
    "they'd've": "they would have",
    "they'll": "they will",
    "they'll've": "they will have",
    "they're": "they are",
    "they've": "they have",
    "to've": "to have",
    "wasn't": "was not",
    "we'd": "we had",
    "we'd've": "we would have",
    "we'll": "we will",
    "we'll've": "we will have",
    "we're": "we are",
    "we've": "we have",
    "weren't": "were not",
    "what'll": "what will",
    "what'll've": "what will have",
    "what're": "what are",
    "what's": "what is",
    "what've": "what have",
    "when's": "when is",
    "when've": "when have",
    "where'd": "where did",
    "where's": "where is",
    "where've": "where have",
    "who'll": "who will",
    "who'll've": "who will have",
    "who's": "who is",
    "who've": "who have",
    "why's": "why is",
    "why've": "why have",
    "will've": "will have",
    "won't": "will not",
    "won't've": "will not have",
    "would've": "would have",
    "wouldn't": "would not",
    "wouldn't've": "would not have",
    "y'all": "you all",
    "y'alls": "you alls",
    "y'all'd": "you all would",
    "y'all'd've": "you all would have",
    "y'all're": "you all are",
    "y'all've": "you all have",
    "you'd": "you had",
    "you'd've": "you would have",
    # BUG FIX: these two entries expanded to "you you will [have]" —
    # a duplicated word that corrupted every cleaned tweet containing "you'll".
    "you'll": "you will",
    "you'll've": "you will have",
    "you're": "you are",
    "you've": "you have"
}
# Alternation over all contraction keys, compiled once for reuse.
c_re = re.compile('(%s)' % '|'.join(cList.keys()))
def expandContractions(text, c_re=c_re):
    """Replace every contraction in *text* with its expansion from cList."""
    return c_re.sub(lambda m: cList[m.group(0)], text)
def clean_tweets(tweets, df):
    """Clean raw tweet strings and drop rejected rows from *df* in lockstep.

    A tweet is rejected (and its row dropped) when it starts with a URL or is
    10 characters or shorter. Kept tweets get mentions/hashtags/emoji/pic
    links stripped, encoding fixed, contractions expanded, punctuation and
    stop words removed, and stemming applied.

    Returns:
        tuple: (cleaned_tweets, df) with df rows aligned to cleaned_tweets.
    """
    cleaned_tweets = []
    # PERF FIX: build the stop-word set once instead of once per tweet.
    stop_words = set(stopwords.words('english'))
    i = 0
    for tweet in tweets:
        # if url links then dont append to avoid news articles
        # also check tweet length, save those > 10 (length of word "depression")
        # NOTE(review): re.match anchors at the string start, so only tweets
        # that *begin* with a URL are rejected — confirm that is intended.
        if re.match("(\w+:\/\/\S+)", tweet) == None and len(tweet) > 10:
            #remove hashtag, @mention, emoji and image URLs
            new_tweet = ' '.join(re.sub("(@[A-Za-z0-9]+)|(\#[A-Za-z0-9]+)|(<Emoji:.*>)|(pic\.twitter\.com\/.*)", " ", tweet).split())
            #fix weirdly encoded texts
            new_tweet = ftfy.fix_text(new_tweet)
            #expand contraction
            new_tweet = expandContractions(new_tweet)
            #remove punctuation
            new_tweet = ' '.join(re.sub("([^0-9A-Za-z \t])", " ", new_tweet).split())
            #stop words
            word_tokens = nltk.word_tokenize(new_tweet)
            filtered_sentence = [w for w in word_tokens if not w in stop_words]
            new_tweet = ' '.join(filtered_sentence)
            #stemming words
            new_tweet = PorterStemmer().stem(new_tweet)
            cleaned_tweets.append(new_tweet)
            i += 1
        else:
            # Drop the rejected row by position; since df shrinks and i is not
            # advanced, position i now points at the next row.
            df = df.drop(df.index[i])
    return cleaned_tweets, df
# In[120]:
import nltk
# Corpora required by clean_tweets (stop words + the punkt tokenizer).
nltk.download('stopwords')
nltk.download('punkt')
# clean_tweets also drops rejected rows from df, keeping X and df aligned.
df_arr = [x for x in df["tweet"]]
X, df = clean_tweets(df_arr, df)
print(len(X))
df
# In[73]:
# Tokenize the cleaned tweets, keeping only the most frequent words.
MAX_NB_WORDS = 20000
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(X)
# In[74]:
sequence = tokenizer.texts_to_sequences(X)
# In[75]:
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
# In[76]:
# Pad/truncate to tweet-length sequences (140 chars was the tweet limit).
MAX_SEQUENCE_LENGTH = 140
data = pad_sequences(sequence, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data_d tensor:', data.shape)
# In[77]:
nb_words = min(MAX_NB_WORDS, len(word_index))
EMBEDDING_DIM = 300
embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
for (word, idx) in word_index.items():
    # BUG FIX: Keras word_index is 1-based, so when len(word_index) < MAX_NB_WORDS
    # the original bound `idx < MAX_NB_WORDS` allowed idx == nb_words, raising an
    # IndexError on the last word. Bound by the actual matrix size instead.
    # (A per-word debug print was also removed here.)
    if word in word2vec.vocab and idx < nb_words:
        embedding_matrix[idx] = word2vec.word_vec(word)
# In[78]:
from keras.models import model_from_json
# Model reconstruction from JSON file
with open('model_architecture.json', 'r') as f:
    model = model_from_json(f.read())
# Load weights into the new model
model.load_weights('model_weights.h5')
# In[79]:
# Binary prediction per tweet; rounding maps sigmoid output to {0, 1}.
labels_pred = model.predict(data)
labels_pred = np.round(labels_pred.flatten())
# In[80]:
print(X)
labels_pred
# In[81]:
# Collect the 'location' field of every tweet predicted positive (label 1).
i = 0
locations = []
for x in np.nditer(labels_pred):
    if(x == 1):
        locations.append(df.iloc[i]["location"])
    i+=1
locations
print(len(locations))
# In[82]:
# Continental-US bounding box used by cull() to filter geocoded points.
top = 49.3457868 # north lat
left = -124.7844079 # west long
right = -66.9513812 # east long
bottom = 24.7433195 # south lat
def cull(lat, lng):
    """Return True iff (lat, lng) lies inside the continental-US bounding box.

    Reads the module-level top/left/right/bottom coordinates.
    """
    # Idiom fix: return the comparison directly instead of if/return True/False.
    return bottom <= lat <= top and left <= lng <= right
# In[83]:
from geopy.geocoders import Nominatim, ArcGIS
from geopy.extra.rate_limiter import RateLimiter
import pycountry
# Geocode each positive-labelled location string and keep only coordinates
# inside the US bounding box (see cull above).
df_coord = pd.DataFrame(columns=('Latitude', 'Longitude'))
geolocator = ArcGIS(timeout=10)
i = 0
for location in locations:
    if location:
        loc = geolocator.geocode(location, exactly_one=True)
        if loc:
            print(loc.address)
            if(cull(loc.latitude, loc.longitude)):
                df_coord.loc[i] = (loc.latitude, loc.longitude)
                print(loc.latitude, loc.longitude)
                i+=1
# In[84]:
df_coord
# In[57]:
# NOTE(review): unlike the loop above, this collects the location of EVERY
# tweet regardless of predicted label — confirm that is intentional.
i = 0
demo_locations = []
for x in np.nditer(labels_pred):
    demo_locations.append(df.iloc[i]["location"])
    i+=1
demo_locations
print(len(demo_locations))
# Append (mode 'a') the geocoded coordinates to the output CSV.
with open('coords.csv', 'a', encoding='utf-8') as f:
    df_coord.to_csv(f, header=False, encoding='utf-8')
|
import pytest
import opentracing
from mock import MagicMock
from opentracing.ext import tags as opentracing_tags
from basictracer import BasicTracer
from .conftest import Recorder
from opentracing_utils import trace, extract_span_from_kwargs
def is_span_in_kwargs(**kwargs):
    """Return True if any keyword-argument value is an opentracing Span."""
    return any(isinstance(value, opentracing.Span) for value in kwargs.values())
def test_trace_single():
    """Each call of a @trace'd function yields one child of the active span."""
    @trace()
    def f1():
        pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    root = opentracing.tracer.start_span(operation_name='test_trace')
    with root:
        for _ in range(3):
            f1()
    # Three f1 spans plus the root span itself.
    assert len(recorder.spans) == 4
    for child in recorder.spans[:3]:
        assert child.context.trace_id == root.context.trace_id
        assert child.parent_id == root.context.span_id
def test_trace_follows_from():
    """A follows-from reference still links trace_id/parent_id to the active span."""
    @trace(use_follows_from=True)
    def f1():
        pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        f1()
    assert len(recorder.spans) == 2
    assert recorder.spans[0].context.trace_id == test_span.context.trace_id
    assert recorder.spans[0].parent_id == test_span.context.span_id
def test_trace_method():
    """@trace works on instance methods; the method span parents to the outer span."""
    class C:
        @trace()
        def func(self):
            pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        C().func()
    assert len(recorder.spans) == 2
    assert recorder.spans[0].context.trace_id == test_span.context.trace_id
    # spans[1] is test_span (finished last), so the method span is its child.
    assert recorder.spans[0].parent_id == recorder.spans[1].context.span_id
def test_trace_generator():
    """Tracing a generator: spans created inside it parent to the generator's span."""
    @trace()
    def f1():
        list(l2_gen())
    @trace()
    def f2():
        pass
    @trace(pass_span=True)
    def l2_gen(**kwargs):
        s = extract_span_from_kwargs(**kwargs)  # noqa
        f2(span=s)
        for i in range(10):
            yield i
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        f1()
    # Finish order: f2, l2_gen, f1, test_span.
    assert len(recorder.spans) == 4
    assert recorder.spans[0].context.trace_id == test_span.context.trace_id
    assert recorder.spans[0].parent_id == recorder.spans[2].context.span_id
    # Inside generator takes generator as parent!
    assert recorder.spans[1].context.trace_id == test_span.context.trace_id
    assert recorder.spans[1].parent_id == recorder.spans[0].context.span_id
    assert recorder.spans[2].context.trace_id == test_span.context.trace_id
    assert recorder.spans[2].parent_id == recorder.spans[3].context.span_id
@pytest.mark.parametrize('pass_span', (False, True))
def test_trace_nested(pass_span):
    """Nested traced calls chain parent ids; pass_span controls span injection."""
    @trace(pass_span=pass_span)
    def parent(**kwargs):
        assert is_span_in_kwargs(**kwargs) is pass_span
        if pass_span:
            current_span = extract_span_from_kwargs(**kwargs)
            assert current_span.operation_name == 'parent'
        nested()
    @trace(pass_span=pass_span)
    def nested(**kwargs):
        assert is_span_in_kwargs(**kwargs) is pass_span
        if pass_span:
            current_span = extract_span_from_kwargs(**kwargs)
            assert current_span.operation_name == 'nested'
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        parent()
    # Finish order: nested, parent, test_span.
    assert len(recorder.spans) == 3
    assert recorder.spans[0].context.trace_id == test_span.context.trace_id
    assert recorder.spans[0].parent_id == recorder.spans[1].context.span_id
    assert recorder.spans[1].context.trace_id == test_span.context.trace_id
    assert recorder.spans[1].parent_id == test_span.context.span_id
    assert recorder.spans[-1].parent_id is None
def test_trace_nested_with_args():
    """Positional args pass through untouched; no span leaks into kwargs unless requested."""
    @trace()
    def parent(arg1, arg2):
        nested(arg1)
    @trace()
    def nested(arg1, **kwargs):
        assert is_span_in_kwargs(**kwargs) is False
    @trace(pass_span=True)
    def expect_span(**kwargs):
        assert is_span_in_kwargs(**kwargs) is True
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        parent(1, 2)
        expect_span()
    assert len(recorder.spans) == 4
    assert recorder.spans[0].context.trace_id == test_span.context.trace_id
    assert recorder.spans[0].parent_id == recorder.spans[1].context.span_id
    assert recorder.spans[1].context.trace_id == test_span.context.trace_id
    assert recorder.spans[1].parent_id == test_span.context.span_id
    assert recorder.spans[-1].parent_id is None
def test_trace_mutliple_spans():
    """Two independent root spans in sequence each get their own trace tree."""
    @trace()
    def parent():
        nested()
    @trace()
    def nested(**kwargs):
        assert is_span_in_kwargs(**kwargs) is False
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span_first = opentracing.tracer.start_span(operation_name='test_trace_first')
    with test_span_first:
        parent()
    assert len(recorder.spans) == 3
    assert recorder.spans[0].context.trace_id == test_span_first.context.trace_id
    assert recorder.spans[0].parent_id == recorder.spans[1].context.span_id
    assert recorder.spans[1].context.trace_id == test_span_first.context.trace_id
    assert recorder.spans[1].parent_id == test_span_first.context.span_id
    assert recorder.spans[-1].parent_id is None
    assert recorder.spans[-1].operation_name == 'test_trace_first'
    # reset recorder
    recorder.reset()
    test_span_second = opentracing.tracer.start_span(operation_name='test_trace_second')
    with test_span_second:
        nested(span=test_span_second)
    assert len(recorder.spans) == 2
    assert recorder.spans[0].context.trace_id == test_span_second.context.trace_id
    assert recorder.spans[0].parent_id == recorder.spans[1].context.span_id
    assert recorder.spans[-1].parent_id is None
    assert recorder.spans[-1].operation_name == 'test_trace_second'
def test_trace_nested_broken_traces():
    """Interleaved root spans break implicit parenting; explicit span= restores it."""
    @trace()
    def f1():
        pass
    @trace()
    def f2():
        pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        f1()
        broken_span = opentracing.tracer.start_span(operation_name='broken_trace')
        with broken_span:
            f1(span=broken_span)
        # Broken traces does not work with stack inspection, it is better to pass the span in this case!
        f2(span=test_span)
    assert len(recorder.spans) == 5
    assert recorder.spans[0].context.trace_id == test_span.context.trace_id
    assert recorder.spans[0].parent_id == recorder.spans[-1].context.span_id
    assert recorder.spans[1].context.trace_id == broken_span.context.trace_id
    assert recorder.spans[1].parent_id == recorder.spans[2].context.span_id
    assert recorder.spans[3].context.trace_id == test_span.context.trace_id
    assert recorder.spans[3].parent_id == recorder.spans[-1].context.span_id
    assert recorder.spans[2].parent_id is None
    assert recorder.spans[2].operation_name == 'broken_trace'
    assert recorder.spans[-1].parent_id is None
    assert recorder.spans[-1].operation_name == 'test_trace'
def test_trace_single_with_tracer_args():
    """tags/operation_name/component passed to @trace end up on the recorded span."""
    tags = {'t1': 'v1'}
    operation_name = 'op_name'
    component = 'component'
    @trace(tags=tags, operation_name=operation_name, component=component)
    def f1():
        pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        f1()
    # The component is merged into the tags as the standard opentracing tag.
    tags.update({opentracing_tags.COMPONENT: component})
    assert recorder.spans[0].tags == tags
@pytest.mark.parametrize('return_span', (True, False))
def test_trace_single_with_extractor(return_span):
    """A span_extractor that returns a span overrides the span= keyword argument."""
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    other_span = opentracing.tracer.start_span(operation_name='other_span')
    extractor = MagicMock()
    extractor.return_value = test_span if return_span else None
    @trace(span_extractor=extractor)
    def f1():
        pass
    with other_span:
        # other_span could be ignored if extractor returned a span!
        f1(span=other_span)
    if return_span:
        assert recorder.spans[0].context.trace_id == test_span.context.trace_id
        assert recorder.spans[0].parent_id == test_span.context.span_id
    else:
        assert recorder.spans[0].context.trace_id == other_span.context.trace_id
        assert recorder.spans[0].parent_id == other_span.context.span_id
def test_trace_single_with_ignore_parent():
    """ignore_parent_span=True starts a fresh trace with no parent."""
    @trace(ignore_parent_span=True)
    def f1():
        pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        # test_span will be ignored!
        f1()
    assert recorder.spans[0].context.trace_id != test_span.context.trace_id
    assert recorder.spans[0].parent_id is None
def test_trace_separate_functions():
    """Parent detection works across function boundaries; an already-finished span is not picked."""
    @trace()
    def f1():
        pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    # Finished before f1 runs, so it must not become the parent.
    dummy_span = opentracing.tracer.start_span(operation_name='dummy_trace')
    dummy_span.finish()
    def actual():
        test_span = opentracing.tracer.start_span(operation_name='test_trace')
        with test_span:
            f1()
        assert len(recorder.spans) == 3
        assert recorder.spans[1].context.trace_id == test_span.context.trace_id
        assert recorder.spans[1].parent_id == test_span.context.span_id
    actual()
def test_trace_loop():
    """Each loop iteration's root span parents only that iteration's child span."""
    @trace()
    def f1():
        pass
    def f0():
        f1()
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    for i in range(3):
        test_span = opentracing.tracer.start_span(operation_name='test_trace')
        test_span.set_tag('loop', i)
        with test_span:
            f0()
    # Spans alternate child, root, child, root, ...
    assert len(recorder.spans) == 6
    root_spans = recorder.spans[1::2]
    for idx, span in enumerate(recorder.spans[::2]):
        parent_span = root_spans[idx]
        assert parent_span.tags == {'loop': idx}
        assert span.context.trace_id == parent_span.context.trace_id
        assert span.parent_id == parent_span.context.span_id
def test_trace_skip_span():
    """A skip_span predicate suppresses span creation when it returns True."""
    def skip_span(skip_me, *args, **kwargs):
        return skip_me
    @trace(skip_span=skip_span)
    def f1(skip_me):
        pass
    recorder = Recorder()
    opentracing.tracer = BasicTracer(recorder=recorder)
    test_span = opentracing.tracer.start_span(operation_name='test_trace')
    with test_span:
        f1(False)
        f1(True)
    # Only the non-skipped call plus the root span are recorded.
    assert len(recorder.spans) == 2
    assert recorder.spans[0].context.trace_id == test_span.context.trace_id
    assert recorder.spans[0].parent_id == test_span.context.span_id
|
<filename>dragon/python/vm/onnx/core/backend/native.py<gh_stars>10-100
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Native ONNX backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy
try:
import onnx
from onnx.backend.base import Backend
from onnx.backend.base import BackendRep as ONNXBackendRep
from onnx.backend.base import Device
from onnx.backend.base import DeviceType
from onnx.backend.base import namedtupledict
except ImportError:
from dragon.core.util import deprecation
onnx = deprecation.NotInstalled('onnx')
Backend = object
ONNXBackendRep = object
Device = deprecation.NotInstalled('onnx')
DeviceType = deprecation.NotInstalled('onnx')
namedtupledict = collections.namedtuple
from dragon.core.autograph.graph_lib import GraphLib
from dragon.core.framework import context
from dragon.core.framework import workspace
from dragon.core.framework.tensor import Tensor
from dragon.core.util import nest
class BackendRep(ONNXBackendRep):
    """ONNX-Dragon backend to execute repeatedly."""
    def __init__(self, model, device, **kwargs):
        """Create a ``BackendRep``.
        Parameters
        ----------
        model : str
            The path of onnx model file.
        device : onnx.Device
            The executing device.
        """
        if not isinstance(device, Device):
            device = Device(device)
        execute_ws = workspace.get_workspace()
        # Map the ONNX device descriptor onto dragon's (type, index) pair.
        if device.type == DeviceType.CPU:
            device_type, device_index = 'cpu', 0
        elif device.type == DeviceType.CUDA:
            device_type, device_index = 'cuda', device.device_id
        else:
            raise ValueError('Unsupported device type: ' + device.type)
        # Build the graph under the selected device context so its ops are
        # placed there.
        with context.device(device_type, device_index):
            self._context = GraphLib.from_onnx(model)
        # Cache Tensor wrappers for the graph's declared inputs and outputs,
        # preserving declaration order.
        self._input_dict = collections.OrderedDict()
        self._output_dict = collections.OrderedDict()
        for input in self._context._def.input:
            impl = execute_ws.get_tensor(input)
            self._input_dict[input] = Tensor(impl=impl)
        for output in self._context._def.output:
            impl = execute_ws.get_tensor(output)
            self._output_dict[output] = Tensor(impl=impl)
        # Result type returned by run(): a namedtuple keyed by output names.
        self._output_tuple = namedtupledict('Outputs', self._context._def.output)
    def run(self, inputs, **kwargs):
        """Run the model.
        Parameters
        ----------
        inputs : Union[Sequence, Dict]
            The input arrays.
        Returns
        -------
        namedtuple
            The model outputs.
        """
        # A lone ndarray is treated as a one-element sequence.
        if isinstance(inputs, numpy.ndarray):
            inputs = [inputs]
        if isinstance(inputs, dict):
            # Dict inputs are matched to graph inputs by name.
            for name, value in inputs.items():
                self._input_dict[name]._impl.FromNumpy(value)
        elif nest.is_sequence(inputs):
            # Sequence inputs are matched positionally to the declared inputs.
            for ref, value in zip(self._input_dict.values(), inputs):
                ref._impl.FromNumpy(value)
        else:
            raise ValueError('Excepted sequence or dict inputs.')
        self._context.run()
        return self._output_tuple(*self._output_dict.values())
class DragonBackend(Backend):
    """ONNX-Dragon backend."""
    @classmethod
    def prepare(cls, model, device='CPU:0', **kwargs):
        """Create a backend to execute repeatedly.
        Parameters
        ----------
        model : str
            The path of onnx model file.
        device : str, optional, default='CPU:0'
            The executing device.
        Returns
        -------
        dragon.onnx.BackendRep
            The backend.
        """
        if not os.path.exists(model):
            raise ValueError('Model({}) is not existed.'.format(model))
        return BackendRep(model, device, **kwargs)
    @classmethod
    def run_model(cls, model, inputs, device='CUDA:0', **kwargs):
        """Execute an onnx model once.
        Parameters
        ----------
        model : str
            The path of onnx model file.
        inputs : Union[Sequence, Dict]
            The input arrays.
        device : str, optional
            The executing device.
        Returns
        -------
        namedtuple
            The model outputs.
        """
        # One-shot convenience wrapper: prepare a BackendRep and run it once.
        return cls.prepare(model, device, **kwargs).run(inputs)
    @classmethod
    def supports_device(cls, device_str):
        """Query if the given device is supported.
        Parameters
        ----------
        device_str : str
            The device descriptor.
        Returns
        -------
        bool
            ``True`` if device is supported otherwise ``False``.
        """
        device = Device(device_str)
        if device.type in (DeviceType.CPU, DeviceType.CUDA):
            return True
        return False
# Module-level aliases mirroring the standard onnx.backend API surface.
prepare = DragonBackend.prepare
run_model = DragonBackend.run_model
supports_device = DragonBackend.supports_device
|
import os
def prepare_arg(argument):
    """Prepend dashes to conform with cli standards on arguments if necessary.

    Args:
        argument (str): An argument of the form "key=value". The value may
            itself contain '=' characters.

    Returns:
        list: ``[keyword, value]`` where keyword carries a "-"/"--" prefix.
    """
    # BUG FIX: split only on the first '=' so values containing '='
    # (e.g. "opt=a=b") no longer raise ValueError.
    keyword, arg = argument.split("=", 1)
    if len(keyword) == 1:
        # Single-letter flag: short option.
        keyword = "-" + keyword
    elif len(keyword) == 2 and keyword[0] == "-":
        # Already a short option ("-a").
        pass
    elif keyword[:2] != "--":
        # Anything else becomes a long option.
        keyword = "--" + keyword
    return [keyword, arg]
class ExperimentSetup:
    """Configuration Class that holds the inputs for an experiment run"""
    def __init__(self, *, name: str, configuration_path: str, script_path: str, args: list = None, tags: list = None,
                 reference_configuration_path: str = None):
        """
        Args:
            name (str): The name of the experiment
            configuration_path (str): The path to the configuration file/dir of the experiment
            script_path (str): The path to the script that will run the experiment
            args (list): Optional, list of strings with extra args not included in the configuration_path to
                be passed to the script. Expected form is ["arg1=x", "arg2=y", "arg3=z"]
            tags (list): Optional, list of strings with tags that describe the experiment
            reference_configuration_path (str): Optional a path for a reference configuration. If it is given
                the reference_configuration_path defines the experiment and the configuration_path only requires
                the updated variables
        """
        # NOTE(review): assert-based validation disappears under `python -O`;
        # kept as-is because callers may rely on AssertionError.
        assert os.path.exists(configuration_path), "conf path: {} does not exist".format(configuration_path)
        assert os.path.exists(script_path), "script path: {} does not exist".format(script_path)
        self._conf_path = configuration_path
        self._script_path = script_path
        self._name = name
        # Cleanup: removed a redundant `self._args = []` that was immediately
        # overwritten by the assignment below.
        self._args = [] if args is None else [prepare_arg(argument) for argument in args]
        # Copy the tags so later mutation of the caller's list has no effect.
        self._tags = [] if tags is None else tags[:]
        ref_conf_path = reference_configuration_path
        assert ref_conf_path is None or os.path.exists(ref_conf_path), "ref conf path: {} does not exist".format(
            ref_conf_path)
        self._ref_conf_path = ref_conf_path
    @property
    def name(self):
        """str: The experiment name."""
        return self._name
    @property
    def configuration_path(self):
        """str: Path to the experiment configuration file/dir."""
        return self._conf_path
    @property
    def script_path(self):
        """str: Path to the script that runs the experiment."""
        return self._script_path
    @property
    def tags(self):
        """list: Tags describing the experiment."""
        return self._tags
    @property
    def reference_configuration_path(self):
        """str or None: Path to the reference configuration, if any."""
        return self._ref_conf_path
    @property
    def args(self):
        """list: Prepared [keyword, value] pairs for extra CLI arguments."""
        return self._args
class MultiStageExperimentSetup(ExperimentSetup):
    """Experiment setup for a single stage of a multi-stage experiment run."""
    def __init__(self, *, name, configuration_path, script_path, output_path, stage_name, input_path=None, args=None,
                 tags=None,
                 reference_configuration_path=None):
        """
        Args:
            name (str): The name of the Multistage Experiment
            configuration_path (str): The path to the configuration file/dir of the experiment
            script_path (str): The path to the script that will run the experiment
            output_path (str): Path used as this stage's output location (stored, not validated)
            stage_name (str): Identifier of this stage within the experiment (stored, not validated)
            input_path (str): Optional, input path for this stage; must exist when given
            args (list): Optional, list of strings with extra args not included in the configuration_path to
                be passed to the script. Expected form is ["arg1=x", "arg2=y", "arg3=z"]
            tags (list): Optional, list of strings with tags that describe the experiment
            reference_configuration_path (str): Optional a path for a reference configuration. If it is given
                the reference_configuration_path defines the experiment and the configuration_path only requires
                the updated variables
        """
        super().__init__(
            name=name,
            configuration_path=configuration_path,
            script_path=script_path,
            args=args,
            tags=tags,
            reference_configuration_path=reference_configuration_path
        )
        assert input_path is None or os.path.exists(input_path), "input_path doesn't exist: {}".format(input_path)
        self._input_path = input_path
        self._output_path = output_path
        self._stage_name = stage_name
    @property
    def stage_name(self):
        """str: identifier of this stage."""
        return self._stage_name
    @property
    def input_path(self):
        """str or None: input path for this stage."""
        return self._input_path
    @property
    def output_path(self):
        """str: output location for this stage."""
        return self._output_path
|
<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'SecretIamBindingCondition',
'SecretIamMemberCondition',
'SecretReplication',
'SecretReplicationUserManaged',
'SecretReplicationUserManagedReplica',
'SecretReplicationUserManagedReplicaCustomerManagedEncryption',
'SecretRotation',
'SecretTopic',
'GetSecretReplicationResult',
'GetSecretReplicationUserManagedResult',
'GetSecretReplicationUserManagedReplicaResult',
'GetSecretReplicationUserManagedReplicaCustomerManagedEncryptionResult',
'GetSecretRotationResult',
'GetSecretTopicResult',
]
@pulumi.output_type
class SecretIamBindingCondition(dict):
    """Output type: IAM condition attached to a Secret IAM binding."""
    def __init__(__self__, *,
                 expression: str,
                 title: str,
                 description: Optional[str] = None):
        """
        :param str expression: The condition expression.
        :param str title: Title of the condition.
        :param str description: Optional description of the condition.
        """
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        # description is optional; only stored when provided
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> str:
        """The condition expression."""
        return pulumi.get(self, "expression")
    @property
    @pulumi.getter
    def title(self) -> str:
        """Title of the condition."""
        return pulumi.get(self, "title")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Optional description of the condition, or None if unset."""
        return pulumi.get(self, "description")
@pulumi.output_type
class SecretIamMemberCondition(dict):
    """Output type: IAM condition attached to a Secret IAM member."""
    def __init__(__self__, *,
                 expression: str,
                 title: str,
                 description: Optional[str] = None):
        """
        :param str expression: The condition expression.
        :param str title: Title of the condition.
        :param str description: Optional description of the condition.
        """
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        # description is optional; only stored when provided
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> str:
        """The condition expression."""
        return pulumi.get(self, "expression")
    @property
    @pulumi.getter
    def title(self) -> str:
        """Title of the condition."""
        return pulumi.get(self, "title")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Optional description of the condition, or None if unset."""
        return pulumi.get(self, "description")
@pulumi.output_type
class SecretReplication(dict):
    """Output type: replication policy of a Secret (automatic or user-managed)."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when the camelCase wire key is used instead of the snake_case property.
        suggest = None
        if key == "userManaged":
            suggest = "user_managed"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretReplication. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SecretReplication.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretReplication.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 automatic: Optional[bool] = None,
                 user_managed: Optional['outputs.SecretReplicationUserManaged'] = None):
        """
        :param bool automatic: The Secret will automatically be replicated without any restrictions.
        :param 'SecretReplicationUserManagedArgs' user_managed: User-managed replication configuration (list of replicas).
               Structure is documented below.
        """
        if automatic is not None:
            pulumi.set(__self__, "automatic", automatic)
        if user_managed is not None:
            pulumi.set(__self__, "user_managed", user_managed)
    @property
    @pulumi.getter
    def automatic(self) -> Optional[bool]:
        """
        The Secret will automatically be replicated without any restrictions.
        """
        return pulumi.get(self, "automatic")
    @property
    @pulumi.getter(name="userManaged")
    def user_managed(self) -> Optional['outputs.SecretReplicationUserManaged']:
        """
        User-managed replication configuration (list of replicas).
        Structure is documented below.
        """
        return pulumi.get(self, "user_managed")
@pulumi.output_type
class SecretReplicationUserManaged(dict):
    """Output type: user-managed replication holding the list of replicas."""
    def __init__(__self__, *,
                 replicas: Sequence['outputs.SecretReplicationUserManagedReplica']):
        """
        :param Sequence['SecretReplicationUserManagedReplicaArgs'] replicas: The list of Replicas for this Secret. Cannot be empty.
               Structure is documented below.
        """
        pulumi.set(__self__, "replicas", replicas)
    @property
    @pulumi.getter
    def replicas(self) -> Sequence['outputs.SecretReplicationUserManagedReplica']:
        """
        The list of Replicas for this Secret. Cannot be empty.
        Structure is documented below.
        """
        return pulumi.get(self, "replicas")
@pulumi.output_type
class SecretReplicationUserManagedReplica(dict):
    """Output type: a single replica location (with optional CMEK) of a user-managed replication."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when the camelCase wire key is used instead of the snake_case property.
        suggest = None
        if key == "customerManagedEncryption":
            suggest = "customer_managed_encryption"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretReplicationUserManagedReplica. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SecretReplicationUserManagedReplica.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretReplicationUserManagedReplica.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 location: str,
                 customer_managed_encryption: Optional['outputs.SecretReplicationUserManagedReplicaCustomerManagedEncryption'] = None):
        """
        :param str location: The canonical IDs of the location to replicate data. For example: "us-east1".
        :param 'SecretReplicationUserManagedReplicaCustomerManagedEncryptionArgs' customer_managed_encryption: Customer Managed Encryption for the secret.
               Structure is documented below.
        """
        pulumi.set(__self__, "location", location)
        # CMEK block is optional; only stored when provided
        if customer_managed_encryption is not None:
            pulumi.set(__self__, "customer_managed_encryption", customer_managed_encryption)
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The canonical IDs of the location to replicate data. For example: "us-east1".
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="customerManagedEncryption")
    def customer_managed_encryption(self) -> Optional['outputs.SecretReplicationUserManagedReplicaCustomerManagedEncryption']:
        """
        Customer Managed Encryption for the secret.
        Structure is documented below.
        """
        return pulumi.get(self, "customer_managed_encryption")
@pulumi.output_type
class SecretReplicationUserManagedReplicaCustomerManagedEncryption(dict):
    """Output type: customer-managed encryption (CMEK) settings for a replica."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when the camelCase wire key is used instead of the snake_case property.
        suggest = None
        if key == "kmsKeyName":
            suggest = "kms_key_name"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretReplicationUserManagedReplicaCustomerManagedEncryption. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SecretReplicationUserManagedReplicaCustomerManagedEncryption.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretReplicationUserManagedReplicaCustomerManagedEncryption.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 kms_key_name: str):
        """
        :param str kms_key_name: Describes the Cloud KMS encryption key that will be used to protect destination secret.
        """
        pulumi.set(__self__, "kms_key_name", kms_key_name)
    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> str:
        """
        Describes the Cloud KMS encryption key that will be used to protect destination secret.
        """
        return pulumi.get(self, "kms_key_name")
@pulumi.output_type
class SecretRotation(dict):
    """Output type: rotation policy of a Secret (next rotation time, period)."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when the camelCase wire key is used instead of the snake_case property.
        suggest = None
        if key == "nextRotationTime":
            suggest = "next_rotation_time"
        elif key == "rotationPeriod":
            suggest = "rotation_period"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretRotation. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SecretRotation.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretRotation.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 next_rotation_time: Optional[str] = None,
                 rotation_period: Optional[str] = None):
        """
        :param str next_rotation_time: Timestamp in UTC at which the Secret is scheduled to rotate.
               A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
        :param str rotation_period: The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years).
               If rotationPeriod is set, `next_rotation_time` must be set. `next_rotation_time` will be advanced by this period when the service automatically sends rotation notifications.
        """
        if next_rotation_time is not None:
            pulumi.set(__self__, "next_rotation_time", next_rotation_time)
        if rotation_period is not None:
            pulumi.set(__self__, "rotation_period", rotation_period)
    @property
    @pulumi.getter(name="nextRotationTime")
    def next_rotation_time(self) -> Optional[str]:
        """
        Timestamp in UTC at which the Secret is scheduled to rotate.
        A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
        """
        return pulumi.get(self, "next_rotation_time")
    @property
    @pulumi.getter(name="rotationPeriod")
    def rotation_period(self) -> Optional[str]:
        """
        The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years).
        If rotationPeriod is set, `next_rotation_time` must be set. `next_rotation_time` will be advanced by this period when the service automatically sends rotation notifications.
        """
        return pulumi.get(self, "rotation_period")
@pulumi.output_type
class SecretTopic(dict):
    """Output type: Pub/Sub topic that receives Secret event notifications."""
    def __init__(__self__, *,
                 name: str):
        """
        :param str name: The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*.
               For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic.
        """
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*.
        For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetSecretReplicationResult(dict):
    """Data-source result: replication settings of a Secret."""
    def __init__(__self__, *,
                 automatic: bool,
                 user_manageds: Sequence['outputs.GetSecretReplicationUserManagedResult']):
        pulumi.set(__self__, "automatic", automatic)
        pulumi.set(__self__, "user_manageds", user_manageds)
    @property
    @pulumi.getter
    def automatic(self) -> bool:
        """Whether the Secret uses automatic replication."""
        return pulumi.get(self, "automatic")
    @property
    @pulumi.getter(name="userManageds")
    def user_manageds(self) -> Sequence['outputs.GetSecretReplicationUserManagedResult']:
        """User-managed replication entries, if any."""
        return pulumi.get(self, "user_manageds")
@pulumi.output_type
class GetSecretReplicationUserManagedResult(dict):
    """Data-source result: user-managed replication holding replica entries."""
    def __init__(__self__, *,
                 replicas: Sequence['outputs.GetSecretReplicationUserManagedReplicaResult']):
        pulumi.set(__self__, "replicas", replicas)
    @property
    @pulumi.getter
    def replicas(self) -> Sequence['outputs.GetSecretReplicationUserManagedReplicaResult']:
        """The replicas of this user-managed replication."""
        return pulumi.get(self, "replicas")
@pulumi.output_type
class GetSecretReplicationUserManagedReplicaResult(dict):
    """Data-source result: one replica (location plus CMEK entries)."""
    def __init__(__self__, *,
                 customer_managed_encryptions: Sequence['outputs.GetSecretReplicationUserManagedReplicaCustomerManagedEncryptionResult'],
                 location: str):
        pulumi.set(__self__, "customer_managed_encryptions", customer_managed_encryptions)
        pulumi.set(__self__, "location", location)
    @property
    @pulumi.getter(name="customerManagedEncryptions")
    def customer_managed_encryptions(self) -> Sequence['outputs.GetSecretReplicationUserManagedReplicaCustomerManagedEncryptionResult']:
        """Customer-managed encryption entries for this replica."""
        return pulumi.get(self, "customer_managed_encryptions")
    @property
    @pulumi.getter
    def location(self) -> str:
        """The location this replica resides in."""
        return pulumi.get(self, "location")
@pulumi.output_type
class GetSecretReplicationUserManagedReplicaCustomerManagedEncryptionResult(dict):
    """Data-source result: CMEK settings (KMS key) for a replica."""
    def __init__(__self__, *,
                 kms_key_name: str):
        pulumi.set(__self__, "kms_key_name", kms_key_name)
    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> str:
        """Name of the Cloud KMS key protecting this replica."""
        return pulumi.get(self, "kms_key_name")
@pulumi.output_type
class GetSecretRotationResult(dict):
    """Data-source result: rotation settings of a Secret."""
    def __init__(__self__, *,
                 next_rotation_time: str,
                 rotation_period: str):
        pulumi.set(__self__, "next_rotation_time", next_rotation_time)
        pulumi.set(__self__, "rotation_period", rotation_period)
    @property
    @pulumi.getter(name="nextRotationTime")
    def next_rotation_time(self) -> str:
        """Timestamp of the next scheduled rotation."""
        return pulumi.get(self, "next_rotation_time")
    @property
    @pulumi.getter(name="rotationPeriod")
    def rotation_period(self) -> str:
        """Duration between rotation notifications."""
        return pulumi.get(self, "rotation_period")
@pulumi.output_type
class GetSecretTopicResult(dict):
    """Data-source result: Pub/Sub topic attached to a Secret."""
    def __init__(__self__, *,
                 name: str):
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource name of the Pub/Sub topic."""
        return pulumi.get(self, "name")
|
<reponame>ikestar99/endopy
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 13:06:21 2021
Core code obtained from <NAME>, Barnhart Lab
Class structure and relative imports from Ike Ogbonna, Barnhart Lab
@author: ike
"""
import shutil
import numpy as np
from glob import glob
from .pathutils import getPath, changeExt, makeParentDirectory
def clearEmptyDirectories(base):
    """Remove every subdirectory of *base* that contains no files.

    Entries whose name contains a '.' are treated as files and skipped; a
    directory is deleted when a recursive glob finds no file inside it.
    """
    for entry in glob(getPath(base, "*")):
        if "." in entry:
            continue
        contents = glob(getPath(entry, "**", "*.*"), recursive=True)
        if not contents:
            shutil.rmtree(entry)
def isInt(string):
    """Return True when *string* can be parsed as a base-10 integer."""
    try:
        int(string)
    except ValueError:
        return False
    return True
def boundInt(num, low, up):
    """Clamp *num* into [low, up] (upper bound applied last)."""
    clamped = max(num, low)
    return min(clamped, up)
def nextFactor(num, div):
    """Round *num* up to the nearest multiple of *div*."""
    remainder = num % div
    if remainder == 0:
        return num
    return num + (div - remainder)
def smooth(x, window_len=5, window="hanning"):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.

    input:
        x: 1-D input signal
        window_len: dimension of the smoothing window; should be an odd integer
        window: type of window from "flat", "hanning", "hamming", "bartlett",
            "blackman". flat window will produce a moving average smoothing.
    output:
        smoothed signal, same length as the input for odd window_len

    raises:
        ValueError: if x is not 1-D, shorter than window_len, or the window
            name is unknown.

    see also:
        np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve
        scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array instead
    of a string
    """
    # `raise (ValueError, "...")` raised a TypeError in Python 3; raise the
    # intended ValueError instances instead.
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window not in ("flat", "hanning", "hamming", "bartlett", "blackman"):
        raise ValueError(
            "Window is 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    if window_len < 3:
        return x
    # Reflect-pad both ends so the valid-mode convolution covers the edges.
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-1 - window_len:-1]]
    if window == "flat":  # moving average
        w = np.ones(window_len, "d")
    else:
        # Look the window function up on numpy instead of using eval().
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode="valid")
    # Trim the padding overhang symmetrically; (window_len - 1) // 2 equals
    # the original hard-coded [2:-2] for the default window_len=5 and keeps
    # len(output) == len(input) for any odd window_len.
    trim = (window_len - 1) // 2
    return y[trim:len(y) - trim]
def upsample(x, t, up):
    """Insert midpoints between consecutive samples *up* times.

    Each pass appends the pairwise means of consecutive values/times and
    re-sorts everything by time, roughly doubling the sampling density.

    Args:
        x (list): sample values.
        t (list): sample times, same length as x.
        up (int): number of midpoint-insertion passes.

    Returns:
        tuple: (t_up, x_up) upsampled times and values, sorted by time.
    """
    # Copy the inputs: the previous implementation aliased the caller's
    # lists and mutated them via extend().
    x_up = list(x)
    t_up = list(t)
    for _ in range(up):
        x_mid = np.mean(np.asarray([x_up[:-1], x_up[1:]]), axis=0)
        t_mid = np.mean(np.asarray([t_up[:-1], t_up[1:]]), axis=0)
        x_up.extend(list(x_mid))
        t_up.extend(list(t_mid))
        # Re-sort the combined samples by time.
        A = np.zeros((len(t_up), 2))
        A[:, 0] = t_up
        A[:, 1] = x_up
        AS = A[A[:, 0].argsort()]
        t_up = list(AS[:, 0])
        x_up = list(AS[:, 1])
    return t_up, x_up
def downsample(x, t, bin_size, first_bin, last_bin):
    """Bin-average the signal (t, x) onto evenly spaced bin centers.

    Samples are sorted by time, then every bin of width *bin_size* centered
    at first_bin, first_bin + bin_size, ..., last_bin is averaged.

    Returns:
        tuple: (ds_t, ds_x) mean time and mean value per bin.
    """
    centers = np.arange(first_bin, last_bin + bin_size, bin_size)
    pairs = np.zeros((len(t), 2))
    pairs[:, 0] = t
    pairs[:, 1] = x
    ordered = pairs[pairs[:, 0].argsort()]
    ds_t = []
    ds_x = []
    half = bin_size / 2.
    for center in centers:
        lo = np.searchsorted(ordered[:, 0], center - half)
        hi = np.searchsorted(ordered[:, 0], center + half)
        ds_t.append(np.mean(ordered[lo:hi, 0]))
        ds_x.append(np.mean(ordered[lo:hi, 1]))
    return ds_t, ds_x
def resample(x, t, up, bin_size, first_bin, last_bin):
    """Upsample (t, x) by *up* midpoint passes, then bin-average it back down."""
    up_t, up_x = upsample(x, t, up)
    return downsample(up_x, up_t, bin_size, first_bin, last_bin)
|
<reponame>windstamp/PaddleSeg
# coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
from .seg_modules import softmax_with_loss
from .seg_modules import dice_loss
from .seg_modules import bce_loss
from .libs import sigmoid_to_softmax
class HRNet(object):
    """High-Resolution Network (HRNet) segmentation model on paddle.fluid.

    Keeps parallel feature branches at several resolutions across four
    stages, repeatedly exchanging information between branches, then
    upsamples and concatenates all branches for per-pixel classification.
    The `mode` attribute ('train'/'eval'/other) selects which graph outputs
    build_net returns.
    """
    def __init__(self,
                 num_classes,
                 mode='train',
                 stage1_num_modules=1,
                 stage1_num_blocks=[4],
                 stage1_num_channels=[64],
                 stage2_num_modules=1,
                 stage2_num_blocks=[4, 4],
                 stage2_num_channels=[18, 36],
                 stage3_num_modules=4,
                 stage3_num_blocks=[4, 4, 4],
                 stage3_num_channels=[18, 36, 72],
                 stage4_num_modules=3,
                 stage4_num_blocks=[4, 4, 4, 4],
                 stage4_num_channels=[18, 36, 72, 144],
                 use_bce_loss=False,
                 use_dice_loss=False,
                 class_weight=None,
                 ignore_index=255):
        """Validate loss/class-weight options and store the stage layout."""
        # dice_loss / bce_loss are only applicable to binary segmentation
        if num_classes > 2 and (use_bce_loss or use_dice_loss):
            raise ValueError(
                "dice loss and bce loss is only applicable to binary classfication"
            )
        # class_weight is either a per-class list or the string 'dynamic'
        if class_weight is not None:
            if isinstance(class_weight, list):
                if len(class_weight) != num_classes:
                    raise ValueError(
                        "Length of class_weight should be equal to number of classes"
                    )
            elif isinstance(class_weight, str):
                if class_weight.lower() != 'dynamic':
                    raise ValueError(
                        "if class_weight is string, must be dynamic!")
            else:
                raise TypeError(
                    'Expect class_weight is a list or string but receive {}'.
                    format(type(class_weight)))
        self.num_classes = num_classes
        self.mode = mode
        self.use_bce_loss = use_bce_loss
        self.use_dice_loss = use_dice_loss
        self.class_weight = class_weight
        self.ignore_index = ignore_index
        self.stage1_num_modules = stage1_num_modules
        self.stage1_num_blocks = stage1_num_blocks
        self.stage1_num_channels = stage1_num_channels
        self.stage2_num_modules = stage2_num_modules
        self.stage2_num_blocks = stage2_num_blocks
        self.stage2_num_channels = stage2_num_channels
        self.stage3_num_modules = stage3_num_modules
        self.stage3_num_blocks = stage3_num_blocks
        self.stage3_num_channels = stage3_num_channels
        self.stage4_num_modules = stage4_num_modules
        self.stage4_num_blocks = stage4_num_blocks
        self.stage4_num_channels = stage4_num_channels
    def build_net(self, inputs):
        """Build the forward graph.

        Returns the loss in 'train' mode, (pred, prob) in other modes.
        """
        # NOTE: dice/bce losses use a single-logit head, so the class count
        # is forced to 1 here (this mutates self.num_classes).
        if self.use_dice_loss or self.use_bce_loss:
            self.num_classes = 1
        image = inputs['image']
        logit = self._high_resolution_net(image, self.num_classes)
        if self.num_classes == 1:
            out = sigmoid_to_softmax(logit)
            out = fluid.layers.transpose(out, [0, 2, 3, 1])
        else:
            out = fluid.layers.transpose(logit, [0, 2, 3, 1])
        pred = fluid.layers.argmax(out, axis=3)
        pred = fluid.layers.unsqueeze(pred, axes=[3])
        if self.mode == 'train':
            label = inputs['label']
            # mask out pixels labelled with ignore_index
            mask = label != self.ignore_index
            return self._get_loss(logit, label, mask)
        else:
            if self.num_classes == 1:
                logit = sigmoid_to_softmax(logit)
            else:
                logit = fluid.layers.softmax(logit, axis=1)
            return pred, logit
        # unreachable: both branches above return
        return logit
    def generate_inputs(self):
        """Declare the fluid input tensors: image always, label in train/eval."""
        inputs = OrderedDict()
        inputs['image'] = fluid.data(
            dtype='float32', shape=[None, 3, None, None], name='image')
        # train and eval both need the label input (branches are identical)
        if self.mode == 'train':
            inputs['label'] = fluid.data(
                dtype='int32', shape=[None, 1, None, None], name='label')
        elif self.mode == 'eval':
            inputs['label'] = fluid.data(
                dtype='int32', shape=[None, 1, None, None], name='label')
        return inputs
    def _get_loss(self, logit, label, mask):
        """Sum the configured losses (softmax, or dice and/or bce) over valid pixels."""
        avg_loss = 0
        if not (self.use_dice_loss or self.use_bce_loss):
            avg_loss += softmax_with_loss(
                logit,
                label,
                mask,
                num_classes=self.num_classes,
                weight=self.class_weight,
                ignore_index=self.ignore_index)
        else:
            if self.use_dice_loss:
                avg_loss += dice_loss(logit, label, mask)
            if self.use_bce_loss:
                avg_loss += bce_loss(
                    logit, label, mask, ignore_index=self.ignore_index)
        return avg_loss
    def _conv_bn_layer(self,
                       input,
                       filter_size,
                       num_filters,
                       stride=1,
                       padding=1,
                       num_groups=1,
                       if_act=True,
                       name=None):
        """Conv2d (no bias, MSRA init) + batch norm, with optional ReLU.

        NOTE(review): the `padding` argument is ignored; padding is always
        derived from filter_size as (filter_size - 1) // 2.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=num_groups,
            act=None,
            param_attr=ParamAttr(initializer=MSRA(), name=name + '_weights'),
            bias_attr=False)
        bn_name = name + '_bn'
        bn = fluid.layers.batch_norm(
            input=conv,
            param_attr=ParamAttr(
                name=bn_name + "_scale",
                initializer=fluid.initializer.Constant(1.0)),
            bias_attr=ParamAttr(
                name=bn_name + "_offset",
                initializer=fluid.initializer.Constant(0.0)),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
        if if_act:
            bn = fluid.layers.relu(bn)
        return bn
    def _basic_block(self,
                     input,
                     num_filters,
                     stride=1,
                     downsample=False,
                     name=None):
        """Residual basic block: two 3x3 conv-bn layers plus a skip connection.

        When `downsample` is True the skip path is projected with a 1x1 conv.
        """
        residual = input
        conv = self._conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=num_filters,
            stride=stride,
            name=name + '_conv1')
        conv = self._conv_bn_layer(
            input=conv,
            filter_size=3,
            num_filters=num_filters,
            if_act=False,
            name=name + '_conv2')
        if downsample:
            residual = self._conv_bn_layer(
                input=input,
                filter_size=1,
                num_filters=num_filters,
                if_act=False,
                name=name + '_downsample')
        return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')
    def _bottleneck_block(self,
                          input,
                          num_filters,
                          stride=1,
                          downsample=False,
                          name=None):
        """Residual bottleneck block: 1x1 -> 3x3 -> 1x1 (4x expansion) + skip."""
        residual = input
        conv = self._conv_bn_layer(
            input=input,
            filter_size=1,
            num_filters=num_filters,
            name=name + '_conv1')
        conv = self._conv_bn_layer(
            input=conv,
            filter_size=3,
            num_filters=num_filters,
            stride=stride,
            name=name + '_conv2')
        conv = self._conv_bn_layer(
            input=conv,
            filter_size=1,
            num_filters=num_filters * 4,
            if_act=False,
            name=name + '_conv3')
        if downsample:
            residual = self._conv_bn_layer(
                input=input,
                filter_size=1,
                num_filters=num_filters * 4,
                if_act=False,
                name=name + '_downsample')
        return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')
    def _fuse_layers(self, x, channels, multi_scale_output=True, name=None):
        """Fuse branches: for each output branch i, bring every other branch j
        to branch i's resolution (1x1 conv + bilinear resize for j > i;
        repeated stride-2 3x3 convs for j < i), sum them and apply ReLU."""
        out = []
        for i in range(len(channels) if multi_scale_output else 1):
            residual = x[i]
            shape = fluid.layers.shape(residual)[-2:]
            for j in range(len(channels)):
                if j > i:
                    y = self._conv_bn_layer(
                        x[j],
                        filter_size=1,
                        num_filters=channels[i],
                        if_act=False,
                        name=name + '_layer_' + str(i + 1) + '_' + str(j + 1))
                    y = fluid.layers.resize_bilinear(input=y, out_shape=shape)
                    residual = fluid.layers.elementwise_add(
                        x=residual, y=y, act=None)
                elif j < i:
                    y = x[j]
                    # downsample branch j to branch i's resolution in i-j steps
                    for k in range(i - j):
                        if k == i - j - 1:
                            y = self._conv_bn_layer(
                                y,
                                filter_size=3,
                                num_filters=channels[i],
                                stride=2,
                                if_act=False,
                                name=name + '_layer_' + str(i + 1) + '_' +
                                str(j + 1) + '_' + str(k + 1))
                        else:
                            y = self._conv_bn_layer(
                                y,
                                filter_size=3,
                                num_filters=channels[j],
                                stride=2,
                                name=name + '_layer_' + str(i + 1) + '_' +
                                str(j + 1) + '_' + str(k + 1))
                    residual = fluid.layers.elementwise_add(
                        x=residual, y=y, act=None)
            residual = fluid.layers.relu(residual)
            out.append(residual)
        return out
    def _branches(self, x, block_num, channels, name=None):
        """Run block_num[i] basic blocks on each parallel branch i."""
        out = []
        for i in range(len(channels)):
            residual = x[i]
            for j in range(block_num[i]):
                residual = self._basic_block(
                    residual,
                    channels[i],
                    name=name + '_branch_layer_' + str(i + 1) + '_' +
                    str(j + 1))
            out.append(residual)
        return out
    def _high_resolution_module(self,
                                x,
                                blocks,
                                channels,
                                multi_scale_output=True,
                                name=None):
        """One HRNet module: per-branch basic blocks followed by cross-branch fusion."""
        residual = self._branches(x, blocks, channels, name=name)
        out = self._fuse_layers(
            residual,
            channels,
            multi_scale_output=multi_scale_output,
            name=name)
        return out
    def _transition_layer(self, x, in_channels, out_channels, name=None):
        """Adapt branches between stages: adjust channel counts of existing
        branches and create new lower-resolution branches with stride-2 convs."""
        num_in = len(in_channels)
        num_out = len(out_channels)
        out = []
        for i in range(num_out):
            if i < num_in:
                if in_channels[i] != out_channels[i]:
                    residual = self._conv_bn_layer(
                        x[i],
                        filter_size=3,
                        num_filters=out_channels[i],
                        name=name + '_layer_' + str(i + 1))
                    out.append(residual)
                else:
                    out.append(x[i])
            else:
                # new branch: downsample the last existing branch
                residual = self._conv_bn_layer(
                    x[-1],
                    filter_size=3,
                    num_filters=out_channels[i],
                    stride=2,
                    name=name + '_layer_' + str(i + 1))
                out.append(residual)
        return out
    def _stage(self,
               x,
               num_modules,
               num_blocks,
               num_channels,
               multi_scale_output=True,
               name=None):
        """Chain num_modules high-resolution modules; the last one optionally
        emits a single-scale output."""
        out = x
        for i in range(num_modules):
            if i == num_modules - 1 and multi_scale_output == False:
                out = self._high_resolution_module(
                    out,
                    num_blocks,
                    num_channels,
                    multi_scale_output=False,
                    name=name + '_' + str(i + 1))
            else:
                out = self._high_resolution_module(
                    out, num_blocks, num_channels, name=name + '_' + str(i + 1))
        return out
    def _layer1(self, input, num_modules, num_blocks, num_channels, name=None):
        """Stage 1: a chain of bottleneck blocks (first block downsamples the skip)."""
        # num_modules defaults to 1; reference implementation uses [1] —
        # TODO: confirm alignment with the official implementation.
        conv = input
        for i in range(num_blocks[0]):
            conv = self._bottleneck_block(
                conv,
                num_filters=num_channels[0],
                downsample=True if i == 0 else False,
                name=name + '_' + str(i + 1))
        return conv
    def _high_resolution_net(self, input, num_classes):
        """Full HRNet forward pass: stem (two stride-2 convs), four stages with
        transition layers, multi-branch fusion by concatenation, and a 1x1
        classifier upsampled back to the input resolution."""
        x = self._conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=self.stage1_num_channels[0],
            stride=2,
            if_act=True,
            name='layer1_1')
        x = self._conv_bn_layer(
            input=x,
            filter_size=3,
            num_filters=self.stage1_num_channels[0],
            stride=2,
            if_act=True,
            name='layer1_2')
        la1 = self._layer1(
            x,
            self.stage1_num_modules,
            self.stage1_num_blocks,
            self.stage1_num_channels,
            name='layer2')
        tr1 = self._transition_layer([la1],
                                     self.stage1_num_channels,
                                     self.stage2_num_channels,
                                     name='tr1')
        st2 = self._stage(
            tr1,
            self.stage2_num_modules,
            self.stage2_num_blocks,
            self.stage2_num_channels,
            name='st2')
        tr2 = self._transition_layer(
            st2, self.stage2_num_channels, self.stage3_num_channels, name='tr2')
        st3 = self._stage(
            tr2,
            self.stage3_num_modules,
            self.stage3_num_blocks,
            self.stage3_num_channels,
            name='st3')
        tr3 = self._transition_layer(
            st3, self.stage3_num_channels, self.stage4_num_channels, name='tr3')
        st4 = self._stage(
            tr3,
            self.stage4_num_modules,
            self.stage4_num_blocks,
            self.stage4_num_channels,
            name='st4')
        # upsample: bring every branch to the highest resolution and concat
        shape = fluid.layers.shape(st4[0])[-2:]
        st4[1] = fluid.layers.resize_bilinear(st4[1], out_shape=shape)
        st4[2] = fluid.layers.resize_bilinear(st4[2], out_shape=shape)
        st4[3] = fluid.layers.resize_bilinear(st4[3], out_shape=shape)
        out = fluid.layers.concat(st4, axis=1)
        last_channels = sum(self.stage4_num_channels)
        out = self._conv_bn_layer(
            input=out,
            filter_size=1,
            num_filters=last_channels,
            stride=1,
            if_act=True,
            name='conv-2')
        # 1x1 classifier head producing num_classes logits
        out = fluid.layers.conv2d(
            input=out,
            num_filters=num_classes,
            filter_size=1,
            stride=1,
            padding=0,
            act=None,
            param_attr=ParamAttr(initializer=MSRA(), name='conv-1_weights'),
            bias_attr=False)
        # restore the original spatial resolution
        input_shape = fluid.layers.shape(input)[-2:]
        out = fluid.layers.resize_bilinear(out, input_shape)
        return out
|
<reponame>Sebastian-Belkner/LensIt
from __future__ import print_function
import glob
import os
import shutil
import time
import numpy as np
from lensit.pbs import pbs
from lensit.ffs_deflect import ffs_deflect
from lensit.ffs_qlms import qlms as ql
from lensit.ffs_covs import ffs_specmat, ffs_cov
from lensit.misc.misc_utils import PartialDerivativePeriodic as PDP, cl_inverse
from lensit.ffs_iterators import bfgs
from lensit.qcinv import multigrid, chain_samples
from lensit.sims import ffs_phas
_types = ['T', 'QU', 'TQU']
def prt_time(dt, label=''):
    """Print a carriage-returned '[HH:MM:SS] label' stamp for *dt* seconds."""
    total = int(np.floor(dt))
    dh, rem = divmod(total, 3600)
    dm, ds = divmod(rem, 60)
    print("\r [" + ('%02d:%02d:%02d' % (dh, dm, ds)) + "] " + label)
    return
class ffs_iterator(object):
r"""Flat-sky iterator template class
Args:
lib_dir: many things will be written there
typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly
filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )
dat_maps: data maps or path to maps.
lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays
Plm0: Starting point for the iterative search. alm array consistent with *lib_qlm*
H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)
cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.
chain_descr: multigrid conjugate gradient inversion chain description
"""
def __init__(self, lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,
use_Pool_lens=0, use_Pool_inverse=0, chain_descr=None, opfilt=None, soltn0=None, cache_magn=False,
no_deglensing=False, NR_method=100, tidy=10, verbose=True, maxcgiter=150, PBSSIZE=None, PBSRANK=None,
**kwargs):
assert typ in _types
assert chain_descr is not None
assert opfilt is not None
assert filt.lib_skyalm.lsides == lib_qlm.lsides
self.PBSSIZE = pbs.size if PBSSIZE is None else PBSSIZE
self.PBSRANK = pbs.rank if PBSRANK is None else PBSRANK
assert self.PBSRANK < self.PBSSIZE, (self.PBSRANK, self.PBSSIZE)
self.barrier = (lambda: 0) if self.PBSSIZE == 1 else pbs.barrier
self.type = typ
self.lib_dir = lib_dir
self.dat_maps = dat_maps
self.chain_descr = chain_descr
self.opfilt = opfilt
self.cl_pp = cpp_prior
self.lib_qlm = lib_qlm
self.cache_magn = cache_magn
self.lsides = filt.lib_skyalm.lsides
self.lmax_qlm = self.lib_qlm.ellmax
self.NR_method = NR_method
self.tidy = tidy
self.maxiter = maxcgiter
self.verbose = verbose
self.nodeglensing = no_deglensing
if self.verbose:
print(" I see t", filt.Nlev_uKamin('t'))
print(" I see q", filt.Nlev_uKamin('q'))
print(" I see u", filt.Nlev_uKamin('u'))
# Defining a trial newton step length :
def newton_step_length(it, norm_incr): # FIXME
# Just trying if half the step is better for S4 QU
if filt.Nlev_uKamin('t') > 2.1: return 1.0
if filt.Nlev_uKamin('t') <= 2.1 and norm_incr >= 0.5:
return 0.5
return 0.5
self.newton_step_length = newton_step_length
self.soltn0 = soltn0
f_id = ffs_deflect.ffs_id_displacement(filt.lib_skyalm.shape, filt.lib_skyalm.lsides)
if not hasattr(filt, 'f') or not hasattr(filt, 'fi'):
self.cov = filt.turn2wlfilt(f_id, f_id)
else:
filt.set_ffi(f_id, f_id)
self.cov = filt
if self.PBSRANK == 0:
if not os.path.exists(self.lib_dir): os.makedirs(self.lib_dir)
self.barrier()
#FIXME
#self.soltn_cond = np.all([np.all(self.filt.get_mask(_t) == 1.) for _t in self.type])
self.soltn_cond = False
print('ffs iterator : This is %s trying to setup %s' % (self.PBSRANK, lib_dir))
# Lensed covariance matrix library :
# We will redefine the displacement at each iteration step
self.use_Pool = use_Pool_lens
self.use_Pool_inverse = use_Pool_inverse
if self.PBSRANK == 0: # FIXME : hash and hashcheck
if not os.path.exists(self.lib_dir):
os.makedirs(self.lib_dir)
if not os.path.exists(self.lib_dir + '/MAPlms'):
os.makedirs(self.lib_dir + '/MAPlms')
if not os.path.exists(self.lib_dir + '/cghistories'):
os.makedirs(self.lib_dir + '/cghistories')
# pre_calculation of qlm_norms with rank 0:
if self.PBSRANK == 0 and \
(not os.path.exists(self.lib_dir + '/qlm_%s_H0.dat' % ('P'))
or not os.path.exists(self.lib_dir + '/%shi_plm_it%03d.npy' % ('P', 0))):
print('++ ffs_%s_iterator: Caching qlm_norms and N0s' % typ + self.lib_dir)
# Caching qlm norm that we will use as zeroth order curvature : (with lensed weights)
# Prior curvature :
# Gaussian priors
prior_pp = cl_inverse(self.cl_pp[0:self.lmax_qlm + 1])
prior_pp[0] *= 0.5
curv_pp = H0 + prior_pp # isotropic estimate of the posterior curvature at the starting point
self.cache_cl(self.lib_dir + '/qlm_%s_H0.dat' % ('P'), cl_inverse(curv_pp))
print(" cached %s" % self.lib_dir + '/qlm_%s_H0.dat' % 'P')
fname_P = self.lib_dir + '/%shi_plm_it%03d.npy' % ('P', 0)
self.cache_qlm(fname_P, self.load_qlm(Plm0))
self.barrier()
if not os.path.exists(self.lib_dir + '/Hessian') and self.PBSRANK == 0:
os.makedirs(self.lib_dir + '/Hessian')
# We store here the rank 2 updates to the Hessian according to the BFGS iterations.
if not os.path.exists(self.lib_dir + '/history_increment.txt') and self.PBSRANK == 0:
with open(self.lib_dir + '/history_increment.txt', 'w') as file:
file.write('# Iteration step \n' +
'# Exec. time in sec.\n' +
'# Increment norm (normalized to starting point displacement norm) \n' +
'# Total gradient norm (all grad. norms normalized to initial total gradient norm)\n' +
'# Quad. gradient norm\n' +
'# Det. gradient norm\n' +
'# Pri. gradient norm\n' +
'# Newton step length\n')
file.close()
if self.PBSRANK == 0: print('++ ffs_%s masked iterator : setup OK' % type)
self.barrier()
def get_mask(self):
ret = np.ones(self.cov.lib_datalm.shape, dtype=float)
ret[np.where(self.cov.ninv_rad <= 0.)] *= 0
return ret
def get_datmaps(self):
return np.load(self.dat_maps) if isinstance(self.dat_maps, str) else self.dat_maps
def cache_qlm(self, fname, alm, pbs_rank=None):
if pbs_rank is not None and self.PBSRANK != pbs_rank:
return
else:
assert self.load_qlm(alm).ndim == 1 and self.load_qlm(alm).size == self.lib_qlm.alm_size
print('rank %s caching ' % self.PBSRANK + fname)
self.lib_qlm.write_alm(fname, self.load_qlm(alm))
return
def load_qlm(self, fname):
return self.lib_qlm.read_alm(fname) if isinstance(fname, str) else fname
def cache_rlm(self, fname, rlm):
assert rlm.ndim == 1 and rlm.size == 2 * self.lib_qlm.alm_size, (rlm.ndim, rlm.size)
print('rank %s caching ' % self.PBSRANK, fname)
np.save(fname, rlm)
def load_rlm(self, fname):
rlm = np.load(fname)
assert rlm.ndim == 1 and rlm.size == 2 * self.lib_qlm.alm_size, (rlm.ndim, rlm.size)
return rlm
@staticmethod
def cache_cl(fname, cl):
assert cl.ndim == 1
np.savetxt(fname, cl)
@staticmethod
def load_cl(fname):
assert os.path.exists(fname), fname
return np.loadtxt(fname)
def get_H0(self, key):
assert key.lower() in ['p', 'o'], key # potential or curl potential.
fname = os.path.join(self.lib_dir, 'qlm_%s_H0.dat' % key.upper())
assert os.path.exists(fname), fname
return self.load_cl(fname)
def is_previous_iter_done(self, it, key):
if it == 0: return True
assert key.lower() in ['p', 'o'], key # potential or curl potential.
fn = os.path.join(self.lib_dir, '%s_plm_it%03d.npy' % ({'p': 'Phi', 'o': 'Om'}[key.lower()], it - 1))
return os.path.exists(fn)
def how_many_iter_done(self, key):
""" Returns the number of points already calculated. 0th is the qest.
"""
assert key.lower() in ['p', 'o'], key # potential or curl potential.
fn = os.path.join(self.lib_dir, '%s_plm_it*.npy' % {'p': 'Phi', 'o': 'Om'}[key.lower()])
return len( glob.glob(fn))
def get_Plm(self, it, key):
"""Loads solution at iteration *it*
"""
if it < 0:
return np.zeros(self.lib_qlm.alm_size, dtype=complex)
assert key.lower() in ['p', 'o'], key # potential or curl potential.
fn = os.path.join(self.lib_dir,'%s_plm_it%03d.npy' % ({'p': 'Phi', 'o': 'Om'}[key.lower()], it))
assert os.path.exists(fn), fn
return self.load_qlm(fn)
def get_Phimap(self, it, key):
assert key.lower() in ['p', 'o'], key # potential or curl potential.
return self.lib_qlm.alm2map(self.get_Plm(it, key))
def _getfnames_f(self, key, it):
assert key.lower() in ['p', 'o'], key # potential or curl potential.
fname_dx = os.path.join(self.lib_dir, 'f_%s_it%03d_dx.npy' % (key.lower(), it))
fname_dy = os.path.join(self.lib_dir, 'f_%s_it%03d_dy.npy' % (key.lower(), it))
return fname_dx, fname_dy
def _getfnames_finv(self, key, it):
assert key.lower() in ['p', 'o'], key # potential or curl potential.
fname_dx = os.path.join(self.lib_dir, 'finv_%s_it%03d_dx.npy' % (key.lower(), it))
fname_dy = os.path.join(self.lib_dir, 'finv_%s_it%03d_dy.npy' % (key.lower(), it))
return fname_dx, fname_dy
    def _calc_ffinv(self, it, key):
        """Calculates displacement at iter and its inverse. Only mpi rank 0 can do this.

        Builds the deflection components (dx, dy) from the current Wiener-filtered
        potential map via spectral derivatives (PDP), caches them on disk, then
        computes and caches the inverse displacement. No-op for it < 0.
        """
        assert self.PBSRANK == 0, 'NO MPI METHOD'
        if it < 0: return
        assert key.lower() in ['p', 'o'], key  # potential or curl potential.
        fname_dx, fname_dy = self._getfnames_f(key, it)
        if not os.path.exists(fname_dx) or not os.path.exists(fname_dy):
            # FIXME : does this from plm
            assert self.is_previous_iter_done(it, key)
            Phi_est_WF = self.get_Phimap(it, key)
            assert self.cov.lib_skyalm.shape == Phi_est_WF.shape
            assert self.cov.lib_skyalm.shape == self.lib_qlm.shape
            assert self.cov.lib_skyalm.lsides == self.lib_qlm.lsides
            # Grid spacing in each direction (physical size / number of pixels).
            rmin = np.array(self.cov.lib_skyalm.lsides) / np.array(self.cov.lib_skyalm.shape)
            print('rank %s caching displacement comp. for it. %s for key %s' % (self.PBSRANK, it, key))
            # For the gradient ('p') the deflection is the gradient of phi;
            # for the curl ('o') it is the rotated gradient.
            if key.lower() == 'p':
                dx = PDP(Phi_est_WF, axis=1, h=rmin[1])
                dy = PDP(Phi_est_WF, axis=0, h=rmin[0])
            else:
                dx = -PDP(Phi_est_WF, axis=0, h=rmin[0])
                dy = PDP(Phi_est_WF, axis=1, h=rmin[1])
            if self.PBSRANK == 0:
                np.save(fname_dx, dx)
                np.save(fname_dy, dy)
                del dx, dy
        lib_dir = os.path.join(self.lib_dir, 'f_%04d_libdir' % it)
        if not os.path.exists(lib_dir): os.makedirs(lib_dir)
        fname_invdx, fname_invdy = self._getfnames_finv(key, it)
        if not os.path.exists(fname_invdx) or not os.path.exists(fname_invdy):
            f = self._load_f(it, key)
            print('rank %s inverting displacement it. %s for key %s' % (self.PBSRANK, it, key))
            f_inv = f.get_inverse(use_Pool=self.use_Pool_inverse)
            np.save(fname_invdx, f_inv.get_dx())
            np.save(fname_invdy, f_inv.get_dy())
        lib_dir = os.path.join(self.lib_dir, 'finv_%04d_libdir' % it)
        if not os.path.exists(lib_dir): os.makedirs(lib_dir)
        assert os.path.exists(fname_invdx), fname_invdx
        assert os.path.exists(fname_invdy), fname_invdy
        return
def _load_f(self, it, key):
"""Loads current displacement solution at iteration iter
"""
fname_dx, fname_dy = self._getfnames_f(key, it)
lib_dir = os.path.join(self.lib_dir, 'f_%04d_libdir' % it)
assert os.path.exists(fname_dx), fname_dx
assert os.path.exists(fname_dx), fname_dy
assert os.path.exists(lib_dir), lib_dir
return ffs_deflect.ffs_displacement(fname_dx, fname_dy, self.lsides,
verbose=(self.PBSRANK == 0), lib_dir=lib_dir, cache_magn=self.cache_magn)
def _load_finv(self, it, key):
"""Loads current inverse displacement solution at iteration iter.
"""
fname_invdx, fname_invdy = self._getfnames_finv(key, it)
lib_dir = os.path.join(self.lib_dir, 'finv_%04d_libdir' % it)
assert os.path.exists(fname_invdx), fname_invdx
assert os.path.exists(fname_invdx), fname_invdy
assert os.path.exists(lib_dir), lib_dir
return ffs_deflect.ffs_displacement(fname_invdx, fname_invdy, self.lsides,
verbose=(self.PBSRANK == 0), lib_dir=lib_dir, cache_magn=self.cache_magn)
def load_soltn(self, it, key):
assert key.lower() in ['p', 'o']
for i in np.arange(it, -1, -1):
fname = os.path.join(self.lib_dir, 'MAPlms/Mlik_%s_it%s.npy' % (key.lower(), i))
if os.path.exists(fname):
print("rank %s loading " % pbs.rank + fname)
return np.load(fname)
if self.soltn0 is not None: return np.load(self.soltn0)[:self.opfilt.TEBlen(self.type)]
return np.zeros((self.opfilt.TEBlen(self.type), self.cov.lib_skyalm.alm_size), dtype=complex)
def _cache_tebwf(self, TEBMAP, it, key):
assert key.lower() in ['p', 'o']
fname = os.path.join(self.lib_dir, 'MAPlms/Mlik_%s_it%s.npy' % (key.lower(), it))
print("rank %s caching " % pbs.rank + fname)
np.save(fname, TEBMAP)
def get_gradPpri(self, it, key, cache_only=False):
"""Builds prior gradient at iteration *it*
"""
assert self.PBSRANK == 0, 'NO MPI method!'
assert key.lower() in ['p', 'o'], key # potential or curl potential.
assert it > 0, it
fname = os.path.join(self.lib_dir, 'qlm_grad%spri_it%03d.npy' % (key.upper(), it - 1))
if os.path.exists(fname):
return None if cache_only else self.load_qlm(fname)
assert self.is_previous_iter_done(it, key)
grad = self.lib_qlm.almxfl(self.get_Plm(it - 1, key),
cl_inverse(self.cl_pp if key.lower() == 'p' else self.cl_oo))
self.cache_qlm(fname, grad, pbs_rank=0)
return None if cache_only else self.load_qlm(fname)
    def _mlik2rest_tqumlik(self, TQUMlik, it, key):
        """Produces B^t Ni (data - B D Mlik) in TQU space, that is fed into the qlm estimator.

        Statement order matters: the covariance is first put into the lensed (D)
        state of iteration *it* - 1 to form the residual, then reset to the
        identity displacement before the transpose operations are applied.
        """
        f_id = ffs_deflect.ffs_id_displacement(self.cov.lib_skyalm.shape, self.cov.lib_skyalm.lsides)
        self.cov.set_ffi(self._load_f(it - 1, key), self._load_finv(it - 1, key))
        temp = ffs_specmat.TQU2TEBlms(self.type, self.cov.lib_skyalm, TQUMlik)
        maps = self.get_datmaps() - self.cov.apply_Rs(self.type, temp)  # data - B D Mlik
        self.cov.apply_maps(self.type, maps, inplace=True)  # Ni (data - B D Mlik)
        self.cov.set_ffi(f_id, f_id)  # back to the unlensed state for the transpose
        temp = self.cov.apply_Rts(self.type, maps)  # B^t Ni (data - B D Mlik)
        return ffs_specmat.TEB2TQUlms(self.type, self.cov.lib_skyalm, temp)
def calc_gradplikpdet(self, it, key):
"""Calculates the likelihood gradient (quadratic and mean-field parts)
"""
assert 0, 'subclass this'
def load_graddet(self, it, key):
"""Loads mean-field gradient at iteration *it*
Gradient must have already been calculated
"""
fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it))
assert os.path.exists(fname_detterm), fname_detterm
return self.load_qlm(fname_detterm)
def load_gradpri(self, it, key):
"""Loads prior gradient at iteration *it*
Gradient must have already been calculated
"""
fname_prior = os.path.join(self.lib_dir, 'qlm_grad%spri_it%03d.npy' % (key.upper(), it))
assert os.path.exists(fname_prior), fname_prior
return self.load_qlm(fname_prior)
def load_gradquad(self, it, key):
"""Loads likelihood quadratic piece gradient at iteration *it*
Gradient must have already been calculated
"""
fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it))
assert os.path.exists(fname_likterm), fname_likterm
return self.load_qlm(fname_likterm)
def load_total_grad(self, it, key):
"""Load the total gradient at iteration *it*.
All gradients must have already been calculated.
"""
return self.load_gradpri(it, key) + self.load_gradquad(it, key) + self.load_graddet(it, key)
def _calc_norm(self, qlm):
return np.sqrt(np.sum(self.lib_qlm.alm2rlm(qlm) ** 2))
    def _apply_curv(self, k, key, alphak, plm):
        """Apply curvature matrix making use of information including sk and yk.

        Applies v B_{k + 1}v = v B_k v + (y^t v)** 2/(y^t s) - (s^t B v) ** 2 / (s^t B s))
        (B_k+1 = B + yy^t / (y^ts) - B s s^t B / (s^t Bk s)) (all k on the RHS))
        For quasi Newton, s_k = x_k1 - x_k = - alpha_k Hk grad_k with alpha_k newton step-length.
        --> s^t B s at k is alpha_k^2 g_k H g_k
        B s = -alpha g_k

        Implemented recursively: the k <= -1 base case applies the zeroth-order
        curvature B0; each level adds the rank-2 BFGS correction of step k.
        """
        H = self.get_Hessian(max(k + 1,0), key) # get_Hessian(k) loads sk and yk from 0 to k - 1
        assert H.L > k, 'not implemented'
        assert len(alphak) >= (k + 1),(k + 1,len(alphak))
        # Inner product weighted by the qlm mode multiplicities.
        dot_op = lambda plm1,plm2,:np.sum(self.lib_qlm.alm2cl(plm1,alm2=plm2) * self.lib_qlm.get_Nell()[:self.lib_qlm.ellmax + 1])
        if k <= -1:
            # Base case: zeroth-order curvature only.
            return dot_op(plm,self.lib_qlm.rlm2alm(H.applyB0k(self.lib_qlm.alm2rlm(plm),0)))
        ret = self._apply_curv(k - 1, key, alphak, plm)
        Hgk = H.get_mHkgk(self.lib_qlm.alm2rlm(self.load_total_grad(k, key)), k)
        st_Bs = alphak[k] ** 2 * dot_op(self.load_total_grad(k, key),self.lib_qlm.rlm2alm(Hgk))
        yt_s = dot_op(self.lib_qlm.rlm2alm(H.s(k)),self.lib_qlm.rlm2alm(H.y(k)))
        yt_v = dot_op(self.lib_qlm.rlm2alm(H.y(k)),plm)
        st_Bv = - alphak[k] *dot_op(self.load_total_grad(k, key),plm)
        return ret + yt_v ** 2 / yt_s - st_Bv ** 2 / st_Bs
    def get_lndetcurv_update(self, k, key, alphak):
        """Log-determinant update of the BFGS curvature at step *k*.

        Returns log(1 - g_{k+1}^t H g_k / (g_k^t H g_k) / alphak); the assert
        guards against a non-positive argument (which would signal a failed
        curvature condition).
        """
        # Builds update to the BFGS log-determinant
        H = self.get_Hessian(k, key)
        Hgk = H.get_mHkgk(self.lib_qlm.alm2rlm(self.load_total_grad(k, key)), k)
        denom = np.sum(self.lib_qlm.alm2rlm(self.load_total_grad(k, key)) * Hgk)
        num = np.sum(self.lib_qlm.alm2rlm(self.load_total_grad(k + 1, key)) * Hgk)
        assert 1. - num / denom / alphak > 0.
        return np.log(1. - num / denom / alphak)
    def get_Gaussnoisesample(self, it, key,plm_noisephas, real_space=False, verbose=False):
        """Produce a Gaussian random field from the approximate BFGS covariance

        Args:
            it: iteration index
            key: 'p' or 'o' for lensing gradient or curl iteration
            plm_noisephas: unit spectra random phases of the right shape
            real_space: produces random field in real space if set, otherwise alm array
        """
        assert key.lower() in ['p', 'o'], key  # potential or curl potential.
        assert plm_noisephas.shape == (self.lib_qlm.alm_size,),(plm_noisephas.shape,self.lib_qlm.alm_size)
        # Colour the unit phases by the sqrt of the zeroth-order covariance ...
        alm_0 = self.lib_qlm.almxfl(plm_noisephas, np.sqrt(self.get_H0(key)))
        # ... then propagate through the BFGS-corrected Hessian sampler.
        ret = self.get_Hessian(max(it,0), key).sample_Gaussian(it, self.lib_qlm.alm2rlm(alm_0))
        return self.lib_qlm.alm2map(self.lib_qlm.rlm2alm(ret)) if real_space else self.lib_qlm.rlm2alm(ret)
    def get_Hessian(self, it, key):
        """Build the L-BFGS Hessian at iteration *it*

        The zeroth-order (inverse) Hessian is the cached isotropic curvature H0;
        the last (up to) L cached y/s vector pairs are then added as BFGS updates.
        """
        # Zeroth order inverse Hessian :
        apply_H0k = lambda rlm, k: \
            self.lib_qlm.alm2rlm(self.lib_qlm.almxfl(self.lib_qlm.rlm2alm(rlm), self.get_H0(key)))
        apply_B0k = lambda rlm, k: \
            self.lib_qlm.alm2rlm(self.lib_qlm.almxfl(self.lib_qlm.rlm2alm(rlm), cl_inverse(self.get_H0(key))))
        BFGS_H = bfgs.BFGS_Hessian(os.path.join(self.lib_dir, 'Hessian'), apply_H0k, {}, {}, L=self.NR_method,
                                   verbose=self.verbose,apply_B0k=apply_B0k)
        # Adding the required y and s vectors :
        for k in range(np.max([0, it - BFGS_H.L]), it):
            BFGS_H.add_ys(os.path.join(self.lib_dir, 'Hessian', 'rlm_yn_%s_%s.npy' % (k, key)),
                          os.path.join(self.lib_dir, 'Hessian', 'rlm_sn_%s_%s.npy' % (k, key)), k)
        return BFGS_H
    def build_incr(self, it, key, gradn):
        """Search direction
        BGFS method with 'self.NR method' BFGS updates to the Hessian.
        Initial Hessian are built from N0s.
        It must be rank 0 here.
        Args:
            it: current iteration level. Will produce the increment to phi_{k-1}, from gradient est. g_{k-1}
                phi_{k_1} + output = phi_k
            key: 'p' or 'o'
            gradn: current estimate of the gradient (alm array)
        Returns:
            increment for next iteration (alm array)
        """
        assert self.PBSRANK == 0, 'single MPI process method !'
        assert it > 0, it
        k = it - 2
        yk_fname = os.path.join(self.lib_dir, 'Hessian', 'rlm_yn_%s_%s.npy' % (k, key))
        if k >= 0 and not os.path.exists(yk_fname):  # Caching Hessian BFGS yk update :
            yk = self.lib_qlm.alm2rlm(gradn - self.load_total_grad(k, key))
            self.cache_rlm(yk_fname, yk)
        k = it - 1
        BFGS = self.get_Hessian(k, key)  # Constructing L-BFGS Hessian
        # get descent direction sk = - H_k gk : (rlm array). Will be cached directly
        sk_fname = os.path.join(self.lib_dir, 'Hessian', 'rlm_sn_%s_%s.npy' % (k, key))
        # NOTE(review): if sk is already cached, the returned step stays 0. and
        # iterate() will log 0 as the Newton step length — confirm intended.
        step = 0.
        if not os.path.exists(sk_fname):
            print("rank %s calculating descent direction" % self.PBSRANK)
            t0 = time.time()
            incr = BFGS.get_mHkgk(self.lib_qlm.alm2rlm(gradn), k)
            # Normalize the increment to the starting-point norm to pick the step length.
            norm_inc = self._calc_norm(self.lib_qlm.rlm2alm(incr)) / self._calc_norm(self.get_Plm(0, key))
            step = self.newton_step_length(it, norm_inc)
            self.cache_rlm(sk_fname,incr * step)
            prt_time(time.time() - t0, label=' Exec. time for descent direction calculation')
        assert os.path.exists(sk_fname), sk_fname
        return self.lib_qlm.rlm2alm(self.load_rlm(sk_fname)),step
    def iterate(self, it, key, cache_only=False):
        """Performs an iteration
        This builds the gradients at iteration *it*, and the potential estimate, and saves the *it* + 1 estimate.
        """
        assert key.lower() in ['p', 'o'], key  # potential or curl potential.
        plm_fname = os.path.join(self.lib_dir, '%s_plm_it%03d.npy' % ({'p': 'Phi', 'o': 'Om'}[key.lower()], it))
        # Already computed: return (or skip) immediately.
        if os.path.exists(plm_fname): return None if cache_only else self.load_qlm(plm_fname)
        assert self.is_previous_iter_done(it, key), 'previous iteration not done'
        # Calculation in // of lik and det term :
        ti = time.time()
        if self.PBSRANK == 0:  # Single processes routines :
            self._calc_ffinv(it - 1, key)
            self.get_gradPpri(it, key, cache_only=True)
        self.barrier()
        # Calculation of the likelihood term, involving the det term over MCs :
        irrelevant = self.calc_gradplikpdet(it, key)
        self.barrier()  # Everything should be on disk now.
        if self.PBSRANK == 0:
            # Newton/BFGS step from the total gradient of the previous iterate.
            incr,steplength = self.build_incr(it, key, self.load_total_grad(it - 1, key))
            self.cache_qlm(plm_fname, self.get_Plm(it - 1, key) + incr, pbs_rank=0)

            # Saves some info about increment norm and exec. time :
            norm_inc = self._calc_norm(incr) / self._calc_norm(self.get_Plm(0, key))
            norms = [self._calc_norm(self.load_gradquad(it - 1, key))]
            norms.append(self._calc_norm(self.load_graddet(it - 1, key)))
            norms.append(self._calc_norm(self.load_gradpri(it - 1, key)))
            norm_grad = self._calc_norm(self.load_total_grad(it - 1, key))
            norm_grad_0 = self._calc_norm(self.load_total_grad(0, key))
            for i in [0, 1, 2]: norms[i] = norms[i] / norm_grad_0

            with open(os.path.join(self.lib_dir, 'history_increment.txt'), 'a') as file:
                file.write('%03d %.1f %.6f %.6f %.6f %.6f %.6f %.12f \n'
                           % (it, time.time() - ti, norm_inc, norm_grad / norm_grad_0, norms[0], norms[1], norms[2],
                              steplength))
                file.close()

            if self.tidy > 2:  # Erasing dx,dy and det magn (12GB for full sky at 0.74 amin per iteration)
                f1, f2 = self._getfnames_f(key, it - 1)
                f3, f4 = self._getfnames_finv(key, it - 1)
                for _f in [f1, f2, f3, f4]:
                    if os.path.exists(_f):
                        os.remove(_f)
                        if self.verbose: print(" removed :", _f)
                if os.path.exists(os.path.join(self.lib_dir, 'f_%04d_libdir' % (it - 1))):
                    shutil.rmtree(os.path.join(self.lib_dir, 'f_%04d_libdir' % (it - 1)))
                    if self.verbose: print("Removed :", os.path.join(self.lib_dir, 'f_%04d_libdir' % (it - 1)))
                if os.path.exists(os.path.join(self.lib_dir, 'finv_%04d_libdir' % (it - 1))):
                    shutil.rmtree(os.path.join(self.lib_dir, 'finv_%04d_libdir' % (it - 1)))
                    if self.verbose: print("Removed :", os.path.join(self.lib_dir, 'finv_%04d_libdir' % (it - 1)))
        self.barrier()
        return None if cache_only else self.load_qlm(plm_fname)
class ffs_iterator_cstMF(ffs_iterator):
    r"""Iterator instance, that uses fixed, input mean-field at each step.

    Args:
        lib_dir: many things will be written there
        typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly
        filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )
        dat_maps: data maps or path to maps.
        lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays
        Plm0: Starting point for the iterative search. alm array consistent with *lib_qlm*
        H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)
        MF_qlms: mean-field alm array (also described by lib_qlm)
        cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.
    """

    def __init__(self, lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, MF_qlms, cpp_prior, **kwargs):
        super(ffs_iterator_cstMF, self).__init__(lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,
                                                 PBSSIZE=1, PBSRANK=0,  # so that all proc. act independently
                                                 **kwargs)
        self.MF_qlms = MF_qlms

    def calc_gradplikpdet(self, it, key):
        """Cache the likelihood gradient; the det (mean-field) term is the fixed input."""
        assert key.lower() in ['p', 'o'], key  # potential or curl potential.
        fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it - 1))
        fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it - 1))
        assert it > 0, it
        if os.path.exists(fname_likterm) and os.path.exists(fname_detterm):
            return 0
        assert self.is_previous_iter_done(it, key)
        # Identical MF here
        self.cache_qlm(fname_detterm, self.load_qlm(self.MF_qlms))
        # Lens the covariance to the previous iterate before solving for the WF.
        self.cov.set_ffi(self._load_f(it - 1, key), self._load_finv(it - 1, key))
        mchain = multigrid.multigrid_chain(self.opfilt, self.type, self.chain_descr, self.cov,
                                           no_deglensing=self.nodeglensing)
        # FIXME : The solution input is not working properly sometimes. We give it up for now.
        # FIXME don't manage to find the right d0 to input for a given sol ?!!
        soltn = self.load_soltn(it, key).copy() * self.soltn_cond
        self.opfilt._type = self.type
        mchain.solve(soltn, self.get_datmaps(), finiop='MLIK')
        self._cache_tebwf(soltn, it - 1, key)
        # soltn = self.opfilt.MLIK2BINV(soltn,self.cov,self.get_datmaps())
        # grad = - ql.get_qlms(self.type, self.cov.lib_skyalm, soltn, self.cov.cls, self.lib_qlm,
        #                      use_Pool=self.use_Pool, f=self.cov.f)[{'p': 0, 'o': 1}[key.lower()]]
        TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov)
        ResTQUMlik = self._mlik2rest_tqumlik(TQUMlik, it, key)
        grad = - ql.get_qlms_wl(self.type, self.cov.lib_skyalm, TQUMlik, ResTQUMlik, self.lib_qlm,
                                use_Pool=self.use_Pool, f=self._load_f(it - 1, key))[{'p': 0, 'o': 1}[key.lower()]]
        self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK)
        # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space.
        return 0
class ffs_iterator_pertMF(ffs_iterator):
    """Iterator instance, that uses the deflection-perturbative prediction for the mean-field at each step.

    Args:
        lib_dir: many things will be written there
        typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly
        filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )
        dat_maps: data maps or path to maps.
        lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays
        Plm0: Starting point for the iterative search. alm array consistent with *lib_qlm*
        H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)
        cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.
    """

    def __init__(self, lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,
                 init_rank=pbs.rank, init_barrier=pbs.barrier, **kwargs):
        super(ffs_iterator_pertMF, self).__init__(lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,
                                                  PBSSIZE=1, PBSRANK=0,  # so that all proc. act independently
                                                  **kwargs)
        # lmax_sky_ivf = filt.lib_skyalm.ellmax
        # iso_libdat = filt.lib_skyalm
        # cls_noise = {'t': (filt.Nlev_uKamin('t') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_sky_ivf + 1),
        #              'q': (filt.Nlev_uKamin('q') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_sky_ivf + 1),
        #              'u': (filt.Nlev_uKamin('u') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_sky_ivf + 1)}
        lmax_ivf = filt.lib_datalm.ellmax
        iso_libdat = filt.lib_datalm
        # White-noise spectra (uK-amin converted to uK-rad) for the isotropic covariance.
        cls_noise = {'t': (filt.Nlev_uKamin('t') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_ivf + 1),
                     'q': (filt.Nlev_uKamin('q') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_ivf + 1),
                     'u': (filt.Nlev_uKamin('u') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_ivf + 1)}
        self.isocov = ffs_cov.ffs_diagcov_alm(os.path.join(lib_dir, 'isocov'),
                                              iso_libdat, filt.cls, filt.cls, filt.cl_transf, cls_noise,
                                              lib_skyalm=filt.lib_skyalm, init_rank=init_rank,
                                              init_barrier=init_barrier)

    def get_mfresp(self, key):
        """Perturbative mean-field response, evaluated with unlensed spectra."""
        return self.isocov.get_mfresplms(self.type, self.lib_qlm, use_cls_len=False)[{'p': 0, 'o': 1}[key.lower()]]

    def calc_gradplikpdet(self, it, key):
        """Cache the likelihood gradient; the det term is the perturbative MF prediction."""
        assert key.lower() in ['p', 'o'], key  # potential or curl potential.
        fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it - 1))
        fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it - 1))
        assert it > 0, it
        if os.path.exists(fname_likterm) and os.path.exists(fname_detterm):
            return 0
        assert self.is_previous_iter_done(it, key)
        # Identical MF here
        # Mean-field estimate: response times the previous plm iterate.
        self.cache_qlm(fname_detterm, self.load_qlm(self.get_mfresp(key.lower()) * self.get_Plm(it - 1, key.lower())))
        self.cov.set_ffi(self._load_f(it - 1, key), self._load_finv(it - 1, key))
        mchain = multigrid.multigrid_chain(self.opfilt, self.type, self.chain_descr, self.cov,
                                           no_deglensing=self.nodeglensing)
        # FIXME : The solution input is not working properly sometimes. We give it up for now.
        # FIXME don't manage to find the right d0 to input for a given sol ?!!
        soltn = self.load_soltn(it, key).copy() * self.soltn_cond
        self.opfilt._type = self.type
        mchain.solve(soltn, self.get_datmaps(), finiop='MLIK')
        self._cache_tebwf(soltn, it - 1, key)
        TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov)
        ResTQUMlik = self._mlik2rest_tqumlik(TQUMlik, it, key)
        grad = - ql.get_qlms_wl(self.type, self.cov.lib_skyalm, TQUMlik, ResTQUMlik, self.lib_qlm,
                                use_Pool=self.use_Pool, f=self._load_f(it - 1, key))[{'p': 0, 'o': 1}[key.lower()]]
        self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK)
        # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space.
        return 0
class ffs_iterator_simMF(ffs_iterator):
    r"""Iterator instance, that estimate the mean-field at each steps from Monte-Carlos.

    Args:
        lib_dir: many things will be written there
        typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly
        MFkey: mean-field estimator key
        nsims: number of sims to use at each step
        filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )
        dat_maps: data maps or path to maps.
        lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays
        Plm0: Starting point for the iterative search. alm array consistent with *lib_qlm*
        H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)
        cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.
    """

    def __init__(self, lib_dir, typ, MFkey, nsims, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior, **kwargs):
        super(ffs_iterator_simMF, self).__init__(lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,
                                                 **kwargs)
        print('++ ffs_%s simMF iterator (PBSSIZE %s pbs.size %s) : setup OK' % (self.type, self.PBSSIZE, pbs.size))
        self.MFkey = MFkey
        self.nsims = nsims
        # NOTE(review): these pops happen after **kwargs was already forwarded to
        # the base class; harmless only because the base __init__ swallows extra
        # keyword arguments in its own **kwargs — confirm intended.
        self.same_seeds = kwargs.pop('same_seeds', False)
        self.subtract_phi0 = kwargs.pop('subtract_phi0', True)
        self.barrier()

    def build_pha(self, it):
        """Builds sims for the mean-field evaluation at iter *it*
        """
        # NOTE(review): for nsims == 0 this returns None, while the caller
        # unpacks two values — a zero-sim setup would raise there.
        if self.nsims == 0: return None
        # With same_seeds the phase library directory is iteration-independent.
        phas_pix = ffs_phas.pix_lib_phas(
            os.path.join(self.lib_dir, '%s_sky_noise_iter%s' % (self.type, it * (not self.same_seeds))),
            len(self.type), self.cov.lib_datalm.shape, nsims_max=self.nsims)
        phas_cmb = None  # dont need it so far
        if self.PBSRANK == 0:
            for lib, lab in zip([phas_pix, phas_cmb], ['phas pix', 'phas_cmb']):
                if not lib is None and not lib.is_full():
                    print("++ run iterator regenerating %s phases mf_sims rank %s..." % (lab, self.PBSRANK))
                    for idx in np.arange(self.nsims): lib.get_sim(idx, phas_only=True)
        self.barrier()
        return phas_pix, phas_cmb

    def calc_gradplikpdet(self, it, key, callback='default_callback'):
        """Caches the det term for iter via MC sims, together with the data one, with MPI maximal //isation.
        """
        assert key.lower() in ['p', 'o'], key  # potential or curl potential.
        fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it - 1))
        fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it - 1))
        if os.path.exists(fname_detterm) and os.path.exists(fname_likterm):
            return 0
        assert self.is_previous_iter_done(it, key)
        pix_pha, cmb_pha = self.build_pha(it)
        if self.PBSRANK == 0 and not os.path.exists(os.path.join(self.lib_dir, 'mf_it%03d' % (it - 1))):
            os.makedirs(os.path.join(self.lib_dir, 'mf_it%03d' % (it - 1)))
        self.barrier()

        # Caching gradients for the mc_sims_mf sims , plus the dat map.
        # The gradient of the det term is the data averaged lik term, with the opposite sign.
        # Job -1 is the data map; 0..nsims-1 are the MC sims. A job is (re)scheduled
        # if its cached gradient is missing or unreadable.
        jobs = []
        try:
            self.load_qlm(fname_likterm)
        except:
            # NOTE(review): bare except — any load error (re)schedules the data job.
            jobs.append(-1)  # data map
        for idx in range(self.nsims):  # sims
            if not os.path.exists(os.path.join(self.lib_dir, 'mf_it%03d/g%s_%04d.npy' % (it - 1, key.lower(), idx))):
                jobs.append(idx)
            else:
                try:  # just checking if file is OK.
                    self.load_qlm(os.path.join(self.lib_dir, 'mf_it%03d/g%s_%04d.npy' % (it - 1, key.lower(), idx)))
                except:
                    jobs.append(idx)
        self.opfilt._type = self.type
        # By setting the chain outside the main loop we avoid potential MPI barriers
        # in degrading the lib_alm libraries:
        mchain = multigrid.multigrid_chain(self.opfilt, self.type, self.chain_descr, self.cov,
                                           no_deglensing=self.nodeglensing)
        # Round-robin distribution of the jobs over the MPI ranks.
        for i in range(self.PBSRANK, len(jobs), self.PBSSIZE):
            idx = jobs[i]
            print("rank %s, doing mc det. gradients idx %s, job %s in %s at iter level %s:" \
                  % (self.PBSRANK, idx, i, len(jobs), it))
            ti = time.time()
            if idx >= 0:  # sim
                grad_fname = os.path.join(self.lib_dir, 'mf_it%03d/g%s_%04d.npy' % (it - 1, key.lower(), idx))
                self.cov.set_ffi(self._load_f(it - 1, key), self._load_finv(it - 1, key))
                MFest = ql.MFestimator(self.cov, self.opfilt, mchain, self.lib_qlm,
                                       pix_pha=pix_pha, cmb_pha=cmb_pha, use_Pool=self.use_Pool)
                grad = MFest.get_MFqlms(self.type, self.MFkey, idx)[{'p': 0, 'o': 1}[key.lower()]]
                if self.subtract_phi0:
                    # Subtract the unlensed (isotropic) mean-field with the same phases.
                    isofilt = self.cov.turn2isofilt()
                    chain_descr_iso = chain_samples.get_isomgchain(
                        self.cov.lib_skyalm.ellmax, self.cov.lib_datalm.shape, iter_max=self.maxiter)
                    mchain_iso = multigrid.multigrid_chain(
                        self.opfilt, self.type, chain_descr_iso, isofilt, no_deglensing=self.nodeglensing)
                    MFest = ql.MFestimator(isofilt, self.opfilt, mchain_iso, self.lib_qlm,
                                           pix_pha=pix_pha, cmb_pha=cmb_pha, use_Pool=self.use_Pool)
                    grad -= MFest.get_MFqlms(self.type, self.MFkey, idx)[{'p': 0, 'o': 1}[key.lower()]]
                self.cache_qlm(grad_fname, grad, pbs_rank=self.PBSRANK)
            else:
                # This is the data.
                # FIXME : The solution input is not working properly sometimes. We give it up for now.
                # FIXME don't manage to find the right d0 to input for a given sol ?!!
                self.cov.set_ffi(self._load_f(it - 1, key), self._load_finv(it - 1, key))
                soltn = self.load_soltn(it, key).copy() * self.soltn_cond
                mchain.solve(soltn, self.get_datmaps(), finiop='MLIK')
                self._cache_tebwf(soltn, it - 1, key)
                TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov)
                ResTQUMlik = self._mlik2rest_tqumlik(TQUMlik, it, key)
                grad = - ql.get_qlms_wl(self.type, self.cov.lib_skyalm, TQUMlik, ResTQUMlik, self.lib_qlm,
                                        use_Pool=self.use_Pool, f=self._load_f(it - 1, key))[
                    {'p': 0, 'o': 1}[key.lower()]]
                self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK)
            print("%s it. %s sim %s, rank %s cg status " % (key.lower(), it, idx, self.PBSRANK))
            # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space.
            # Saves some info about current iteration :
            if idx == -1:  # Saves some info about iteration times etc.
                with open(os.path.join(self.lib_dir, 'cghistories','history_dat.txt'), 'a') as file:
                    file.write('%04d %.3f \n' % (it, time.time() - ti))
                    file.close()
            else:
                with open(os.path.join(self.lib_dir, 'cghistories', 'history_sim%04d.txt' % idx), 'a') as file:
                    file.write('%04d %.3f \n' % (it, time.time() - ti))
                    file.close()
        self.barrier()
        if self.PBSRANK == 0:
            # Collecting terms and caching det term.
            # We also cache arrays formed from independent sims for tests.
            print("rank 0, collecting mc det. %s gradients :" % key.lower())
            det_term = np.zeros(self.lib_qlm.alm_size, dtype=complex)
            for i in range(self.nsims):
                fname = os.path.join(self.lib_dir, 'mf_it%03d'%(it -1),'g%s_%04d.npy'%(key.lower(), i))
                det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.)
            self.cache_qlm(fname_detterm, det_term, pbs_rank=0)
            det_term *= 0.
            fname_detterm1 = fname_detterm.replace('.npy', 'MF1.npy')
            assert 'MF1' in fname_detterm1
            # NOTE(review): the running means below reuse the sim index i as the
            # weight; for these even/odd-index subsets i jumps by 2, so the
            # weighting looks inconsistent with a plain average over the subset
            # — confirm whether this is intended.
            for i in np.arange(self.nsims)[0::2]:
                fname = os.path.join(self.lib_dir, 'mf_it%03d'%(it - 1),'g%s_%04d.npy'%(key.lower(), i))
                det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.)
            self.cache_qlm(fname_detterm1, det_term, pbs_rank=0)
            det_term *= 0.
            fname_detterm2 = fname_detterm.replace('.npy', 'MF2.npy')
            assert 'MF2' in fname_detterm2
            for i in np.arange(self.nsims)[1::2]:
                fname = os.path.join(self.lib_dir, 'mf_it%03d'%(it - 1),'g%s_%04d.npy'%(key.lower(), i))
                det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.)
            self.cache_qlm(fname_detterm2, det_term, pbs_rank=0)
            # Erase some temp files if requested to do so :
            if self.tidy > 1:
                # We erase as well the gradient determinant term that were stored on disk :
                files_to_remove = \
                    [os.path.join(self.lib_dir, 'mf_it%03d'%(it -1), 'g%s_%04d.npy'%(key.lower(), i)) for i in range(self.nsims)]
                print('rank %s removing %s maps in ' % (
                    self.PBSRANK, len(files_to_remove)), os.path.join(self.lib_dir, 'mf_it%03d'%(it - 1)))
                for file in files_to_remove: os.remove(file)
        self.barrier()
|
import os
import logging
import subprocess
import sys
from theano.configparser import (
AddConfigVar, BoolParam, ConfigParam, EnumStr, IntParam, FloatParam,
StrParam, TheanoConfigParser)
_logger = logging.getLogger('theano.configdefaults')
config = TheanoConfigParser()
# --- Numeric precision and implicit casting -------------------------------

AddConfigVar('floatX',
        "Default floating-point precision for python casts",
        EnumStr('float64', 'float32'),
        )

AddConfigVar('cast_policy',
        "Rules for implicit type casting",
        EnumStr('custom', 'numpy+floatX',
                # The 'numpy' policy was originally planned to provide a smooth
                # transition from numpy. It was meant to behave the same as
                # numpy+floatX, but keeping float64 when numpy would. However
                # the current implementation of some cast mechanisms makes it
                # a bit more complex to add than what was expected, so it is
                # currently not available.
                #numpy,
                ),
        )

# python 2.* define int / int to return int and int // int to return int.
# python 3* define int / int to return float and int // int to return int.
# numpy 1.6.1 behaves as python 2.*. I think we should not change it faster
# than numpy. When we will do the transition, we should create an int_warn
# and floatX_warn option.
AddConfigVar('int_division',
        "What to do when one computes x / y, where both x and y are of "
        "integer types",
        EnumStr('int', 'raise', 'floatX'),
        in_c_key=False)

# --- Device selection ------------------------------------------------------
#gpu mean let the driver select the gpu. Needed in case of gpu in exclusive mode.
#gpuX mean use the gpu number X.
AddConfigVar('device',
        "Default device for computations. If gpu*, change the default to try to move computation to it and to put shared variable of float32 on it.",
        EnumStr('cpu', 'gpu',
                'gpu0', 'gpu1', 'gpu2', 'gpu3',
                'gpu4', 'gpu5', 'gpu6', 'gpu7',
                'gpu8', 'gpu9', 'gpu10', 'gpu11',
                'gpu12', 'gpu13', 'gpu14', 'gpu15',
                allow_override=False),
        in_c_key=False,
        )

AddConfigVar('init_gpu_device',
        ("Initialize the gpu device to use, works only if device=cpu. "
         "Unlike 'device', setting this option will NOT move computations, "
         "nor shared variables, to the specified GPU. "
         "It can be used to run GPU-specific tests on a particular GPU."),
        EnumStr('', 'gpu',
                'gpu0', 'gpu1', 'gpu2', 'gpu3',
                'gpu4', 'gpu5', 'gpu6', 'gpu7',
                'gpu8', 'gpu9', 'gpu10', 'gpu11',
                'gpu12', 'gpu13', 'gpu14', 'gpu15',
                allow_override=False),
        in_c_key=False)

AddConfigVar('force_device',
        "Raise an error if we can't use the specified device",
        BoolParam(False, allow_override=False),
        in_c_key=False)

# --- Compilation mode and linker -------------------------------------------
# Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut).
# The way to get FAST_RUN_NOGC is with the flag 'linker=c|py_nogc'.
# The old all capital letter way of working is deprecated as it is not
# scalable.
# Also, please be careful not to modify the first item in the enum when adding
# new modes, since it is the default mode.
AddConfigVar('mode',
        "Default compilation mode",
        EnumStr('Mode', 'ProfileMode', 'DebugMode', 'FAST_RUN',
                'FAST_COMPILE', 'PROFILE_MODE', 'DEBUG_MODE'),
        in_c_key=False)

# Test whether or not gcc is present: disable C code if it is not.
# Using the dummy file descriptor below is a workaround for a crash experienced
# in an unusual Python 2.4.4 Windows environment with the default stdin=None.
dummy_stdin = open(os.devnull)
try:
    # Probing by spawning `gcc`: raises OSError if the binary is not on PATH.
    subprocess.Popen('gcc', stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     stdin=dummy_stdin.fileno())
    # Keep the default linker the same as the one for the mode FAST_RUN
    AddConfigVar('linker',
            "Default linker used if the theano flags mode is Mode or ProfileMode",
            EnumStr('c|py', 'py', 'c', 'c|py_nogc', 'c&py',
                    'vm', 'cvm', 'vm_nogc', 'cvm_nogc'),
            in_c_key=False)
except OSError:
    # gcc is not present, linker should default to python only
    AddConfigVar('linker',
            "Default linker used if the theano flags mode is Mode or ProfileMode",
            EnumStr('py', 'c|py', 'c', 'c|py_nogc', 'c&py',
                    'vm', 'cvm', 'vm_nogc', 'cvm_nogc'),
            in_c_key=False)
    _logger.warning('GCC not detected ! Theano will be unable to execute '
            'optimized C-implementations (for both CPU and GPU) and will '
            'default to Python implementations. Performance will be severely '
            'degraded.')
del dummy_stdin

#Keep the default optimizer the same as the one for the mode FAST_RUN
AddConfigVar('optimizer',
        "Default optimizer. If not None, will use this linker with the Mode object(not ProfileMode or DebugMode)",
        EnumStr('fast_run', 'merge', 'fast_compile', 'None'),
        in_c_key=False)

AddConfigVar('on_opt_error',
        "What to do when an optimization crashes: warn and skip it, or raise the exception",
        EnumStr('warn', 'raise'),
        in_c_key=False)
def safe_no_home(home):
    """Filter callback that rejects any use of the removed `config.home`.

    This config option was removed in Theano 0.5 since it was redundant
    with `config.base_compiledir`. Anyone still setting the compilation
    directory through `config.home` must switch to
    `config.base_compiledir`; a non-empty value therefore raises an error.

    :param home: value the user attempted to assign to `config.home`.
    :return: True (the empty default is the only accepted value).
    :raises RuntimeError: if `home` is non-empty/truthy.
    """
    if not home:
        return True
    raise RuntimeError(
        'The `config.home` option has been removed and should not be '
        'used anymore. Please set the `config.base_compiledir` option '
        'instead (for instance to: %s)' %
        os.path.join(home, '.theano'))
# Kept only so that any attempt to set it fails loudly via safe_no_home.
AddConfigVar('home',
        "This config option was removed in 0.5: do not use it!",
        ConfigParam('', allow_override=False, filter=safe_no_home),
        in_c_key=False)

AddConfigVar('nocleanup',
        "Suppress the deletion of code files that did not compile cleanly",
        BoolParam(False),
        in_c_key=False)

# This flag is used when we import Theano to initialize global variables.
# So changing it after import will not modify these global variables.
# This could be done differently... but for now we simply prevent it from being
# changed at runtime.
AddConfigVar('tensor.cmp_sloppy',
        "Relax tensor._allclose (0) not at all, (1) a bit, (2) more",
        IntParam(0, lambda i: i in (0,1,2), allow_override=False),
        in_c_key=False)

AddConfigVar('tensor.local_elemwise_fusion',
        "Enable or not in fast_run mode(fast_run optimization) the elemwise fusion optimization",
        BoolParam(True),
        in_c_key=False)

AddConfigVar('gpu.local_elemwise_fusion',
        "Enable or not in fast_run mode(fast_run optimization) the gpu elemwise fusion optimization",
        BoolParam(True),
        in_c_key=False)

#http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
AddConfigVar('lib.amdlibm',
        "Use amd's amdlibm numerical library",
        BoolParam(False))

AddConfigVar('op.set_flops',
        "currently used only in ConvOp. The profile mode will print the flops/s for the op.",
        BoolParam(False),
        in_c_key=False)

AddConfigVar('gpuelemwise.sync',
        "when true, wait that the gpu fct finished and check it error code.",
        BoolParam(True))

AddConfigVar('traceback.limit',
        "The number of stack to trace. -1 mean all.",
        IntParam(5),
        in_c_key=False)

AddConfigVar('experimental.mrg',
        "Another random number generator that work on the gpu",
        BoolParam(False))
# BUG FIX: the description previously had a stray comma after the first
# string literal, which turned the whole parenthesized expression into a
# *tuple* of two strings instead of one implicitly-concatenated doc string.
# The comma is removed so AddConfigVar receives a single string.
AddConfigVar('numpy.seterr_all',
        ("Sets numpy's behaviour for floating-point errors, "
         "see numpy.seterr. "
         "'None' means not to change numpy's default, which can be "
         "different for different numpy releases. "
         "This flag sets the default behaviour for all kinds of floating-"
         "point errors, its effect can be overriden for specific errors "
         "by the following flags: seterr_divide, seterr_over, "
         "seterr_under and seterr_invalid."),
        EnumStr('ignore', 'warn', 'raise', 'call', 'print', 'log', 'None',
                allow_override=False),
        in_c_key=False)
# Per-error-kind overrides for numpy.seterr; 'None' defers to seterr_all.
AddConfigVar('numpy.seterr_divide',
        ("Sets numpy's behavior for division by zero, see numpy.seterr. "
         "'None' means using the default, defined by numpy.seterr_all."),
        EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
                allow_override=False),
        in_c_key=False)

AddConfigVar('numpy.seterr_over',
        ("Sets numpy's behavior for floating-point overflow, "
         "see numpy.seterr. "
         "'None' means using the default, defined by numpy.seterr_all."),
        EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
                allow_override=False),
        in_c_key=False)

AddConfigVar('numpy.seterr_under',
        ("Sets numpy's behavior for floating-point underflow, "
         "see numpy.seterr. "
         "'None' means using the default, defined by numpy.seterr_all."),
        EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
                allow_override=False),
        in_c_key=False)

AddConfigVar('numpy.seterr_invalid',
        ("Sets numpy's behavior for invalid floating-point operation, "
         "see numpy.seterr. "
         "'None' means using the default, defined by numpy.seterr_all."),
        EnumStr('None', 'ignore', 'warn', 'raise', 'call', 'print', 'log',
                allow_override=False),
        in_c_key=False)

###
### To disable some warning about old bug that are fixed now.
###
AddConfigVar('warn.ignore_bug_before',
        "If 'None', we warn about all Theano bugs found by default. If 'all', we don't warn about Theano bugs found by default. If a version, we print only the warnings relative to Theano bugs found after that version. Warning for specific bugs can be configured with specific [warn] flags.",
        EnumStr('None', 'all', '0.3','0.4', '0.4.1', '0.5', allow_override=False),
        in_c_key=False)
def warn_default(version):
    """Return True iff we should warn about bugs fixed after a given version.

    Consults the `warn.ignore_bug_before` flag: 'None' warns about
    everything, 'all' warns about nothing, and a version string enables
    warnings only for bugs fixed after that version.
    """
    threshold = config.warn.ignore_bug_before
    if threshold == 'None':
        return True
    if threshold == 'all':
        return False
    # NOTE(review): this is a plain string comparison of version numbers,
    # as in the original; it happens to order the listed versions correctly.
    return threshold < version
# Each warn.* default is computed from warn.ignore_bug_before via warn_default.
AddConfigVar('warn.argmax_pushdown_bug',
        "Warn if in past version of Theano we generated a bug with the theano.tensor.nnet.nnet.local_argmax_pushdown optimization. Was fixed 27 may 2010",
        BoolParam(warn_default('0.3')),
        in_c_key=False)

AddConfigVar('warn.gpusum_01_011_0111_bug',
        "Warn if we are in a case where old version of Theano had a silent bug with GpuSum pattern 01,011 and 0111 when the first dimensions was bigger then 4096. Was fixed 31 may 2010",
        BoolParam(warn_default('0.3')),
        in_c_key=False)

AddConfigVar('warn.sum_sum_bug',
        "Warn if we are in a case where Theano version between version 9923a40c7b7a and the 2 august 2010(fixed date), generated an error in that case. This happen when their is 2 consecutive sum in the graph, bad code was generated. Was fixed 2 August 2010",
        BoolParam(warn_default('0.3')),
        in_c_key=False)

AddConfigVar('warn.sum_div_dimshuffle_bug',
        "Warn if previous versions of Theano (between rev. 3bd9b789f5e8, 2010-06-16, and cfc6322e5ad4, 2010-08-03) would have given incorrect result. This bug was triggered by sum of division of dimshuffled tensors.",
        BoolParam(warn_default('0.3')),
        in_c_key=False)

AddConfigVar('compute_test_value',
        "If 'True', Theano will run each op at graph build time, using Constants, SharedVariables and the tag 'test_value' as inputs to the function. This helps the user track down problems in the graph before it gets optimized.",
        EnumStr('off', 'ignore', 'warn', 'raise'),
        in_c_key=False)

"""Note to developers:
Generally your exceptions should use an apply node's __str__
method when exception_verbosity == 'low'. When exception_verbosity
== 'high', you should include a call to printing.min_informative_str
on all important apply nodes.
"""

AddConfigVar('exception_verbosity',
        "If 'low', the text of exceptions will generally refer " \
        + "to apply nodes with short names such as " \
        + "Elemwise{add_no_inplace}. If 'high', some exceptions " \
        + "will also refer to apply nodes with long descriptions " \
        + """ like:
A. Elemwise{add_no_inplace}
B. log_likelihood_v_given_h
C. log_likelihood_h""",
        EnumStr('low','high'),
        in_c_key=False)
|
<filename>wirelesscomms/plots.py
#!/usr/bin/env python
"""A module with plotting tools for signal visualization."""
from typing import Tuple
import numpy as np
from numpy.fft import fftshift, fft, fftfreq
from matplotlib import pyplot as plt
def frequency_units(data: np.ndarray) -> Tuple[str, float]:
    """Given a dataset, return the most appropriate frequency unit
    to use for plotting.

    Args:
        data (np.ndarray): Dataset to analyze

    Returns:
        Tuple[str, float]: (unit_str, scale_factor)
    """
    peak = np.amax(np.abs(data))
    # Walk down the SI prefixes until one fits the peak magnitude.
    for unit, factor in (('THz', 10**12), ('GHz', 10**9),
                         ('MHz', 10**6), ('kHz', 10**3)):
        if peak > factor:
            return (unit, factor)
    return ('Hz', 1)
def time_units(data: np.ndarray) -> Tuple[str, float]:
    """Given a dataset, return the most appropriate time unit
    to use for plotting.

    Args:
        data (np.ndarray): Dataset to analyze

    Returns:
        Tuple[str, float]: (unit_str, scale_factor)
    """
    peak = np.amax(np.abs(data))
    # Walk down the SI prefixes until one fits the peak magnitude.
    for unit, factor in (('s', 1), ('ms', 1e-3), ('us', 1e-6),
                         ('ns', 1e-9), ('ps', 1e-12)):
        if peak > factor:
            return (unit, factor)
    return ('fs', 1e-15)
def inst_freq(iq: np.ndarray, fs: float = 1,
              grid: bool = False, show: bool = False):
    """Plot instantaneous frequency vs time. If show is False, this function
    returns a Line2D object that can be used matplotlib. If show is True,
    this function shows the plot and returns None.

    Args:
        iq (np.ndarray): Complex data to process and plot
        fs (float, optional): Sample rate of the data. Defaults to 1.
        grid (bool, optional): If True, plot with a grid. Defaults to False.
        show (bool, optional): If True, plot will show and this function will
            return None. Defaults to False.

    Returns:
        matplotlib.lines.Line2D: The plot as a Line2D object
    """
    # Instantaneous frequency = derivative of the unwrapped phase / (2*pi).
    phase = np.unwrap(np.angle(iq))
    f = np.diff(phase) * fs / (2 * np.pi)
    t = np.arange(len(f)) / fs
    units_f, scale_f = frequency_units(f)
    units_t, scale_t = time_units(t)
    line = plt.plot(t / scale_t, f / scale_f)
    plt.title('Frequency vs Time')
    plt.xlabel(f'Time ({units_t})')
    plt.ylabel(f'Frequency ({units_f})')
    if grid:
        plt.grid()
    if show:
        plt.show()
        return None
    return line
def power_spectrum(iq: np.ndarray, fs: float = 1, nfft: int = None,
                   nci: bool = False, log_scale: bool = True,
                   normalize: bool = False,
                   grid: bool = False, show: bool = False):
    """Plot the power spectrum of a complex dataset.

    Args:
        iq (np.ndarray): Complex data to process and plot.
        fs (float, optional): Sample rate of the data. Defaults to 1.
        nfft (int, optional): FFT size to use. If not specified it will use
            the length of the iq data. Defaults to None.
        nci (bool, optional): If True, non-coherent integrations of size nfft will be used.
            Defaults to False.
        log_scale (bool, optional): If True, will plot the power spectrum in dB.
            Defaults to True.
        normalize (bool, optional): If True, normalizes the plot to 1 (or 0dB).
            Defaults to False.
        grid (bool, optional): If True, plots with a grid. Defaults to False.
        show (bool, optional): If True, shows the plot and returns None. Defaults to False.

    Returns:
        matplotlib.lines.Line2D: The plot as a Line2D object
    """
    if not nfft:
        nfft = len(iq)
    if nci:
        # Non-coherent integration: sum |FFT|^2 over complete nfft-sized frames.
        nframes = len(iq) // nfft
        nsamps = nframes * nfft
        x = np.reshape(iq[:nsamps], (nframes, nfft))
        X = np.abs(fftshift(fft(x, n=nfft, axis=1) / nfft, axes=1)) ** 2
        X = np.sum(X, axis=0)
    else:
        # BUG FIX: the magnitude was previously not squared, so this branch
        # plotted an amplitude spectrum while being labeled (and dB-scaled
        # with 10*log10) as a power spectrum. Square for consistency with
        # the NCI branch above.
        X = np.abs(fftshift(fft(iq, n=nfft) / nfft)) ** 2
    if normalize:
        X = X / np.amax(X)
    if log_scale:
        X = 10 * np.log10(X)
    f = fftshift(fftfreq(nfft, d=1/fs))
    units, scale = frequency_units(f)
    yunit = ' (dB)' if log_scale else ''
    p = plt.plot(f / scale, X)
    plt.title('Power Spectrum')
    # Parenthesized unit for consistency with inst_freq / time_domain labels.
    plt.xlabel(f'Frequency ({units})')
    plt.ylabel('Magnitude' + yunit)
    if grid:
        plt.grid()
    if show:
        plt.show()
    else:
        return p
def time_domain(iq: np.ndarray, fs: float = 1, log_scale: bool = True,
                grid: bool = False, show: bool = False):
    """Plot power vs time of an complex signal.

    Args:
        iq (np.ndarray): Complex data to process and plot.
        fs (float, optional): Sample rate of the data. Defaults to 1.
        log_scale (bool, optional): If True, show the data on a log scale. Defaults to True.
        grid (bool, optional): If True, plot with a grid. Defaults to False.
        show (bool, optional): If True, show the plot and return None. Defaults to False.

    Returns:
        matplotlib.lines.Line2D: The plot as a Line2D object
    """
    # Instantaneous power is the squared magnitude of each sample.
    pwr = np.abs(iq) ** 2
    if log_scale:
        pwr = 10 * np.log10(pwr)
    yunit = ' (dB)' if log_scale else ''
    t = np.arange(len(pwr)) / fs
    # With fs == 1 the x axis is just the sample index.
    if fs == 1:
        units, scale = ('samples', 1)
    else:
        units, scale = time_units(t)
    line = plt.plot(t / scale, pwr)
    plt.title('Power vs Time')
    plt.xlabel(f'Time ({units})')
    plt.ylabel('Squared Magnitude' + yunit)
    if grid:
        plt.grid()
    if show:
        plt.show()
        return None
    return line
|
<filename>Albert/models/multi_class_rnn.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : multi_class_rnn.py
@Author : Racle
@Version : 1.0
@Desc : None
'''
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import (
PreTrainedModel,
BertModel,
BertPreTrainedModel,
AlbertModel,
AlbertPreTrainedModel,
XLNetModel,
XLNetPreTrainedModel,
DistilBertConfig,
DistilBertModel,
ElectraForMaskedLM,
ElectraForPreTraining,
RobertaConfig,
RobertaModel,
ElectraConfig,
ElectraModel,
ElectraPreTrainedModel,
)
from transformers.modeling_roberta import RobertaClassificationHead, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_distilbert import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_utils import SequenceSummary
from awd_lstm import AWDLSTM
"""
Bert类模型加上LSTM,结合multi_label_linear.py,可以轻松定义任意Bert类模型。
NOTE: 训练Bert类模型加上LSTM,很容易过拟合。因此,一般的方法是,一起fine-tuning到一个还不错的结果,不要求完全收敛。
然后,freezeBert类模型参数,再fine-tuning LSTM,得到最优的结果。LSTM一般采用双向。
"""
class BertRNN(BertPreTrainedModel):
    """BERT encoder + (AWD-)LSTM head for multi-class classification.

    Two heads are combined: an LSTM run over the full sequence of hidden
    states, and a linear classifier over the pooled [CLS] output; their
    logits are summed.
    """

    def __init__(self,
                 config,
                 extra_config,
                 freez_pretrained=False,
                 weight=None):
        # weight: per-label class weights passed to the loss, len == num_labels
        super(BertRNN, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        if extra_config.use_awdlstm:
            self.awd = True
            self.lstm = AWDLSTM(config.hidden_size,
                                extra_config.hidden_size,
                                n_layers=extra_config.n_layers,
                                bidirectional=extra_config.bidirectional,
                                hidden_p=0.2,
                                input_p=0.6,
                                weight_p=0.5,)
        else:
            # BUG FIX: self.awd was previously only assigned in the True
            # branch, so forward() crashed with AttributeError whenever
            # use_awdlstm was False.
            self.awd = False
            self.lstm = nn.LSTM(config.hidden_size,
                                extra_config.hidden_size,
                                extra_config.n_layers,
                                bidirectional=extra_config.bidirectional,
                                batch_first=True,
                                dropout=extra_config.lstm_dropout)
        param_n = 2 if extra_config.bidirectional else 1
        self.linear = nn.Linear(param_n * extra_config.hidden_size,
                                self.num_labels)
        # BUG FIX: dropout was assigned twice; the first assignment read
        # `config.dropout`, which BertConfig does not define. Keep only the
        # hidden_dropout_prob-based module (the one that was effective).
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.weight = weight
        self.init_weights()
        if freez_pretrained:
            # BUG FIX: the encoder attribute is self.bert, not self.albert
            # (the original raised AttributeError when freezing).
            for param in self.bert.parameters():
                param.requires_grad = False

    def forward(self,
                input_ids,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                labels=None):
        """Run the encoder and both heads; return (loss?), logits, extras.

        BertModel outputs:
          outputs[0] last_hidden_state: (batch, seq_len, hidden)
          outputs[1] pooler_output: [CLS] state through Linear + Tanh
          outputs[2:] optional hidden_states / attentions
        """
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask)
        # RNN head over the full sequence of hidden states.
        last_hidden_state = outputs[0]
        out, _ = self.lstm(last_hidden_state)
        if self.awd:
            # AWDLSTM returns a tuple; the first element is the output tensor.
            out = out[0]
        out = self.dropout(out[:, -1, :])  # last time step
        out = self.linear(out)
        # Linear head over the pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        linear_out = self.classifier(pooled_output)
        # Combine both heads by summing their logits.
        logits = linear_out + out
        outputs = (logits, ) + outputs[2:]
        if labels is not None:
            # NOTE(review): labels are cast to float with shape
            # (batch, num_labels), which looks like soft/multi-label targets;
            # CrossEntropyLoss classically expects class indices. Confirm
            # against the training pipeline before changing.
            loss_fct = CrossEntropyLoss(weight=self.weight)
            labels = labels.float()
            loss = loss_fct(logits.view(-1, self.num_labels),
                            labels.view(-1, self.num_labels))
            outputs = (loss, ) + outputs
        # (loss), logits, (hidden_states), (attentions)
        return outputs
class AlbertRNN(BertPreTrainedModel):
    """ALBERT encoder + (AWD-)LSTM head for multi-class classification.

    Mirrors BertRNN but with an AlbertModel encoder. Logits from an LSTM
    over the sequence states and a linear classifier over the pooled [CLS]
    output are summed.

    NOTE(review): the base class is BertPreTrainedModel, not
    AlbertPreTrainedModel, as in the original — confirm that pretrained
    weight loading behaves as intended before changing it.
    """

    def __init__(self,
                 config,
                 extra_config,
                 freez_pretrained=False,
                 weight=None):
        # weight: per-label class weights passed to the loss, len == num_labels
        super(AlbertRNN, self).__init__(config)
        self.num_labels = config.num_labels
        self.bert = AlbertModel(config)
        if extra_config.use_awdlstm:
            self.awd = True
            self.lstm = AWDLSTM(config.hidden_size,
                                extra_config.hidden_size,
                                n_layers=extra_config.n_layers,
                                bidirectional=extra_config.bidirectional,
                                hidden_p=0.2,
                                input_p=0.6,
                                weight_p=0.5,)
        else:
            # BUG FIX: self.awd was previously only assigned in the True
            # branch, so forward() crashed with AttributeError whenever
            # use_awdlstm was False.
            self.awd = False
            self.lstm = nn.LSTM(config.hidden_size,
                                extra_config.hidden_size,
                                extra_config.n_layers,
                                bidirectional=extra_config.bidirectional,
                                batch_first=True,
                                dropout=extra_config.lstm_dropout)
        param_n = 2 if extra_config.bidirectional else 1
        self.linear = nn.Linear(param_n * extra_config.hidden_size,
                                self.num_labels)
        # BUG FIX: dropout was assigned twice; the first assignment read
        # `config.dropout`, which the config does not define. Keep only the
        # hidden_dropout_prob-based module (the one that was effective).
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.weight = weight
        self.init_weights()
        if freez_pretrained:
            # BUG FIX: the encoder attribute is self.bert, not self.albert
            # (the original raised AttributeError when freezing).
            for param in self.bert.parameters():
                param.requires_grad = False

    def forward(self,
                input_ids,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None):
        """Run the encoder and both heads; return (loss?), logits, extras.

        Encoder outputs:
          outputs[0] last_hidden_state: (batch, seq_len, hidden)
          outputs[1] pooler_output: [CLS] state through Linear + Tanh
          outputs[2:] optional hidden_states / attentions
        """
        # BUG FIX: inputs_embeds was accepted but silently dropped; it is
        # now forwarded to the encoder.
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask,
                            inputs_embeds=inputs_embeds)
        # RNN head over the full sequence of hidden states.
        last_hidden_state = outputs[0]
        out, _ = self.lstm(last_hidden_state)
        if self.awd:
            # AWDLSTM returns a tuple; the first element is the output tensor.
            out = out[0]
        out = self.dropout(out[:, -1, :])  # last time step
        out = self.linear(out)
        # Linear head over the pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        linear_out = self.classifier(pooled_output)
        # Combine both heads by summing their logits.
        logits = linear_out + out
        outputs = (logits, ) + outputs[2:]
        if labels is not None:
            # NOTE(review): labels are cast to float with shape
            # (batch, num_labels), which looks like soft/multi-label targets;
            # CrossEntropyLoss classically expects class indices. Confirm
            # against the training pipeline before changing.
            loss_fct = CrossEntropyLoss(weight=self.weight)
            labels = labels.float()
            loss = loss_fct(logits.view(-1, self.num_labels),
                            labels.view(-1, self.num_labels))
            outputs = (loss, ) + outputs
        # (loss), logits, (hidden_states), (attentions)
        return outputs
|
<reponame>pradeep66927/filesRecords<filename>looppatter.py<gh_stars>0
#pattern printing
#pattern triangle pattern
# print("hi pradeep")
#Qn-1 ************************************************************
"""I=1 #OUTPUT
while i<=5: #*
j=1 #**
while j<=i: #***
print("*",end=" ") #****
j=j+1 #*****
print()
i=i+1
"""
"""for i in range (1,6):
for j in range(0,i):
print("*",end="")
print()
"""
#Qn-2 ******************************************************
"""i=1 #OUTPUT
while i<=5: #1
j=1 #22
while j<=i: #333
print(i,end=" ") #4444
j=j+1 #55555
print()
i=i+1
"""
"""for i in range (1,6):
for j in range(0,i):
print(i,end="")
print()
"""
#Qn-3
"""i=1 #OUTPUT
while i<=5: #1
j=1 #12
while j<=i: #123
print(j,end=" ") #1234
j=j+1 #12345
print()
i=i+1
"""
'''for i in range (0,7):
for j in range(1,i):
print(j,end="")
print()
'''
# i=1
# while i<=5:
# b=1
# while b<=5-i:
# print(" ",end="")
# b=b+1
# j=1
# while j<=i:
# print("*",end="")
# j=j+1
# print()
# i=i+1
# print('pradeep')
#Qn-4 *******************************************
# k=1
# i=1
# while i<=5:
# b=1
# while b<=5-i:
# print(" ",end="") #not working
# b=b+1
# j=1
# while j<=k:
# print("*",end="")
# j=j+1
# print()
# k=k+1
# #print()
# i=i+1
# #print()
#Qn-5 *****************************************
# strl = input("enter string :")
# length=len(strl)
# for i in range (length):
# for j in range (length-i-1):
# print(" ",end=" ")
# for j in range (i+1):
# print(strl[j],end=" ")
# print()
#output
# p
# p y
# p y t
# p y t h
# p y t h o
# p y t h o n
# strl = input("enter string :")
# length=len(strl)
# for i in range (length):
# for j in range (length-i-1):
# print(" ",end=" ")
# for j in range (i+1):
# print(strl[i],end=" ") #we take i in
# print() #place of j
#output
# p
# y y
# t t t
# h h h h
# o o o o o
# n n n n n n
# strl = input("enter string :")
# length=len(strl)
# for i in range (length):
# for j in range (length-i-1):
# print(" ",end="") #we remove space
# for j in range (i+1): #from(" ") to ("")
# print(strl[i],end=" ")
# print()
# p
# y y
# t t t
# h h h h
# o o o o o
# n n n n n n
# Letter-like pattern: a top bar plus two vertical side columns.
for r in range(6):
    for c in range(4):
        # BUG FIX: the original tested (c==0 and (c==3)), which is always
        # False, so the side columns never printed and only the top-row
        # middle stars appeared. `or` matches the surrounding pattern
        # exercises (star at both edge columns of every row).
        if (c == 0 or c == 3) or (r == 0 and (c == 1 or c == 2)):
            print('*', end='')
        else:
            print(' ', end='')
    print()
# # G pattern with not in good way
# for row in range(7):
# for col in range(6):
# if (col==0 or (col==4 and (row!=1 and row!=2)) or (row==0 or row==6) and (col>0 and col<4)) or(row==3 and (col==3 or col==5)):
# print("*",end="")
# else:
# print(end=" ")
# print()
#Qn- **************************************
# for row in range(7):
# for col in range(5):
# if (row in {0,6}) and (col in {1,2,3}):
# print("*",end=" ")
# elif (row in {1,4,5}) and (col in {0,4}):
# print("*",end=" ")
# elif (row==2) and (col==0):
# print("*",end=" ")
# elif (row==3) and (col!=1):
# print("*",end=" ")
# else:
# print(" ",end=" ")
# print()
#OUTPUT
# * * *
# * *
# *
# * * * *
# * *
# * *
# * * *
# 2nd method *******************************
#n=5
#for i in range(n):
#Qn- ***************************************
# s=7
# for r in range(s):
# for c in range(s):
# if (r==c) or (r+c==s-1):
# print("*",end=" ")
# else:
# print(" ",end=" ")
# print()
# #output
# * *
# * *
# * *
# *
# * *
# * *
# * *
#Qn-print z pattern*******************************
# i=1
# j=4
# for row in range(0,6):
# for col in range(0,6):
# if row==0 or row==5:
# print("*",end="")
# elif row==i and col==j:
# print("*",end="")
# i=i+1
# j=j-1
# else:
# print(end=" ")
# print()
# output
# ******
# *
# *
# *
# *
# ******
#2nd method *************************************
# for row in range(0,6):
# for col in range(0,6):
# if (row==0 or row==5) or (row+col==5):
# print("*",end=" ")
# else:
# print(end=" ")
# print()
# 3rd metod **************************************
# s=6
# for row in range(s):
# for col in range(s):
# if (row==0 or row==s-1) or (row+col==s-1):
# print("*",end=" ")
# else:
# print(end=" ")
# print()
# k=1
# i=1
# while i<=5:
# b=1
# while b<=5-i: #remaind
# print(" ",end="")
# b=b+1
# j=1
# while j<=k:
# print("*",end=" ")
# j=j+1
# k=k+1
# print()
# i=i+1
# #output
# *
# * *
# * * *
# * * * *
# * * * * *
# k=1
# i=1
# while i<=5:
# b=1
# while b<=5-i: #remaind
# print(" ",end="")
# b=b+1
# j=1
# while j<=k:
# print("A",end=" ")
# j=j+1
# k=k+1
# print()
# i=i+1
# A
# A A
# A A A
# A A A A
# A A A A A
#UNIVERSAL CODE FOR PYRAMID
# n=int(input("enter your number"))
# k=1
# i=1
# while i<=n:
# b=1
# while b<=n-i: #remaind
# print(" ",end="")
# b=b+1
# j=1
# while j<=k:
# print("A",end=" ")
# j=j+1
# k=k+1
# print()
# i=i+1
#***********************************************
#ALPHABET BY USING ROW COLUMN METHOD
# for r in range(4):
# for c in range(4):
# if (r==0 and (c==0 or c==1 or c==2 or c==3)) or(r==1 and c==2) or (r==2 and c==1) or (r==3 and (c==0 or c==1 or c==2 or c==3)):
# print("*",end=" ")
# else:
# print(" ",end=" ")
# print()
# * * * *
# *
# *
# * * * *
# for r in range(3):
# for c in range(4):
# if (r==0 and (c==0 or c==2)) or (r==c) or (r==2 and (c==0 or c==2)):
# print("*",end=" ")
# else:
# print(" ", end=" ")
# print( )
#print(ord("A"))
# PRINT ALPHABET PATTERN BY USING ASCII VALUE
# n=int(input("enter your number :"))
# for i in range(n):
# for z in range(1,n-i):
# print(' ',end='')
# for j in range(i+1):
# print(chr(65+i),end=" ")
# print(" ")
# n=int(input("enter your number"))
# k=1
# i=0
# while i<=n:
# b=1
# while b<=n-i: #remaind
# print(" ",end="")
# b=b+1
# j=1
# while j<=k:
# print(chr(65+i),end=" ")
# j=j+1
# k=k+1
# print()
# i=i+1
#SNAKE PATTERN *****************************
#n=int(input("enter your number"))
# n=5 #space problem b/w 6 to 1
# k=15 #need to work
# for i in range(1,n+1):
# if i%2!=0:
# for j in range(1,i+1):
# print(k,end= " ")
# k=k-1
# if i%2==0:
# for j in range(k-i+1,k+1,1):
# print(j,end= " ")
# k=k-1
# print( )
# OUTPUT
# 15
# 13 14
# 12 11 10
# 6 7 8 9
# 5 4 3 2 1
# ARMSTRONG NUMBER(153=1^2+5^2+3^2=153 THAT IS ARMS)
# i=int(input("enter number"))
# orgi=i
# sum=0
# while i>0:
# sum=sum+(i%10)*(i%10)*(i%10)
# i=i//10
# if sum==orgi:
# print(orgi,"it is armstrong no")
# else:
# print(orgi,"it is not a armstrong no")
####################################################
#PALINDROME NUMBER
# i=int(input('enter your number : '))
# rev=0
# x=i
# while (i>0):
# rev=(rev*10)+(i%10)
# i=i//10
# if (x==rev):
# print('palindrome no')
# else:
# print('not a palindrome no')
#using for lop its not working need check again
# i=int(input('enter your number : '))
# rev=0
# x=i
# for i in range(i):
# rev=(rev*10)+(i%10)
# i=i//10
# if (x==rev):
# print('palindrome no')
# else:
# print('not a palindrome no')
################################################
#find factorial value of a number
# i=int(input('enter number : '))
# fac=1
# while (i>0):
# fac=fac*i
# i=i-1
# print('factorial = ',fac)
#using for loop factorial qn
# i=int(input('enter number : '))
# fac=1
# for i in range(i,0,-1):
# fac=fac*i
# print('factorial =',fac)
#################################################
#product of each digit of user given
# i=int(input('please enter no : '))
# prod=1
# while (i>0):
# prod=prod*(i%10)
# i=i//10
# print('product of digit of given NUMBER :',prod)
#space for loop using
##################################################
#sum of digit of given number
# i=int(input('enter number :'))
# sum=0
# while (i>0):
# sum=sum+(i%10)
# i=i//10
# print('sum of digit of NUMBER =',sum)
#space for using for loop
#################################################
#sum of square of digit of given number
# i=int(input('enter number : '))
# sum=0
# while (i>0):
# sum=sum+(i%10)*(i%10)
# i=i//10
# print('square of digit of sum = ',sum)
# using for loop
# i=int(input('enter number :'))
# sum=0
# for i in range():
# sum=sum+(i%10)*(i%10)
# i=i//10
# print('sum of digit square= ',sum)
# PROGRESS TRACKING QN ON 07/09/2021
# iterable=[1,2]
# for word in iterable:
# print(word)
# iterable.append(word)
# Count down from 0 to -20 inclusive, one value per line.
for value in range(0, -21, -1):
    print(value)
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module contains utils class for axes management.
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "20/11/2018"
import functools
import logging
from contextlib import contextmanager
import weakref
import silx.utils.weakref as silxWeakref
from silx.gui.plot.items.axis import Axis, XAxis, YAxis
from ...qt.inspect import isValid as _isQObjectValid
_logger = logging.getLogger(__name__)
class SyncAxes(object):
    """Synchronize a set of plot axes together.

    It is created with the expected axes and starts to synchronize them.
    It can be customized to synchronize limits, scale, and direction of axes
    together. By default everything is synchronized.

    The API :meth:`start` and :meth:`stop` can be used to enable/disable the
    synchronization while this object is still alive.

    If this object is destroyed the synchronization stop.

    .. versionadded:: 0.6
    """

    def __init__(self, axes,
                 syncLimits=True,
                 syncScale=True,
                 syncDirection=True,
                 syncCenter=False,
                 syncZoom=False,
                 filterHiddenPlots=False
                 ):
        """
        Constructor

        :param list(Axis) axes: A list of axes to synchronize together
        :param bool syncLimits: Synchronize axes limits
        :param bool syncScale: Synchronize axes scale
        :param bool syncDirection: Synchronize axes direction
        :param bool syncCenter: Synchronize the center of the axes in the center
            of the plots
        :param bool syncZoom: Synchronize the zoom of the plot
        :param bool filterHiddenPlots: True to avoid updating hidden plots.
            Default: False.
        """
        object.__init__(self)

        def implies(x, y):
            # Logical implication (x => y). The original computed
            # bool(y ** x), which has the same truth table for booleans
            # but obscures the intent.
            return (not x) or y

        # Limit synchronization is mutually exclusive with center/zoom sync.
        assert(implies(syncZoom, not syncLimits))
        assert(implies(syncCenter, not syncLimits))
        assert(implies(syncLimits, not syncCenter))
        assert(implies(syncLimits, not syncZoom))
        self.__filterHiddenPlots = filterHiddenPlots
        # True while this object itself is updating axes (re-entrancy guard)
        self.__locked = False
        # Weak references to the synchronized axes
        self.__axisRefs = []
        self.__syncLimits = syncLimits
        self.__syncScale = syncScale
        self.__syncDirection = syncDirection
        self.__syncCenter = syncCenter
        self.__syncZoom = syncZoom
        # None while synchronization is stopped; else {axisRef: callbacks}
        self.__callbacks = None
        # Weakref to the last axis that triggered a synchronization
        self.__lastMainAxis = None
        for axis in axes:
            self.addAxis(axis)
        self.start()

    def start(self):
        """Start synchronizing axes together.

        The first axis is used as the reference for the first synchronization.
        After that, any changes to any axes will be used to synchronize other
        axes.
        """
        if self.isSynchronizing():
            raise RuntimeError("Axes already synchronized")
        self.__callbacks = {}
        axes = self.__getAxes()
        # register callback for further sync
        for axis in axes:
            self.__connectAxes(axis)
        self.synchronize()

    def isSynchronizing(self):
        """Returns true if events are connected to the axes to synchronize them
        all together

        :rtype: bool
        """
        return self.__callbacks is not None

    def __connectAxes(self, axis):
        """Connect the axis signals matching the requested synchronizations.

        :param Axis axis: The axis to connect
        """
        refAxis = weakref.ref(axis)
        callbacks = []
        if self.__syncLimits:
            # the weakref is needed to be able ignore self references
            callback = silxWeakref.WeakMethodProxy(self.__axisLimitsChanged)
            callback = functools.partial(callback, refAxis)
            sig = axis.sigLimitsChanged
            sig.connect(callback)
            callbacks.append(("sigLimitsChanged", callback))
        elif self.__syncCenter and self.__syncZoom:
            # the weakref is needed to be able ignore self references
            callback = silxWeakref.WeakMethodProxy(self.__axisCenterAndZoomChanged)
            callback = functools.partial(callback, refAxis)
            sig = axis.sigLimitsChanged
            sig.connect(callback)
            callbacks.append(("sigLimitsChanged", callback))
        elif self.__syncZoom:
            # zoom-only synchronization is not supported
            raise NotImplementedError()
        elif self.__syncCenter:
            # the weakref is needed to be able ignore self references
            callback = silxWeakref.WeakMethodProxy(self.__axisCenterChanged)
            callback = functools.partial(callback, refAxis)
            sig = axis.sigLimitsChanged
            sig.connect(callback)
            callbacks.append(("sigLimitsChanged", callback))
        if self.__syncScale:
            # the weakref is needed to be able ignore self references
            callback = silxWeakref.WeakMethodProxy(self.__axisScaleChanged)
            callback = functools.partial(callback, refAxis)
            sig = axis.sigScaleChanged
            sig.connect(callback)
            callbacks.append(("sigScaleChanged", callback))
        if self.__syncDirection:
            # the weakref is needed to be able ignore self references
            callback = silxWeakref.WeakMethodProxy(self.__axisInvertedChanged)
            callback = functools.partial(callback, refAxis)
            sig = axis.sigInvertedChanged
            sig.connect(callback)
            callbacks.append(("sigInvertedChanged", callback))
        if self.__filterHiddenPlots:
            # the weakref is needed to be able ignore self references
            callback = silxWeakref.WeakMethodProxy(self.__axisVisibilityChanged)
            callback = functools.partial(callback, refAxis)
            plot = axis._getPlot()
            plot.sigVisibilityChanged.connect(callback)
            callbacks.append(("sigVisibilityChanged", callback))
        self.__callbacks[refAxis] = callbacks

    def __disconnectAxes(self, axis):
        """Disconnect all the signals connected by :meth:`__connectAxes`.

        :param Union[None,Axis] axis: The axis to disconnect (ignored if dead)
        """
        if axis is not None and _isQObjectValid(axis):
            ref = weakref.ref(axis)
            callbacks = self.__callbacks.pop(ref)
            for sigName, callback in callbacks:
                # visibility callbacks were connected on the plot, not the axis
                if sigName == "sigVisibilityChanged":
                    obj = axis._getPlot()
                else:
                    obj = axis
                if obj is not None:
                    sig = getattr(obj, sigName)
                    sig.disconnect(callback)

    def addAxis(self, axis):
        """Add a new axes to synchronize.

        :param ~silx.gui.plot.items.Axis axis: The axis to synchronize
        """
        self.__axisRefs.append(weakref.ref(axis))
        if self.isSynchronizing():
            self.__connectAxes(axis)
            # This could be done faster as only this axis have to be fixed
            self.synchronize()

    def removeAxis(self, axis):
        """Remove an axis from the synchronized axes.

        :param ~silx.gui.plot.items.Axis axis: The axis to remove
        """
        ref = weakref.ref(axis)
        self.__axisRefs.remove(ref)
        if self.isSynchronizing():
            self.__disconnectAxes(axis)

    def synchronize(self, mainAxis=None):
        """Synchronize programatically all the axes.

        :param ~silx.gui.plot.items.Axis mainAxis:
            The axis to take as reference (Default: the first axis).
        """
        # sync the current state
        axes = self.__getAxes()
        if len(axes) == 0:
            return
        if mainAxis is None:
            mainAxis = axes[0]
        refMainAxis = weakref.ref(mainAxis)
        if self.__syncLimits:
            self.__axisLimitsChanged(refMainAxis, *mainAxis.getLimits())
        elif self.__syncCenter and self.__syncZoom:
            self.__axisCenterAndZoomChanged(refMainAxis, *mainAxis.getLimits())
        elif self.__syncCenter:
            self.__axisCenterChanged(refMainAxis, *mainAxis.getLimits())
        if self.__syncScale:
            self.__axisScaleChanged(refMainAxis, mainAxis.getScale())
        if self.__syncDirection:
            self.__axisInvertedChanged(refMainAxis, mainAxis.isInverted())

    def stop(self):
        """Stop the synchronization of the axes"""
        if not self.isSynchronizing():
            raise RuntimeError("Axes not synchronized")
        for ref in list(self.__callbacks.keys()):
            axis = ref()
            self.__disconnectAxes(axis)
        self.__callbacks = None

    def __del__(self):
        """Destructor"""
        # clean up references
        if self.__callbacks is not None:
            self.stop()

    def __getAxes(self):
        """Returns list of existing axes.

        :rtype: List[Axis]
        """
        axes = [ref() for ref in self.__axisRefs]
        return [axis for axis in axes if axis is not None]

    @contextmanager
    def __inhibitSignals(self):
        """Context manager silencing this object's own axis callbacks.

        BUG FIX: release the lock in a ``finally`` block so that an
        exception raised while updating an axis can no longer leave
        ``__locked`` stuck at True (which would disable synchronization
        forever).
        """
        self.__locked = True
        try:
            yield
        finally:
            self.__locked = False

    def __axesToUpdate(self, changedAxis):
        """Yield every axis to propagate a change to (skips the source axis
        and, when filtering is enabled, axes of hidden plots)."""
        for axis in self.__getAxes():
            if axis is changedAxis:
                continue
            if self.__filterHiddenPlots:
                plot = axis._getPlot()
                if not plot.isVisible():
                    continue
            yield axis

    def __axisVisibilityChanged(self, changedAxis, isVisible):
        """Re-synchronize from the last main axis when a plot becomes
        visible again."""
        if not isVisible:
            return
        if self.__locked:
            return
        changedAxis = changedAxis()
        if self.__lastMainAxis is None:
            self.__lastMainAxis = self.__axisRefs[0]
        mainAxis = self.__lastMainAxis
        mainAxis = mainAxis()
        self.synchronize(mainAxis=mainAxis)
        # force back the main axis
        self.__lastMainAxis = weakref.ref(mainAxis)

    def __getAxesCenter(self, axis, vmin, vmax):
        """Returns the value displayed in the center of this axis range.

        :rtype: float
        """
        scale = axis.getScale()
        if scale == Axis.LINEAR:
            center = (vmin + vmax) * 0.5
        else:
            raise NotImplementedError("Log scale not implemented")
        return center

    def __getRangeInPixel(self, axis):
        """Returns the size of the axis in pixel"""
        bounds = axis._getPlot().getPlotBoundsInPixels()
        # bounds: left, top, width, height
        if isinstance(axis, XAxis):
            return bounds[2]
        elif isinstance(axis, YAxis):
            return bounds[3]
        else:
            assert(False)

    def __getLimitsFromCenter(self, axis, pos, pixelSize=None):
        """Returns the limits to apply to this axis to move the `pos` into the
        center of this axis.

        :param Axis axis:
        :param float pos: Position in the center of the computed limits
        :param Union[None,float] pixelSize: Pixel size to apply to compute the
            limits. If `None` the current pixel size is applyed.
        """
        scale = axis.getScale()
        if scale == Axis.LINEAR:
            if pixelSize is None:
                # Use the current pixel size of the axis.
                # NOTE: valueRange is negative when limits are (min, max);
                # the final swap below normalizes the returned order.
                limits = axis.getLimits()
                valueRange = limits[0] - limits[1]
                a = pos - valueRange * 0.5
                b = pos + valueRange * 0.5
            else:
                pixelRange = self.__getRangeInPixel(axis)
                a = pos - pixelRange * 0.5 * pixelSize
                b = pos + pixelRange * 0.5 * pixelSize
        else:
            raise NotImplementedError("Log scale not implemented")
        if a > b:
            return b, a
        return a, b

    def __axisLimitsChanged(self, changedAxis, vmin, vmax):
        """Propagate new limits to the other axes."""
        if self.__locked:
            return
        self.__lastMainAxis = changedAxis
        changedAxis = changedAxis()
        with self.__inhibitSignals():
            for axis in self.__axesToUpdate(changedAxis):
                axis.setLimits(vmin, vmax)

    def __axisCenterAndZoomChanged(self, changedAxis, vmin, vmax):
        """Propagate the center and the pixel size to the other axes."""
        if self.__locked:
            return
        self.__lastMainAxis = changedAxis
        changedAxis = changedAxis()
        with self.__inhibitSignals():
            center = self.__getAxesCenter(changedAxis, vmin, vmax)
            pixelRange = self.__getRangeInPixel(changedAxis)
            if pixelRange == 0:
                return
            pixelSize = (vmax - vmin) / pixelRange
            for axis in self.__axesToUpdate(changedAxis):
                vmin, vmax = self.__getLimitsFromCenter(axis, center, pixelSize)
                axis.setLimits(vmin, vmax)

    def __axisCenterChanged(self, changedAxis, vmin, vmax):
        """Propagate the center (keeping each axis' own zoom) to other axes."""
        if self.__locked:
            return
        self.__lastMainAxis = changedAxis
        changedAxis = changedAxis()
        with self.__inhibitSignals():
            center = self.__getAxesCenter(changedAxis, vmin, vmax)
            for axis in self.__axesToUpdate(changedAxis):
                vmin, vmax = self.__getLimitsFromCenter(axis, center)
                axis.setLimits(vmin, vmax)

    def __axisScaleChanged(self, changedAxis, scale):
        """Propagate a scale change (linear/log) to the other axes."""
        if self.__locked:
            return
        self.__lastMainAxis = changedAxis
        changedAxis = changedAxis()
        with self.__inhibitSignals():
            for axis in self.__axesToUpdate(changedAxis):
                axis.setScale(scale)

    def __axisInvertedChanged(self, changedAxis, isInverted):
        """Propagate a direction change to the other axes."""
        if self.__locked:
            return
        self.__lastMainAxis = changedAxis
        changedAxis = changedAxis()
        with self.__inhibitSignals():
            for axis in self.__axesToUpdate(changedAxis):
                axis.setInverted(isInverted)
|
<filename>yt/frontends/gadget/io.py
"""
Gadget data-file handling functions
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import os
from yt.extern.six import string_types
from yt.utilities.io_handler import \
BaseIOHandler
from yt.utilities.lib.geometry_utils import \
compute_morton
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5py
from .definitions import \
gadget_hdf5_ptypes, \
SNAP_FORMAT_2_OFFSET
class IOHandlerGadgetHDF5(BaseIOHandler):
    """I/O handler for HDF5-format Gadget snapshots."""
    _dataset_type = "gadget_hdf5"
    # Fields stored as N x 3 arrays in the file
    _vector_fields = ("Coordinates", "Velocity", "Velocities")
    _known_ptypes = gadget_hdf5_ptypes
    # Lazily computed by the var_mass property below
    _var_mass = None
    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
                      'Neon', 'Magnesium', 'Silicon', 'Iron')

    @property
    def var_mass(self):
        """Particle types whose mass is stored per particle
        (header Massarr entry == 0)."""
        if self._var_mass is None:
            vm = []
            for i, v in enumerate(self.ds["Massarr"]):
                if v == 0:
                    vm.append(self._known_ptypes[i])
            self._var_mass = tuple(vm)
        return self._var_mass

    def _read_fluid_selection(self, chunks, selector, fields, size):
        # Gadget data is particle-only; there are no fluid fields to read.
        raise NotImplementedError

    def _read_particle_coords(self, chunks, ptf):
        # This will read chunks and yield the results.
        chunks = list(chunks)
        data_files = set([])
        for chunk in chunks:
            for obj in chunk.objs:
                data_files.update(obj.data_files)
        for data_file in sorted(data_files, key=lambda x: x.filename):
            f = h5py.File(data_file.filename, "r")
            # This double-reads
            for ptype, field_list in sorted(ptf.items()):
                if data_file.total_particles[ptype] == 0:
                    continue
                x = f["/%s/Coordinates" % ptype][:, 0].astype("float64")
                y = f["/%s/Coordinates" % ptype][:, 1].astype("float64")
                z = f["/%s/Coordinates" % ptype][:, 2].astype("float64")
                yield ptype, (x, y, z)
            f.close()

    def _read_particle_fields(self, chunks, ptf, selector):
        # Now we have all the sizes, and we can allocate
        data_files = set([])
        for chunk in chunks:
            for obj in chunk.objs:
                data_files.update(obj.data_files)
        for data_file in sorted(data_files, key=lambda x: x.filename):
            f = h5py.File(data_file.filename, "r")
            for ptype, field_list in sorted(ptf.items()):
                if data_file.total_particles[ptype] == 0:
                    continue
                g = f["/%s" % ptype]
                coords = g["Coordinates"][:].astype("float64")
                # mask selects particles inside the selector's region
                mask = selector.select_points(
                    coords[:, 0], coords[:, 1], coords[:, 2], 0.0)
                del coords
                if mask is None:
                    continue
                for field in field_list:
                    if field in ("Mass", "Masses") and \
                            ptype not in self.var_mass:
                        # Constant-mass particle type: broadcast header value
                        data = np.empty(mask.sum(), dtype="float64")
                        ind = self._known_ptypes.index(ptype)
                        data[:] = self.ds["Massarr"][ind]
                    elif field in self._element_names:
                        # Per-element abundances live in a sub-group
                        rfield = 'ElementAbundance/' + field
                        data = g[rfield][:][mask, ...]
                    elif field.startswith("Metallicity_"):
                        # Column of the 2D Metallicity dataset
                        col = int(field.rsplit("_", 1)[-1])
                        data = g["Metallicity"][:, col][mask]
                    elif field.startswith("Chemistry_"):
                        # Column of the 2D ChemistryAbundances dataset
                        col = int(field.rsplit("_", 1)[-1])
                        data = g["ChemistryAbundances"][:, col][mask]
                    else:
                        data = g[field][:][mask, ...]
                    yield (ptype, field), data
            f.close()

    def _initialize_index(self, data_file, regions):
        """Compute Morton indices for the particles of one data file."""
        index_ptype = self.index_ptype
        f = h5py.File(data_file.filename, "r")
        if index_ptype == "all":
            pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
            keys = f.keys()
        else:
            # e.g. index_ptype == "PartType0" -> pt == 0
            pt = int(index_ptype[-1])
            pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
            keys = [index_ptype]
        morton = np.empty(pcount, dtype='uint64')
        ind = 0
        for key in keys:
            if not key.startswith("PartType"):
                continue
            if "Coordinates" not in f[key]:
                continue
            ds = f[key]["Coordinates"]
            dt = ds.dtype.newbyteorder("N")  # Native
            pos = np.empty(ds.shape, dtype=dt)
            pos[:] = ds
            regions.add_data_file(pos, data_file.file_id,
                                  data_file.ds.filter_bbox)
            morton[ind:ind + pos.shape[0]] = compute_morton(
                pos[:, 0], pos[:, 1], pos[:, 2],
                data_file.ds.domain_left_edge,
                data_file.ds.domain_right_edge,
                data_file.ds.filter_bbox)
            ind += pos.shape[0]
        f.close()
        return morton

    def _count_particles(self, data_file):
        """Return {"PartTypeN": count} from the file header."""
        f = h5py.File(data_file.filename, "r")
        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
        f.close()
        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
        return npart

    def _identify_fields(self, data_file):
        """Discover the (ptype, field) pairs available in one data file."""
        f = h5py.File(data_file.filename, "r")
        fields = []
        cname = self.ds._particle_coordinates_name  # Coordinates
        mname = self.ds._particle_mass_name  # Mass
        # loop over all keys in OWLS hdf5 file
        #--------------------------------------------------
        for key in f.keys():
            # only want particle data
            #--------------------------------------
            if not key.startswith("PartType"):
                continue
            # particle data group
            #--------------------------------------
            g = f[key]
            if cname not in g:
                continue
            # note str => not unicode!
            ptype = str(key)
            if ptype not in self.var_mass:
                # constant-mass types still expose a Mass field
                fields.append((ptype, mname))
            # loop over all keys in PartTypeX group
            #----------------------------------------
            for k in g.keys():
                if k == 'ElementAbundance':
                    gp = g[k]
                    for j in gp.keys():
                        kk = j
                        fields.append((ptype, str(kk)))
                elif k == 'Metallicity' and len(g[k].shape) > 1:
                    # Vector of metallicity
                    for i in range(g[k].shape[1]):
                        fields.append((ptype, "Metallicity_%02i" % i))
                elif k == "ChemistryAbundances" and len(g[k].shape) > 1:
                    for i in range(g[k].shape[1]):
                        fields.append((ptype, "Chemistry_%03i" % i))
                else:
                    kk = k
                    if not hasattr(g[kk], "shape"):
                        continue
                    if len(g[kk].shape) > 1:
                        # NOTE(review): _vector_fields is declared as a tuple
                        # on this class; this item assignment only works if it
                        # has been converted to a dict elsewhere (e.g. by the
                        # base handler's __init__) -- confirm.
                        self._vector_fields[kk] = g[kk].shape[1]
                    fields.append((ptype, str(kk)))
        f.close()
        return fields, {}
# Sentinel used in field specs: marks a field that only exists for particle
# types whose header mass is zero (i.e. per-particle variable mass).
ZeroMass = object()
class IOHandlerGadgetBinary(BaseIOHandler):
    """I/O handler for unformatted-binary Gadget snapshots (format 1 and 2)."""
    _dataset_type = "gadget_binary"
    # (field name, number of components) for vector fields; converted to a
    # dict in __init__ for fast lookup.
    _vector_fields = (("Coordinates", 3),
                      ("Velocity", 3),
                      ("Velocities", 3),
                      ("FourMetalFractions", 4))
    # Particle types (Table 3 in GADGET-2 user guide)
    #
    # Blocks in the file:
    #   HEAD
    #   POS
    #   VEL
    #   ID
    #   MASS    (variable mass only)
    #   U       (gas only)
    #   RHO     (gas only)
    #   HSML    (gas only)
    #   POT     (only if enabled in makefile)
    #   ACCE    (only if enabled in makefile)
    #   ENDT    (only if enabled in makefile)
    #   TSTP    (only if enabled in makefile)
    _var_mass = None
    _format = None

    def __init__(self, ds, *args, **kwargs):
        self._vector_fields = dict(self._vector_fields)
        self._fields = ds._field_spec
        self._ptypes = ds._ptype_spec
        self.data_files = set([])
        # gadget format 1 original, 2 with block name
        gformat, endianswap = ds._header.gadget_format
        self._format = gformat
        self._endian = endianswap
        super(IOHandlerGadgetBinary, self).__init__(ds, *args, **kwargs)

    @property
    def var_mass(self):
        """Particle types whose mass is stored per particle
        (header Massarr entry == 0)."""
        if self._var_mass is None:
            vm = []
            for i, v in enumerate(self.ds["Massarr"]):
                if v == 0:
                    vm.append(self._ptypes[i])
            self._var_mass = tuple(vm)
        return self._var_mass

    def _read_fluid_selection(self, chunks, selector, fields, size):
        # Gadget data is particle-only; there are no fluid fields to read.
        raise NotImplementedError

    def _read_particle_coords(self, chunks, ptf):
        """Yield (ptype, (x, y, z)) coordinate arrays for each data file."""
        data_files = set([])
        for chunk in chunks:
            for obj in chunk.objs:
                data_files.update(obj.data_files)
        for data_file in sorted(data_files):
            poff = data_file.field_offsets
            tp = data_file.total_particles
            f = open(data_file.filename, "rb")
            for ptype in ptf:
                # This is where we could implement sub-chunking
                f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
                pos = self._read_field_from_file(
                    f, tp[ptype], "Coordinates")
                yield ptype, (pos[:, 0], pos[:, 1], pos[:, 2])
            f.close()

    def _read_particle_fields(self, chunks, ptf, selector):
        """Yield ((ptype, field), data) for selected particles."""
        data_files = set([])
        for chunk in chunks:
            for obj in chunk.objs:
                data_files.update(obj.data_files)
        for data_file in sorted(data_files):
            poff = data_file.field_offsets
            tp = data_file.total_particles
            f = open(data_file.filename, "rb")
            for ptype, field_list in sorted(ptf.items()):
                f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
                pos = self._read_field_from_file(
                    f, tp[ptype], "Coordinates")
                mask = selector.select_points(
                    pos[:, 0], pos[:, 1], pos[:, 2], 0.0)
                del pos
                if mask is None:
                    continue
                for field in field_list:
                    if field == "Mass" and ptype not in self.var_mass:
                        # Constant-mass type: broadcast header value
                        data = np.empty(mask.sum(), dtype="float64")
                        m = self.ds.parameters["Massarr"][
                            self._ptypes.index(ptype)]
                        data[:] = m
                        yield (ptype, field), data
                        continue
                    f.seek(poff[ptype, field], os.SEEK_SET)
                    data = self._read_field_from_file(f, tp[ptype], field)
                    data = data[mask, ...]
                    yield (ptype, field), data
            f.close()

    def _read_field_from_file(self, f, count, name):
        """Read `count` values of field `name` from the current position of
        the open file `f`, returned in native byte order.

        NOTE: returns None for count == 0; callers are expected not to index
        the result in that case.
        """
        if count == 0:
            return
        if name == "ParticleIDs":
            dt = self._endian + "u4"
        else:
            dt = self._endian + self._float_type
        dt = np.dtype(dt)
        if name in self._vector_fields:
            count *= self._vector_fields[name]
        arr = np.fromfile(f, dtype=dt, count=count)
        # ensure data are in native endianness to avoid errors
        # when field data are passed to cython
        dt = dt.newbyteorder('N')
        arr = arr.astype(dt)
        if name in self._vector_fields:
            factor = self._vector_fields[name]
            arr = arr.reshape((count // factor, factor), order="C")
        return arr

    def _get_morton_from_position(self, data_file, count, offset_count,
                                  regions, DLE, DRE):
        """Read `count` positions starting `offset_count` particles into the
        POS block and return their Morton indices."""
        with open(data_file.filename, "rb") as f:
            # We add on an additionally 4 for the first record.
            f.seek(data_file._position_offset + 4 + offset_count * 12)
            # The first total_particles * 3 values are positions
            pp = np.fromfile(f, dtype=self._endian + self._float_type,
                             count=count * 3)
            pp.shape = (count, 3)
            pp = pp.astype(self._float_type)
            regions.add_data_file(pp, data_file.file_id,
                                  data_file.ds.filter_bbox)
            morton = compute_morton(pp[:, 0], pp[:, 1], pp[:, 2], DLE, DRE,
                                    data_file.ds.filter_bbox)
        return morton

    def _initialize_index(self, data_file, regions):
        """Compute Morton indices for this data file, either for all particle
        types or only for self.index_ptype."""
        DLE = data_file.ds.domain_left_edge
        DRE = data_file.ds.domain_right_edge
        self._float_type = data_file.ds._header.float_type
        if self.index_ptype == "all":
            count = sum(data_file.total_particles.values())
            return self._get_morton_from_position(
                data_file, count, 0, regions, DLE, DRE)
        else:
            idpos = self._ptypes.index(self.index_ptype)
            count = data_file.total_particles.get(self.index_ptype)
            account = [0] + [data_file.total_particles.get(ptype)
                             for ptype in self._ptypes]
            account = np.cumsum(account)
            # BUG FIX: pass the integer particle `count` here; the original
            # passed the whole cumulative-sum array `account`, which cannot
            # be used to shape the position buffer in
            # _get_morton_from_position.
            return self._get_morton_from_position(
                data_file, count, account[idpos], regions, DLE, DRE)

    def _count_particles(self, data_file):
        """Return {ptype: count} from the binary header's Npart array."""
        npart = dict((self._ptypes[i], v)
                     for i, v in enumerate(data_file.header["Npart"]))
        return npart

    # header is 256, but we have 4 at beginning and end for ints
    _field_size = 4

    def _calculate_field_offsets(self, field_list, pcount,
                                 offset, file_size=None):
        """Compute the byte offset of every (ptype, field) block in the file.

        field_list is (ftype, fname) but the blocks are ordered
        (fname, ftype) in the file.
        """
        if self._format == 2:
            # Need to subtract offset due to extra header block
            pos = offset - SNAP_FORMAT_2_OFFSET
        else:
            pos = offset
        fs = self._field_size
        offsets = {}
        for field in self._fields:
            if not isinstance(field, string_types):
                field = field[0]
            if not any((ptype, field) in field_list
                       for ptype in self._ptypes):
                continue
            if self._format == 2:
                pos += 20  # skip block header
            elif self._format == 1:
                pos += 4
            else:
                raise RuntimeError(
                    "incorrect Gadget format %s!" % str(self._format))
            any_ptypes = False
            for ptype in self._ptypes:
                if field == "Mass" and ptype not in self.var_mass:
                    # constant-mass types have no per-particle MASS block
                    continue
                if (ptype, field) not in field_list:
                    continue
                offsets[(ptype, field)] = pos
                any_ptypes = True
                if field in self._vector_fields:
                    pos += self._vector_fields[field] * pcount[ptype] * fs
                else:
                    pos += pcount[ptype] * fs
            pos += 4
            if not any_ptypes:
                # no ptype had this block: undo the record framing we added
                pos -= 8
        if file_size is not None:
            if (file_size != pos) & (self._format == 1):  # ignore the rest of format 2
                mylog.warning("Your Gadget-2 file may have extra " +
                              "columns or different precision!" +
                              " (%s file vs %s computed)",
                              file_size, pos)
        return offsets

    def _identify_fields(self, domain):
        """Build the (ptype, field) list from particle counts and field spec.

        We can just look at the particle counts.
        """
        field_list = []
        tp = domain.total_particles
        for i, ptype in enumerate(self._ptypes):
            count = tp[ptype]
            if count == 0:
                continue
            m = domain.header["Massarr"][i]
            for field in self._fields:
                if isinstance(field, tuple):
                    field, req = field
                    if req is ZeroMass:
                        # field only exists for variable-mass types
                        if m > 0.0:
                            continue
                    elif isinstance(req, tuple) and ptype in req:
                        pass
                    elif req != ptype:
                        continue
                field_list.append((ptype, field))
        return field_list, {}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import sympy
import mpmath
from math import log
from six.moves import range
from mathics.core.util import unicode_superscript
def get_type(value):
    """Classify a number: 'z' integer, 'q' rational, 'f' float,
    'c' complex, or None for anything else."""
    if isinstance(value, sympy.Integer):
        return 'z'
    if isinstance(value, sympy.Rational):
        return 'q'
    if isinstance(value, (sympy.Float, mpmath.mpf)):
        return 'f'
    if isinstance(value, mpmath.mpc) or (
            isinstance(value, sympy.Expr) and value.is_number
            and not value.is_real):
        return 'c'
    return None
def same(v1, v2):
    """True when v1 and v2 have the same type class and compare equal."""
    if get_type(v1) != get_type(v2):
        return False
    return v1 == v2
def is_0(value):
    """True only for an integer ('z'-typed) value that equals zero."""
    if get_type(value) != 'z':
        return False
    return value == 0
def sympy2mpmath(value, prec=None):
    """Convert a sympy number to an mpmath mpf/mpc evaluated at ~prec bits.

    Returns None for non-numeric input; prec defaults to machine precision.
    """
    if prec is None:
        from mathics.builtin.numeric import machine_precision
        prec = machine_precision
    value = value.n(dps(prec))
    if value.is_real:
        return mpmath.mpf(value)
    if value.is_number:
        return mpmath.mpc(*value.as_real_imag())
    return None
class SpecialValueError(Exception):
    """Raised when a conversion encounters a special value (e.g. infinity)."""

    def __init__(self, name):
        # Symbolic name of the special value (e.g. 'ComplexInfinity').
        self.name = name
def mpmath2sympy(value, prec):
    """Convert an mpmath mpf/mpc back to a sympy number with `prec` bits.

    Raises SpecialValueError for infinities; returns None for other types.
    """
    if isinstance(value, mpmath.mpc):
        re_part = sympy.Float(str(value.real), dps(prec))
        im_part = sympy.Float(str(value.imag), dps(prec))
        return re_part + sympy.I * im_part
    if isinstance(value, mpmath.mpf):
        text = str(value)
        if text in ('+inf', '-inf'):
            raise SpecialValueError('ComplexInfinity')
        return sympy.Float(text, dps(prec))
    return None
# Bits per decimal digit: log2(10) ~ 3.3219280948873626
C = log(10, 2)


def dps(prec):
    """Decimal digits of precision for `prec` bits (at least 1)."""
    digits = int(round(int(prec) / C - 1))
    return digits if digits > 1 else 1


def prec(dps):
    """Bits of precision for `dps` decimal digits (at least 1)."""
    bits = int(round((int(dps) + 1) * C))
    return bits if bits > 1 else 1
def format_float(value, pretty=True, parenthesize_plus=False):
    """Render a float, formatting any exponent as a power of ten
    (unicode superscripts when `pretty`, `*10^n` otherwise)."""
    pieces = str(value).split('e')
    if len(pieces) != 2:
        # no exponent part: return the plain representation
        return pieces[0]
    man, exp = pieces
    if pretty:
        return '%s\u00d710%s' % (format_float(man), unicode_superscript(exp))
    result = '%s*10^%s' % (format_float(man), exp)
    return '(%s)' % result if parenthesize_plus else result
def mul(x, y):
    """Return the product of x and y."""
    return x * y
def add(x, y):
    """Return the sum of x and y."""
    return x + y
def min_prec(*args):
    """Return the smallest non-None precision among the arguments.

    The first argument's precision seeds the result even if it is None;
    returns None when called without arguments.
    """
    best = None
    for item in args:
        candidate = item.get_precision()
        if best is None:
            best = candidate
        elif candidate is not None and candidate < best:
            best = candidate
    return best
def pickle_mp(value):
    """Serialize a number as a (type_code, string) pair."""
    return get_type(value), str(value)
def unpickle_mp(value):
    """Rebuild a number from a (type_code, string) pair made by pickle_mp."""
    kind, text = value
    if kind == 'z':
        return sympy.Integer(text)
    if kind == 'q':
        return sympy.Rational(text)
    if kind == 'f':
        return sympy.Float(text)
    # unknown code: hand back the raw string
    return text
# algorithm based on
# http://stackoverflow.com/questions/5110177/how-to-convert-floating-point-number-to-base-3-in-python # nopep8
def convert_base(x, base, precision=10):
    """Render `x` in the given base (digits 0-9a-z), with up to `precision`
    fractional digits when `x` is a float."""
    sign = -1 if x < 0 else 1
    x *= sign
    length_of_int = 0 if x == 0 else int(log(x, base))
    int_exponents = list(range(length_of_int, -1, -1))
    import string
    digits = string.digits + string.ascii_lowercase
    if base > len(digits):
        raise ValueError

    def emit(value, exponents):
        # Greedy digit extraction, most significant exponent first.
        chars = []
        for e in exponents:
            d = int(value // (base ** e))
            value -= d * (base ** e)
            chars.append(digits[d])
            if value == 0 and e < 0:
                break
        return chars

    int_part = emit(int(x), int_exponents)
    if sign == -1:
        int_part.insert(0, '-')
    if isinstance(x, float):
        frac_exponents = list(range(-1, -int(precision + 1), -1))
        frac_part = emit(x - int(x), frac_exponents)
        return "%s.%s" % (''.join(int_part), ''.join(frac_part))
    return ''.join(int_part)
def convert_int_to_digit_list(x, base):
    """Return the digits of abs(x) in `base`, most significant first."""
    if x == 0:
        return [0]
    x = abs(x)
    exponents = range(int(log(x, base)) + 1, -1, -1)
    out = []
    for e in exponents:
        d = int(x // (base ** e))
        x -= d * (base ** e)
        if out or d != 0:  # drop any leading zeroes
            out.append(d)
        if x == 0 and e < 0:
            break
    return out
|
<gh_stars>0
# Copyright (C) 2018 Nordstrom, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import sys
import json
import os
import argparse
import pprint
from .asg_manager import Asg_Manager
from .ec2_tag_query_manager import EC2_Tag_Query_Manager
from .ip_address_service import Ip_Address_Service
from .constants import IP_ADDRESSES_KEY
from .constants import MESSAGE
# Keys expected in the incoming route dictionaries.
TAG_KEY = 'tag_key'
TAG_VALUE = 'tag_value'
ASG_NAME = 'asg_name_value'


class Route_Enrichment_Manager:
    """
    This class is used to fetch and append ip addresses to each route
    dictionary passed in.
    """

    def __init__(self, ip_address_service, logger=None):
        """
        :param ip_address_service: service used to resolve IP addresses
            for EC2 tags and ASGs
        :param logger: optional logger; when None, validation failures are
            skipped silently (backward compatible — the original required
            this argument even though the __main__ test drive omitted it)
        """
        self.ip_address_service = ip_address_service
        self.logger = logger

    def populate_tagged_ec2_routes_with_ip_addresses(self, route_parameter_array):
        """Enrich valid EC2-tag routes with IPs; invalid routes are dropped."""
        return [self._enrich_tagged_ec2_route(route)
                for route in route_parameter_array
                if self._tagged_parameters_valid(route)]

    def populate_tagged_asg_routes_with_ip_addresses(self, route_parameter_array):
        """Enrich valid ASG-tag routes with IPs; invalid routes are dropped."""
        return [self._enrich_tagged_asg_route(route)
                for route in route_parameter_array
                if self._tagged_parameters_valid(route)]

    def populate_named_asg_routes_with_ip_addresses(self, route_parameter_array):
        """Enrich valid named-ASG routes with IPs; invalid routes are dropped."""
        return [self._enrich_named_asg_route(route)
                for route in route_parameter_array
                if self._named_asg_parameters_valid(route)]

    def _tagged_parameters_valid(self, route):
        """A tagged route needs tag key/value plus path, port and TLS flag."""
        required = (TAG_KEY, TAG_VALUE, 'path', 'port_number', 'tls_enabled')
        result = all(route.get(key) is not None for key in required)
        if not result and self.logger is not None:
            self.logger.info({MESSAGE: 'tagged_parameters_not_valid'})
        return result

    def _named_asg_parameters_valid(self, route):
        """A named-ASG route needs the ASG name plus path, port and TLS flag.

        BUG FIX: uses route.get(ASG_NAME) like the tagged validator, so a
        route missing the ASG name is reported invalid instead of raising
        KeyError.
        """
        required = (ASG_NAME, 'path', 'port_number', 'tls_enabled')
        result = all(route.get(key) is not None for key in required)
        if not result and self.logger is not None:
            self.logger.info({MESSAGE: 'named_parameters_not_valid'})
        return result

    def _enrich_tagged_ec2_route(self, route):
        """Attach the running-instance IPs matching the route's EC2 tag."""
        route[IP_ADDRESSES_KEY] = self.ip_address_service.get_running_ip_addresses_by_tag(
            route[TAG_KEY], route[TAG_VALUE])
        return route

    def _enrich_tagged_asg_route(self, route):
        """Attach the IPs of ASG instances matching the route's ASG tag."""
        route[IP_ADDRESSES_KEY] = self.ip_address_service.get_asg_ips_for_asg_tag(
            route[TAG_KEY], route[TAG_VALUE])
        return route

    def _enrich_named_asg_route(self, route):
        """Attach the IPs of the ASG named by the route."""
        asg_names = [route[ASG_NAME]]
        route[IP_ADDRESSES_KEY] = self.ip_address_service.get_asg_ips_for_asg_names(
            asg_names)
        return route
#------------------------------
# Built in helpful test drive
#------------------------------
if __name__ == "__main__":
    # Sample routes keyed by EC2/ASG tag.
    tag_parameter_array = [
        {
            'tag_key': 'Name',
            'tag_value': 'myTag1',
            'name': 'route1',
            'path': '/path1/',
            'client_name': 'client1',
            'port_number': 'port1',
            'tls_enabled': False
        },
        {
            'tag_key': 'Name',
            'tag_value': 'myTag2',
            'name': 'route2',
            'path': '/path2/',
            'client_name': 'client2',
            'port_number': 'port2',
            'tls_enabled': True
        },
        {
            'tag_key': 'Name',
            'tag_value': 'myTag3',
            'name': 'route3',
            'path': '/path3/',
            'client_name': 'client3',
            'port_number': 'port3',
            'tls_enabled': False
        }
    ]
    # Sample routes keyed by ASG name.
    name_parameter_array = [
        {
            'asg_name_value': 'myTag4',
            'name': 'route1',
            'path': '/path1/',
            'client_name': 'client1',
            'port_number': 'port1',
            'tls_enabled': False
        },
        {
            'asg_name_value': 'myTag5',
            'name': 'route2',
            'path': '/path2/',
            'client_name': 'client2',
            'port_number': 'port2',
            'tls_enabled': True
        },
        {
            'asg_name_value': 'myTag6',
            'name': 'route3',
            'path': '/path3/',
            'client_name': 'client3',
            'port_number': 'port3',
            'tls_enabled': False
        }
    ]
    pp = pprint.PrettyPrinter(indent=4)
    session = boto3.Session()
    ec2_client = session.client('ec2', region_name='us-west-2')
    asg_client = session.client('autoscaling', region_name='us-west-2')
    asg_manager = Asg_Manager(asg_client, ec2_client)
    ec2_tag_query_manager = EC2_Tag_Query_Manager(ec2_client)
    ip_address_service = Ip_Address_Service(asg_manager, ec2_tag_query_manager)
    # BUG FIX: Route_Enrichment_Manager's constructor takes a logger as its
    # second argument; the original call omitted it and raised TypeError.
    subject = Route_Enrichment_Manager(ip_address_service, None)
    results1 = subject.populate_tagged_ec2_routes_with_ip_addresses(tag_parameter_array)
    pp.pprint(results1)
    results2 = subject.populate_tagged_asg_routes_with_ip_addresses(tag_parameter_array)
    pp.pprint(results2)
    results3 = subject.populate_named_asg_routes_with_ip_addresses(name_parameter_array)
    pp.pprint(results3)
|
<reponame>amcgrenera-vumc/curation
#!/usr/bin/env python
# coding: utf-8
# # REQUIREMENTS
# - Replace the ```observation_source_value``` and ```observation_source_concept_id``` for all records with
# ```observation_source_value = HealthInsurance_InsuranceTypeUpdate (ID 43528428, from The Basics)``` with the
# ```observation_source_value``` and ```observation_source_concept_ids``` for records with
# ```observation_source_value = Insurance_InsuranceType (ID 1384450, from HCAU)```.
#
#
# - Map the [HCAU] field values to the corresponding [The Basics] fields when replacing.
# If there are no values in the [HCAU] fields, set [The Basics] fields to NULL.
# ## Set up
# Load Libraries
from termcolor import colored
import pandas as pd
PROJECT_ID = ''
DATASET_ID = ''
# read as csv the list of PIDS where data needs to be overwritten
AC70_pids = pd.read_csv("AC70_PIDs.csv")
# Strip the leading 'p' from each id (e.g. 'p123' -> 123) in a single
# vectorized pass; the previous per-row `iloc` loop (and a stray no-op
# `int(AC70_pids.iloc[1, 0][1:])` expression) did the same thing slowly
# and left the 'pid' column with object dtype.
AC70_pids["pid"] = AC70_pids.iloc[:, 0].str[1:].astype(int)
# ## Obtaining dataframes to use in the SQL query
# - ```obs_pids_notin_list``` is a dataframe of person_ids in ```AC70_pids``` that ***are not** in the
# observation table when observation_source_concept_id = 43528428. For these, we will replace the
# corresponding fields in the observation table with NULL--> see below ```update1_observation_table```
#
# - ```obs_pids_in_list``` is a dataframe of person_ids in ```AC70_pids``` that ***are*** in the
# observation table when observation_source_concept_id = 43528428. For these, we will replace the
# corresponding fields in the observation table with hcau fields (observation_source_concept_id = 1384450)
# --> see below ```update2_observation_table```
obs_overwrite = pd.read_gbq('''
SELECT * FROM `{PROJECT_ID}.{DATASET_ID}.observation` o WHERE o.observation_source_concept_id = 43528428
'''.format(PROJECT_ID=PROJECT_ID, DATASET_ID=DATASET_ID),
                            dialect="standard")
# BUG FIX: `x in some_series` tests membership of the Series *index*, not
# its values, so the original partition was wrong (ids landed in the
# "not in" bucket regardless of the table contents).  Build a set of the
# actual person_id values once and test against that.
existing_person_ids = set(obs_overwrite['person_id'])
obs_pids_notin_list = [int(x) for x in AC70_pids['pid'] if x not in existing_person_ids]
obs_pids_notin_list = tuple(obs_pids_notin_list)
obs_pids_in_list = [int(x) for x in AC70_pids['pid'] if x in existing_person_ids]
print(colored("This shows that none of person_ids in [AC70_pids] \n are in the observation table "
              "with observation_source_concept_id = 1384450 table).They are not in the hcau table either.", 'green'))
# # THESE ARE THE TWO QUERIES THAT WILL UPDATE THE FIELDS TO HCAU FIELDS-
update1_observation_table = pd.read_gbq('''
UPDATE `{PROJECT_ID}.{DATASET_ID}.observation`
SET observation_id = NULL,
person_id = person_id,
observation_concept_id = NULL,
observation_date = NULL,
observation_datetime = NULL,
observation_type_concept_id = NULL,
value_as_number = NULL,
value_as_string = NULL,
value_as_concept_id = NULL,
qualifier_concept_id = NULL,
unit_concept_id = NULL,
provider_id = NULL,
visit_occurrence_id = NULL,
observation_source_value = NULL,
observation_source_concept_id = NULL,
unit_source_value = NULL,
qualifier_source_value = NULL,
value_source_concept_id = NULL,
value_source_value = NULL,
questionnaire_response_id = NULL
WHERE observation_source_concept_id = 43528428
AND person_id IN {pids}'''.format(PROJECT_ID=PROJECT_ID, DATASET_ID=DATASET_ID, pids=obs_pids_notin_list),
dialect="standard")
# ###
#
# This next query gives this error because [obs_pids_in_list] is empty as said earlier.
#
# This should not be a problem when curation loads the correct list of pids in [AC70_pids <- read.csv("AC70_PIDs.csv")]
#
update2_observation_table = pd.read_gbq('''
WITH hcau as (SELECT * FROM `{PROJECT_ID}.{DATASET_ID}.observation` h WHERE h.observation_source_concept_id = 1384450)
UPDATE `{PROJECT_ID}.{DATASET_ID}.observation` as o
SET o.observation_id = hcau.observation_id,
o.person_id = o.person_id,
o.observation_concept_id = hcau.observation_concept_id,
o.observation_date = hcau.observation_date,
o.observation_datetime = hcau.observation_datetime,
o.observation_type_concept_id = hcau.observation_type_concept_id,
o.value_as_number = hcau.value_as_number,
o.value_as_string = hcau.value_as_string,
o.value_as_concept_id = hcau.value_as_concept_id,
o.qualifier_concept_id = hcau.qualifier_concept_id,
o.unit_concept_id = hcau.unit_concept_id,
o.provider_id = hcau.provider_id,
o.visit_occurrence_id = hcau.visit_occurrence_id,
o.observation_source_value = hcau.observation_source_value,
o.observation_source_concept_id = hcau.observation_source_concept_id,
o.unit_source_value = hcau.unit_source_value,
o.qualifier_source_value = hcau.qualifier_source_value,
o.value_source_concept_id = hcau.value_source_concept_id,
o.value_source_value = hcau.value_source_value,
o.questionnaire_response_id = hcau.questionnaire_response_id
# FROM (SELECT * FROM `{PROJECT_ID}.{DATASET_ID}.observation` h WHERE h.observation_source_concept_id = 1384450) as hcau
WHERE o.observation_source_concept_id = 43528428
AND person_id IN {pids})
'''.format(PROJECT_ID=PROJECT_ID, DATASET_ID=DATASET_ID, pids=obs_pids_notin_list),
dialect="standard")
|
# Filename: normalisation_fuc.py
# Description: normalisation function RIM, OMRI, ISOCOV
# Authors: <NAME>.
from numpy import *
import BWM as bwm
"""
Description :
RIM Normalisation : This normalization has been proposed by Cables et al. It is the first normalization approach defined to handle value constraints. This normalization proceeds by dividing the distance between the performance ratings by the distance between the maximum (or the minimum) performance rating and the reference ideal performance rating for that criterion. The reference ideal, given generally as an interval [A, B], represents the value constraints fixed by the user for the criterion.
OMRI Normalisation : Extension of RIM Normalisation.
ISOCOV Normalisation : Another extension of RIM Normalisation with the introduction of a new cost-benefit argument for each criterion.
Usage :
rim_normalisation(D, AB)
Arguments :
D : The decision matrix (m x n) with the values of the m alternatives, for the n criterion
AB :A matrix (2 x n). AB[0,:] corresponds with the A extrem, and AB[1,:] represents the B extrem of the domain of each criterion
is_it_benfit_then_it_would_be_cost : boolean matrix (2 x 1) with true for a benefit criterion and false if it is a cost criterion
Value :
It returns the new normalized decision matrix
References :
Examples :
"""
def rim_normalisation(D, AB):
    """RIM normalisation of a decision matrix.

    D  : (m x n) decision matrix of m alternatives over n criteria.
    AB : (2 x n) reference ideal; AB[0, j] / AB[1, j] are the A / B
         extremes of criterion j.
    Returns the normalised (m x n) matrix.
    """
    rows, cols = D.shape
    normalised = zeros([rows, cols])
    extremes = bwm.min_max(D)
    for j in range(cols):
        lower, upper = AB[0, j], AB[1, j]
        for i in range(rows):
            value = D[i, j]
            if lower <= value <= upper:
                # Inside the reference ideal interval.
                normalised[i, j] = 1
            elif extremes[j, 0] <= value <= lower:
                # Below the ideal: distance to A scaled by the [min, A] span.
                normalised[i, j] = 1 - (lower - value) / (lower - extremes[j, 0])
            else:
                # Above the ideal.  NOTE(review): the denominator reuses
                # extremes[j, 0] while omri_normalisation uses the other
                # column of bwm.min_max — confirm min_max's layout.
                normalised[i, j] = 1 - (value - upper) / (extremes[j, 0] - upper)
    return normalised
def omri_normalisation(D, AB):
    """OMRI normalisation (extension of RIM) of a decision matrix.

    D  : (m x n) decision matrix of m alternatives over n criteria.
    AB : (2 x n) reference ideal bounds per criterion.
    Returns the normalised (m x n) matrix.
    """
    rows, cols = D.shape
    normalised = zeros([rows, cols])
    extremes = bwm.min_max(D)
    for j in range(cols):
        lower, upper = AB[0, j], AB[1, j]
        # Shared denominator: the larger of the two out-of-ideal spans
        # (hoisted out of the inner loop; it only depends on the column).
        span = max(lower - extremes[j, 1], extremes[j, 0] - upper)
        for i in range(rows):
            value = D[i, j]
            if lower <= value <= upper:
                # Inside the reference ideal interval.
                normalised[i, j] = 1
            elif extremes[j, 1] <= value <= lower:
                normalised[i, j] = 1 - (lower - value) / span
            else:
                normalised[i, j] = 1 - (value - upper) / span
    return normalised
def isocov_normalisation(D, p, AB):
    """ISOCOV normalisation: RIM-style normalisation extended with a
    cost/benefit direction per criterion.

    D  : (m x n) decision matrix.
    p  : per-criterion direction flags; 'max' marks a benefit criterion,
         'min' a cost criterion (other values leave the cell at 0).
    AB : (2 x n) reference ideal; AB[0, j] / AB[1, j] bound criterion j.
    Returns the normalised (m x n) matrix.
    """
    D_temp = zeros([D.shape[0], D.shape[1]])
    # Per-column extremes of D.  NOTE(review): this function indexes both
    # d[j, 0] and d[j, 1]; the exact min/max layout of bwm.min_max is not
    # visible here — confirm it matches the bounds used in the elif tests.
    d = bwm.min_max(D)
    for i in range(D.shape[0]):
        for j in range(D.shape[1]):
            if p[j] == 'max':
                # Benefit criterion: 1 inside the ideal, linear decay
                # outside; the denominator is padded with +1.
                if (AB[0, j] <= D[i, j] <= AB[1, j]):
                    D_temp[i, j] = 1
                elif (d[j, 1] <= D[i, j] <= AB[0, j]):
                    D_temp[i, j] = 1 - (AB[0, j] - D[i, j]) / \
                        (max(AB[0, j] - d[j, 1], d[j, 0] - AB[1, j]) + 1)
                else:
                    D_temp[i, j] = 1 - (D[i, j] - AB[1, j]) / \
                        (max(AB[0, j] - d[j, 1], d[j, 0] - AB[1, j]) + 1)
            elif p[j] == 'min':
                # Cost criterion.  NOTE(review): the last two branches
                # divide by the *unpadded* max(...), which is zero when
                # both spans collapse — possible ZeroDivisionError; confirm
                # whether the +1 padding was meant here too.
                if (AB[0, j] <= D[i, j] <= AB[1, j]):
                    D_temp[i, j] = 1 / \
                        (max(AB[0, j] - d[j, 1], d[j, 0] - AB[1, j]) + 1)
                elif (d[j, 1] <= D[i, j] <= AB[0, j]):
                    D_temp[i, j] = (AB[0, j] - D[i, j]) / \
                        (max(AB[0, j] - d[j, 1], d[j, 0] - AB[1, j]))
                else:
                    D_temp[i, j] = (D[i, j] - AB[1, j]) / \
                        (max(AB[0, j] - d[j, 1], d[j, 0] - AB[1, j]))
    return D_temp
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
from django.db import models
from django.forms import modelformset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template import RequestContext, loader
from django.utils import timezone
from .models import Throw, Player, DailyLoser, Gladiator
class Statistics:
    """Aggregate Throw/DailyLoser/Gladiator querysets into ranked stats.

    Every category in ``statisticsdict`` maps to a list of
    ``(rank, player_name, value)`` tuples; ``TotalPlays`` is a scalar and
    ``mrcake`` a ``(category_title, player_name)`` tuple.
    """

    def __init__(self, throw_objects, loser_objects, gladiator_objects):
        """Store the source querysets and compute all statistics eagerly."""
        self.throw_objects = throw_objects
        self.loser_objects = loser_objects
        self.gladiator_objects = gladiator_objects
        self.statisticsdict = {}
        self.get_statistics()

    def get_statistics(self):
        """Build every ranking category into ``self.statisticsdict``."""
        # pk -> player name lookup used to label all dataframes.
        player_dict = {x[0]: x[1] for x in Player.objects.values_list()}
        throw_df = pd.DataFrame([x for x in self.throw_objects.values_list()])
        throw_df.columns = ['pk', 'name', 'result', 'date', 'round']
        throw_df['name'] = throw_df['name'].apply(lambda x: player_dict[x])
        loser_df = pd.DataFrame([x for x in self.loser_objects.values_list()]).iloc[:, [2, 3]]
        loser_df.columns = ['name', 'round']
        loser_df['name'] = loser_df['name'].apply(lambda x: player_dict[x])
        gladiator_df = pd.DataFrame([x for x in self.gladiator_objects.values_list()]).iloc[:, [2, 1, 3, 4]]
        gladiator_df.columns = ['name', 'date', 'result', 'round']
        gladiator_df['name'] = gladiator_df['name'].apply(lambda x: player_dict[x])
        throw_df_plays = throw_df.loc[:, ['name', 'result', 'date', 'round']]
        # One distinct date == one game day.
        plays_total = len(throw_df_plays.groupby(by='date').count())
        self.statisticsdict['TotalPlays'] = plays_total
        # Plays
        throw_df_plays.date = [x.date() for x in throw_df_plays.date]
        plays_df = throw_df_plays.groupby(by=['name', 'date']
                                          ).sum().groupby(level='name').count().sort_values(by='result', ascending=0)
        plays_df.columns = ['rel_plays', 'plays']
        plays = plays_df.loc[:, 'plays']
        plays_ranks = plays.rank(method='min', ascending=False).astype(int)
        self.statisticsdict['plays'] = [x for x in zip(plays_ranks, plays.index, plays)]
        # relative plays
        plays_df.loc[:, 'rel_plays'] = plays_df.loc[:, 'plays'] / plays_total
        rel_plays = plays_df.loc[:, 'rel_plays']
        rel_plays_ranks = rel_plays.rank(method='min', ascending=False).astype(int)
        self.statisticsdict['relativePlays'] = [x for x in zip(rel_plays_ranks, rel_plays.index, rel_plays)]
        # losses
        losses = loser_df.groupby(by='name').count().sort_values(by='round', ascending=0).iloc[:, 0]
        losses_rank = losses.rank(method='min', ascending=False).astype(int)
        self.statisticsdict['losses'] = [x for x in zip(losses_rank, losses.index, losses)]
        # relative losses (losses per played day; players without losses count 0)
        rel_losses_df = pd.concat([plays_df.plays, losses], axis=1).replace(to_replace=np.nan, value=0)
        rel_losses_unsorted = rel_losses_df.loc[:, 'round'] / rel_losses_df.loc[:, 'plays']
        rel_losses_sorted = rel_losses_unsorted.sort_values(axis=0, ascending=False)
        rel_losses = rel_losses_sorted.round(2)
        rel_los_ranks = rel_losses.rank(method='min', ascending=False).astype(int)
        self.statisticsdict['relativeLosses'] = [x for x in zip(rel_los_ranks, rel_losses.index, rel_losses)]
        # longest pointstreak and losingstreak: encode each player's results
        # as a digit string and measure the longest run of scores / zeros.
        pointstreak_df = throw_df_plays.sort_values(by=['name', 'date'])
        pointstreak_dict = {}
        zerostreak_dict = {}
        for player in throw_df_plays.loc[:, 'name'].unique():
            player_row = pointstreak_df.loc[:, 'name'] == player
            point_str = ''.join(str(x) for x in pointstreak_df.loc[:, 'result'][player_row])
            longest_pointstreak = max([len(x) for x in point_str.replace('0', ',').split(',')])
            longest_zerostreak = max([len(x) for x in point_str.replace('1', ','
                                                                       ).replace('2', ','
                                                                                 ).replace('3', ',').split(',')])
            pointstreak_dict[player] = longest_pointstreak
            zerostreak_dict[player] = longest_zerostreak
        pointstreak_sorted = pd.Series(pointstreak_dict).sort_values(axis=0, ascending=False)
        pointstreak_ranks = pointstreak_sorted.rank(method='min', ascending=False).astype(int)
        zerostreak_sorted = pd.Series(zerostreak_dict).sort_values(axis=0, ascending=False)
        zerostreak_ranks = zerostreak_sorted.rank(method='min', ascending=False).astype(int)
        self.statisticsdict['pointstreak'] = [x for x in zip(pointstreak_ranks,
                                                             pointstreak_sorted.index, pointstreak_sorted)]
        self.statisticsdict['zerostreak'] = [x for x in zip(zerostreak_ranks,
                                                            zerostreak_sorted.index, zerostreak_sorted)]
        # gladiator
        gladiator_points_df = gladiator_df.groupby(by='name').sum().sort_values(by='result', ascending=0)
        gladiator_points = gladiator_points_df.loc[:, 'result']
        glad_ranks = gladiator_points.rank(method='min', ascending=False).astype(int)
        self.statisticsdict['gladiatorPoints'] = [x for x in zip(glad_ranks,
                                                                 gladiator_points.index, gladiator_points)]
        # fluktuationsmonster (standard deviation of per-throw results)
        throw_df_plays.result = throw_df_plays.loc[:, 'result'].apply(int)
        stderivation = throw_df_plays.loc[:, ['name', 'result']].groupby(by='name').std().replace(np.nan, 0)
        stderivation_sorted = stderivation.sort_values(by='result', ascending=0)
        std_ranks = [int(x) for x in stderivation_sorted.rank(method='min', ascending=False).values]
        self.statisticsdict['stdev'] = [x for x in zip(std_ranks,
                                                       stderivation_sorted.index,
                                                       [round(x[0], 2) for x in stderivation_sorted.values])]
        # durchschnittspunktzahl (mean result)
        mean = throw_df_plays.loc[:, ['name', 'result']].groupby(by='name').mean()
        mean_sorted = mean.sort_values(by='result', ascending=0)
        mean_ranks = [int(x) for x in mean_sorted.rank(method='min', ascending=False).values]
        self.statisticsdict['mean'] = [x for x in zip(mean_ranks,
                                                      mean_sorted.index, [round(x[0], 2) for x in mean_sorted.values])]
        # angeber (count of 3-point throws)
        three_point_df = throw_df_plays.loc[:, ['name', 'result']].copy()
        three_point_ix = [i for i, x in enumerate(throw_df_plays.loc[:, 'result']) if x == 3]
        three_point_df_n = three_point_df.iloc[three_point_ix, :].groupby(by='name').count()
        three_point_df_sorted = three_point_df_n.sort_values(by='result', ascending=0)
        if len(three_point_df_n) > 0:
            three_point_ranks = [int(x) for x in three_point_df_sorted.rank(method='min', ascending=False).values]
            self.statisticsdict['threePoints'] = [x for x in zip(three_point_ranks,
                                                                 three_point_df_sorted.index,
                                                                 [int(x) for x in three_point_df_sorted.values])]
        # Nachsitzer (count of second-round appearances)
        second_round = throw_df_plays.loc[:, ['name', 'round']].copy()
        second_round_ix = [i for i, x in enumerate(throw_df_plays.loc[:, 'round']) if x == 2]
        second_round_df = second_round.iloc[second_round_ix, :].groupby(by='name').count()
        second_round_sorted = second_round_df.sort_values(by='round', ascending=0)
        ranks = [int(x) for x in second_round_sorted.rank(method='min', ascending=False).values]
        self.statisticsdict['secondRound'] = [x for x in zip(ranks,
                                                             second_round_sorted.index,
                                                             [int(x) for x in second_round_sorted.values])]
        # Norm guy: average normalized distance to the median over all
        # categories; 1 means maximally "normal".
        norm_guy = []
        norm_names = pd.DataFrame(index=pd.DataFrame(self.statisticsdict['mean']).iloc[:, 1])
        stat_keys = list(self.statisticsdict.keys())
        stat_keys.remove('TotalPlays')
        for key in stat_keys:
            temp = pd.DataFrame(self.statisticsdict[key])
            temp.columns = ['rank', 'name', 'result']
            temp.set_index('name', inplace=True)
            temp = norm_names.join(temp)
            temp.replace(np.nan, 0, inplace=True)
            score = np.sqrt((temp.result - np.median(temp.result)) ** 2)
            score_norm = score / score.max()
            norm_guy.append(score_norm)
        norm_guy_result_df = pd.concat(norm_guy, axis=1).mean(axis=1).sort_values()
        norm_guy_result_df = 1 - norm_guy_result_df
        norm_guy_ranks = [int(x) for x in norm_guy_result_df.rank(method='min', ascending=False).values]
        self.statisticsdict['norm_guy'] = [x for x in zip(norm_guy_ranks,
                                                          norm_guy_result_df.index,
                                                          [round(x, 2) for x in norm_guy_result_df.values])]
        # Get Cake Guy: pick a random category and crown its (randomly
        # tie-broken) rank-1 player.
        relevant_categories = list(self.statisticsdict.keys())
        relevant_categories.remove('TotalPlays')
        relevant_categories.remove('relativePlays')
        # FIX: np.random.random_integers is deprecated/removed in modern
        # NumPy; randint(low, high) with an exclusive high is the
        # documented replacement and selects the same index range.
        winner_categorie = relevant_categories[np.random.randint(0, len(relevant_categories))]
        categorie_dict = {
            'plays': u'Basketballjunkie',
            'losses': u'Kaffegott',
            'relativeLosses': u'Angestellter des Wasserkochers',
            'pointstreak': u'Goldenes Händchen',
            'zerostreak': u'Tennisarm',
            'stdev': u'Fluktuationsmonster',
            'mean': u'Solider Typ',
            'gladiatorPoints': u'Gladiator',
            'threePoints': u'Angeber',
            'secondRound': u'Nachsitzer',
            'norm_guy': u'Regular every day normal guy'
        }
        temp_df = pd.DataFrame(self.statisticsdict[winner_categorie])
        temp_df.columns = ['rank', 'name', 'result']
        # Number of players tied at rank 1 (rank values are 1, so the sum
        # is the count).
        rank1 = sum([x for x in temp_df['rank'] if x == 1])
        if rank1 > 1:
            # FIX: randint's high bound is exclusive; the previous
            # randint(0, rank1 - 1) could never pick the last tied winner.
            cake_baker = temp_df.name[np.random.randint(0, rank1)]
        else:
            cake_baker = temp_df.name[0]
        self.statisticsdict['mrcake'] = (categorie_dict[winner_categorie], cake_baker)
def game_list(request):
    '''creates the view for the list of throws

    Renders bb/game_list.html with the per-day throw counts of the last
    30 days and a flag telling whether a game already exists today.
    '''
    # get throws from last 30 days and group by date and make group count of throws
    # NOTE(review): QuerySet.extra() is a deprecated escape hatch in Django;
    # consider annotate(date=TruncDate('event_time')) when upgrading.
    latest_games = (Throw.objects.filter(event_time__gte=timezone.now().date() - datetime.timedelta(days=30)).
                    extra(select={'date': 'date(event_time)'}).
                    values('date').
                    order_by('-date').
                    annotate(n=models.Count("pk")))
    # is today in selection?
    exist_game_today = latest_games.filter(event_time__date=timezone.now().date())
    return render(request, 'bb/game_list.html', {'games': latest_games,
                                                 'exist_game_today': exist_game_today})
def next_round(request, round_nr):
    '''creates the view for all throws in a round

    round_nr 1 starts with all active players; later rounds keep only the
    players who shared the lowest score of the previous round.  When a
    single player remains, the loser and gladiator records for today are
    written and the user is redirected to the game list.
    '''
    # turn to integer as can be passed from template
    round_nr = int(round_nr)
    # get today's date
    eval_day = timezone.now().date()
    if round_nr == 1:
        # get all currently active players
        active_players = Player.objects.filter(is_active=True)
    else:
        # get players from last round
        last_round = Throw.objects.filter(event_time__date=eval_day, round=round_nr - 1)
        # get last rounds lowest score
        lowest_score = last_round.aggregate(models.Min('result'))['result__min']
        # get all player objects by a list of last rounds' players with lowest score
        active_players = Player.objects.filter(
            id__in=list(last_round.filter(result=lowest_score).values_list('player', flat=True)))
        if len(active_players) == 1:
            # if only player is left, sync other model entries loser and gladiator
            # TODO: remove redundancy
            # get last rounds loser
            loser_da = DailyLoser(day=eval_day,
                                  loser=active_players[0],
                                  round=round_nr - 1)
            loser_da.save()
            # get all last rounds' players with not lowest score
            glad_players = last_round.exclude(result=lowest_score).values('player', 'result')
            for glad_player in glad_players:
                # gladiator points: 1 base point plus half a point per
                # result point above 1
                gladiator_da = Gladiator(day=eval_day,
                                         gladiator=Player.objects.get(id=glad_player['player']),
                                         points=1 + (glad_player['result'] - 1) * 0.5,
                                         round=round_nr - 1)
                gladiator_da.save()
            # redirect to index view
            return redirect('game_list')
    # make factory of modelformsets
    # use only field result, scale by length of active players
    RoundFormSet = modelformset_factory(Throw, fields=('result',), extra=len(active_players))
    if request.method == 'POST':
        formset = RoundFormSet(request.POST)
        if formset.is_valid():
            # save commits to instance
            form_entries = formset.save(commit=False)
            # forms and players are matched positionally, so the template
            # must render them in the same order as active_players
            for form, player in zip(form_entries, active_players):
                # now add player and round_nr to instance
                form.player = player
                form.round = round_nr
                form.save()
            # redirect to this view reloaded with advanced round number
            return redirect('next_round', round_nr + 1)
    else:
        # populate formset with today's throws for specific round
        formset = RoundFormSet(queryset=Throw.objects.filter(event_time__date=eval_day, round=round_nr))
    return render(request, 'bb/round_input.html',
                  {'formset': formset, 'player_list': active_players, 'round_nr': round_nr})
def month_stat(request, year, month=None):
    """Render the statistics page for a whole year or a single month.

    Redirects to the game list when no loser records exist for the
    selected period (i.e. nothing to aggregate).
    """
    throw_filter = {'event_time__year': year}
    day_filter = {'day__year': year}
    if month:
        # Narrow both filters down to the requested month.
        throw_filter['event_time__month'] = month
        day_filter['day__month'] = month
    monthly_throws = Throw.objects.filter(**throw_filter)
    monthly_loser = DailyLoser.objects.filter(**day_filter)
    monthly_gladiator = Gladiator.objects.filter(**day_filter)
    if not monthly_loser:
        # Nothing recorded for this period -> back to the index view.
        return redirect('game_list')
    context_dict = Statistics(monthly_throws, monthly_loser, monthly_gladiator).statisticsdict
    context_dict['year'] = year
    context_dict['month'] = month
    return render(request, 'bb/monthly_stats.html', context_dict)
def player_stat(request, player):
    """Render per-player statistics, or bounce to the index if the player
    has never lost (no DailyLoser rows -> Statistics cannot be built).
    """
    player_throws = Throw.objects.filter(player__player_name=player)
    player_loser = DailyLoser.objects.filter(loser__player_name=player)
    player_gladiator = Gladiator.objects.filter(gladiator__player_name=player)
    if len(player_loser) == 0:
        return HttpResponseRedirect('/bb/')
    context_dict = Statistics(player_throws, player_loser, player_gladiator).statisticsdict
    context_dict['player_name'] = player
    # FIX: the previous loader.get_template(...).render(RequestContext(...))
    # pattern is deprecated (and rejected on Django >= 1.10, which expects a
    # plain dict); use the render() shortcut like the sibling views do.
    return render(request, 'bb/player_stats.html', context_dict)
|
"""Config flow for Logitech Squeezebox integration."""
import asyncio
import logging
from pysqueezebox import Server, async_discover
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
# pylint: disable=unused-import
from .const import DEFAULT_PORT, DOMAIN
_LOGGER = logging.getLogger(__name__)
TIMEOUT = 5
def _base_schema(discovery_info=None):
    """Generate base schema.

    When discovery info is available, its host/port are offered to the
    user as suggested values; otherwise plain required fields are shown.
    """
    discovered = discovery_info or {}
    fields = {}
    # Host: suggest the discovered address when we have one.
    if CONF_HOST in discovered:
        host_marker = vol.Required(
            CONF_HOST,
            description={"suggested_value": discovered[CONF_HOST]},
        )
    else:
        host_marker = vol.Required(CONF_HOST)
    fields[host_marker] = str
    # Port: always defaults to DEFAULT_PORT, suggestion added if discovered.
    if CONF_PORT in discovered:
        port_marker = vol.Required(
            CONF_PORT,
            default=DEFAULT_PORT,
            description={"suggested_value": discovered[CONF_PORT]},
        )
    else:
        port_marker = vol.Required(CONF_PORT, default=DEFAULT_PORT)
    fields[port_marker] = int
    # Credentials are optional in all cases.
    fields[vol.Optional(CONF_USERNAME)] = str
    fields[vol.Optional(CONF_PASSWORD)] = str
    return vol.Schema(fields)
class SqueezeboxConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Logitech Squeezebox."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

    def __init__(self):
        """Initialize an instance of the squeezebox config flow."""
        # Schema shown in the edit step; refreshed with suggested values
        # whenever a server is discovered.
        self.data_schema = _base_schema()
        self.discovery_info = None

    async def _discover(self, uuid=None):
        """Discover an unconfigured LMS server.

        Blocks until the discovery callback finds a server whose uuid is
        not already configured, then stores its host/port/uuid in
        self.discovery_info and refreshes self.data_schema.  Callers are
        expected to bound this with asyncio.wait_for (see async_step_user).
        """
        self.discovery_info = None
        discovery_event = asyncio.Event()

        def _discovery_callback(server):
            if server.uuid:
                # ignore already configured uuids
                for entry in self._async_current_entries():
                    if entry.unique_id == server.uuid:
                        return
                self.discovery_info = {
                    CONF_HOST: server.host,
                    CONF_PORT: server.port,
                    "uuid": server.uuid,
                }
                _LOGGER.debug("Discovered server: %s", self.discovery_info)
                discovery_event.set()

        discovery_task = self.hass.async_create_task(
            async_discover(_discovery_callback)
        )
        await discovery_event.wait()
        discovery_task.cancel()  # stop searching as soon as we find server
        # update with suggested values from discovery
        self.data_schema = _base_schema(self.discovery_info)

    async def _validate_input(self, data):
        """
        Validate the user input allows us to connect.

        Retrieve unique id and abort if already configured.
        Returns an error key string on failure, or None (implicitly) when
        the connection succeeded.
        """
        server = Server(
            async_get_clientsession(self.hass),
            data[CONF_HOST],
            data[CONF_PORT],
            data.get(CONF_USERNAME),
            data.get(CONF_PASSWORD),
        )

        try:
            status = await server.async_query("serverstatus")
            if not status:
                if server.http_status == HTTP_UNAUTHORIZED:
                    return "invalid_auth"
                return "cannot_connect"
        except Exception:  # pylint: disable=broad-except
            return "unknown"

        if "uuid" in status:
            await self.async_set_unique_id(status["uuid"])
            self._abort_if_unique_id_configured()

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        errors = {}
        if user_input and CONF_HOST in user_input:
            # update with host provided by user
            self.data_schema = _base_schema(user_input)
            return await self.async_step_edit()
        # no host specified, see if we can discover an unconfigured LMS server
        try:
            await asyncio.wait_for(self._discover(), timeout=TIMEOUT)
            return await self.async_step_edit()
        except asyncio.TimeoutError:
            errors["base"] = "no_server_found"
        # display the form
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema({vol.Optional(CONF_HOST): str}),
            errors=errors,
        )

    async def async_step_edit(self, user_input=None):
        """Edit a discovered or manually inputted server."""
        errors = {}
        if user_input:
            error = await self._validate_input(user_input)
            if not error:
                return self.async_create_entry(
                    title=user_input[CONF_HOST], data=user_input
                )
            errors["base"] = error
        # Re-display the form with any validation error.
        return self.async_show_form(
            step_id="edit", data_schema=self.data_schema, errors=errors
        )

    async def async_step_import(self, config):
        """Import a config flow from configuration."""
        error = await self._validate_input(config)
        if error:
            return self.async_abort(reason=error)
        return self.async_create_entry(title=config[CONF_HOST], data=config)

    async def async_step_discovery(self, discovery_info):
        """Handle discovery."""
        _LOGGER.debug("Reached discovery flow with info: %s", discovery_info)
        if "uuid" in discovery_info:
            await self.async_set_unique_id(discovery_info.pop("uuid"))
            self._abort_if_unique_id_configured()
        else:
            # attempt to connect to server and determine uuid. will fail if password required
            error = await self._validate_input(discovery_info)
            if error:
                await self._async_handle_discovery_without_unique_id()
        # update schema with suggested values from discovery
        self.data_schema = _base_schema(discovery_info)
        self.context.update({"title_placeholders": {"host": discovery_info[CONF_HOST]}})
        return await self.async_step_edit()
|
"""
This module contains a class that bundles several approaches to visualize the results of the variations of
the 'SeqClu' algorithm that are contained in the package.
NOTE: This class has actually never been used during the research project and therefore needs major modifications
to make it compatible with the rest of the framework.
"""
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from IPython import display
import pandas as pd
import seaborn as sns
from sklearn.manifold import TSNE
matplotlib.use("TkAgg")
class Visualizer:
    """Bundle of plotting helpers for inspecting 'SeqClu' clustering results.

    See the module docstring: this class has never been exercised in the
    research project and likely needs adaptation before reuse.
    """

    def __init__(self, classes, distribution, labels, indices, result, data, classDictionary,
                 numPrototypes, numClusters) -> None:
        # classes / labels / indices: ground-truth metadata per sequence.
        # distribution: per-sequence feature rows fed to TSNE/heatmaps.
        # result: cluster id assigned to each sequence.
        # classDictionary: label -> class index, used to pick palette colors.
        # NOTE(review): numClusters is stored as numClasses — presumably the
        # two are equal in this project; confirm before reuse.
        self.classes = classes
        self.distribution = distribution
        self.labels = labels
        self.numPrototypes = numPrototypes
        self.numClasses = numClusters
        self.indices = indices
        self.result = result
        self.data = data
        self.classDictionary = classDictionary

    def visualizeInputData(self) -> None:
        """
        This method visualizes the input data in two dimensions.

        :return: void
        """
        fig = plt.figure(figsize=(10, 10))
        plt.title('Raw data')
        # Project the raw feature rows down to 2-D for display.
        X_embedded = TSNE(random_state=42, n_components=2).fit_transform(self.distribution)
        # plt.scatter(*X_embedded)
        pal = sns.color_palette("hls", self.numClasses)  # 3 classes, hence 3 colors
        for i, txt in enumerate(self.labels):
            plt.scatter(X_embedded.T[0][i], X_embedded.T[1][i], color=pal[self.classDictionary[txt]])
            plt.annotate(i, (X_embedded.T[0][i], X_embedded.T[1][i]), color=pal[self.classDictionary[txt]], alpha=0.2)
        # Color = class, annotation = Sequence ID
        plt.show()

    def visualizeClustersAsTSNE(self) -> None:
        """
        This method visualizes the clusters as TSNE-graphs.

        :return: void
        """
        fig = plt.figure(figsize=(10, 10))
        plt.title('Clustered data')
        X_embedded = TSNE(random_state=42, n_components=2).fit_transform(self.distribution)
        # plt.scatter(*X_embedded)
        # One palette entry per distinct cluster id.
        pal = sns.color_palette("hls", len(set(self.result)))
        # ann = [x for x,y in enumerate(X)]
        for i, txt in enumerate(self.indices):
            plt.scatter(X_embedded.T[0][i], X_embedded.T[1][i], color=pal[self.result[i]])
            plt.annotate(txt, (X_embedded.T[0][i], X_embedded.T[1][i]), color=pal[self.result[i]], alpha=0.2)
        plt.show()
        # plt.savefig('clus.png')

    def visualizeClustersAsHeatMaps(self) -> None:
        """
        This method visualizes the clusters as heatmaps.

        :return: void
        """
        # Show clusters as heatmaps (does not work too great for hand-written data)
        # Group sequence indices by their assigned cluster.
        clusterdata = [[] for x in range(self.numClasses)]
        for idx, clus in enumerate(self.result):
            clusterdata[clus].append(idx)
        for cnum in range(len(clusterdata)):
            values = [self.distribution[idx] for idx in clusterdata[cnum]]
            fig = plt.figure(figsize=(10, 5))
            df = pd.DataFrame(values, index=clusterdata[cnum])
            plt.title('ClusterStore: ' + str(cnum))
            ax = sns.heatmap(df, center=0.0, xticklabels=False)
            ax.set_yticks(np.arange(len(clusterdata[cnum])))
            ax.set_yticklabels(clusterdata[cnum])
            plt.setp(ax.get_yticklabels(), rotation=0)
            plt.xlabel('Time ->')
            plt.ylabel('Trajectory id')
            plt.show()

    def simulateClusteringProcess(self) -> None:
        """
        This method makes multiple plots that replay the clustering process step-by-step.

        :return: void
        """
        # Simulates how the clustering happened
        # TODO: Fix the extra plots showing up at the end
        # Embed once, then replay by showing growing prefixes of the data.
        X_embedded_ = TSNE(random_state=42, n_components=2).fit_transform(self.distribution)
        for end in range(1, len(self.result)):
            fig = plt.figure(figsize=(18, 10))
            X_embedded = X_embedded_[0:end]
            ann = [x for x, y in enumerate(self.data)][0:end]
            pal = sns.color_palette("hls", len(set(self.result)))
            plt.subplot(1, 2, 1)
            sns.heatmap(self.distribution[0:end], center=0.0)
            plt.subplot(1, 2, 2)
            plt.scatter(X_embedded.T[0], X_embedded.T[1], color=[pal[c] for c in self.result[0:end]])
            for i, txt in enumerate(ann):
                plt.scatter(X_embedded.T[0][i], X_embedded.T[1][i], color=pal[self.result[i]])
                plt.annotate(txt, (X_embedded.T[0][i], X_embedded.T[1][i]), color=pal[self.result[i]])
            display.clear_output(wait=True)
            display.display(plt.gcf())
            time.sleep(0.01)  # change the rate of rendering
|
<filename>pycity_calc/economic/energy_sys_cost/bat_cost.py
#!/usr/bin/env python
# coding=utf-8
"""
Script to estimate cost of electric batteries
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def calc_spec_cost_bat(cap, method='sma'):
    """
    Calculate specific battery cost in Euro / kWh.

    Parameters
    ----------
    cap : float
        Capacity of battery in kWh (must be larger than zero)
    method : str, optional
        Method for calculation (default: 'sma')
        Options:
        - 'carmen'
        based on:
        Centrales Agrar-Rohstoff Marketing- und Energie-Netzwerk,
        Marktübersicht Batteriespeicher, 2015.
        - 'sma' (#81)
        based on:
        http://www.photovoltaik4all.de/pv-strompeicher-kaufen-als-komplettset
        Discrete values have been fitted into potential function

    Returns
    -------
    spec_bat : float
        Specific cost of battery in Euro / kWh

    Raises
    ------
    ValueError
        If cap is not larger than zero or method is unknown.
    """
    # Validate inputs with explicit exceptions; the previous `assert`
    # statements were silently stripped when running with `python -O`.
    if cap <= 0:
        raise ValueError('Capacity has to be larger than zero.')
    if method not in ('carmen', 'sma'):
        raise ValueError('Unknown method: ' + str(method))
    if method == 'carmen':
        # Linear cost model (5000 Euro base + 1225 Euro/kWh), per kWh.
        return (5000 + 1225 * cap) / cap
    # 'sma': potential function fitted to discrete catalogue prices.
    return 2094.9 * (cap ** -0.521)
def calc_invest_cost_bat(cap, method='sma'):
    """
    Calculate investment cost of electric batteries in Euro.

    Parameters
    ----------
    cap : float
        Capacity of battery in kWh
    method : str, optional
        Cost model forwarded to ``calc_spec_cost_bat``
        (default: 'sma'); either 'carmen' or 'sma'.

    Returns
    -------
    invest_bat : float
        Investment cost for battery in Euro
    """
    # Total investment = capacity times the model's specific cost (Euro/kWh).
    specific_cost = calc_spec_cost_bat(cap=cap, method=method)
    return cap * specific_cost
if __name__ == '__main__':
    # Small demo: print both cost models for a 5 kWh battery, then plot
    # the investment curves over a 1..19 kWh range.
    bat_cap = 5  # kWh
    list_methods = ['sma', 'carmen']
    for method in list_methods:
        # Specific cost (Euro/kWh) per estimation method.
        spec_cost = calc_spec_cost_bat(cap=bat_cap, method=method)
        print(f'Specific cost for battery in Euro/kWh (method {method}):')
        print(round(spec_cost, 2))
        print()
    print('##################################')
    for method in list_methods:
        # Total investment (Euro) per estimation method.
        inv_cost = calc_invest_cost_bat(cap=bat_cap, method=method)
        print(f'Investment cost for battery in Euro (method {method}):')
        print(round(inv_cost, 2))
        print()
    capacities = np.arange(start=1, stop=20, step=1)
    invest_carmen = np.array([calc_invest_cost_bat(cap=c, method='carmen')
                              for c in capacities])
    invest_sma = np.array([calc_invest_cost_bat(cap=c, method='sma')
                           for c in capacities])
    plt.plot(capacities, invest_carmen, label='carmen')
    plt.plot(capacities, invest_sma, label='sma')
    plt.xlabel('Electric capacity in kWh')
    plt.ylabel('Investment in Euro')
    plt.legend()
    plt.show()
    plt.close()
|
""" ensure the development environment is sane
be careful about imports here:
"""
# Copyright (c) 2021 <NAME>.
# Distributed under the terms of the Modified BSD License.
import json
import os
import re
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from pprint import pprint
from . import project as P
# Any character NOT in [a-zA-Z0-9_-./\] makes a path "bad".
BAD_PATH_RE = r"[^a-zA-Z\d_\-\.\\/]"
# Suggested checkout / miniconda locations, per platform.
ROOT_RECOMMEND = (
    f"c:\\git\\{P.PY_PKG}" if P.WIN else os.path.expanduser(f"~/git/{P.PY_PKG}")
)
MC3_RECOMMEND = "c:\\mc3" if P.WIN else os.path.expanduser("~/mc3")
# Windows tooling breaks on long paths far sooner than POSIX does.
ARBITRARY_PATH_LENGTH = 32 if P.WIN else 64
# Sentinel for environment variables that are not set.
NOT_DEFINED = "!NOT DEFINED!"
DEFAULT_KERNEL_NAME = "python3"
# Header lines every source file must carry (checked in preflight_release).
COPYRIGHT = f"Copyright (c) {datetime.now().year} <NAME>."
LICENSE = "Distributed under the terms of the Modified BSD License."
def check_path(path, name=None, message=None, check_len=False):
    """Validate a filesystem path for sanity.

    Checks that the path is defined, optionally that it is not overly long
    (ARBITRARY_PATH_LENGTH), and that it contains no characters outside the
    allowed set (BAD_PATH_RE).

    Args:
        path: the path (str or Path) to check, or the NOT_DEFINED sentinel
        name: optional human-readable label used in progress output
        message: remediation hint recorded alongside any errors
        check_len: when True, also flag paths longer than ARBITRARY_PATH_LENGTH

    Returns:
        A one-element list with an error record dict, or an empty list if sane.
    """
    print(f"Checking sanity of {name or path}...", flush=True)
    errors = {}

    if path == NOT_DEFINED:
        # Fixed: used the key "not defined" here but "not_defined" below;
        # unified on "not_defined" so reports are machine-greppable.
        errors["not_defined"] = path
    else:
        # Only validate the post-drive part: a Windows drive letter's ":"
        # would otherwise trip the bad-character regex.
        _drive, rest = os.path.splitdrive(str(Path(path).resolve()))
        path_len = len(str(path))

        if not path_len:
            errors["not_defined"] = True
        elif check_len and path_len > ARBITRARY_PATH_LENGTH:
            errors["length"] = path_len

        bad_path_matches = re.findall(BAD_PATH_RE, rest)
        if bad_path_matches:
            errors["bad_characters"] = bad_path_matches

    if errors:
        print(f"... {len(errors)} problems with {name or path}")
        return [{"path": str(path), "message": str(message), "errors": errors}]

    return []
def check_drives(path_a, path_b, message):
    """Verify two paths live on the same drive (only meaningful on Windows).

    Args:
        path_a: first path
        path_b: second path
        message: remediation hint recorded alongside any error

    Returns:
        A one-element list with an error record dict when the drives differ,
        otherwise an empty list.
    """
    print(f"Checking drives of '{path_a}' and '{path_b}'...")
    a_drive, a_rest = os.path.splitdrive(str(path_a))
    # Fixed: previously split path_a twice, so the two drives always compared
    # equal and this check could never fail.
    b_drive, b_rest = os.path.splitdrive(str(path_b))

    if a_drive != b_drive:
        print("...drives are no good")
        return [{"paths": [path_a, path_b], "message": str(message)}]

    return []
def preflight_conda():
    """this should only run from the `base` env

    Checks repo location, conda prefix, and that both share a drive.
    Returns the number of problems found (0 = OK).
    """
    conda_prefix = os.environ.get("CONDA_PREFIX", NOT_DEFINED)

    errors = [
        *check_path(
            path=P.ROOT,
            name="repo location",
            message=f"please check out to a sane location, e.g {ROOT_RECOMMEND}",
            check_len=True,
        ),
        *check_path(
            # Reuse the prefix fetched above instead of reading the env twice.
            path=conda_prefix,
            message=(
                "please install and activate miniconda3 in a sane location"
                f" e.g. {MC3_RECOMMEND}"
            ),
            check_len=True,
        ),
        *check_drives(
            P.ROOT,
            conda_prefix,
            "please ensure miniconda3 and this repo are on the same"
            " physical drive/volume",
        ),
    ]

    if errors:
        pprint(errors)
    else:
        # Fixed: the OK banner used to print even when problems were found.
        print(">>> OK conda!")

    return len(errors)
def preflight_build():
    """Ensure every resolution in yarn.lock uses https.

    Returns the number of offending lines (0 = OK).
    """
    yarn_lock_errors = []

    for line in P.YARN_LOCK.read_text(encoding="utf-8").splitlines():
        if line.strip().startswith("resolved ") and "https://" not in line:
            # Fixed: `yarn_lock_errors += line` extended the list with the
            # line's individual characters; append the whole line instead.
            yarn_lock_errors.append(line)

    if yarn_lock_errors:
        print(f"Encountered non-https resolutions in {P.YARN_LOCK}")
        print("\n".join(yarn_lock_errors))
        print(
            """Perhaps try:

    rm -rf node_modules .yarn-packages yarn.lock
    anaconda-project run jlpm cache clean
    anaconda-project run jlpm
    doit preflight:build
    """
        )

    return len(yarn_lock_errors)
def preflight_kernel():
    """this should only run from the `dev` env

    Verifies the default kernel exists and points at this interpreter.
    Returns 0 on success, a nonzero code identifying the failure otherwise.
    """
    print("Checking kernel list...", flush=True)
    listing = subprocess.check_output(["jupyter", "kernelspec", "list", "--json"])
    kernelspecs = json.loads(listing.decode("utf-8"))["kernelspecs"]

    print(f"Checking {DEFAULT_KERNEL_NAME}...", flush=True)
    spec = kernelspecs.get(DEFAULT_KERNEL_NAME)
    if spec is None:
        print(f"The {DEFAULT_KERNEL_NAME} kernel is not available at all!")
        return 1

    print(f"Checking {DEFAULT_KERNEL_NAME} python...", flush=True)
    # argv[0] of the kernelspec is the python executable the kernel launches.
    kernel_py = spec["spec"]["argv"][0]
    if Path(kernel_py).resolve() != Path(sys.executable).resolve():
        pprint(kernel_py)
        print(f"The {DEFAULT_KERNEL_NAME} does not use {sys.executable}!")
        return 2

    print(">>> OK kernel!")
    return 0
def preflight_lab():
    """Check that our lab extension is listed as OK and enabled.

    Returns 0 when found, 1 otherwise.
    """
    proc = subprocess.Popen(
        ["jupyter", "labextension", "list"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = proc.communicate()
    ext_name = P.JS_PACKAGE_DATA["name"]

    # jupyter writes the listing partly to stderr, so scan both streams.
    listing = (out + err).decode("utf-8")
    for line in listing.splitlines():
        if ext_name in line and "OK" in line and "enabled" in line:
            print(">>> OK lab")
            return 0

    print("The labextension is not enabled")
    return 1
def preflight_release():
    """Check CHANGELOG entries and copyright/license headers before a release.

    Returns the number of problems found (0 = OK).
    """
    problems = []

    changelog = P.CHANGELOG.read_text(encoding="utf-8")

    print("Checking CHANGELOG...", flush=True)
    changelog_versions = [
        f"## {P.PY_PKG} {P.PY_VERSION}",
        "## {name} {version}".format(**P.JS_PACKAGE_DATA),
    ]
    for version in changelog_versions:
        if version not in changelog:
            problems += [f"- Not found in CHANGELOG.md: {version}"]

    print("Checking copyright/license headers...")
    for any_src in [*P.ALL_PY, *P.ALL_CSS, *P.ALL_TS, *P.ALL_YML, P.SETUP_CFG]:
        # Fixed: read with an explicit encoding, consistent with every other
        # read_text call in this module (avoids locale-dependent decoding).
        any_text = any_src.read_text(encoding="utf-8")
        if COPYRIGHT not in any_text:
            problems += [f"{any_src.relative_to(P.ROOT)} missing copyright info"]
        if LICENSE not in any_text:
            problems += [f"{any_src.relative_to(P.ROOT)} missing license info"]

    print(len(problems), "problem(s) found")
    if problems:
        # A plain loop instead of a side-effect list comprehension.
        for problem in problems:
            print(problem)

    return len(problems)
def preflight(stage):
    """Dispatch to the named preflight stage; returns its problem count.

    Unknown stages report an error and return 1.
    """
    stages = {
        "conda": preflight_conda,
        "build": preflight_build,
        "kernel": preflight_kernel,
        "lab": preflight_lab,
        "release": preflight_release,
    }
    runner = stages.get(stage)
    if runner is None:
        print(f"Don't know how to preflight: {stage}")
        return 1
    return runner()
if __name__ == "__main__":
    # CLI entry point: the exit code is the stage's problem count (0 = OK).
    sys.exit(preflight(sys.argv[1]))
|
<filename>src/doc-reST/confluencize.py
#!/usr/bin/env python
import string
from glob import glob
from xml.etree.ElementTree import ElementTree, tostring
def warn_missing(element):
    # Emit a loud stdout marker for docutils node types we have no handler for.
    # NOTE(review): relies on the module-level `action` dict defined below
    # being populated before the first call (it is, at import time).
    if element.tag not in action: print '============( %s )============' % element.tag
    pass
# Maps a docutils XML tag to a renderer: (element, context) -> (Confluence
# wiki markup, walk_children flag).  When the flag is False the element's
# subtree is considered fully rendered and is not descended into further.
action = {
    'title': lambda element, context: ('\nh%s. %s\n\n' % (context['header_depth'], decode_inline(element, context)), False),
    'paragraph': lambda element, context: ('%s%s' % (decode_inline(element, context), context['paragraph_newlines']), False),
    'list_item': lambda element, context: ('%s ' % context['list_prefix'], True),
    'reference': lambda element, context: ('[%s|%s]' % (element.text, element.get('refuri', '')), True),
    'literal_block': lambda element, context: ('\n{code}\n%s\n{code}\n\n' % element.text, False),
    'block_quote': lambda element, context: ('\n{quote}\n%s\n{quote}\n\n' % element.text, True),
    'strong': lambda element, context: ('*%s*' % element.text, True),
    'literal': lambda element, context: ('{{%s}}' % element.text, True),
    'emphasis': lambda element, context: ('_%s_' % element.text, True),
    'image': lambda element, context: ('\n{note:title=There was an image}%s{note}\n\n' % element.attrib['uri'], False),
    'term': lambda element, context: ('%s ' % context['list_prefix'], True),
    'table': lambda element, context: ('\n{note:title=Table to format}{code}%s{code}{note}\n\n' % tostring(element), False)
}
# Rendering state threaded through the tree walk; copied (not mutated) at
# each nesting level so sibling subtrees do not see each other's changes.
initial_context = {
    'header_depth': 1,
    'list_prefix' : '',
    'paragraph_newlines': '\n\n'
}
def decode_inline(element, context):
    """Render an element's text, its inline children, and its tail as one string.

    Children with a registered handler are rendered via `action`; unknown
    children are recursed into so their text is not lost.
    """
    warn_missing(element)
    text = element.text
    if text is None: text = ''
    tail = element.tail
    if tail is None: tail = ''
    subs = []
    for child in element.getchildren():  # getchildren(): old ElementTree / Python 2 API
        if child.tag in action:
            output = action[child.tag](child, context)[0]
            subs.append(output)
        else:
            subs.append(decode_inline(child, context))
    sub = string.join(subs, '')  # Python 2 `string` module join
    return '%s%s%s' % (text, sub, tail)
def convert(source, target):
    """Convert one docutils XML file to Confluence wiki markup.

    :param source: path of the docutils XML input
    :param target: path of the wiki-markup output file (overwritten)
    """
    tree = ElementTree()
    tree.parse(source)
    out = open(target, "w")
    def walk(element, context):
        # Render this element, then (optionally) its children with a context
        # adjusted for nesting (header depth, list prefixes, newline style).
        warn_missing(element)
        if element.tag in action:
            output, walk_children = action[element.tag](element, context)
            if output is not None:
                print output  # NOTE(review): also echoes to stdout -- looks like leftover debugging; confirm before removing
                out.write(output)
            if not walk_children:
                return
        for child in element.getchildren():
            new_context = dict(context)  # shallow copy so siblings are unaffected
            if element.tag == 'section':
                new_context['header_depth'] = new_context['header_depth'] + 1
            elif element.tag == 'bullet_list':
                new_context['list_prefix'] = new_context['list_prefix'] + '*'
                new_context['paragraph_newlines'] = '\n'
            elif element.tag == 'enumerated_list':
                new_context['list_prefix'] = new_context['list_prefix'] + '#'
                new_context['paragraph_newlines'] = '\n'
            elif element.tag == 'definition_list':
                new_context['list_prefix'] = new_context['list_prefix'] + '*'
                new_context['paragraph_newlines'] = '\n'
            walk(child, new_context)
    for child in tree.getroot().getchildren():
        walk(child, initial_context)
    out.close()
if __name__ == '__main__':
    # Convert every docutils XML dump under xml/ to a sibling .txt wiki file.
    for xml_file in glob('xml/*.xml'):
        convert(xml_file, xml_file[0:-4] + '.txt')
|
#!/usr/bin/python
#
import sys, os ;
import datetime;
from bisect import bisect;
# Command-line usage text, shown by -h/--help and on bad invocations.
usage="""
Usage: copy_new_entry.py pdb_list pdb_dcp_list sourcedir targetdir
Options:
  -h|--help : print this help message and exit
  -v : verbose
Created 2011-03-16, updated 2011-03-16, Nanjiang
"""

def PrintHelp():
    # Python 2 print statement: emit the usage text above.
    print usage

# Bail out immediately when called with no arguments at all.
numArgv=len(sys.argv)
if numArgv < 2:
    PrintHelp()
    sys.exit()

# Global option flags and the positional-argument accumulator.
isQuiet=False
argList=[]; # supplied files
isPrintVerbose=False;
def mybinaryfind(sortedList, key, lo=0, hi=-1): #{{{
    """Return True if `key` is present in `sortedList` (binary search).

    A negative `hi` (the default) means "search to the end of the list".
    The old code forwarded hi=-1 straight into bisect(), which only works
    because CPython's C implementation treats -1 as a sentinel for len(a);
    the pure-Python bisect would silently search an empty range and always
    return False.  Normalize here instead of relying on that detail.
    """
    if hi < 0:
        hi = len(sortedList)
    i = bisect(sortedList, key, lo, hi)
    # bisect returns the insertion point just AFTER any equal elements, so
    # the candidate match is the element immediately to the left.
    return i > 0 and sortedList[i-1] == key
#}}}
def ReadInList(inFile): #{{{
    """Parse a PDB listing file into a dict keyed by pdb code.

    Each non-blank line has the form "<filename> <yyyy-mm-dd>"; the pdb code
    is taken from characters 3:7 of the basename (e.g. pdb1abc.ent.gz ->
    "1abc").

    Returns: {pdbcode: {'filename': str, 'pdbcode': str,
                        'date': datetime.date}}
    """
    pdblist = {}
    with open(inFile, "r") as fpin:
        for line in fpin:
            strs = line.split()
            # Fixed: blank lines contain "\n" and are truthy, so the old
            # `if line:` guard let them through and strs[0] raised IndexError.
            if not strs:
                continue
            filename = strs[0]
            basename = os.path.basename(filename)
            pdbcode = basename[3:7]
            datestrs = strs[1].split('-')
            pdblist[pdbcode] = {
                'filename': filename,
                'pdbcode': pdbcode,
                'date': datetime.date(int(datestrs[0]), int(datestrs[1]), int(datestrs[2])),
            }
    return pdblist
#}}}
def CopyNewEntry(pdbcodelist_copy, sourcedir, targetdir):#{{{
    """Copy each listed entry from sourcedir into targetdir and gunzip it."""
    for pdbcode in pdbcodelist_copy:
        # Entries are sharded by the middle two characters of the code.
        shard = pdbcode[1:3]
        gzfilename = "pdb" + pdbcode + ".ent.gz"
        sfile = sourcedir + os.sep + shard + os.sep + gzfilename
        tdir = targetdir + os.sep + shard
        os.system("mkdir -p " + tdir)
        os.system("cp -f " + sfile + " " + tdir + os.sep)
        os.system("gzip -dN -f " + tdir + os.sep + gzfilename)
#}}}
def DeleteEntry(pdbcodelist_delete, targetdir):#{{{
    """Remove each listed entry's unpacked .ent file from targetdir."""
    for pdbcode in pdbcodelist_delete:
        shard = pdbcode[1:3]
        victim = targetdir + os.sep + shard + os.sep + "pdb" + pdbcode + ".ent"
        os.remove(victim)
#}}}
def GetUpdateList(pdbList, pdb_dcpList):#{{{
    """Compare the remote (pdbList) and local (pdb_dcpList) listings.

    Returns (pdbcodelist_copy, pdbcodelist_delete):
      - delete: local entries that no longer exist remotely
      - copy:   remote entries missing locally, or whose remote date is at
                least 90 days newer than the local copy's date
    """
    pdbcodelist_copy = []
    pdbcodelist_delete = []
    DT = datetime.timedelta(days=90)  # "significantly newer" threshold

    # Fixed: dict.keys() is a view in Python 3 and has no .sort(); sorted()
    # works in both Python 2 and 3 and yields the sorted list that
    # mybinaryfind() requires.
    dcp_pdbcodelist = sorted(pdb_dcpList)
    pdb_pdbcodelist = sorted(pdbList)

    # Remove local items that are not in the remote source.
    for pdbcode in pdb_dcpList:
        if not mybinaryfind(pdb_pdbcodelist, pdbcode):
            pdbcodelist_delete.append(pdbcode)

    # For remote items: copy when missing locally, or when markedly newer.
    for pdbcode in pdbList:
        if mybinaryfind(dcp_pdbcodelist, pdbcode):
            date1 = pdbList[pdbcode]['date']
            date2 = pdb_dcpList[pdbcode]['date']
            if date1 > date2 and date1 - date2 >= DT:
                pdbcodelist_copy.append(pdbcode)
        else:
            pdbcodelist_copy.append(pdbcode)

    return (pdbcodelist_copy, pdbcodelist_delete)
#}}}
# ---- hand-rolled option parsing (this file predates argparse usage) ----
i = 1
isNonOptionArg=False
while i < numArgv:
    if isNonOptionArg == True:
        # Previous token was "--": treat this one as positional regardless.
        argList.append(sys.argv[i])
        isNonOptionArg=False;
        i = i + 1;
    elif sys.argv[i] == "--":
        isNonOptionArg=True
        i = i + 1;
    elif sys.argv[i][0] == "-":
        if sys.argv[i] == "-h" or sys.argv[i] == "--help":
            PrintHelp()
            sys.exit()
        elif sys.argv[i] == "-q":
            isQuiet=True
            i = i + 1;
        elif sys.argv[i] == "-v":
            isPrintVerbose=True;
            i = i + 1;
        else:
            print "Error! Wrong argument:", sys.argv[i];
            sys.exit();
    else:
        argList.append(sys.argv[i]);
        i = i + 1;

# Exactly four positionals: remote list, local list, source dir, target dir.
if len(argList) != 4:
    print >> sys.stderr,"Argument error, four arguments should be supplied";
    sys.exit();

pdbListFile=argList[0];
pdb_dcpListFile=argList[1];
sourcedir=argList[2];
targetdir=argList[3];

try:
    (pdbList ) = ReadInList(pdbListFile);
    (pdb_dcpList ) = ReadInList(pdb_dcpListFile);
    (pdbcodelist_copy, pdbcodelist_delete) = GetUpdateList(pdbList, pdb_dcpList);
    if isPrintVerbose:#{{{
        print "To be copied ", len(pdbcodelist_copy);
        for pdbcode in pdbcodelist_copy:
            print pdbcode, pdbList[pdbcode]['filename'], pdbList[pdbcode]['date'];
        print
        print "To be deleted", len(pdbcodelist_delete);
        for pdbcode in pdbcodelist_delete:
            print pdbcode, pdb_dcpList[pdbcode]['filename'], pdb_dcpList[pdbcode]['date'];
    #}}}
    CopyNewEntry(pdbcodelist_copy, sourcedir, targetdir);
    DeleteEntry(pdbcodelist_delete, targetdir);
except:
    # NOTE(review): bare except, but it re-raises after a marker line, so no
    # error is swallowed.  `print >> sys.stderr` makes this Python 2 only.
    print >> sys.stderr,"except";
    raise;
|
<reponame>Michal-Gagala/sympy
from sympy.core.function import diff
from sympy.core.numbers import (I, pi)
from sympy.core.symbol import Symbol
from sympy.functions.elementary.complexes import conjugate
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (cos, cot, sin)
from sympy.functions.special.spherical_harmonics import Ynm, Znm, Ynm_c
def test_Ynm():
    """Spherical harmonics Ynm: explicit low-order closed forms, angular
    derivatives, conjugation, and parity/symmetry identities.

    Reference values: https://en.wikipedia.org/wiki/Spherical_harmonics
    """
    th, ph = Symbol("theta", real=True), Symbol("phi", real=True)
    from sympy.abc import n,m

    # Explicit closed forms for l = 0, 1, 2.
    assert Ynm(0, 0, th, ph).expand(func=True) == 1/(2*sqrt(pi))
    assert Ynm(1, -1, th, ph) == -exp(-2*I*ph)*Ynm(1, 1, th, ph)
    assert Ynm(1, -1, th, ph).expand(func=True) == sqrt(6)*sin(th)*exp(-I*ph)/(4*sqrt(pi))
    assert Ynm(1, 0, th, ph).expand(func=True) == sqrt(3)*cos(th)/(2*sqrt(pi))
    assert Ynm(1, 1, th, ph).expand(func=True) == -sqrt(6)*sin(th)*exp(I*ph)/(4*sqrt(pi))
    assert Ynm(2, 0, th, ph).expand(func=True) == 3*sqrt(5)*cos(th)**2/(4*sqrt(pi)) - sqrt(5)/(4*sqrt(pi))
    assert Ynm(2, 1, th, ph).expand(func=True) == -sqrt(30)*sin(th)*exp(I*ph)*cos(th)/(4*sqrt(pi))
    assert Ynm(2, -2, th, ph).expand(func=True) == (-sqrt(30)*exp(-2*I*ph)*cos(th)**2/(8*sqrt(pi))
                                                   + sqrt(30)*exp(-2*I*ph)/(8*sqrt(pi)))
    assert Ynm(2, 2, th, ph).expand(func=True) == (-sqrt(30)*exp(2*I*ph)*cos(th)**2/(8*sqrt(pi))
                                                   + sqrt(30)*exp(2*I*ph)/(8*sqrt(pi)))

    # Angular derivatives (theta via the ladder-operator recurrence).
    assert diff(Ynm(n, m, th, ph), th) == (m*cot(th)*Ynm(n, m, th, ph)
                                           + sqrt((-m + n)*(m + n + 1))*exp(-I*ph)*Ynm(n, m + 1, th, ph))
    assert diff(Ynm(n, m, th, ph), ph) == I*m*Ynm(n, m, th, ph)

    # Conjugation and argument symmetries.
    assert conjugate(Ynm(n, m, th, ph)) == (-1)**(2*m)*exp(-2*I*m*ph)*Ynm(n, m, th, ph)
    assert Ynm(n, m, -th, ph) == Ynm(n, m, th, ph)
    assert Ynm(n, m, th, -ph) == exp(-2*I*m*ph)*Ynm(n, m, th, ph)
    assert Ynm(n, -m, th, ph) == (-1)**m*exp(-2*I*m*ph)*Ynm(n, m, th, ph)
def test_Ynm_c():
    """Ynm_c equals the complex conjugate of Ynm."""
    th, ph = Symbol("theta", real=True), Symbol("phi", real=True)
    from sympy.abc import n,m

    assert Ynm_c(n, m, th, ph) == (-1)**(2*m)*exp(-2*I*m*ph)*Ynm(n, m, th, ph)
def test_Znm():
    """Real (tesseral) spherical harmonics Znm: definitions in terms of Ynm
    and explicit low-order closed forms.

    Reference: https://en.wikipedia.org/wiki/Solid_harmonics#List_of_lowest_functions
    """
    th, ph = Symbol("theta", real=True), Symbol("phi", real=True)

    # Znm expressed through Ynm.
    assert Znm(0, 0, th, ph) == Ynm(0, 0, th, ph)
    assert Znm(1, -1, th, ph) == (-sqrt(2)*I*(Ynm(1, 1, th, ph)
                                              - exp(-2*I*ph)*Ynm(1, 1, th, ph))/2)
    assert Znm(1, 0, th, ph) == Ynm(1, 0, th, ph)
    assert Znm(1, 1, th, ph) == (sqrt(2)*(Ynm(1, 1, th, ph)
                                          + exp(-2*I*ph)*Ynm(1, 1, th, ph))/2)

    # Expanded closed forms for low orders.
    assert Znm(0, 0, th, ph).expand(func=True) == 1/(2*sqrt(pi))
    assert Znm(1, -1, th, ph).expand(func=True) == (sqrt(3)*I*sin(th)*exp(I*ph)/(4*sqrt(pi))
                                                    - sqrt(3)*I*sin(th)*exp(-I*ph)/(4*sqrt(pi)))
    assert Znm(1, 0, th, ph).expand(func=True) == sqrt(3)*cos(th)/(2*sqrt(pi))
    assert Znm(1, 1, th, ph).expand(func=True) == (-sqrt(3)*sin(th)*exp(I*ph)/(4*sqrt(pi))
                                                   - sqrt(3)*sin(th)*exp(-I*ph)/(4*sqrt(pi)))
    assert Znm(2, -1, th, ph).expand(func=True) == (sqrt(15)*I*sin(th)*exp(I*ph)*cos(th)/(4*sqrt(pi))
                                                    - sqrt(15)*I*sin(th)*exp(-I*ph)*cos(th)/(4*sqrt(pi)))
    assert Znm(2, 0, th, ph).expand(func=True) == 3*sqrt(5)*cos(th)**2/(4*sqrt(pi)) - sqrt(5)/(4*sqrt(pi))
    assert Znm(2, 1, th, ph).expand(func=True) == (-sqrt(15)*sin(th)*exp(I*ph)*cos(th)/(4*sqrt(pi))
                                                   - sqrt(15)*sin(th)*exp(-I*ph)*cos(th)/(4*sqrt(pi)))
|
<filename>fsbackup/fsbckWrapper.py
#!/usr/bin/python3.6
"""
.. module:: fsbck_wrapper
:platform: Windows, linux
:synopsis: the entrance point script to the backup system
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import sys
import os
import re
import argparse
import json
import pymongo
import logging
from fsbackup.fileDB import FileDB
from fsbackup.hashVolume import HashVolume
from fsbackup.funcsLogger import loggingStdout
from mongo_shelve import Mongo_shelve
import fsbackup.commands as comms
def fsbck_wrapper(arg_list):
    """Given the arguments passed, runs the appropriate task.

    :param arg_list: arguments after the script name
    :type arg_list: list of str
    :rtype: dict

    Returns info generated that is useful for tests.
    """
    # ***** Parser *****
    parser = argparse.ArgumentParser(
        prog="fsbck",
        description="Filesystem multi-volume backup manager",
    )
    parser.add_argument('command', help="task to perform", type=lambda s: s.lower(),
                        choices=("backupstatus", "extractvolumeinfo", "cleanvolume", "updatevolume", "refreshhashes", "processdrive",
                                 "createdatabase", "checkout", "integritycheck", "showvolumeid", "removeduplicates", "sievepath", ))
    parser.add_argument('-db', '--dbfile', required=True, help="jsonfile whose filesystem/database is to be managed")
    if os.name == 'nt':
        parser.add_argument('-dr', '--drive', help="Windows drive (letter) where the volume is mounted")
    elif os.name == 'posix':
        parser.add_argument('-dmp', '--drivemountpoint', help="Mounting point for external drive")
    else:
        raise OSError("OS '%s' not supported" % os.name)
    parser.add_argument('--force', '-f', help="Confirmation flag for sensitive operations", action='store_true')
    parser.add_argument('--sourcepath', help="Path in the filesystem that is to be restored")
    parser.add_argument('--destpath', help="Path where the checkout should be created")
    parser.add_argument('--loglevel', help="logging level.", choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"), default="DEBUG")
    parser.add_argument('--volumeid', help="Volume id to be used, if forcing it is needed", default=None)
    parser.add_argument('--regexp', help="Regular Expression to be used")
    args = parser.parse_args(arg_list)
    # The parser's type= already lower-cases the command, so the repeated
    # .lower() calls in the dispatch below were redundant.
    command = args.command

    # ***** Logger *****
    logger = loggingStdout(lev=getattr(logging, args.loglevel))

    # ***** Building custom objects *****
    with open(args.dbfile) as f:
        dbConf = json.load(f)
    client = pymongo.MongoClient(dbConf['connstr'])
    # The database name is the last part of the connection string.
    # Fixed: raw string -- "\w" in a plain literal is an invalid escape.
    databaseName = re.search(r"(\w*)$", dbConf['connstr']).group(1)
    db = client[databaseName]

    # Relative paths to the json location are allowed, if they start with '.'
    if dbConf['mountPoint'][0] == '.':
        mountPoint = os.path.normpath(os.path.join(os.path.dirname(args.dbfile), dbConf['mountPoint']))
    else:
        mountPoint = dbConf['mountPoint']

    fDB = FileDB(
        logger=logger,
        mountPoint=mountPoint,
        fsPaths=dbConf['paths'],
        container=Mongo_shelve(db['files'], "filename"),
    )

    volDB = Mongo_shelve(db['volumes'], 'hash')

    # hashVol is only built when a drive / mount point was supplied; commands
    # that need a volume fail fast on None instead of raising NameError.
    hashVol = None
    if ('drive' in args) and (args.drive is not None):  # Drive for Windows
        hashVol = HashVolume(
            logger=logger,
            locationPath="%s:\\" % args.drive,
            container=volDB,
            volId=args.volumeid,
        )
    elif ('drivemountpoint' in args) and (args.drivemountpoint is not None):  # Drive for Linux
        hashVol = HashVolume(
            logger=logger,
            locationPath=args.drivemountpoint,
            container=volDB,
            volId=args.volumeid,
        )

    # ***** Invoke the function that performs the given command *****
    infoReturned = dict(db=db)
    if command == 'backupstatus':
        comms.backupStatus(fDB=fDB, volDB=volDB, reportPref=dbConf['reportpref'])
    elif command == 'removeduplicates':
        nDeleted = comms.removeDuplicates(fDB=fDB, regexp=args.regexp)
        infoReturned['nDeleted'] = nDeleted
    elif command == 'sievepath':
        nDeleted = comms.removeDuplicates(fDB=fDB, path=args.sourcepath)
        infoReturned['nDeleted'] = nDeleted
    elif command == 'extractvolumeinfo':
        comms.extractVolumeInfo(hashVol=hashVol)
    elif command == 'showvolumeid':
        comms.showVolumeId(hashVol=hashVol)
    elif command == 'cleanvolume':
        nDeleted = comms.cleanVolume(fDB=fDB, hashVol=hashVol)
        infoReturned['nDeleted'] = nDeleted
    elif command == 'updatevolume':
        comms.updateVolume(fDB=fDB, hashVol=hashVol)
    elif command == 'integritycheck':
        comms.integrityCheck(fDB=fDB, hashVol=hashVol)
    elif command == 'refreshhashes':
        comms.refreshFileInfo(fDB=fDB, forceRecalc=args.force)
    elif command == 'createdatabase':
        comms.createDatabase(database=db, forceFlag=args.force, logger=logger)
    elif command == 'checkout':
        comms.checkout(fDB=fDB, hashVol=hashVol,
                       sourcePath=args.sourcepath, destPath=args.destpath)
    elif command == 'processdrive':  # Clean + update + backupStatus
        comms.cleanVolume(fDB=fDB, hashVol=hashVol)
        comms.updateVolume(fDB=fDB, hashVol=hashVol)
        comms.backupStatus(fDB=fDB, volDB=volDB, reportPref=dbConf['reportpref'])
    else:
        raise Exception("Command '%s' not supported" % args.command)

    # Return information, useful for now only for testing.
    return infoReturned
|
import tensorflow as tf
from ._BaseModel import TransX
class Analogy(TransX):
    """ANALOGY knowledge-graph embedding model: sums a ComplEx-style score
    (built from the paired half-dimension embeddings) and a DistMult-style
    score.

    NOTE(review): `self.num_ent_tags`, `self.h`, `self.t`, `self.r`,
    `self.h_embed`, `self.input_y`, `self.config` etc. are not defined in
    this class -- presumably provided by the TransX base model; confirm.
    """

    def embedding_def(self):
        # embedding def
        # Two half-dimension tables per entity/relation; together they play
        # the role of the real/imaginary parts of a complex embedding.
        self.ent_embeddings_1 = tf.get_variable(name="ent_embeddings_1",
                                                shape=[self.num_ent_tags, self.ent_emb_dim // 2],
                                                initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.rel_embeddings_1 = tf.get_variable(name="rel_embeddings_1",
                                                shape=[self.num_rel_tags, self.rel_emb_dim // 2],
                                                initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.ent_embeddings_2 = tf.get_variable(name="ent_embeddings_2",
                                                shape=[self.num_ent_tags, self.ent_emb_dim // 2],
                                                initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.rel_embeddings_2 = tf.get_variable(name="rel_embeddings_2",
                                                shape=[self.num_rel_tags, self.rel_emb_dim // 2],
                                                initializer=tf.contrib.layers.xavier_initializer(uniform=False))

    # score function for ComplEx
    def _calc_comp(self, e1_h, e2_h, e1_t, e2_t, r1, r2):
        return e1_h * e1_t * r1 + e2_h * e2_t * r1 + e1_h * e2_t * r2 - e2_h * e1_t * r2

    # score function for DistMult
    def _calc_dist(self, e_h, e_t, rel):
        return e_h * e_t * rel

    def forward(self):
        # Embedding entities and relations of triples
        h_embed_1 = tf.nn.embedding_lookup(self.ent_embeddings_1, self.h)
        t_embed_1 = tf.nn.embedding_lookup(self.ent_embeddings_1, self.t)
        r_embed_1 = tf.nn.embedding_lookup(self.rel_embeddings_1, self.r)
        h_embed_2 = tf.nn.embedding_lookup(self.ent_embeddings_2, self.h)
        t_embed_2 = tf.nn.embedding_lookup(self.ent_embeddings_2, self.t)
        r_embed_2 = tf.nn.embedding_lookup(self.rel_embeddings_2, self.r)
        # predict
        # keep_dims is the TF 1.x spelling (renamed keepdims in later TF).
        res_comp = -tf.reduce_sum(self._calc_comp(h_embed_1, h_embed_2, t_embed_1, t_embed_2, r_embed_1, r_embed_2),
                                  1, keep_dims=False)
        # NOTE(review): self.h_embed / t_embed / r_embed are not created in
        # this class -- assumed to come from the base model; verify.
        res_dist = -tf.reduce_sum(self._calc_dist(self.h_embed, self.t_embed, self.r_embed), 1, keep_dims=False)
        self.predict = tf.add(res_comp, res_dist, name="predict")
        # Calculating score functions for all positive triples and negative triples
        loss_func = tf.reduce_mean(tf.nn.softplus(tf.reshape(self.input_y, [-1]) * tf.reshape(self.predict, [-1])),
                                   0, keep_dims=False)
        # L2 regularisation over every embedding involved in the score.
        regul_func = (tf.reduce_mean(h_embed_1 ** 2) + tf.reduce_mean(t_embed_1 ** 2) +
                      tf.reduce_mean(h_embed_2 ** 2) + tf.reduce_mean(t_embed_2 ** 2) +
                      tf.reduce_mean(r_embed_1 ** 2) + tf.reduce_mean(r_embed_2 ** 2) +
                      tf.reduce_mean(self.h_embed ** 2) + tf.reduce_mean(self.t_embed ** 2) + tf.reduce_mean(
                          self.r_embed ** 2))
        # Calculating loss to get what the framework will optimize
        self.loss = loss_func + self.config.l2_reg_lambda * regul_func
|
# coding=utf-8
"""
Reticular is a lightweight Python module that can be used to create powerful command-line tools.
It lets you define commands easily, without losing flexibility and control.
It can handle subcommand groups and supports interactive mode!
"""
import importlib
__author__ = "<NAME>, and <NAME>"
from functools import wraps
import os
import sys
from argparse import ArgumentParser
_COMMAND_GROUPS = {}
try:
input = raw_input
except:
pass
class CLI(object):
    """Top-level command-line application.

    Discovers command modules under ``<name>.<package>``, wires them into an
    argparse tree rooted at the mandatory ``base`` group, and runs either a
    single invocation or an interactive read-eval loop.
    """

    def __init__(self, name, version, message='Welcome!', package='commands'):
        self.name = name
        self.message = message
        self.groups = {}
        self.interactive_mode = False

        for path in self.list(name, package):
            if path not in _COMMAND_GROUPS:
                _COMMAND_GROUPS[path] = CommandGroup(path)
            group = _COMMAND_GROUPS[path]
            self.groups[group.name] = group

        try:
            self.base = self.groups.pop('base')
            self.base.load(subcommand=False)
            self.base.populate()
            self.base.parser.add_argument('--version', action='version', version=version)
        except KeyError:
            raise RuntimeError('Base commands module not found in: %s.base' % package)

        self.load_all()

    def run(self, args=None):
        """Parse and execute one command line; None means the process argv."""
        # Fixed: the default used to be `sys.argv[1:]`, which is evaluated
        # once at class-definition (import) time and therefore froze whatever
        # argv happened to be then.  Resolve it at call time instead.
        if args is None:
            args = sys.argv[1:]

        if len(args) < 1:
            if self.interactive_mode:
                return
            else:
                return self.interactive()

        try:
            parsed_args = self.base.parser.parse_args(args)
            parsed_args = vars(parsed_args)
            func = parsed_args.pop('func', None)

            if func is None:
                # No command function bound: report the most specific error.
                try:
                    self.groups[args[0]].parser.error("invalid number of arguments")
                except KeyError:
                    self.base.parser.error("invalid base command")
            else:
                func(**parsed_args)
        except RuntimeError as e:
            print('ERROR: ' + str(e))

    def interactive(self):
        """Read-eval loop: each input line is parsed as one command line."""
        self.interactive_mode = True
        print(self.message)
        while True:
            try:
                args = input('>> ').split()
                self.run(args)
            except EOFError:
                print()
                exit(0)
            except KeyboardInterrupt:
                print()
                exit(1)
            except SystemExit:
                # argparse errors call sys.exit(); stay in the loop instead.
                pass

    def load_all(self):
        """Load, populate, and register every non-base command group."""
        for name, cmd_group in self.groups.items():
            cmd_group.load()
            cmd_group.populate()
            cmd_group.register(self.base.parser_generator)

    @staticmethod
    def list(name, package):
        """Yield module paths for every non-private .py in the commands package."""
        module = "%s.%s" % (name, package)
        try:
            commands = importlib.import_module(module)
            pathname = os.path.dirname(commands.__file__)
            return ("%s.%s.%s" % (name, package, os.path.splitext(f)[0])
                    for f in os.listdir(pathname) if f.endswith('.py') and not f.startswith('_'))
        except ImportError:
            raise RuntimeError("%s package not found" % module)
class CommandGroup(object):
    """One module's worth of commands plus its argparse machinery."""

    def __init__(self, path):
        self.path = path
        self.name = path.rsplit('.', 1)[-1]
        self._module = None
        self.parser = None
        self.parser_generator = None
        self.parsers = {}

    def load(self, subcommand=True):
        """Create this group's parser (once) and import its module."""
        if not self.parser:
            if subcommand:
                add_help, prog_suffix = False, ' ' + self.name
                title, metavar = 'commands', '<command>'
            else:
                add_help, prog_suffix = True, ''
                title, metavar = 'base commands', '<base_command>'
            self.parser = DefaultHelpParser(add_help=add_help, prog=sys.argv[0] + prog_suffix)
            self.parser_generator = self.parser.add_subparsers(title=title, metavar=metavar)
            self._module = __import__(self.path, fromlist=[self.name])

    def register(self, subparsers):
        """Attach this group as a subcommand of the given subparser set."""
        subparsers.add_parser(self.name, parents=[self.parser], help=self.parser.description,
                              description=self.parser.description)

    def populate(self):
        """Adopt the module docstring and apply module-level ARGUMENTS."""
        self.parser.description = self._module.__doc__
        for cmd, parser in self.parsers.items():
            for args, kwargs in getattr(self._module, 'ARGUMENTS', []):
                parser.add_argument(*args, **kwargs)
class DefaultHelpParser(ArgumentParser):
    """ArgumentParser that prints the full help (not just usage) on errors."""

    def error(self, message):
        print('error: %s' % message, file=sys.stderr)
        self.print_help()
        sys.exit(2)
def argument(*args, **kwargs):
    """Decorator factory: add an argparse argument to the decorated command.

    All positional/keyword arguments are forwarded to
    ``parser.add_argument``.
    """
    def decorator(f):
        try:
            _get_parser(f).add_argument(*args, **kwargs)
        except KeyError:
            # The function's module is not a registered command group.
            pass
        return f
    return decorator
def command(f):
    """Decorator: register ``f`` as a command by ensuring it has a subparser."""
    try:
        _get_parser(f)
    except KeyError:
        # The function's module is not a registered command group.
        pass
    return f
def global_arg(*args, **kwargs):
    """Package an argparse argument spec for a module-level ARGUMENTS list."""
    return args, kwargs
class say:
    """Print helper with context-managed indentation.

    Usage::

        say('header')
        with say():
            say('indented line')
    """
    INDENTATION = 0

    def __init__(self, *args):
        prefix = ' ' * say.INDENTATION
        for text in args:
            print(prefix + text)

    def __enter__(self):
        say.INDENTATION += 1

    def __exit__(self, exc_type, exc_val, exc_tb):
        say.INDENTATION -= 1
def _get_parser(f):
    """Return the argparse subparser for command ``f``, creating it on first use.

    Raises KeyError when ``f``'s module is not a registered command group.
    """
    group = _COMMAND_GROUPS[f.__module__]
    group.load()
    if f.__name__ not in group.parsers:
        new_parser = group.parser_generator.add_parser(f.__name__, help=f.__doc__,
                                                       description=f.__doc__)
        # Bind the function so parse_args() exposes it as `func`.
        new_parser.set_defaults(func=f)
        group.parsers[f.__name__] = new_parser
    return group.parsers[f.__name__]
def superuser(f):
    """Decorator: refuse to run the wrapped command unless we are root."""
    @wraps(f)
    def guarded(*args, **kwargs):
        if os.getuid() != 0:
            raise RuntimeError('To perform this command you need super user privileges.')
        return f(*args, **kwargs)
    return guarded
|
# -*- coding: utf-8 -*-
"""ShirshakkP_.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GwKsJm8PK6zV_zVC9gCtAcJuz7rDT69e
"""
import numpy as np
import random
ROWS = 5
COLUMNS = 5
# Fixed: the State/Agent classes below index the board with BOARD_ROWS and
# BOARD_COLUMNS, which were never defined anywhere (instant NameError at
# runtime); alias them to the board dimensions here.
BOARD_ROWS = ROWS
BOARD_COLUMNS = COLUMNS

# Cells that are NOT win states: the four walls plus the interior band.
# (Same set the original built with repeated .remove() calls.)
_NON_WIN = {
    (0, 0), (1, 1), (1, 2), (1, 3), (1, 4),
    (2, 1), (2, 2), (2, 3),
    (3, 1), (3, 2), (3, 3),
    (4, 2), (4, 4),
}
# Row-major order, matching the original append-then-remove construction.
WIN_STATES = [(x, y) for x in range(5) for y in range(5) if (x, y) not in _NON_WIN]
print("WIN_STATES:", WIN_STATES)

# Lose states are exactly the four wall cells (the original removed all 21
# other cells from the full grid).
LOSE_STATES = [(0, 0), (1, 4), (4, 2), (4, 4)]
print("LOSE_STATES:", LOSE_STATES)

START = (1, 1)  # Defining start state at (1,1)
DETERMINISTIC = True  # actions always move as requested (no slip)
class State:
    """Grid-world board: tracks the agent position and terminal status.

    The board is ROWS x COLUMNS; the four cells marked -1 are blocked
    walls/terminals that nxtPosition refuses to enter.
    """

    def __init__(self, state=None):
        # `state` defaults to the module-level START, resolved at call time
        # (an `state=START` default would be frozen at class definition).
        if state is None:
            state = START
        # Fixed: BOARD_ROWS / BOARD_COLUMNS were undefined names (NameError);
        # use the module's ROWS / COLUMNS constants.
        self.board = np.zeros([ROWS, COLUMNS])
        # Mark the blocked cells.
        self.board[4, 4] = -1
        self.board[4, 2] = -1
        self.board[1, 4] = -1
        self.board[0, 0] = -1
        self.state = state
        self.isEnd = False
        self.determine = DETERMINISTIC

    def giveReward(self):
        """+1 in a win state, -1 in a lose state, else 0."""
        if self.state in WIN_STATES:
            return 1
        elif self.state in LOSE_STATES:
            return -1
        else:
            return 0

    def isEndFunc(self):
        """Flag the episode as finished when in any terminal state."""
        if (self.state in WIN_STATES) or (self.state in LOSE_STATES):
            self.isEnd = True

    def nxtPosition(self, action):
        """Return the position after `action` ("N"/"S"/"W", anything else = east).

        Moves that leave the legal band or enter a blocked cell keep the
        agent in place.
        """
        if self.determine:
            if action == "N":
                nxtState = (self.state[0], self.state[1] - 1)
            elif action == "S":
                nxtState = (self.state[0], self.state[1] + 1)
            elif action == "W":
                nxtState = (self.state[0] - 1, self.state[1])
            else:
                nxtState = (self.state[0] + 1, self.state[1])

            # NOTE(review): the column range 1..3 is narrower than the 0..4
            # board -- kept as-is from the original; confirm intent.
            if 0 <= nxtState[0] <= 4 and 1 <= nxtState[1] <= 3:
                # Fixed: the original returned nxtState as soon as it differed
                # from (0,0), which made the other blocked cells reachable
                # (notably (4,2), the only wall inside the column band).
                if nxtState not in ((0, 0), (4, 4), (4, 2), (1, 4)):
                    return nxtState
            return self.state

    def showBoard(self):
        """Print an ASCII rendering: '*' agent, 'z' blocked, '0' empty."""
        self.board[self.state] = 1
        for i in range(0, ROWS):
            print('-----------------')
            out = '| '
            for j in range(0, COLUMNS):
                if self.board[i, j] == 1:
                    token = '*'
                if self.board[i, j] == -1:
                    token = 'z'
                if self.board[i, j] == 0:
                    token = '0'
                out += token + ' | '
            print(out)
        print('-----------------')
class Agent:  # Creating an agent for the player
    """Tabular value-learning agent with epsilon-greedy exploration."""

    def __init__(self):
        self.states = []  # trace of visited positions in the current episode
        self.actions = ["N", "S", "W", "E"]
        self.State = State()
        self.lr = 0.5        # learning rate
        self.exp_rate = 0.5  # exploration probability

        # State-value table (the "rewards").
        self.state_values = {}
        # Fixed: BOARD_ROWS / BOARD_COLUMNS were undefined names (NameError);
        # use the module's ROWS / COLUMNS constants.
        for i in range(ROWS):
            for j in range(COLUMNS):
                # self.state_values[(i, j)] = 0  # Setting initial value to 0 #Q2
                self.state_values[(i, j)] = random.random()  # Setting initial value randomly [0,1) #Q1

    def chooseAction(self):
        """Epsilon-greedy: random with prob exp_rate, else best-valued move."""
        mx_nxt_reward = 0
        action = ""

        if np.random.uniform(0, 1) <= self.exp_rate:
            action = np.random.choice(self.actions)
        else:  # Greedy algo stage
            for a in self.actions:  # deterministic Action
                nxt_reward = self.state_values[self.State.nxtPosition(a)]
                if nxt_reward >= mx_nxt_reward:
                    action = a
                    mx_nxt_reward = nxt_reward
        return action

    def takeAction(self, action):
        position = self.State.nxtPosition(action)
        return State(state=position)

    def reset(self):
        self.states = []
        self.State = State()

    def play(self, rounds=5):
        """Run `rounds` episodes, backing value estimates up along the trace."""
        i = 0
        while i < rounds:
            if self.State.isEnd:  # Backpropagation stage
                reward = self.State.giveReward()
                self.state_values[self.State.state] = reward
                print("Game End Reward", reward)
                for s in reversed(self.states):
                    reward = self.state_values[s] + self.lr * (reward - self.state_values[s])
                    self.state_values[s] = round(reward, 1)
                self.reset()
                i += 1
            else:
                action = self.chooseAction()  # traces appending
                self.states.append(self.State.nxtPosition(action))
                print("current position {} action {}".format(self.State.state, action))
                self.State = self.takeAction(action)
                self.State.isEndFunc()
                print("Next state", self.State.state)
                print("---------------------")

    def showValues(self):
        """Print the value table as an ASCII grid (returns None)."""
        for i in range(0, ROWS):
            print('----------------------------------')
            out = '| '
            for j in range(0, COLUMNS):
                out += str(self.state_values[(i, j)]).ljust(6) + ' | '
            print(out)
        print('----------------------------------')
if __name__ == "__main__":
    ag = Agent()
    ag.play(50)
    # showValues() prints the table itself and returns None; wrapping it in
    # print() emitted a stray "None" line.
    ag.showValues()
<reponame>BastianZim/quantumflow-dev<gh_stars>10-100
# Copyright 2020-, <NAME> and contributors
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Standard one qubit gates
"""
from typing import Dict, List, Type
import numpy as np
from .. import tensors, utils, var
from ..paulialgebra import Pauli, sI, sX, sY, sZ
from ..qubits import Qubit
from ..states import Density, State
from ..tensors import QubitTensor
from ..var import PI, Variable
from .stdgates import StdGate
# Public API: the standard one-qubit gate set exported by this module.
__all__ = (
    "I",
    "Ph",
    "X",
    "Y",
    "Z",
    "H",
    "S",
    "T",
    "PhaseShift",
    "Rx",
    "Ry",
    "Rz",
    "Rn",
    "XPow",
    "YPow",
    "ZPow",
    "HPow",
    "S_H",
    "T_H",
    "V",
    "V_H",
    "SqrtY",
    "SqrtY_H",
)
def _specialize_gate(
    gate: StdGate, periods: List[float], opts: Dict[float, Type[StdGate]]
) -> StdGate:
    """Return a specialized instance of a given general gate. Used
    by the specialize code of various gates.
    Args:
        gate: The gate instance to specialize
        periods: The periods of the gate's parameters. Gate parameters
            are wrapped to range [0, period)
        opts: A map from particular gate parameter values to the special
            case of the original gate type
    Returns:
        A specialized gate acting on the same qubits, or a copy of the
        original gate type with wrapped parameters.
    """
    # Wrap each parameter into its period. Symbolic parameters would fail
    # in var.asfloat; callers are expected to pass concrete values here.
    params = list(gate.params)
    params = [var.asfloat(p) % pd for p, pd in zip(params, periods)]
    for values, gatetype in opts.items():
        # np.allclose reduces to a single bool even for multi-parameter
        # gates; np.isclose returned an element-wise array whose truth
        # value is ambiguous for more than one parameter.
        if np.allclose(params, values):
            return gatetype(*gate.qubits) # type: ignore
    return type(gate)(*params, *gate.qubits) # type: ignore
# Standard 1 qubit gates
class I(StdGate): # noqa: E742
    r"""
    The 1-qubit identity gate.
    .. math::
        I() \equiv \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}
    """
    cv_hermitian = True
    cv_tensor_structure = "identity"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        # Zero generator: evolving under it does nothing.
        return Pauli.zero()
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        return tensors.asqutensor(np.eye(2))
    @property
    def H(self) -> "I":
        return self # Hermitian
    def __pow__(self, t: Variable) -> "I":
        # Any power of the identity is the identity.
        return self
    def run(self, ket: State) -> State:
        # Fast path: no-op on the state.
        return ket
    def evolve(self, rho: Density) -> Density:
        # Fast path: no-op on the density matrix.
        return rho
class Ph(StdGate):
    r"""
    Apply a global phase shift of exp(i phi).
    Since this gate applies a global phase it technically doesn't need to
    specify qubits at all. But we instead anchor the gate to 1 specific
    qubit so that we can keep track of the phase as we manipulate gates,
    circuits, and DAGCircuits.
    We generally don't actually care about the global phase, since it has no
    physical meaning, although it does matter when constructing controlled gates.
    .. math::
        \operatorname{Ph}(\phi) \equiv \begin{pmatrix} e^{i \phi}& 0 \\
        0 & e^{i \phi} \end{pmatrix}
    """
    # TODO
    # Ref: Explorations in Quantum Computing, Williams, p77
    # Ref: Barenco
    cv_tensor_structure = "diagonal"
    def __init__(self, phi: Variable, q0: Qubit) -> None:
        super().__init__(params=[phi], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        (phi,) = self.params
        # Generator is a multiple of the identity, so only global phase moves.
        return -phi * sI(q0)
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        phi = var.asfloat(self.param("phi"))
        unitary = [[np.exp(1j * phi), 0.0], [0.0, np.exp(1j * phi)]]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "Ph":
        # Inverse is Ph(-phi).
        return self ** -1
    def __pow__(self, t: Variable) -> "Ph":
        return Ph(t * self.param("phi"), *self.qubits)
    def run(self, ket: State) -> State:
        (phi,) = self.params
        # Scale every amplitude by the same phase factor.
        tensor = ket.tensor * np.exp(1j * phi)
        return State(tensor, ket.qubits, ket.memory)
    def evolve(self, rho: Density) -> Density:
        """Global phase shifts have no effect on density matrices. Returns argument
        unchanged."""
        return rho
# End class Ph
class X(StdGate):
    r"""
    A 1-qubit Pauli-X gate.
    .. math::
        X() &\equiv \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}
    """
    cv_hermitian = True
    cv_tensor_structure = "permutation"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Same -(pi/2)*(1 - sigma) form as the Y and Z gates below.
        return -(PI / 2) * (1 - sX(q0))
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = [[0, 1], [1, 0]]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "X":
        return self # Hermitian
    def __pow__(self, t: Variable) -> "XPow":
        return XPow(t, *self.qubits)
    def run(self, ket: State) -> State:
        (idx,) = ket.qubit_indices(self.qubits)
        # Bit flip as a fast index reversal along this qubit's axis,
        # avoiding a full tensor contraction.
        take = utils.multi_slice(axes=[idx], items=[[1, 0]])
        tensor = ket.tensor[take]
        return State(tensor, ket.qubits, ket.memory)
# end class X
class Y(StdGate):
    r"""
    A 1-qubit Pauli-Y gate.
    .. math::
        Y() &\equiv \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}
    mnemonic: "Minus eye high".
    """
    cv_hermitian = True
    cv_tensor_structure = "monomial"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        return -(PI / 2) * (1 - sY(q0))
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = np.asarray([[0, -1.0j], [1.0j, 0]])
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "Y":
        return self # Hermitian
    def __pow__(self, t: Variable) -> "YPow":
        return YPow(t, *self.qubits)
    def run(self, ket: State) -> State:
        # This is fast since X and Z have fast optimizations.
        # Y = i X Z, so applying Z then X leaves the state off by a global
        # phase of -i relative to this gate's tensor; multiply by i to
        # restore exact agreement. Global phase is tracked elsewhere in this
        # module (Ph gate, ZPow.run), so it must not be dropped here.
        ket = Z(*self.qubits).run(ket)
        ket = X(*self.qubits).run(ket)
        tensor = ket.tensor * 1.0j
        return State(tensor, ket.qubits, ket.memory)
# end class Y
class Z(StdGate):
    r"""
    A 1-qubit Pauli-Z gate.
    .. math::
        Z() &\equiv \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}
    """
    cv_hermitian = True
    cv_tensor_structure = "diagonal"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        return -(PI / 2) * (1 - sZ(q0))
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = np.asarray([[1, 0], [0, -1.0]])
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "Z":
        return self # Hermitian
    def __pow__(self, t: Variable) -> "ZPow":
        return ZPow(t, *self.qubits)
    def run(self, ket: State) -> State:
        # Delegate to ZPow(1), which has a fast diagonal implementation.
        return ZPow(1, *self.qubits).run(ket)
# end class Z
class H(StdGate):
    r"""
    A 1-qubit Hadamard gate.
    .. math::
        H() \equiv \frac{1}{\sqrt{2}}
        \begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix}
    """
    cv_hermitian = True
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        return (PI / 2) * ((sX(q0) + sZ(q0)) / np.sqrt(2) - 1)
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = np.asarray([[1, 1], [1, -1]]) / np.sqrt(2)
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "_H": # See NB implementation note below
        return self # Hermitian
    def __pow__(self, t: Variable) -> "HPow":
        return HPow(t, *self.qubits)
    def run(self, ket: State) -> State:
        # In-place butterfly: for amplitudes (a, b) on this qubit this
        # computes ((a+b)/sqrt(2), (a-b)/sqrt(2)) without a matrix multiply.
        axes = ket.qubit_indices(self.qubits)
        s0 = utils.multi_slice(axes, [0])
        s1 = utils.multi_slice(axes, [1])
        tensor = ket.tensor.copy()
        tensor[s1] -= tensor[s0]  # b - a
        tensor[s1] *= -0.5        # (a - b) / 2
        tensor[s0] -= tensor[s1]  # a - (a - b)/2 = (a + b)/2
        tensor *= np.sqrt(2)
        return State(tensor, ket.qubits, ket.memory)
# Note: H().H -> H, but the method shadows the class, so we can't
# annotate directly.
_H = H
# End class H
class S(StdGate):
    r"""
    A 1-qubit phase S gate, equivalent to ``Z ** (1/2)``. The square root
    of the Z gate. Also sometimes denoted as the P gate.
    .. math::
        S() \equiv \begin{pmatrix} 1 & 0 \\ 0 & i \end{pmatrix}
    """
    cv_tensor_structure = "diagonal"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Half the Z-gate generator, since S = Z ** (1/2).
        return (PI / 2) * (sZ(q0) - 1) / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = np.asarray([[1.0, 0.0], [0.0, 1.0j]])
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "S_H":
        return S_H(*self.qubits)
    def __pow__(self, t: Variable) -> "ZPow":
        return ZPow(t / 2, *self.qubits)
    def run(self, ket: State) -> State:
        # Delegate to the fast diagonal ZPow implementation.
        return ZPow(1 / 2, *self.qubits).run(ket)
# end class S
class T(StdGate):
    r"""
    A 1-qubit T (pi/8) gate, equivalent to ``Z ** (1/4)``. The fourth root
    of the Z gate (up to global phase).
    .. math::
        \begin{pmatrix} 1 & 0 \\ 0 & e^{i \pi / 4} \end{pmatrix}
    """
    cv_tensor_structure = "diagonal"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # A quarter of the Z-gate generator, since T = Z ** (1/4).
        return (PI / 2) * (sZ(q0) - 1) / 4
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = [[1.0, 0.0], [0.0, np.exp(1j * np.pi / 4.0)]]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "T_H":
        return T_H(*self.qubits)
    def __pow__(self, t: Variable) -> "ZPow":
        return ZPow(t / 4, *self.qubits)
    def run(self, ket: State) -> State:
        # Delegate to the fast diagonal ZPow implementation.
        return ZPow(1 / 4, *self.qubits).run(ket)
# end class T
class PhaseShift(StdGate):
    r"""
    A 1-qubit parametric phase shift gate.
    Equivalent to Rz up to a global phase.
    .. math::
        \text{PhaseShift}(\theta) \equiv \begin{pmatrix}
        1 & 0 \\ 0 & e^{i \theta} \end{pmatrix}
    """
    cv_tensor_structure = "diagonal"
    def __init__(self, theta: Variable, q0: Qubit) -> None:
        super().__init__(params=[theta], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (theta,) = self.params
        (q0,) = self.qubits
        return theta * (sZ(q0) - 1) / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = var.asfloat(self.param("theta"))
        unitary = [[1.0, 0.0], [0.0, np.exp(1j * theta)]]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "PhaseShift":
        # Inverse is PhaseShift(-theta).
        return self ** -1
    def __pow__(self, t: Variable) -> "PhaseShift":
        return PhaseShift(self.param("theta") * t, *self.qubits)
    def run(self, ket: State) -> State:
        (theta,) = self.params
        # PhaseShift(theta) == ZPow(theta/pi); reuse the fast diagonal path.
        return ZPow(theta / np.pi, *self.qubits).run(ket)
    def specialize(self) -> StdGate:
        # Specialize via the equivalent ZPow gate (e.g. to T, S, Z, I).
        qbs = self.qubits
        (theta,) = self.params
        t = theta / np.pi
        gate0 = ZPow(t, *qbs)
        gate1 = gate0.specialize()
        return gate1
    def _diagram_labels_(self) -> List[str]:
        return ["P({theta})"]
# end class PhaseShift
class Rx(StdGate):
    r"""A 1-qubit Pauli-X parametric rotation gate.
    .. math::
        R_x(\theta) = \begin{bmatrix*}[r]
        \cos(\half\theta) & -i \sin(\half\theta) \\
        -i \sin(\half\theta) & \cos(\half\theta)
        \end{bmatrix*}
    Args:
        theta: Angle of rotation in Bloch sphere
    """
    def __init__(self, theta: Variable, q0: Qubit) -> None:
        super().__init__(params=[theta], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (theta,) = self.params
        (q0,) = self.qubits
        return theta * sX(q0) / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = var.asfloat(self.param("theta"))
        unitary = [
            [np.cos(theta / 2), -1.0j * np.sin(theta / 2)],
            [-1.0j * np.sin(theta / 2), np.cos(theta / 2)],
        ]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "Rx":
        # Inverse rotation: Rx(-theta).
        return self ** -1
    def __pow__(self, t: Variable) -> "Rx":
        return Rx(self.param("theta") * t, *self.qubits)
    def specialize(self) -> StdGate:
        # Specialize via XPow (e.g. to V, X, V_H, I for special angles).
        qbs = self.qubits
        (theta,) = self.params
        t = theta / np.pi
        gate0 = XPow(t, *qbs)
        gate1 = gate0.specialize()
        return gate1
# end class Rx
class Ry(StdGate):
    r"""A 1-qubit Pauli-Y parametric rotation gate
    .. math::
        R_y(\theta) = \begin{bmatrix*}[r]
        \cos(\half\theta) & -\sin(\half\theta)
        \\ \sin(\half\theta) & \cos(\half\theta)
        \end{bmatrix*}
    Args:
        theta: Angle of rotation in Bloch sphere
    """
    def __init__(self, theta: Variable, q0: Qubit) -> None:
        super().__init__(params=[theta], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (theta,) = self.params
        (q0,) = self.qubits
        return theta * sY(q0) / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = var.asfloat(self.param("theta"))
        unitary = [
            [np.cos(theta / 2.0), -np.sin(theta / 2.0)],
            [np.sin(theta / 2.0), np.cos(theta / 2.0)],
        ]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "Ry":
        # Inverse rotation: Ry(-theta).
        return self ** -1
    def __pow__(self, t: Variable) -> "Ry":
        return Ry(self.param("theta") * t, *self.qubits)
    def specialize(self) -> StdGate:
        # Specialize via YPow (e.g. to Y or I for special angles).
        qbs = self.qubits
        (theta,) = self.params
        t = theta / np.pi
        gate0 = YPow(t, *qbs)
        gate1 = gate0.specialize()
        return gate1
# end class Ry
class Rz(StdGate):
    r"""A 1-qubit Pauli-Z parametric rotation gate
    .. math::
        R_z(\theta) = \begin{bmatrix*}
        e^{-i\half\theta} & 0 \\
        0 & e^{+i\half\theta}
        \end{bmatrix*}
    Args:
        theta: Angle of rotation in Bloch sphere
    """
    cv_tensor_structure = "diagonal"
    def __init__(self, theta: Variable, q0: Qubit) -> None:
        super().__init__(params=[theta], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        return self.param("theta") * sZ(q0) / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = var.asfloat(self.param("theta"))
        unitary = [[np.exp(-theta * 0.5j), 0], [0, np.exp(theta * 0.5j)]]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "Rz":
        # Inverse rotation: Rz(-theta).
        return self ** -1
    def __pow__(self, t: Variable) -> "Rz":
        return Rz(self.param("theta") * t, *self.qubits)
    def run(self, ket: State) -> State:
        (theta,) = self.params
        # NOTE(review): ZPow(theta/pi) equals Rz(theta) only up to a global
        # phase exp(i theta/2); acceptable for state simulation semantics here.
        return ZPow(theta / np.pi, *self.qubits).run(ket)
    def specialize(self) -> StdGate:
        qbs = self.qubits
        (theta,) = self.params
        t = theta / np.pi
        gate0 = ZPow(t, *qbs)
        gate1 = gate0.specialize()
        return gate1
# end class Rz
# Other 1-qubit gates
class S_H(StdGate):
    r"""
    The inverse of the 1-qubit phase S gate, equivalent to
    ``Z ** -1/2``.
    .. math::
        \begin{pmatrix} 1 & 0 \\ 0 & -i \end{pmatrix}
    """
    cv_tensor_structure = "diagonal"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Negation of the S-gate generator.
        return -PI * (sZ(q0) - 1) / 4
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = np.asarray([[1.0, 0.0], [0.0, -1.0j]])
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "S":
        return S(*self.qubits)
    def __pow__(self, t: Variable) -> "ZPow":
        return ZPow(-t / 2, *self.qubits)
    def run(self, ket: State) -> State:
        # Delegate to the fast diagonal ZPow implementation.
        return ZPow(-1 / 2, *self.qubits).run(ket)
# end class S_H
class T_H(StdGate):
    r"""
    The inverse (complex conjugate) of the 1-qubit T (pi/8) gate, equivalent
    to ``Z ** -1/4``.
    .. math::
        \begin{pmatrix} 1 & 0 \\ 0 & e^{-i \pi / 4} \end{pmatrix}
    """
    cv_tensor_structure = "diagonal"
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Negation of the T-gate generator.
        return -PI * (sZ(q0) - 1) / 8
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        unitary = [[1.0, 0.0], [0.0, np.exp(-1j * np.pi / 4.0)]]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "T":
        return T(*self.qubits)
    def __pow__(self, t: Variable) -> "ZPow":
        return ZPow(-t / 4, *self.qubits)
    def run(self, ket: State) -> State:
        # Delegate to the fast diagonal ZPow implementation.
        return ZPow(-1 / 4, *self.qubits).run(ket)
# end class T_H
class Rn(StdGate):
    r"""A 1-qubit rotation of angle theta about axis (nx, ny, nz)
    .. math::
        R_n(\theta) = \cos \frac{\theta}{2} I - i \sin\frac{\theta}{2}
        (n_x X+ n_y Y + n_z Z)
    Args:
        theta: Angle of rotation on Block sphere
        (nx, ny, nz): A three-dimensional real unit vector
    """
    def __init__(
        self, theta: Variable, nx: Variable, ny: Variable, nz: Variable, q0: Qubit
    ) -> None:
        # Normalize the axis; the angle is rescaled by the axis norm, so
        # (theta, v) is treated like the rotation vector theta * v. For a
        # unit axis this leaves theta unchanged.
        norm = var.sqrt(nx ** 2 + ny ** 2 + nz ** 2)
        nx /= norm
        ny /= norm
        nz /= norm
        theta *= norm
        super().__init__(params=[theta, nx, ny, nz], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        theta, nx, ny, nz = self.params
        (q0,) = self.qubits
        return theta * (nx * sX(q0) + ny * sY(q0) + nz * sZ(q0)) / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = var.asfloat(self.param("theta"))
        nx = var.asfloat(self.param("nx"))
        ny = var.asfloat(self.param("ny"))
        nz = var.asfloat(self.param("nz"))
        cost = np.cos(theta / 2)
        sint = np.sin(theta / 2)
        # exp(-i theta/2 n.sigma) = cos(theta/2) I - i sin(theta/2) n.sigma
        unitary = [
            [cost - 1j * sint * nz, -1j * sint * nx - sint * ny],
            [-1j * sint * nx + sint * ny, cost + 1j * sint * nz],
        ]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "Rn":
        # Inverse rotation: same axis, negated angle.
        return self ** -1
    def __pow__(self, t: Variable) -> "Rn":
        theta, nx, ny, nz = self.params
        return Rn(t * theta, nx, ny, nz, *self.qubits)
    # TODO: def specialize(self) -> Gate:
# end class RN
class XPow(StdGate):
    r"""Powers of the 1-qubit Pauli-X gate.
    .. math::
        XPow(t) = X^t = e^{i \pi t/2} R_X(\pi t)
    Args:
        t: Number of half turns (quarter cycles) on Block sphere
    """
    def __init__(self, t: Variable, q0: Qubit) -> None:
        super().__init__(params=[t], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (t,) = self.params
        (q0,) = self.qubits
        return t * (sX(q0) - 1) * PI / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = np.pi * var.asfloat(self.param("t"))
        # Prefactor exp(i pi t / 2) makes this X^t, not just Rx(pi t).
        phase = np.exp(0.5j * theta)
        unitary = [
            [phase * np.cos(theta / 2), phase * -1.0j * np.sin(theta / 2)],
            [phase * -1.0j * np.sin(theta / 2), phase * np.cos(theta / 2)],
        ]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "XPow":
        return self ** -1
    def __pow__(self, t: Variable) -> "XPow":
        return XPow(t * self.param("t"), *self.qubits)
    def specialize(self) -> StdGate:
        # t is periodic with period 2; special points map to named gates.
        opts = {0.0: I, 0.5: V, 1.0: X, 1.5: V_H, 2.0: I}
        return _specialize_gate(self, [2], opts)
# end class XPow
class YPow(StdGate):
    r"""Powers of the 1-qubit Pauli-Y gate.
    The pseudo-Hadamard gate is YPow(3/2), and its inverse is YPow(1/2).
    .. math::
        YPow(t) = Y^t = e^{i \pi t/2} R_Y(\pi t)
    Args:
        t: Number of half turns (quarter cycles) on Block sphere
    """
    def __init__(self, t: Variable, q0: Qubit) -> None:
        super().__init__(params=[t], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (t,) = self.params
        (q0,) = self.qubits
        return t * (sY(q0) - 1) * PI / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = np.pi * var.asfloat(self.param("t"))
        # Prefactor exp(i pi t / 2) makes this Y^t, not just Ry(pi t).
        phase = np.exp(0.5j * theta)
        unitary = [
            [phase * np.cos(theta / 2.0), phase * -np.sin(theta / 2.0)],
            [phase * np.sin(theta / 2.0), phase * np.cos(theta / 2.0)],
        ]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "YPow":
        return self ** -1
    def __pow__(self, t: Variable) -> "YPow":
        return YPow(t * self.param("t"), *self.qubits)
    def specialize(self) -> StdGate:
        # t is periodic with period 2; special points map to named gates.
        opts = {0.0: I, 1.0: Y, 2.0: I}
        return _specialize_gate(self, [2], opts)
# end class YPow
class ZPow(StdGate):
    r"""Powers of the 1-qubit Pauli-Z gate.
    .. math::
        ZPow(t) = Z^t = e^{i \pi t/2} R_Z(\pi t)
    Args:
        t: Number of half turns (quarter cycles) on Block sphere
    """
    cv_tensor_structure = "diagonal"
    def __init__(self, t: Variable, q0: Qubit) -> None:
        super().__init__(params=[t], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (t,) = self.params
        (q0,) = self.qubits
        return t * (sZ(q0) - 1) * PI / 2
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = np.pi * var.asfloat(self.param("t"))
        # phase * diag(e^{-i theta/2}, e^{i theta/2}) = diag(1, e^{i theta}).
        phase = np.exp(0.5j * theta)
        unitary = [
            [phase * np.exp(-theta * 0.5j), 0],
            [0, phase * np.exp(theta * 0.5j)],
        ]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "ZPow":
        return ZPow(-self.param("t"), *self.qubits)
    def __pow__(self, t: Variable) -> "ZPow":
        return ZPow(t * self.param("t"), *self.qubits)
    def run(self, ket: State) -> State:
        t = var.asfloat(self.param("t"))
        axes = ket.qubit_indices(self.qubits)
        s1 = utils.multi_slice(axes, [1])
        # Multiply only the |1> amplitudes by e^{i pi t}, matching the
        # tensor diag(1, e^{i pi t}) without a matrix multiply.
        tensor = ket.tensor.copy()
        tensor[s1] *= np.exp(+1.0j * np.pi * t)
        return State(tensor, ket.qubits, ket.memory)
    def specialize(self) -> StdGate:
        # t is periodic with period 2; special points map to named gates.
        opts = {0.0: I, 0.25: T, 0.5: S, 1.0: Z, 1.5: S_H, 1.75: T_H, 2.0: I}
        return _specialize_gate(self, [2], opts)
# end class ZPow
class HPow(StdGate):
    r"""
    Powers of the 1-qubit Hadamard gate.
    .. math::
        HPow(t) = H^t = e^{i \pi t/2}
        \begin{pmatrix}
        \cos(\tfrac{t}{2}) + \tfrac{i}{\sqrt{2}}\sin(\tfrac{t}{2})) &
        \tfrac{i}{\sqrt{2}} \sin(\tfrac{t}{2}) \\
        \tfrac{i}{\sqrt{2}} \sin(\tfrac{t}{2}) &
        \cos(\tfrac{t}{2}) -\tfrac{i}{\sqrt{2}} \sin(\frac{t}{2})
        \end{pmatrix}
    """
    def __init__(self, t: Variable, q0: Qubit) -> None:
        super().__init__(params=[t], qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        # Scale the H-gate generator by the power t.
        return H(*self.qubits).hamiltonian * self.param("t")
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        theta = np.pi * var.asfloat(self.param("t"))
        # Prefactor exp(i pi t / 2) makes this H^t rather than a bare rotation.
        phase = np.exp(0.5j * theta)
        unitary = [
            [
                phase * np.cos(theta / 2)
                - (phase * 1.0j * np.sin(theta / 2)) / np.sqrt(2),
                -(phase * 1.0j * np.sin(theta / 2)) / np.sqrt(2),
            ],
            [
                -(phase * 1.0j * np.sin(theta / 2)) / np.sqrt(2),
                phase * np.cos(theta / 2)
                + (phase * 1.0j * np.sin(theta / 2)) / np.sqrt(2),
            ],
        ]
        return tensors.asqutensor(unitary)
    @property
    def H(self) -> "HPow":
        return self ** -1
    def __pow__(self, t: Variable) -> "HPow":
        return HPow(t * self.param("t"), *self.qubits)
    def specialize(self) -> StdGate:
        # t is periodic with period 2; special points map to named gates.
        opts = {0.0: I, 1.0: H, 2.0: I}
        return _specialize_gate(self, [2], opts)
# end class HPow
class V(StdGate):
    r"""
    Principal square root of the X gate, X-PLUS-90 gate.
    Equivalent to XPow(1/2).
    """
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Half the X-gate generator.
        return (sX(q0) - 1) * PI / 4
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        # Reuse the general XPow tensor at t = 1/2.
        return XPow(0.5, *self.qubits).tensor
    @property
    def H(self) -> "V_H":
        return V_H(*self.qubits)
    def __pow__(self, t: Variable) -> "XPow":
        return XPow(0.5 * t, *self.qubits)
# end class V
class V_H(StdGate):
    r"""
    Complex conjugate of the V gate, X-MINUS-90 gate.
    Equivalent to XPow(-1/2).
    """
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Negation of the V-gate generator.
        return -(sX(q0) - 1) * PI / 4
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        # Reuse the general XPow tensor at t = -1/2.
        return XPow(-0.5, *self.qubits).tensor
    @property
    def H(self) -> "V":
        return V(*self.qubits)
    def __pow__(self, t: Variable) -> "XPow":
        return XPow(-0.5 * t, *self.qubits)
# end class V_H
class SqrtY(StdGate):
    r"""
    Principal square root of the Y gate.
    Equivalent to YPow(1/2).
    """
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Half the Y-gate generator.
        return (sY(q0) - 1) * PI / 4
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        # Reuse the general YPow tensor at t = 1/2.
        return YPow(0.5, *self.qubits).tensor
    @property
    def H(self) -> "SqrtY_H":
        return SqrtY_H(*self.qubits)
    def __pow__(self, t: Variable) -> "YPow":
        return YPow(0.5 * t, *self.qubits)
# end class SqrtY
class SqrtY_H(StdGate):
    r"""
    Complex conjugate of the SqrtY gate.
    Equivalent to YPow(-1/2).
    """
    def __init__(self, q0: Qubit) -> None:
        super().__init__(qubits=[q0])
    @property
    def hamiltonian(self) -> Pauli:
        (q0,) = self.qubits
        # Negation of the SqrtY generator.
        return -(sY(q0) - 1) * PI / 4
    @utils.cached_property
    def tensor(self) -> QubitTensor:
        # Reuse the general YPow tensor at t = -1/2.
        return YPow(-0.5, *self.qubits).tensor
    @property
    def H(self) -> "SqrtY":
        return SqrtY(*self.qubits)
    def __pow__(self, t: Variable) -> "YPow":
        return YPow(-0.5 * t, *self.qubits)
# end class SqrtY_H
# fin
|
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from __future__ import division
import numpy as np
from scipy.sparse import isspmatrix
from sklearn.utils.validation import check_array
from .utils import RegisterSubclasses
def compute_laplacian_matrix(affinity_matrix, method='auto', **kwargs):
    """Compute the laplacian matrix with the given method.

    'auto' currently resolves to the 'geometric' laplacian.
    """
    chosen = 'geometric' if method == 'auto' else method
    laplacian = Laplacian.init(chosen, **kwargs)
    return laplacian.laplacian_matrix(affinity_matrix)
def laplacian_methods():
    """Return the list of valid laplacian methods ('auto' plus registered ones)."""
    methods = ['auto']
    methods.extend(Laplacian.methods())
    return methods
class Laplacian(RegisterSubclasses):
    """Base class for computing laplacian matrices
    Notes
    -----
    The methods here all return the negative of the standard
    Laplacian definition.
    """
    # Subclasses set True when the laplacian they produce is symmetric.
    symmetric = False
    def __init__(self, symmetrize_input=True,
                 scaling_epps=None, full_output=False):
        self.symmetrize_input = symmetrize_input
        self.scaling_epps = scaling_epps
        self.full_output = full_output
    @staticmethod
    def _symmetrize(A):
        # TODO: make this more efficient?
        return 0.5 * (A + A.T)
    @classmethod
    def symmetric_methods(cls):
        # Yield names of registered methods that produce symmetric output.
        for method in cls.methods():
            if cls.get_method(method).symmetric:
                yield method
    @classmethod
    def asymmetric_methods(cls):
        # Yield names of registered methods that produce asymmetric output.
        for method in cls.methods():
            if not cls.get_method(method).symmetric:
                yield method
    def laplacian_matrix(self, affinity_matrix):
        """Validate input, then delegate to the subclass _compute_laplacian."""
        affinity_matrix = check_array(affinity_matrix, copy=False, dtype=float,
                                      accept_sparse=['csr', 'csc', 'coo'])
        if self.symmetrize_input:
            affinity_matrix = self._symmetrize(affinity_matrix)
        # Work on a COO conversion (sparse) or an explicit copy (dense) so
        # the caller's matrix is not mutated by the in-place helpers below.
        if isspmatrix(affinity_matrix):
            affinity_matrix = affinity_matrix.tocoo()
        else:
            affinity_matrix = affinity_matrix.copy()
        lap, lapsym, w = self._compute_laplacian(affinity_matrix)
        if self.scaling_epps is not None and self.scaling_epps > 0.:
            # Rescale by 4 / scaling_epps**2.
            if isspmatrix(lap):
                lap.data *= 4 / (self.scaling_epps ** 2)
            else:
                lap *= 4 / (self.scaling_epps ** 2)
        if self.full_output:
            # Also return the symmetrized laplacian and the degree vector.
            return lap, lapsym, w
        else:
            return lap
    def _compute_laplacian(self, lap):
        # Subclasses must return a (lap, lapsym, w) triple.
        raise NotImplementedError()
class UnNormalizedLaplacian(Laplacian):
    # Unnormalized laplacian: W - D (negative of the standard D - W).
    name = 'unnormalized'
    symmetric = True
    def _compute_laplacian(self, lap):
        w = _degree(lap)
        _subtract_from_diagonal(lap, w)
        # Already symmetric, so lapsym is the same object.
        return lap, lap, w
class GeometricLaplacian(Laplacian):
    # Geometric laplacian: symmetric normalization followed by a
    # row (random-walk) normalization; result is asymmetric.
    name = 'geometric'
    symmetric = False
    def _compute_laplacian(self, lap):
        _normalize_laplacian(lap, symmetric=True)
        # Snapshot the symmetric intermediate before the row normalization.
        lapsym = lap.copy()
        w, nonzero = _normalize_laplacian(lap, symmetric=False)
        _subtract_from_diagonal(lap, nonzero)
        return lap, lapsym, w
class RandomWalkLaplacian(Laplacian):
    # Random-walk laplacian: plain row normalization of the affinities.
    name = 'randomwalk'
    symmetric = False
    def _compute_laplacian(self, lap):
        # Snapshot the (symmetric) input before the row normalization.
        lapsym = lap.copy()
        w, nonzero = _normalize_laplacian(lap, symmetric=False)
        _subtract_from_diagonal(lap, nonzero)
        return lap, lapsym, w
class SymmetricNormalizedLaplacian(Laplacian):
    # Symmetric normalization with D^{-1/2} on both sides.
    name = 'symmetricnormalized'
    symmetric = True
    def _compute_laplacian(self, lap):
        w, nonzero = _normalize_laplacian(lap, symmetric=True, degree_exp=0.5)
        _subtract_from_diagonal(lap, nonzero)
        # Symmetric by construction, so lapsym is the same object.
        return lap, lap, w
class RenormalizedLaplacian(Laplacian):
    """Laplacian with a tunable degree-renormalization exponent."""
    name = 'renormalized'
    symmetric = False
    def __init__(self, symmetrize_input=True,
                 scaling_epps=None,
                 full_output=False,
                 renormalization_exponent=1):
        # Delegate the shared options to the base class instead of
        # duplicating its attribute assignments (keeps this constructor in
        # sync if the base class ever changes).
        super(RenormalizedLaplacian, self).__init__(
            symmetrize_input=symmetrize_input,
            scaling_epps=scaling_epps,
            full_output=full_output)
        self.renormalization_exponent = renormalization_exponent
    def _compute_laplacian(self, lap):
        # Symmetric normalization with the renormalization exponent, then a
        # row normalization (same structure as the geometric laplacian).
        _normalize_laplacian(lap, symmetric=True,
                             degree_exp=self.renormalization_exponent)
        lapsym = lap.copy()
        w, nonzero = _normalize_laplacian(lap, symmetric=False)
        _subtract_from_diagonal(lap, nonzero)
        return lap, lapsym, w
# Utility routines: these operate in-place and assume either coo matrix or
# dense array
def _degree(lap):
return np.asarray(lap.sum(1)).squeeze()
def _divide_along_rows(lap, vals):
if isspmatrix(lap):
lap.data /= vals[lap.row]
else:
lap /= vals[:, np.newaxis]
def _divide_along_cols(lap, vals):
if isspmatrix(lap):
lap.data /= vals[lap.col]
else:
lap /= vals
def _normalize_laplacian(lap, symmetric=False, degree_exp=None):
    """Normalize *lap* in place by its degree vector.

    Zero degrees are replaced by 1 so the division is a no-op on empty
    rows.  Returns (w, w_nonzero): the (optionally exponentiated)
    degrees and a boolean mask of rows with nonzero degree.
    """
    w = _degree(lap)
    w_nonzero = (w != 0)
    w[~w_nonzero] = 1
    if degree_exp is not None:
        w **= degree_exp
    # Rows are divided in both modes; symmetric mode also divides columns.
    _divide_along_rows(lap, w)
    if symmetric:
        _divide_along_cols(lap, w)
    return w, w_nonzero
def _subtract_from_diagonal(lap, vals):
if isspmatrix(lap):
lap.data[lap.row == lap.col] -= vals
else:
lap.flat[::lap.shape[0] + 1] -= vals
|
<reponame>Tachashi/ntc-ansible
#!/usr/bin/env python
# Copyright 2015 <NAME> <<EMAIL>>
# Network to Code, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: ntc_reboot
short_description: Reboot a network device.
description:
- Reboot a network device, optionally on a timer.
- Supported platforms include Cisco Nexus switches with NX-API, Cisco IOS switches or routers, Arista switches with eAPI.
Notes:
- The timer is only supported for IOS devices.
author: <NAME> (@jedelman8)
version_added: 1.9.2
requirements:
- pyntc
options:
platform:
description:
- Switch platform
required: false
choices: ['cisco_nxos_nxapi', 'arista_eos_eapi', 'cisco_ios_ssh', 'cisco_asa_ssh', 'f5_tmos_icontrol']
timer:
description:
- Time in minutes after which the device will be rebooted.
required: false
default: null
timeout:
description:
- Time in seconds to wait for the device and API to come back up.
Uses specified port/protocol as defined with port and protocol params.
required: false
default: 240
confirm:
description:
- Safeguard boolean. Set to true if you're sure you want to reboot.
required: false
default: false
volume:
description:
- Volume name - required argument for F5 platform.
required: false
host:
description:
            - Hostname or IP address of switch.
required: false
username:
description:
- Username used to login to the target device
required: false
password:
description:
- Password used to login to the target device
required: false
provider:
description:
- Dictionary which acts as a collection of arguments used to define the characteristics
of how to connect to the device.
Note - host, username, password and platform must be defined in either provider
or local param
Note - local param takes precedence, e.g. hostname is preferred to provider['host']
required: false
secret:
description:
- Enable secret for devices connecting over SSH.
required: false
transport:
description:
- Transport protocol for API-based devices.
required: false
default: https
choices: ['http', 'https']
port:
description:
- TCP/UDP port to connect to target device. If omitted standard port numbers will be used.
80 for HTTP; 443 for HTTPS; 22 for SSH.
required: false
default: null
ntc_host:
description:
- The name of a host as specified in an NTC configuration file.
required: false
default: null
ntc_conf_file:
description:
- The path to a local NTC configuration file. If omitted, and ntc_host is specified,
the system will look for a file given by the path in the environment variable PYNTC_CONF,
and then in the users home directory for a file called .ntc.conf.
required: false
default: null
'''
EXAMPLES = '''
vars:
nxos_provider:
host: "{{ inventory_hostname }}"
username: "ntc-ansible"
password: "<PASSWORD>"
platform: cisco_nxos_nxapi
connection: http
- ntc_reboot:
provider: "{{ nxos_provider }}"
confirm: true
- ntc_reboot:
platform: cisco_nxos_nxapi
confirm: true
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
transport: http
- ntc_reboot:
ntc_host: n9k1
ntc_conf_file: .ntc.conf
confirm: true
- ntc_reboot:
platform: arista_eos_eapi
confirm: true
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
- ntc_reboot:
platform: cisco_ios
confirm: true
timer: 5
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
secret: "{{ secret }}"
'''
RETURN = '''
rebooted:
description: Whether the device was instructed to reboot.
returned: success
type: boolean
sample: true
reachable:
description: Whether the device is reachable on specified port
after rebooting.
returned: always
type: boolean
sample: true
atomic:
description: Whether the module has atomically completed all steps,
including testing port and closing connection after
rebooting.
returned: always
type: boolean
sample: true
'''
import time
try:
HAS_PYNTC = True
from pyntc import ntc_device, ntc_device_by_name
except ImportError:
HAS_PYNTC = False
PLATFORM_NXAPI = 'cisco_nxos_nxapi'
PLATFORM_IOS = 'cisco_ios_ssh'
PLATFORM_EAPI = 'arista_eos_eapi'
PLATFORM_JUNOS = 'juniper_junos_netconf'
PLATFORM_F5 = 'f5_tmos_icontrol'
PLATFORM_ASA = 'cisco_asa_ssh'
def check_device(module, username, password, host, timeout, kwargs):
    """Poll the device until it is reachable again after a reboot.

    Attempts to open (and close) a connection every 30 seconds for up to
    ``timeout`` seconds.

    Returns:
        (success, atomic): success is True when a connection could be
        opened; atomic is True when that connection was also closed cleanly.
    """
    success = False
    attempts = timeout / 30
    counter = 0
    atomic = False
    while counter < attempts and not success:
        # Narrowed from bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and made the polling loop uninterruptible.
        try:
            if module.params['ntc_host'] is not None:
                device = ntc_device_by_name(module.params['ntc_host'],
                                            module.params['ntc_conf_file'])
            else:
                device_type = module.params['platform']
                device = ntc_device(device_type, host, username, password,
                                    **kwargs)
            success = True
            atomic = True
            try:
                device.close()
            except Exception:
                # Device reachable but the session could not be shut down
                # cleanly; report success but not atomic.
                atomic = False
        except Exception:
            # Device not reachable yet; wait and retry.
            time.sleep(30)
            counter += 1
    return success, atomic
def main():
    """Ansible module entry point: validate parameters, trigger the reboot
    and wait for the device to become reachable again.

    Exits through module.exit_json()/fail_json(); never returns normally.
    """
    module = AnsibleModule(
        argument_spec=dict(
            platform=dict(choices=[PLATFORM_NXAPI, PLATFORM_IOS, PLATFORM_EAPI,
                                   PLATFORM_JUNOS, PLATFORM_F5, PLATFORM_ASA],
                          required=False),
            host=dict(required=False),
            username=dict(required=False, type='str'),
            provider=dict(required=False, type='dict'),
            password=dict(required=False, type='str', no_log=True),
            secret=dict(required=False, no_log=True),
            transport=dict(required=False, choices=['http', 'https']),
            port=dict(required=False, type='int'),
            ntc_host=dict(required=False),
            ntc_conf_file=dict(required=False),
            confirm=dict(required=False, default=False, type='bool'),
            # bug fix: the keyword was misspelled 'requred', which made the
            # timer option spec invalid
            timer=dict(required=False, type='int'),
            timeout=dict(required=False, type='int', default=240),
            volume=dict(required=False, type='str'),
        ),
        mutually_exclusive=[['host', 'ntc_host'],
                            ['ntc_host', 'secret'],
                            ['ntc_host', 'transport'],
                            ['ntc_host', 'port'],
                            ['ntc_conf_file', 'secret'],
                            ['ntc_conf_file', 'transport'],
                            ['ntc_conf_file', 'port'],
                            ],
        required_one_of=[['host', 'ntc_host', 'provider']],
        required_if=[["platform", PLATFORM_F5, ["volume"]]],
        supports_check_mode=False
    )
    if not HAS_PYNTC:
        module.fail_json(msg='pyntc Python library not found.')
    provider = module.params['provider'] or {}
    no_log = ['password', 'secret']
    for param in no_log:
        if provider.get(param):
            module.no_log_values.update(return_values(provider[param]))
    # allow local params to override provider
    for param, pvalue in provider.items():
        # bug fix: compare with identity, not equality; '!= False' also
        # rejected falsy-but-valid values such as 0 or ''
        if module.params.get(param) is not False:
            module.params[param] = module.params.get(param) or pvalue
    platform = module.params['platform']
    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    ntc_host = module.params['ntc_host']
    ntc_conf_file = module.params['ntc_conf_file']
    transport = module.params['transport']
    port = module.params['port']
    secret = module.params['secret']
    kwargs = {}
    if ntc_host is not None:
        # a named device from the .ntc.conf file carries its own connection info
        device = ntc_device_by_name(ntc_host, ntc_conf_file)
    else:
        # bug fix: host/username/platform/password are only mandatory when
        # connecting by platform; the old unconditional check rejected the
        # ntc_host usage shown in EXAMPLES, and was duplicated further down
        argument_check = {'host': host, 'username': username,
                          'platform': platform, 'password': password}
        for key, val in argument_check.items():
            if val is None:
                module.fail_json(msg=str(key) + " is required")
        if transport is not None:
            kwargs['transport'] = transport
        if port is not None:
            kwargs['port'] = port
        if secret is not None:
            kwargs['secret'] = secret
        device_type = platform
        device = ntc_device(device_type, host, username, password, **kwargs)
    confirm = module.params['confirm']
    timer = module.params['timer']
    timeout = module.params['timeout']
    volume = module.params['volume']
    # refuse to reboot unless explicitly confirmed by the playbook
    if not confirm:
        module.fail_json(
            msg='confirm must be set to true for this module to work.')
    supported_timer_platforms = [PLATFORM_IOS, PLATFORM_JUNOS]
    if timer is not None and device.device_type not in supported_timer_platforms:
        module.fail_json(
            msg='Timer parameter not supported on platform %s.' % platform)
    device.open()
    if volume:
        device.reboot(confirm=True, volume=volume)
    elif timer is not None:
        device.reboot(confirm=True, timer=timer)
    else:
        device.reboot(confirm=True)
    # give the device a moment to actually go down before polling for it
    time.sleep(10)
    reachable, atomic = check_device(module, username, password, host, timeout,
                                     kwargs)
    changed = True
    rebooted = True
    module.exit_json(changed=changed, rebooted=rebooted, reachable=reachable,
                     atomic=atomic)
# Ansible boilerplate: the wildcard import provides AnsibleModule (and
# return_values, used in main); old-style modules invoke main() at import time.
from ansible.module_utils.basic import *
main()
|
"""
hs - Python Version of the Augmented Lagrangian Harmony Search Optimizer
hso is a global optimizer which solves problems of the form:
min F(x)
subject to: Gi(x) = 0, i = 1(1)ME
Gj(x) <= 0, j = ME+1(1)M
xLB <= x <= xUB
"""
import random, time
from math import floor
import numpy as np
def HS(dimensions,constraints,neqcons,xtype,x0,xmin,xmax,
    memsize,maxoutiter,maxinniter,stopcriteria,stopiters,etol,
    itol,atol,rtol,prtoutiter,prtinniter,r0,hmcr,par,bw,
    fileout,filename,rseed,scale,objfunc):
    """
    Python Version of the Augmented Lagrangian Harmony Search Optimizer

    Arguments:
    1-dimensions:
        [Integer] number of optimization variables
    2-constraints:
        [Integer] number of constraints
    3-neqcons:
        [Integer] number of equality constraints
    4-xtype:
        [Array] array marking the continuous variables with 0, and
        other type with 1
    5-x0:
        [Array] initial design variable values; pass [] to start from a
        purely random harmony memory
    6-xmin:
        [Array] lower bounds of the design variables
    7-xmax:
        [Array] upper bounds of the design variables
    8-memsize:
        [Integer] harmony memory size (rows of stored solutions)
    9-maxoutiter:
        [Integer] maximum number of outer (Lagrange update) iterations
    10-maxinniter:
        [Integer] maximum number of inner (improvisation) iterations
    11-stopcriteria:
        [Integer] 1 to enable the objective-convergence stopping test
    12-stopiters:
        [Integer] consecutive converged iterations required to stop
    13-etol:
        [Float] equality constraint tolerance
    14-itol:
        [Float] inequality constraint tolerance
    15-atol:
        [] min change in the objective function to stop
    16-rtol:
        [] min Relative Change in Objective to stop
    17-prtoutiter:
        [Integer] every prtoutiter the variables, objective function value
        will be printed, if prtoutiter = 0 nothing will be printed
    18-prtinniter:
        [Integer] print inner iteration to decide whether to print the variables,
        objective function value
    19-r0:
        [Float] Initial Penalty Factor
    20-hmcr:
        [Float] harmony memory considering rate (probability of reusing
        a stored value instead of random search)
    21-par:
        [Float] pitch adjusting rate
    22-bw:
        [Array] bandwidth (pitch adjustment step) per variable
    23-fileout:
        [Integer] 1 to also write the report to `filename`
    24-filename:
        [String] output file name (defaults to 'Print.out')
    25-rseed:
        [Float] random seed; pass {} to seed from the system clock
    26-scale:
        [Integer] 1 to internally rescale variables to [-1, 1]
    27-objfunc:
        [Function] callable x -> [f, g] returning the objective value and
        the list of constraint values

    Returns:
        (opt_x, opt_f, opt_g, opt_lambda, nfevals, seed_string)
    """
    # Set random number seed
    rand = random.Random()
    if rseed == {}:
        rseed = time.time()
    rand.seed(rseed)
    # open the report file when file output is requested
    if fileout == 1:
        if filename == '' :
            filename = 'Print.out'
        ofile = open(filename,'w')
    if scale == 1:
        dbw = (xmax - xmin)/bw
        # get the center of the space of each variable
        space_centre = np.zeros(dimensions,float)
        space_halflen = np.zeros(dimensions,float)
        for j in range(dimensions):
            space_centre[j] = (xmin[j] + xmax[j])/2.0
            space_halflen[j] = ((xmax[j] - xmin[j])/2.0)
        # make xmin -1 and xmax 1 (work in normalized coordinates)
        xmin = -np.ones(dimensions,float)
        xmax = np.ones(dimensions,float)
        bw = (xmax - xmin)/dbw
    # Initialize Augmented Lagrange
    rp_val = np.ones(constraints, float)*r0
    lambda_val = np.zeros(constraints, float)
    lambda_old = np.zeros(constraints, float)
    # Initialize Harmony Memory
    HM = np.zeros((memsize,dimensions+1), float)
    discrete_i = []
    # NOTE(review): discrete indices are re-appended once per memory row, so
    # discrete_i holds duplicates; the later floor() rounding is idempotent,
    # so this is only wasted work, not a correctness issue.
    for i in range(memsize):
        for j in range(dimensions):
            HM[i,j] = xmin[j] + rand.random()*(xmax[j]-xmin[j])
            if xtype[j] == 1:
                discrete_i.append(j)
    # assign the initial variable values to all the Harmony memory columns
    if x0 != []:
        if scale == 1:
            HM[:,:-1] = (x0[:] - space_centre)/space_halflen
        else:
            HM[:,:-1] = x0[:]
    # Initialize Harmony Memory Augmented Lagrange
    x_val = np.zeros(dimensions, float)
    x_tmp = np.zeros(dimensions, float)
    tau_val = np.zeros(constraints, float)
    nfevals = 0
    # evaluate the initial values in the HM and get the L_val in the last column
    for i in range(memsize):
        # apply each set of variable values in the harmony memory on the
        # objective function
        if scale == 1:
            x_tmp = (HM[i,:-1] * space_halflen) + space_centre
        else:
            x_tmp = HM[i,:-1]
        # if the variable is discrete round it
        for m in discrete_i:
            x_tmp[m] = floor(x_tmp[m] + 0.5)
        # Evaluate Objective Function
        [f_val,g_val] = objfunc(x_tmp)
        nfevals = nfevals + 1
        # Augmented Lagrangian Value
        L_val = f_val
        if constraints > 0:
            # Equality Constraints
            for l in range(neqcons):
                tau_val[l] = g_val[l]
            # Inequality Constraints
            for l in range(neqcons,constraints):
                if rp_val[l] != 0:
                    if g_val[l] > -lambda_val[l]/(2*rp_val[l]):
                        tau_val[l] = g_val[l]
                    else:
                        tau_val[l] = -lambda_val[l]/(2*rp_val[l])
                else:
                    tau_val[l] = g_val[l]
            for l in range(constraints):
                L_val += lambda_val[l]*tau_val[l] + rp_val[l]*tau_val[l]**2
        HM[i,dimensions] = L_val
    # Initialize Best
    best_x_val = np.zeros(dimensions, float)
    best_f_val = []
    best_g_val = np.zeros(constraints, float)
    # best_x_old = np.zeros(dimensions, float)
    best_f_old = []
    best_g_old = np.zeros(constraints, float)
    # Outer Optimization Loop
    k_out = 0
    kobj = 0
    iobj = 0
    stop_main_flag = 0
    # run the loop for maxoutiter or till the flag changes
    while ((k_out < maxoutiter) and (stop_main_flag == 0)):
        k_out += 1
        # Inner Optimization Loop
        k_inn = 0
        while k_inn < maxinniter:
            k_inn += 1
            # New Harmony Improvisation (randomly selected and pitched variable values)
            for j in range(dimensions):
                if ((rand.random() < hmcr) or (x0 != [] and k_out == 1)):
                    # Harmony Memory Considering get a random values from the
                    # Harmony memory then pitch adjusted with the par value
                    # get a random value for each decision variable from a random row
                    x_val[j] = HM[int(memsize*rand.random()),j]
                    # Pitch Adjusting
                    if rand.random() <= par:
                        if rand.random() > 0.5:
                            x_val[j] = x_val[j] + rand.random()*bw[j]
                        else:
                            x_val[j] = x_val[j] - rand.random()*bw[j]
                else:
                    # Random Searching
                    x_val[j] = xmin[j] + rand.random()*(xmax[j]-xmin[j])
                # Check for improvisations out of range
                if x_val[j] > xmax[j]:
                    x_val[j] = xmax[j]
                elif x_val[j] < xmin[j]:
                    x_val[j] = xmin[j]
            # Evaluate the objective function with the pitched variables values x_val
            if scale == 1:
                x_tmp = (x_val * space_halflen) + space_centre
            else:
                x_tmp = x_val
            for m in discrete_i:
                x_tmp[m] = floor(x_tmp[m] + 0.5)
            [f_val,g_val] = objfunc(x_tmp)
            nfevals += 1
            # Lagrangian Value
            L_val = f_val
            if constraints > 0:
                # Equality Constraints
                for l in range(neqcons):
                    tau_val[l] = g_val[l]
                # Inequality Constraints
                for l in range(neqcons,constraints):
                    if (rp_val[l] != 0):
                        if (g_val[l] > -lambda_val[l]/(2*rp_val[l])):
                            tau_val[l] = g_val[l]
                        else:
                            tau_val[l] = -lambda_val[l]/(2*rp_val[l])
                    else:
                        tau_val[l] = g_val[l]
                #
                for l in range(constraints):
                    L_val += lambda_val[l]*tau_val[l] + rp_val[l]*tau_val[l]**2
            # feasibility check against the constraint tolerances
            feasible = True
            if constraints > 0:
                for l in range(constraints):
                    if (l < neqcons):
                        if abs(g_val[l]) > etol:
                            feasible = False
                            break
                    else:
                        if g_val[l] > itol:
                            feasible = False
                            break
            # first outer loop iteration or there is initial values for the variables
            if feasible or (k_out == 1 and x0 != []):
                # Harmony Memory Update
                # compare the values of the objective function
                # and get the worst one(max value)
                hmax_num = 0
                hmax = HM[0,dimensions] # value of the objective function of the first set of variables
                for i in range(memsize):
                    if HM[i,dimensions] > hmax:
                        hmax_num = i
                        hmax = HM[i,dimensions]
                # if the obj_func value of the randomly selected pitched variables is
                # better than the worst
                if L_val < hmax: # replace these worst variables values with the pitched values
                    for j in range(dimensions):
                        HM[hmax_num,j] = x_val[j]
                    HM[hmax_num,dimensions] = L_val
                # compare the values of the objective function
                # and get the best one(min value)
                hmin_num = 0
                hmin = HM[0,dimensions]
                for i in range(memsize):
                    if HM[i,dimensions] < hmin:
                        hmin_num = i
                        hmin = HM[i,dimensions]
                # if the obj_func value of the randomly selected pitched variables equals to the best
                if L_val == hmin:
                    best_x_val = x_val
                    best_f_val = f_val
                    best_g_val = g_val
                # Print Inner
                if prtinniter != 0:
                    # output to screen
                    print('%d Inner Iteration of %d Outer Iteration' %(k_inn,k_out))
                    print(L_val)
                    if (scale == 1):
                        x_tmp = (x_val * space_halflen) + space_centre
                    else:
                        x_tmp = x_val
                    for m in discrete_i:
                        x_tmp[m] = floor(x_tmp[m] + 0.5)
                    print(x_tmp)
                    print("f_val = " + str(f_val))
                    print("g_val = " + str(g_val))
                    print(nfevals)
                    if fileout == 1:
                        # output to filename
                        pass
                # a feasible harmony was accepted: leave the inner loop
                break
        # no feasible harmony found on the very first outer iteration without
        # a user-provided start point: re-randomize the memory and try again
        if (best_f_val == [] and k_out == 1 and x0 == []):
            # Re-Initialize Harmony Memory
            HM = np.zeros((memsize,dimensions+1), float)
            for i in range(memsize):
                for j in range(dimensions):
                    HM[i,j] = xmin[j] + rand.random()*(xmax[j]-xmin[j])
            # Re-Initialize Harmony Memory Augmented Lagrange
            for i in range(memsize):
                # Evaluate Objective Function
                if (scale == 1):
                    x_tmp = (HM[i,:-1] * space_halflen) + space_centre
                else:
                    x_tmp = HM[i,:-1]
                for m in discrete_i:
                    x_tmp[m] = floor(x_tmp[m] + 0.5)
                [f_val,g_val] = objfunc(x_tmp)
                nfevals += 1
                # Augmented Lagrangian Value
                L_val = f_val
                if constraints > 0:
                    # Equality Constraints
                    for l in range(neqcons):
                        tau_val[l] = g_val[l]
                    # Inequality Constraints
                    for l in range(neqcons,constraints):
                        if (rp_val[l] != 0):
                            if (g_val[l] > -lambda_val[l]/(2*rp_val[l])):
                                tau_val[l] = g_val[l]
                            else:
                                tau_val[l] = -lambda_val[l]/(2*rp_val[l])
                        else:
                            tau_val[l] = g_val[l]
                    for l in range(constraints):
                        L_val += lambda_val[l]*tau_val[l] + rp_val[l]*tau_val[l]**2
                HM[i,dimensions] = L_val
            # repeat this outer iteration with the fresh memory
            k_out = k_out - 1 #k_out -= 1
            continue
        # Print Outer
        if (prtoutiter != 0 and np.mod(k_out,prtoutiter) == 0):
            # Output to screen
            print(("="*80 + "\n"))
            print(("NUMBER OF ITERATIONS: %d\n" %(k_out)))
            print(("NUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" %(nfevals)))
            print("OBJECTIVE FUNCTION VALUE:")
            print(("\tF = %g\n" %(best_f_val)))
            if (constraints > 0):
                # Equality Constraints
                print("EQUALITY CONSTRAINTS VALUES:")
                for l in range(neqcons):
                    print(("\tG(%d) = %g" %(l,best_g_val[l])))
                # Inequality Constraints
                print("\nINEQUALITY CONSTRAINTS VALUES:")
                for l in range(neqcons,constraints):
                    print(("\tH(%d) = %g" %(l,best_g_val[l])))
                print("\nLAGRANGIAN MULTIPLIERS VALUES:")
                for l in range(constraints):
                    print(("\tL(%d) = %g" %(l,lambda_val[l])))
            print("\nDESIGN VARIABLES VALUES:")
            if (scale == 1):
                x_tmp = (best_x_val[:] * space_halflen) + space_centre
            else:
                x_tmp = best_x_val[:]
            for m in discrete_i:
                x_tmp[m] = floor(x_tmp[m]+0.5)
            text = ''
            for j in range(dimensions):
                text += ("\tP(%d) = %9.3e\t" %(j,x_tmp[j]))
                if (np.mod(j+1,3) == 0):
                    text +=("\n")
            print(text)
            print(("="*80 + "\n"))
            if (fileout == 1):
                # Output to filename
                ofile.write("\n" + "="*80 + "\n")
                ofile.write("\nNUMBER OF ITERATIONS: %d\n" %(k_out))
                ofile.write("\nNUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" %(nfevals))
                ofile.write("\nOBJECTIVE FUNCTION VALUE:\n")
                ofile.write("\tF = %g\n" %(best_f_val))
                if (constraints > 0):
                    # Equality Constraints
                    ofile.write("\nEQUALITY CONSTRAINTS VALUES:\n")
                    for l in range(neqcons):
                        ofile.write("\tG(%d) = %g\n" %(l,best_g_val[l]))
                    # Inequality Constraints
                    ofile.write("\nINEQUALITY CONSTRAINTS VALUES:\n")
                    for l in range(neqcons,constraints):
                        ofile.write("\tH(%d) = %g\n" %(l,best_g_val[l]))
                    ofile.write("\nLAGRANGIAN MULTIPLIERS VALUES:\n")
                    for l in range(constraints):
                        ofile.write("\tL(%d) = %g\n" %(l,lambda_val[l]))
                ofile.write("\nDESIGN VARIABLES VALUES:\n")
                if (scale == 1):
                    x_tmp = (best_x_val[:] * space_halflen) + space_centre
                else:
                    x_tmp = best_x_val[:]
                for m in discrete_i:
                    x_tmp[m] = floor(x_tmp[m]+0.5)
                text = ''
                for j in range(dimensions):
                    text += ("\tP(%d) = %9.3e\t" %(j,x_tmp[j]))
                    if (np.mod(j+1,3) == 0):
                        text +=("\n")
                ofile.write(text)
                ofile.write("\n" + "="*80 + "\n")
                ofile.flush()
        # Test Constraint convergence
        stop_constraints_flag = 0
        if constraints == 0:
            stop_constraints_flag = 1
        else:
            for l in range(neqcons):
                if (abs(best_g_val[l]) <= etol):
                    stop_constraints_flag += 1
            for l in range(neqcons,constraints):
                if (best_g_val[l] <= itol):
                    stop_constraints_flag += 1
            if (stop_constraints_flag == constraints):
                stop_constraints_flag = 1
            else:
                stop_constraints_flag = 0
        # Test Position and Function convergence
        if best_f_old == []:
            best_f_old = best_f_val
        stop_criteria_flag = 0
        if stopcriteria == 1:
            # Absolute Change in Objective
            absfdiff = abs(best_f_val - best_f_old)
            if absfdiff <= atol:
                kobj += 1
            else:
                kobj = 0
            # Relative Change in Objective
            if abs(best_f_old) > 1e-10:
                if abs(absfdiff/abs(best_f_old)) <= rtol:
                    iobj += 1
                else:
                    iobj = 0
            #
            best_f_old = best_f_val
            #
            if (kobj > stopiters or iobj > stopiters):
                stop_criteria_flag = 1
            else:
                stop_criteria_flag = 0
        # Test Convergence
        if stop_constraints_flag == 1 and stop_criteria_flag == 1:
            stop_main_flag = 1
        else:
            stop_main_flag = 0
        # Update Augmented Lagrangian Terms
        if stop_main_flag == 0:
            if constraints > 0:
                # Tau for Best
                for l in range(neqcons):
                    tau_val[l] = best_g_val[l]
                for l in range(neqcons,constraints):
                    if (best_g_val[l] > -lambda_val[l]/(2*rp_val[l])):
                        tau_val[l] = best_g_val[l]
                    else:
                        tau_val[l] = -lambda_val[l]/(2*rp_val[l])
                # Update Lagrange Multiplier
                for l in range(constraints):
                    lambda_old[l] = lambda_val[l]
                    lambda_val[l] += 2*rp_val[l]*tau_val[l]
                # Update Penalty Factor
                for l in range(neqcons):
                    if (abs(best_g_val[l]) > abs(best_g_old[l]) and abs(best_g_val[l]) > etol):
                        rp_val[l] = 2.0*rp_val[l]
                    elif (abs(best_g_val[l]) <= etol):
                        rp_val[l] = 0.5*rp_val[l]
                for l in range(neqcons,constraints):
                    if (best_g_val[l] > best_g_old[l] and best_g_val[l] > itol):
                        rp_val[l] = 2.0*rp_val[l]
                    elif (best_g_val[l] <= itol):
                        rp_val[l] = 0.5*rp_val[l]
                # Apply Lower Bounds on rp
                for l in range(neqcons):
                    if (rp_val[l] < 0.5*(abs(lambda_val[l])/etol)**0.5):
                        rp_val[l] = 0.5*(abs(lambda_val[l])/etol)**0.5
                for l in range(neqcons,constraints):
                    if (rp_val[l] < 0.5*(abs(lambda_val[l])/itol)**0.5):
                        rp_val[l] = 0.5*(abs(lambda_val[l])/itol)**0.5
                for l in range(constraints):
                    if (rp_val[l] < 1):
                        rp_val[l] = 1
                #
                best_g_old[:] = best_g_val[:]
    # Print Results
    if (prtoutiter != 0):
        # Output to screen
        print(("="*80 + "\n"))
        print(("RANDOM SEED VALUE: %.8f\n" %(rseed)))
        print(("NUMBER OF ITERATIONS: %d\n" %(k_out)))
        print(("NUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" %(nfevals)))
        print("OBJECTIVE FUNCTION VALUE:")
        print(("\tF = %g\n" %(best_f_val)))
        if (constraints > 0):
            # Equality Constraints
            print("EQUALITY CONSTRAINTS VALUES:")
            for l in range(neqcons):
                print(("\tG(%d) = %g" %(l,best_g_val[l])))
            # Inequality Constraints
            print("\nINEQUALITY CONSTRAINTS VALUES:")
            for l in range(neqcons,constraints):
                print(("\tH(%d) = %g" %(l,best_g_val[l])))
            print("\nLAGRANGIAN MULTIPLIERS VALUES:")
            for l in range(constraints):
                print(("\tL(%d) = %g" %(l,float(lambda_val[l]))))
        print("\nDESIGN VARIABLES VALUES:")
        if (scale == 1):
            x_tmp = (best_x_val[:] * space_halflen) + space_centre
        else:
            x_tmp = best_x_val[:]
        for m in discrete_i:
            x_tmp[m] = floor(x_tmp[m]+0.5)
        text = ''
        for j in range(dimensions):
            text += ("\tP(%d) = %9.3e\t" %(j,x_tmp[j]))
            if (np.mod(j+1,3) == 0):
                text +=("\n")
        print(text)
        print(("="*80 + "\n"))
    if (fileout == 1):
        # Output to filename
        ofile.write("\n" + "="*80 + "\n")
        ofile.write("RANDOM SEED VALUE: %.8f\n" %(rseed))
        ofile.write("\nNUMBER OF ITERATIONS: %d\n" %(k_out))
        ofile.write("\nNUMBER OF OBJECTIVE FUNCTION EVALUATIONS: %d\n" %(nfevals))
        ofile.write("\nOBJECTIVE FUNCTION VALUE:\n")
        ofile.write("\tF = %g\n" %(best_f_val))
        if (constraints > 0):
            # Equality Constraints
            ofile.write("\nEQUALITY CONSTRAINTS VALUES:\n")
            for l in range(neqcons):
                ofile.write("\tG(%d) = %g\n" %(l,best_g_val[l]))
            # Inequality Constraints
            ofile.write("\nINEQUALITY CONSTRAINTS VALUES:\n")
            for l in range(neqcons,constraints):
                ofile.write("\tH(%d) = %g\n" %(l,best_g_val[l]))
            ofile.write("\nLAGRANGIAN MULTIPLIERS VALUES:\n")
            for l in range(constraints):
                ofile.write("\tL(%d) = %g\n" %(l,float(lambda_val[l])))
        ofile.write("\nDESIGN VARIABLES VALUES:\n")
        if (scale == 1):
            x_tmp = (best_x_val[:] * space_halflen) + space_centre
        else:
            x_tmp = best_x_val[:]
        for m in discrete_i:
            x_tmp[m] = floor(x_tmp[m]+0.5)
        text = ''
        for j in range(dimensions):
            text += ("\tP(%d) = %9.3e\t" %(j,x_tmp[j]))
            if (np.mod(j+1,3) == 0):
                text +=("\n")
        ofile.write(text)
        ofile.write("\n" + "="*80 + "\n")
        ofile.close()
    # Results
    if (scale == 1):
        opt_x = (best_x_val * space_halflen) + space_centre
    else:
        opt_x = best_x_val
    for m in discrete_i:
        opt_x[m] = int(floor(opt_x[m] + 0.5))
    opt_f = best_f_val
    opt_g = best_g_val
    opt_lambda = lambda_val[:]
    return opt_x,opt_f,opt_g,opt_lambda,nfevals,'%.8f' %(rseed)
def Chso(ND,nc,nec,xtype,x0,lb,ub,bw,HMS,HMCR,PAR,maxIter,printout,rseed,objfunc):
    """
    CHSO function - Python Version of the Constrained Harmony Search Optimizer

    Minimizes objfunc(x) over lb <= x <= ub; an improvisation is accepted
    into the harmony memory only when the sum of its constraint values is <= 0.

    Arguments:
        ND - number of design variables
        nc, nec, xtype, x0 - kept for interface parity with HS (unused here)
        lb, ub - per-variable lower/upper bounds
        bw - per-variable bandwidth divisor; the pitch step is (ub-lb)/bw
        HMS - harmony memory size
        HMCR - harmony memory considering rate
        PAR - pitch adjusting rate
        maxIter - number of improvisation iterations
        printout - 1 to print progress
        rseed - random seed; pass {} to seed from the system clock
        objfunc - callable x -> [f, g] with f the objective value and g a
                  sequence of constraint values

    Returns:
        (opt_x, opt_f, opt_g, numFunEvals, seed_string)
    """
    # Set random number seed
    rand = random.Random()
    if rseed == {}:
        rseed = time.time()
    # Bug fix: the seed was generated but never applied, so runs were not
    # reproducible for a given rseed
    rand.seed(rseed)
    # Initialize harmony memory; track the best initial harmony so the
    # returned optimum is always defined (bug fix: opt_x/opt_f/opt_g could
    # previously be referenced before assignment when no feasible
    # improvisation ever matched the memory minimum)
    HM = np.zeros((HMS,ND+1), float)
    opt_x = None
    opt_f = None
    opt_g = None
    for i in range(HMS):
        for j in range(ND):
            HM[i,j] = lb[j] + rand.random()*(ub[j] - lb[j])
        [f0,gs0] = objfunc(HM[i,:-1])
        HM[i,ND] = f0
        if opt_f is None or f0 < opt_f:
            opt_x = np.array(HM[i,:-1])
            opt_f = f0
            opt_g = gs0
    # Print Initial Header
    if (printout == 1):
        #print(' Iteration Func-count min f(x)')
        print(' Iteration min f(x)')
    # Iterations Loop
    x = np.zeros(ND,float)
    numFunEvals = 0
    k = 0
    status = 0
    while status != 1:
        # New Harmony Improvisation
        for j in range(ND):
            if (rand.random() >= HMCR):
                # Random Searching
                x[j] = lb[j] + rand.random()*(ub[j] - lb[j])
            else:
                # Harmony Memory Considering
                x[j] = HM[int(HMS*rand.random()),j]
                # Pitch Adjusting
                if (rand.random() <= PAR):
                    if (rand.random() > 0.5):
                        x[j] = x[j] + rand.random()*((ub[j] - lb[j])/bw[j])
                    else:
                        x[j] = x[j] - rand.random()*((ub[j] - lb[j])/bw[j])
        [fval,gvals] = objfunc(x)
        numFunEvals += 1
        # accept only aggregate-feasible improvisations
        if (sum(gvals) <= 0):
            # Harmony Memory Update: find the worst stored harmony
            hmax_num = 0
            hmax = HM[0,ND]
            for i in range(HMS):
                if (HM[i,ND] > hmax):
                    hmax_num = i
                    hmax = HM[i,ND]
            # replace the worst harmony when the new one is better
            if (fval < hmax):
                for j in range(ND):
                    HM[hmax_num,j] = x[j]
                HM[hmax_num,ND] = fval
            # find the best stored harmony
            hmin_num = 0
            hmin = HM[0,ND]
            for i in range(HMS):
                if (HM[i,ND] < hmin):
                    hmin_num = i
                    hmin = HM[i,ND]
            # Print
            if (fval == hmin):
                # Bug fix: store a copy - x is mutated on every iteration, so
                # keeping a bare reference corrupted the returned optimum
                opt_x = x.copy()
                opt_f = fval
                opt_g = gvals
                if (printout == 1):
                    print(('%i,%f' %(k,fval)))
        # Test Convergence
        if k == maxIter-1:
            if (printout == 1):
                print('\nMaximum number of iterations exceeded\n')
                print('increase OPTIONS.MaxIter\n')
            status = 1
        else:
            k += 1
    # Print
    if (printout == 1):
        print('\nNumber of function evaluations = %f\n' %(numFunEvals))
    return opt_x,opt_f,opt_g,numFunEvals,'%.8f' %(rseed)
# Optimizers Test
if __name__ == '__main__':
    print('Testing ...')
    # Bug fix: the previous smoke test called HS() with no arguments, which
    # always raised a TypeError (HS takes 27 positional arguments). Exercise
    # the constrained optimizer on a 1-D bound-constrained quadratic instead.
    def _quadratic(xs):
        """Toy objective: f = x^2 with one always-satisfied constraint."""
        return [float(xs[0]**2), [-1.0]]
    result = Chso(1, 1, 0, [0], [], [-1.0], [1.0], [10.0], 5, 0.9, 0.3,
                  100, 0, 1234, _quadratic)
    print(result)
|
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import calcular as cl
import checkdiametro as cd
# Funciones
def proceso():
    """Read the form inputs, run the bar calculation and show the result.

    Shows an error dialog when the length field is empty or non-numeric.
    """
    try:
        entrada = ent_lg.get()
        incluir_traslape = Radiobtn1.get()
        diametro = cd.dato(Radiobtn2.get())
        if entrada == '':
            tk.messagebox.showerror(title='ERROR!!!', message='Debe ingresar un numero.')
            return
        texto.set(cl.datos(float(entrada), incluir_traslape, diametro))
    except ValueError:
        tk.messagebox.showerror(title='Error!!!', message='Ingrese un numero.')
# GUI
# color palette for the window background
myColor = 'cornflowerblue'
myColor1 = 'red'
root = tk.Tk()
root.title('BARRA12')
root.geometry('250x250')
root.resizable(False, False)
root.config(bg=myColor)
root.iconbitmap('Degree.ico')
# root.overrideredirect(True) # turns off title bar
# root.attributes('-type', 'splash')
# root.attributes('-topmost', True) # on top of every other window
# root.attributes('-alpha', 0.5) # makes the window transparent
# root.attributes("-fullscreen", True) # takes up the whole screen
# root.attributes('-zoomed', '1') # DOES NOT WORK
# root.attributes("-toolwindow", 1) # hides maximize/minimize and the icon; values 0 and 1
# root.lift()
# root.lower()
# root.focus_force()
# UI options
paddings = {'padx': 5, 'pady': 5}
entry_font = {'font': ('Arial Black', 11), 'width': '10'}
# Widget styles
style = ttk.Style()
style.configure('S.Label', padding=(0, 5, 0, 5), foreground='black', background=myColor, font=('Helvetica', 11))
style.layout('E.TEntry', [('Entry.plain.field', {'children': [('Entry.background', {'children': [('Entry.padding', {'children': [('Entry.textarea', {'sticky': 'nswe'})], 'sticky': 'nswe'})], 'sticky': 'nswe'})], 'border': '1', 'sticky': 'nswe'})])
style.configure('E.TEntry', foreground='black', background=myColor, fieldbackground='gainsboro')
style.configure('C.TButton', padding=(0, 5, 0, 5), relief='flat', overrelief='raised', background='blue', font=('Helvetica', 11))
style.configure('W.TRadiobutton', background=myColor, foreground='black')
# Tk control variables shared with proceso()
numero = tk.StringVar()
texto = tk.StringVar()
Radiobtn1 = tk.IntVar()
Radiobtn2 = tk.IntVar()
# Widgets
lbl_lg = ttk.Label(root, text='Longitud:', style='S.Label')
lbl_lg.place(x=10, y=10)
ent_lg = ttk.Entry(root, textvariable=numero, style='E.TEntry', **entry_font)
ent_lg.place(x=80, y=7, relwidth=0.6)
ent_lg.focus()
lbl_traslape = ttk.Label(root, text='Incluir traslape:', style='S.Label')
lbl_traslape.place(x=10, y=40)
# NOTE: .place() returns None, so the rb* names below hold None, not widgets
rbNo = ttk.Radiobutton(root, text='NO', variable=Radiobtn1, style='W.TRadiobutton', value=0).place(x=110, y=40)
rbSi = ttk.Radiobutton(root, text='SI', variable=Radiobtn1, style='W.TRadiobutton', value=1).place(x=160, y=40)
lbl_diametro = ttk.Label(root, text='Diametro:', style='S.Label')
lbl_diametro.place(x=10, y=70)
rb8 = ttk.Radiobutton(root, text='8', variable=Radiobtn2, style='W.TRadiobutton', value=1).place(x=10, y=90)
rb12 = ttk.Radiobutton(root, text='12', variable=Radiobtn2, style='W.TRadiobutton', value=0).place(x=50, y=90)
rb14 = ttk.Radiobutton(root, text='14', variable=Radiobtn2, style='W.TRadiobutton', value=2).place(x=100, y=90)
rb16 = ttk.Radiobutton(root, text='16', variable=Radiobtn2, style='W.TRadiobutton', value=3).place(x=150, y=90)
rb18 = ttk.Radiobutton(root, text='18', variable=Radiobtn2, style='W.TRadiobutton', value=4).place(x=200, y=90)
rb20 = ttk.Radiobutton(root, text='20', variable=Radiobtn2, style='W.TRadiobutton', value=5).place(x=10, y=120)
btn_cal = ttk.Button(root, text='CALCULAR', style='C.TButton', command=proceso)
btn_cal.place(x=10, y=150, relx=0.25)
lbl_cal = ttk.Label(root, textvariable=texto, wraplength=230, style='S.Label')
lbl_cal.place(x=10, y=200)
root.mainloop()
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Slicing NDDatasets
#
# This tutorial shows how to handle NDDatasets using python slicing. As prerequisite, the user is
# expected to have read the [Import Tutorials](../importexport/import.html).
# %%
import numpy as np
import spectrochempy as scp
# %% [markdown]
# ## What is the slicing ?
#
# The slicing of a list or an array means taking elements from a given index (or set of indexes) to another index (or set of indexes). Slicing is specified using the colon operator `:` with a `from` and `to` index before and after the first colon, and a `step` after the second colon. Hence a slice of the object `X` will be set as:
#
# `X[from:to:step]`
#
# and will extend from the ‘from’ index, ends one item before the ‘to’ index and with an increment of `step` between each index. When not given the default values are respectively 0 (i.e. starts at the 1st index), length in the dimension (stops at the last index), and 1.
#
# Let's first illustrate the concept on a 1D example:
# %%
X = np.arange(10) # generates a 1D array of 10 elements from 0 to 9
print(X)
print(X[2:5]) # selects all elements from 2 to 4
print(X[::2]) # selects one out of two elements
print(X[:-3]) # a negative index will be counted from the end of the array
print(X[::-2]) # a negative step will slice backward, starting from 'to', ending at 'from'
# %% [markdown]
# The same applies to multidimensional arrays by indicating slices separated by commas:
# %%
X = np.random.rand(10, 10) # generates a 10x10 array filled with random values
print(X.shape)
print(X[2:5, :].shape) # slices along the 1st dimension, X[2:5,] is equivalent
print(X[2:5, ::2].shape) # same slice along 1st dimension and takes one column out of two along the second
# %% [markdown]
# ## Slicing of NDDatasets
#
# Let's import a group of IR spectra, look at its content and plot it:
# %%
X = scp.read_omnic('irdata/CO@Mo_Al2O3.SPG', description='CO adsorption, diff spectra')
X.y = (X.y - X[0].y).to("minute")
X
# %%
subplot = X.plot() # assignment avoids the display of the object address (<matplotlib.axes._subplots.AxesSubplot at 0x294076b93c8> or similar)
# %% [markdown]
# ### Slicing with indexes
#
# The classical slicing, using integers, can be used. For instance, along the 1st dimension:
# %%
print(X[:4]) # selects the first four spectra
print(X[-3:]) # selects the last three spectra
print(X[::2]) # selects one spectrum out of 2
# %% [markdown]
# The same can be made along the second dimension, simultaneously or not with the first one. For instance
# %%
print(X[:, ::2]) # all spectra, one wavenumber out of 2 (note the bug: X[,::2] generates an error)
print(X[0:3, 200:1000:2]) # 3 first spectra, one wavenumber out of 2, from index 200 to 1000
# %% [markdown]
# Would you easily guess which wavenumber range have been actually selected ?.... probably not because the relationship between the index and the wavenumber is not straightforward as it depends on the value of the first wavenumber, the wavenumber spacing, and whether the wavenumbers are arranged in ascending or descending order...
# Here is the answer:
# %%
X[:, 200:1000:2].x # as the Coord can be sliced, the same is obtained with: X.x[200:1000:2]
# %% [markdown]
# ### Slicing with coordinates
#
# Now the spectroscopist is generally interested in a particular region of the spectrum, for instance, 2300-1900 cm$^{-1}$. Can you easily guess the indexes that one should use to select this region ? probably not without a calculator...
#
# Fortunately, a simple mechanism has been implemented in spectrochempy for this purpose: the use of floats instead of integers will slice the NDDataset at the corresponding coordinates. For instance to select the 2300-1900 cm$^{-1}$ region:
# %%
subplot = X[:, 2300.0:1900.0:].plot()
# %% [markdown]
# The same mechanism can be used along the first dimension (`y`). For instance, to select and plot the same region and the spectra recorded between 80 and 180 minutes:
# %%
subplot = X[80.:180., 2300.:1900.].plot() # Note that a decimal point is enough to get a float
# a warning is raised if one or several values are beyond the limits
# %% [markdown]
# Similarly, the spectrum recorded at the time the closest to 60 mins can be selected using a float:
# %%
X[60.].y # X[60.] slices the spectrum, .y returns the corresponding `y` axis.
# %% [markdown]
# --- End of Tutorial ---
# (todo: add advanced slicing by array of indexes, array of bool, )
|
<reponame>popfido/models
# coding=utf-8
"""
Tensorflow implementation of Doc2VecC algorithm wrapper class
:author: <NAME> (<EMAIL>)
:refer: https://openreview.net/pdf?id=B1Igu2ogg
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
from option import Option
import math
import time
import collections
from itertools import compress
import random
# NOTE(review): defined but not referenced in this file chunk — presumably an
# upper bound on the per-document word sample size used elsewhere; confirm.
MAX_SENTENCE_SAMPLE = 100
def generate_batch_doc2VecC_tail(doc_ids, word_ids, doc_len, batch_size, window_size, sample_size):
    """
    Batch generator for Doc2VecC training.

    For every corpus position whose preceding ``window_size`` words all belong
    to the same document, emits a training example made of the context words
    plus the document id, the target word, and ``sample_size`` words sampled
    from the whole document (the global-context/corruption component).

    Note: ``data_index`` wraps around with a modulo, so the generator cycles
    over the corpus indefinitely; consume it with a bounded loop.

    :param doc_ids: list of document indices, aligned with word_ids
    :param word_ids: list of word indices
    :param doc_len: accumulated word offsets per doc; document d spans
                    word_ids[doc_len[d]:doc_len[d + 1]]
    :param batch_size: number of examples in each mini-batch
    :param window_size: number of context words before the target word
    :param sample_size: number of words sampled from the document per example
    :return: yields tuples (batches, labels, batch_doc) with shapes
             (batch_size, window_size + 1), (batch_size, 1) and
             (batch_size, sample_size)
    """
    data_index = 0
    assert batch_size % window_size == 0
    span = window_size + 1  # context words plus the target word
    buffer = collections.deque(maxlen=span)
    buffer_doc = collections.deque(maxlen=span)
    batches = np.ndarray(shape=(batch_size, window_size + 1), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    batch_doc = np.ndarray(shape=(batch_size, sample_size), dtype=np.int32)
    mask = [1] * span
    mask[-1] = 0  # drop the target word from the context slice
    i = 0
    while data_index < len(word_ids):
        # only emit an example when the whole window lies in a single document
        if len(set(buffer_doc)) == 1 and len(buffer_doc) == span:
            doc_id = buffer_doc[-1]
            batches[i, :] = list(compress(buffer, mask)) + [doc_id]
            labels[i, 0] = buffer[-1]
            batch_doc[i, :] = random.sample(word_ids[doc_len[doc_id]:doc_len[doc_id + 1]],
                                            sample_size)
            i += 1
        buffer.append(word_ids[data_index])
        buffer_doc.append(doc_ids[data_index])
        data_index = (data_index + 1) % len(word_ids)
        if i == batch_size:
            yield batches, labels, batch_doc
            # Bug fix: reset the row cursor and hand out fresh arrays; the old
            # code never reset i, so filling the second batch raised an
            # IndexError, and reusing the arrays would have mutated data the
            # caller already received.
            i = 0
            batches = np.ndarray(shape=(batch_size, window_size + 1), dtype=np.int32)
            labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
            batch_doc = np.ndarray(shape=(batch_size, sample_size), dtype=np.int32)
class Doc2VecC(object):
    """Doc2VecC document-embedding model.

    TensorFlow (v1 graph API) implementation wrapper: holds the training
    options, vocabulary bookkeeping and the graph tensors built by
    build_graph(). Reference: https://openreview.net/pdf?id=B1Igu2ogg
    """
def __init__(self, options):
assert (isinstance(options, Option))
self._options = options
self._session = None
self.saver = None
self._cost = None
self._optimizer = None
self._word_embeddings = None
self._para_embeddings = None
self.vocab = None
self.vocab_size = 0
self.document_size = 0
self.__inputs, self.__labels, self.__lr = None, None, None
self.__cost = None
self.__optimizer = None
self.__summary = None
self.__normalized_word_embeddings = None
def setVocab(self, vocab):
self.vocab = vocab
self.vocab_size = len(vocab)
return self
def setDocSize(self, doc_size):
assert (isinstance(doc_size, int))
self.document_size = doc_size
return self
def useSubSampling(self, switch=True, threshold=1e-5):
self.use_sub_sampling = switch
self.sub_sampling_threshold = 1e-5
return self
    def _get_batches(self, doc_ids, word_ids):
        # Build the option-configured batch generator for this corpus.
        # NOTE(review): the third argument of generate_batch_doc2VecC_tail is
        # doc_len (accumulated per-document word offsets), but doc_ids is
        # passed here — this looks like a bug; verify the caller and supply
        # the document-offset array instead.
        opts = self._options
        return generate_batch_doc2VecC_tail(doc_ids, word_ids, doc_ids, opts.batch_size, opts.window_size, opts.sentence_sample)
def _get_inputs(self):
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple of Placeholders (input, targets, learning rate)
"""
opts = self._options
inputs_ = tf.placeholder(tf.int32, [None, opts.window_size], name='input')
doc_inputs_ = tf.placeholder(tf.int32, [None, None], name='doc_input')
labels_ = tf.placeholder(tf.int32, [None, 1], name='label')
lr_ = tf.placeholder(tf.float32, name='learning_rate')
return inputs_, doc_inputs_, labels_, lr_
def _get_embedding_layer(self, input_data, doc_input_data):
"""
Create embedding for <input_data> and <doc_input_data>.
:param input_data: TF placeholder for text input.
:return: Embedded input tensor.
"""
opts = self._options
word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))
embed = []
temp = tf.zeros([opts.batch_size, opts.embed_dim])
embed_d = []
for n in range(opts.sentence_sample):
temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))
embed_d.append(temp)
if opts.concat == 'True':
combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim
for j in range(opts.window_size):
embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])
embed.append(embed_w)
embed.append(embed_d)
else:
combined_embed_vector_length = opts.embed_dim
embed_w = tf.zeros([opts.batch_size, opts.embed_dim])
for j in range(opts.window_size):
embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])
embed_w += embed_d
embed.append(embed_w)
return tf.concat(embed, 1), word_embedding, combined_embed_vector_length
def build_graph(self):
"""
Create Graph and Initialize tf Session for training
"""
train_graph = tf.Graph()
opts = self._options
with train_graph.as_default():
self.__inputs, self.__doc_inputs, self.__labels, self.__lr = self._get_inputs()
embed, word_embeddings, combined_embed_vector_length = self._get_embedding_layer(
self.__inputs, self.__doc_inputs)
norm_w = tf.sqrt(tf.reduce_sum(tf.square(word_embeddings), 1, keep_dims=True))
self.__normalized_word_embeddings = word_embeddings / norm_w
weights = tf.Variable(
tf.truncated_normal((self.vocab_size, combined_embed_vector_length),
stddev=1.0 / math.sqrt(combined_embed_vector_length))
)
biases = tf.Variable(tf.zeros(self.vocab_size))
if opts.loss == 'softmax':
loss = tf.nn.sampled_softmax_loss(weights=weights,
biases=biases,
labels=self.__labels,
inputs=embed,
num_sampled=opts.negative_sample_size,
num_classes=opts.vocab_size)
tf.summary.scalar("Softmax loss", loss)
else:
loss = tf.nn.nce_loss(weights=weights,
biases=biases,
labels=self.__labels,
inputs=embed,
num_sampled=opts.negative_sample_size,
num_classes=opts.vocab_size)
tf.summary.scalar("NCE loss", loss)
self.__cost = tf.reduce_mean(loss)
if opts.train_method == 'Adam':
self.__optimizer = tf.train.AdamOptimizer(self.__lr).minimize(self.__cost)
else:
self.__optimizer = tf.train.GradientDescentOptimizer(self.__lr).minimize(self.__cost)
self.__summary = tf.summary.merge_all()
self._session = tf.Session(graph=train_graph)
self.saver = tf.train.Saver()
return self
def fit(self, docs):
opts = self._options
iteration = 1
loss = 0
doc_ids = [[i] * len(j) for i, j in enumerate(docs)]
doc_ids = [item for sublist in doc_ids for item in sublist]
doc_lens = [0] + [len(i) for i in docs]
for i in range(1, len(doc_lens)):
doc_lens[i] += doc_lens[i-1]
word_ids = [item for sublist in docs for item in sublist]
with self._session as session:
session.run(tf.global_variables_initializer())
for e in range(1, opts.epochs_to_train + 11):
batches = self._get_batches(doc_ids, word_ids, doc_lens)
start = time.time()
lr = opts.learning_rate if e <= opts.epochs_to_train else opts.learning_rate * (
e - opts.epochs_to_train / 10)
for x, y, m, l in batches:
opts.doc_batch_len = l
feed = {self.__inputs: x,
self.__labels: y,
self.__doc_inputs: m,
self.__lr: lr}
train_loss, _ = session.run([self.__cost, self.__optimizer], feed_dict=feed)
loss += train_loss
if iteration % opts.statistics_interval == 0:
end = time.time()
print("Epoch {}/{}".format(e, opts.epochs_to_train + 11),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss * 1.0 / opts.statistics_interval),
"{:.4f} sec/batch".format((end - start) * 1.0 / opts.statistics_interval))
loss = 0
start = time.time()
if iteration % opts.checkpoint_interval == 0:
self.saver.save(self._session,
"doc2vecc",
global_step=iteration)
iteration += 1
self._word_embeddings = self.__normalized_word_embeddings.eval()
self.saver(self._session, "final_doc2vecc")
def transform_w(self, word_index):
return self._word_embeddings[word_index, :]
def transform_doc(self, word_indexs):
doc_embeddings = [self._word_embeddings[i, :] for i in word_indexs]
return doc_embeddings |
<gh_stars>0
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bayesian Optimization
# +
from __future__ import absolute_import, division, print_function, unicode_literals
from tensorflow_examples.models.pix2pix import pix2pix
from sten import Sten
from matplotlib.image import imread
from IPython.display import clear_output
from tqdm.auto import tqdm, trange
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
import os
import time
import glob
import random
import sys
import numpy as np
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
AUTOTUNE = tf.data.experimental.AUTOTUNE
# -
config = ConfigProto()
# Let TF allocate GPU memory on demand instead of reserving it all up front.
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# ## Modify hyper-parameters
# +
# Bit-depth of the steganographic encoding under test, taken from the CLI.
STEN_X = int(sys.argv[1])
# Exclusive upper bounds for the image-grid indices i (1..MAX0-1), j (1..MAX1-1).
MAX0 = 11
MAX1 = 6
# Bayesian-optimization search bounds (small ranges for quick runs;
# the commented values are the full-scale settings).
EPOCHS_RANGE = (2,5)
#(200, 500)
LAMBDA_RANGE = (8, 12)
STEPS_PER_EPOCH_RANGE = (2,3)
#(10, 50)
# -
# ## Helper functions
def discriminator_loss(real, generated):
    """Standard GAN discriminator loss: real logits scored against ones,
    fake logits against zeros, halved to slow the discriminator relative
    to the generators."""
    loss_on_real = loss_obj(tf.ones_like(real), real)
    loss_on_fake = loss_obj(tf.zeros_like(generated), generated)
    return (loss_on_real + loss_on_fake) * 0.5
def generator_loss(generated):
    """Generator's adversarial loss: fake logits scored against ones."""
    all_real_labels = tf.ones_like(generated)
    return loss_obj(all_real_labels, generated)
def calc_cycle_loss(LAMBDA, real_image, cycled_image):
    """Cycle-consistency loss: LAMBDA-weighted mean absolute error between
    an image and its round-trip reconstruction (LAMBDA truncated to int)."""
    mean_abs_err = tf.reduce_mean(tf.abs(real_image - cycled_image))
    return int(LAMBDA) * mean_abs_err
def identity_loss(LAMBDA, real_image, same_image):
    """Identity loss: penalizes a generator for altering an image already in
    its own target domain; weighted at half the cycle-loss weight."""
    mean_abs_err = tf.reduce_mean(tf.abs(real_image - same_image))
    return int(LAMBDA) * 0.5 * mean_abs_err
@tf.function
def train_step(LAMBDA,real_x, real_y):
    # One CycleGAN training step on a single (x, y) image pair.
    # persistent=True: the same tape is queried four times below.
    with tf.GradientTape(persistent=True) as tape:
        # Forward cycle x -> y -> x and backward cycle y -> x -> y.
        fake_y = generator_g(real_x, training=True)
        cycled_x = generator_f(fake_y, training=True)
        fake_x = generator_f(real_y, training=True)
        cycled_y = generator_g(fake_x, training=True)
        # Identity mappings: each generator applied to its own target domain.
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)
        # Discriminator scores for real and generated samples in each domain.
        disc_real_x = discriminator_x(real_x, training=True)
        disc_real_y = discriminator_y(real_y, training=True)
        disc_fake_x = discriminator_x(fake_x, training=True)
        disc_fake_y = discriminator_y(fake_y, training=True)
        # Adversarial + shared cycle + identity terms for each generator.
        gen_g_loss = generator_loss(disc_fake_y)
        gen_f_loss = generator_loss(disc_fake_x)
        total_cycle_loss = calc_cycle_loss(LAMBDA,real_x, cycled_x) + calc_cycle_loss(LAMBDA,real_y, cycled_y)
        total_gen_g_loss = gen_g_loss + total_cycle_loss + identity_loss(LAMBDA, real_y, same_y)
        total_gen_f_loss = gen_f_loss + total_cycle_loss + identity_loss(LAMBDA, real_x, same_x)
        disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
        disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)
    # Gradients must be taken outside the tape context; the persistent tape
    # allows one gradient() call per loss.
    generator_g_gradients = tape.gradient(total_gen_g_loss,generator_g.trainable_variables)
    generator_f_gradients = tape.gradient(total_gen_f_loss, generator_f.trainable_variables)
    discriminator_x_gradients = tape.gradient(disc_x_loss,discriminator_x.trainable_variables)
    discriminator_y_gradients = tape.gradient(disc_y_loss, discriminator_y.trainable_variables)
    # Apply updates with each network's own optimizer.
    generator_g_optimizer.apply_gradients(zip(generator_g_gradients,generator_g.trainable_variables))
    generator_f_optimizer.apply_gradients(zip(generator_f_gradients, generator_f.trainable_variables))
    discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,discriminator_x.trainable_variables))
    discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,discriminator_y.trainable_variables))
# ## Loss for Bayesian Optimization
def mse(imageA, imageB):
    """Negated sum of squared differences, normalized by rows * cols.

    Negated because BayesianOptimization maximizes, so driving this toward
    zero drives the reconstruction error down.
    """
    diff = imageA.astype("float") - imageB.astype("float")
    n_pixels = float(imageA.shape[0] * imageA.shape[1])
    return -np.sum(diff ** 2) / n_pixels
# ## Create the model
# +
OUTPUT_CHANNELS = 3
# CycleGAN components: generator_g maps X -> Y, generator_f maps Y -> X;
# one PatchGAN discriminator per domain.
generator_g = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')
generator_f = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')
discriminator_x = pix2pix.discriminator(norm_type='instancenorm', target=False)
discriminator_y = pix2pix.discriminator(norm_type='instancenorm', target=False)
# -
# Adversarial loss on raw logits.
loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# +
# One Adam optimizer per network (standard CycleGAN settings).
generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
# -
# ## Bayesian Optimization
#
# The Black_Box function parameters are different hyperparameters. The black-box function returns the average difference between the input and output as the metric to maximize.
def Black_Box(EPOCHS,LAMBDA,steps_per_epochs):
    """Objective for Bayesian optimization.

    Trains the CycleGAN for EPOCHS * steps_per_epochs random image pairs with
    cycle weight LAMBDA, then returns the average (negated) per-pixel squared
    error of generator_g over the whole (i, j) image grid.  Higher is better,
    since BayesianOptimization maximizes.
    """
    for epoch in trange(int(EPOCHS),desc='epochs'):
        for _ in trange(int(steps_per_epochs), desc='steps_per_epochs'):
            # Pick a random (i, j) cell of the image grid as the training pair.
            i = np.random.randint(1,MAX0)
            j = np.random.randint(1,MAX1)
            name = str(i) + '_' + str(j)
            image_x = np.load(os.getcwd() + "/encodedArray/bit_{0}/{1}.npy".format(STEN_X, name))
            image_y = np.load(os.getcwd() + "/decodedArray/bit_{0}/{1}.npy".format(STEN_X, name))
            train_step(LAMBDA,np.asarray([image_x/255.0], dtype='float32'), np.asarray([image_y/255.0], dtype='float32'))
    # Evaluate on every grid cell.  BUG FIX: the accumulator was named `sum`,
    # shadowing the builtin.
    total = 0.0
    for i in trange(1,MAX0):
        for j in trange(1,MAX1):
            name = str(i) + '_' + str(j)
            image_x = np.load(os.getcwd() + "/encodedArray/bit_{0}/{1}.npy".format(STEN_X, name))
            image_y = np.load(os.getcwd() + "/decodedArray/bit_{0}/{1}.npy".format(STEN_X, name))
            total += mse(generator_g.predict(np.asarray([image_x/255.0], dtype='float32')), np.asarray([image_y/255.0], dtype='float32'))
    avg = total / ((MAX0-1)*(MAX1-1))
    return avg
# Hyper-parameter search space handed to the Bayesian optimizer.
bounds = {
    'EPOCHS': EPOCHS_RANGE,
    'LAMBDA': LAMBDA_RANGE,
    'steps_per_epochs': STEPS_PER_EPOCH_RANGE
}
optimizer = BayesianOptimization(
    f = Black_Box,
    pbounds = bounds,
    random_state = 1
)
# Log every optimization step to a per-bit-depth JSON file.
logger = JSONLogger(path="./logs_{0}.json".format(STEN_X))
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
# 2 random probes, then 10 guided iterations.
optimizer.maximize(init_points=2,n_iter=10)
|
import abc
from collections import OrderedDict
import numpy as np
from gym.spaces import Box
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.envs.wrappers import ProxyEnv
from rlkit.core.serializable import Serializable
from rlkit.core import logger as default_logger
class MultitaskEnv(object, metaclass=abc.ABCMeta):
    """
    An environment with a task that can be specified with a goal.
    To change the goal, you need to explicitly call
    ```
    goal = env.sample_goal_for_rollout()
    env.set_goal(goal)
    env.reset() # optional, but probably for the best
    ```
    If you want to append the goal to the state, do this:
    ```
    env = MyMultitaskEnv()
    env = MultitaskToFlatEnv(env)
    ```
    The above code will also make the goal change at every reset.
    See MultitaskToFlatEnv for more detail.
    If you want to change the goal at every call to reset(), but you do not
    want the goal to be appended to the state, do this:
    ```
    env = MyMultitaskEnv()
    env = MultitaskEnvToSilentMultitaskEnv(env)
    ```
    See `MultitaskEnvToSilentMultitaskEnv` for more detail.
    """
    def __init__(self, distance_metric_order=1, goal_dim_weights=None):
        # Relies on the subclass having defined goal_dim by this point.
        self.multitask_goal = np.zeros(self.goal_dim)
        # Per-dimension weights used by cost_fn; defaults to uniform.
        if goal_dim_weights is None:
            self.goal_dim_weights = np.ones(self.goal_dim)
        else:
            self.goal_dim_weights = np.array(goal_dim_weights)
        # Which L-p norm compute_rewards uses (1 = Manhattan, 2 = Euclidean).
        self.distance_metric_order = distance_metric_order
    """
    New environments should implement these three functions
    """
    @property
    @abc.abstractmethod
    def goal_dim(self) -> int:
        """
        :return: int, dimension of goal vector
        """
        pass
    @abc.abstractmethod
    def sample_goals(self, batch_size):
        # Return `batch_size` goals (batch dimension first).
        pass
    @abc.abstractmethod
    def convert_obs_to_goals(self, obs):
        """
        Convert a raw environment observation into a goal (if possible).
        """
        pass
    """
    Helper functions you probably don't need to override.
    """
    def sample_goal_for_rollout(self):
        """
        These goals are fed to a policy when the policy wants to actually
        do rollouts.
        :return:
        """
        goal = self.sample_goals(1)[0]
        return self.modify_goal_for_rollout(goal)
    def convert_ob_to_goal(self, ob):
        """
        Convert a raw environment observation into a goal (if possible).
        This observation should NOT include the goal.
        """
        # Batch the single observation, convert, then unbatch.
        if isinstance(ob, np.ndarray):
            return self.convert_obs_to_goals(
                np.expand_dims(ob, 0)
            )[0]
        else:
            # Non-ndarray observations are assumed to be torch tensors.
            return self.convert_obs_to_goals_pytorch(
                ob.unsqueeze(0)
            )[0]
    def compute_reward(self, ob, action, next_ob, goal):
        # Single-sample wrapper around compute_rewards.
        # NOTE(review): the batched result (shape (1, 1)) is returned as-is,
        # not unwrapped with [0] — confirm callers expect an array here.
        return self.compute_rewards(
            ob[None], action[None], next_ob[None], goal[None]
        )
    """
    Check out these default functions below! You may want to override them.
    """
    def set_goal(self, goal):
        self.multitask_goal = goal
    def compute_rewards(self, obs, actions, next_obs, goals):
        # Reward is the negative L-p distance between achieved and desired
        # goals, one column per sample (keepdims=True).
        return - np.linalg.norm(
            self.convert_obs_to_goals(next_obs) - goals,
            axis=1,
            keepdims=True,
            ord=self.distance_metric_order,
        )
    def convert_obs_to_goals_pytorch(self, obs):
        """
        PyTorch version of `convert_obs_to_goals`.
        """
        return self.convert_obs_to_goals(obs)
    def modify_goal_for_rollout(self, goal):
        """
        Modify a goal so that it's appropriate for doing a rollout.
        Common use case: zero out the goal velocities.
        :param goal:
        :return:
        """
        return goal
    def log_diagnostics(self, paths, logger=default_logger):
        # Record goal-distance and reward statistics over a batch of paths.
        list_of_goals = _extract_list_of_goals(paths)
        if list_of_goals is None:
            return
        # Distance between each path's final achieved goal and its last goal.
        final_differences = []
        for path, goals in zip(paths, list_of_goals):
            reached = self.convert_ob_to_goal(path['next_observations'][-1])
            final_differences.append(reached - goals[-1])
        statistics = OrderedDict()
        goals = np.vstack(list_of_goals)
        observations = np.vstack([path['observations'] for path in paths])
        next_observations = np.vstack([path['next_observations'] for path in paths])
        actions = np.vstack([path['actions'] for path in paths])
        for order in [1, 2]:
            final_distances = np.linalg.norm(
                np.array(final_differences),
                axis=1,
                ord=order,
            )
            # `goals` is already stacked; the inner vstack is a no-op here.
            goal_distances = np.linalg.norm(
                self.convert_obs_to_goals(observations) - np.vstack(goals),
                axis=1,
                ord=order,
            )
            statistics.update(create_stats_ordered_dict(
                'Multitask L{} distance to goal'.format(order),
                goal_distances,
                always_show_all_stats=True,
            ))
            statistics.update(create_stats_ordered_dict(
                'Multitask Final L{} distance to goal'.format(order),
                final_distances,
                always_show_all_stats=True,
            ))
        rewards = self.compute_rewards(
            observations,
            actions,
            next_observations,
            goals,
        )
        statistics.update(create_stats_ordered_dict(
            'Multitask Env Rewards', rewards,
        ))
        for key, value in statistics.items():
            logger.record_tabular(key, value)
    """
    Optional functions to implement, since most of my code doesn't use these
    any more.
    """
    def cost_fn(self, states, actions, next_states):
        """
        This is added for model-based code. This is COST not reward.
        So lower is better.
        :param states: (BATCH_SIZE x state_dim) numpy array
        :param actions: (BATCH_SIZE x action_dim) numpy array
        :param next_states: (BATCH_SIZE x state_dim) numpy array
        :return: (BATCH_SIZE, ) numpy array
        """
        # Promote a single state to a batch of one.
        if len(next_states.shape) == 1:
            next_states = np.expand_dims(next_states, 0)
        actual = self.convert_obs_to_goals(next_states)
        desired = self.multitask_goal * np.ones_like(actual)
        diff = actual - desired
        # Weighted squared-error cost per sample.
        diff *= self.goal_dim_weights
        return (diff**2).sum(1)
def _extract_list_of_goals(paths):
"""
Return list of goals. Each element in list is an array of goals and
correspond to the goal from different paths.
Returns None if it's not possible to extract goals from the paths.
:param paths:
:return:
"""
if len(paths) == 0:
return None
if 'goals' in paths[0]:
return [path['goals'] for path in paths]
if 'env_infos' in paths[0]:
env_infos = paths[0]['env_infos']
if isinstance(env_infos, dict): # rllab style paths
return [path['env_infos']['goal'] for path in paths]
elif 'goal' in env_infos[0]:
return [
[info['goal'] for info in path['env_infos']]
for path in paths
]
return None
class MultitaskToFlatEnv(ProxyEnv, Serializable):
    """
    This environment tasks a multitask environment and appends the goal to
    the state.  The goal is resampled at every reset().
    """
    def __init__(
            self,
            env: MultitaskEnv,
            give_goal_difference=False,
    ):
        # self._wrapped_env needs to be called first because
        # Serializable.quick_init calls getattr, on this class. And the
        # implementation of getattr (see below) calls self._wrapped_env.
        # Without setting this first, the call to self._wrapped_env would call
        # getattr again (since it's not set yet) and therefore loop forever.
        self._wrapped_env = env
        # Or else serialization gets delegated to the wrapped_env. Serialize
        # this env separately from the wrapped_env.
        self._serializable_initialized = False
        self._wrapped_obs_dim = env.observation_space.low.size
        # If True, append (goal - achieved_goal) instead of the raw goal.
        self.give_goal_difference = give_goal_difference
        Serializable.quick_init(self, locals())
        ProxyEnv.__init__(self, env)
        # Extend the observation space with goal_dim extra dimensions whose
        # bounds are derived from the wrapped bounds.
        wrapped_low = self.observation_space.low
        low = np.hstack((
            wrapped_low,
            min(wrapped_low) * np.ones(self._wrapped_env.goal_dim)
        ))
        # BUG FIX: previously read `.low` here, so the upper bounds of the
        # appended goal dimensions were derived from the lows.
        wrapped_high = self.observation_space.high
        high = np.hstack((
            wrapped_high,
            max(wrapped_high) * np.ones(self._wrapped_env.goal_dim)
        ))
        self.observation_space = Box(low, high)
    def step(self, action):
        """Step the wrapped env and append the goal to the observation."""
        ob, reward, done, info_dict = self._wrapped_env.step(action)
        new_ob = self._add_goal_to_observation(ob)
        return new_ob, reward, done, info_dict
    def reset(self):
        """Resample the goal, then reset and append the goal to the obs."""
        self._wrapped_env.set_goal(self._wrapped_env.sample_goal_for_rollout())
        ob = super().reset()
        new_ob = self._add_goal_to_observation(ob)
        return new_ob
    def log_diagnostics(self, paths, logger=default_logger):
        # Strip the appended goal dimensions before delegating, so the
        # wrapped env sees raw observations.
        for path in paths:
            path['observations'] = (
                path['observations'][:, :-self._wrapped_env.goal_dim]
            )
            path['next_observations'] = (
                path['next_observations'][:, :-self._wrapped_env.goal_dim]
            )
        # BUG FIX: forward the caller-supplied logger (was hard-coded to
        # default_logger, silently ignoring the parameter).
        return self._wrapped_env.log_diagnostics(paths, logger=logger)
    def _add_goal_to_observation(self, ob):
        """Append either the goal or the goal-minus-achieved difference."""
        if self.give_goal_difference:
            goal_difference = (
                self._wrapped_env.multitask_goal
                - self._wrapped_env.convert_ob_to_goal(ob)
            )
            return np.hstack((ob, goal_difference))
        else:
            return np.hstack((ob, self._wrapped_env.multitask_goal))
    def cost_fn(self, states, actions, next_states):
        """Strip goal dims from the flat states and delegate to the env."""
        if len(next_states.shape) == 1:
            states = states[None]
            actions = actions[None]
            next_states = next_states[None]
        unwrapped_states = states[:, :self._wrapped_obs_dim]
        unwrapped_next_states = next_states[:, :self._wrapped_obs_dim]
        return self._wrapped_env.cost_fn(
            unwrapped_states,
            actions,
            unwrapped_next_states,
        )
class MultitaskEnvToSilentMultitaskEnv(ProxyEnv, Serializable):
    """
    Normally, reset() on a multitask env doesn't change the goal.
    Now, reset will silently change the goal.
    """
    def reset(self):
        """Resample a rollout goal on the wrapped env, then reset it."""
        env = self._wrapped_env
        env.set_goal(env.sample_goal_for_rollout())
        return super().reset()

    def cost_fn(self, states, actions, next_states):
        """Delegate model-based cost computation to the wrapped env."""
        return self._wrapped_env.cost_fn(states, actions, next_states)

    def sample_goal_for_rollout(self):
        """Delegate goal sampling for rollouts to the wrapped env."""
        return self._wrapped_env.sample_goal_for_rollout()

    def sample_goals(self, batch_size):
        """Delegate batched goal sampling to the wrapped env."""
        return self._wrapped_env.sample_goals(batch_size)

    def sample_states(self, batch_size):
        """Delegate batched state sampling to the wrapped env."""
        return self._wrapped_env.sample_states(batch_size)

    def convert_ob_to_goal(self, ob):
        """Delegate single-observation goal conversion to the wrapped env."""
        return self._wrapped_env.convert_ob_to_goal(ob)

    def convert_obs_to_goals(self, obs):
        """Delegate batched observation-to-goal conversion to the wrapped env."""
        return self._wrapped_env.convert_obs_to_goals(obs)

    @property
    def multitask_goal(self):
        """Expose the wrapped env's current goal."""
        return self._wrapped_env.multitask_goal

    def joints_to_full_state(self, *args, **kwargs):
        """Delegate joint-to-state conversion to the wrapped env."""
        return self._wrapped_env.joints_to_full_state(*args, **kwargs)
|
<reponame>madedotcom/ouroboros
#!/usr/bin/python
#
DOCUMENTATION = """
---
module: eventstore_subscription
short_description: create, remove, and manage subscriptions in EventStore
description:
-
options:
host_uri:
    description: The fully qualified host for eventstore, e.g. https://eventstore.local:2113
required: true
admin_username:
description: The username to use when modifying users
required: true
admin_password:
description: The password to use when modifying users
required: true
group_name:
description: Name of the subscription group to manage
required: true
stream:
description: Name of the stream this is a subscription to
required: true
resolve_link_tos:
description: Tells the subscription to resolve link events.
required: false
start_from:
    description: Start the subscription from the position of the event in the stream.
required: false
message_timeout:
description: Sets the timeout for a client before the message will be retried (in milliseconds).
required: false
extra_statistics:
description: Tells the backend to measure timings on the clients so statistics will contain histograms of them.
required: false
max_retry:
description: Sets the number of times a message should be retried before considered a bad message.
required: false
live_buffer_size:
description: The size of the live buffer (in memory) before resorting to paging.
required: false
buffer_size:
description: The number of messages that should be buffered when in paging mode.
required: false
read_batch_size:
description: The size of the read batch when in paging mode.
checkpoint_after:
    description: The amount of time the system should try to checkpoint after (in milliseconds).
required: false
min_checkpoint_count:
description: The minimum number of messages to write a checkpoint for.
required: false
max_checkpoint_count:
description: The maximum number of messages not checkpointed before forcing a checkpoint.
required: false
max_subscriber_count:
description: Sets the maximum number of allowed TCP subscribers.
required: false
named_consumer_strategy:
description: RoundRobin/DispatchToSingle/Pinned
required: false
state:
choices: ["absent", "present"]
required: true
description: Controls whether the subscription should exist or not
"""
EXAMPLES = '''
# Add the subscription 'test-sub' to stream 'events'
- eventstore_subscription:
host_uri: http://localhost:2113
admin_username: admin
admin_password: <PASSWORD>
group_name: test-sub
stream: events
state: present
# Remove the subscription 'test-sub' from 'events' stream
- eventstore_subscription:
host_uri: http://localhost:2113
admin_username: admin
admin_password: <PASSWORD>
group_name: test-sub
stream: events
state: absent
'''
from future.standard_library import install_aliases
install_aliases()
from ouroboros.client import Client, NotFoundException, AuthenticationException
def remove_subscription(client, module):
    """Delete the subscription if it exists and report whether state changed."""
    group_name = module.params['group_name']
    stream = module.params['stream']
    try:
        # get() raises NotFoundException when the subscription is absent.
        client.subscriptions.get(group_name, stream)
        client.subscriptions.delete(group_name, stream)
    except NotFoundException:
        module.exit_json(changed=False)
    else:
        module.exit_json(changed=True)
def get_subscription_args(module):
    """Collect the subscription settings from the module parameters.

    Connection/state parameters and unset (None) options are excluded.
    """
    non_subscription_keys = ('host_uri', 'admin_username', 'admin_password', 'state')
    return {
        name: value
        for name, value in module.params.items()
        if value is not None and name not in non_subscription_keys
    }
def create_subscription(client, module):
    """Create the subscription and exit reporting the resulting config."""
    args = get_subscription_args(module)
    config = client.subscriptions.create(**args)
    result = {
        "actions": ["create"],
        "subscription": {
            "group_name": args['group_name'],
            "stream": args['stream'],
            "config": config
        }
    }
    module.exit_json(changed=True, result=result)
def update_subscription(client, module):
    """Ensure the subscription exists with the requested settings.

    Creates it when missing; updates it when the live config differs from
    the requested settings; otherwise reports no change.
    """
    group_name = module.params['group_name']
    stream = module.params['stream']
    try:
        sub = client.subscriptions.get(group_name, stream)
    except NotFoundException:
        # Nothing to update — fall back to creation.
        create_subscription(client, module)
        return
    args = get_subscription_args(module)
    diff = compare_configs(sub['config'], args)
    if not diff:
        unchanged = {
            "actions": ["none"],
            "subscription": {
                "group_name": args['group_name'],
                "stream": args['stream'],
                "config": sub['config']
            }
        }
        module.exit_json(changed=False, result=unchanged)
    config = client.subscriptions.update(**args)
    updated = {
        "actions": ["update"],
        "subscription": {
            "group_name": args['group_name'],
            "stream": args['stream'],
            "config": config['new_config'],
            "config_diff": diff
        }
    }
    module.exit_json(changed=True, result=updated)
def compare_configs(current_config, new_config):
    """Diff the requested settings against the live subscription config.

    Returns a dict mapping each differing module parameter to an
    "old => new" string.  group_name/stream are identifiers, not settings,
    and are skipped.
    """
    # Module parameter name -> EventStore HTTP API config key.
    mapping = {
        "buffer_size": "bufferSize",
        "checkpoint_after": "checkPointAfterMilliseconds",
        "extra_statistics": "extraStatistics",
        "live_buffer_size": "liveBufferSize",
        "max_checkpoint_count": "maxCheckPointCount",
        "max_retry": "maxRetryCount",
        "max_subscriber_count": "maxSubscriberCount",
        "message_timeout": "messageTimeoutMilliseconds",
        "min_checkpoint_count": "minCheckPointCount",
        "named_consumer_strategy": "namedConsumerStrategy",
        "read_batch_size": "readBatchSize",
        "resolve_link_tos": "resolveLinktos",
        "start_from": "startFrom"}
    identifiers = ("group_name", "stream")
    return {
        key: "{} => {}".format(current_config[mapping[key]], new_config[key])
        for key in new_config
        if key not in identifiers and current_config[mapping[key]] != new_config[key]
    }
def main():
    """Ansible entry point: parse arguments, connect, and apply the state."""
    module = AnsibleModule(argument_spec=dict(
        host_uri=dict(required=True, type='str'),
        admin_username=dict(required=True, type='str'),
        admin_password=dict(required=True, type='str', no_log=True),
        group_name=dict(required=True, type='str'),
        stream=dict(required=True, type='str'),
        resolve_link_tos=dict(required=False, type='bool', default=None),
        start_from=dict(required=False, type='int', default=None),
        message_timeout=dict(required=False, type='int', default=None),
        extra_statistics=dict(required=False, type='bool', default=None),
        max_retry=dict(required=False, type='int', default=None),
        live_buffer_size=dict(required=False, type='int', default=None),
        buffer_size=dict(required=False, type='int', default=None),
        read_batch_size=dict(required=False, type='int', default=None),
        checkpoint_after=dict(required=False, type='int', default=None),
        min_checkpoint_count=dict(required=False, type='int', default=None),
        max_checkpoint_count=dict(required=False, type='int', default=None),
        max_subscriber_count=dict(required=False, type='int', default=None),
        named_consumer_strategy=dict(required=False, type='str', choices=['RoundRobin', 'DispatchToSingle', 'Pinned'], default=None),
        state=dict(required=True, type='str', choices=['absent', 'present'])))
    uri = module.params['host_uri']
    adminuser = module.params['admin_username']
    # BUG FIX: the key was corrupted to '<PASSWORD>password', which is not in
    # the argument_spec and raised KeyError at runtime.
    adminpass = module.params['admin_password']
    client = Client.from_uri(uri, adminuser, adminpass)
    state = module.params['state']
    if state == "absent":
        remove_subscription(client, module)
    else:
        update_subscription(client, module)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
from hypothesis import given, assume
import hypothesis.strategies as st
import numpy as np
from latticegen.latticegeneration import *
@given(st.floats(0., exclude_min=True, allow_infinity=False),
       st.floats(0, np.pi),
       st.floats(0., 10, exclude_min=True),
       st.floats(0, np.pi),
       st.integers(4, 7),
       )
def test_generate_ks(r, t, k, p, sym):
    # Property test: generate_ks must return sym + 1 two-dimensional
    # k-vectors, bounded in magnitude by max(r*k, r).
    ks = generate_ks(r, t, k, p, sym)
    assert ks.shape == (sym + 1, 2)
    assert ks.max() <= max(r * k, r)
@given(r=st.floats(1e-20, 1e8, exclude_min=True, allow_infinity=False),
       t=st.floats(0, np.pi),
       o=st.integers(1, 3),
       k=st.floats(0.001, 10, exclude_min=True),
       p=st.floats(0, np.pi),
       size=st.one_of([st.integers(2, 50),
                       st.tuples(st.integers(2, 50), st.integers(2, 50))]),
       )
def test_fast_gen(r, t, o, k, p, size):
    # Don't use more than float max periods.
    assume(np.isfinite(r*np.max(size)*np.pi*2))
    # The "fast" generator must agree with the reference implementation
    # to a tolerance scaled by r (both return lazy dask-like arrays).
    ref = hexlattice_gen(r, t, o, size, k, p)
    fast = hexlattice_gen_fast(r, t, o, size, k, p)
    assert fast.shape == ref.shape
    ref = ref.compute()
    fast = fast.compute()
    atol = max(1e-10*r, 1e-10)
    assert np.abs((ref-fast)).max() < atol
    assert np.allclose(ref, fast, rtol=1e-5, atol=atol)
"""
@given(r=st.floats(1e-250, 1e250, allow_infinity=False),
t=st.floats(0, np.pi),
o=st.integers(1, 3),
k=st.floats(1e-10, 1e10, exclude_min=True),
p=st.floats(0, np.pi),
size=st.tuples(st.integers(2, 50), st.integers(2, 50)),
)
def test_new_hex_gen(r, t, o, k, p, size):
# Don't use more than float max periods.
assume(np.isfinite(r*np.max(size)*np.pi*2))
ref = hexlattice_gen(r, t, o, size, k, p)
new = hexlattice_gen2(r, t, o, size, k, p)
assert new.shape == ref.shape
ref = ref.compute()
new = new.compute()
assert np.allclose(ref, new)
"""
@given(r=st.floats(0.0, exclude_min=True, allow_infinity=False),
       t=st.floats(0, np.pi),
       o=st.integers(1, 3),
       k=st.floats(1e-6, 10, exclude_min=True),
       p=st.floats(0, np.pi),
       size=st.tuples(st.integers(2, 70),
                      st.integers(2, 70)),
       sym=st.integers(4, 7),
       )
def test_gen(r, t, o, sym, k, p, size):
    # Don't use more than float max periods.
    assume(np.isfinite(r*max(size)*np.pi*2))
    # Hypothesis binds by keyword, so the signature's parameter order
    # differing from the @given order is harmless here.
    ref = anylattice_gen(r, t, o, sym, size, k, p)
    assert ref.shape == size
    ref = ref.compute()
    # The generated lattice must contain no NaNs for any valid input.
    assert np.all(~np.isnan(ref))
@given(st.integers(3, 500))
def test_shapes_square(s):
    # A scalar size argument must produce a square (s, s) output for both
    # the hexagonal and the general lattice generators.
    assert hexlattice_gen(0.1, 0., 1, s).shape == (s, s)
    assert anylattice_gen(0.1, 0., 1, 4, s).shape == (s, s)
|
<reponame>IOverflow/cool-compiler-2020
from typing import List
import abstract.tree as coolAst
from abstract.semantics import (
IoType,
Method,
SelfType,
SemanticError,
Type,
VoidType,
IntegerType,
StringType,
ObjectType,
Context,
BoolType,
AutoType,
)
from functools import singledispatchmethod
BUILTINS = ("Int", "Bool", "Object", "String", "IO", "AUTO_TYPE")
def bootstrap_string(obj: StringType, intType: IntegerType):
    """Install the COOL builtin String methods (length/concat/substr) on `obj`."""
    # length(): Int
    obj.methods["length"] = Method("length", [], [], intType)
    # concat(s: String): String
    obj.methods["concat"] = Method("concat", ["s"], [StringType()], obj)
    # substr(i: Int, l: Int): String
    obj.methods["substr"] = Method("substr", ["i", "l"], [IntegerType(), IntegerType()], obj)
def bootstrap_io(io: IoType, strType: StringType, selfType: SelfType, intType: IntegerType):
    """Install the COOL builtin IO methods (out_string/out_int/in_string/in_int) on `io`."""
    # out_string(x: String): SELF_TYPE
    io.methods["out_string"] = Method("out_string", ["x"], [StringType()], selfType)
    # out_int(x: Int): SELF_TYPE
    io.methods["out_int"] = Method("out_int", ["x"], [IntegerType()], selfType)
    # in_string(): String
    io.methods["in_string"] = Method("in_string", [], [], strType)
    # in_int(): Int
    io.methods["in_int"] = Method("in_int", [], [], intType)
def bootstrap_object(obj: ObjectType, strType: StringType):
    """Install the COOL builtin Object methods (abort/type_name/copy) on `obj`."""
    # abort(): Object
    obj.methods["abort"] = Method("abort", [], [], obj)
    # type_name(): String
    obj.methods["type_name"] = Method("type_name", [], [], strType)
    # copy(): SELF_TYPE
    obj.methods["copy"] = Method("copy", [], [], SelfType())
class TypeCollector:
    """First semantic pass: build a Context seeded with the COOL builtin
    types and register every class declared in the program."""

    def __init__(self, errors=None):
        self.context = None
        # BUG FIX: the default was a mutable `[]`, shared across every
        # TypeCollector created without an explicit error list.  A caller's
        # list is still kept by reference so errors remain visible to it.
        self.errors = errors if errors is not None else []

    @singledispatchmethod
    def visit(self, node):
        # Fallback: node kinds this pass does not handle are ignored.
        pass

    @visit.register  # type: ignore
    def _(self, node: coolAst.ProgramNode):  # noqa: F811
        """Create the Context, seed the builtins, and visit every class."""
        self.context = Context()
        OBJECT, INTEGER, STRING, BOOL, VOID, SELF_TYPE = (
            ObjectType(),
            IntegerType(),
            StringType(),
            BoolType(),
            VoidType(),
            SelfType(),
        )
        ioType = IoType()
        # All builtin value types inherit directly from Object.
        INTEGER.set_parent(OBJECT)
        STRING.set_parent(OBJECT)
        BOOL.set_parent(OBJECT)
        ioType.set_parent(OBJECT)
        # Agregar los metodos builtin
        bootstrap_string(STRING, INTEGER)
        bootstrap_io(ioType, STRING, SELF_TYPE, INTEGER)
        bootstrap_object(OBJECT, STRING)
        # Agregar al objeto IO los metodos de OBJECT
        ioType.methods.update(OBJECT.methods)
        self.context.types["Object"] = OBJECT
        self.context.types["Int"] = INTEGER
        self.context.types["String"] = STRING
        self.context.types["Bool"] = BOOL
        self.context.types["Void"] = VOID
        self.context.types["AUTO_TYPE"] = AutoType()
        self.context.types["IO"] = ioType
        self.context.types["SELF_TYPE"] = SELF_TYPE
        for class_ in node.class_list:
            self.visit(class_)

    @visit.register
    def _(self, node: coolAst.ClassDef):
        """Register a user-defined class, rejecting builtin redefinitions."""
        try:
            if node.idx in BUILTINS:
                raise SemanticError(
                    f"{node.line, node.column} - SemanticError: Redefinition of basic class {node.idx}."
                )
            self.context.create_type(node.idx)
        except SemanticError as e:
            # Chain the original error so the root cause stays visible.
            raise SemanticError(f"{node.line, node.column - 2} - SemanticError: Classes may not be redefined") from e
|
# Natural Language Toolkit: Glue Semantics
#
# Author: <NAME> <<EMAIL>>
#
# Copyright (C) 2001-2014 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division, unicode_literals
import os
import nltk
from nltk.internals import Counter
from nltk.compat import string_types
from nltk.corpus import brown
from nltk.tag import UnigramTagger, BigramTagger, TrigramTagger, RegexpTagger
from nltk.sem.logic import (Expression, Variable, VariableExpression,
LambdaExpression, AbstractVariableExpression)
from nltk.compat import python_2_unicode_compatible
from nltk.sem import drt
from nltk.sem import linearlogic
# Map determiner/specifier words to their glue semtype names; the 'default'
# entry is used for any specifier word not listed here (see get_semtypes).
SPEC_SEMTYPES = {'a'       : 'ex_quant',
                 'an'      : 'ex_quant',
                 'every'   : 'univ_quant',
                 'the'     : 'def_art',
                 'no'      : 'no_quant',
                 'default' : 'ex_quant'}

# Dependency relations ignored when matching a node's relationship set
# against GlueDict entries (see GlueDict._lookup_semtype_option).
OPTIONAL_RELATIONSHIPS = ['nmod', 'vmod', 'punct']
@python_2_unicode_compatible
class GlueFormula(object):
    """A pairing of a semantic meaning term with a linear-logic glue term.

    ``meaning`` is a logic Expression, ``glue`` is a linear-logic expression,
    and ``indices`` is the set of compiled-premise indices this formula was
    derived from (used to enforce that each premise is consumed exactly once).
    """
    def __init__(self, meaning, glue, indices=None):
        # A falsy ``indices`` (None or empty) is replaced by a fresh set, so
        # no mutable default is ever shared between instances.
        if not indices:
            indices = set()
        # Accept either a string (parsed) or an already-built Expression.
        if isinstance(meaning, string_types):
            self.meaning = Expression.fromstring(meaning)
        elif isinstance(meaning, Expression):
            self.meaning = meaning
        else:
            raise RuntimeError('Meaning term neither string or expression: %s, %s' % (meaning, meaning.__class__))
        if isinstance(glue, string_types):
            self.glue = linearlogic.LinearLogicParser().parse(glue)
        elif isinstance(glue, linearlogic.Expression):
            self.glue = glue
        else:
            raise RuntimeError('Glue term neither string or expression: %s, %s' % (glue, glue.__class__))
        self.indices = indices
    def applyto(self, arg):
        """Apply this formula to *arg*, combining meanings and glue terms.

        Example: self = (\\x.(walk x), (subj -o f)), arg = (john, subj)
        returns ((walk john), f).

        Raises LinearLogicApplicationException if the premise-index sets
        overlap (a premise would be consumed twice) or the glue application
        fails.
        """
        if self.indices & arg.indices: # if the sets are NOT disjoint
            raise linearlogic.LinearLogicApplicationException("'%s' applied to '%s'. Indices are not disjoint." % (self, arg))
        else: # if the sets ARE disjoint
            return_indices = (self.indices | arg.indices)
        try:
            return_glue = linearlogic.ApplicationExpression(self.glue, arg.glue, arg.indices)
        except linearlogic.LinearLogicApplicationException:
            raise linearlogic.LinearLogicApplicationException("'%s' applied to '%s'" % (self.simplify(), arg.simplify()))
        arg_meaning_abstracted = arg.meaning
        if return_indices:
            # Lambda-abstract the argument's meaning over each dependency
            # variable introduced by compilation, innermost-first.
            for dep in self.glue.simplify().antecedent.dependencies[::-1]: # if self.glue is (A -o B), dep is in A.dependencies
                arg_meaning_abstracted = self.make_LambdaExpression(Variable('v%s' % dep),
                                                                    arg_meaning_abstracted)
        return_meaning = self.meaning.applyto(arg_meaning_abstracted)
        # Construct via self.__class__ so subclasses (e.g. DrtGlueFormula)
        # produce instances of their own type.
        return self.__class__(return_meaning, return_glue, return_indices)
    def make_VariableExpression(self, name):
        # Factory hook; overridden by DRT subclass.
        return VariableExpression(name)
    def make_LambdaExpression(self, variable, term):
        # Factory hook; overridden by DRT subclass.
        return LambdaExpression(variable, term)
    def lambda_abstract(self, other):
        """Abstract this formula over *other* (meaning and glue in parallel)."""
        assert isinstance(other, GlueFormula)
        assert isinstance(other.meaning, AbstractVariableExpression)
        return self.__class__(self.make_LambdaExpression(other.meaning.variable,
                                                         self.meaning),
                              linearlogic.ImpExpression(other.glue, self.glue))
    def compile(self, counter=None):
        """From <NAME>'s PhD Dissertation p108-109.

        Compile the glue term into premises; returns the new premises plus
        this formula tagged with a fresh premise index from *counter*.
        """
        if not counter:
            counter = Counter()
        (compiled_glue, new_forms) = self.glue.simplify().compile_pos(counter, self.__class__)
        return new_forms + [self.__class__(self.meaning, compiled_glue, set([counter.get()]))]
    def simplify(self):
        # Simplify both sides; indices pass through unchanged.
        return self.__class__(self.meaning.simplify(), self.glue.simplify(), self.indices)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable under Python 3 — confirm no caller uses them as dict
        # keys or set members.  Indices are deliberately excluded from
        # equality.
        return self.__class__ == other.__class__ and self.meaning == other.meaning and self.glue == other.glue
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        assert isinstance(self.indices, set)
        accum = '%s : %s' % (self.meaning, self.glue)
        if self.indices:
            accum += ' : {' + ', '.join(str(index) for index in self.indices) + '}'
        return accum
    def __repr__(self):
        return "%s" % self
@python_2_unicode_compatible
class GlueDict(dict):
    """Dictionary mapping semtype names to glue-formula templates.

    Loaded from a semtype grammar file.  Structure after read_file():
    ``self[semtype][relationship_frozenset] -> list of (meaning, glue)
    string pairs``; the ``None`` relationship key holds entries with no
    relationship restriction.
    """
    def __init__(self, filename, encoding=None):
        self.filename = filename
        self.file_encoding = encoding
        # Eagerly parse the file on construction.
        self.read_file()
    def read_file(self, empty_first=True):
        """Parse the semtype file into this dict.

        Each data line has the form
        ``sem(supertype) : (meaning, glue) ... : [rel, ...]`` where the
        supertype and relationship list are optional; ``#`` starts a comment.
        """
        if empty_first:
            self.clear()
        try:
            contents = nltk.data.load(self.filename, format='text', encoding=self.file_encoding)
            # TODO: the above can't handle zip files, but this should anyway be fixed in nltk.data.load()
        except LookupError as e:
            # Retry with an explicit file: scheme; re-raise the original
            # error if that also fails.
            try:
                contents = nltk.data.load('file:' + self.filename, format='text', encoding=self.file_encoding)
            except LookupError:
                raise e
        lines = contents.splitlines()
        for line in lines: # example: 'n : (\\x.(<word> x), (v-or))'
                           #             lambdacalc -^  linear logic -^
            line = line.strip() # remove trailing newline
            if not len(line): continue # skip empty lines
            if line[0] == '#': continue # skip commented out lines
            parts = line.split(' : ', 2) # ['verb', '(\\x.(<word> x), ( subj -o f ))', '[subj]']
            glue_formulas = []
            # Hand-rolled scanner state: paren nesting depth and the spans
            # of the current (meaning, glue) tuple being collected.
            paren_count = 0
            tuple_start = 0
            tuple_comma = 0
            relationships = None
            if len(parts) > 1:
                for (i, c) in enumerate(parts[1]):
                    if c == '(':
                        if paren_count == 0: # if it's the first '(' of a tuple
                            tuple_start = i+1 # then save the index
                        paren_count += 1
                    elif c == ')':
                        paren_count -= 1
                        if paren_count == 0: # if it's the last ')' of a tuple
                            meaning_term = parts[1][tuple_start:tuple_comma] # '\\x.(<word> x)'
                            glue_term = parts[1][tuple_comma+1:i] # '(v-r)'
                            glue_formulas.append([meaning_term, glue_term]) # add the GlueFormula to the list
                    elif c == ',':
                        if paren_count == 1: # if it's a comma separating the parts of the tuple
                            tuple_comma = i # then save the index
                    elif c == '#': # skip comments at the ends of lines
                        if paren_count != 0: # if the line hasn't parsed correctly so far
                            raise RuntimeError('Formula syntax is incorrect for entry ' + line)
                        break # break to the next line
            if len(parts) > 2: #if there is a relationship entry at the end
                rel_start = parts[2].index('[')+1
                rel_end = parts[2].index(']')
                if rel_start == rel_end:
                    # '[]' means an explicitly empty relationship set.
                    relationships = frozenset()
                else:
                    relationships = frozenset(r.strip() for r in parts[2][rel_start:rel_end].split(','))
            try:
                # 'sem(supertype)' — inherit entries from the named supertype.
                start_inheritance = parts[0].index('(')
                end_inheritance = parts[0].index(')')
                sem = parts[0][:start_inheritance].strip()
                supertype = parts[0][start_inheritance+1:end_inheritance]
            except:
                # NOTE(review): bare except — presumably meant to catch only
                # ValueError from .index() when no '(' is present; anything
                # else is silently swallowed too.
                sem = parts[0].strip()
                supertype = None
            if sem not in self:
                self[sem] = {}
            if relationships is None: #if not specified for a specific relationship set
                #add all relationship entries for parents
                if supertype:
                    for rels in self[supertype]:
                        if rels not in self[sem]:
                            self[sem][rels] = []
                        glue = self[supertype][rels]
                        self[sem][rels].extend(glue)
                        self[sem][rels].extend(glue_formulas) # add the glue formulas to every rel entry
                else:
                    if None not in self[sem]:
                        self[sem][None] = []
                    self[sem][None].extend(glue_formulas) # add the glue formulas to every rel entry
            else:
                if relationships not in self[sem]:
                    self[sem][relationships] = []
                if supertype:
                    self[sem][relationships].extend(self[supertype][relationships])
                self[sem][relationships].extend(glue_formulas) # add the glue entry to the dictionary
    def __str__(self):
        # Pretty-print entries aligned under their semtype name, appending
        # the relationship set after the last formula of each group.
        accum = ''
        for pos in self:
            str_pos = "%s" % pos
            for relset in self[pos]:
                i = 1
                for gf in self[pos][relset]:
                    if i==1:
                        accum += str_pos + ': '
                    else:
                        accum += ' '*(len(str_pos)+2)
                    accum += "%s" % gf
                    if relset and i==len(self[pos][relset]):
                        accum += ' : %s' % relset
                    accum += '\n'
                    i += 1
        return accum
    def to_glueformula_list(self, depgraph, node=None, counter=None, verbose=False):
        """Recursively collect glue formulas for *node* and its dependents.

        NOTE(review): uses the old ``depgraph.nodelist`` API of NLTK's
        DependencyGraph — confirm against the installed nltk version.
        """
        if node is None:
            # Start from the root: the first dependent of the TOP node.
            top = depgraph.nodelist[0]
            root = depgraph.nodelist[top['deps'][0]]
            return self.to_glueformula_list(depgraph, root, Counter(), verbose)
        glueformulas = self.lookup(node, depgraph, counter)
        for dep_idx in node['deps']:
            dep = depgraph.nodelist[dep_idx]
            glueformulas.extend(self.to_glueformula_list(depgraph, dep, counter, verbose))
        return glueformulas
    def lookup(self, node, depgraph, counter):
        """Return the glue formulas for *node*, or [] if it has no semtype entry."""
        semtype_names = self.get_semtypes(node)
        semtype = None
        # Take the first candidate name with an entry (names are ordered by
        # plausibility — see get_semtypes).
        for name in semtype_names:
            if name in self:
                semtype = self[name]
                break
        if semtype is None:
#            raise KeyError, "There is no GlueDict entry for sem type '%s' (for '%s')" % (sem, word)
            return []
        self.add_missing_dependencies(node, depgraph)
        lookup = self._lookup_semtype_option(semtype, node, depgraph)
        if not len(lookup):
            raise KeyError("There is no GlueDict entry for sem type of '%s'"\
                    " with tag '%s', and rel '%s'" %\
                    (node['word'], node['tag'], node['rel']))
        return self.get_glueformulas_from_semtype_entry(lookup, node['word'], node, depgraph, counter)
    def add_missing_dependencies(self, node, depgraph):
        # A 'main' node implicitly depends on its head's subject; add that
        # dependency so the relationship-set lookup sees it.
        rel = node['rel'].lower()
        if rel == 'main':
            headnode = depgraph.nodelist[node['head']]
            subj = self.lookup_unique('subj', headnode, depgraph)
            node['deps'].append(subj['address'])
    def _lookup_semtype_option(self, semtype, node, depgraph):
        """Pick the semtype entry whose relationship set best matches *node*."""
        # The node's actual relationship set, ignoring optional relations.
        relationships = frozenset(depgraph.nodelist[dep]['rel'].lower()
                                  for dep in node['deps']
                                  if depgraph.nodelist[dep]['rel'].lower()
                                     not in OPTIONAL_RELATIONSHIPS)
        try:
            lookup = semtype[relationships]
        except KeyError:
            # An exact match is not found, so find the best match where
            # 'best' is defined as the glue entry whose relationship set has the
            # most relations of any possible relationship set that is a subset
            # of the actual depgraph
            best_match = frozenset()
            for relset_option in set(semtype)-set([None]):
                if len(relset_option) > len(best_match) and \
                   relset_option < relationships:
                    best_match = relset_option
            if not best_match:
                # Fall back to the unrestricted (None) entry, if any.
                if None in semtype:
                    best_match = None
                else:
                    return None
            lookup = semtype[best_match]
        return lookup
    def get_semtypes(self, node):
        """
        Based on the node, return a list of plausible semtypes in order of
        plausibility.
        """
        rel = node['rel'].lower()
        word = node['word'].lower()
        if rel == 'spec':
            # Specifiers map through the determiner table (with a default).
            if word in SPEC_SEMTYPES:
                return [SPEC_SEMTYPES[word]]
            else:
                return [SPEC_SEMTYPES['default']]
        elif rel in ['nmod', 'vmod']:
            # Modifiers: try the POS tag first, then the relation itself.
            return [node['tag'], rel]
        else:
            return [node['tag']]
    def get_glueformulas_from_semtype_entry(self, lookup, word, node, depgraph, counter):
        """Instantiate the (meaning, glue) templates in *lookup* for *word*."""
        glueformulas = []
        glueFormulaFactory = self.get_GlueFormula_factory()
        for meaning, glue in lookup:
            gf = glueFormulaFactory(self.get_meaning_formula(meaning, word), glue)
            # Tag the first formula with the bare word; later ones get a
            # numeric suffix to keep labels unique.
            if not len(glueformulas):
                gf.word = word
            else:
                gf.word = '%s%s' % (word, len(glueformulas)+1)
            gf.glue = self.initialize_labels(gf.glue, node, depgraph, counter.get())
            glueformulas.append(gf)
        return glueformulas
    def get_meaning_formula(self, generic, word):
        """
        :param generic: A meaning formula string containing the
            parameter "<word>"
        :param word: The actual word to be replace "<word>"
        """
        # Dots are stripped because '.' is the label-path separator
        # (see find_label_name).
        word = word.replace('.', '')
        return generic.replace('<word>', word)
    def initialize_labels(self, expr, node, depgraph, unique_index):
        """Recursively replace abstract glue labels with concrete ones."""
        if isinstance(expr, linearlogic.AtomicExpression):
            name = self.find_label_name(expr.name.lower(), node, depgraph, unique_index)
            # Uppercase initial => linear-logic variable, else constant.
            if name[0].isupper():
                return linearlogic.VariableExpression(name)
            else:
                return linearlogic.ConstantExpression(name)
        else:
            return linearlogic.ImpExpression(
                self.initialize_labels(expr.antecedent, node, depgraph, unique_index),
                self.initialize_labels(expr.consequent, node, depgraph, unique_index))
    def find_label_name(self, name, node, depgraph, unique_index):
        """Resolve a (possibly dotted) label path relative to *node*."""
        try:
            # 'a.b' — resolve 'a' relative to node, then recurse on 'b'.
            dot = name.index('.')
            before_dot = name[:dot]
            after_dot = name[dot+1:]
            if before_dot == 'super':
                return self.find_label_name(after_dot, depgraph.nodelist[node['head']], depgraph, unique_index)
            else:
                return self.find_label_name(after_dot, self.lookup_unique(before_dot, node, depgraph), depgraph, unique_index)
        except ValueError:
            # No dot — resolve one of the primitive label names.
            lbl = self.get_label(node)
            if name=='f':     return lbl
            elif name=='v':   return '%sv' % lbl
            elif name=='r':   return '%sr' % lbl
            elif name=='super': return self.get_label(depgraph.nodelist[node['head']])
            elif name=='var': return '%s%s' % (lbl.upper(), unique_index)
            elif name=='a':   return self.get_label(self.lookup_unique('conja', node, depgraph))
            elif name=='b':   return self.get_label(self.lookup_unique('conjb', node, depgraph))
            else:             return self.get_label(self.lookup_unique(name, node, depgraph))
    def get_label(self, node):
        """
        Pick an alphabetic character as identifier for an entity in the model.

        The node's address indexes into a fixed 26-letter cycle (starting at
        'f'); addresses beyond 26 get a numeric suffix.
        """
        value = node['address']
        letter = ['f','g','h','i','j','k','l','m','n','o','p','q','r','s',
                  't','u','v','w','x','y','z','a','b','c','d','e'][value-1]
        num = int(value) // 26
        if num > 0:
            return letter + str(num)
        else:
            return letter
    def lookup_unique(self, rel, node, depgraph):
        """
        Lookup 'key'. There should be exactly one item in the associated relation.
        """
        deps = [depgraph.nodelist[dep] for dep in node['deps']
                if depgraph.nodelist[dep]['rel'].lower() == rel.lower()]
        if len(deps) == 0:
            raise KeyError("'%s' doesn't contain a feature '%s'" % (node['word'], rel))
        elif len(deps) > 1:
            raise KeyError("'%s' should only have one feature '%s'" % (node['word'], rel))
        else:
            return deps[0]
    def get_GlueFormula_factory(self):
        # Factory hook; overridden by DrtGlueDict.
        return GlueFormula
class Glue(object):
    """End-to-end glue-semantics pipeline: sentence -> dependency parse ->
    glue premises -> compiled premises -> meaning readings.
    """
    def __init__(self, semtype_file=None, remove_duplicates=False,
                 depparser=None, verbose=False):
        self.verbose = verbose
        # When True, logically-equivalent readings are collapsed using the
        # theorem prover below.
        self.remove_duplicates = remove_duplicates
        self.depparser = depparser
        # NOTE(review): Prover9 is an external binary that must be installed
        # separately; it is only exercised when remove_duplicates is True.
        from nltk import Prover9
        self.prover = Prover9()
        if semtype_file:
            self.semtype_file = semtype_file
        else:
            # Default to the sample grammar shipped with NLTK's data.
            self.semtype_file = os.path.join('grammars', 'sample_grammars','glue.semtype')
    def train_depparser(self, depgraphs=None):
        """Train the dependency parser, on *depgraphs* or the bundled corpus."""
        if depgraphs:
            self.depparser.train(depgraphs)
        else:
            self.depparser.train_from_file(nltk.data.find(
                os.path.join('grammars', 'sample_grammars',
                             'glue_train.conll')))
    def parse_to_meaning(self, sentence):
        """Return every meaning reading derivable for *sentence*."""
        readings = []
        for agenda in self.parse_to_compiled(sentence):
            readings.extend(self.get_readings(agenda))
        return readings
    def get_readings(self, agenda):
        """Derive readings from *agenda* (a list of compiled glue premises).

        Chart-style algorithm: premises are partitioned into atomic and
        non-atomic (implication) glue terms; each popped premise is combined
        with every compatible premise of the opposite kind, and the results
        are pushed back onto the agenda.  A combination is only allowed when
        the premise-index sets are disjoint (linearity).  A complete reading
        is any formula that consumed all of the original premises.
        """
        readings = []
        agenda_length = len(agenda)
        atomics = dict()
        nonatomics = dict()
        while agenda: # is not empty
            cur = agenda.pop()
            glue_simp = cur.glue.simplify()
            if isinstance(glue_simp, linearlogic.ImpExpression): # if cur.glue is non-atomic
                # Try to apply cur to every previously-seen atomic premise
                # whose glue unifies with cur's antecedent.
                for key in atomics:
                    try:
                        if isinstance(cur.glue, linearlogic.ApplicationExpression):
                            bindings = cur.glue.bindings
                        else:
                            bindings = linearlogic.BindingDict()
                        glue_simp.antecedent.unify(key, bindings)
                        for atomic in atomics[key]:
                            if not (cur.indices & atomic.indices): # if the sets of indices are disjoint
                                try:
                                    agenda.append(cur.applyto(atomic))
                                except linearlogic.LinearLogicApplicationException:
                                    pass
                    except linearlogic.UnificationException:
                        pass
                # Index cur by its antecedent for future atomic premises.
                try:
                    nonatomics[glue_simp.antecedent].append(cur)
                except KeyError:
                    nonatomics[glue_simp.antecedent] = [cur]
            else: # else cur.glue is atomic
                # Symmetric case: feed cur to every compatible non-atomic.
                for key in nonatomics:
                    for nonatomic in nonatomics[key]:
                        try:
                            if isinstance(nonatomic.glue, linearlogic.ApplicationExpression):
                                bindings = nonatomic.glue.bindings
                            else:
                                bindings = linearlogic.BindingDict()
                            glue_simp.unify(key, bindings)
                            if not (cur.indices & nonatomic.indices): # if the sets of indices are disjoint
                                try:
                                    agenda.append(nonatomic.applyto(cur))
                                except linearlogic.LinearLogicApplicationException:
                                    pass
                        except linearlogic.UnificationException:
                            pass
                try:
                    atomics[glue_simp].append(cur)
                except KeyError:
                    atomics[glue_simp] = [cur]
        # A reading is complete when it used every original premise.
        for entry in atomics:
            for gf in atomics[entry]:
                if len(gf.indices) == agenda_length:
                    self._add_to_reading_list(gf, readings)
        for entry in nonatomics:
            for gf in nonatomics[entry]:
                if len(gf.indices) == agenda_length:
                    self._add_to_reading_list(gf, readings)
        return readings
    def _add_to_reading_list(self, glueformula, reading_list):
        """Append the formula's meaning, optionally skipping logical duplicates."""
        add_reading = True
        if self.remove_duplicates:
            for reading in reading_list:
                try:
                    if reading.equiv(glueformula.meaning, self.prover):
                        add_reading = False
                        break
                except Exception as e:
                    #if there is an exception, the syntax of the formula
                    #may not be understandable by the prover, so don't
                    #throw out the reading.
                    print('Error when checking logical equality of statements', e)
                    pass
        if add_reading:
            reading_list.append(glueformula.meaning)
    def parse_to_compiled(self, sentence):
        """Parse *sentence* and return one compiled premise list per parse."""
        gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)]
        return [self.gfl_to_compiled(gfl) for gfl in gfls]
    def dep_parse(self, sentence):
        #Lazy-initialize the depparser
        if self.depparser is None:
            from nltk.parse import MaltParser
            self.depparser = MaltParser(tagger=self.get_pos_tagger())
        if not self.depparser._trained:
            self.train_depparser()
        return [self.depparser.parse(sentence, verbose=self.verbose)]
    def depgraph_to_glue(self, depgraph):
        return self.get_glue_dict().to_glueformula_list(depgraph)
    def get_glue_dict(self):
        # Factory hook; overridden by DrtGlue.
        return GlueDict(self.semtype_file)
    def gfl_to_compiled(self, gfl):
        """Compile a glue-formula list, sharing one premise-index counter."""
        index_counter = Counter()
        return_list = []
        for gf in gfl:
            return_list.extend(gf.compile(index_counter))
        if self.verbose:
            print('Compiled Glue Premises:')
            for cgf in return_list:
                print(cgf)
        return return_list
    def get_pos_tagger(self):
        """Build a backoff tagger chain: regexp -> uni/bi/trigram (Brown news)."""
        regexp_tagger = RegexpTagger(
            [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),   # cardinal numbers
             (r'(The|the|A|a|An|an)$', 'AT'),   # articles
             (r'.*able$', 'JJ'),                # adjectives
             (r'.*ness$', 'NN'),                # nouns formed from adjectives
             (r'.*ly$', 'RB'),                  # adverbs
             (r'.*s$', 'NNS'),                  # plural nouns
             (r'.*ing$', 'VBG'),                # gerunds
             (r'.*ed$', 'VBD'),                 # past tense verbs
             (r'.*', 'NN')                      # nouns (default)
            ])
        brown_train = brown.tagged_sents(categories='news')
        unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger)
        bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger)
        trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger)
        #Override particular words
        main_tagger = RegexpTagger(
            [(r'(A|a|An|an)$', 'ex_quant'),
             (r'(Every|every|All|all)$', 'univ_quant')
            ], backoff=trigram_tagger)
        return main_tagger
class DrtGlueFormula(GlueFormula):
    """A glue formula whose meaning side is a DRT expression instead of
    a plain logic expression."""
    def __init__(self, meaning, glue, indices=None):
        self.indices = set() if not indices else indices
        # Accept the meaning term either pre-built or as a string to parse.
        if isinstance(meaning, drt.AbstractDrs):
            self.meaning = meaning
        elif isinstance(meaning, string_types):
            self.meaning = drt.DrtExpression.fromstring(meaning)
        else:
            raise RuntimeError('Meaning term neither string or expression: %s, %s' % (meaning, meaning.__class__))
        # Same for the glue term, parsed with the linear-logic parser.
        if isinstance(glue, linearlogic.Expression):
            self.glue = glue
        elif isinstance(glue, string_types):
            self.glue = linearlogic.LinearLogicParser().parse(glue)
        else:
            raise RuntimeError('Glue term neither string or expression: %s, %s' % (glue, glue.__class__))
    def make_VariableExpression(self, name):
        # DRT-flavoured factory override.
        return drt.DrtVariableExpression(name)
    def make_LambdaExpression(self, variable, term):
        # DRT-flavoured factory override.
        return drt.DrtLambdaExpression(variable, term)
class DrtGlueDict(GlueDict):
    """GlueDict variant whose entries instantiate DRT glue formulas."""
    def get_GlueFormula_factory(self):
        # Produce DRT-aware formulas instead of plain GlueFormula.
        return DrtGlueFormula
class DrtGlue(Glue):
    """Glue-semantics pipeline producing DRT meaning representations."""
    def __init__(self, semtype_file=None, remove_duplicates=False,
                 depparser=None, verbose=False):
        # Fall back to the sample DRT semtype grammar when none is given.
        default_semtype = os.path.join('grammars', 'sample_grammars', 'drt_glue.semtype')
        Glue.__init__(self, semtype_file or default_semtype,
                      remove_duplicates, depparser, verbose)
    def get_glue_dict(self):
        # Use the DRT-aware dictionary so formulas carry DRT meanings.
        return DrtGlueDict(self.semtype_file)
def demo(show_example=-1):
    """Run the glue-semantics demo.

    :param show_example: index of the single example sentence to run, or -1
        (default) to run them all.  Requires MaltParser to be installed.
    """
    from nltk.parse import MaltParser

    # NOTE(review): the first example looks like an anonymization artifact
    # ('<NAME> Mary'); presumably it was originally 'John sees Mary'.
    examples = ['<NAME> Mary',
                'David eats a sandwich',
                'every man chases a dog',
                'every man believes a dog sleeps',
                'John gives David a sandwich',
                'John chases himself']
#                'John persuades David to order a pizza',
#                'John tries to go',
#                'John tries to find a unicorn',
#                'John seems to vanish',
#                'a unicorn seems to approach',
#                'every big cat leaves',
#                'every gray cat leaves',
#                'every big gray cat leaves',
#                'a former senator leaves',

    print('============== DEMO ==============')

    # Closed-vocabulary tagger covering exactly the words in the examples.
    tagger = RegexpTagger(
        [('^(David|Mary|John)$', 'NNP'),
         ('^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$', 'VB'),
         ('^(go|order|vanish|find|approach)$', 'VB'),
         ('^(a)$', 'ex_quant'),
         ('^(every)$', 'univ_quant'),
         ('^(sandwich|man|dog|pizza|unicorn|cat|senator)$', 'NN'),
         ('^(big|gray|former)$', 'JJ'),
         ('^(him|himself)$', 'PRP')
        ])

    depparser = MaltParser(tagger=tagger)
    glue = Glue(depparser=depparser, verbose=False)
    for (i, sentence) in enumerate(examples):
        if i==show_example or show_example==-1:
            print('[[[Example %s]]]  %s' % (i, sentence))
            for reading in glue.parse_to_meaning(sentence.split()):
                print(reading.simplify())
            print('')


if __name__ == '__main__':
    demo()
|
import asyncio
import base64
import glob
import json
import os
import subprocess
import sys
import tempfile
import urllib.request
from typing import ClassVar, Optional, List, Tuple
import guy
import keyring
import requests
from cerebrate.app import native_gui_utils
from cerebrate.cerebrate import Cerebrate
from cerebrate.core import Replay
from cerebrate.core.replay_query import ReplayQuery
def _make_replay_payload(replay: Replay) -> dict:
    """Serialize *replay* into the JSON-shaped dict the frontend expects."""
    team_names = [team.name for team in replay.teams]
    return {
        "replayId": replay.replay_hash,
        "replayTimestamp": replay.timestamp,
        "teams": team_names,
        "playerTeam": replay.player_team,
        "opponentTeam": replay.opponent_team,
        "selectedTags": replay.tags,
        "notes": replay.notes,
    }
def _set_replay_info_from_payload(replay: Replay, payload: dict) -> Replay:
    """Copy the user-editable fields from a frontend payload onto *replay*.

    Mutates *replay* in place and returns it for chaining.
    """
    selected_tags = payload.get("selectedTags", [])
    notes_text = payload.get("notes", "")
    replay.set_tags(selected_tags)
    replay.notes = notes_text
    # Team assignments may be absent from the payload, yielding None.
    replay.player_team = payload.get("playerTeam")
    replay.opponent_team = payload.get("opponentTeam")
    return replay
def _replays_from_hashes(cerebrate: Cerebrate, replay_hashes: List[str]):
    """Resolve each hash to its Replay via *cerebrate*, preserving order."""
    return list(map(cerebrate.find_replay, replay_hashes))
def _cross_platform_open(path: str):
    """Open *path* with the platform's default associated application."""
    if sys.platform == "win32":
        # Windows has a dedicated API for "open with associated app".
        os.startfile(path)
        return
    # macOS ships `open`; other POSIX systems use freedesktop's `xdg-open`.
    opener = "open" if sys.platform == "darwin" else "xdg-open"
    subprocess.Popen([opener, path])
# noinspection PyPep8Naming, PyMethodMayBeStatic
class Index(guy.Guy):
    """Main GUI window; each async method is an RPC endpoint callable from
    the JavaScript frontend via the `guy` framework (hence camelCase names).
    """
    size = (1200, 800)

    # Shared backend instance for all handlers.
    cerebrate: ClassVar[Cerebrate] = Cerebrate()

    async def selectMostRecentReplay(self):
        """Load the newest replay on disk and push it to the frontend."""
        replay_path = Cerebrate.find_most_recent_replay_path()
        if not replay_path:
            return

        with open(replay_path, "rb") as replay_data:
            replay = Index.cerebrate.save_replay_data(replay_data)
            if not replay:
                return
            # Re-read the file to embed its bytes as a base64 data URL for
            # the frontend player.
            replay_data.seek(0)
            prefix = "data:application/octet-stream;base64,"
            data_url = prefix + base64.b64encode(replay_data.read()).decode("ascii")

        replay = Index.cerebrate.load_replay_info(replay)
        Index.cerebrate.update_replay_info(replay)

        await self.js.replayLoaded(
            {
                **_make_replay_payload(replay),
                "replayFileName": os.path.split(replay_path)[1],
                "replayData": data_url,
                "force": True,
            }
        )

    async def selectReplay(self, payload: dict):
        """Load a replay by id, optionally ingesting new data from a data URL."""
        replay_hash: str = payload["replayId"]
        replay_url: Optional[str] = payload.get("replayData")
        if replay_url:
            with urllib.request.urlopen(replay_url) as replay_data:
                replay = Index.cerebrate.save_replay_data(replay_data, replay_hash)
        else:
            replay = Index.cerebrate.find_replay(replay_hash)
        if not replay:
            return

        replay = Index.cerebrate.load_replay_info(replay)
        Index.cerebrate.update_replay_info(replay)

        await self.js.replayLoaded(
            {
                **_make_replay_payload(replay),
                "force": payload.get("force", False),
            }
        )

    async def selectPlayerOpponent(self, payload: dict):
        """Persist which team is the player vs the opponent for a replay."""
        replay = self.cerebrate.find_replay(payload["replayId"])
        if not replay:
            return

        replay.player_team = payload["playerTeam"]
        replay.opponent_team = payload["opponentTeam"]

        Index.cerebrate.update_replay_info(replay)
        replay = Index.cerebrate.load_replay_info(replay)

        await self.js.replayLoaded(_make_replay_payload(replay))

    async def updateReplayInfo(self, payload: dict):
        """Save edited tags/notes/teams; reports success via replayUpdated."""
        replay_hash: str = payload["replayId"]
        replay_url: Optional[str] = payload.get("replayData")
        if replay_url:
            with urllib.request.urlopen(replay_url) as replay_data:
                replay = Index.cerebrate.save_replay_data(replay_data, replay_hash)
        else:
            replay = Index.cerebrate.find_replay(replay_hash)
        if not replay:
            await self.js.replayUpdated({"success": False})
            return

        Index.cerebrate.update_replay_info(
            _set_replay_info_from_payload(replay, payload)
        )

        await self.js.replayUpdated({"success": True, "replayId": replay.replay_hash})

    async def findReplays(self, payload: dict):
        """Query replays by tags/time range; returns replays plus per-tag counts."""
        query = ReplayQuery(
            include_tags=payload.get("includeTags"),
            exclude_tags=payload.get("excludeTags"),
            start_timestamp=payload.get("startTimestamp"),
            end_timestamp=payload.get("endTimestamp"),
        )
        replays = self.cerebrate.find_replays(query)
        frequency_table = self.cerebrate.calculate_tag_frequency_table(
            replays, query.include_tags
        )
        return {
            "replays": [_make_replay_payload(replay) for replay in replays],
            "tagFrequencyTable": [
                {
                    "tag": tag,
                    "frequency": frequency,
                }
                for tag, frequency in frequency_table.items()
            ],
        }

    async def forgetReplays(self, payload: dict):
        """Remove the given replays from the library."""
        replay_hashes: List[str] = payload.get("replayIds", [])
        for replay_hash in replay_hashes:
            self.cerebrate.forget_replay(replay_hash)

    async def exportReplaysToTempDir(self, payload: dict):
        """Export the given replays to a fresh temp dir; returns its path."""
        # no automatic cleanup - let os handle cleanup
        export_path = tempfile.mkdtemp()
        replay_hashes: List[str] = payload.get("replayIds", [])
        replays = _replays_from_hashes(self.cerebrate, replay_hashes)
        Cerebrate.export_replays_to_directory(replays, export_path)
        return export_path

    async def exportReplaysToTargetDir(self, payload: dict):
        """Export the given replays to a user-chosen directory (or None if cancelled)."""
        export_path = await native_gui_utils.open_directory_picker(
            title="Export to directory"
        )
        if not export_path:
            return None
        replay_hashes: List[str] = payload.get("replayIds", [])
        replays = _replays_from_hashes(self.cerebrate, replay_hashes)
        Cerebrate.export_replays_to_directory(replays, export_path)
        return export_path

    async def exportReplaysToScelight(self, payload: dict):
        """Export replays to a temp dir and open them with Scelight."""
        scelight_path = self.cerebrate.settings.scelight_path
        if not scelight_path:
            return
        export_path = await self.exportReplaysToTempDir(payload)
        subprocess.Popen([scelight_path] + glob.glob(os.path.join(export_path, "*")))

    async def exportReplaysToSc2ReplayStats(self, payload: dict):
        """Upload replays to sc2replaystats.com and return their share URLs.

        Polls the status endpoint every 3 s until every upload has been
        processed.
        """
        auth_key: str = payload.get("authKey", "").strip()
        if not auth_key:
            return
        # NOTE(review): service name looks like an anonymization placeholder
        # and does not match the one used in getSc2ReplayStatsAuthKey below —
        # confirm both use the same keyring service string.
        keyring.set_password("<PASSWORD>", "auth_key", auth_key)
        headers = {"Authorization": auth_key}

        def export_replay(replay: Replay) -> Optional[str]:
            # Upload one replay file; returns its queue id, or None on failure.
            with open(replay.path, "rb") as file:
                response = requests.post(
                    "http://api.sc2replaystats.com/replay",
                    data={
                        "upload_method": "ext",
                    },
                    headers=headers,
                    files={"replay_file": file},
                )
                if response.status_code != 200:
                    return None
                return json.loads(response.text).get("replay_queue_id")

        def get_export_id(replay_queue_id: str) -> Optional[str]:
            # None => still processing; "" => gave up (non-200 response).
            response = requests.get(
                f"http://api.sc2replaystats.com/replay/status/{replay_queue_id}",
                headers=headers,
            )
            if response.status_code != 200:
                # Don't bother waiting if we can't get an OK response
                return ""
            return json.loads(response.text).get("replay_id")

        replay_hashes: List[str] = payload.get("replayIds", [])
        replays = _replays_from_hashes(self.cerebrate, replay_hashes)
        # Keep only the uploads that were accepted (queue id is not None).
        replay_queue_ids: List[Tuple[Replay, str]] = list(
            filter(
                lambda result: result[1] is not None,
                [(replay, export_replay(replay)) for replay in replays],
            )
        )

        loop = True
        export_ids = []
        while loop:
            await asyncio.sleep(3)
            export_ids = [
                (replay, get_export_id(replay_queue_id))
                for replay, replay_queue_id in replay_queue_ids
            ]
            # Keep polling while any replay is still unprocessed (None).
            loop = any(replay_id is None for replay, replay_id in export_ids)

        # Failed exports ("" ids) are filtered out by the truthiness check.
        return [
            {
                **_make_replay_payload(replay),
                "exportUrl": f"https://sc2replaystats.com/replay/{export_id}",
            }
            for replay, export_id in export_ids
            if export_id
        ]

    async def openDirInFileManager(self, payload: dict):
        """Open the given directory in the OS file manager."""
        path = payload.get("dirPath")
        if not path:
            return None
        _cross_platform_open(path)

    async def getScelightPath(self):
        """Return the configured Scelight path (or None)."""
        return self.cerebrate.settings.scelight_path

    async def selectScelightPath(self):
        """Ask the user to locate Scelight and persist the choice."""
        scelight_path = await native_gui_utils.open_file_picker(
            title="Choose Scelight installation"
        )
        if not scelight_path:
            return None
        self.cerebrate.settings.scelight_path = os.path.normpath(scelight_path)
        return scelight_path

    async def getSc2ReplayStatsAuthKey(self):
        # NOTE(review): service name "sc<PASSWORD>" looks like an
        # anonymization placeholder and differs from the set_password call
        # above — the stored key will not round-trip unless they match.
        return keyring.get_password("sc<PASSWORD>", "auth_key")
async def getSc2ReplayStatsAuthKey(self):
return keyring.get_password("sc<PASSWORD>", "auth_key")
def main():
    """Launch the GUI as a single-window application."""
    Index().run(one=True)


if __name__ == "__main__":
    main()
|
import asyncio
import codecs
import os
import random
# Get the globals from Settings
import codes.paths as path
import discord
import dotenv
from discord.ext import commands
from discord.ext.commands import MissingPermissions, has_permissions
from pymongo import MongoClient
from dotenv import load_dotenv
# # # Module: Messages
# #  - Contains a few simple commands, each consisting only of messages shown by the bot
# # # Uses:
# #  - Discord.py API (by Rapptz on: https://github.com/Rapptz/discord.py)

# Load environment variables from a .env file, then read the MongoDB URI.
load_dotenv()
CONNECT_STRING = os.environ.get("MONGODB_URI")
class Messages(commands.Cog):
    """Módulo que contém alguns comandos simples, que retornam apenas mensagens de texto"""

    # NOTE: the command docstrings below are user-facing help text rendered
    # by discord.py's !help command, so they are intentionally left in
    # Portuguese and must not be reworded.

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command(name="say")
    async def say(self, ctx: commands.Context, *, text: str):
        """!say <text> => O Bot repete o que for passado para ele como <text>
        Passa um texto para o Bot repetir. A mensagem original enviada é deletada.
        """
        # Delete the invoking message so only the bot's repetition remains.
        await ctx.message.delete()
        await ctx.send(text)

    @commands.command(name="familia")
    async def familia(self, ctx):
        """!familia => Pergunta pro BRTT se isso é uma família"""
        response = (
            "Isso aqui não é uma família, é um time!\n"
            + "Se vai deixar morrer, teu irmão???\n\n"
            + "*CLARO QUE VAI NÉ, PORRA!*"
        )
        familia_embed = discord.Embed(description=response)
        await ctx.send(embed=familia_embed)

    @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member):
        """Send a welcome DM to every member who joins a guild."""
        embed = discord.Embed(
            title=f"Saudações e seja muito bem-vindo à Guilda **{member.guild.name}**, **{member.mention}**!!!",
            # Bug fix: commands.Bot has no ``nick`` attribute (that belongs to
            # discord.Member), which raised AttributeError here.  Use the bot
            # account's username, as on_guild_join below already does.
            description=f"Eu sou {self.bot.user.name}, responsável por gerenciar algumas coisas aqui e ali no servidor.\n"
            "Não esqueça de consultar as Regras e Diretrizes da Guilda (utilize `!rules` no servidor para isso), "
            "bem como de consultar no que posso te ajudar com o comando `!help` 😎",
        )
        embed.set_thumbnail(
            url="https://lh3.googleusercontent.com/UjwwvH1Luh8ue76YXo9tlejaqxZ3vMGmf8C6t72XPfh0JqKc3cvzBwWXFTX02W9ku_TSWrxNQISXQ_o6I--TwATi4g-D6d1K_FLftWFmfQreEK95KiK1RXGV1S7aPRL86H35w5pPAyB12QAYjU2ZXQmvWjkKGdle2peESa05Ff9pCXj3RQD44-pIM8XmxksQMT7dILoJjAPKKntJZR82Aq4Wb-alZP-XSKc-PFESaKk_RnBIMx1YMsNbgZSTO1tnNAq9u1ci3jxgRXSip4VE12EKcnFbu1b1Lg9VhTSWeiA9PgPdLmjnKCbIEztybwwDp6In8wW_pfOZsg8zoxoNmOmJP0FzQRxD6A-gJrm5su-IXA-vnDE0PohLP5C3kdtPnsRThqyaQV8fMA-1SuCTjsr9ptsn-uIBhGBggFfapYdEhtl19wtsQ5JoYYB6tfpWtENbnTpfxvZv0LHcX5DPRFCRpiT49CAOvrzwjCYMA_qFMJ2ikeKZhFbwsdJ6P1l0SwAEyraqDoCmPzoswtl9D8y1S0F36qvTRESUUwofjDe3BGDvFqRODhHtlBL_be8Eq3AdDTfgeP1nv8WbrAbGyzt8IArYJTTg8hp6rx6q9o6m_a9uUdYTqbY5QzGgVLO5oekez33pVJ2A8tKPdkTa9y7D2FXicFOt4OGw2TH3UDUBGiV_PGUrAkIHhqP8dCDniNym2F87E2gXVzQkwTMnyyI=s977-no?authuser=0"
        )
        await member.send(embed=embed)

    @commands.Cog.listener()
    async def on_guild_join(self, guild: discord.Guild):
        """Announce the bot in the first text channel it may write to."""
        embed = discord.Embed(
            title=f"SAUDAÇÕES À TODOS! 🖖👽\nEu me chamo {self.bot.user.name} e sou o novo Bot do servidor 🤖",
            description=f"Sou um Bot de propósito geral, então... Faço um pouco de tudo, {guild.roles[0]}!",
        )
        embed.add_field(
            name="Dentre as coisas que posso fazer, estão: ",
            value=(
                "🔨 Gerenciar o servidor (Roles, Kick, Palavras proibidas...)\n"
                "🎲 Tomar decisões através de dado, cara ou coroa ou 'escolha um'\n"
                "📖 Consultar informações sobre animes, mangás e personagens!\n"
                "🎮 Consultar informações sobre jogos na Steam em tempo real!\n"
                "⏳ Obter o tempo estimado para terminar um game! (via HowLongToBeat)\n"
                "🤑 Informar os usuários sobre Jogos/DLCs grátis para PC!\n"
                "🧙‍♂️ Informações sobre partidas ao vivo de League of Legends, bem como detalhes dos invocadores!\n"
                "🚀 ... e por aí vai!\n\n"
                "Para maiores detalhes de minhas funcionalidades e como configurá-las, acesse a [documentação](https://github.com/MatheusXCH/Discordzada/wiki).\n"
                "Utilize o `!help` para informações acerca do uso dos comandos.\n\n"
                f"Caso encontrem bugs, por favor, entrem em contato com meu criador pelo {path.dev_contact}."
            ),
            inline=False,
        )
        embed.set_image(
            url="https://<KEY>PXJL4H7uXO2SY5yybJmMC5YdMNeYH0GigclXehz5X6Lat9nzqC4tB0qgrn2HkzZhkF_ClXaF9ULuf0X7jypZ2jfAI8D_BUshuIxbYuTFfI1VOoSSAgOSC7DJjbDGOyBSNbUji9rzPcG-f1iGLMEMD0WZE=w1466-h977-no?authuser=0"
        )

        # Post the announcement in the first channel the bot can speak in.
        for channel in guild.text_channels:
            if channel.permissions_for(guild.me).send_messages:
                await channel.send(embed=embed)
                break
def setup(bot):
    """Entry point used by discord.py's extension loader to register this cog."""
    bot.add_cog(Messages(bot))
|
<reponame>nodaki/HRNet-tensorflow<gh_stars>1-10
# -*- coding: utf-8 -*-
import collections
import json
import logging
import os
from pathlib import Path
import click
import cv2
import numpy as np
import tensorflow as tf
from omegaconf import OmegaConf, DictConfig
from pycocotools.coco import COCO
from tqdm import tqdm
def create_category_to_label(catNms):
    """Map each COCO category name to a 1-based integer label.

    The mapping is also persisted to ``$PROJECT_DIR/config/category_to_label.json``
    so downstream evaluation/inference can reuse it.
    """
    category_to_label = collections.OrderedDict()
    for label, category_name in enumerate(catNms, start=1):
        category_to_label[category_name] = label
    # Persist the label map alongside the project config.
    label_map_path = os.path.join(os.getenv("PROJECT_DIR", "../"), "config/category_to_label.json")
    with open(label_map_path, "w") as f:
        json.dump(category_to_label, f, indent=4)
    return category_to_label
def make_tf_example(image: np.ndarray, label: np.ndarray, height: int, width: int, filename: str) -> tf.train.Example:
    """Serialize one image/label pair (plus metadata) into a tf.train.Example.

    The image and label arrays are stored as raw bytes; height/width are kept
    so the arrays can be reshaped when the record is parsed back.
    """
    def _bytes_feature(value: bytes) -> tf.train.Feature:
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def _int64_feature(value: int) -> tf.train.Feature:
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

    # FIX: np.ndarray.tostring() is deprecated and removed in NumPy >= 2.0;
    # tobytes() produces the identical byte string.
    return tf.train.Example(features=tf.train.Features(feature={
        "image": _bytes_feature(image.tobytes()),
        "label": _bytes_feature(label.tobytes()),
        "height": _int64_feature(height),
        "width": _int64_feature(width),
        "filename": _bytes_feature(filename.encode()),
    }))
def create_dataset_from_coco_annotations(
        dataset_cfg: DictConfig,
        input_filepath: str,
        output_filepath: str,
        trainval: str,
        tfrecord: bool
):
    """Create dataset from coco dataset
    Make image and label images.

    For every annotated COCO image: writes a PNG copy of the image and a PNG
    label mask (pixel value = category label) under
    ``<output_filepath>/<trainval>/{image,label}/``; optionally also appends a
    serialized example to ``<output_filepath>/<trainval>.tfrecord``.
    """
    annotations_file = os.path.join(input_filepath, f"annotations_trainval2017/annotations/instances_{trainval}.json")
    coco = COCO(annotation_file=annotations_file)
    # Set target category
    if dataset_cfg.DATASET.catNms:
        catNms = dataset_cfg.DATASET.catNms
    else:
        # If not specified categories, all categories are set to target
        catNms = [cat["name"] for cat in coco.loadCats(coco.getCatIds())]
    # NOTE(review): `logger` is only bound inside the __main__ guard of this
    # module, so importing the module and calling this function raises
    # NameError — confirm and move the logger to module scope.
    logger.info(f"Categories: {catNms}")
    catIds = coco.getCatIds(catNms=catNms)
    category_to_label = create_category_to_label(catNms=catNms)
    # Make image and label dir
    image_dir = Path(os.path.join(output_filepath, trainval, "image"))
    label_dir = Path(os.path.join(output_filepath, trainval, "label"))
    image_dir.mkdir(parents=True, exist_ok=True)
    label_dir.mkdir(parents=True, exist_ok=True)
    record_file = os.path.join(output_filepath, f"{trainval}.tfrecord")
    recorded_images_count = 0
    # The TFRecordWriter is opened even when `tfrecord` is False; it then
    # simply produces an empty record file.
    with tf.io.TFRecordWriter(record_file) as writer:
        for imgId in tqdm(coco.getImgIds(), desc=f"Make {trainval} record"):
            annsIds = coco.getAnnIds(imgIds=imgId, catIds=catIds, iscrowd=None)
            anns = coco.loadAnns(annsIds)
            img = coco.loadImgs(imgId)[0]
            path = os.path.join(input_filepath, trainval, trainval, img["file_name"])
            # Skip images with no annotations for the target categories.
            if anns:
                image = cv2.imread(path)
                # if images is gray scale.
                # NOTE(review): cv2.imread with default flags returns a
                # 3-channel BGR array, so this ndim == 2 branch looks dead —
                # confirm whether IMREAD_UNCHANGED was intended.
                if image.ndim == 2:
                    image = np.tile(np.expand_dims(image, axis=-1), reps=[1, 1, 3])
                # Rasterize all annotations into a single label mask; later
                # annotations win via element-wise maximum of label values.
                label = np.zeros(shape=(img["height"], img["width"]), dtype=np.uint8)
                for ann in anns:
                    cat_nm = coco.loadCats(ann["category_id"])[0]["name"]
                    label = np.maximum(label, coco.annToMask(ann) * category_to_label[cat_nm])
                # Save image (as PNG, before float normalization below)
                image_file = image_dir / img["file_name"]
                image_file = image_file.with_suffix(".png")
                cv2.imwrite(str(image_file), image)
                label_file = label_dir / img["file_name"]
                label_file = label_file.with_suffix(".png")
                cv2.imwrite(str(label_file), label)
                # Record tfrecord.
                if tfrecord:
                    image = image.astype(np.float32)
                    image /= 255.0  # normalize to [0,1] range
                    label = np.expand_dims(label, axis=2)
                    # NOTE(review): uint64 for a label mask whose values fit
                    # in uint8 looks accidental — confirm the parser's dtype.
                    label = label.astype(np.uint64)
                    tf_example = make_tf_example(image, label, img["height"], img["width"], img["file_name"])
                    writer.write(tf_example.SerializeToString())
                    recorded_images_count += 1
    logger.info(f"Record counts: {recorded_images_count} @ {trainval}")
@click.command()
@click.option("--input_filepath", "-d", type=str, default=os.getenv("INPUT_FILEPATH", "../data/raw"))
@click.option("--output_filepath", "-o", type=str, default=os.getenv("OUTPUT_FILEPATH", "../data/processed"))
@click.option("--dataset_cfg_path", type=str,
              # FIX: os.getenv("PROJECT_DIR") without a default returns None
              # when the env var is unset, making os.path.join raise TypeError
              # at import time; fall back to the current directory.
              default=os.path.join(os.getenv("PROJECT_DIR", "."), "../", "config/dataset/person.yaml"))
@click.option("--tfrecord", is_flag=True)
def main(input_filepath: str, output_filepath: str, dataset_cfg_path: str, tfrecord: bool):
    """Build the train2017 and val2017 datasets (and optional TFRecords) from COCO."""
    os.makedirs(output_filepath, exist_ok=True)
    dataset_cfg = OmegaConf.load(dataset_cfg_path)
    # Same pipeline for both splits.
    for trainval in ("train2017", "val2017"):
        create_dataset_from_coco_annotations(dataset_cfg, input_filepath=input_filepath,
                                             output_filepath=output_filepath,
                                             trainval=trainval, tfrecord=tfrecord)
# FIX: the helper functions above reference the global `logger`; binding it
# only under the __main__ guard made the module unusable when imported
# (NameError). Bind it unconditionally at module scope.
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main()
|
<filename>to/lang/OpenCV-2.2.0/samples/python/lkdemo.py
#! /usr/bin/env python
print "OpenCV Python version of lkdemo"
import sys
# import the necessary things for OpenCV
import cv
#############################################################################
# some "constants"
win_size = 10    # half-size (pixels) of the sub-pixel corner search window
MAX_COUNT = 500  # upper bound on the number of corners to track
#############################################################################
# some "global" variables
image = None             # current annotated frame shown in the window
pt = None                # last clicked point (set by the mouse callback)
add_remove_pt = False    # True while a clicked point awaits add/remove handling
flags = 0                # flags forwarded to cv.CalcOpticalFlowPyrLK
night_mode = False       # when True, display only the points on a black frame
need_to_init = False     # request (re)detection of good features to track
#############################################################################
# the mouse callback
# the callback on the trackbar
def on_mouse(event, x, y, flags, param):
    """Mouse callback: remember a left-click so the main loop can
    add or remove the feature point nearest to it."""
    global pt, add_remove_pt

    if image is None:
        # Frame buffers are not allocated yet; ignore clicks.
        return

    if image.origin != 0:
        # Bottom-left-origin image: flip the y coordinate.
        y = image.height - y

    if event == cv.CV_EVENT_LBUTTONDOWN:
        # Remember the click and flag it for processing in the main loop.
        pt = (x, y)
        add_remove_pt = True
#############################################################################
# so, here is the main part of the program
if __name__ == '__main__':
    # Image files to cycle through are given on the command line.
    frames = sys.argv[1:]
    if frames == []:
        print "usage lkdemo.py <image files>"
        sys.exit(1)

    # display a small howto use it
    print "Hot keys: \n" \
        "\tESC - quit the program\n" \
        "\tr - auto-initialize tracking\n" \
        "\tc - delete all the points\n" \
        "\tn - switch the \"night\" mode on/off\n" \
        "\tSPACE - next frame\n" \
        "To add/remove a feature point click it\n"

    # first, create the necessary windows
    cv.NamedWindow ('LkDemo', cv.CV_WINDOW_AUTOSIZE)

    # register the mouse callback
    cv.SetMouseCallback ('LkDemo', on_mouse, None)

    fc = 0  # index of the currently displayed frame
    while 1:
        # do forever
        frame = cv.LoadImage(frames[fc])

        if image is None:
            # create the images we need (lazily, once the frame size is known)
            image = cv.CreateImage (cv.GetSize (frame), 8, 3)
            image.origin = frame.origin
            grey = cv.CreateImage (cv.GetSize (frame), 8, 1)
            prev_grey = cv.CreateImage (cv.GetSize (frame), 8, 1)
            pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1)
            prev_pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1)
            features = []

        # copy the frame, so we can draw on it
        cv.Copy (frame, image)

        # create a grey version of the image
        cv.CvtColor (image, grey, cv.CV_BGR2GRAY)

        if night_mode:
            # night mode: only display the points
            cv.SetZero (image)

        if need_to_init:
            # we want to search all the good points
            # create the wanted images
            eig = cv.CreateImage (cv.GetSize (grey), 32, 1)
            temp = cv.CreateImage (cv.GetSize (grey), 32, 1)

            # the default parameters
            quality = 0.01
            min_distance = 10

            # search the good points
            features = cv.GoodFeaturesToTrack (
                grey, eig, temp,
                MAX_COUNT,
                quality, min_distance, None, 3, 0, 0.04)

            # refine the corner locations
            features = cv.FindCornerSubPix (
                grey,
                features,
                (win_size, win_size), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

        elif features != []:
            # we have points, so display them
            # calculate the optical flow
            features, status, track_error = cv.CalcOpticalFlowPyrLK (
                prev_grey, grey, prev_pyramid, pyramid,
                features,
                (win_size, win_size), 3,
                (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS, 20, 0.03),
                flags)

            # set back the points we keep
            features = [ p for (st,p) in zip(status, features) if st]

            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
                def ptptdist(p0, p1):
                    # squared euclidean distance between two points
                    dx = p0[0] - p1[0]
                    dy = p0[1] - p1[1]
                    return dx**2 + dy**2
                # NOTE(review): min() over an empty `features` list raises
                # ValueError if tracking lost every point — confirm.
                if min([ ptptdist(pt, p) for p in features ]) < 25:
                    # too close
                    # NOTE(review): 0 here vs False elsewhere — same truthiness,
                    # but inconsistent style.
                    add_remove_pt = 0

            # draw the points as green circles
            for the_point in features:
                cv.Circle (image, (int(the_point[0]), int(the_point[1])), 3, (0, 255, 0, 0), -1, 8, 0)

        if add_remove_pt:
            # we want to add a point
            # refine this corner location and append it to 'features'
            features += cv.FindCornerSubPix (
                grey,
                [pt],
                (win_size, win_size), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
                 20, 0.03))
            # we are no longer in "add_remove_pt" mode
            add_remove_pt = False

        # swapping: current buffers become the "previous" ones for next frame
        prev_grey, grey = grey, prev_grey
        prev_pyramid, pyramid = pyramid, prev_pyramid
        need_to_init = False

        # we can now display the image
        cv.ShowImage ('LkDemo', image)

        # handle events
        c = cv.WaitKey(10) % 0x100

        if c == 27:
            # user has press the ESC key, so exit
            break

        # processing depending on the character
        if 32 <= c and c < 128:
            cc = chr(c).lower()
            if cc == 'r':
                need_to_init = True
            elif cc == 'c':
                features = []
            elif cc == 'n':
                night_mode = not night_mode
            elif cc == ' ':
                # advance to the next frame (wraps around)
                fc = (fc + 1) % len(frames)
|
# -*- coding: utf-8 -*-
"""
K Nearest Neighbor Classification
---------------------------------
This function is built especially for a learned metric
parameterized by the matrix A where this function takes
the matrix M such that A = M * M'
"""
# Author: <NAME> <<EMAIL>>
def knn(ytr, Xtr, M, k, Xte):
    """K Nearest Neighbors classifier under a learned metric A = M * M'.

    y_hat = knn(y, X, M, k, Xt)
    Classifies each row of Xte by majority vote among its k nearest training
    rows, where distance is measured in the metric space defined by A, i.e.
    squared distances reduce to ||x*M - z*M||^2.

    Parameters
    ----------
    ytr: vector of integer labels
        The known responses upon which to train our classifier. If the
        minimum label is 0, labels are shifted to 1-based internally and
        predictions are shifted back before returning.
    Xtr: 2D n*p array of numbers
        The data matrix where rows are observations and columns are features
    M: The p*p factor matrix of the (assumedly) learned matrix A
    k: The number of nearest neighbors to use
    Xte: The new data from which to predict responses

    Returns
    -------
    preds : int array of length Xte.shape[0]
        Predicted label for each test row. Ties are broken toward the
        smallest label, matching the original voting loop.

    References
    ----------
    Tibshirani et al. (2002). Diagnosis of multiple cancer types by shrunken
    centroids of gene expression. PNAS, 99(10), 6567-6572.
    """
    import numpy as np

    # FIX: the original did `ytr += 1`, mutating the caller's array in place;
    # work on a shifted copy instead.
    ytr = np.asarray(ytr)
    add1 = 0
    if ytr.min() == 0:
        ytr = ytr + 1
        add1 = 1

    # Project both sets into the metric space once; distances under A are
    # squared Euclidean distances between rows of X*M.
    proj_tr = np.dot(Xtr, M)
    proj_te = np.dot(Xte, M)
    sq_tr = np.sum(proj_tr * proj_tr, axis=1)   # ||x_i M||^2
    sq_te = np.sum(proj_te * proj_te, axis=1)   # ||z_j M||^2
    cross = np.dot(proj_tr, proj_te.T)          # (x_i M) . (z_j M)
    D = sq_tr[:, None] + sq_te[None, :] - 2.0 * cross

    # Each column holds training indices sorted by distance to that test row.
    order = np.argsort(D, axis=0)
    nt = Xte.shape[0]
    preds = np.zeros(nt, dtype=int)
    for j in range(nt):
        # FIX: the original hand-rolled vote counter grew its list by a single
        # slot (`counts.append(1)`) whenever a label exceeded its length,
        # mis-indexing votes for non-contiguous label sets. np.bincount
        # counts every non-negative label correctly; argmax breaks ties
        # toward the smallest label, as before.
        votes = np.bincount(ytr[order[:k, j]])
        preds[j] = np.argmax(votes)

    if add1 == 1:
        preds -= 1
    return preds
if __name__ == "__main__":
""" Simple Test """
import numpy as np
X = np.array([[1,1], [7,7], [1,2], [7,8], [2,1], [8,7], [1,0], [8,9], [11, -4], [14, 1]])
y = np.array([1, 2, 1, 2, 1, 2, 1, 2, 2, 2])
M = np.eye(X.shape[1])
k = 4
Xtr, Xte = X[:8,:], X[8:,:]
ytr, yte = y[:8], y[8:]
#print Xtr.shape
#print Xte.shape
print 'Simple Test'
print '--------------'
print 'predictions'
print knn(ytr, Xtr, M, k, Xte)
print 'actual'
print yte
""" Elaborate Test """
import numpy as np
import os
X = np.genfromtxt(os.path.join('data', 'test_X.csv'), delimiter = ',', dtype = float)
y = np.genfromtxt(os.path.join('data', 'test_y.csv'), delimiter = ',', dtype = int)
M = np.eye(X.shape[1])
k = 1
inds = np.genfromtxt(os.path.join('data', 'test_inds.csv'), delimiter = ',', dtype = int)
inds_tr = np.where(inds == 1)[0]
inds_te = np.where(inds == 0)[0]
Xtr = X[inds_tr, :]
Xtr, Xte = X[inds_tr, :], X[inds_te, :]
ytr, yte = y[inds_tr], y[inds_te]
ypred = knn(ytr, Xtr, M, k, Xte)
print 'Elaborate Test'
print '--------------'
print 'predictions'
print ypred
print 'actual'
print yte
matlab_accuracy = 0.958333
accuracy = float(sum(yte == ypred)) / len(yte)
if np.abs(matlab_accuracy - accuracy) > 0.00001:
print 'Problem'
else:
print 'Perfect'
|
<filename>graphene_gae/ndb/types.py<gh_stars>100-1000
import inspect
from collections import OrderedDict
from google.appengine.ext import ndb
from graphene import Field, ID # , annotate, ResolveInfo
from graphene.relay import Connection, Node
from graphene.types.objecttype import ObjectType, ObjectTypeOptions
from graphene.types.utils import yank_fields_from_attrs
from .converter import convert_ndb_property
from .registry import Registry, get_global_registry
__author__ = 'ekampf'
def fields_for_ndb_model(ndb_model, registry, only_fields, exclude_fields):
    """Build an OrderedDict of graphene fields from an ndb model's properties.

    Properties filtered out by `only_fields` / `exclude_fields` are skipped,
    as are properties the converter cannot handle.
    """
    converted_fields = OrderedDict()
    for prop_name, prop in ndb_model._properties.iteritems():
        field_name = prop._code_name
        # Honor the Meta whitelist/blacklist.
        if only_fields and field_name not in only_fields:
            continue
        if field_name in exclude_fields:
            continue
        conversion = convert_ndb_property(prop, registry)
        if not conversion:
            # Converter declined this property type.
            continue
        # A single property may convert into one field or several.
        if not isinstance(conversion, list):
            conversion = [conversion]
        for item in conversion:
            converted_fields[item.name] = item.field
    return converted_fields
class NdbObjectTypeOptions(ObjectTypeOptions):
    """Meta-options container attached to each NdbObjectType subclass."""

    model = None  # type: Model
    registry = None  # type: Registry
    connection = None  # type: Type[Connection]
    id = None  # type: str
class NdbObjectType(ObjectType):
    """Graphene ObjectType whose fields are derived from an ndb.Model.

    Subclasses declare the backing model (and optional field filters) via
    their ``Meta`` class; fields are generated with convert_ndb_property.
    """

    class Meta:
        abstract = True

    # Raw datastore numeric/string id, distinct from the relay global `id`.
    ndb_id = ID(resolver=lambda entity, *_: str(entity.key.id()))

    @classmethod
    def __init_subclass_with_meta__(cls, model=None, registry=None, skip_registry=False,
                                    only_fields=(), exclude_fields=(), connection=None,
                                    use_connection=None, interfaces=(), **options):
        if not model:
            raise Exception((
                'NdbObjectType {name} must have a model in the Meta class attr'
            ).format(name=cls.__name__))

        if not inspect.isclass(model) or not issubclass(model, ndb.Model):
            raise Exception((
                'Provided model in {name} is not an NDB model'
            ).format(name=cls.__name__))

        if not registry:
            registry = get_global_registry()

        assert isinstance(registry, Registry), (
            'The attribute registry in {} needs to be an instance of '
            'Registry, received "{}".'
        ).format(cls.__name__, registry)

        ndb_fields = fields_for_ndb_model(model, registry, only_fields, exclude_fields)
        ndb_fields = yank_fields_from_attrs(
            ndb_fields,
            _as=Field,
        )

        if use_connection is None and interfaces:
            use_connection = any((issubclass(interface, Node) for interface in interfaces))

        if use_connection and not connection:
            # We create the connection automatically
            connection = Connection.create_type('{}Connection'.format(cls.__name__), node=cls)

        if connection is not None:
            assert issubclass(connection, Connection), (
                "The connection must be a Connection. Received {}"
            ).format(connection.__name__)

        _meta = NdbObjectTypeOptions(cls)
        _meta.model = model
        _meta.registry = registry
        _meta.fields = ndb_fields
        _meta.connection = connection

        super(NdbObjectType, cls).__init_subclass_with_meta__(_meta=_meta, interfaces=interfaces, **options)

        if not skip_registry:
            registry.register(cls)

    @classmethod
    def is_type_of(cls, root, info):
        """Return True when `root` is (or maps to) this type's model."""
        if isinstance(root, cls):
            return True

        if not isinstance(root, ndb.Model):
            raise Exception(('Received incompatible instance "{}".').format(root))

        # Returns True if `root` is a PolyModel subclass and `cls` is in the
        # class hierarchy of `root` which is retrieved with `_class_key`
        if (hasattr(root, '_class_key') and
                hasattr(cls._meta.model, '_class_key') and
                set(cls._meta.model._class_key()).issubset(
                    set(root._class_key()))):
            return True

        return type(root) == cls._meta.model

    @classmethod
    def get_node(cls, info, urlsafe_key):
        """Resolve a relay node from its urlsafe key; None if undecodable."""
        try:
            key = ndb.Key(urlsafe=urlsafe_key)
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only malformed-key errors should yield None.
            return None
        model = cls._meta.model
        assert key.kind() == model.__name__
        return key.get()

    @classmethod
    def resolve_id(cls, entity, info):
        # Relay global id is the entity key's urlsafe serialization.
        return entity.key.urlsafe()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import pytest
from OpenSSL import crypto
from pretend import call, call_recorder, stub
from twisted.internet import ssl
import pem
from pem.twisted import certificateOptionsFromFiles
from .data import CERT_PEMS, DH_PEM, KEY_PEM, KEY_PEM2
@pytest.fixture
def keyCertChainDHFile(tmpdir):
    """
    Returns a file containing the key, three certificates, and DH parameters.
    """
    bundle = tmpdir.join("key_cert_and_chain_and_params.pem")
    bundle.write(KEY_PEM + b"".join(CERT_PEMS) + DH_PEM)
    return bundle
@pytest.fixture
def keyCertChainFile(tmpdir):
    """
    Returns a file containing the key and three certificates.
    """
    bundle = tmpdir.join("key_cert_and_chain.pem")
    bundle.write(KEY_PEM + b"".join(CERT_PEMS))
    return bundle
class TestCertificateOptionsFromFiles(object):
    """Tests for pem.twisted.certificateOptionsFromFiles: how keys,
    certificates, chains and DH parameters are detected across one or more
    PEM files, and which misconfigurations raise ValueError/TypeError."""

    def test_worksWithoutChain(self, tmpdir):
        """
        Creating CO without chain certificates works.
        """
        keyFile = tmpdir.join("key.pem")
        keyFile.write(KEY_PEM)
        certFile = tmpdir.join("cert.pem")
        certFile.write(CERT_PEMS[0])
        ctxFactory = certificateOptionsFromFiles(str(keyFile), str(certFile))
        assert [] == ctxFactory.extraCertChain

    def test_worksWithChainInExtraFile(self, tmpdir):
        """
        Chain can be in a separate file.
        """
        keyFile = tmpdir.join("key.pem")
        keyFile.write(KEY_PEM)
        certFile = tmpdir.join("cert.pem")
        certFile.write(CERT_PEMS[0])
        chainFile = tmpdir.join("chain.pem")
        chainFile.write(b"".join(CERT_PEMS[1:]))
        ctxFactory = certificateOptionsFromFiles(
            str(keyFile), str(certFile), str(chainFile)
        )
        assert 2 == len(ctxFactory.extraCertChain)

    def test_worksWithChainInSameFile(self, tmpdir):
        """
        Chain can be in the same file as the certificate.
        """
        keyFile = tmpdir.join("key.pem")
        keyFile.write(KEY_PEM)
        certFile = tmpdir.join("cert_and_chain.pem")
        certFile.write(b"".join(CERT_PEMS))
        ctxFactory = certificateOptionsFromFiles(str(keyFile), str(certFile))
        assert 2 == len(ctxFactory.extraCertChain)

    def test_useTypesNotOrdering(self, tmpdir):
        """
        L{pem.certificateOptionsFromFiles} identifies the chain, key, and
        certificate for Twisted's L{CertificateOptions} based on their types
        and certificate fingerprints, not their order within the file.
        """
        keyFile = tmpdir.join("key.pem")
        keyFile.write(KEY_PEM)
        certFile = tmpdir.join("cert_and_chain.pem")
        # Reversed order must still yield the same classification.
        certFile.write(b"".join(reversed(CERT_PEMS)))
        ctxFactory = certificateOptionsFromFiles(str(keyFile), str(certFile))
        assert 2 == len(ctxFactory.extraCertChain)

    def test_worksWithEverythingInOneFile(self, keyCertChainDHFile):
        """
        Key, certificate, and chain can also be in a single file.
        """
        ctxFactory = certificateOptionsFromFiles(str(keyCertChainDHFile))
        assert 2 == len(ctxFactory.extraCertChain)
        assert ctxFactory.dhParameters is not None

    def test_passesCertsInCorrectFormat(self, keyCertChainDHFile):
        """
        PEM objects are correctly detected and passed into CO.
        """
        ctxFactory = certificateOptionsFromFiles(str(keyCertChainDHFile))
        assert isinstance(ctxFactory.privateKey, crypto.PKey)
        assert isinstance(ctxFactory.certificate, crypto.X509)
        assert all(
            isinstance(cert, crypto.X509) for cert in ctxFactory.extraCertChain
        )

    def test_forwardsKWargs(self, keyCertChainDHFile):
        """
        Extra keyword arguments are passed into CO.
        """
        ctxFactory = certificateOptionsFromFiles(
            str(keyCertChainDHFile), fixBrokenPeers=True
        )
        assert True is ctxFactory.fixBrokenPeers

    def test_catchesMissingKey(self, tmpdir):
        """
        Raises ValueError if a key is missing.
        """
        certFile = tmpdir.join("cert_and_chain.pem")
        certFile.write(b"".join(CERT_PEMS))
        with pytest.raises(ValueError):
            certificateOptionsFromFiles(str(certFile))

    def test_catchesMultipleKeys(self, tmpdir):
        """
        Raises ValueError if multiple keys are present.
        """
        allFile = tmpdir.join("key_cert_and_chain.pem")
        allFile.write(KEY_PEM + b"".join(CERT_PEMS) + KEY_PEM2)
        with pytest.raises(ValueError):
            certificateOptionsFromFiles(str(allFile))

    def test_catchesMissingCertificate(self, tmpdir):
        """
        Raises ValueError if no certificate is passed.
        """
        keyFile = tmpdir.join("key.pem")
        keyFile.write(KEY_PEM)
        with pytest.raises(ValueError):
            certificateOptionsFromFiles(str(keyFile))

    def test_catchesKeyCertificateMismatch(self, tmpdir):
        """
        A ValueError is raised when some certificates are present in the pem,
        but no certificate in the pem matches the key.
        """
        keyFile = tmpdir.join("key.pem")
        # Key plus only the *chain* certs: none matches the key.
        keyFile.write(KEY_PEM + b"".join(CERT_PEMS[1:]))
        with pytest.raises(ValueError) as excinfo:
            certificateOptionsFromFiles(str(keyFile))
        assert str(excinfo.value).startswith("No certificate matching ")

    def test_catchesMultipleDHParams(self, tmpdir):
        """
        A ValueError is raised when more than one set of DH parameters is
        present.
        """
        pemFile = tmpdir.join("multiple_params.pem")
        pemFile.write(KEY_PEM + CERT_PEMS[0] + DH_PEM + DH_PEM)
        with pytest.raises(ValueError) as excinfo:
            certificateOptionsFromFiles(str(pemFile))
        assert (
            "Supplied PEM file(s) contain(s) *more* than one set of DH "
            "parameters."
        ) == str(excinfo.value)

    def test_removedLegacyDHParameterSupport(self, keyCertChainFile):
        """
        Passing dhParameters as an argument raises a TypeError.
        """
        fakeParameters = object()
        with pytest.raises(TypeError, match="Passing DH parameters"):
            certificateOptionsFromFiles(
                str(keyCertChainFile), dhParameters=fakeParameters
            )
class _TestForwardCompatibleDHE(object):
    """Tests around forward-compatible DH parameter handling.

    NOTE(review): the leading underscore means pytest does NOT collect this
    class — these tests never run; confirm whether that is intentional.
    """

    def test_realDHParameterFileSupport(self, monkeypatch, keyCertChainDHFile):
        """
        Pass DH parameters loaded from a file directly to CertificateOptions if
        the installed version of Twisted supports it.
        """
        fakeCtxFactory = object()
        recorder = call_recorder(lambda *a, **kw: fakeCtxFactory)
        monkeypatch.setattr(ssl, "CertificateOptions", recorder)
        monkeypatch.setattr(pem.twisted, "_DH_PARAMETERS_SUPPORTED", True)
        ctxFactory = certificateOptionsFromFiles(str(keyCertChainDHFile))
        assert ctxFactory is fakeCtxFactory
        assert isinstance(
            recorder.calls[0].kwargs["dhParameters"],
            pem.twisted.DiffieHellmanParameters,
        )

    def test_DHParamContextFactory(self):
        """
        ContextFactory is wrapped and DH params loaded.
        """
        # Stub out the OpenSSL context so only the load_tmp_dh call is observed.
        fakeContext = stub(load_tmp_dh=call_recorder(lambda dhParams: None))
        fakeFactory = stub(getContext=lambda: fakeContext)
        fakeDH = stub(path=b"foo")
        ctxFactory = pem.twisted._DHParamContextFactory(
            fakeFactory, pem.twisted._DiffieHellmanParameters(fakeDH)
        )
        ctx = ctxFactory.getContext()
        assert fakeContext is ctx
        assert [call(b"foo")] == fakeContext.load_tmp_dh.calls
|
# Copyright 2016 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
import logging
import json
import os
import getopt
import socket
from utils.log_util import get_logger_name
# Keys expected in the JSON configuration file read by Config.
KEY_ZK_ADDRESSES = "zk_addresses"
KEY_ZK_TIMEOUT = "zk_timeout"
KEY_ZK_ROOT = "zk_root"
KEY_REDIS_BIN = "redis_bin"
KEY_REDIS_CONF = "redis_conf"
KEY_REDIS_ADDRESS = "redis_address"

# Fallback port used when the configured redis address omits one.
DEFAULT_REDIS_PORT = 6379

# Module logger; the name is derived via the project's log util helper.
LOGGER = logging.getLogger(get_logger_name(__name__))
#
# Exception
#
class ConfigException(Exception):
    """Raised when the supervisor configuration is missing or invalid."""

    def __init__(self, value):
        # Keep the offending value so callers can inspect it directly.
        self.value = value

    def __str__(self):
        return repr(self.value)
class Config:
    """Singleton holding the redis-supervisor configuration.

    Values come from a JSON config file (see KEY_* constants) and may be
    partially overridden by command-line flags. Python 2 module: uses the
    print statement and `except IOError, e` syntax.
    """

    # Class-level singleton slot, set by init().
    __instance = None

    @classmethod
    def init(cls, commands):
        # Build the singleton from command-line arguments.
        config = Config()
        config.read_from_commands(commands)
        cls.__instance = config

    @classmethod
    def instance(cls):
        # Returns None if init() has not been called yet.
        return cls.__instance

    #
    # print help menus
    #
    @staticmethod
    def print_usage_help():
        print "Usage: python ./redis_supervisor/redis_supervisor.py -f file -p port -c config\n" \
              "-f: the file from which to read config\n" \
              "-p: specify redis port\n" \
              "-c: specify redis config file\n" \
              "-h: print this help"

    def __init__(self):
        # Guard against constructing a second instance once the singleton is set.
        if Config.__instance:
            raise ConfigException("instance has been inited")
        self.__zk_addresses = None
        self.__zk_timeout = 10  # default is 10 seconds
        self.__zk_root = None
        self.__redis_bin = None
        self.__redis_conf = None
        self.__redis_address = None
        self.__conf_file = None
        # Command-line overrides (-p / -c), applied after the config file.
        self.__redis_port_specified = None
        self.__redis_conf_specified = None
        self.__redis_perf_tags = None
        self.__redis_enable_perf_monitor = None

    #
    # Read config from string
    #
    def read_config_from_string(self, line):
        """Parse a JSON config string and populate this instance.

        Raises ConfigException when a mandatory key is missing.
        """
        LOGGER.info("Read config %s", line)
        json_obj = json.loads(line)
        # parse config
        if KEY_ZK_TIMEOUT in json_obj:
            self.set_zk_timeout(json_obj[KEY_ZK_TIMEOUT])
        if KEY_ZK_ADDRESSES not in json_obj:
            raise ConfigException("Cannot found %s" % KEY_ZK_ADDRESSES)
        if KEY_ZK_ROOT not in json_obj:
            raise ConfigException("Cannot found %s" % KEY_ZK_ROOT)
        if KEY_REDIS_BIN not in json_obj:
            raise ConfigException("Cannot found %s" % KEY_REDIS_BIN)
        if KEY_REDIS_CONF not in json_obj:
            raise ConfigException("Cannot found %s" % KEY_REDIS_CONF)
        if KEY_REDIS_ADDRESS not in json_obj:
            raise ConfigException("Cannot find %s" % KEY_REDIS_ADDRESS)
        # Write value
        self.set_zk_addresses(json_obj[KEY_ZK_ADDRESSES])
        self.set_zk_root(json_obj[KEY_ZK_ROOT])
        self.set_redis_bin(json_obj[KEY_REDIS_BIN])
        self.set_redis_conf(json_obj[KEY_REDIS_CONF])
        self.set_redis_address(json_obj[KEY_REDIS_ADDRESS])
        LOGGER.info("use config:[%s]", self.to_string())

    #
    # Read config from a file
    #
    def read_config_file(self, config_file):
        """Read and parse the JSON config file at `config_file`."""
        LOGGER.info("read config file [%s]", config_file)
        self.__conf_file = config_file
        if not os.path.exists(config_file):
            LOGGER.error("file %s not exist", config_file)
            raise ConfigException("file {} not found".format(config_file))
        # Parsing
        # init this value to avoid warning
        f = None
        try:
            f = open(config_file, "r")
            lines = f.read()
            self.read_config_from_string(lines)
        except IOError, e:
            LOGGER.error("cannot read config file:%s", config_file, exc_info=True)
            raise ConfigException("cannot read config file:%s" % config_file)
        finally:
            if f is not None and not f.closed:
                try:
                    f.close()
                except IOError, e:
                    # ignore it
                    pass

    #
    # Read config from cmd arguments
    #
    def read_from_commands(self, commands):
        """Apply -f/-p/-c/-h flags; returns False when help was printed."""
        opts, args = getopt.getopt(commands, "hf:p:c:l:")
        for op, value in opts:
            if op == "-f":
                self.read_config_file(value)
            if op == "-p":
                self.__redis_port_specified = value
            if op == "-c":
                self.__redis_conf_specified = value
            if op == "-h":
                self.print_usage_help()
                return False
        # -p overrides the port from the config file, keeping the host.
        if self.__redis_port_specified is not None:
            [host, port_ignored] = self.__redis_address.split(":")
            self.__redis_address = host + ":" + self.__redis_port_specified
            LOGGER.info("specified redis address:%s", self.__redis_address)
        # -c overrides the redis config file path.
        if self.__redis_conf_specified is not None:
            self.__redis_conf = self.__redis_conf_specified
            LOGGER.info("specified redis conf:%s", self.__redis_conf)
        return True

    def get_config_filename(self):
        return self.__conf_file

    def set_zk_timeout(self, timeout=10):
        """
        :type timeout: int
        """
        self.__zk_timeout = timeout

    def get_zk_timeout(self):
        return self.__zk_timeout

    def set_zk_addresses(self, zkAddresses):
        """
        :type zkAddresses: string
        :raises:
            :exc: `~ConfigException` zk address is invalid
        """
        self.validate_not_blank(zkAddresses, "zkAddresses")
        self.validate_net_address(zkAddresses)
        self.__zk_addresses = zkAddresses

    def get_zk_addresses(self):
        return self.__zk_addresses

    def set_zk_root(self, zk_root):
        """
        :type zk_root: str
        """
        self.validate_not_blank(zk_root, "zk_root")
        self.__zk_root = zk_root

    def get_zk_root(self):
        return self.__zk_root

    def set_redis_address(self, redisAddress):
        """
        :type redisAddress: str

        Normalizes "host:port": an empty host falls back to the local
        hostname and an empty port falls back to DEFAULT_REDIS_PORT.
        """
        self.validate_not_blank(redisAddress, "redisAddress")
        self.validate_net_address(redisAddress)
        [host, port] = redisAddress.split(":")
        if host == "":
            host = socket.gethostname()
            if host == "localhost":
                # "localhost" is useless for a cluster-visible address.
                raise ConfigException("can not get correct hostname")
            LOGGER.info("use hostname [%s]" % host)
        if port == "":
            port = DEFAULT_REDIS_PORT
            LOGGER.info("use default redis port [%d]" % port)
        self.__redis_address = host + ":" + str(port)

    def get_redis_address(self):
        """
        :rtype: string
        """
        return self.__redis_address

    def set_redis_bin(self, redis_bin):
        """
        :type redis_bin: string
        """
        self.validate_not_blank(redis_bin, "redis_bin")
        self.__redis_bin = redis_bin

    def get_redis_bin(self):
        """
        :rtype: str
        """
        return self.__redis_bin

    def set_redis_conf(self, redis_conf):
        """
        :type redis_conf: str
        :rtype: str
        """
        self.validate_not_blank(redis_conf, "redis_conf")
        self.__redis_conf = redis_conf

    def get_redis_conf(self):
        """
        :return: str
        """
        return self.__redis_conf

    def set_redis_perf_tags(self, tags):
        """
        :type tags: str
        """
        self.__redis_perf_tags = tags

    def get_redis_perf_tags(self):
        return self.__redis_perf_tags

    def set_enable_perf_monitor(self, is_enable):
        """
        :type is_enable: str

        Interprets the string case-insensitively: only "true" enables it.
        """
        self.__redis_enable_perf_monitor = str(True).lower() == is_enable.lower()

    def get_enable_perf_monitor(self):
        return self.__redis_enable_perf_monitor

    def validate_net_address(self, addresses):
        """
        :type addresses: str
        :raises:
            :exc: `~ConfigException` invalid address

        Each comma-separated entry must look like "host:port".
        """
        for address in addresses.split(","):
            if len(address.split(":")) != 2:
                verbose = "invalid address:[%s] in [%s]" % (address, addresses)
                LOGGER.error(verbose)
                raise ConfigException(verbose)

    def validate_not_blank(self, string=None, msg=None):
        """
        :type string: str
        :type msg: str
        :raises:
            :exc: `~ConfigException` invalid arg
        """
        if string is None or len(string) <= 0:
            verbose = "invalid arg:[%s]" % msg
            LOGGER.error(verbose)
            raise ConfigException(verbose)

    def to_string(self):
        # Human-readable dump used for logging the effective config.
        string = "\n" + KEY_ZK_ADDRESSES + ":" + self.__zk_addresses + "\n" \
                 + KEY_ZK_TIMEOUT + ":" + str(self.__zk_timeout) + "\n" \
                 + KEY_REDIS_BIN + ":" + self.__redis_bin + "\n" \
                 + KEY_REDIS_CONF + ":" + self.__redis_conf + "\n" \
                 + KEY_REDIS_ADDRESS + ":" + self.__redis_address + "\n"
        return string
|
<reponame>AstroShen/fpga21-scaled-tech
"""Collects all the results produced by >>cruncher.py<< running VPR and builds a single results dictionary.
Besides keeping the separate geomean results for each magic formula, it computes another median, for each
circuit, over all formulas, and then does a final geomean. This way, the effects of the potentially poor switch-pattern
architecture are cancelled to an extent. Medians are picked per circuit instead of averaging out the per-formula averages,
as different formulas may differently skew the results for each particular circuit. Also, we avoid double processing of
the already averaged-out quantities, which may be hard to do in a sound manner.
Some filename templates may need to be changed, depending on how the rest of the flow was run.
"""
import os
import copy
from ast import literal_eval
# Shell command template: sort_magic.py ranks architectures by delay and
# prints a per-circuit median dictionary, captured in >>dict.read<<.
sort_call = "python sort_magic.py --arc_dir %s --log_dir %s --out_file %s --N %d --tech %s --sort_key delay --get_median_dict 1 > dict.read"
# Per-technology sort log (one architecture per line, after a header line).
sort_log_template = "../runner_scripts/all_circs_N8_T%s.sort"
# Output filename for each (N, tech, wire) combination.
out_file_template = "N%d_T%s_W%d.sort"
# Directory-name parsers; assumed layout: all_grids_N<k>_..._T<tech>_<wire>.
get_wire = lambda d : int(d.split('_')[-1])
get_tech = lambda d : d.split('_')[-2][1:]
get_N = lambda d : int(d.split('_')[2][1:])
# Geometric mean of a dict's values (Python 2 script: uses the builtin reduce).
get_geomean = lambda d : reduce(lambda x, y : x * y, [d[k] for k in d]) ** (1.0 / len(d))
prototype = {"circs" : {}, "wires" : {}}
res_dict = {}
for d in os.listdir("../runner_scripts/"):
    # Architecture dirs have exactly four underscores and no "logs" in the name.
    if d.startswith("all_grids_N") and len([c for c in d if c == '_']) == 4 and not "logs" in d:
        arc_dir = "../runner_scripts/" + d
        # Matching log dir: same name with "logs" inserted before the wire id.
        log_dir = '_'.join(list(arc_dir.split('_')[:-1]) + ["logs"] + [d.split('_')[-1]])
        wire = get_wire(d)
        tech = get_tech(d)
        N = get_N(d)
        if not tech in res_dict:
            res_dict.update({tech : {n : copy.deepcopy(prototype) for n in [2, 4, 8, 16]}})
        # Find this wire's 1-based rank in the per-technology sort log.
        with open(sort_log_template % tech, "r") as inf:
            lines = inf.readlines()
        for lcnt, line in enumerate(lines[1:], 1):
            line_wire = int(line.split()[0].split("_W")[1])
            if wire == line_wire:
                wire_index = lcnt
                break
        out_file = out_file_template % (N, tech, wire)
        os.system(sort_call % (arc_dir, log_dir, out_file, N, tech))
        # First data line of the sorted output carries the geomean delay.
        with open(out_file, "r") as inf:
            lines = inf.readlines()
        geom = float(lines[1].split()[1])
        with open("dict.read", "r") as inf:
            txt = inf.read().strip()
        os.system("rm -rf dict.read")
        local_dict = literal_eval(txt)
        # The printed dict is nested one level deep; unwrap its single entry.
        local_dict = local_dict[list(local_dict.keys())[0]]
        # Sanity check: recomputed geomean must agree with the sorted file.
        if abs(geom - get_geomean(local_dict)) > 0.01:
            print "Geomean mismatch!"
            exit(-1)
        res_dict[tech][N]["wires"].update({wire : {"index" : wire_index, "td" : geom}})
        # Accumulate each circuit's delay across all wires (magic formulas).
        for circ in local_dict:
            try:
                res_dict[tech][N]["circs"][circ].append(local_dict[circ])
            except:
                res_dict[tech][N]["circs"].update({circ : [local_dict[circ]]})
# Expected number of wire results per circuit; [wire_no / 2] below picks the
# median (integer division under Python 2).
wire_no = 3
for tech in res_dict:
    for N in res_dict[tech]:
        for circ in res_dict[tech][N]["circs"]:
            if len(res_dict[tech][N]["circs"][circ]) != wire_no:
                print "Missing results!"
                exit(-1)
            # Per-circuit median over all formulas (see module docstring).
            res_dict[tech][N]["circs"][circ] = sorted(res_dict[tech][N]["circs"][circ])[wire_no / 2]
        # Final per-(tech, N) figure: geomean of the per-circuit medians.
        res_dict[tech][N].update({"avg" : get_geomean(res_dict[tech][N]["circs"])})
        del res_dict[tech][N]["circs"]
print res_dict
|
"""
This file defines actions, i.e. functions the URLs are mapped into
The @action(path) decorator exposes the function at URL:
http://127.0.0.1:8000/{app_name}/{path}
If app_name == '_default' then simply
http://127.0.0.1:8000/{path}
If path == 'index' it can be omitted:
http://127.0.0.1:8000/
The path follows the bottlepy syntax.
@action.uses('generic.html') indicates that the action uses the generic.html template
@action.uses(session) indicates that the action uses the session
@action.uses(db) indicates that the action uses the db
@action.uses(T) indicates that the action uses the i18n & pluralization
@action.uses(auth.user) indicates that the action requires a logged in user
@action.uses(auth) indicates that the action requires the auth object
session, db, T, auth, and templates are examples of Fixtures.
Warning: Fixtures MUST be declared with @action.uses({fixtures}) else your app will result in undefined behavior
"""
from py4web import action, request, abort, redirect, URL
from .common import db, Field, session, T, cache, auth, logger, authenticated, unauthenticated, flash, groups
from py4web.utils.form import Form, FormStyleBulma
from pydal.validators import *
from pydal import *
from . import settings
import jwt
import os
from PIL import Image
import os
# Lifecycle states for a client-user record (stored in db.client_users.status).
USER_STATUS =['Added','Invited', 'Registered']
# Permission levels a user can hold for a registered client.
USER_ROLES=['Owner', 'Admin','Manager', 'User']
class local_auth(object):
    """SSO authorisation helper for a single registered client.

    str(instance) returns a signed JWT for the current user when authorised,
    or the literal string 'Unauthorised' otherwise.
    """
    def __init__(self, caller):
        # caller: the client_name of the registered client being authorised.
        self.client=caller
        # Filled by is_authorised() with every active client this user belongs to.
        self.clients = []
        # Currently logged-in user dict from py4web auth (None when logged out).
        self.user = auth.get_user()
    def get_client_secret(self):
        # Shared secret used to sign this client's JWT.
        # NOTE(review): .first() may return None for an unknown client name,
        # which would raise AttributeError below — confirm callers guard this.
        query = db.registered_clients.client_name == self.client
        rec = db(query).select(db.registered_clients.client_secret).first()
        return rec.client_secret
    def is_authorised(self):
        """Return True when the logged-in user may access self.client."""
        if not self.user:
            return False
        # NOTE(review): client_users.email is compared against user['id'], so
        # the column appears to store the auth_user id — confirm the schema.
        query = db.client_users.email == self.user['id']
        recs=db(query).select()
        if not recs:
            return False
        else:
            # Collect every *active* client the user is a member of ...
            for rec in recs:
                client={}
                if db.registered_clients[rec.client_id].is_active == True:
                    role = rec.role
                    name = db.registered_clients[rec.client_id].client_name
                    url = db.registered_clients[rec.client_id].client_url
                    client = dict(client=name, role=role, url=url)
                    self.clients.append(client)
                else:
                    continue
            # ... then check that the requested client is among them.
            res = next((item for item in self.clients if item["client"] == self.client), False)
            if not res == False: #resself.client in self.clients['client']:
                return True
            else:
                return False
    def get_role(self):
        # Role of the user for this client. Assumes is_authorised() already ran
        # and succeeded; otherwise res is False and res["role"] raises TypeError.
        res = next((item for item in self.clients if item["client"] == self.client), False)
        return res["role"]
    def get_url(self):
        # Registered URL of this client; same precondition as get_role().
        res = next((item for item in self.clients if item["client"] == self.client), False)
        return res["url"]
    def __str__(self):
        """Return a signed JWT for the authorised user, or 'Unauthorised'."""
        # NOTE(review): original_url is computed but never used.
        original_url = request.environ.get("HTTP_ORIGIN") or request.url
        is_authorised = self.is_authorised()
        if is_authorised == False:
            token = 'Unauthorised'
            return token
        else:
            # Enrich the user payload with client context before signing.
            user = self.user
            user['client_id'] = self.client
            user['client_url'] = self.get_url()
            user['role'] = self.get_role()
            user['all'] = self.clients
            secret = self.get_client_secret()
            token = jwt.encode(user, secret, algorithm='HS256')
            return token
@action("sso_logout")
@action.uses(db, session, auth)
def sso_logout():
    """Clear the auth session, logging the user out of the SSO hub."""
    auth.session.clear()
    return 'All done'
@authenticated("sso_profile", "profile.html")
@authenticated("sso_profile/<caller>", "profile.html")
def sso_profile(caller=None):
    """Display and update the logged-in user's profile (account fields + avatar).

    NOTE(review): the route parameter `caller` is immediately overwritten by
    the 'next' query parameter below, so the <caller> path segment is unused —
    confirm which source of the return URL is intended.
    """
    user = auth.get_user()
    caller = request.query.get('next', None)
    # NOTE(review): assumes a 'profile' reference from auth_user back to a
    # profile table — confirm against the model definitions.
    profile = db.auth_user(user["id"]).profile.select().first()
    icon = f"images/{profile.image}"
    # Append the user profile icon to the dict so it prepopulates it with current data
    user.update({"image": profile.image})
    # Get all the required fields out of the 2 tables to display them: Username, Email, First/Last name, and Profile Pic
    form_list = [field for field in db.auth_user if not field.type == "id"] + [
        field for field in db.profile if not field.type == "id"
    ]
    aform = Form(
        form_list,
        record=user,
        csrf_session=session,
        deletable=False,
        formstyle=FormStyleBulma,
    )
    if aform.accepted:
        # Update the auth user
        db.auth_user[user["id"]].update_record(
            username=aform.vars["username"],
            email=aform.vars["email"],
            first_name=aform.vars["first_name"],
            last_name=aform.vars["last_name"],
        )
        # The icon we want to update our profile will always have a default of default.jpg
        update_icon = "default.jpg"
        if not aform.vars["image"] and profile.image == update_icon:
            # We can't delete the default image so we just redirect back to the page.
            redirect(URL("profile"))
        if aform.vars["image"]:
            # If we are setting it equal to a new icon, we set icon to that file name
            update_icon = aform.vars["image"]
        if update_icon != profile.image:
            # If the new icon (which can be default.jpg) isn't the same icon as before, remove the old one and update
            if profile.image != "default.jpg":
                cleanup_image(profile.image)
            resize_image(update_icon)
            profile.update_record(image=update_icon)
        # Once done with everything (Or after doing nothing because the icons are the same), return to the profile page
        if caller:
            redirect(caller)
        else:
            redirect(URL("sso_profile"))
    return dict(icon=icon, aform=aform)
def resize_image(image_path):
    """Shrink an uploaded image in place so neither side exceeds 300px.

    Aspect ratio is preserved via PIL's thumbnail(); images already within
    the limit are left untouched.
    """
    full_path = os.path.join(settings.UPLOAD_PATH, image_path)
    img = Image.open(full_path)
    # Nothing to do for images that already fit.
    if img.height <= 300 and img.width <= 300:
        return
    img.thumbnail((300, 300))
    img.save(full_path)
def cleanup_image(image_path):
    """Delete an uploaded image file from the upload directory."""
    target = os.path.join(settings.UPLOAD_PATH, image_path)
    os.remove(target)
@action("index", method=['GET','POST'])
@action.uses(db, auth, session, flash, "index.html")
def index():
    """SSO landing page: build the per-user menu of accessible clients.

    Admins get the management menu; other users get links to the active
    clients they belong to. Anonymous visitors are redirected to login.
    """
    menu_items = []
    message = "SSO Landing Page"
    user=auth.get_user()
    #print ('sso user', user)
    if user:
        grps = groups.get(auth.get_user()['id'])
        if not 'Admin' in grps:
            # NOTE(review): client_users.email is matched against user['id'];
            # the column appears to store the auth_user id — confirm schema.
            query = db.client_users.email == user['id']
            recs = db(query).select()
            if not recs:
                flash.set('YOU DONT HAVE ACCESS TO ANY REGISTERED CLIENTS. Please contact your administrator')
            else:
                menu_items = []
                for rec in recs:
                    menu_items.append(dict(name=db.registered_clients[rec.client_id].client_name,
                                           goto=db.registered_clients[rec.client_id].client_url))
        else:
            # Admins see the management pages instead of client links.
            menu_items = [
                dict(name="Manage Groups", goto="../manage_groups"),
                dict(name="Manage Clients", goto="../manage_clients"),
                dict(name="Manage Users", goto="../manage_users")
            ]
    else:
        redirect(URL('auth/login', vars=dict(next = '../index')))
    return dict(message=message, menu_items = menu_items)
@action('remove_group/<group_id>', method=['GET','POST'])
@action.uses(db, session, auth, flash, 'generic.html' )
def remove_group(group_id=None):
    """Delete the selected group record, then return to the group list."""
    if group_id:
        db(db.auth_user_tag_groups.id == group_id).delete()
        flash.set('Removed group ')
    else:
        flash.set('No group ID selected')
    redirect(URL('manage_groups'))
@action('edit_group/<group_id>', method=['GET','POST'])
@action.uses(db, session, auth, flash, 'edit_rec.html' )
def edit_group(group_id=None):
    """Edit an existing auth_user_tag_groups record via a Bulma-styled form."""
    # Keep the primary key out of the form.
    db.auth_user_tag_groups.id.writable = False
    db.auth_user_tag_groups.id.readable = False
    group_form = Form(db.auth_user_tag_groups, group_id, formstyle=FormStyleBulma)
    if group_form.accepted:
        flash.set('Updated group details')
        redirect(URL('manage_groups'))
    return dict(form=group_form)
@action('manage_groups', method=['GET','POST'])
@action.uses(db, session, auth, flash, 'manage_groups.html' )
def manage_groups():
    """Render the group-management page and handle creation of new groups.

    Returns the template context for manage_groups.html: the creation form,
    table headers, all existing auth_user_tag_groups rows, and nav menu items.
    """
    page_title = 'STEP 1 >> Manage User Groups'
    back = ''
    menu_items = []
    user = auth.get_user()
    if not user:
        redirect(URL('auth', 'login'))
    #back = mygui.get_button('back', URL('register'), T("Back"), T('Back'))
    menu_items.append(dict(name="Manage Clients", goto="../manage_clients"))
    menu_items.append(dict(name="Manage Users", goto="../manage_users"))
    # dbio=False: the form only validates; the insert is done explicitly below.
    form = Form(db.auth_user_tag_groups, dbio=False, formstyle=FormStyleBulma)
    if form.accepted:
        db.auth_user_tag_groups.insert(**form.vars)
    # Use a local name that does not shadow the `groups` fixture imported from
    # .common (the original rebound `groups` locally); the template context
    # key stays "groups" so the template is unaffected.
    group_rows = db(db.auth_user_tag_groups).select()
    headers = ['#ID', 'Path', 'User', 'Actions']
    return dict(form=form, headers=headers, groups=group_rows, page_title=page_title, back=back, user=user, menu_items=menu_items)
@action('edit_client/<client_id>', method=['GET','POST'])
@action.uses(db, session, auth, flash, 'edit_rec.html' )
def edit_client(client_id=None):
    """Edit an existing registered_clients record via a Bulma-styled form."""
    # Keep the primary key out of the form.
    db.registered_clients.id.writable = False
    db.registered_clients.id.readable = False
    client_form = Form(db.registered_clients, client_id, formstyle=FormStyleBulma)
    if client_form.accepted:
        flash.set('Updated client details')
        redirect(URL('manage_clients'))
    return dict(form=client_form)
@action('manage_clients', method=['GET','POST'])
@action.uses(db, session, auth, flash, 'manage_clients.html' )
def register_clients(client_id=None):
    """Render the client-registration page and handle new client registration.

    On a successful submit the client is inserted and the current user is
    recorded as its Owner. Returns the template context for manage_clients.html.
    """
    page_title = 'STEP 1 >> Register Your Clients'
    back = ''
    menu_items = []
    user = auth.get_user()
    if not user:
        redirect(URL('auth', 'login'))
    menu_items.append(dict(name="Manage Groups", goto="../manage_groups"))
    menu_items.append(dict(name="Manage Users", goto="../manage_users"))
    # Hide the secret BEFORE building the form. The original constructed a
    # first Form, toggled the field, then constructed a second identical Form
    # and discarded the first — the duplicate is removed here.
    db.registered_clients.client_secret.readable = db.registered_clients.client_secret.writable = False
    form = Form(db.registered_clients, dbio=False, formstyle=FormStyleBulma)
    if form.accepted:
        rcid = db.registered_clients.insert(**form.vars)
        # The registering user automatically becomes the client's Owner.
        db.client_users.insert(email = user['id'],
                               client_id = rcid,
                               role = "Owner",
                               status = "Registered"
                               )
    clients = db(db.registered_clients).select()
    headers = ['#ID', 'Name', 'URL', 'Key', 'Is Active', 'Actions']
    return dict(form=form, headers=headers, clients=clients, page_title=page_title, back=back, user=user, menu_items=menu_items)
@action('remove_registered_client/<client_id>')
@action.uses(db, session, auth, flash )
def remove_registered_client(client_id):
    """Delete a registered client by id, then return to the client list.

    Fixes: the original redirected to URL('register_clients'), which is the
    Python function name — the registered route for the client list is
    'manage_clients'. Also fixes the 'seleczt' typo in the flash message.
    """
    user = auth.get_user()
    if not user:
        redirect(URL('auth', 'login'))
    if not client_id:
        flash.set('No client selected ... please select a client to remove')
        redirect(URL('manage_clients'))
    else:
        db(db.registered_clients.id == client_id).delete()
        flash.set('Deleted client')
        redirect(URL('manage_clients'))
@action('remove_client_user/<user_id>')
@action.uses(db, session, auth, flash )
def remove_clieint_user(user_id):
    """Delete a client_users record by id, then return to the user list.

    Fixes the 'seleczt' typo in the flash message. NOTE(review): the function
    name is misspelled ('clieint'); it is kept unchanged because the route is
    registered by the decorator path, not the function name.
    """
    user = auth.get_user()
    if not user:
        redirect(URL('auth', 'login'))
    if not user_id:
        flash.set('No user selected ... please select a user to remove')
        redirect(URL('manage_users'))
    else:
        db(db.client_users.id == user_id).delete()
        flash.set('Deleted user')
        redirect(URL('manage_users'))
@action('manage_users', method=['GET','POST'])
@action('manage_users/<user_id>', method=['GET','POST'])
@action.uses(db, session, auth, flash, 'manage_users.html' )
def manage_users(user_id=None):
    """List/add/edit client_users records (which user may access which client).

    With a user_id the page edits that record; without one it shows a blank
    creation form. Creation inserts are done manually because the blank form
    uses dbio=False.
    """
    page_title = 'STEP 2 >> Invite users'
    back = ''
    edit = False
    menu_items = []
    user = auth.get_user()
    if not user:
        redirect(URL('auth', 'login'))
    #back = mygui.get_button('back', URL('register'), T("Back"), T('Back'))
    menu_items.append(dict(name="Manage Groups", goto="../manage_groups"))
    menu_items.append(dict(name="Manage Clients", goto="../manage_clients"))
    if user_id:
        # Edit mode: bind the form to the record; hide the primary key.
        db.client_users.id.readable =db.client_users.id.writable = False
        form = Form(db.client_users, user_id, formstyle=FormStyleBulma)
        edit = True
        if form.accepted:
            flash.set('Updated user for registered client')
            edit=False
            # After a successful edit, fall back to a blank creation form.
            form = Form(db.client_users, dbio=False, formstyle=FormStyleBulma)
    else:
        form = Form(db.client_users, dbio=False, formstyle=FormStyleBulma)
        edit = False
        if form.accepted:
            db.client_users.insert(**form.vars)
            flash.set('Added a new registered client user')
    headers=['#ID', 'Email', 'Client', 'Role', 'Status', 'Actions']
    users = db(db.client_users.id > 0).select()
    recs=[]
    # Denormalise for display: resolve each user's email address and the
    # client's name (client_users.email appears to store the auth_user id).
    for u in users: #.render():
        rec={}
        rec = dict(id=u.id, email=db.auth_user[u.email].email, client=db.registered_clients[u.client_id].client_name,
                   role=u.role, status=u.status)
        recs.append(rec)
    return dict(form=form, users=recs, headers=headers, page_title=page_title, back=back, edit=edit, menu_items=menu_items, user=user)
|
<gh_stars>0
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
# NOTE(review): this class follows the generated-boilerplate pattern of the
# ixnetwork_restpy bindings (property getters/setters over _get_attribute /
# _set_attribute, plus add/remove/find/read CRUD helpers) — treat manual
# edits with care, as regeneration may overwrite them.
class NetworkRange(Base):
    """The NetworkRange class encapsulates a user managed networkRange node in the ixnetwork hierarchy.
    An instance of the class can be obtained by accessing the NetworkRange property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
    The internal properties list can be managed by the user by using the add and remove methods.
    """
    # REST API object name for this node in the ixnetwork hierarchy.
    _SDM_NAME = 'networkRange'
    def __init__(self, parent):
        super(NetworkRange, self).__init__(parent)
    @property
    def EntryTe(self):
        """An instance of the EntryTe class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis.entryte.EntryTe)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Deferred import: avoids a circular import at module load time.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis.entryte import EntryTe
        return EntryTe(self)._select()
    @property
    def RangeTe(self):
        """An instance of the RangeTe class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis.rangete.RangeTe)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Deferred import: avoids a circular import at module load time.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis.rangete import RangeTe
        return RangeTe(self)._select()
    @property
    def EnableHostName(self):
        """If true, the given dynamic host name is transmitted in all the packets sent from this router.
        Returns:
            bool
        """
        return self._get_attribute('enableHostName')
    @EnableHostName.setter
    def EnableHostName(self, value):
        self._set_attribute('enableHostName', value)
    @property
    def Enabled(self):
        """If enabled, this route range will be advertised by the nodes in the network range.
        Returns:
            bool
        """
        return self._get_attribute('enabled')
    @Enabled.setter
    def Enabled(self, value):
        self._set_attribute('enabled', value)
    @property
    def EntryCol(self):
        """The simulated router is connected to a router in the grid at a particular row and column location. This option is the column number. (default = 1)
        Returns:
            number
        """
        return self._get_attribute('entryCol')
    @EntryCol.setter
    def EntryCol(self, value):
        self._set_attribute('entryCol', value)
    @property
    def EntryRow(self):
        """The simulated router is connected to a router in the grid at a particular row and column location. This option is the row number. (default = 1)
        Returns:
            number
        """
        return self._get_attribute('entryRow')
    @EntryRow.setter
    def EntryRow(self, value):
        self._set_attribute('entryRow', value)
    @property
    def GridNodeRoutes(self):
        """The set of advertised networks within the grid to be included in isisGrid.
        Returns:
            list(dict(arg1:bool,arg2:str[ipAny|ipv4|ipv6],arg3:str,arg4:number,arg5:number,arg6:number,arg7:number,arg8:bool,arg9:bool,arg10:number))
        """
        return self._get_attribute('gridNodeRoutes')
    @GridNodeRoutes.setter
    def GridNodeRoutes(self, value):
        self._set_attribute('gridNodeRoutes', value)
    @property
    def GridOutsideExLinks(self):
        """NOT DEFINED
        Returns:
            list(dict(arg1:number,arg2:number,arg3:str,arg4:list[dict(arg1:str[ipAny|ipv4|ipv6],arg2:str,arg3:number)],arg5:str,arg6:number,arg7:number,arg8:number,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number,arg16:number))
        """
        return self._get_attribute('gridOutsideExLinks')
    @GridOutsideExLinks.setter
    def GridOutsideExLinks(self, value):
        self._set_attribute('gridOutsideExLinks', value)
    @property
    def GridOutsideLinks(self):
        """Sets up the outside links between an ISIS grid and another ISIS grid.
        Returns:
            list(dict(arg1:number,arg2:number,arg3:str,arg4:str,arg5:number,arg6:number,arg7:number,arg8:number,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number))
        """
        return self._get_attribute('gridOutsideLinks')
    @GridOutsideLinks.setter
    def GridOutsideLinks(self, value):
        self._set_attribute('gridOutsideLinks', value)
    @property
    def HostNamePrefix(self):
        """Allows to add a host name to this network range. The name prefix is appended by row ID and column ID in .<rowid>.<colid> combination as per the router placed in the emulated network grid behind the Ixia port.
        Returns:
            str
        """
        return self._get_attribute('hostNamePrefix')
    @HostNamePrefix.setter
    def HostNamePrefix(self, value):
        self._set_attribute('hostNamePrefix', value)
    @property
    def InterfaceIps(self):
        """The interface IP information for the simulated network.
        Returns:
            list(dict(arg1:str[ipAny|ipv4|ipv6],arg2:str,arg3:number))
        """
        return self._get_attribute('interfaceIps')
    @InterfaceIps.setter
    def InterfaceIps(self, value):
        self._set_attribute('interfaceIps', value)
    @property
    def InterfaceMetric(self):
        """The metric cost associated with this emulated ISIS router.
        Returns:
            number
        """
        return self._get_attribute('interfaceMetric')
    @InterfaceMetric.setter
    def InterfaceMetric(self, value):
        self._set_attribute('interfaceMetric', value)
    @property
    def Ipv6MtMetric(self):
        """This metric is same as the Interface Metric. If enabled, it allows you to enter data.
        Returns:
            number
        """
        return self._get_attribute('ipv6MtMetric')
    @Ipv6MtMetric.setter
    def Ipv6MtMetric(self, value):
        self._set_attribute('ipv6MtMetric', value)
    @property
    def LinkType(self):
        """The type of network link for this emulated ISIS router.
        Returns:
            str(pointToPoint|broadcast)
        """
        return self._get_attribute('linkType')
    @LinkType.setter
    def LinkType(self, value):
        self._set_attribute('linkType', value)
    @property
    def NoOfCols(self):
        """The number of columns in the simulated grid. (default = 3)
        Returns:
            number
        """
        return self._get_attribute('noOfCols')
    @NoOfCols.setter
    def NoOfCols(self, value):
        self._set_attribute('noOfCols', value)
    @property
    def NoOfRows(self):
        """The number of rows in the simulated grid. (default = 3)
        Returns:
            number
        """
        return self._get_attribute('noOfRows')
    @NoOfRows.setter
    def NoOfRows(self, value):
        self._set_attribute('noOfRows', value)
    @property
    def RouterId(self):
        """The router ID for the first emulated ISIS router in this network range.
        Returns:
            str
        """
        return self._get_attribute('routerId')
    @RouterId.setter
    def RouterId(self, value):
        self._set_attribute('routerId', value)
    @property
    def RouterIdIncrement(self):
        """The increment step to be used for creating the router IDs for the emulated ISIS routers in this network range.
        Returns:
            str
        """
        return self._get_attribute('routerIdIncrement')
    @RouterIdIncrement.setter
    def RouterIdIncrement(self, value):
        self._set_attribute('routerIdIncrement', value)
    @property
    def TePaths(self):
        """Adds a Traffic Engineering (TE) Path to the list.
        Returns:
            list(dict(arg1:number,arg2:number,arg3:number,arg4:number,arg5:number,arg6:number,arg7:bool,arg8:str,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number,arg16:number,arg17:number,arg18:number,arg19:number))
        """
        return self._get_attribute('tePaths')
    @TePaths.setter
    def TePaths(self, value):
        self._set_attribute('tePaths', value)
    @property
    def UseWideMetric(self):
        """Enables the use of extended reachability (wide) metrics (defined to support TE): 32-bits wide for IP reachability (IP routes) and 24-bits wide for IS reachability (IS neighbors). If TE is enabled, Wide Metrics will be enabled automatically. The Wide Metrics may be used without enabling TE, however.
        Returns:
            bool
        """
        return self._get_attribute('useWideMetric')
    @UseWideMetric.setter
    def UseWideMetric(self, value):
        self._set_attribute('useWideMetric', value)
    def add(self, EnableHostName=None, Enabled=None, EntryCol=None, EntryRow=None, GridNodeRoutes=None, GridOutsideExLinks=None, GridOutsideLinks=None, HostNamePrefix=None, InterfaceIps=None, InterfaceMetric=None, Ipv6MtMetric=None, LinkType=None, NoOfCols=None, NoOfRows=None, RouterId=None, RouterIdIncrement=None, TePaths=None, UseWideMetric=None):
        """Adds a new networkRange node on the server and retrieves it in this instance.
        Args:
            EnableHostName (bool): If true, the given dynamic host name is transmitted in all the packets sent from this router.
            Enabled (bool): If enabled, this route range will be advertised by the nodes in the network range.
            EntryCol (number): The simulated router is connected to a router in the grid at a particular row and column location. This option is the column number. (default = 1)
            EntryRow (number): The simulated router is connected to a router in the grid at a particular row and column location. This option is the row number. (default = 1)
            GridNodeRoutes (list(dict(arg1:bool,arg2:str[ipAny|ipv4|ipv6],arg3:str,arg4:number,arg5:number,arg6:number,arg7:number,arg8:bool,arg9:bool,arg10:number))): The set of advertised networks within the grid to be included in isisGrid.
            GridOutsideExLinks (list(dict(arg1:number,arg2:number,arg3:str,arg4:list[dict(arg1:str[ipAny|ipv4|ipv6],arg2:str,arg3:number)],arg5:str,arg6:number,arg7:number,arg8:number,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number,arg16:number))): NOT DEFINED
            GridOutsideLinks (list(dict(arg1:number,arg2:number,arg3:str,arg4:str,arg5:number,arg6:number,arg7:number,arg8:number,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number))): Sets up the outside links between an ISIS grid and another ISIS grid.
            HostNamePrefix (str): Allows to add a host name to this network range. The name prefix is appended by row ID and column ID in .<rowid>.<colid> combination as per the router placed in the emulated network grid behind the Ixia port.
            InterfaceIps (list(dict(arg1:str[ipAny|ipv4|ipv6],arg2:str,arg3:number))): The interface IP information for the simulated network.
            InterfaceMetric (number): The metric cost associated with this emulated ISIS router.
            Ipv6MtMetric (number): This metric is same as the Interface Metric. If enabled, it allows you to enter data.
            LinkType (str(pointToPoint|broadcast)): The type of network link for this emulated ISIS router.
            NoOfCols (number): The number of columns in the simulated grid. (default = 3)
            NoOfRows (number): The number of rows in the simulated grid. (default = 3)
            RouterId (str): The router ID for the first emulated ISIS router in this network range.
            RouterIdIncrement (str): The increment step to be used for creating the router IDs for the emulated ISIS routers in this network range.
            TePaths (list(dict(arg1:number,arg2:number,arg3:number,arg4:number,arg5:number,arg6:number,arg7:bool,arg8:str,ar9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number,arg16:number,arg17:number,arg18:number,arg19:number))): Adds a Traffic Engineering (TE) Path to the list.
            UseWideMetric (bool): Enables the use of extended reachability (wide) metrics (defined to support TE): 32-bits wide for IP reachability (IP routes) and 24-bits wide for IS reachability (IS neighbors). If TE is enabled, Wide Metrics will be enabled automatically. The Wide Metrics may be used without enabling TE, however.
        Returns:
            self: This instance with all currently retrieved networkRange data using find and the newly added networkRange data available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() forwards every named argument (including None defaults) to
        # the generic create helper on the Base class.
        return self._create(locals())
    def remove(self):
        """Deletes all the networkRange data in this instance from server.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, EnableHostName=None, Enabled=None, EntryCol=None, EntryRow=None, GridNodeRoutes=None, GridOutsideExLinks=None, GridOutsideLinks=None, HostNamePrefix=None, InterfaceIps=None, InterfaceMetric=None, Ipv6MtMetric=None, LinkType=None, NoOfCols=None, NoOfRows=None, RouterId=None, RouterIdIncrement=None, TePaths=None, UseWideMetric=None):
        """Finds and retrieves networkRange data from the server.
        All named parameters support regex and can be used to selectively retrieve networkRange data from the server.
        By default the find method takes no parameters and will retrieve all networkRange data from the server.
        Args:
            EnableHostName (bool): If true, the given dynamic host name is transmitted in all the packets sent from this router.
            Enabled (bool): If enabled, this route range will be advertised by the nodes in the network range.
            EntryCol (number): The simulated router is connected to a router in the grid at a particular row and column location. This option is the column number. (default = 1)
            EntryRow (number): The simulated router is connected to a router in the grid at a particular row and column location. This option is the row number. (default = 1)
            GridNodeRoutes (list(dict(arg1:bool,arg2:str[ipAny|ipv4|ipv6],arg3:str,arg4:number,arg5:number,arg6:number,arg7:number,arg8:bool,arg9:bool,arg10:number))): The set of advertised networks within the grid to be included in isisGrid.
            GridOutsideExLinks (list(dict(arg1:number,arg2:number,arg3:str,arg4:list[dict(arg1:str[ipAny|ipv4|ipv6],arg2:str,arg3:number)],arg5:str,arg6:number,arg7:number,arg8:number,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number,arg16:number))): NOT DEFINED
            GridOutsideLinks (list(dict(arg1:number,arg2:number,arg3:str,arg4:str,arg5:number,arg6:number,arg7:number,arg8:number,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number))): Sets up the outside links between an ISIS grid and another ISIS grid.
            HostNamePrefix (str): Allows to add a host name to this network range. The name prefix is appended by row ID and column ID in .<rowid>.<colid> combination as per the router placed in the emulated network grid behind the Ixia port.
            InterfaceIps (list(dict(arg1:str[ipAny|ipv4|ipv6],arg2:str,arg3:number))): The interface IP information for the simulated network.
            InterfaceMetric (number): The metric cost associated with this emulated ISIS router.
            Ipv6MtMetric (number): This metric is same as the Interface Metric. If enabled, it allows you to enter data.
            LinkType (str(pointToPoint|broadcast)): The type of network link for this emulated ISIS router.
            NoOfCols (number): The number of columns in the simulated grid. (default = 3)
            NoOfRows (number): The number of rows in the simulated grid. (default = 3)
            RouterId (str): The router ID for the first emulated ISIS router in this network range.
            RouterIdIncrement (str): The increment step to be used for creating the router IDs for the emulated ISIS routers in this network range.
            TePaths (list(dict(arg1:number,arg2:number,arg3:number,arg4:number,arg5:number,arg6:number,arg7:bool,arg8:str,arg9:number,arg10:number,arg11:number,arg12:number,arg13:number,arg14:number,arg15:number,arg16:number,arg17:number,arg18:number,arg19:number))): Adds a Traffic Engineering (TE) Path to the list.
            UseWideMetric (bool): Enables the use of extended reachability (wide) metrics (defined to support TE): 32-bits wide for IP reachability (IP routes) and 24-bits wide for IS reachability (IS neighbors). If TE is enabled, Wide Metrics will be enabled automatically. The Wide Metrics may be used without enabling TE, however.
        Returns:
            self: This instance with matching networkRange data retrieved from the server available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() forwards the filter arguments to the generic select helper.
        return self._select(locals())
    def read(self, href):
        """Retrieves a single instance of networkRange data from the server.
        Args:
            href (str): An href to the instance to be retrieved
        Returns:
            self: This instance with the networkRange data from the server available through an iterator or index
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
from tkinter import *
from tkinter import ttk
import pandas as pd
from linearregressmodel import lrmodel
from candlestick import candlestick
import webbrowser
import googlesearch
import lxml
from arima import arimamodel
from betacalc import beta
from optionsfairvalue import options
# pulls expected ticker symbols
def gettickers():
    """Download and return the combined list of NYSE and NASDAQ ticker symbols.

    Returns a list of whitespace-stripped symbols, NYSE first then NASDAQ.
    The original duplicated the download/strip logic per exchange; this
    version factors it into one loop over the exchange names.
    NOTE(review): old.nasdaq.com may no longer serve these CSV endpoints —
    confirm the URLs still resolve.
    """
    url_template = ('https://old.nasdaq.com/screening/companies-by-name.aspx'
                    '?letter=0&exchange={}&render=download')
    symbols = []
    for exchange in ('nyse', 'nasdaq'):
        frame = pd.read_csv(url_template.format(exchange))
        symbols.extend(s.strip() for s in frame['Symbol'])
    return symbols
# Module-level cache of valid ticker symbols, fetched once at startup.
tickers = gettickers()
# command to be executed when the button on the 'Historical Data' tab is clicked
def clicked():
    """Open historical-data charts for the ticker typed on the Historical Data tab."""
    ticker = str(txthistory.get()).upper()
    # Guard clause: reject unknown tickers before doing any work.
    if ticker not in tickers:
        histstatuslbl.configure(text='Please enter a valid NYSE or NASDAQ ticker')
        return
    histstatuslbl.configure(text='Opening the historical data for ' + ticker)
    yahooinfo(ticker)
# builds the root window for the executable
window = Tk()
window.title("Stock Analyzer")
window.configure(background='black')
window.geometry('600x250')
# tabs defined and built: one ttk.Frame per feature area, attached to a notebook
tab_control = ttk.Notebook(window)
HistoricalData = ttk.Frame(tab_control)
FinancialStatements = ttk.Frame(tab_control)
Analysis = ttk.Frame(tab_control)
Modeling = ttk.Frame(tab_control)
CandlestickChart = ttk.Frame(tab_control)
StockNews = ttk.Frame(tab_control)
Options = ttk.Frame(tab_control)
tab_control.add(HistoricalData, text='Historical Data')
tab_control.add(FinancialStatements, text='Financial Statements')
tab_control.add(Analysis, text='Analysis')
tab_control.add(Modeling, text='Modeling')
tab_control.add(CandlestickChart, text='Candlestick Chart')
tab_control.add(StockNews, text='Stock News')
tab_control.add(Options, text = 'Options')
'''
Stock historical data tab
'''
# prompt label on the Historical Data tab
lblstockhistory = Label(HistoricalData, text="Stock Ticker:")
lblstockhistory.place(x=0, y=0)
# status label updated by clicked() with progress or error text
histstatuslbl = Label(HistoricalData)
histstatuslbl.place(x=200, y=0)
# text field on the Historical Data tab to enter the stock ticker you want
txthistory = Entry(HistoricalData, width=8)
txthistory.place(x=70, y=0)
# button on the Historical Data tab; runs clicked() to open the requested ticker's history
btnstockhistory = Button(HistoricalData, text="Select", command=clicked)
btnstockhistory.place(x=130, y=0)
# function to open the typed stock ticker's on the history tab
def yahooinfo(selection):
    """Open the MarketWatch chart page for *selection* if it is a known ticker."""
    ticker = str(selection)
    if ticker in tickers:
        url = 'https://www.marketwatch.com/investing/stock/' + ticker.lower() + '/charts'
        webbrowser.open(url)
'''
Financial Statement tab
'''
# prompt label on the Financial Statements tab
lblfinstates = Label(FinancialStatements, text="Stock Ticker:")
lblfinstates.place(x=0, y=0)
# text field on the Financial Statements tab asking for a stock ticker
txtfinstates = Entry(FinancialStatements, width=8)
txtfinstates.place(x=70, y=0)
# holds which financial-statement radio button is selected (1, 2 or 3)
selected = IntVar()
# radio buttons to choose a financial statement
rad1 = Radiobutton(FinancialStatements, text='Income Statement', value=1, variable=selected)
rad1.place(x=0, y=20)
rad2 = Radiobutton(FinancialStatements, text='Balance Sheet', value=2, variable=selected)
rad2.place(x=130, y=20)
rad3 = Radiobutton(FinancialStatements, text='Cash Flow', value=3, variable=selected)
rad3.place(x=230, y=20)
# function to run when a radio button is selected and the button is clicked
def selectedradiobut():
    """Open the selected MarketWatch financial statement for the typed ticker."""
    stock = str(txtfinstates.get()).upper()
    choice = selected.get()
    if stock not in tickers:
        finstatuslabel.configure(text='Please enter a valid NYSE or NASDAQ ticker')
        return
    # Map each radio-button value to its status message and URL suffix.
    pages = {
        1: ('Opening the income statement for ', '/financials'),
        2: ('Opening the balance sheet for ', '/financials/balance-sheet'),
        3: ('Opening the cash flow statement for ', '/financials/cash-flow'),
    }
    if choice in pages:
        message, path = pages[choice]
        finstatuslabel.configure(text=message + stock)
        webbrowser.open('https://www.marketwatch.com/investing/stock/' +
                        stock.lower() + path)
# status label updated by selectedradiobut() with progress or error text
finstatuslabel = Label(FinancialStatements)
finstatuslabel.place(x=170, y=0)
# button on the Financial Statements tab; opens the selected financial statement
btnfinstates = Button(FinancialStatements, text="Select", command=selectedradiobut)
btnfinstates.place(x=330, y=20)
def secfilings():
    """Open the SEC EDGAR filings page for the ticker typed on this tab."""
    ticker = str(txtfinstates.get()).upper()
    if ticker not in tickers:
        lblfilingsstatus.configure(text='Please enter a valid NYSE or NASDAQ ticker')
        return
    webbrowser.open('https://www.sec.gov/cgi-bin/browse-edgar?CIK=' +
                    ticker + '&Find=Search&owner=exclude&action=getcompany')
# sec filing instructions
secfilinglabel = Label(FinancialStatements, text='Click get to open all SEC EDGAR filings')
secfilinglabel.place(x=0, y=45)
# status label updated by secfilings() on invalid input
lblfilingsstatus = Label(FinancialStatements)
lblfilingsstatus.place(x=0, y=65)
# button on the SEC filings section; opens the list of all SEC filings
btnfilings = Button(FinancialStatements, text='Get', command=secfilings)
btnfilings.place(x=220, y=45)
'''
Stock analysis tab
'''
# function to run to pull analysis for a typed stock
def analysis():
    """Open MarketWatch analyst estimates for the ticker typed on the Analysis tab."""
    # FIX: the original assigned `typedstock = str(typed).upper()` — a
    # redundant second str()/upper() of an already upper-cased string.
    typed = txtanalysis.get().upper()
    if typed in tickers:
        lblerror.configure(text='Opening analysis...')
        # Lower-case only for the URL slug.
        webbrowser.open('https://www.marketwatch.com/investing/stock/' +
                        typed.lower() + '/analystestimates')
    else:
        lblerror.configure(text='Please enter a valid NYSE or NASDAQ ticker')
# prompt label on the Analysis tab asking for a ticker
lblanalysis = Label(Analysis, text="Get financial ratios: ")
lblanalysis.grid(column=1, row=3)
# status label updated by analysis() with progress or error text
lblerror = Label(Analysis)
lblerror.grid(column=1, row=5)
# text field on the Analysis tab to enter the ticker you want analysis for
txtanalysis = Entry(Analysis, width=8)
txtanalysis.grid(column=2, row=3)
# function to calculate beta value of the selected stock against the S&P
def calc_beta():
    """Compute and display the typed stock's beta against the S&P 500."""
    tickername = txtanalysis.get().upper()
    if tickername in tickers:
        stock = beta(tickername)
        betaval = stock.beta_calculate()
        lblbetavalue.configure(text=betaval)
    else:
        # FIX: an invalid ticker previously gave no feedback at all; show
        # the same error message the other tabs use.
        lblbetavalue.configure(text='Please enter a valid NYSE or NASDAQ ticker')
# button on the Analysis tab to open MarketWatch analysis for a typed ticker
btnanalysis = Button(Analysis, text="Ok", command=analysis)
btnanalysis.grid(column=3, row=3)
# button to calculate daily beta against the S&P 500
btnbetacalc = Button(Analysis, text='Calculate Beta', command=calc_beta)
btnbetacalc.grid(column=4, row=3)
# label that calc_beta() fills with the computed beta (or an error message)
lblbetavalue = Label(Analysis)
lblbetavalue.grid(column=5, row=3)
'''
Stock modeling tab
'''
# prompt label on the Modeling tab asking for a ticker
lblmodel = Label(Modeling, text="Get modeling of stock: ")
lblmodel.grid(column=1, row=3)
# text field on the Modeling tab to enter the ticker
txtmodel = Entry(Modeling, width=8)
txtmodel.grid(column=2, row=3)
# function to return the stock graph with linear regression results
def modelreturn():
    """Show the linear-regression graph for the typed ticker; None if unknown."""
    ticker = str(txtmodel.get()).upper()
    if ticker not in tickers:
        return None
    model = lrmodel(ticker)
    return model.graphlrresults(ticker)
# function to return the stock graph with arima results with 30 day horizon
def arima():
    """Show the 30-day ARIMA forecast graph for the typed ticker; None if unknown."""
    ticker = str(txtmodel.get()).upper()
    if ticker not in tickers:
        return None
    model = arimamodel(ticker)
    return model.arimagraph(ticker)
# button on the Modeling tab to show the linear-regression model
btnmodel = Button(Modeling, text="Linear Regression Model", command=modelreturn)
btnmodel.grid(column=3, row=3)
# button on the Modeling tab to show the ARIMA model
btnarima = Button(Modeling, text='ARIMA', command=arima)
btnarima.place(x=330, y=0)
'''
Candlestick Chart tab
'''
# prompt label on the Candlestick Chart tab asking for a ticker
lblcandle = Label(CandlestickChart, text='Create candlestick chart for stock: ')
lblcandle.grid(column=1, row=3)
# text field on the Candlestick Chart tab to enter the ticker to chart
txtcandle = Entry(CandlestickChart, width=8)
txtcandle.grid(column=2, row=3)
# function to return a candlestick chart of the entered ticker
def candlechart():
    """Build and show a candlestick chart for the typed ticker; None if unknown."""
    ticker = str(txtcandle.get()).upper()
    if ticker not in tickers:
        return None
    chart = candlestick(ticker)
    return chart.graph(ticker)
# button on the Candlestick Chart tab to run candlechart() and show the graph
btncandle = Button(CandlestickChart, text='Create Chart', command=candlechart)
btncandle.grid(column=3, row=3)
'''
Stock News tab
'''
# prompt label on the Stock News tab asking for a ticker to search news for
lbl_stock_search = Label(StockNews, text='Select a stock ticker to search for:')
lbl_stock_search.place(x=0, y=0)
# text field on the Stock News tab to enter the ticker
txt_stock_search = Entry(StockNews, width=8)
txt_stock_search.place(x=200, y=0)
# Function to get a list of urls for the ticker related news
def search():
    """Run a Google news search for the typed ticker and list the result URLs.

    Fills the module-level ``listbox`` with up to 10 result URLs.
    """
    query = str(txt_stock_search.get()).upper() + ' news'
    # FIX: clear previous results so repeated searches do not accumulate.
    listbox.delete(0, END)
    # FIX: enumerate() replaces the manual counter/index bookkeeping.
    for position, url in enumerate(
            googlesearch.search_news(query, stop=10, pause=2.0), start=1):
        listbox.insert(position, url)
# Function to open whichever article is selected
def open_selected_result():
    """Open the news article currently highlighted in the listbox."""
    webbrowser.open(listbox.get(ACTIVE))
# button on the Stock News tab to fetch and list all 10 results
btn_stock_search = Button(StockNews, text='Get Results', command=search)
btn_stock_search.place(x=250, y=0)
# listbox of all returned article URLs
listbox = Listbox(StockNews)
listbox.place(x=0, y=35)
# button to open the URL selected in the listbox
btn_open_art = Button(StockNews, text='Open Article', command=open_selected_result)
btn_open_art.place(x=490, y=30)
#scrollbar = Scrollbar(tab6, orient = VERTICAL)
#scrollbar.config(command = listbox.yview)
listbox.configure(width=80)
'''
Options tab
'''
# prompt label to enter a stock ticker
lbl_options_ticker = Label(Options, text = 'Select a stock to price an option:')
lbl_options_ticker.grid(column=1, row = 1)
# text field to enter the stock ticker
txt_options_ticker = Entry(Options, width = 8)
txt_options_ticker.grid(column=2, row = 1)
# prompt label to enter the strike price
lbl_strike_price = Label(Options, text = 'Strike price:')
lbl_strike_price.grid(column = 1, row =2)
# text field to enter the strike price
txt_strike_price = Entry(Options, width = 5)
txt_strike_price.grid(column=2, row = 2)
# prompt label to enter the days until expiration
lbl_time_left = Label(Options, text = 'Days until expiration:')
lbl_time_left.grid(column =1, row = 3)
# text field to enter the time left to options expiration
txt_time_left = Entry(Options, width = 5)
txt_time_left.grid(column = 2, row =3)
# prompt label to enter the interest rate
lbl_interest = Label(Options, text = 'Enter the interest rate as a decimal:')
lbl_interest.grid(column = 1, row = 4)
# text field to enter the interest rate
txt_interest = Entry(Options, width = 5)
txt_interest.grid(column = 2, row = 4)
# prompt label to select the type of option to be priced
lbl_option_type = Label(Options, text = 'Select an option to be priced:')
lbl_option_type.grid(column = 1, row = 5)
# holds which option-type radio button is selected (1 = call, 2 = put)
selected_option = IntVar()
# radio buttons to select the type of option to price
call_option = Radiobutton(Options, text='Call Option', value=1, variable=selected_option)
call_option.place(x=180, y=85)
put_option = Radiobutton(Options, text='Put Option', value=2, variable=selected_option)
put_option.place(x=270, y=85)
# Function to price the option
def options_pricing():
    """Price the selected option type from the ticker/strike/expiry/rate fields.

    Updates ``lbl_options_price`` with the computed price or an error message.
    """
    stock = str(txt_options_ticker.get()).upper()
    statement = selected_option.get()
    if stock not in tickers:
        lbl_options_price.configure(text='Please enter a valid NYSE or NASDAQ ticker')
        return
    # FIX: float() on raw Entry text raised ValueError and silently killed
    # the callback when any numeric field was empty or non-numeric.
    try:
        strike = float(txt_strike_price.get())
        days_left = float(txt_time_left.get())
        rate = float(txt_interest.get())
    except ValueError:
        lbl_options_price.configure(text='Strike, days and rate must be numbers')
        return
    options_price = options(stock, strike, days_left, rate)
    if statement == 1:
        lbl_options_price.configure(text='Call price for ' + str(stock) + " is $" + str(options_price.call_price()))
    elif statement == 2:
        lbl_options_price.configure(text='Put price for ' + str(stock) + " is $" + str(options_price.put_price()))
# button to submit the entered values and compute an option price
options_submit = Button(Options, text = 'Calculate your options price', command = options_pricing)
options_submit.place(x = 25, y = 110)
# label that options_pricing() fills with the price (or an error message)
lbl_options_price = Label(Options, text = '')
lbl_options_price.place(x = 200, y = 110)
'''
Builds notebook
'''
# lays out the notebook so the tabs fill the window
tab_control.pack(expand=1, fill='both')
# enters the Tk event loop; blocks until the window is closed
window.mainloop()
|
# coding: utf-8
import unittest
import mocker
from scieloapi import exceptions, httpbroker
from . import doubles
class ConnectorHttpBrokerCollaborationTests(mocker.MockerTestCase):
    """Tests for scieloapi.core.Connector's collaboration with httpbroker."""

    # Minimal server-response fixtures shared by the tests below.
    valid_full_microset = {
        'objects': [
            {
                'title': u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
            },
        ],
        'meta': {'next': None},
    }
    valid_microset = {
        'title': u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
    }

    def _makeOne(self, *args, **kwargs):
        # Factory kept as a method so the import happens at test time.
        from scieloapi.core import Connector
        return Connector(*args, **kwargs)

    def test_api_uri_defaults_to_manager_scielo_org(self):
        conn = self._makeOne('any.username', 'any.apikey')
        self.assertTrue(conn.api_uri.startswith('http://manager.scielo.org'))

    def test_fetching_all_docs_of_an_endpoint(self):
        mock_httpbroker = self.mocker.proxy(httpbroker)
        mocker.expect(mock_httpbroker.post).passthrough()
        mock_httpbroker.get('http://manager.scielo.org/api/v1/',
                            endpoint='journals',
                            params={},
                            check_ca=mocker.ANY,
                            resource_id=None,
                            auth=('any.username', 'any.apikey'))
        self.mocker.result(self.valid_full_microset)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey', http_broker=mock_httpbroker)
        res = conn.fetch_data('journals')
        self.assertTrue('objects' in res)
        # FIX: was assertTrue(len(res['objects']), 1), which treats 1 as the
        # failure *message* and therefore always passes.
        self.assertEqual(len(res['objects']), 1)

    def test_single_document_of_an_endpoint(self):
        mock_httpbroker = self.mocker.proxy(httpbroker)
        mocker.expect(mock_httpbroker.post).passthrough()
        mock_httpbroker.get('http://manager.scielo.org/api/v1/',
                            endpoint='journals',
                            params={},
                            resource_id=1,
                            check_ca=mocker.ANY,
                            auth=('any.username', 'any.apikey'))
        self.mocker.result(self.valid_microset)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey', http_broker=mock_httpbroker)
        res = conn.fetch_data('journals', resource_id=1)
        self.assertIn('title', res)

    def test_connection_error_fetching_data_raises_ConnectionError_after_retries(self):
        mock_httpbroker = self.mocker.proxy(httpbroker)
        mocker.expect(mock_httpbroker.post).passthrough()
        mock_httpbroker.get('http://manager.scielo.org/api/v1/',
                            endpoint='journals',
                            params={},
                            resource_id=1,
                            auth=('any.username', 'any.apikey'),
                            check_ca=mocker.ANY)
        self.mocker.throw(exceptions.ConnectionError)
        # 1 initial attempt + 10 retries.
        self.mocker.count(11)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey', http_broker=mock_httpbroker)
        # TimeStub avoids real sleeps between retries.
        with doubles.Patch(conn, '_time', doubles.TimeStub()):
            self.assertRaises(exceptions.ConnectionError,
                              lambda: conn.fetch_data('journals', resource_id=1))

    def test_fetching_data_retry_on_ConnectionError(self):
        from scieloapi.exceptions import ConnectionError
        mock_httpbroker = self.mocker.proxy(httpbroker)
        mocker.expect(mock_httpbroker.post).passthrough()
        # First call fails, second succeeds: fetch_data must retry.
        mock_httpbroker.get('http://manager.scielo.org/api/v1/',
                            endpoint='journals',
                            params={},
                            resource_id=1,
                            check_ca=mocker.ANY,
                            auth=('any.username', 'any.apikey'))
        self.mocker.throw(ConnectionError)
        mock_httpbroker.get('http://manager.scielo.org/api/v1/',
                            endpoint='journals',
                            params={},
                            resource_id=1,
                            check_ca=mocker.ANY,
                            auth=('any.username', 'any.apikey'))
        self.mocker.result(self.valid_microset)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey', http_broker=mock_httpbroker)
        res = conn.fetch_data('journals', resource_id=1)
        self.assertIn('title', res)

    def test_fetch_data_with_querystring_params(self):
        mock_httpbroker = self.mocker.proxy(httpbroker)
        mocker.expect(mock_httpbroker.post).passthrough()
        mock_httpbroker.get('http://manager.scielo.org/api/v1/',
                            endpoint='journals',
                            params={
                                'collection': 'saude-publica',
                            },
                            resource_id=1,
                            check_ca=mocker.ANY,
                            auth=('any.username', 'any.apikey'))
        self.mocker.result(self.valid_microset)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey', http_broker=mock_httpbroker)
        res = conn.fetch_data('journals', resource_id=1, collection='saude-publica')
        self.assertIn('title', res)

    def test_unsupported_api_version_raises_ValueError(self):
        self.assertRaises(ValueError,
                          lambda: self._makeOne('any.username',
                                                'any.apikey',
                                                version='vFoo'))

    def test_unsupported_api_version_at_API_VERSIONS_raises_NotFound(self):
        mock_httpbroker = self.mocker.proxy(httpbroker)
        mocker.expect(mock_httpbroker.post).passthrough()
        mock_httpbroker.get('http://manager.scielo.org/api/v1/',
                            endpoint='journals',
                            params={},
                            resource_id=None,
                            check_ca=mocker.ANY,
                            auth=('any.username', 'any.apikey'))
        self.mocker.throw(exceptions.NotFound)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey', http_broker=mock_httpbroker)
        self.assertRaises(exceptions.NotFound,
                          lambda: conn.fetch_data('journals'))

    def test_known_version_can_be_used(self):
        """
        This test needs to change a module level variable, so
        it needs to be restored to avoid side effects on other
        tests.
        """
        from scieloapi import core
        old_api_versions = core.API_VERSIONS
        core.API_VERSIONS += ('v2',)
        conn = core.Connector('any.user', 'any.apikey', version='v2')
        self.assertEqual(conn.version, 'v2')
        core.API_VERSIONS = old_api_versions

    def test_iteration_over_endpoint_items(self):
        # Smoke test: iter_docs must not raise when fetch_data is stubbed.
        def fetch_data_stub(self, *args, **kwargs):
            return self.valid_full_microset
        conn = self._makeOne('any.username', 'any.apikey')
        with doubles.Patch(conn, 'fetch_data', fetch_data_stub, instance_method=True):
            res = conn.iter_docs('journals')

    def test_create_http_methods_adds_http_get_method_to_instance(self):
        conn = self._makeOne('any.username', 'any.apikey')
        delattr(conn, '_http_get')
        conn._create_http_methods(doubles.httpbroker_stub, 'any.user', 'any.apikey')
        self.assertTrue(hasattr(conn, '_http_get'))

    def test_post_data_with_valid_data(self):
        mock_httpbroker = self.mocker.mock()
        mock_httpbroker_post = self.mocker.mock()
        mock_httpbroker_post('http://manager.scielo.org/api/v1/',
                             {'title': 'Foo'},
                             endpoint='journals',
                             check_ca=mocker.ANY,
                             auth=('any.username', 'any.apikey'))
        self.mocker.result('http://manager.scielo.org/api/v1/journals/4/')
        mocker.expect(mock_httpbroker.get).result(lambda *args, **kwargs: None)
        mocker.expect(mock_httpbroker.post).result(mock_httpbroker_post)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey', http_broker=mock_httpbroker)
        self.assertEqual(
            conn.post_data('journals', {'title': 'Foo'}),
            'http://manager.scielo.org/api/v1/journals/4/')

    def test_iter_docs_starts_with_zeroed_offset(self):
        mock_fetch_data = self.mocker.mock()
        mock_fetch_data(mocker.ANY, 'journals', offset=0, limit=mocker.ANY)
        self.mocker.result(self.valid_full_microset)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey')
        with doubles.Patch(conn, 'fetch_data', mock_fetch_data, instance_method=True):
            for doc in conn.iter_docs('journals'):
                self.assertTrue(doc)

    def test_iter_docs_offset_moves_forward(self):
        from scieloapi.core import ITEMS_PER_REQUEST as ITEMS
        # First page reports a 'next' page, so a second fetch must follow
        # with the offset advanced by one page size.
        first_valid_full_microset = {
            'objects': [
                {
                    'title': u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
                },
            ],
            'meta': {'next': 'bla'},
        }
        mock_fetch_data = self.mocker.mock()
        mock_fetch_data(mocker.ANY, 'journals', offset=0, limit=ITEMS)
        self.mocker.result(first_valid_full_microset)
        mock_fetch_data(mocker.ANY, 'journals', offset=ITEMS, limit=ITEMS)
        self.mocker.result(self.valid_full_microset)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey')
        with doubles.Patch(conn, 'fetch_data', mock_fetch_data, instance_method=True):
            for doc in conn.iter_docs('journals'):
                self.assertTrue(doc)

    def test_iter_docs_ignores_trashed_items(self):
        # The trashed item on the first page must be filtered out, leaving
        # only the single item from the second page.
        first_valid_full_microset = {
            'objects': [
                {
                    'title': u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)',
                    'is_trashed': True,
                },
            ],
            'meta': {'next': 'bla'},
        }
        mock_fetch_data = self.mocker.mock()
        mock_fetch_data(mocker.ANY, 'journals', offset=mocker.ANY, limit=mocker.ANY)
        self.mocker.result(first_valid_full_microset)
        mock_fetch_data(mocker.ANY, 'journals', offset=mocker.ANY, limit=mocker.ANY)
        self.mocker.result(self.valid_full_microset)
        self.mocker.replay()
        conn = self._makeOne('any.username', 'any.apikey')
        with doubles.Patch(conn, 'fetch_data', mock_fetch_data, instance_method=True):
            self.assertEqual(len(list(conn.iter_docs('journals'))), 1)

    def test_check_ca_disabled_by_default(self):
        conn = self._makeOne('any.username', 'any.apikey')
        self.assertFalse(conn.check_ca)

    def test_missing_version_defaults_to_newest(self):
        from scieloapi import core
        newest = sorted(core.API_VERSIONS)[-1]
        conn = core.Connector('any.user', 'any.apikey')
        self.assertEqual(conn.version, newest)
class EndpointTests(mocker.MockerTestCase):
    """Tests for scieloapi.core.Endpoint."""

    valid_microset = {
        'title': u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
    }

    def _makeOne(self, *args, **kwargs):
        from scieloapi.core import Endpoint
        return Endpoint(*args, **kwargs)

    def test_get_uses_fetch_data_method(self):
        mock_connector = self.mocker.mock()
        mock_connector.fetch_data('journals', resource_id=1)
        self.mocker.result(self.valid_microset)
        self.mocker.replay()
        journal_ep = self._makeOne('journals', mock_connector)
        self.assertEqual(journal_ep.get(1), self.valid_microset)

    def test_get_invalid_resource_raises_NotFound(self):
        mock_connector = self.mocker.mock()
        mock_connector.fetch_data('journals', resource_id=1)
        self.mocker.throw(exceptions.NotFound())
        self.mocker.replay()
        journal_ep = self._makeOne('journals', mock_connector)
        self.assertRaises(exceptions.NotFound, lambda: journal_ep.get(1))

    def test_all_uses_iter_docs_method(self):
        mock_connector = self.mocker.mock()
        mock_connector.iter_docs('journals')
        self.mocker.result((x for x in range(2)))
        self.mocker.replay()
        journal_ep = self._makeOne('journals', mock_connector)
        self.assertEqual(list(journal_ep.all()), [0, 1])

    def test_filter_uses_iter_docs_method(self):
        mock_connector = self.mocker.mock()
        mock_connector.iter_docs('journals', collection='saude-publica')
        self.mocker.result((x for x in range(2)))
        self.mocker.replay()
        journal_ep = self._makeOne('journals', mock_connector)
        self.assertEqual(list(journal_ep.filter(collection='saude-publica')), [0, 1])

    def test_post_uses_post_data_method(self):
        mock_connector = self.mocker.mock()
        mock_connector.post_data('journals', {'title': 'Foo'})
        self.mocker.result('http://manager.scielo.org/api/v1/journals/4/')
        self.mocker.replay()
        journal_ep = self._makeOne('journals', mock_connector)
        self.assertEqual(journal_ep.post({'title': 'Foo'}), '4')

    # FIX: this test was also named test_post_uses_post_data_method,
    # silently shadowing the test above so it never ran.
    def test_post_raises_APIError_for_unexpected_resource_path(self):
        stub_connector = doubles.ConnectorStub()
        stub_connector.post_data = lambda *args: '/non/sense/resource/path/'
        journal_ep = self._makeOne('journals', stub_connector)
        self.assertRaises(exceptions.APIError,
                          lambda: journal_ep.post({'title': 'Foo'}))
class ClientTests(mocker.MockerTestCase):
def _makeOne(self, *args, **kwargs):
from scieloapi.core import Client
return Client(*args, **kwargs)
def test_connector_instance_created_during_initialization(self):
mock_connector = self.mocker.mock()
mock_connector('any.user', 'any.apikey', api_uri=None,
version=None, check_ca=mocker.ANY)
self.mocker.result(mock_connector)
mock_connector.get_endpoints()
self.mocker.result({'journals': None})
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey', connector_dep=mock_connector)
def test_endpoints_introspected_during_initialization(self):
mock_connector = self.mocker.mock()
mock_connector('any.user', 'any.apikey', api_uri=None,
version=None, check_ca=mocker.ANY)
self.mocker.result(mock_connector)
mock_connector.get_endpoints()
self.mocker.result({'journals': None})
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey', connector_dep=mock_connector)
self.assertEqual(client.endpoints, ['journals'])
def test_missing_attributes_are_handled_as_endpoints(self):
mock_endpoints = self.mocker.mock()
'journals' in mock_endpoints
self.mocker.result(True)
mock_endpoints['journals']
self.mocker.result('foo')
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey', connector_dep=doubles.ConnectorStub)
with doubles.Patch(client, '_endpoints', mock_endpoints):
j = client.journals
self.assertEqual(j, 'foo')
def test_unknown_missing_attribute_raises_AttributeError(self):
mock_endpoints = self.mocker.mock()
'journals' in mock_endpoints
self.mocker.result(False)
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey', connector_dep=doubles.ConnectorStub)
with doubles.Patch(client, '_endpoints', mock_endpoints):
self.assertRaises(AttributeError, lambda: client.journals)
def test_username_and_username_are_mandatory_during_initialization(self):
self.assertRaises(TypeError, lambda: self._makeOne('any.user'))
def test_api_uri_parameterized_during_initialization(self):
mock_connector = self.mocker.mock()
mock_connector('any.user', 'any.apikey', api_uri='http://foo.org/api/',
version=None, check_ca=mocker.ANY)
self.mocker.result(mock_connector)
mock_connector.get_endpoints()
self.mocker.result({'journals': None})
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey',
api_uri='http://foo.org/api/', connector_dep=mock_connector)
def test_version_parameterized_during_initialization(self):
mock_connector = self.mocker.mock()
mock_connector('any.user', 'any.apikey', api_uri=None,
version='vFoo', check_ca=mocker.ANY)
self.mocker.result(mock_connector)
mock_connector.get_endpoints()
self.mocker.result({'journals': None})
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey',
version='vFoo', connector_dep=mock_connector)
def test_version_restricted_to_API_VERSIONS(self):
self.assertRaises(
ValueError,
lambda: self._makeOne('any.user', 'any.apikey', version='vFoo'))
def test_known_version_can_be_used(self):
from scieloapi.core import API_VERSIONS
API_VERSIONS += ('v2',)
mock_connector = self.mocker.mock()
mock_connector('any.user', 'any.apikey', api_uri=None,
version='v2', check_ca=mocker.ANY)
self.mocker.result(mock_connector)
mock_connector.get_endpoints()
self.mocker.result({'journals': None})
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey',
version='v2', connector_dep=mock_connector)
def test_fetch_relations_for_one_relation(self):
stub_connector = doubles.ConnectorStub
mock_get = self.mocker.mock()
mock_get(mocker.ANY, '/api/v1/journals/70/')
self.mocker.result({'title': 'foo'})
self.mocker.replay()
data = {'journal': '/api/v1/journals/70/'}
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'get', mock_get, instance_method=True):
self.assertEqual(client.fetch_relations(data), {'journal': {'title': 'foo'}})
def test_fetch_relations_for_all_relations(self):
stub_connector = doubles.ConnectorStub
mock_get = self.mocker.mock()
mock_get(mocker.ANY, '/api/v1/journals/70/')
self.mocker.result({'title': 'foo'})
mock_get(mocker.ANY, '/api/v1/issues/71/')
self.mocker.result({'title': 'bar'})
self.mocker.replay()
data = {'journal': '/api/v1/journals/70/',
'issue': '/api/v1/issues/71/'}
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'get', mock_get, instance_method=True):
self.assertEqual(
client.fetch_relations(data),
{'journal': {'title': 'foo'}, 'issue': {'title': 'bar'}})
def test_fetch_relations_for_lists(self):
stub_connector = doubles.ConnectorStub
mock_get = self.mocker.mock()
mock_get(mocker.ANY, '/api/v1/journals/70/')
self.mocker.result({'title': 'foo'})
mock_get(mocker.ANY, '/api/v1/journals/71/')
self.mocker.result({'title': 'bar'})
self.mocker.replay()
data = {'journal': ['/api/v1/journals/70/',
'/api/v1/journals/71/']}
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'get', mock_get, instance_method=True):
self.assertEqual(
client.fetch_relations(data),
{'journal': [{'title': 'foo'}, {'title': 'bar'}]})
def test_fetch_relations_for_specific_relations(self):
stub_connector = doubles.ConnectorStub
mock_get = self.mocker.mock()
mock_get(mocker.ANY, '/api/v1/journals/70/')
self.mocker.result({'title': 'foo'})
self.mocker.replay()
data = {'journal': '/api/v1/journals/70/',
'issue': '/api/v1/issues/71/'}
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'get', mock_get, instance_method=True):
self.assertEqual(
client.fetch_relations(data, only=('journal',)),
{'journal': {'title': 'foo'}, 'issue': '/api/v1/issues/71/'})
def test_fetch_relations_skip_non_conforming_urls_on_lists(self):
stub_connector = doubles.ConnectorStub
mock_get = self.mocker.mock()
mock_get(mocker.ANY, '/api/v1/journals/70/')
self.mocker.result({'title': 'foo'})
mock_get(mocker.ANY, '/api/v2/journals/71/')
self.mocker.throw(ValueError)
self.mocker.replay()
data = {'journal': ['/api/v1/journals/70/',
'/api/v2/journals/71/']}
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'get', mock_get, instance_method=True):
self.assertEqual(
client.fetch_relations(data),
{'journal': [{'title': 'foo'}, '/api/v2/journals/71/']})
def test_fetch_relations_skip_non_conforming_urls_on_scalar(self):
stub_connector = doubles.ConnectorStub
mock_get = self.mocker.mock()
mock_get(mocker.ANY, '/api/v1/journals/70/')
self.mocker.result({'title': 'foo'})
mock_get(mocker.ANY, '/api/v2/journals/71/')
self.mocker.throw(ValueError)
self.mocker.replay()
data = {'journal1': '/api/v1/journals/70/',
'journal2': '/api/v2/journals/71/'}
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'get', mock_get, instance_method=True):
self.assertEqual(
client.fetch_relations(data),
{'journal1': {'title': 'foo'}, 'journal2': '/api/v2/journals/71/'})
def test_fetch_relations_skip_scalars(self):
stub_connector = doubles.ConnectorStub
mock_get = self.mocker.mock()
mock_get(mocker.ANY, '/api/v1/journals/70/')
self.mocker.result({'title': 'foo'})
self.mocker.replay()
data = {'journal1': '/api/v1/journals/70/',
'foo': 5}
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'get', mock_get, instance_method=True):
self.assertEqual(
client.fetch_relations(data),
{'journal1': {'title': 'foo'}, 'foo': 5})
def test_get(self):
stub_connector = doubles.ConnectorStub
stub_connector.version = 'v1'
mock_journals = self.mocker.mock()
mock_journals.get('20')
self.mocker.result({'title': 'bla'})
self.mocker.replay()
def dummy_query(inst, endpoint):
# be sure the endpoint is correct
self.assertEquals(endpoint, 'journals')
return mock_journals
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
with doubles.Patch(client, 'query', dummy_query, instance_method=True):
self.assertEqual(
client.get('/api/v1/journals/20/'),
{'title': 'bla'})
def test_get_version_check(self):
stub_connector = doubles.ConnectorStub
stub_connector.version = 'v2'
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
self.assertRaises(ValueError, lambda: client.get('/api/v3/journals/20/'))
def test_get_raises_ValueError_for_unknown_endpoints(self):
stub_connector = doubles.ConnectorStub
stub_connector.version = 'v1'
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
self.assertRaises(ValueError, lambda: client.get('/api/v1/foo/20/'))
def test_get_raises_ValueError_for_unknown_resource_uri(self):
stub_connector = doubles.ConnectorStub
stub_connector.version = 'v1'
client = self._makeOne('any.user', 'any.apikey', connector_dep=stub_connector)
self.assertRaises(ValueError, lambda: client.get('/api/some/resource/'))
def test_querying_endpoints(self):
mock_endpoints = self.mocker.mock()
'journals' in mock_endpoints
self.mocker.result(True)
mock_endpoints['journals']
self.mocker.result('foo')
self.mocker.replay()
client = self._makeOne('any.user', 'any.apikey', connector_dep=doubles.ConnectorStub)
with doubles.Patch(client, '_endpoints', mock_endpoints):
j = client.query('journals')
self.assertEqual(j, 'foo')
def test_query_raises_ValueError_for_unknown_endpoints(self):
    """`query` must raise ValueError when the endpoint is not registered."""
    mock_endpoints = self.mocker.mock()
    # record phase: the membership test answers False, so `query` must raise
    'journals' in mock_endpoints
    self.mocker.result(False)
    self.mocker.replay()
    client = self._makeOne('any.user', 'any.apikey', connector_dep=doubles.ConnectorStub)
    with doubles.Patch(client, '_endpoints', mock_endpoints):
        self.assertRaises(ValueError, lambda: client.query('journals'))
|
<filename>network/mainDvrTrainingDense.py
from __future__ import print_function
import argparse
import math
from math import log10
import os
import os.path
from collections import defaultdict
import itertools
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import numpy as np
# torchsummary is optional: when it is missing, model summaries are skipped
no_summary = False
try:
    from torchsummary import summary
except ModuleNotFoundError:
    no_summary = True
    print("No summary writer found")
from console_progressbar import ProgressBar
#from data import get_training_set, get_test_set
import dataset
import models
import losses
from utils import ScreenSpaceShading, initialImage
# Training settings
# (command-line configuration; each flag documents itself via `help`)
parser = argparse.ArgumentParser(description='Superresolution for Direct Volume Rendering')
parser.add_argument('--dataset', type=str,
                    help="Path to the HDF5 file with the dataset", required=True)
parser.add_argument('--upscale_factor', type=int, default=4, help="super resolution upscale factor")
parser.add_argument('--numberOfImages', type=int, default=-1, help="Number of images taken from the inpt dataset. Default: -1 = unlimited")
parser.add_argument('--restore', type=int, default=-1, help="Restore training from the specified run index")
parser.add_argument('--restoreEpoch', type=int, default=-1, help="In combination with '--restore', specify the epoch from which to recover. Default: last epoch")
parser.add_argument('--pretrained', type=str, default=None, help="Path to a pretrained generator")
parser.add_argument('--pretrainedDiscr', type=str, default=None, help="Path to a pretrained discriminator")
#Model parameters
parser.add_argument('--model', type=str, required=True, help="""
The superresolution model.
Supported nets: 'SubpixelNet', 'EnhanceNet', 'TecoGAN', 'RCAN'
""")
parser.add_argument('--upsample', type=str, default='bilinear', help='Upsampling for EnhanceNet: nearest, bilinear, bicubic, or pixelShuffle')
parser.add_argument('--reconType', type=str, default='residual', help='Block type for EnhanceNet: residual or direct')
parser.add_argument('--useBN', action='store_true', help='Enable batch normalization in the generator and discriminator')
parser.add_argument('--useSN', action='store_true', help='Enable spectral normalization in the generator and discriminator')
parser.add_argument('--numResidualLayers', type=int, default=10, help='Number of residual layers in the generator')
parser.add_argument('--disableTemporal', action='store_true', help='Disables temporal consistency')
parser.add_argument('--initialImage', type=str, default='zero', help="""
Specifies what should be used as the previous high res frame for the first frame of the sequence,
when no previous image is available from the previous predition.
Available options:
 - zero: fill everything with zeros
 - unshaded: Special defaults for unshaded mode: mask=-1, normal=[0,0,1], depth=0.5, ao=1
 - input: Upscale the current input image and use that as the previous high res image
   Remaining channels are filled with defaults
Default: 'input'
""")
#Loss parameters
parser.add_argument('--losses', type=str, required=True, help="""
Comma-separated list of loss functions: mse,perceptual,texture,adv.
Optinally, the weighting factor can be specified with a colon.
Example: "--losses perceptual:0.1,texture:1e2,adv:10"
""")
parser.add_argument('--perceptualLossLayers',
                    type=str,
                    # defaults found with VGGAnalysis.py
                    default='conv_1:0.026423,conv_2:0.009285,conv_3:0.006710,conv_4:0.004898,conv_5:0.003910,conv_6:0.003956,conv_7:0.003813,conv_8:0.002968,conv_9:0.002997,conv_10:0.003631,conv_11:0.004147,conv_12:0.005765,conv_13:0.007442,conv_14:0.009666,conv_15:0.012586,conv_16:0.013377',
                    help="""
Comma-separated list of layer names for the perceptual loss.
Note that the convolution layers are numbered sequentially: conv_1, conv2_, ... conv_19.
Optionally, the weighting factor can be specified with a colon: "conv_4:1.0", if omitted, 1 is used.
""")
parser.add_argument('--textureLossLayers', type=str, default='conv_1,conv_3,conv_5', help="""
Comma-separated list of layer names for the perceptual loss.
Note that the convolution layers are numbered sequentially: conv_1, conv2_, ... conv_19.
Optinally, the weighting factor can be specified with a colon: "conv_4:1.0", if omitted, 1 is used.
""")
parser.add_argument('--discriminator', type=str, default='enhanceNetLarge', help="""
Network architecture for the discriminator.
Possible values: enhanceNetSmall, enhanceNetLarge, tecoGAN
""")
#parser.add_argument('--advDiscrThreshold', type=float, default=None, help="""
#Adverserial training:
#If the cross entropy loss of the discriminator falls below that threshold, the training for the discriminator is stopped.
#Set this to zero to disable the check and use a fixed number of iterations, see --advDiscrMaxSteps, instead.
#""")
parser.add_argument('--advDiscrMaxSteps', type=int, default=2, help="""
Adverserial training:
Maximal number of iterations for the discriminator training.
Set this to -1 to disable the check.
""")
parser.add_argument('--advDiscrInitialSteps', type=int, default=None, help="""
Adverserial training:
Number of iterations for the disciriminator training in the first epoch.
Used in combination with a pretrained generator to let the discriminator catch up.
""")
parser.add_argument('--advDiscrWeightClip', type=float, default=0.01, help="""
For the Wasserstein GAN, this parameter specifies the value of the hyperparameter 'c',
the range in which the discirminator parameters are clipped.
""")
#parser.add_argument('--advGenThreshold', type=float, default=None, help="""
#Adverserial training:
#If the cross entropy loss of the generator falls below that threshold, the training for the generator is stopped.
#Set this to zero to disable the check and use a fixed number of iterations, see --advGenMaxSteps, instead.
#""")
parser.add_argument('--advGenMaxSteps', type=int, default=2, help="""
Adverserial training:
Maximal number of iterations for the generator training.
Set this to -1 to disable the check.
""")
parser.add_argument('--lossBorderPadding', type=int, default=16, help="""
Because flow + warping can't be accurately estimated at the borders of the image,
the border of the input images to the loss (ground truth, low res input, prediction)
are overwritten with zeros. The size of the border is specified by this parameter.
Pass zero to disable this padding. Default=16 as in the TecoGAN paper.
""")
parser.add_argument('--samples', type=int, required=True, help='Number of samples for the train and test dataset')
parser.add_argument('--testFraction', type=float, default=0.2, help='Fraction of test data')
parser.add_argument('--batchSize', type=int, default=16, help='training batch size')
parser.add_argument('--testBatchSize', type=int, default=16, help='testing batch size')
parser.add_argument('--testNumFullImages', type=int, default=4, help='number of full size images to test for visualization')
parser.add_argument('--nEpochs', type=int, default=2, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0001, help='Learning Rate. Default=0.0001')
parser.add_argument('--lrGamma', type=float, default=0.5, help='The learning rate decays every lrStep-epochs by this factor')
parser.add_argument('--lrStep', type=int, default=500, help='The learning rate decays every lrStep-epochs (this parameter) by lrGamma factor')
parser.add_argument('--weightDecay', type=float, default=0, help="Weight decay (L2 penalty), if supported by the optimizer. Default=0")
parser.add_argument('--optim', type=str, default="Adam", help="""
Optimizers. Possible values: RMSprop, Rprop, Adam (default).
""")
parser.add_argument('--noTestImages', action='store_true', help="Don't save full size test images")
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--seed', type=int, default=124, help='random seed to use. Default=124')
parser.add_argument('--logdir', type=str, default='C:/Users/Mustafa/Desktop/dvrRendering/log', help='directory for tensorboard logs')
parser.add_argument('--modeldir', type=str, default='C:/Users/Mustafa/Desktop/dvrRendering/model', help='Output directory for the checkpoints')
opt = parser.parse_args()
opt_dict = vars(opt)  # dict view of the options, stored in checkpoints
if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")
# seed both torch and numpy for reproducible runs
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
device = torch.device("cuda" if opt.cuda else "cpu")
torch.set_num_threads(4)
# Run directory
def findNextRunNumber(folder):
    """Return the highest run number already present in *folder*.

    Run directories are named ``run%05d`` (e.g. ``run00042``).  Returns 0
    when the folder does not exist yet or contains no run directories, so
    the caller's ``+ 1`` yields the first run number.

    Fixes: the original crashed when *folder* was missing or contained a
    ``run*`` entry with a non-numeric suffix, and relied on lexicographic
    order (only correct for zero-padded names).
    """
    if not os.path.isdir(folder):
        return 0
    runs = [f for f in os.listdir(folder)
            if f.startswith('run') and f[3:].isdigit()]
    if not runs:
        return 0
    # numeric max instead of trusting the lexicographic order of the names
    return max(int(f[3:]) for f in runs)
# Reserve the next free run directory in both the log and the model folder.
nextRunNumber = max(findNextRunNumber(opt.logdir), findNextRunNumber(opt.modeldir)) + 1
if opt.restore == -1:
    # fresh training run: create the directories now
    print('Current run: %05d'%nextRunNumber)
    runName = 'run%05d'%nextRunNumber
    logdir = os.path.join(opt.logdir, runName)
    modeldir = os.path.join(opt.modeldir, runName)
    runName = 'run%05d'%nextRunNumber
    os.makedirs(logdir)
    os.makedirs(modeldir)
#########################
# DATASETS + CHANNELS
#########################
print('===> Loading datasets')
dataset_data = dataset.dvr_dense_load_samples_hdf5(opt_dict['upscale_factor'], opt_dict, opt_dict['dataset'])
print('Dataset input images have %d channels'%dataset_data.input_channels)
input_channels = dataset_data.input_channels
assert input_channels == 4 # RGB, MASK
output_channels = dataset_data.output_channels
assert output_channels == 4 # RGB, MASK
# The generator additionally receives the warped previous high-res frame,
# flattened into upscale_factor^2 extra channels per output channel.
input_channels_with_previous = input_channels + output_channels * (opt.upscale_factor ** 2)
train_set = dataset.DvrDenseDatasetFromSamples(dataset_data, False, opt.testFraction, device)
test_set = dataset.DvrDenseDatasetFromSamples(dataset_data, True, opt.testFraction, device)
test_full_set = dataset.DvrDenseDatasetFromFullImages(dataset_data, min(opt.testNumFullImages, len(dataset_data.images_low)))
training_data_loader = DataLoader(dataset=train_set, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, batch_size=opt.testBatchSize, shuffle=False)
testing_full_data_loader = DataLoader(dataset=test_full_set, batch_size=1, shuffle=False)
#############################
# MODEL
#############################
print('===> Building model')
model = models.createNetwork(
    opt.model,
    opt.upscale_factor,
    input_channels_with_previous,
    [0,1,2,3],
    output_channels,
    opt)
model.to(device)
print('Model:')
print(model)
if not no_summary:
    summary(model,
            input_size=train_set.get_low_res_shape(input_channels_with_previous),
            device=device.type)
#############################
# LOSSES
#############################
print('===> Building losses')
criterion = losses.LossNet(
    device,
    input_channels,
    output_channels,
    train_set.get_high_res_shape()[1], #high resolution size
    opt.lossBorderPadding,
    opt)
criterion.to(device)
print('Losses:', criterion)
res = train_set.get_high_res_shape()[1]
# NOTE(review): the loss summary is only printed when torchsummary is
# MISSING — presumably a fallback, but confirm the condition is not inverted.
if no_summary:
    criterion.print_summary(
        (output_channels, res, res),
        (output_channels, res, res),
        (input_channels, res, res),
        (output_channels+1, res, res),
        opt.batchSize, device)
#############################
# OPTIMIZER
#############################
print('===> Create Optimizer ('+opt.optim+')')
def createOptimizer(name, parameters, lr, weight_decay):
    """Instantiate the optimizer selected by *name* for *parameters*.

    Raises ValueError for unrecognized names.  Rprop and LBFGS do not
    accept a weight_decay argument, so it is only forwarded to Adam and
    RMSprop.
    """
    factories = {
        'Adam': lambda: optim.Adam(parameters, lr=lr, weight_decay=weight_decay),
        'RMSprop': lambda: optim.RMSprop(parameters, lr=lr, weight_decay=weight_decay),
        'Rprop': lambda: optim.Rprop(parameters, lr=lr),
        'LBFGS': lambda: optim.LBFGS(parameters, lr=lr),
    }
    factory = factories.get(name)
    if factory is None:
        raise ValueError("Unknown optimizer "+name)
    return factory()
# Adversarial training is enabled exactly when the loss configuration
# contains a discriminator; choose the optimizer/scheduler layout accordingly.
if not criterion.has_discriminator:
    adversarial_training = False
    optimizer = createOptimizer(opt.optim, model.parameters(),
                                lr=opt.lr, weight_decay=opt.weightDecay)
    #scheduler = optim.lr_scheduler.ExponentialLR(optimizer, opt.lrDecay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, opt.lrStep, opt.lrGamma)
else:
    adversarial_training = True
    gen_optimizer = createOptimizer(opt.optim, model.parameters(),
                                    lr=opt.lr, weight_decay=opt.weightDecay)
    #filter(lambda p: p.requires_grad, criterion.get_discr_parameters())
    # the discriminator trains with half the generator's learning rate
    discr_optimizer = createOptimizer(
        opt.optim,
        filter(lambda p: p.requires_grad, criterion.get_discr_parameters()),
        lr=opt.lr*0.5, weight_decay=opt.weightDecay)
    #gen_scheduler = optim.lr_scheduler.ExponentialLR(gen_optimizer, opt.lrDecay)
    #discr_scheduler = optim.lr_scheduler.ExponentialLR(discr_optimizer, opt.lrDecay)
    gen_scheduler = optim.lr_scheduler.StepLR(gen_optimizer, opt.lrStep, opt.lrGamma)
    discr_scheduler = optim.lr_scheduler.StepLR(discr_optimizer, opt.lrStep, opt.lrGamma)
#############################
# PRETRAINED
#############################
if opt.pretrained is not None:
    checkpoint = torch.load(opt.pretrained)
    model.load_state_dict(checkpoint['model'].state_dict())
    #only load the state dict, not the whole model
    #this asserts that the model structure is the same
    print('Using pretrained model for the generator')
if opt.pretrainedDiscr is not None:
    assert criterion.discriminator is not None
    checkpoint = torch.load(opt.pretrainedDiscr)
    criterion.discriminator.load_state_dict(checkpoint['discriminator'])
    print('Using pretrained model for the discriminator')
#############################
# Additional Stuff: Spectral Normalization
# (placed after pretrained, because models without spectral normalization
# can't be imported as models with normalization
#############################
if opt.useSN:
    from utils.apply_sn import apply_sn
    apply_sn(model)
    if criterion.discriminator is not None:
        apply_sn(criterion.discriminator)
    print("Spectral Normalization applied")
#############################
# OUTPUT DIRECTORIES or RESTORE
#############################
#Check for restoring
startEpoch = 1
if opt.restore != -1:
    nextRunNumber = opt.restore
    runName = 'run%05d'%nextRunNumber
    modeldir = os.path.join(opt.modeldir, runName)
    if opt.restoreEpoch == -1:
        # probe the model directory for the last checkpoint on disk
        restoreEpoch = 0
        while True:
            modelInName = os.path.join(modeldir, "model_epoch_{}.pth".format(restoreEpoch+1))
            if not os.path.exists(modelInName):
                break;
            restoreEpoch += 1
    else:
        restoreEpoch = opt.restoreEpoch
    print("Restore training from run", opt.restore,"and epoch",restoreEpoch)
    modelInName = os.path.join(modeldir, "model_epoch_{}.pth".format(restoreEpoch))
    checkpoint = torch.load(modelInName)
    #model.load_state_dict(checkpoint['state_dict'])
    model = checkpoint['model'] #Restore full model
    if adversarial_training:
        criterion.discriminator.load_state_dict(checkpoint['discriminator'])
        discr_optimizer = checkpoint['discr_optimizer']
        gen_optimizer = checkpoint['gen_optimizer']
        discr_scheduler = checkpoint['discr_scheduler']
        gen_scheduler = checkpoint['gen_scheduler']
    else:
        optimizer = checkpoint['optimizer']
        scheduler = checkpoint['scheduler']
    # NOTE(review): training resumes AT restoreEpoch, i.e. the restored epoch
    # is run again — confirm this is intended (vs. restoreEpoch + 1).
    startEpoch = restoreEpoch
#paths
print('Current run: %05d'%nextRunNumber)
runName = 'run%05d'%nextRunNumber
logdir = os.path.join(opt.logdir, runName)
modeldir = os.path.join(opt.modeldir, runName)
optStr = str(opt);
print(optStr)
with open(os.path.join(modeldir, 'info.txt'), "w") as text_file:
    text_file.write(optStr)
#tensorboard logger
writer = SummaryWriter(logdir)
writer.add_text('info', optStr, 0)
#@profile
def trainNormal(epoch):
    """Run one non-adversarial training epoch.

    Iterates over all mini-batches and, within each batch, over all frames
    of the sequence; the per-frame losses are summed and one optimizer step
    is taken per batch.  Average loss and learning rate go to tensorboard.
    """
    epoch_loss = 0
    num_minibatch = len(training_data_loader)
    pg = ProgressBar(num_minibatch, 'Training', length=50)
    model.train()
    for iteration, batch in enumerate(training_data_loader, 0):
        pg.print_progress_bar(iteration)
        input, target = batch[0].to(device), batch[1].to(device)
        B, _, Cout, Hhigh, Whigh = target.shape
        _, _, Cin, H, W = input.shape
        assert(Cout == output_channels)
        assert(Cin == input_channels)
        assert(H == dataset_data.crop_size)
        assert(W == dataset_data.crop_size)
        assert(Hhigh == dataset_data.crop_size * opt.upscale_factor)
        assert(Whigh == dataset_data.crop_size * opt.upscale_factor)
        optimizer.zero_grad()
        previous_output = None
        loss = 0
        for j in range(dataset_data.num_frames):
            # prepare input
            if j == 0 or opt.disableTemporal:
                previous_warped = initialImage(input[:,0,:,:,:], Cout,
                                   opt.initialImage, False, opt.upscale_factor)
                # loss takes the ground truth current image as warped previous image,
                # to not introduce a bias and big loss for the first image
                previous_warped_loss = target[:,0,:,:,:]
                previous_input = F.interpolate(input[:,0,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
            else:
                # NOTE(review): `flow` is not defined anywhere in this function
                # (the batch only provides input and target), so this branch
                # raises a NameError unless --disableTemporal is set — confirm
                # whether the dense dataset should also provide flow fields.
                previous_warped = models.VideoTools.warp_upscale(
                    previous_output,
                    flow[:, j-1, :, :, :],
                    opt.upscale_factor,
                    special_mask = True)
                previous_warped_loss = previous_warped
                previous_input = F.interpolate(input[:,j-1,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                previous_input = models.VideoTools.warp_upscale(
                    previous_input,
                    flow[:, j-1, :, :, :],
                    opt.upscale_factor,
                    special_mask = True)
            # the warped previous high-res frame becomes extra input channels
            previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
            single_input = torch.cat((
                input[:,j,:,:,:],
                previous_warped_flattened),
                dim=1)
            # run generator
            prediction, _ = model(single_input)
            # evaluate cost
            input_high = F.interpolate(input[:,j,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
            loss0,_ = criterion(
                target[:,j,:,:,:],
                prediction,
                input_high,
                previous_warped_loss)
            del _
            loss += loss0
            epoch_loss += loss0.item()
            # save output
            previous_output = prediction
        loss.backward()
        optimizer.step()
    pg.print_progress_bar(num_minibatch)
    epoch_loss /= num_minibatch * dataset_data.num_frames
    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss))
    writer.add_scalar('train/total_loss', epoch_loss, epoch)
    writer.add_scalar('train/lr', scheduler.get_lr()[0], epoch)
    scheduler.step()
def trainAdv_v2(epoch):
    """
    Second version of adverserial training:
    for each batch, train both discriminator and generator,
    not a full epoch for each seperately.

    Per mini-batch the discriminator is stepped `disc_steps` times with the
    generator frozen, then the generator is stepped `gen_steps` times.
    Epoch-averaged losses and discriminator scores go to tensorboard.
    """
    print("===> Epoch %d Training"%epoch)
    discr_scheduler.step()
    writer.add_scalar('train/lr_discr', discr_scheduler.get_lr()[0], epoch)
    gen_scheduler.step()
    writer.add_scalar('train/lr_gen', gen_scheduler.get_lr()[0], epoch)
    # optionally more discriminator iterations in the first epoch, so a fresh
    # discriminator can catch up with a pretrained generator
    disc_steps = opt.advDiscrInitialSteps if opt.advDiscrInitialSteps is not None and epoch==1 else opt.advDiscrMaxSteps
    gen_steps = opt.advGenMaxSteps
    num_minibatch = len(training_data_loader)
    model.train()
    criterion.discr_train()
    total_discr_loss = 0
    total_gen_loss = 0
    total_gt_score = 0
    total_pred_score = 0
    pg = ProgressBar(num_minibatch, 'Train', length=50)
    for iteration, batch in enumerate(training_data_loader):
        pg.print_progress_bar(iteration)
        input, flow, target = batch[0].to(device), batch[1].to(device), batch[2].to(device)
        B, _, Cout, Hhigh, Whigh = target.shape
        _, _, Cin, H, W = input.shape
        # DISCRIMINATOR
        for _ in range(disc_steps):
            discr_optimizer.zero_grad()
            gen_optimizer.zero_grad()
            loss = 0
            #iterate over all timesteps
            for j in range(dataset_data.num_frames):
                # prepare input for the generator
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:,0,:,:,:], Cout,
                                       opt.initialImage, False, opt.upscale_factor)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:,0,:,:,:]
                    previous_input = F.interpolate(input[:,0,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j-1, :, :, :],
                        opt.upscale_factor,
                        special_mask = True)
                    previous_warped_loss = previous_warped
                    previous_input = F.interpolate(input[:,j-1,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                    previous_input = models.VideoTools.warp_upscale(
                        previous_input,
                        flow[:, j-1, :, :, :],
                        opt.upscale_factor,
                        special_mask = True)
                previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                single_input = torch.cat((
                    input[:,j,:,:,:],
                    previous_warped_flattened),
                    dim=1)
                #evaluate generator (frozen while the discriminator trains)
                with torch.no_grad():
                    prediction, _ = model(single_input)
                #prepare input for the discriminator
                # NOTE(review): at j==0 this warps target[:,-1,...] (the LAST
                # frame) as the "previous" ground truth — confirm intended.
                gt_prev_warped = models.VideoTools.warp_upscale(
                    target[:,j-1,:,:,:],
                    flow[:, j-1, :, :, :],
                    opt.upscale_factor,
                    special_mask = True)
                #evaluate discriminator
                input_high = F.interpolate(input[:,j,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                disc_loss, gt_score, pred_score = criterion.train_discriminator(
                    input_high,
                    target[:,j,:,:,:],
                    previous_input,
                    gt_prev_warped,
                    prediction,
                    previous_warped_loss)
                loss += disc_loss
                total_gt_score += float(gt_score)
                total_pred_score += float(pred_score)
                # save output
                # NOTE(review): this channel layout (mask, normal, depth, ao)
                # matches the shaded 6-channel pipeline; this script asserts
                # output_channels == 4 (RGB, MASK), making the 4:5 and 5:6
                # slices empty — verify the intended layout for dense DVR.
                previous_output = torch.cat([
                    torch.clamp(prediction[:,0:1,:,:], -1, +1), # mask
                    ScreenSpaceShading.normalize(prediction[:,1:4,:,:], dim=1),
                    torch.clamp(prediction[:,4:5,:,:], 0, +1), # depth
                    torch.clamp(prediction[:,5:6,:,:], 0, +1) # ao
                ], dim=1)
            loss.backward()
            discr_optimizer.step()
            total_discr_loss += loss.item()
        # GENERATOR
        # BUGFIX: iterate gen_steps (--advGenMaxSteps) times; previously this
        # reused disc_steps and the gen_steps setting was silently ignored.
        for _ in range(gen_steps):
            discr_optimizer.zero_grad()
            gen_optimizer.zero_grad()
            loss = 0
            #iterate over all timesteps
            for j in range(dataset_data.num_frames):
                # prepare input for the generator
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:,0,:,:,:], Cout,
                                       opt.initialImage, False, opt.upscale_factor)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:,0,:,:,:]
                    previous_input = F.interpolate(input[:,0,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j-1, :, :, :],
                        opt.upscale_factor,
                        special_mask = True)
                    previous_warped_loss = previous_warped
                    previous_input = F.interpolate(input[:,j-1,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                    previous_input = models.VideoTools.warp_upscale(
                        previous_input,
                        flow[:, j-1, :, :, :],
                        opt.upscale_factor,
                        special_mask = True)
                previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                single_input = torch.cat((
                    input[:,j,:,:,:],
                    previous_warped_flattened),
                    dim=1)
                #evaluate generator
                prediction, _ = model(single_input)
                #evaluate loss
                input_high = F.interpolate(input[:,j,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                loss0, map = criterion(
                    target[:,j,:,:,:],
                    prediction,
                    input_high,
                    previous_input,
                    previous_warped_loss)
                loss += loss0
                # save output
                previous_output = torch.cat([
                    torch.clamp(prediction[:,0:1,:,:], -1, +1), # mask
                    ScreenSpaceShading.normalize(prediction[:,1:4,:,:], dim=1),
                    torch.clamp(prediction[:,4:5,:,:], 0, +1), # depth
                    torch.clamp(prediction[:,5:6,:,:], 0, +1) # ao
                ], dim=1)
            loss.backward()
            gen_optimizer.step()
            total_gen_loss += loss.item()
    pg.print_progress_bar(num_minibatch)
    total_discr_loss /= num_minibatch * dataset_data.num_frames
    total_gen_loss /= num_minibatch * dataset_data.num_frames
    total_gt_score /= num_minibatch * dataset_data.num_frames
    total_pred_score /= num_minibatch * dataset_data.num_frames
    writer.add_scalar('train/discr_loss', total_discr_loss, epoch)
    writer.add_scalar('train/gen_loss', total_gen_loss, epoch)
    writer.add_scalar('train/gt_score', total_gt_score, epoch)
    writer.add_scalar('train/pred_score', total_pred_score, epoch)
    print("===> Epoch {} Complete".format(epoch))
#@profile
def test(epoch):
    """Evaluate the current model on the test split.

    Accumulates all criterion loss terms and a PSNR (from the MSE term)
    over every frame of every test batch and logs the averages.
    """
    avg_psnr = 0
    avg_losses = defaultdict(float)
    with torch.no_grad():
        num_minibatch = len(testing_data_loader)
        pg = ProgressBar(num_minibatch, 'Testing', length=50)
        model.eval()
        if criterion.has_discriminator:
            criterion.discr_eval()
        for iteration, batch in enumerate(testing_data_loader, 0):
            pg.print_progress_bar(iteration)
            input, target = batch[0].to(device), batch[1].to(device)
            B, _, Cout, Hhigh, Whigh = target.shape
            _, _, Cin, H, W = input.shape
            previous_output = None
            for j in range(dataset_data.num_frames):
                # prepare input
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:,0,:,:,:], Cout,
                                       opt.initialImage, False, opt.upscale_factor)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:,0,:,:,:]
                    previous_input = F.interpolate(input[:,0,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                else:
                    # NOTE(review): `flow` is undefined in this function (the
                    # test batch only provides input and target), so this
                    # branch raises a NameError unless --disableTemporal.
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j-1, :, :, :],
                        opt.upscale_factor,
                        special_mask = True)
                    previous_warped_loss = previous_warped
                    previous_input = F.interpolate(input[:,j-1,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                    previous_input = models.VideoTools.warp_upscale(
                        previous_input,
                        flow[:, j-1, :, :, :],
                        opt.upscale_factor,
                        special_mask = True)
                previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                single_input = torch.cat((
                    input[:,j,:,:,:],
                    previous_warped_flattened),
                    dim=1)
                # run generator
                prediction, _ = model(single_input)
                # evaluate cost
                # NOTE(review): criterion is called with 4 positional args
                # here but with 5 (incl. previous_input) in trainAdv_v2 —
                # confirm the intended signature.
                input_high = F.interpolate(input[:,j,:,:,:], size=(Hhigh, Whigh), mode=opt.upsample)
                loss0, loss_values = criterion(
                    target[:,j,:,:,:],
                    prediction,
                    input_high,
                    previous_warped_loss)
                avg_losses['total_loss'] += loss0.item()
                # PSNR from the MSE term; guarded against log10(0)
                psnr = 10 * log10(1 / max(1e-10, loss_values['mse']))
                avg_losses['psnr'] += psnr
                for key, value in loss_values.items():
                    avg_losses[str(key)] += value
                # save output for next frame
                previous_output = prediction
        pg.print_progress_bar(num_minibatch)
    for key in avg_losses.keys():
        avg_losses[key] /= num_minibatch * dataset_data.num_frames
    print("===> Avg. PSNR: {:.4f} dB".format(avg_losses['psnr']))
    print(" losses:",avg_losses)
    for key, value in avg_losses.items():
        writer.add_scalar('test/%s'%key, value, epoch)
def test_images(epoch):
    """Run the model on a few full-size test images and log them to tensorboard."""
    def write_image(img, filename):
        # scale by 255, clip to [0,255], convert to uint8 and log the image
        out_img = img.cpu().detach().numpy()
        out_img *= 255.0
        out_img = out_img.clip(0, 255)
        out_img = np.uint8(out_img)
        writer.add_image(filename, out_img, epoch)
    with torch.no_grad():
        num_minibatch = len(testing_full_data_loader)
        pg = ProgressBar(num_minibatch, 'Test %d Images'%num_minibatch, length=50)
        model.eval()
        if criterion.has_discriminator:
            criterion.discr_eval()
        for i,batch in enumerate(testing_full_data_loader):
            pg.print_progress_bar(i)
            input = batch[0].to(device)
            B, _, Cin, H, W = input.shape
            Hhigh = H * opt.upscale_factor
            Whigh = W * opt.upscale_factor
            Cout = output_channels
            channel_mask = [0, 1, 2] #RGB
            previous_output = None
            for j in range(dataset_data.num_frames):
                # prepare input
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:,0,:,:,:], Cout,
                                       opt.initialImage, False, opt.upscale_factor)
                else:
                    # NOTE(review): `flow` is undefined in this function, so
                    # this branch raises a NameError unless --disableTemporal.
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j-1, :, :, :],
                        opt.upscale_factor,
                        special_mask = True)
                previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                single_input = torch.cat((
                    input[:,j,:,:,:],
                    previous_warped_flattened),
                    dim=1)
                # run generator and cost
                prediction, residual = model(single_input)
                # write prediction image
                write_image(prediction[0, channel_mask], 'image%03d/frame%03d_prediction' % (i, j))
                # write residual image
                if residual is not None:
                    write_image(residual[0, channel_mask], 'image%03d/frame%03d_residual' % (i, j))
                # save output for next frame
                previous_output = prediction
        pg.print_progress_bar(num_minibatch)
    print("Test images sent to Tensorboard for visualization")
def checkpoint(epoch):
    """Serialize the full training state for *epoch* into the model directory."""
    path = os.path.join(modeldir, "model_epoch_{}.pth".format(epoch))
    # the stored epoch is the one training would resume FROM
    state = {'epoch': epoch + 1, 'model': model, 'parameters': opt_dict}
    if adversarial_training:
        state['discr_optimizer'] = discr_optimizer
        state['gen_optimizer'] = gen_optimizer
        state['discr_scheduler'] = discr_scheduler
        state['gen_scheduler'] = gen_scheduler
        state['criterion'] = criterion
    else:
        state['optimizer'] = optimizer
        state['scheduler'] = scheduler
    torch.save(state, path)
    print("Checkpoint saved to {}".format(path))
# make sure the top-level output folders exist
if not os.path.exists(opt.modeldir):
    os.mkdir(opt.modeldir)
if not os.path.exists(opt.logdir):
    os.mkdir(opt.logdir)
print('===> Start Training')
if not adversarial_training:
    #test_images(0)
    for epoch in range(startEpoch, opt.nEpochs + 1):
        trainNormal(epoch)
        test(epoch)
        # full-size test images are expensive: only for the first 20 epochs
        # and then every 10th epoch
        if (epoch < 20 or (epoch%10==0)) and not opt.noTestImages:
            test_images(epoch)
        checkpoint(epoch)
else:
    # evaluate the (possibly pretrained) networks once before training
    test(0)
    if not opt.noTestImages:
        test_images(0)
    for epoch in range(startEpoch, opt.nEpochs + 1):
        trainAdv_v2(epoch)
        test(epoch)
        if (epoch < 20 or (epoch%10==0)) and not opt.noTestImages:
            test_images(epoch)
        # adversarial checkpoints only every 10th epoch
        if epoch%10==0:
            checkpoint(epoch)
#writer.export_scalars_to_json(os.path.join(opt.logdir, "all_scalars.json"))
writer.close()
import os
import time
import pytest
# from mock import patch
from time import sleep
from threading import Thread
from hkube_python_wrapper.communication.streaming.StreamingManager import StreamingManager
from hkube_python_wrapper import Algorunner
from tests.configs import config
from tests.mocks import mockdata
from hkube_python_wrapper.codeApi.hkube_api import HKubeApi
oneMB = 1024 * 1024

class Algorithm(object):
    """Bare container; tests attach an ``algorithm`` dict to instances."""
    pass

def startCallbackBytes(args):
    """Entry point that returns a fixed 1 MB buffer filled with 0xdd bytes."""
    return bytearray(b'\xdd' * oneMB)

def startCallback(args):
    """Entry point that echoes the first element of the nested input list."""
    payload = args["input"]["input"]
    return payload[0]
def test_load_algorithm_callbacks():
    """A callback registered via loadAlgorithmCallbacks must behave like a direct call."""
    runner = Algorunner()
    runner.loadAlgorithmCallbacks(startCallback, options=config)
    via_runner = runner._originalAlgorithm['start']({'input': mockdata.initData}, None)
    direct = startCallback({'input': mockdata.initData})
    assert via_runner == direct
    runner.close()
def test_load_algorithm_streaming_then_batch():
    """Start the algorithm in streaming mode on a background thread, stop it,
    then verify a subsequent batch invocation still works."""
    algorunner = Algorunner()
    algorunner.loadAlgorithmCallbacks(startCallback, options=config)
    algorunner.streamingManager = StreamingManager()
    algorunner._hkubeApi = HKubeApi(None, algorunner, None, None,algorunner.streamingManager)
    algorunner._init(mockdata.streamingInitData)
    # streaming run happens on a background thread so we can stop it from here
    thrd = Thread(target=algorunner._originalAlgorithm['start'], args=[{'input': mockdata.streamingInitData}, algorunner._hkubeApi])
    thrd.start()
    algorunner._stopAlgorithm(mockdata.initData)
    # batch invocation after streaming must still return the plain result
    result1 = algorunner._originalAlgorithm['start']({'input': mockdata.initData}, algorunner._hkubeApi)
    result2 = startCallback({'input': mockdata.initData})
    assert result1 == result2
    algorunner.close()
def xtest_exit():
    """Disabled (x-prefix): graceful exit must wait until the data server stops serving.

    NOTE(review): relies on `patch`, whose import is commented out at the top
    of the file — re-enable `from mock import patch` before renaming this
    back to test_exit.
    """
    with patch('sys.exit') as exit_mock:
        def doExit(a):
            status['exit'] = True
        def invokeExit():
            algorunner._exit(None)
        def isServingTrue():
            return True
        def isServingFalse():
            return False
        algorunner = Algorunner()
        algorunner.loadAlgorithmCallbacks(startCallback)
        algorunner.connectToWorker(config)
        sleep(1)
        status = {'exit': False}
        algorunner.loadAlgorithmCallbacks(startCallback, exit=doExit)
        algorunner._dataServer.isServing = isServingTrue
        Thread(target=invokeExit).start()
        sleep(1)
        # exit must be deferred while the data server is still serving
        assert status['exit'] == False
        algorunner._dataServer.isServing = isServingFalse
        sleep(1)
        assert status['exit'] == True
        assert exit_mock.called
def test_failed_load_algorithm():
    """Loading from a nonexistent path must record a descriptive import error."""
    alg = Algorithm()
    alg.algorithm = {"path": "no_such_path", "entryPoint": "main.py"}
    runner = Algorunner()
    runner.loadAlgorithm(alg)
    error = runner._loadAlgorithmError
    assert "No module named" in error
    assert "no_such_path" in error
    runner.close()
def xtest_load_algorithm():
    """Disabled (x-prefix): load a real algorithm module from disk."""
    alg = Algorithm()
    alg.algorithm = {
        "path": "test_alg",
        "entryPoint": "main.py"
    }
    cwd = os.getcwd()
    # NOTE(review): the working directory is changed but never restored (the
    # chdir back is commented out) — this leaks into later tests.
    os.chdir(cwd + '/tests')
    algorunner = Algorunner()
    algorunner.loadAlgorithm(alg)
    # os.chdir(cwd)
    result1 = algorunner._originalAlgorithm['start']({'input': mockdata.initData}, None)
    result2 = startCallback({'input': mockdata.initData})
    assert result1 == result2
@pytest.mark.parametrize("test_input,expected", [
    ('main.py','main'),
    ('main','main'),
    ('foo.bar.main.py','foo.bar.main'),
    ('foo.bar.main','foo.bar.main'),
    ('foo/bar/main.py','foo.bar.main'),
    ('foo/bar/main','foo.bar.main'),
])
def test_entryPoint(test_input,expected):
    # _getEntryPoint must normalize slash-separated paths to dotted module
    # names and strip a trailing '.py' extension
    actual = Algorunner._getEntryPoint(test_input)
    assert actual == expected
def startCallback2(args):
    """Test algorithm entry point: return the first element of the input."""
    payload = args["input"]
    return payload[0]
def test_connect_to_worker():
    """Algorunner connects to the worker socket and receives the init data."""
    # Use a dedicated port so this test does not clash with earlier ones.
    config.discovery.update({"port": "9021"})
    algorunner = Algorunner()
    algorunner.loadAlgorithmCallbacks(startCallback2, options=config)
    algorunner.connectToWorker(config)
    # Give the background connection a moment to establish.
    time.sleep(2)
    assert algorunner._connected == True
    assert algorunner._input == mockdata.initData
    algorunner.close()
|
<filename>v0.5.0/intel/intel_minigo_submission_public_tensorflow/code/minigo/tensorflow/minigo/loop_train_eval.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper scripts to ensure that main.py commands are called correctly."""
import argh
import argparse
import cloud_logging
import logging
import os
import main
import shipname
import sys
import time
import shutil
import dual_net
import preprocessing
import numpy
import random
import glob
from utils import timer
from tensorflow import gfile
import tensorflow as tf
import logging
import goparams
import predict_games
import qmeas
from mlperf_compliance import mlperf_log
# Pull in environment variables. Run `source ./cluster/common` to set these.
#BUCKET_NAME = os.environ['BUCKET_NAME']
#BASE_DIR = "gs://{}".format(BUCKET_NAME)
BASE_DIR = goparams.BASE_DIR
# Directory layout under BASE_DIR for checkpoints, game data and logs.
MODELS_DIR = os.path.join(BASE_DIR, 'models')
SELFPLAY_DIR = os.path.join(BASE_DIR, 'data/selfplay')
SELFPLAY_BACKUP_DIR = os.path.join(BASE_DIR, 'data/selfplay_backup')
# "Buried" models/games are rejected generations moved aside, not deleted
# (see bury_latest_model below).
BURY_DIR = os.path.join(BASE_DIR, 'bury_models')
BURY_SELFPLAY_DIR = os.path.join(BASE_DIR, 'bury_selfplay')
HOLDOUT_DIR = os.path.join(BASE_DIR, 'data/holdout')
SGF_DIR = os.path.join(BASE_DIR, 'sgf')
TRAINING_CHUNK_DIR = os.path.join(BASE_DIR, 'data', 'training_chunks')
ESTIMATOR_WORKING_DIR = os.path.join(BASE_DIR, 'estimator_working_dir')
# How many games before the selfplay workers will stop trying to play more.
MAX_GAMES_PER_GENERATION = goparams.MAX_GAMES_PER_GENERATION
# What percent of games to holdout from training per generation
HOLDOUT_PCT = goparams.HOLDOUT_PCT
def print_flags():
    """Echo the computed directory-layout variables to stdout."""
    flags = {
        #'BUCKET_NAME': BUCKET_NAME,
        'BASE_DIR': BASE_DIR,
        'MODELS_DIR': MODELS_DIR,
        'SELFPLAY_DIR': SELFPLAY_DIR,
        'SELFPLAY_BACKUP_DIR': SELFPLAY_BACKUP_DIR,
        'HOLDOUT_DIR': HOLDOUT_DIR,
        'SGF_DIR': SGF_DIR,
        'TRAINING_CHUNK_DIR': TRAINING_CHUNK_DIR,
        'ESTIMATOR_WORKING_DIR': ESTIMATOR_WORKING_DIR,
    }
    print("Computed variables are:")
    rendered = ['--{}={}'.format(name, value) for name, value in flags.items()]
    print('\n'.join(rendered))
def get_models():
    """Finds all models, returning a list of model number and names
    sorted increasing.

    Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc]
    """
    meta_paths = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))
    pairs = []
    for path in meta_paths:
        fname = os.path.basename(path)
        pairs.append((shipname.detect_model_num(fname),
                      shipname.detect_model_name(fname)))
    return sorted(pairs)
def get_latest_model():
    """Finds the latest model, returning its model number and name

    Returns: (17, 000017-modelname)
    """
    models = get_models()
    if not models:
        # No trained models yet: report the bootstrap placeholder.
        return (0, '000000-bootstrap')
    return models[-1]
def get_second_latest_model():
    """Finds the second-latest model, returning its model number and name

    Returns: (17, 000017-modelname)

    Pads with the bootstrap placeholder when fewer than two real models
    exist, so indexing [-2] is always valid.
    """
    models = get_models()
    if len(models) < 2:
        # BUG FIX: the original *replaced* the list with the single
        # bootstrap entry when len == 1, so models[-2] raised IndexError
        # whenever exactly one model existed (and also when none did).
        # Prepend the bootstrap entry instead.
        models = [(0, '000000-bootstrap')] + models
    return models[-2]
def get_model(model_num):
    """Return the model name for *model_num*; raise ValueError if unknown."""
    lookup = dict(get_models())
    if model_num not in lookup:
        raise ValueError("Model {} not found!".format(model_num))
    return lookup[model_num]
def evaluate(prev_model, cur_model, readouts=200, verbose=1, resign_threshold=0.95):
    ''' returns True if cur model should be used in future games '''
    # NOTE(review): `readouts` is forwarded below, but `verbose` and
    # `resign_threshold` are never used (verbose=0 is hard-coded in the
    # evaluate_evenly_many call) -- confirm whether that is intended.
    prev_model_save_path = os.path.join(MODELS_DIR, prev_model)
    cur_model_save_path = os.path.join(MODELS_DIR, cur_model)
    game_output_dir = os.path.join(SELFPLAY_DIR, cur_model)
    game_holdout_dir = os.path.join(HOLDOUT_DIR, cur_model)
    sgf_dir = os.path.join(SGF_DIR, cur_model)
    # Play an even number of games per side and measure the new model's
    # win percentage against the previous model.
    cur_win_pct = main.evaluate_evenly_many(prev_model_save_path, cur_model_save_path, game_output_dir, readouts=readouts, games=goparams.EVAL_GAMES_PER_SIDE, verbose=0)
    print('Evalute Win Pct = ', cur_win_pct)
    qmeas.record('evaluate_win_pct', cur_win_pct)
    keep = False
    # Keep the new model only if it clears the configured win threshold.
    if cur_win_pct >= goparams.EVAL_WIN_PCT_FOR_NEW_MODEL:
        qmeas.record('evaluate_choice', 'new')
        keep = True
    else:
        qmeas.record('evaluate_choice', 'old')
        keep = False
    qmeas.record('eval_summary', {'win_pct': cur_win_pct, 'model': cur_model, 'keep': keep})
    return keep
def gather():
    """Collect selfplay output into training chunks via main.gather."""
    print("Gathering game output...")
    main.gather(
        input_directory=SELFPLAY_DIR,
        output_directory=TRAINING_CHUNK_DIR)
def train():
    """Train the next-generation model from the gathered training chunks.

    Returns the new model's name; the checkpoint is written under
    MODELS_DIR.
    """
    model_num, model_name = get_latest_model()
    print("Training on gathered game data, initializing from {}".format(model_name))
    new_model_name = shipname.generate(model_num + 1)
    print("New model will be {}".format(new_model_name))
    load_file = os.path.join(MODELS_DIR, model_name)
    save_file = os.path.join(MODELS_DIR, new_model_name)
    # NOTE(review): load_file is computed but never passed to main.train;
    # presumably training resumes from ESTIMATOR_WORKING_DIR -- confirm.
    #try:
    main.train(ESTIMATOR_WORKING_DIR, TRAINING_CHUNK_DIR, save_file,
               generation_num=model_num + 1)
    #except:
    #    print("Got an error training, muddling on...")
    #    logging.exception("Train error")
    return new_model_name
def bury_latest_model():
    """Retire the newest model after a failed evaluation.

    Moves its checkpoint files and selfplay games into the bury
    directories, clones the previous model under '<num>-continue' so the
    generation count keeps advancing, and restores backed-up selfplay
    games for the clone.  Shell commands run via os.system; a non-zero
    status on the move/copy steps raises.
    """
    main._ensure_dir_exists(BURY_DIR)
    main._ensure_dir_exists(BURY_SELFPLAY_DIR)
    model_num, model_name = get_latest_model()
    save_file = os.path.join(MODELS_DIR, model_name)
    # Move every checkpoint file of the rejected model out of MODELS_DIR.
    cmd = 'mv {}* {}/'.format(save_file, BURY_DIR)
    print('Bury CMD: ', cmd)
    if os.system(cmd) != 0:
        raise Exception('Failed to bury model: ' + cmd)
    # Move any selfplay games produced by that model as well.
    cmd = 'mv {}* {}/'.format(os.path.join(SELFPLAY_DIR, model_name), BURY_SELFPLAY_DIR)
    print('Bury Games CMD: ', cmd)
    if os.system(cmd) != 0:
        raise Exception('Failed to bury model: ' + cmd)
    # With the newest model gone, get_latest_model() now yields the
    # previous generation.
    prev_num, prev_model_name = get_latest_model()
    prev_save_file = os.path.join(MODELS_DIR, prev_model_name)
    suffixes = ['.data-00000-of-00001', '.index', '.meta', '.transformed.pb']
    # Re-publish the previous model under the buried model's number.
    new_name = '{:06d}-continue'.format(model_num)
    new_save_file = os.path.join(MODELS_DIR, new_name)
    for suffix in suffixes:
        cmd = 'cp {} {}'.format(prev_save_file + suffix, new_save_file + suffix)
        print('DBUG ', cmd)
        if os.system(cmd) != 0:
            raise Exception('Failed to copy: ' + cmd)
    # move selfplay games from selfplay_backup dir (best effort: failures
    # here only print, they do not raise)
    cmd = 'mkdir -p {}'.format(os.path.join(SELFPLAY_DIR, new_name))
    if os.system(cmd) != 0:
        print('Failed to mkdir: ' + cmd)
    cmd = 'mv {}/* {}/'.format(SELFPLAY_BACKUP_DIR, os.path.join(SELFPLAY_DIR, new_name))
    print('Recover backup games CMD:', cmd)
    if os.system(cmd) != 0:
        print('Failed to move: ' + cmd)
def validate(model_num=None, validate_name=None):
    """ Runs validate on the directories up to the most recent model, or up to
    (but not including) the model specified by `model_num`
    """
    if model_num is None:
        model_num, model_name = get_latest_model()
    else:
        model_num = int(model_num)
        model_name = get_model(model_num)
    # Model N was trained on games up through model N-2, so the validation set
    # should only be for models through N-2 as well, thus the (model_num - 1)
    # term.
    models = list(
        filter(lambda num_name: num_name[0] < (model_num - 1), get_models()))
    # Run on the most recent 50 generations,
    # TODO(brianklee): make this hyperparameter dependency explicit/not hardcoded
    holdout_dirs = [os.path.join(HOLDOUT_DIR, pair[1])
                    for pair in models[-50:]]
    main.validate(ESTIMATOR_WORKING_DIR, *holdout_dirs,
                  checkpoint_name=os.path.join(MODELS_DIR, model_name),
                  validate_name=validate_name)
def echo():
    """Do nothing; flags are echo'd in the ifmain block below."""
    return None
def rl_loop_train():
    """Training phase of the reinforcement learning loop.

    Gathers selfplay output into training chunks, then trains the next
    model generation.
    """
    # Selfplay workers fill SELFPLAY_DIR while we wait; close that timer.
    qmeas.stop_time('selfplay_wait')
    print("Gathering game output...")
    gather()
    print("Training on gathered game data...")
    _, model_name = get_latest_model()
    new_model = train()
def rl_loop_eval():
    """Puzzle-evaluation phase of the reinforcement learning loop.

    Scores the latest model on benchmark pro-game SGF puzzles, records
    the accuracy via qmeas/mlperf_log, and writes TERMINATE_FLAG once the
    configured termination accuracy is reached.

    NOTE(review): reads the global `iteration` set in the __main__ block,
    so this only works when run via this script's CLI.
    """
    (_, new_model) = get_latest_model()
    qmeas.start_time('puzzle')
    new_model_path = os.path.join(MODELS_DIR, new_model)
    # Benchmark positions taken from professional 9x9 games.
    sgf_files = [
        './benchmark_sgf/9x9_pro_YKSH.sgf',
        './benchmark_sgf/9x9_pro_IYMD.sgf',
        './benchmark_sgf/9x9_pro_YSIY.sgf',
        './benchmark_sgf/9x9_pro_IYHN.sgf',
    ]
    result, total_pct = predict_games.report_for_puzzles_parallel(new_model_path, sgf_files, 2, tries_per_move=1)
    #result, total_pct = predict_games.report_for_puzzles(new_model_path, sgf_files, 2, tries_per_move=1)
    print('accuracy = ', total_pct)
    print('result = ', result)
    mlperf_log.minigo_print(key=mlperf_log.EVAL_ACCURACY,
                            value={"epoch": iteration, "value": total_pct})
    mlperf_log.minigo_print(key=mlperf_log.EVAL_TARGET,
                            value=goparams.TERMINATION_ACCURACY)
    qmeas.record('puzzle_total', total_pct)
    qmeas.record('puzzle_result', repr(result))
    qmeas.record('puzzle_summary', {'results': repr(result), 'total_pct': total_pct, 'model': new_model})
    qmeas._flush()
    # Persist the per-model puzzle report next to the run's data.
    with open(os.path.join(BASE_DIR, new_model + '-puzzles.txt'), 'w') as f:
        f.write(repr(result))
        f.write('\n' + str(total_pct) + '\n')
    qmeas.stop_time('puzzle')
    if total_pct >= goparams.TERMINATION_ACCURACY:
        print('Reaching termination accuracy; ', goparams.TERMINATION_ACCURACY)
        mlperf_log.minigo_print(key=mlperf_log.RUN_STOP,
                                value={"success": True})
        # TERMINATE_FLAG tells the outer driver loop to stop running.
        with open('TERMINATE_FLAG', 'w') as f:
            f.write(repr(result))
            f.write('\n' + str(total_pct) + '\n')
    qmeas.end()
def rl_loop_pk():
    """Head-to-head ("PK") phase of the reinforcement learning loop.

    Compares the newest model against the second-newest; if it loses,
    writes PK_FLAG so the driver script runs the bury phase next.
    """
    _, new_model = get_latest_model()
    model_num, model_name = get_second_latest_model()
    if not evaluate(model_name, new_model, verbose=0):
        print('Flag bury new model')
        # PK_FLAG signals the outer driver to invoke the 'bury' phase.
        with open('PK_FLAG', 'w') as f:
            f.write("pk\n")
    qmeas.end()
def rl_loop_bury():
    """Bury (retire) the latest model, then finalize the qmeas stats."""
    bury_latest_model()
    qmeas.end()
def rl_loop():
    """Run one combined train + puzzle-eval + model-eval pass of the
    reinforcement learning loop with all default parameters.
    """
    if goparams.DUMMY_MODEL:
        # monkeypatch the hyperparams so that we get a quickly executing network.
        dual_net.get_default_hyperparams = lambda **kwargs: {
            'k': 8, 'fc_width': 16, 'num_shared_layers': 1, 'l2_strength': 1e-4, 'momentum': 0.9}
        dual_net.TRAIN_BATCH_SIZE = 16
        dual_net.EXAMPLES_PER_GENERATION = 64
        #monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
        preprocessing.SHUFFLE_BUFFER_SIZE = 1000
    qmeas.stop_time('selfplay_wait')
    print("Gathering game output...")
    gather()
    print("Training on gathered game data...")
    _, model_name = get_latest_model()
    new_model = train()
    if goparams.EVALUATE_PUZZLES:
        qmeas.start_time('puzzle')
        new_model_path = os.path.join(MODELS_DIR, new_model)
        # Benchmark positions taken from professional 9x9 games.
        sgf_files = [
            './benchmark_sgf/9x9_pro_YKSH.sgf',
            './benchmark_sgf/9x9_pro_IYMD.sgf',
            './benchmark_sgf/9x9_pro_YSIY.sgf',
            './benchmark_sgf/9x9_pro_IYHN.sgf',
        ]
        result, total_pct = predict_games.report_for_puzzles(new_model_path, sgf_files, 2, tries_per_move=1)
        print('accuracy = ', total_pct)
        # NOTE(review): `iteration` is a global set in the __main__ block.
        mlperf_log.minigo_print(key=mlperf_log.EVAL_ACCURACY,
                                value={"epoch": iteration, "value": total_pct})
        mlperf_log.minigo_print(key=mlperf_log.EVAL_TARGET,
                                value=goparams.TERMINATION_ACCURACY)
        qmeas.record('puzzle_total', total_pct)
        qmeas.record('puzzle_result', repr(result))
        qmeas.record('puzzle_summary', {'results': repr(result), 'total_pct': total_pct, 'model': new_model})
        qmeas._flush()
        # Persist the per-model puzzle report next to the run's data.
        with open(os.path.join(BASE_DIR, new_model + '-puzzles.txt'), 'w') as f:
            f.write(repr(result))
            f.write('\n' + str(total_pct) + '\n')
        qmeas.stop_time('puzzle')
        if total_pct >= goparams.TERMINATION_ACCURACY:
            print('Reaching termination accuracy; ', goparams.TERMINATION_ACCURACY)
            mlperf_log.minigo_print(key=mlperf_log.RUN_STOP,
                                    value={"success": True})
            # TERMINATE_FLAG tells the outer driver loop to stop running.
            with open('TERMINATE_FLAG', 'w') as f:
                f.write(repr(result))
                f.write('\n' + str(total_pct) + '\n')
    if goparams.EVALUATE_MODELS:
        # Retire the new model if it fails head-to-head evaluation.
        if not evaluate(model_name, new_model):
            bury_latest_model()
if __name__ == '__main__':
    #tf.logging.set_verbosity(tf.logging.INFO)
    # Usage: loop_train_eval.py <seed> <iteration> <phase>
    # where <phase> is one of: train, eval, pk, bury.
    seed = int(sys.argv[1])
    iteration = int(sys.argv[2])
    print('Setting random seed, iteration = ', seed, iteration)
    # Derive a per-iteration seed so each loop iteration differs.
    # NOTE(review): hash() of a small int is the int itself on CPython,
    # but hash() is not guaranteed stable across interpreters -- confirm
    # reproducibility requirements.
    seed = hash(seed) + iteration
    print("training seed: ", seed)
    random.seed(seed)
    tf.set_random_seed(seed)
    numpy.random.seed(seed)
    qmeas.start(os.path.join(BASE_DIR, 'stats'))
    # get TF logger
    log = logging.getLogger('tensorflow')
    log.setLevel(logging.DEBUG)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # create file handler which logs even debug messages
    fh = logging.FileHandler('tensorflow.log')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    log.addHandler(fh)
    # Dispatch to the requested phase of the RL loop.
    if sys.argv[3] == 'train':
        rl_loop_train()
    if sys.argv[3] == 'eval':
        mlperf_log.minigo_print(key=mlperf_log.EVAL_START, value=iteration)
        rl_loop_eval()
        mlperf_log.minigo_print(key=mlperf_log.EVAL_STOP, value=iteration)
    if sys.argv[3] == 'pk':
        rl_loop_pk()
    if sys.argv[3] == 'bury':
        rl_loop_bury()
|
# Author: <NAME>
# Student-ID: 801-14-3804
# H.W # 1 : Consumer and Producer problem
# We create the Mobile.py which will have one thread which will do the following:
# 1) Concurrently generate random numbers that simulate the time the mobile job will take in the compute server.
# 2) Send a message for each "job" to the compute server with a mobile ID.
# Sleep a short (1 to 5 seconds) random period of time between sends.
# References for work:
"""
1)https://docs.python.org/2/library/threading.html
2)http://thomas-cokelaer.info/tutorials/python/module_threading.html
3)https://pymotw.com/2/socket/udp.html
4)https://www.networkcomputing.com/applications/python-network-programming-handling-socket-errors/1384050408
5)https://www.8bitavenue.com/2016/11/python-mutex-example/
6)https://self-learning-java-tutorial.blogspot.com/2015/12/python-bounded-semaphore.html
"""
# We import the modules we will be using:
import threading
import random
import sys
import socket
import time
from random import randint
from threading import Thread
# we define our worker function:
# This function does the following:
# 1) It will generate how long each work created will "run" for.
# 2) It will store the given id and the time this id will run for in the "info" variable as a string
# 3) Afterwards it takes the given ip and port in order to create the serverAddress + it creates the socket
# 4) We try to send the information we stored in the "info" variable through serverAddres(ip and port) variable.
# 5) Afterwards we wait for a reply from the server from the same port
# 6) If it occurs we then close the socket.
def worker():
"""thread worker function"""
b = sys.argv
time_var = random.randint(1,20) # make the time the job will "run" from 1 sec to 20
print( 'worker id: '+str(b[1])+ " work time: " + str(time_var) + '\n')
info= str(b[1]) +':' +str(time_var)
host = b[2]#'127.0.1.1'
port = b[3]
port = int(port)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serverAddress=(host,port)
#Info will be the message sent
try:
print("sending message info")
sent= sock.sendto(info,serverAddress)
except socket.error, e:
print("Error sending data: %s" %e)
sys.exit(1)
#receive response
try:
print('waiting to receive')
data,serverREceived= sock.recvfrom(port)
except socket.error, e:
print("Error sending data %s" %e)
sys.exit(1)
print('received')
if data:
print('close socket')
sock.close
sleep_time=random.randint(1,5)
print("it slept for :" + str(sleep_time)+ " seconds")
time.sleep(sleep_time)
return
# We create how many threads will be run
# The number inside range() can be altered to generate more mobile jobs using the same ID.
# NOTE(review): the original comment said "for now it is sent at one",
# but range(5) starts five worker threads -- confirm the intended count.
threads = []
for i in range(5):
    t = threading.Thread(target=worker)
    threads.append(t)
    # Start the thread's activity. It must be called at most once per thread object.
    t.start()
# We make the variable arguments which will be able to access the command line given variables
# This was done just to be able to visualize the given information.
# Just done to represent info:
arguments = sys.argv
given_id = arguments[1]
given_ip = arguments[2]
given_port = arguments[3]
given_port = int(given_port)
print ("the given id was : " + str(arguments[1]))
print ("the given ip was : " + str(arguments[2]))
print ("the given port was : " + str(arguments[3]))
|
<gh_stars>0
#!/usr/bin/python3
# coding: utf-8
import argparse
import re
import sys
import urllib.request
from html_table_parser import HTMLTableParser
# Regex for parsing options on MySQL documentation pages
# Options are (normally) specified as command-line options
# as anchor tags on the page. Certain documentation pages only
# show options in table listings, however.
OPTION_REGEX = '<a name="option_%s_(.*?)"></a>'
OPTION_TABLE_REGEX = '^(--)?([A-Za-z_-]+).*$'
# File heading, as per the original supported_params file
FILE_HEADER = """# vim
{#- Do not edit this YAML file by hand. See README.rst for how to update -#}
{% load_yaml as supported_params %}
"""
FILE_FOOTER = """{% endload %}"""
# Standard YAML template for options for a section
YAML_TEMPLATE = """# From %(url)s
%(section)s:
- %(options)s
"""
# For rendering Jinja that handles multiple sections
# Several MySQL utilities use exactly the same options
# Note this variable is string formatted twice, hence the double-double % signs
YAML_TEMPLATE_MULTI = """# From %%(url)s
{%%%% for section in %(sections)r %%%%}
{{ section }}:
- %%(options)s
{%%%% endfor %%%%}
"""
# Options specified in HTML documentation as command-line options
# like so <a name="option_mysql_help"></a>.
# Structure is (section_id, documentation_url, yaml_template_str)
SECTIONS = (
    ('mysql',
     'https://dev.mysql.com/doc/refman/5.7/en/mysql-command-options.html',
     YAML_TEMPLATE_MULTI % {'sections': ['client', 'mysql']}),
    ('mysqldump',
     'https://dev.mysql.com/doc/refman/5.7/en/mysqldump.html',
     YAML_TEMPLATE),
    ('mysqld_safe',
     'https://dev.mysql.com/doc/refman/5.7/en/mysqld-safe.html',
     YAML_TEMPLATE),
    # Removed in MySQL 5.7
    ('mysqlhotcopy',
     'http://dev.mysql.com/doc/refman/5.6/en/mysqlhotcopy.html',
     YAML_TEMPLATE),
    ('mysqladmin',
     'http://dev.mysql.com/doc/refman/5.7/en/mysqladmin.html',
     YAML_TEMPLATE),
    ('mysqlcheck',
     'http://dev.mysql.com/doc/refman/5.7/en/mysqlcheck.html',
     YAML_TEMPLATE),
    ('mysqlimport',
     'http://dev.mysql.com/doc/refman/5.7/en/mysqlimport.html',
     YAML_TEMPLATE),
    ('mysqlshow',
     'http://dev.mysql.com/doc/refman/5.7/en/mysqlshow.html',
     YAML_TEMPLATE),
    ('myisampack',
     'http://dev.mysql.com/doc/refman/5.7/en/myisampack.html',
     YAML_TEMPLATE),
)
# Options specified in documentation as command-line and
# option file values in a table only.
SECTIONS_VIA_TABLE = (
    ('myisamchk',
     'https://dev.mysql.com/doc/refman/5.7/en/myisamchk.html',
     YAML_TEMPLATE_MULTI % {'sections': ['myisamchk', 'isamchk']}),
)
# Server options specified in documentation
# (a single (section, url, template) triple, not a tuple of triples)
SERVER_OPTIONS = (
    'mysqld',
    'https://dev.mysql.com/doc/refman/5.7/en/mysqld-option-tables.html',
    YAML_TEMPLATE
)
def read_url(url):
    """ Read the given URL and decode the response as UTF-8.
    """
    request = urllib.request.Request(url)
    body = urllib.request.urlopen(request).read()
    return body.decode('utf-8')
def read_first_table(url):
    """ Read the given URL, parse the HTML, and return the first table.
    """
    parser = HTMLTableParser()
    parser.feed(read_url(url))
    return parser.tables[0]  # Use first table on the page
def parse_anchors(url, section):
    """ Return parsed options from option anchors at the given URL.
    """
    pattern = OPTION_REGEX % section
    return re.findall(pattern, read_url(url))
def parse_tables(url, section):
    """ Return parsed options from HTML tables at the given URL.
    This matches the given option regex, and ensures that the
    first row of the table is ignored; it contains headings only.
    """
    table = read_first_table(url)
    # NOTE(review): a row whose first cell does not match
    # OPTION_TABLE_REGEX would make re.match return None and raise
    # AttributeError here -- confirm the tables are always well-formed.
    return [re.match(OPTION_TABLE_REGEX, row[0]).groups()[1]
            for row in table[1:]]
def parse_mysqld(url, section):
    """ Return the parsed options from the huge mysqld table.
    The massive options table shows variables and options and
    highlights where they can be used. The following code only
    pulls out those that are marked as 'Yes' for use in an option file.
    """
    table = read_first_table(url)
    # Find which column holds the option file data
    option_index = table[0].index('Option File')
    # Only pull out options able to be used in an options file
    # (rows too short to have that column are skipped).
    return [re.match(OPTION_TABLE_REGEX, row[0]).groups()[1]
            for row in table[1:]
            if len(row) >= option_index + 1 and
            row[option_index].strip().lower() == 'yes']
def print_yaml_options(sections, parser, file=sys.stdout):
    """ Perform really basic templating for output.
    A YAML library could be used, but we avoid extra dependencies by
    just using string formatting.
    """
    for section, url, yaml in sections:
        names = parser(url, section)
        rendered = yaml % {
            'section': section,
            'options': '\n - '.join(names),
            'url': url,
        }
        print(rendered, end='', file=file)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Scrape the MySQL documentation to obtain'
        ' all the supported parameters for different utilities.')
    parser.add_argument('--output',
                        '-o',
                        help='File output location',
                        default=sys.stdout)
    config = parser.parse_args()
    # --output may be a path (str) or the default stdout stream object.
    output = open(config.output, 'w') if isinstance(config.output, str) \
        else config.output
    # NOTE(review): an opened output file is never explicitly closed;
    # it is flushed on interpreter exit -- confirm this is acceptable.
    print(FILE_HEADER, end='', file=output)
    print_yaml_options(SECTIONS, parse_anchors, file=output)
    print_yaml_options(SECTIONS_VIA_TABLE, parse_tables, file=output)
    print_yaml_options((SERVER_OPTIONS,), parse_mysqld, file=output)
    print(FILE_FOOTER, end='', file=output)
|
<gh_stars>1-10
# Copyright 2013 VMware, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import exc
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.dbexts import vcns_models
from neutron.plugins.vmware.vshield.common import (
exceptions as vcns_exc)
LOG = logging.getLogger(__name__)
def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status):
    """Persist a new router -> VSE edge binding row and return it."""
    row = vcns_models.VcnsRouterBinding(
        router_id=router_id,
        edge_id=vse_id,
        lswitch_id=lswitch_id,
        status=status)
    with session.begin(subtransactions=True):
        session.add(row)
    return row
def get_vcns_router_binding(session, router_id):
    """Return the binding for router_id, or None when absent."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsRouterBinding)
        return query.filter_by(router_id=router_id).first()
def update_vcns_router_binding(session, router_id, **kwargs):
    """Update fields of the router binding identified by router_id.

    Raises sqlalchemy.orm.exc.NoResultFound when no binding exists.
    """
    with session.begin(subtransactions=True):
        binding = (session.query(vcns_models.VcnsRouterBinding).
                   filter_by(router_id=router_id).one())
        # dict.items() iterates identically on Python 2 and 3;
        # iteritems() is Python-2-only and breaks on Python 3.
        for key, value in kwargs.items():
            binding[key] = value
def delete_vcns_router_binding(session, router_id):
    """Delete the binding row for router_id (NoResultFound if absent)."""
    with session.begin(subtransactions=True):
        row = (session.query(vcns_models.VcnsRouterBinding).
               filter_by(router_id=router_id).one())
        session.delete(row)
#
# Edge Firewall binding methods
#
def add_vcns_edge_firewallrule_binding(session, map_info):
    """Record the mapping of a neutron FW rule to its VSE-side rule id."""
    row = vcns_models.VcnsEdgeFirewallRuleBinding(
        rule_id=map_info['rule_id'],
        rule_vseid=map_info['rule_vseid'],
        edge_id=map_info['edge_id'])
    with session.begin(subtransactions=True):
        session.add(row)
    return row
def delete_vcns_edge_firewallrule_binding(session, id):
    """Delete the FW-rule binding for id; NsxPluginException if absent."""
    with session.begin(subtransactions=True):
        deleted = (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
                   filter_by(rule_id=id).delete())
        if not deleted:
            msg = _("Rule Resource binding with id:%s not found!") % id
            raise nsx_exc.NsxPluginException(err_msg=msg)
def get_vcns_edge_firewallrule_binding(session, id, edge_id):
    """Return the FW-rule binding for (id, edge_id), or None."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsEdgeFirewallRuleBinding)
        return query.filter_by(rule_id=id, edge_id=edge_id).first()
def get_vcns_edge_firewallrule_binding_by_vseid(
        session, edge_id, rule_vseid):
    """Look up a FW-rule binding by its VSE-side rule id."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsEdgeFirewallRuleBinding)
        try:
            return query.filter_by(edge_id=edge_id,
                                   rule_vseid=rule_vseid).one()
        except exc.NoResultFound:
            msg = _("Rule Resource binding not found!")
            raise nsx_exc.NsxPluginException(err_msg=msg)
def cleanup_vcns_edge_firewallrule_binding(session, edge_id):
    """Delete every FW-rule binding attached to edge_id (no-op if none)."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsEdgeFirewallRuleBinding)
        query.filter_by(edge_id=edge_id).delete()
def add_vcns_edge_vip_binding(session, map_info):
    """Record the mapping of a LB VIP to its VSE-side ids."""
    row = vcns_models.VcnsEdgeVipBinding(
        vip_id=map_info['vip_id'],
        edge_id=map_info['edge_id'],
        vip_vseid=map_info['vip_vseid'],
        app_profileid=map_info['app_profileid'])
    with session.begin(subtransactions=True):
        session.add(row)
    return row
def get_vcns_edge_vip_binding(session, id):
    """Return the VIP binding for id; raise VcnsNotFound when missing."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsEdgeVipBinding)
        try:
            return query.filter_by(vip_id=id).one()
        except exc.NoResultFound:
            msg = _("VIP Resource binding with id:%s not found!") % id
            LOG.exception(msg)
            raise vcns_exc.VcnsNotFound(
                resource='router_service_binding', msg=msg)
def delete_vcns_edge_vip_binding(session, id):
    """Delete the VIP binding for id; NsxPluginException when absent."""
    with session.begin(subtransactions=True):
        deleted = (session.query(vcns_models.VcnsEdgeVipBinding).
                   filter_by(vip_id=id).delete())
        if not deleted:
            msg = _("VIP Resource binding with id:%s not found!") % id
            LOG.exception(msg)
            raise nsx_exc.NsxPluginException(err_msg=msg)
def add_vcns_edge_pool_binding(session, map_info):
    """Record the mapping of a LB pool to its VSE-side pool id."""
    row = vcns_models.VcnsEdgePoolBinding(
        pool_id=map_info['pool_id'],
        edge_id=map_info['edge_id'],
        pool_vseid=map_info['pool_vseid'])
    with session.begin(subtransactions=True):
        session.add(row)
    return row
def get_vcns_edge_pool_binding(session, id, edge_id):
    """Return the pool binding for (id, edge_id), or None."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsEdgePoolBinding)
        return query.filter_by(pool_id=id, edge_id=edge_id).first()
def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid):
    """Look up a pool binding by its VSE-side pool id."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsEdgePoolBinding)
        try:
            return query.filter_by(edge_id=edge_id,
                                   pool_vseid=pool_vseid).one()
        except exc.NoResultFound:
            msg = (_("Pool Resource binding with edge_id:%(edge_id)s "
                     "pool_vseid:%(pool_vseid)s not found!") %
                   {'edge_id': edge_id, 'pool_vseid': pool_vseid})
            LOG.exception(msg)
            raise nsx_exc.NsxPluginException(err_msg=msg)
def delete_vcns_edge_pool_binding(session, id, edge_id):
    """Delete the pool binding; NsxPluginException when absent."""
    with session.begin(subtransactions=True):
        deleted = (session.query(vcns_models.VcnsEdgePoolBinding).
                   filter_by(pool_id=id, edge_id=edge_id).delete())
        if not deleted:
            msg = _("Pool Resource binding with id:%s not found!") % id
            LOG.exception(msg)
            raise nsx_exc.NsxPluginException(err_msg=msg)
def add_vcns_edge_monitor_binding(session, map_info):
    """Record the mapping of a LB health monitor to its VSE-side id."""
    row = vcns_models.VcnsEdgeMonitorBinding(
        monitor_id=map_info['monitor_id'],
        edge_id=map_info['edge_id'],
        monitor_vseid=map_info['monitor_vseid'])
    with session.begin(subtransactions=True):
        session.add(row)
    return row
def get_vcns_edge_monitor_binding(session, id, edge_id):
    """Return the monitor binding for (id, edge_id), or None."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsEdgeMonitorBinding)
        return query.filter_by(monitor_id=id, edge_id=edge_id).first()
def delete_vcns_edge_monitor_binding(session, id, edge_id):
    """Delete the monitor binding; NsxPluginException when absent."""
    with session.begin(subtransactions=True):
        deleted = (session.query(vcns_models.VcnsEdgeMonitorBinding).
                   filter_by(monitor_id=id, edge_id=edge_id).delete())
        if not deleted:
            msg = _("Monitor Resource binding with id:%s not found!") % id
            LOG.exception(msg)
            raise nsx_exc.NsxPluginException(err_msg=msg)
|
<filename>src/saltext/vmware/utils/esxi.py
import hashlib
import logging
import socket
import ssl
import salt.exceptions
import saltext.vmware.utils.cluster as utils_cluster
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.datacenter as utils_datacenter
# pylint: disable=no-name-in-module
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
log = logging.getLogger(__name__)
def get_hosts(
    service_instance,
    datacenter_name=None,
    host_names=None,
    cluster_name=None,
    get_all_hosts=False,
):
    """
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    """
    properties = ["name"]
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            "Must specify the datacenter when specifying the cluster"
        )
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = utils_common.get_root_folder(service_instance)
    else:
        start_point = utils_datacenter.get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append("parent")
    # Search for the objects
    hosts = utils_common.get_mors_with_properties(
        service_instance,
        vim.HostSystem,
        container_ref=start_point,
        property_list=properties,
    )
    log.trace("Retrieved hosts: %s", [h["name"] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Skip hosts that are not members of the requested cluster.
            if not isinstance(h["parent"], vim.ClusterComputeResource):
                continue
            parent_name = utils_common.get_managed_object_name(h["parent"])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            # With get_all_hosts, the host_names filter is bypassed.
            filtered_hosts.append(h["object"])
            continue
        if h["name"] in host_names:
            filtered_hosts.append(h["object"])
    return filtered_hosts
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None):
    """
    Configures the host cache of the specified host.  Returns True on
    success; raises a salt VMware exception on API failure.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    """
    hostname = utils_common.get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look up the host's cache configuration manager on demand.
        props = utils_common.get_properties_of_managed_object(
            host_ref, ["configManager.cacheConfigurationManager"]
        )
        if not props.get("configManager.cacheConfigurationManager"):
            raise salt.exceptions.VMwareObjectRetrievalError(
                "Host '{}' has no host cache".format(hostname)
            )
        host_cache_manager = props["configManager.cacheConfigurationManager"]
    log.trace(
        "Configuring the host cache on host '%s', datastore '%s', " "swap size=%s MiB",
        hostname,
        datastore_ref.name,
        swap_size_MiB,
    )
    spec = vim.HostCacheConfigurationSpec(datastore=datastore_ref, swapSize=swap_size_MiB)
    log.trace("host_cache_spec=%s", spec)
    # Translate pyVmomi faults into salt exception types.
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            "Not enough permissions. Required privilege: " "{}".format(exc.privilegeId)
        )
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    utils_common.wait_for_task(task, hostname, "HostCacheConfigurationTask")
    log.trace("Configured host cache on host '%s'", hostname)
    return True
def list_hosts(service_instance):
    """
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    """
    # Delegate to the generic object lister, filtered on HostSystem.
    host_systems = utils_common.list_objects(service_instance, vim.HostSystem)
    return host_systems
def disconnect_host(host, service_instance):
    """
    Disconnects host from vCenter instance
    Returns connection state of host

    host
        Name of ESXi instance in vCenter.

    service_instance
        The Service Instance Object from which to obtain host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    state = host_ref.summary.runtime.connectionState
    # Nothing to do when the host is already disconnected.
    if state == "disconnected":
        return state
    disconnect_task = host_ref.DisconnectHost_Task()
    host_ref = utils_common.wait_for_task(disconnect_task, host_ref, "disconnect host task")
    return host_ref.summary.runtime.connectionState
def reconnect_host(host, service_instance):
    """
    Reconnects host from vCenter instance
    Returns connection state of host

    host
        Name of ESXi instance in vCenter.

    service_instance
        The Service Instance Object from which to obtain host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    state = host_ref.summary.runtime.connectionState
    # Nothing to do when the host is already connected.
    if state == "connected":
        return state
    reconnect_task = host_ref.ReconnectHost_Task()
    result_host = utils_common.wait_for_task(reconnect_task, host_ref, "reconnect host task")
    return result_host.summary.runtime.connectionState
def move_host(host, cluster_name, service_instance):
    """
    Move host to a different cluster.
    Returns a confirmation message.

    host
        Name of ESXi instance in vCenter.

    cluster_name
        Name of cluster to move host to.

    service_instance
        The Service Instance Object from which to obtain host.

    Raises VMwareApiError when the target cluster is in a different
    datacenter than the host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    cluster_ref = utils_common.get_mor_by_property(
        service_instance, vim.ClusterComputeResource, cluster_name
    )
    host_dc = utils_common.get_parent_of_type(host_ref, vim.Datacenter)
    host_cluster = utils_common.get_parent_of_type(host_ref, vim.ClusterComputeResource)
    cluster_dc = utils_common.get_parent_of_type(cluster_ref, vim.Datacenter)
    if host_dc != cluster_dc:
        raise salt.exceptions.VMwareApiError("Cluster has to be in the same datacenter")
    task = cluster_ref.MoveInto_Task([host_ref])
    utils_common.wait_for_task(task, cluster_name, "move host task")
    # Bug fix: a standalone host has no parent cluster, so host_cluster can be
    # None; guard before dereferencing .name to avoid an AttributeError after
    # the move has already succeeded.
    origin = host_cluster.name if host_cluster else "standalone"
    return f"moved {host} from {origin} to {cluster_ref.name}"
def remove_host(host, service_instance):
    """
    Removes host from vCenter instance.
    Returns a confirmation message.

    host
        Name of ESXi instance in vCenter.

    service_instance
        The Service Instance Object from which to obtain host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    destroy_task = host_ref.Destroy_Task()
    # Wait for vCenter to finish tearing the host object down.
    utils_common.wait_for_task(destroy_task, host, "destroy host task")
    return f"removed host {host}"
def _format_ssl_thumbprint(number):
"""
Formats ssl cert number
number
Number to be formatted into ssl thumbprint
"""
string = str(number)
return ":".join(a + b for a, b in zip(string[::2], string[1::2]))
def _get_host_thumbprint(ip, verify_host_cert=True):
    """
    Returns host's ssl thumbprint.

    ip
        IP address of host.

    verify_host_cert
        When True (default), use the default SSL context so the host's
        certificate chain and hostname are validated; when False, use a bare
        SSLContext that performs no verification.
    """
    # Bare SSLContext: no certificate verification (used when
    # verify_host_cert is False).
    ctx = ssl.SSLContext()
    if verify_host_cert:
        ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
    with socket.create_connection((ip, 443)) as _socket:
        _socket.settimeout(1)
        with ctx.wrap_socket(_socket, server_hostname=ip) as wrappedSocket:
            # True -> DER-encoded (binary) form of the peer certificate.
            cert = wrappedSocket.getpeercert(True)
            sha1 = hashlib.sha1(cert).hexdigest()
            response = _format_ssl_thumbprint(sha1)
            return response
def add_host(
    host,
    root_user,
    password,
    cluster_name,
    datacenter_name,
    verify_host_cert,
    connect,
    service_instance,
):
    """
    Adds host to a cluster on the vCenter instance.
    Returns connection state of host.

    host
        IP address or hostname of ESXI instance.

    root_user
        Username with root privilege to ESXi instance.

    password
        Password for the root user.

    cluster_name
        Name of cluster ESXi host is being added to.

    datacenter_name
        Datacenter that contains cluster that ESXi instance is being added to.

    verify_host_cert
        Validates the host's SSL certificate is signed by a CA, and that the hostname in the certificate matches the host.

    connect
        Specifies whether host should be connected after being added.

    service_instance
        The Service Instance Object to place host on.
    """
    dc_ref = utils_common.get_datacenter(service_instance, datacenter_name)
    cluster_ref = utils_cluster.get_cluster(dc_ref, cluster_name)
    connect_spec = vim.host.ConnectSpec()
    # vCenter requires the host's SSL thumbprint in the connect spec.
    connect_spec.sslThumbprint = _get_host_thumbprint(host, verify_host_cert)
    connect_spec.hostName = host
    connect_spec.userName = root_user
    connect_spec.password = password
    task = cluster_ref.AddHost_Task(connect_spec, connect)
    host_ref = utils_common.wait_for_task(task, host, "add host task")
    return host_ref.summary.runtime.connectionState
def get_host(host, service_instance):
    """
    Returns the vim.HostSystem managed object matching *host*.

    host
        Name of ESXi instance in vCenter.

    service_instance
        The Service Instance Object from which to obtain the host.
    """
    host_ref = utils_common.get_mor_by_property(service_instance, vim.HostSystem, host)
    return host_ref
|
<gh_stars>1-10
#!/usr/bin/env python2
# coding: utf-8
"""Test Taint."""
import unittest
from triton import ARCH, Instruction, MemoryAccess, TritonContext
class TestTaint(unittest.TestCase):
    """Testing the taint engine.

    Each test builds a fresh TritonContext, taints registers/memory, and
    checks how taint propagates through assignment and union semantics.
    """

    def test_known_issues(self):
        """Check tainting result after processing."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86)

        Triton.taintRegister(Triton.registers.eax)
        inst = Instruction()
        # lea eax,[esi+eax*1]
        inst.setOpcode("\x8D\x04\x06")
        Triton.processing(inst)
        # eax is both a source and the destination, so its taint must survive.
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.eax))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.ebx))

    def test_taint_memory(self):
        """Check tainting memory."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isMemoryTainted(0x1000))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        Triton.taintMemory(0x1000)
        Triton.taintMemory(MemoryAccess(0x2000, 4))

        # A 4-byte taint at 0x2000 covers every access overlapping
        # [0x2000, 0x2003]; neighbours outside that range stay clean.
        self.assertTrue(Triton.isMemoryTainted(0x1000))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 2)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2001, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2002, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2003, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2002, 2)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2003, 2)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x1fff, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2004, 1)))
        self.assertFalse(Triton.isMemoryTainted(0x1001))
        self.assertFalse(Triton.isMemoryTainted(0x0fff))

        Triton.untaintMemory(0x1000)
        Triton.untaintMemory(MemoryAccess(0x2000, 4))

        self.assertFalse(Triton.isMemoryTainted(0x1000))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 2)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2001, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2002, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2003, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2002, 2)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2003, 2)))

    def test_taint_register(self):
        """Check over tainting register."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintRegister(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.untaintRegister(Triton.registers.rax)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        # Tainting a sub-register (ah) taints the whole parent register.
        Triton.taintRegister(Triton.registers.ah)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.eax))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.ax))

        Triton.untaintRegister(Triton.registers.ah)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.eax))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.ax))

    def test_taint_assignement_memory_immediate(self):
        """Check tainting assignment memory <- immediate."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        Triton.taintMemory(0x1000)
        self.assertTrue(Triton.isMemoryTainted(0x1000))

        # Assigning an (untainted) immediate clears taint on the target bytes.
        Triton.taintAssignmentMemoryImmediate(MemoryAccess(0x1000, 1))
        self.assertFalse(Triton.isMemoryTainted(0x1000))

        Triton.taintMemory(0x1000)
        self.assertTrue(Triton.isMemoryTainted(0x1000))
        # The 2-byte write at 0x0fff overlaps 0x1000, so it clears it too.
        Triton.taintAssignmentMemoryImmediate(MemoryAccess(0x0fff, 2))
        self.assertFalse(Triton.isMemoryTainted(0x1000))

        Triton.taintMemory(0x1000)
        self.assertTrue(Triton.isMemoryTainted(0x1000))
        # The 2-byte write at 0x0ffe ends at 0x0fff and misses 0x1000.
        Triton.taintAssignmentMemoryImmediate(MemoryAccess(0x0ffe, 2))
        self.assertTrue(Triton.isMemoryTainted(0x1000))

        Triton.taintMemory(MemoryAccess(0x1000, 4))
        self.assertTrue(Triton.isMemoryTainted(0x1000))
        self.assertTrue(Triton.isMemoryTainted(0x1001))
        self.assertTrue(Triton.isMemoryTainted(0x1002))
        self.assertTrue(Triton.isMemoryTainted(0x1003))
        self.assertFalse(Triton.isMemoryTainted(0x1004))

        # Only the single overwritten byte loses its taint.
        Triton.taintAssignmentMemoryImmediate(MemoryAccess(0x1001, 1))
        self.assertTrue(Triton.isMemoryTainted(0x1000))
        self.assertFalse(Triton.isMemoryTainted(0x1001))
        self.assertTrue(Triton.isMemoryTainted(0x1002))
        self.assertTrue(Triton.isMemoryTainted(0x1003))

        Triton.taintAssignmentMemoryImmediate(MemoryAccess(0x1000, 4))
        self.assertFalse(Triton.isMemoryTainted(0x1000))
        self.assertFalse(Triton.isMemoryTainted(0x1001))
        self.assertFalse(Triton.isMemoryTainted(0x1002))
        self.assertFalse(Triton.isMemoryTainted(0x1003))

    def test_taint_assignement_memory_memory(self):
        """Check tainting assignment memory <- memory."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        Triton.taintMemory(MemoryAccess(0x2000, 1))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))

        # Destination inherits the taint of the source.
        Triton.taintAssignmentMemoryMemory(MemoryAccess(0x1000, 1), MemoryAccess(0x2000, 1))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x1000, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))

        # Copying from a clean source clears the destination.
        Triton.taintAssignmentMemoryMemory(MemoryAccess(0x1000, 1), MemoryAccess(0x3000, 1))
        Triton.taintAssignmentMemoryMemory(MemoryAccess(0x2000, 1), MemoryAccess(0x3000, 1))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x1000, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))

        Triton.taintMemory(MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        Triton.taintAssignmentMemoryMemory(MemoryAccess(0x2001, 2), MemoryAccess(0x3000, 1))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2001, 1)))
        # NOTE(review): the next two lines duplicate the previous two
        # assertions; 0x2002/0x2003 checks may have been intended — confirm.
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2001, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))

    def test_taint_assignement_memory_register(self):
        """Check tainting assignment memory <- register."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        Triton.taintMemory(MemoryAccess(0x2000, 8))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 8)))

        # Writing an untainted register clears only the written bytes.
        Triton.taintAssignmentMemoryRegister(MemoryAccess(0x2002, 2), Triton.registers.ax)
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2001, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2002, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2003, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2004, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2005, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2006, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2007, 1)))

        Triton.taintMemory(MemoryAccess(0x2000, 8))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 8)))

        # An 8-byte write at 0x1fff covers 0x1fff..0x2006; 0x2007 keeps taint.
        Triton.taintAssignmentMemoryRegister(MemoryAccess(0x1fff, 8), Triton.registers.rax)
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x1fff, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2001, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2002, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2003, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2004, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2005, 1)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2006, 1)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2007, 1)))

    def test_taint_assignement_register_immediate(self):
        """Check tainting assignment register <- immediate."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintRegister(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        # Immediates are never tainted, so the assignment clears the register.
        Triton.taintAssignmentRegisterImmediate(Triton.registers.rax)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

    def test_taint_assignement_register_memory(self):
        """Check tainting assignment register <- memory."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintRegister(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        # Loading from clean memory clears the register.
        Triton.taintAssignmentRegisterMemory(Triton.registers.rax, MemoryAccess(0x2000, 8))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        Triton.taintMemory(MemoryAccess(0x2000, 8))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 8)))

        # Loading from tainted memory taints the register.
        Triton.taintAssignmentRegisterMemory(Triton.registers.rax, MemoryAccess(0x2000, 8))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        Triton.taintAssignmentRegisterMemory(Triton.registers.rax, MemoryAccess(0x3000, 8))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

    def test_taint_assignement_register_register(self):
        """Check tainting assignment register <- register."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintRegister(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        # Self-assignment preserves the current taint state.
        Triton.taintAssignmentRegisterRegister(Triton.registers.rax, Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        Triton.untaintRegister(Triton.registers.rax)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        Triton.taintAssignmentRegisterRegister(Triton.registers.rax, Triton.registers.rax)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rbx))
        Triton.taintRegister(Triton.registers.rbx)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rbx))

        # Destination register inherits the source register's taint.
        Triton.taintAssignmentRegisterRegister(Triton.registers.rax, Triton.registers.rbx)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

    def test_taint_union_memory_immediate(self):
        """Check tainting union memory U immediate."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        Triton.taintMemory(MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        # Union with an immediate never removes existing taint.
        Triton.taintUnionMemoryImmediate(MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        Triton.untaintMemory(MemoryAccess(0x2000, 4))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

    def test_taint_union_memory_memory(self):
        """Check tainting union memory U memory."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        Triton.taintMemory(MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        # T U !T: destination stays tainted, source stays clean.
        Triton.taintUnionMemoryMemory(MemoryAccess(0x2000, 4), MemoryAccess(0x3000, 4))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x3000, 4)))

        Triton.untaintMemory(MemoryAccess(0x2000, 4))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        # !T U !T: nothing becomes tainted.
        Triton.taintUnionMemoryMemory(MemoryAccess(0x2000, 4), MemoryAccess(0x3000, 4))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x3000, 4)))

        # !T U T: taint propagates from the source.
        Triton.taintMemory(MemoryAccess(0x3000, 4))
        Triton.taintUnionMemoryMemory(MemoryAccess(0x2000, 4), MemoryAccess(0x3000, 4))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x3000, 4)))

    def test_taint_union_memory_register(self):
        """Check tainting union memory U register."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        Triton.taintMemory(MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        Triton.taintUnionMemoryRegister(MemoryAccess(0x2000, 4), Triton.registers.rax)
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        Triton.untaintMemory(MemoryAccess(0x2000, 4))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        Triton.taintUnionMemoryRegister(MemoryAccess(0x2000, 4), Triton.registers.rax)
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        # A tainted register spreads taint into the memory operand.
        Triton.taintRegister(Triton.registers.rax)
        Triton.taintUnionMemoryRegister(MemoryAccess(0x2000, 4), Triton.registers.rax)
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

    def test_taint_union_register_immediate(self):
        """Check tainting union register U immediate."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintRegister(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        # Union with an immediate leaves the register's taint unchanged.
        Triton.taintUnionRegisterImmediate(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        Triton.untaintRegister(Triton.registers.rax)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintUnionRegisterImmediate(Triton.registers.rax)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

    def test_taint_union_register_memory(self):
        """Check tainting union register U memory."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintRegister(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        # T U !T
        Triton.taintUnionRegisterMemory(Triton.registers.rax, MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        Triton.untaintRegister(Triton.registers.rax)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

        # !T U !T
        Triton.taintUnionRegisterMemory(Triton.registers.rax, MemoryAccess(0x2000, 4))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        # !T U T
        Triton.untaintRegister(Triton.registers.rax)
        Triton.taintMemory(MemoryAccess(0x2000, 4))
        Triton.taintUnionRegisterMemory(Triton.registers.rax, MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

        # T U T
        Triton.taintRegister(Triton.registers.rax)
        Triton.taintMemory(MemoryAccess(0x2000, 4))
        Triton.taintUnionRegisterMemory(Triton.registers.rax, MemoryAccess(0x2000, 4))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))

    def test_taint_union_register_register(self):
        """Check tainting union register U register."""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.taintRegister(Triton.registers.rax)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        # T U !T
        Triton.taintUnionRegisterRegister(Triton.registers.rax, Triton.registers.rbx)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rbx))

        # T U T
        Triton.taintRegister(Triton.registers.rbx)
        Triton.taintUnionRegisterRegister(Triton.registers.rax, Triton.registers.rbx)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rbx))

        # !T U T
        Triton.untaintRegister(Triton.registers.rax)
        Triton.taintRegister(Triton.registers.rbx)
        Triton.taintUnionRegisterRegister(Triton.registers.rax, Triton.registers.rbx)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rbx))

        # !T U !T
        Triton.untaintRegister(Triton.registers.rax)
        Triton.untaintRegister(Triton.registers.rbx)
        Triton.taintUnionRegisterRegister(Triton.registers.rax, Triton.registers.rbx)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rbx))

    def test_taint_get_tainted_registers(self):
        """Get tainted registers"""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        r = Triton.getTaintedRegisters()
        self.assertTrue(len(r) == 0)

        Triton.taintRegister(Triton.registers.eax)
        Triton.taintRegister(Triton.registers.ax)
        Triton.taintRegister(Triton.registers.rbx)
        Triton.taintRegister(Triton.registers.cl)
        Triton.taintRegister(Triton.registers.di)

        # Sub-register taints are reported via their parent registers.
        r = Triton.getTaintedRegisters()
        self.assertTrue(Triton.registers.rax in r)
        self.assertTrue(Triton.registers.rbx in r)
        self.assertTrue(Triton.registers.rcx in r)
        self.assertTrue(Triton.registers.rdi in r)

    def test_taint_get_tainted_memory(self):
        """Get tainted memory"""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        m = Triton.getTaintedMemory()
        self.assertTrue(len(m) == 0)

        Triton.taintMemory(0x1000)
        Triton.taintMemory(0x2000)
        Triton.taintMemory(0x3000)
        Triton.taintMemory(MemoryAccess(0x4000, 4))

        # Multi-byte accesses expand into one entry per tainted byte.
        m = Triton.getTaintedMemory()
        self.assertTrue(0x1000 in m)
        self.assertTrue(0x2000 in m)
        self.assertTrue(0x3000 in m)
        self.assertTrue(0x4000 in m)
        self.assertTrue(0x4001 in m)
        self.assertTrue(0x4002 in m)
        self.assertTrue(0x4003 in m)
        self.assertFalse(0x5000 in m)

    def test_taint_set_register(self):
        """Set taint register"""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.setTaintRegister(Triton.registers.rax, True)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.setTaintRegister(Triton.registers.rax, False)
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))

    def test_taint_set_memory(self):
        """Set taint memory"""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertFalse(Triton.isMemoryTainted(0x1000))
        Triton.setTaintMemory(MemoryAccess(0x1000, 1), True)
        self.assertTrue(Triton.isMemoryTainted(0x1000))
        Triton.setTaintMemory(MemoryAccess(0x1000, 1), False)
        self.assertFalse(Triton.isMemoryTainted(0x1000))

    def test_taint_off_on(self):
        """Taint off / on"""
        Triton = TritonContext()
        Triton.setArchitecture(ARCH.X86_64)

        self.assertTrue(Triton.isTaintEngineEnabled())
        self.assertFalse(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.setTaintRegister(Triton.registers.rax, True)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))

        # With the engine disabled, existing taint is kept but frozen:
        # further setTaintRegister calls have no effect.
        Triton.enableTaintEngine(False)
        self.assertFalse(Triton.isTaintEngineEnabled())
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
        Triton.setTaintRegister(Triton.registers.rax, False)
        self.assertTrue(Triton.isRegisterTainted(Triton.registers.rax))
|
<gh_stars>1-10
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
#from control import matlab
def decimate(data, fs_befor, fs_after):
    """Downsample *data* from sampling rate *fs_befor* to *fs_after*.

    data
        1-D sample array.
    fs_befor
        Original sampling frequency (Hz).
    fs_after
        Target sampling frequency (Hz).

    Returns the decimated array. For very low target rates (<= 8 Hz) the
    decimation is done in two stages (first down to 8 Hz, then to the
    target), since scipy recommends keeping individual IIR decimation
    factors small.
    """
    # Fix: import under an alias so the function's own name is not shadowed
    # by the scipy helper (the original re-bound `decimate` locally).
    from scipy.signal import decimate as _sp_decimate

    if fs_after <= 8:
        # Two-stage decimation; factors are truncated to int as before.
        data_ = _sp_decimate(data, int(fs_befor / 8), ftype='iir')
        data_ = _sp_decimate(data_, int(8 / fs_after), ftype='iir')
    else:
        data_ = _sp_decimate(data, int(fs_befor / fs_after), ftype='iir')
    return data_
def butter_bandpass_filter(data, lowcut, highcut, fs, order=4):
    """Apply a Butterworth band-pass filter to *data*.

    data
        1-D sample array.
    lowcut, highcut
        Pass-band edges in Hz.
    fs
        Sampling frequency in Hz.
    order
        Filter order (default 4).
    """
    nyquist = 0.5 * fs
    # Normalise the band edges to the Nyquist frequency for scipy.
    band = [lowcut / nyquist, highcut / nyquist]
    num, den = butter(order, band, btype='band')
    return lfilter(num, den, data)
def bandpass(data, lowcut, highcut, fs, order=4):
    """Filter *data* with a Butterworth filter and return ``(y, b, a)``.

    data
        1-D sample array.
    lowcut, highcut
        Cutoff frequencies in Hz; either may be None to select a
        single-sided filter (see below).
    fs
        Sampling frequency in Hz.
    order
        Filter order (default 4).

    Returns the filtered signal plus the numerator/denominator
    coefficients of the designed filter.
    """
    nyq = 0.5 * fs
    # Fix: compare to None with `is`, not `==` (PEP 8; `==` can be hijacked
    # by operator overloading on array-like inputs).
    if highcut is None:
        # NOTE(review): branch selection looks swapped relative to the
        # argument names (missing highcut -> low-pass at lowcut); behavior
        # kept as-is — confirm intended semantics with callers.
        b, a = butter(order, lowcut / nyq, btype='low')
    elif lowcut is None:
        b, a = butter(order, highcut / nyq, btype='high')
    else:
        b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
    y = lfilter(b, a, data)
    return y, b, a
def filt_iirpeak(dic, fs, f0, Q, plot=False):
    """Run each channel of *dic* through an IIR peak filter centred at *f0*.

    dic
        Mapping of channel name -> 1-D sample array.
    fs
        Sampling frequency in Hz.
    f0
        Peak (resonant) frequency in Hz.
    Q
        Quality factor of the peak filter.
    plot
        When True, also save the filter's frequency response to 'hoge.png'.

    Returns a new dict with the filtered channels.
    """
    # Normalised peak frequency (relative to Nyquist).
    w0 = f0 / (fs / 2)
    num, den = signal.iirpeak(w0, Q)
    filtered = {}
    for key in dic.keys():
        filtered[key] = signal.lfilter(num, den, dic[key])
    if plot == True:
        w, h = signal.freqz(num, den, worN=10000)
        freq = w * fs / (2 * np.pi)
        fig, ax = plt.subplots(2, 1, figsize=(8, 6))
        ax[0].semilogx(freq, 20 * np.log10(abs(h)), color='blue')
        ax[0].set_title("Frequency Response")
        ax[0].set_ylabel("Amplitude (dB)", color='blue')
        ax[0].grid()
        ax[1].semilogx(freq, np.unwrap(np.angle(h)) * 180 / np.pi, color='green')
        ax[1].set_ylabel("Angle (degrees)", color='green')
        ax[1].set_xlabel("Frequency (Hz)")
        ax[1].grid()
        plt.savefig('hoge.png')
        plt.close()
    return filtered
def filt_butterBandPass(dic, fs, lowcut, highcut, order, plot=False):
    """Band-pass filter every channel of *dic* with a Butterworth design.

    dic
        Mapping of channel name -> 1-D sample array.
    fs
        Sampling frequency in Hz.
    lowcut, highcut
        Pass-band edges in Hz.
    order
        Butterworth filter order.
    plot
        When True, also save the filter's frequency response to 'hoge.png'.

    Returns a new dict with the filtered channels.
    """
    nyq = 0.5 * fs
    # Normalise edges to the Nyquist frequency for scipy.
    low = lowcut / nyq
    high = highcut / nyq
    num, den = butter(order, [low, high], btype='band')
    data = {}
    for key in dic.keys():
        data[key] = signal.lfilter(num, den, dic[key])
    if plot == True:
        w, h = signal.freqz(num, den, worN=1000)
        freq = w * fs / (2 * np.pi)
        fig, ax = plt.subplots(2, 1, figsize=(8, 6))
        ax[0].semilogx(freq, 20 * np.log10(abs(h)), color='blue')
        ax[0].set_title("Frequency Response")
        ax[0].set_ylabel("Amplitude (dB)", color='blue')
        ax[0].grid()
        ax[1].semilogx(freq, np.unwrap(np.angle(h)) * 180 / np.pi, color='green')
        ax[1].set_ylabel("Angle (degrees)", color='green')
        ax[1].set_xlabel("Frequency (Hz)")
        ax[1].grid()
        plt.savefig('hoge.png')
        plt.close()
    return data
|
<gh_stars>0
"""Switches for AVM Fritz!Box functions."""
from __future__ import annotations
import logging
from typing import Any
import xmltodict
from homeassistant.components.network import async_get_source_ip
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, Entity, EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import slugify
from .common import (
AvmWrapper,
FritzBoxBaseEntity,
FritzData,
FritzDevice,
FritzDeviceBase,
SwitchInfo,
device_filter_out_from_trackers,
)
from .const import (
DATA_FRITZ,
DOMAIN,
SWITCH_TYPE_DEFLECTION,
SWITCH_TYPE_PORTFORWARD,
SWITCH_TYPE_WIFINETWORK,
WIFI_STANDARD,
MeshRoles,
)
_LOGGER = logging.getLogger(__name__)
def deflection_entities_list(
    avm_wrapper: AvmWrapper, device_friendly_name: str
) -> list[FritzBoxDeflectionSwitch]:
    """Get list of deflection entities."""
    _LOGGER.debug("Setting up %s switches", SWITCH_TYPE_DEFLECTION)

    num_response = avm_wrapper.get_ontel_num_deflections()
    if not num_response:
        _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION)
        return []

    _LOGGER.debug(
        "Specific %s response: GetNumberOfDeflections=%s",
        SWITCH_TYPE_DEFLECTION,
        num_response,
    )
    if num_response["NewNumberOfDeflections"] == 0:
        _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_DEFLECTION)
        return []

    raw_deflections = avm_wrapper.get_ontel_deflections()
    if not raw_deflections:
        return []

    items = xmltodict.parse(raw_deflections["NewDeflectionList"])["List"]["Item"]
    if not isinstance(items, list):
        # A single deflection is parsed as a plain dict, not a 1-element list.
        items = [items]
    return [
        FritzBoxDeflectionSwitch(avm_wrapper, device_friendly_name, item)
        for item in items
    ]
def port_entities_list(
    avm_wrapper: AvmWrapper, device_friendly_name: str, local_ip: str
) -> list[FritzBoxPortSwitch]:
    """Get list of port forwarding entities.

    Only forwards whose internal client matches *local_ip* (this device)
    are turned into switches.
    """
    _LOGGER.debug("Setting up %s switches", SWITCH_TYPE_PORTFORWARD)
    entities_list: list[FritzBoxPortSwitch] = []
    if not avm_wrapper.device_conn_type:
        _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_PORTFORWARD)
        return []

    # Query port forwardings and setup a switch for each forward for the current device
    resp = avm_wrapper.get_num_port_mapping(avm_wrapper.device_conn_type)
    if not resp:
        # Fix: log message previously referenced SWITCH_TYPE_DEFLECTION
        # (copy-paste error); this is the port-forward setup path.
        _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_PORTFORWARD)
        return []

    port_forwards_count: int = resp["NewPortMappingNumberOfEntries"]
    _LOGGER.debug(
        "Specific %s response: GetPortMappingNumberOfEntries=%s",
        SWITCH_TYPE_PORTFORWARD,
        port_forwards_count,
    )
    _LOGGER.debug("IP source for %s is %s", avm_wrapper.host, local_ip)

    for i in range(port_forwards_count):
        portmap = avm_wrapper.get_port_mapping(avm_wrapper.device_conn_type, i)
        if not portmap:
            # Fix: same copy-paste error as above.
            _LOGGER.debug("The FRITZ!Box has no %s options", SWITCH_TYPE_PORTFORWARD)
            continue

        _LOGGER.debug(
            "Specific %s response: GetGenericPortMappingEntry=%s",
            SWITCH_TYPE_PORTFORWARD,
            portmap,
        )

        # We can only handle port forwards of the given device
        if portmap["NewInternalClient"] == local_ip:
            port_name = portmap["NewPortMappingDescription"]
            for entity in entities_list:
                if entity.port_mapping and (
                    port_name in entity.port_mapping["NewPortMappingDescription"]
                ):
                    # Disambiguate duplicate descriptions with the external port.
                    port_name = f"{port_name} {portmap['NewExternalPort']}"
            entities_list.append(
                FritzBoxPortSwitch(
                    avm_wrapper,
                    device_friendly_name,
                    portmap,
                    port_name,
                    i,
                    avm_wrapper.device_conn_type,
                )
            )

    return entities_list
def wifi_entities_list(
    avm_wrapper: AvmWrapper, device_friendly_name: str
) -> list[FritzBoxWifiSwitch]:
    """Get list of wifi entities."""
    _LOGGER.debug("Setting up %s switches", SWITCH_TYPE_WIFINETWORK)

    # Service layout documented in:
    # https://avm.de/fileadmin/user_upload/Global/Service/Schnittstellen/wlanconfigSCPD.pdf
    wifi_count = sum(
        1
        for svc in avm_wrapper.connection.services
        if svc.startswith("WLANConfiguration")
    )
    _LOGGER.debug("WiFi networks count: %s", wifi_count)

    networks: dict = {}
    for idx in range(1, wifi_count + 1):
        network_info = avm_wrapper.connection.call_action(
            f"WLANConfiguration{idx}", "GetInfo"
        )
        # Devices with 4 WLAN services use the 2nd for internal communications.
        if wifi_count == 4 and idx == 2:
            continue
        networks[idx] = {
            "ssid": network_info["NewSSID"],
            "bssid": network_info["NewBSSID"],
            "standard": network_info["NewStandard"],
            "enabled": network_info["NewEnable"],
            "status": network_info["NewStatus"],
        }

    for idx, network in list(networks.items()):
        name = network["ssid"]
        duplicates = [j for j, n in networks.items() if n["ssid"] == name]
        if len(duplicates) > 1:
            # Same SSID on multiple bands: append the WiFi standard.
            name += f" ({WIFI_STANDARD[idx]})"
        networks[idx]["switch_name"] = name

    _LOGGER.debug("WiFi networks list: %s", networks)
    return [
        FritzBoxWifiSwitch(
            avm_wrapper, device_friendly_name, index, data["switch_name"]
        )
        for index, data in networks.items()
    ]
def profile_entities_list(
    avm_wrapper: AvmWrapper,
    data_fritz: FritzData,
) -> list[FritzBoxProfileSwitch]:
    """Add new tracker entities from the AVM device."""
    new_profiles: list[FritzBoxProfileSwitch] = []

    # Without the host-filter service there are no profiles to switch.
    if "X_AVM-DE_HostFilter1" not in avm_wrapper.connection.services:
        return new_profiles

    known = data_fritz.profile_switches
    if avm_wrapper.unique_id not in known:
        known[avm_wrapper.unique_id] = set()

    for mac, device in avm_wrapper.devices.items():
        if device_filter_out_from_trackers(mac, device, known.values()):
            continue
        new_profiles.append(FritzBoxProfileSwitch(avm_wrapper, device))
        known[avm_wrapper.unique_id].add(mac)

    return new_profiles
def all_entities_list(
    avm_wrapper: AvmWrapper,
    device_friendly_name: str,
    data_fritz: FritzData,
    local_ip: str,
) -> list[Entity]:
    """Get a list of all entities."""
    # Mesh slaves are managed by the master box; create nothing for them.
    if avm_wrapper.mesh_role == MeshRoles.SLAVE:
        return []
    entities: list[Entity] = []
    entities.extend(deflection_entities_list(avm_wrapper, device_friendly_name))
    entities.extend(port_entities_list(avm_wrapper, device_friendly_name, local_ip))
    entities.extend(wifi_entities_list(avm_wrapper, device_friendly_name))
    entities.extend(profile_entities_list(avm_wrapper, data_fritz))
    return entities
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up entry."""
    _LOGGER.debug("Setting up switches")
    avm_wrapper: AvmWrapper = hass.data[DOMAIN][entry.entry_id]
    data_fritz: FritzData = hass.data[DATA_FRITZ]
    _LOGGER.debug("Fritzbox services: %s", avm_wrapper.connection.services)
    # Resolve the local source IP used to reach the box; port switches need it.
    local_ip = await async_get_source_ip(avm_wrapper.hass, target_ip=avm_wrapper.host)
    # Entity discovery performs blocking network calls, so run it in the executor.
    entities_list = await hass.async_add_executor_job(
        all_entities_list,
        avm_wrapper,
        entry.title,
        data_fritz,
        local_ip,
    )
    async_add_entities(entities_list)
    @callback
    def update_avm_device() -> None:
        """Update the values of the AVM device."""
        # Only profile switches are created dynamically for newly seen devices.
        async_add_entities(profile_entities_list(avm_wrapper, data_fritz))
    # Re-run profile discovery whenever the wrapper signals a new device.
    entry.async_on_unload(
        async_dispatcher_connect(hass, avm_wrapper.signal_device_new, update_avm_device)
    )
class FritzBoxBaseSwitch(FritzBoxBaseEntity):
    """Fritz switch base class.

    Concrete subclasses supply a ``SwitchInfo`` dict with two async
    callables: ``callback_update`` to refresh state and
    ``callback_switch`` to toggle the underlying feature.
    """
    # Default state; refreshed by the subclass-provided update callback.
    _attr_is_on: bool | None = False
    def __init__(
        self,
        avm_wrapper: AvmWrapper,
        device_friendly_name: str,
        switch_info: SwitchInfo,
    ) -> None:
        """Init Fritzbox port switch."""
        super().__init__(avm_wrapper, device_friendly_name)
        self._description = switch_info["description"]
        self._friendly_name = switch_info["friendly_name"]
        self._icon = switch_info["icon"]
        self._type = switch_info["type"]
        # Async callables supplied by the subclass (see class docstring).
        self._update = switch_info["callback_update"]
        self._switch = switch_info["callback_switch"]
        self._name = f"{self._friendly_name} {self._description}"
        self._unique_id = f"{self._avm_wrapper.unique_id}-{slugify(self._description)}"
        self._attributes: dict[str, str] = {}
        self._is_available = True
    @property
    def name(self) -> str:
        """Return name."""
        return self._name
    @property
    def icon(self) -> str:
        """Return icon."""
        return self._icon
    @property
    def unique_id(self) -> str:
        """Return unique id."""
        return self._unique_id
    @property
    def available(self) -> bool:
        """Return availability."""
        return self._is_available
    @property
    def extra_state_attributes(self) -> dict[str, str]:
        """Return device attributes."""
        return self._attributes
    async def async_update(self) -> None:
        """Update data."""
        _LOGGER.debug("Updating '%s' (%s) switch state", self.name, self._type)
        await self._update()
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on switch."""
        await self._async_handle_turn_on_off(turn_on=True)
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off switch."""
        await self._async_handle_turn_on_off(turn_on=False)
    async def _async_handle_turn_on_off(self, turn_on: bool) -> None:
        """Handle switch state change request."""
        await self._switch(turn_on)
        # Optimistically mirror the requested state; the next update corrects it.
        self._attr_is_on = turn_on
class FritzBoxPortSwitch(FritzBoxBaseSwitch, SwitchEntity):
    """Defines a FRITZ!Box Tools PortForward switch."""
    def __init__(
        self,
        avm_wrapper: AvmWrapper,
        device_friendly_name: str,
        port_mapping: dict[str, Any] | None,
        port_name: str,
        idx: int,
        connection_type: str,
    ) -> None:
        """Init Fritzbox port switch."""
        self._avm_wrapper = avm_wrapper
        self._attributes = {}
        self.connection_type = connection_type
        self.port_mapping = port_mapping  # dict in the format as it comes from fritzconnection. eg: {'NewRemoteHost': '0.0.0.0', 'NewExternalPort': 22, 'NewProtocol': 'TCP', 'NewInternalPort': 22, 'NewInternalClient': '192.168.178.31', 'NewEnabled': True, 'NewPortMappingDescription': 'Beast SSH ', 'NewLeaseDuration': 0}
        self._idx = idx  # needed for update routine
        self._attr_entity_category = EntityCategory.CONFIG
        # NOTE(review): when no mapping is supplied, the base-class __init__ is
        # never called, leaving the entity partially initialised — confirm
        # callers never register such an instance.
        if port_mapping is None:
            return
        switch_info = SwitchInfo(
            description=f"Port forward {port_name}",
            friendly_name=device_friendly_name,
            icon="mdi:check-network",
            type=SWITCH_TYPE_PORTFORWARD,
            callback_update=self._async_fetch_update,
            callback_switch=self._async_switch_on_off_executor,
        )
        super().__init__(avm_wrapper, device_friendly_name, switch_info)
    async def _async_fetch_update(self) -> None:
        """Fetch updates."""
        self.port_mapping = await self._avm_wrapper.async_get_port_mapping(
            self.connection_type, self._idx
        )
        _LOGGER.debug(
            "Specific %s response: %s", SWITCH_TYPE_PORTFORWARD, self.port_mapping
        )
        # An empty/None mapping means the forward disappeared on the box side.
        if not self.port_mapping:
            self._is_available = False
            return
        self._attr_is_on = self.port_mapping["NewEnabled"] is True
        self._is_available = True
        # Expose selected mapping fields as entity attributes.
        attributes_dict = {
            "NewInternalClient": "internal_ip",
            "NewInternalPort": "internal_port",
            "NewExternalPort": "external_port",
            "NewProtocol": "protocol",
            "NewPortMappingDescription": "description",
        }
        for key, attr in attributes_dict.items():
            self._attributes[attr] = self.port_mapping[key]
    async def _async_switch_on_off_executor(self, turn_on: bool) -> bool:
        # Toggle by re-submitting the whole mapping with NewEnabled flipped.
        if self.port_mapping is None:
            return False
        # NOTE(review): "1"/"0" strings are written here while the update path
        # checks ``is True`` — verify the library normalises this value.
        self.port_mapping["NewEnabled"] = "1" if turn_on else "0"
        resp = await self._avm_wrapper.async_add_port_mapping(
            self.connection_type, self.port_mapping
        )
        return bool(resp is not None)
class FritzBoxDeflectionSwitch(FritzBoxBaseSwitch, SwitchEntity):
    """Defines a FRITZ!Box Tools call deflection switch."""
    def __init__(
        self,
        avm_wrapper: AvmWrapper,
        device_friendly_name: str,
        dict_of_deflection: Any,
    ) -> None:
        """Init Fritzbox Deflection class."""
        self._avm_wrapper = avm_wrapper
        self.dict_of_deflection = dict_of_deflection
        self._attributes = {}
        self.id = int(self.dict_of_deflection["DeflectionId"])
        self._attr_entity_category = EntityCategory.CONFIG
        switch_info = SwitchInfo(
            description=f"Call deflection {self.id}",
            friendly_name=device_friendly_name,
            icon="mdi:phone-forward",
            type=SWITCH_TYPE_DEFLECTION,
            callback_update=self._async_fetch_update,
            callback_switch=self._async_switch_on_off_executor,
        )
        super().__init__(self._avm_wrapper, device_friendly_name, switch_info)
    async def _async_fetch_update(self) -> None:
        """Fetch updates."""
        resp = await self._avm_wrapper.async_get_ontel_deflections()
        if not resp:
            self._is_available = False
            return
        # The deflection list comes back as an XML document; parse it.
        self.dict_of_deflection = xmltodict.parse(resp["NewDeflectionList"])["List"][
            "Item"
        ]
        # NOTE(review): when multiple deflections exist, the parsed list is
        # indexed by this entity's DeflectionId — assumes ids are consecutive
        # zero-based list positions; confirm.
        if isinstance(self.dict_of_deflection, list):
            self.dict_of_deflection = self.dict_of_deflection[self.id]
        _LOGGER.debug(
            "Specific %s response: NewDeflectionList=%s",
            SWITCH_TYPE_DEFLECTION,
            self.dict_of_deflection,
        )
        self._attr_is_on = self.dict_of_deflection["Enable"] == "1"
        self._is_available = True
        self._attributes["type"] = self.dict_of_deflection["Type"]
        self._attributes["number"] = self.dict_of_deflection["Number"]
        self._attributes["deflection_to_number"] = self.dict_of_deflection[
            "DeflectionToNumber"
        ]
        # Return mode sample: "eImmediately" — strip the leading "e" prefix.
        self._attributes["mode"] = self.dict_of_deflection["Mode"][1:]
        self._attributes["outgoing"] = self.dict_of_deflection["Outgoing"]
        self._attributes["phonebook_id"] = self.dict_of_deflection["PhonebookID"]
    async def _async_switch_on_off_executor(self, turn_on: bool) -> None:
        """Handle deflection switch."""
        await self._avm_wrapper.async_set_deflection_enable(self.id, turn_on)
class FritzBoxProfileSwitch(FritzDeviceBase, SwitchEntity):
    """Defines a FRITZ!Box Tools DeviceProfile switch."""
    _attr_icon = "mdi:router-wireless-settings"
    def __init__(self, avm_wrapper: AvmWrapper, device: FritzDevice) -> None:
        """Init Fritz profile."""
        super().__init__(avm_wrapper, device)
        self._attr_is_on: bool = False
        self._name = f"{device.hostname} Internet Access"
        self._attr_unique_id = f"{self._mac}_internet_access"
        self._attr_entity_category = EntityCategory.CONFIG
    @property
    def is_on(self) -> bool | None:
        """Switch status."""
        # State is read live from the wrapper's device registry, not cached.
        return self._avm_wrapper.devices[self._mac].wan_access
    @property
    def available(self) -> bool:
        """Return availability of the switch."""
        # Unknown WAN-access state means the box could not report it.
        if self._avm_wrapper.devices[self._mac].wan_access is None:
            return False
        return super().available
    @property
    def device_info(self) -> DeviceInfo:
        """Return the device information."""
        return DeviceInfo(
            connections={(CONNECTION_NETWORK_MAC, self._mac)},
            default_manufacturer="AVM",
            default_model="FRITZ!Box Tracked device",
            default_name=self.name,
            identifiers={(DOMAIN, self._mac)},
            via_device=(
                DOMAIN,
                self._avm_wrapper.unique_id,
            ),
        )
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on switch."""
        await self._async_handle_turn_on_off(turn_on=True)
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off switch."""
        await self._async_handle_turn_on_off(turn_on=False)
    async def _async_handle_turn_on_off(self, turn_on: bool) -> bool:
        """Handle switch state change request."""
        # Without an IP address the host-filter call cannot target the device.
        if not self.ip_address:
            return False
        await self._avm_wrapper.async_set_allow_wan_access(self.ip_address, turn_on)
        self.async_write_ha_state()
        return True
class FritzBoxWifiSwitch(FritzBoxBaseSwitch, SwitchEntity):
    """Defines a FRITZ!Box Tools Wifi switch."""
    def __init__(
        self,
        avm_wrapper: AvmWrapper,
        device_friendly_name: str,
        network_num: int,
        network_name: str,
    ) -> None:
        """Init Fritz Wifi switch."""
        self._avm_wrapper = avm_wrapper
        self._attributes = {}
        self._attr_entity_category = EntityCategory.CONFIG
        # 1-based index of the WLANConfiguration service this switch controls.
        self._network_num = network_num
        switch_info = SwitchInfo(
            description=f"Wi-Fi {network_name}",
            friendly_name=device_friendly_name,
            icon="mdi:wifi",
            type=SWITCH_TYPE_WIFINETWORK,
            callback_update=self._async_fetch_update,
            callback_switch=self._async_switch_on_off_executor,
        )
        super().__init__(self._avm_wrapper, device_friendly_name, switch_info)
    async def _async_fetch_update(self) -> None:
        """Fetch updates."""
        wifi_info = await self._avm_wrapper.async_get_wlan_configuration(
            self._network_num
        )
        _LOGGER.debug(
            "Specific %s response: GetInfo=%s", SWITCH_TYPE_WIFINETWORK, wifi_info
        )
        if not wifi_info:
            self._is_available = False
            return
        # NOTE(review): ``is True`` requires a real bool in the response —
        # confirm the wrapper does not return "1"/"0" strings here.
        self._attr_is_on = wifi_info["NewEnable"] is True
        self._is_available = True
        std = wifi_info["NewStandard"]
        self._attributes["standard"] = std if std else None
        self._attributes["bssid"] = wifi_info["NewBSSID"]
        self._attributes["mac_address_control"] = wifi_info[
            "NewMACAddressControlEnabled"
        ]
    async def _async_switch_on_off_executor(self, turn_on: bool) -> None:
        """Handle wifi switch."""
        await self._avm_wrapper.async_set_wlan_configuration(self._network_num, turn_on)
|
'''
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
'''
import asyncio
import typing
import datetime
import discord
import asyncpg
import humanize
from discord.ext import commands
from eris import Eris
from utils import database
from utils.context import ErisContext
from utils.time import UserFriendlyTime
from utils.menus import ErisMenuPages, SourceType
class Reminders(database.Table):
    """Schema definition for the ``reminders`` table."""
    id = database.PrimaryKeyColumn()
    # When the timer fires; indexed because the dispatcher filters/sorts on it.
    expires = database.Column(database.Datetime, index=True)
    # Creation time, defaulted server-side to UTC now.
    created = database.Column(database.Datetime, default="NOW() AT TIME ZONE 'utc'")
    # Event name; fired later as ``on_<event>_complete``.
    event = database.Column(database.String)
    # JSONB payload holding the event's positional and keyword arguments.
    extra = database.Column(database.Json, default="'{}'::jsonb")
class Timer:
    """In-memory representation of one row of the ``reminders`` table."""
    __slots__ = ('args', 'kwargs', 'event', 'id', 'created_at', 'expires')
    def __init__(self, *, record: asyncpg.Record):
        extra = record['extra']
        self.id = record['id']
        self.args = extra.get('args', [])
        self.kwargs = extra.get('kwargs', {})
        self.event = record['event']
        self.created_at = record['created']
        self.expires = record['expires']
    @classmethod
    def temporary(cls, *, expires, created, event, args, kwargs):
        """Build a Timer that has not been persisted yet (``id`` is None)."""
        fake_record = {
            'id': None,
            'extra': {'args': args, 'kwargs': kwargs},
            'event': event,
            'created': created,
            'expires': expires,
        }
        return cls(record=fake_record)
    def __eq__(self, other: typing.Any):
        if not isinstance(other, type(self)):
            return False
        return other.id == self.id
    def __repr__(self):
        return f'<Timer created={self.created_at} expires={self.expires} event={self.event!r}>'
    @property
    def delta(self) -> str:
        """Humanized distance between creation time and expiry."""
        return humanize.precisedelta(self.created_at - self.expires, format='%0.0f')
class Reminder(commands.Cog, name='Lembretes'):
    '''Comandos relacionados e lembretes e timers.'''
    def __init__(self, bot: Eris):
        self.bot = bot
        # FIX: the ``loop=`` argument to asyncio primitives was deprecated in
        # Python 3.8 and removed in 3.10 (it raised TypeError there);
        # asyncio.Event() binds to the running loop lazily.
        self._have_data = asyncio.Event()
        self._current_timer = None
        # Background task that sleeps until the next timer and dispatches it.
        self._task = bot.loop.create_task(self.dispatch_timers())
    def cog_unload(self):
        # Stop the background dispatcher when the cog is removed.
        self._task.cancel()
    async def dispatch_timers(self):
        """Long-running loop: fetch the next timer, sleep until expiry, fire it."""
        try:
            while not self.bot.is_closed():
                # Only timers expiring within 40 days are fetched; longer ones
                # are picked up on a later iteration.
                timer = self._current_timer = await self.wait_for_active_timers(days=40)
                await discord.utils.sleep_until(timer.expires)
                await self.call_timer(timer)
        except asyncio.CancelledError:
            raise
        except (OSError, discord.ConnectionClosed, asyncpg.PostgresConnectionError):
            # Transient connectivity failure: restart the dispatcher.
            self._task.cancel()
            self._task = self.bot.loop.create_task(self.dispatch_timers())
    async def call_timer(self, timer: Timer):
        """Delete the timer row and dispatch ``on_<event>_complete``."""
        sql = 'DELETE FROM reminders WHERE id = $1;'
        await self.bot.pool.execute(sql, timer.id)
        self.bot.dispatch(f'{timer.event}_complete', timer)
    async def wait_for_active_timers(self, *, days: int = 7):
        """Return the next timer, blocking until one exists."""
        timer = await self.get_active_timer(days=days)
        if timer:
            self._have_data.set()
            return timer
        # No pending timer: wait until create_timer signals new data.
        self._have_data.clear()
        self._current_timer = None
        await self._have_data.wait()
        return await self.get_active_timer(days=days)
    async def get_active_timer(self, *, days: int = 7) -> Timer:
        """Fetch the earliest timer expiring within *days*, or None."""
        sql = '''
            SELECT * FROM reminders
            WHERE expires < (CURRENT_DATE + $1::interval)
            ORDER BY expires
            LIMIT 1;
        '''
        record = await self.bot.pool.fetchrow(sql, datetime.timedelta(days=days))
        return Timer(record=record) if record else None
    async def short_timer_optimisation(self, seconds: int, timer: Timer):
        # Timers expiring within a minute are never persisted; sleep inline.
        await asyncio.sleep(seconds)
        self.bot.dispatch(f'{timer.event}_complete', timer)
    async def create_timer(self, *args, **kwargs) -> Timer:
        '''Cria um timer.
        Parameters
        ----------
        when: :class:`datetime.datetime`
            Quando o timer deve ativar.
        event: :class:`str`
            O nome do evento para ativar.
            Vai ser transformado em um evento `on_{event}_complete`
        \*args
            Os argumentos para passar no evento.
        \*\*kwargs
            As keywords para passar no evento.
        created: :class:`datetime.datetime`
            Uma keyword especial que diz o tempo da criação do timer.
            Deve fazer os timedeltas mais consistentes.
        Note
        ------
        Os argumentos e as keywords devem ser objetos JSON válidos.
        Returns
        -------
        :class:`Timer`
            O timer a ser usado.
        '''
        when, event, *args = args
        now = kwargs.pop('created', datetime.datetime.utcnow())
        # Drop microseconds so stored timedeltas stay consistent.
        when = when.replace(microsecond=0)
        now = now.replace(microsecond=0)
        timer = Timer.temporary(expires=when, created=now, event=event, args=args, kwargs=kwargs)
        delta = (when - now).total_seconds()
        if delta <= 60:
            # Short timer: skip the database round-trip entirely.
            self.bot.loop.create_task(self.short_timer_optimisation(delta, timer))
            return timer
        sql = '''
            INSERT INTO reminders (event, extra, expires, created)
            VALUES ($1, $2::jsonb, $3, $4)
            RETURNING id;
        '''
        # NOTE(review): passing a dict for the jsonb parameter assumes a JSON
        # codec is registered on the pool — confirm against bot setup.
        record = await self.bot.pool.fetchrow(sql, event, {'args': args, 'kwargs': kwargs}, when, now)
        timer.id = record[0]
        if delta <= (86400 * 40):
            # Wake the dispatcher if it is idle-waiting for data.
            self._have_data.set()
        if self._current_timer and when < self._current_timer.expires:
            # The new timer fires before the one currently being awaited:
            # restart the dispatcher so it picks the new one up.
            self._task.cancel()
            self._task = self.bot.loop.create_task(self.dispatch_timers())
        return timer
    @commands.group(invoke_without_command=True, aliases=['timer', 'remind'])
    async def reminder(self, ctx: ErisContext, *, when: UserFriendlyTime(commands.clean_content, default='...')):
        '''
        Te lembra de alguma coisa depois de uma certa quantida de tempo.
        '''
        args = (ctx.author.id, ctx.channel.id, when.arg)
        kwargs = {'created': ctx.message.created_at, 'message_id': ctx.message.id}
        timer = await self.create_timer(when.datetime, 'reminder', *args, **kwargs)
        await ctx.reply(f'Certo, em {timer.delta}: **{when.arg}**.')
    @reminder.command(name='list', ignore_extra=False)
    async def reminder_list(self, ctx: ErisContext):
        '''
        Mostra seus timers ativos.
        '''
        sql = '''
            SELECT id, expires, extra #>> '{args,2}' FROM reminders
            WHERE event = 'reminder'
            AND extra #>> '{args,0}'= $1
            ORDER BY expires
            LIMIT 10;
        '''
        fetch = await ctx.pool.fetch(sql, str(ctx.author.id))
        if len(fetch) == 0:
            return await ctx.reply('Você não possui nenhum lembrete ativo.')
        fields = []
        for reminder_id, expires, message in fetch:
            now = datetime.datetime.utcnow()
            delta = humanize.precisedelta(expires - now.replace(microsecond=0), format='%0.0f')
            field = {'name': f'[{reminder_id}] Em {delta}', 'value': message, 'inline': False}
            fields.append(field)
        menu = ErisMenuPages(fields, source=SourceType.FIELD)
        await menu.start(ctx, wait=True)
    @commands.Cog.listener()
    async def on_reminder_complete(self, timer: Timer):
        # Only respond once the bot is fully ready.
        await self.bot.wait_until_ready()
        author_id, channel_id, content = timer.args
        author = self.bot.cosmic.get_member(author_id)
        channel = self.bot.cosmic.get_channel(channel_id)
        if not channel:
            return
        # NOTE(review): ``author`` may be None if the member left the guild;
        # display_name/avatar_url below would then raise — confirm handling.
        # Hardcoded way of building the jump link to the original message.
        message_id = timer.kwargs.get('message_id')
        message_url = f'https://discord.com/channels/{self.bot.cosmic.id}/{channel.id}/{message_id}'
        messages = [
            f'Há {timer.delta}: **{content}**.',
            f'**Clique [aqui]({message_url}) para ver a mensagem.**'
        ]
        embed = discord.Embed(description='\n\n'.join(messages), colour=0x2f3136)
        embed.set_author(name=author.display_name, icon_url=author.avatar_url)
        await channel.send(author.mention, embed=embed)
def setup(bot: Eris):
    # discord.py extension entry point: register the Reminder cog.
    bot.add_cog(Reminder(bot))
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from pprint import pprint
from nltk import NgramTagger
from nltk import jsontags
from nltk.corpus import brown
import nltk
"""
5 章 単語の分類とタグ付け
37. 1つ前のタグ情報を利用するデフォルトタガーを作る
'I like to blog on Kim's blog' の blog にどうやってタグを付けるか?
a. 1つ前の単語を調べるが、現在の単語は無視するユニグラムタガーを作る
b. バックオフタガーに組み込む、組み込むのはデフォルトタガーの直前
c. 性能がどのぐらいか評価してみる
ADJ 形容詞 new, good, high, special, big, local
ADP 接置詞 on, of, at, with, by, into, under
ADV 副詞 really, already, still, early, now
CNJ 接続詞 and, or, but, if, while, although
DET 限定詞 the, a, some, most, every, no, which
EX 存在詞 there, theres
FW 外来語 dolce, ersatz, esprit, quo, maitre
MOD 助動詞 will, can, would, may, must, should
N 名詞 year, home, costs, time, Africa
NP 固有名詞 Alison, Africa, April, Washington
NUM 数詞 twenty-four, fourth, 1991, 14:24
PRO 代名詞 he, their, her, its, my, I, us
P 不変化詞 at, on, out, over per, that, up, with
TO 単語[to_] to
UH 間投詞 ah, bang, ha, whee, hmpf, oops
V 動詞 is, say, told, given, playing, would
VD 動詞(過去形) said, took, told, made, asked
VG 動詞(現在分詞) making, going, playing, working
VN 動詞(過去分詞) given, taken, begun, sung
WH wh 限定子 who, which, when, what, where, how
"""
@jsontags.register_tag
class PreviousTagTagger(NgramTagger):
    """Context-based tagger whose context is (previous tag, current word).

    Built on :class:`NgramTagger` with n=1; at sentence start the previous
    tag is ``None``. Supports the usual ``backoff`` chaining.
    """
    json_tag = 'nltk.tag.sequential.PreviousTagTagger'
    def __init__(self, train=None, model=None,
                 backoff=None, cutoff=0, verbose=False):
        # n=1: the NgramTagger machinery keys contexts on the pair
        # returned by context() below.
        NgramTagger.__init__(self, 1, train, model,
                             backoff, cutoff, verbose)
    def context(self, tokens, index, history):
        """Return the (previous tag, current word) context pair."""
        if index == 0:
            # No previous tag at sentence start.
            # (The original code assigned an unused ``previous_tag = None``
            # here; the dead assignment has been removed.)
            return None, tokens[index]
        return history[index - 1], tokens[index]
def evaluate_tagger(tagger, test_sents, unseen_sents):
    """Evaluate *tagger*, then print its backoff chain, sample tags and score.

    Parameters
    ----------
    tagger : a tagger object exposing ``evaluate``, ``tag`` and ``backoff``
    test_sents : tagged sentences used for accuracy evaluation
    unseen_sents : a plain token sequence to tag and pretty-print

    Returns
    -------
    None — results are printed to stdout.
    """
    def get_backoff_tagger_name(tagger):
        # Yield repr() of each tagger along the backoff chain.
        # FIX: the original used ``raise StopIteration()`` to end the
        # generator, which is a RuntimeError since PEP 479 (Python 3.7+);
        # simply fall off the end instead.
        while tagger is not None:
            yield repr(tagger)
            tagger = tagger.backoff
    result = tagger.evaluate(test_sents)
    print(' -> '.join(get_backoff_tagger_name(tagger)))
    pprint(tagger.tag(unseen_sents))
    print('result: %f' % result)
    print('-' * 32)
    print('')
def main():
    """Compare several tagger/backoff configurations on the Brown corpus."""
    # 90/10 train/test split over the Brown 'news' category.
    brown_tagged_sents = brown.tagged_sents(categories='news')
    brown_sents = brown.sents(categories='news')
    train_size = int(len(brown_tagged_sents) * 0.9)
    train_sents = brown_tagged_sents[:train_size]
    test_sents = brown_tagged_sents[train_size:]
    # A single held-out sentence (indexing, not slicing) used as the sample
    # to tag — despite the plural variable name.
    unseen_sents = brown_sents[train_size + 117]
    # unigram only
    unigram_tagger = nltk.UnigramTagger(train_sents, verbose=True)
    evaluate_tagger(unigram_tagger, test_sents, unseen_sents)
    # previous only
    previous_tagger = PreviousTagTagger(train_sents, verbose=True)
    evaluate_tagger(previous_tagger, test_sents, unseen_sents)
    # default tagger
    t0 = nltk.DefaultTagger('NN')
    # backoff 2
    t1 = nltk.UnigramTagger(train_sents, backoff=t0)
    t2 = nltk.BigramTagger(train_sents, backoff=t1)
    evaluate_tagger(t2, test_sents, unseen_sents)
    # backoff 3
    t1 = nltk.UnigramTagger(train_sents, backoff=t0)
    t2 = nltk.BigramTagger(train_sents, backoff=t1)
    t3 = nltk.TrigramTagger(train_sents, backoff=t2)
    evaluate_tagger(t3, test_sents, unseen_sents)
    # backoff previous 2
    t1 = PreviousTagTagger(train_sents, backoff=t0)
    t2 = nltk.BigramTagger(train_sents, backoff=t1)
    evaluate_tagger(t2, test_sents, unseen_sents)
    # backoff previous 3
    t1 = PreviousTagTagger(train_sents, backoff=t0)
    t2 = nltk.UnigramTagger(train_sents, backoff=t1)
    t3 = nltk.BigramTagger(train_sents, backoff=t2)
    evaluate_tagger(t3, test_sents, unseen_sents)
    # backoff previous 4
    t1 = PreviousTagTagger(train_sents, backoff=t0)
    t2 = nltk.UnigramTagger(train_sents, backoff=t1)
    t3 = nltk.BigramTagger(train_sents, backoff=t2)
    t4 = nltk.TrigramTagger(train_sents, backoff=t3)
    evaluate_tagger(t4, test_sents, unseen_sents)
# Script entry point: run the tagger comparison experiments.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import datetime
from copy import deepcopy
from pip_services3_commons.config.ConfigParams import ConfigParams
from pip_services3_commons.refer.Descriptor import Descriptor
from pip_services3_container.refer.ManagedReferences import ManagedReferences
# from pip_services3_mongodb.build.DefaultMongoDbFactory import DefaultMongoDbFactory
from pip_services3_rpc.build.DefaultRpcFactory import DefaultRpcFactory
from pip_facades_sample_python.build.ClientFacadeFactory import ClientFacadeFactory
from pip_facades_sample_python.clients.version1.AccountV1 import AccountV1
from pip_facades_sample_python.clients.version1.AccountsMemoryClientV1 import AccountsMemoryClientV1
from pip_facades_sample_python.clients.version1.EmailSettingsMemoryClientV1 import EmailSettingsMemoryClientV1
from pip_facades_sample_python.clients.version1.IAccountsClientV1 import IAccountsClientV1
from pip_facades_sample_python.clients.version1.IRolesClientV1 import IRolesClientV1
from pip_facades_sample_python.clients.version1.ISessionsClientV1 import ISessionsClientV1
from pip_facades_sample_python.clients.version1.ISitesClientV1 import ISitesClientV1
from pip_facades_sample_python.clients.version1.PasswordsNullClientV1 import PasswordsNullClientV1
from pip_facades_sample_python.clients.version1.RolesMemoryClientV1 import RolesMemoryClientV1
from pip_facades_sample_python.clients.version1.SessionsMemoryClientV1 import SessionsMemoryClientV1
from pip_facades_sample_python.clients.version1.SiteV1 import SiteV1
from pip_facades_sample_python.clients.version1.SitesMemoryClientV1 import SitesMemoryClientV1
from pip_facades_sample_python.services.version1.FacadeServiceV1 import FacadeServiceV1
from pip_facades_sample_python.services.version2.FacadeServiceV2 import FacadeServiceV2
from test.fixtures.TestSites import TestSites
from test.fixtures.TestUsers import TestUsers
class ReferencesTest(ManagedReferences):
    """Test references container: wires memory/null client facades and seeds
    test accounts, a site, roles and opened sessions."""
    def __init__(self):
        super(ReferencesTest, self).__init__()
        # NOTE(review): a second ClientFacadeFactory is also registered in
        # __append_dependencies(); this instance looks redundant — confirm.
        self._factory = ClientFacadeFactory()
        self.__append_dependencies()
        self.__configure_service()
        self.__create_user_and_sessions()
    def __append_dependencies(self):
        # Add factories
        self.put(None, ClientFacadeFactory())
        self.put(None, DefaultRpcFactory())
        # Add service
        self.put(None, FacadeServiceV1())
        self.put(None, FacadeServiceV2())
        # Add services
        self.put(Descriptor('pip-services-accounts', 'client', 'memory', 'default', '*'), AccountsMemoryClientV1())
        self.put(Descriptor('pip-services-sessions', 'client', 'memory', 'default', '*'), SessionsMemoryClientV1())
        self.put(Descriptor('pip-services-passwords', 'client', 'null', 'default', '*'), PasswordsNullClientV1())
        self.put(Descriptor('pip-services-roles', 'client', 'memory', 'default', '*'), RolesMemoryClientV1())
        self.put(Descriptor('pip-services-emailsettings', 'client', 'memory', 'default', '*'),
                 EmailSettingsMemoryClientV1())
        self.put(Descriptor('pip-services-sites', 'client', 'direct', 'memory', '*'), SitesMemoryClientV1())
    def __configure_service(self):
        # Configure Facade service
        service = self.get_one_required(Descriptor('pip-services', 'endpoint', 'http', 'default', '*'))
        service.configure(ConfigParams.from_tuples(
            'root_path', '',  # '/api/v1',
            'connection.protocol', 'http',
            'connection.host', 'localhost',
            'connection.port', 3000
        ))
    def __create_user_and_sessions(self):
        # Create accounts
        accounts_client: IAccountsClientV1 = self.get_one_required(
            Descriptor('pip-services-accounts', 'client', '*', '*', '*'))
        admin_user_account = AccountV1(
            id=TestUsers.AdminUserId,
            login=TestUsers.AdminUserLogin,
            name=TestUsers.AdminUserName,
            active=True,
            create_time=datetime.datetime.now()
        )
        accounts_client.create_account(None, admin_user_account)
        user_1_account = AccountV1(
            id=TestUsers.User1Id,
            login=TestUsers.User1Login,
            name=TestUsers.User1Name,
            active=True,
            create_time=datetime.datetime.now()
        )
        accounts_client.create_account(None, user_1_account)
        user_2_account = AccountV1(
            id=TestUsers.User2Id,
            login=TestUsers.User2Login,
            name=TestUsers.User2Name,
            active=True,
            create_time=datetime.datetime.now()
        )
        accounts_client.create_account(None, user_2_account)
        # Create test site(s)
        sites_client: ISitesClientV1 = self.get_one_required(Descriptor('pip-services-sites', 'client', '*', '*', '*'))
        site1 = SiteV1(
            id=TestSites.Site1Id,
            name=TestSites.Site1Name
        )
        sites_client.create_site(None, site1)
        # Create user roles
        roles_client: IRolesClientV1 = self.get_one_required(Descriptor('pip-services-roles', 'client', '*', '*', '*'))
        roles_client.set_roles(None, TestUsers.AdminUserId, ['admin', TestSites.Site1Id + ':admin'])
        roles_client.set_roles(None, TestUsers.User1Id, [TestSites.Site1Id + ':manager'])
        roles_client.set_roles(None, TestUsers.User2Id, [TestSites.Site1Id + ':user'])
        # Create opened sessions
        # NOTE(review): assigning ``.id`` on the return value assumes
        # open_session returns the stored session object — confirm.
        sessions_client: ISessionsClientV1 = self.get_one_required(
            Descriptor('pip-services-sessions', 'client', '*', '*', '*'))
        admin_user_data = deepcopy(admin_user_account)
        admin_user_data.roles = ['admin', TestSites.Site1Id + ':admin']
        sessions_client.open_session(None, TestUsers.AdminUserId, TestUsers.AdminUserName,
                                     None, None, admin_user_data, None).id = TestUsers.AdminUserSessionId
        user_1_data = deepcopy(user_1_account)
        user_1_data.roles = [TestSites.Site1Id + ':manager']
        sessions_client.open_session(None, TestUsers.User1Id, TestUsers.User1Name,
                                     None, None, user_1_data, None, ).id = TestUsers.User1SessionId
        user_2_data = deepcopy(user_2_account)
        # NOTE(review): User2 is granted ':user' via set_roles above, but the
        # session payload carries ':manager' — looks like a copy-paste slip
        # from user_1_data; confirm which role is intended.
        user_2_data.roles = [TestSites.Site1Id + ':manager']
        sessions_client.open_session(None, TestUsers.User2Id, TestUsers.User2Name,
                                     None, None, user_2_data, None, ).id = TestUsers.User2SessionId
|
<reponame>claydodo/tinkt<gh_stars>0
# -*- coding:utf-8 -*-
import six
import numpy as np
from krux.types.check import is_seq
from matplotlib import colors as mpl_colors
from .. import cmap_utils
class ColorMap(object):
    """Wrapper around matplotlib colormaps.

    Supports three construction modes selected by ``type``:
    - 'Normal': derive from an existing colormap via ``cmap_utils.get_cmap``;
    - 'Listed': a discrete :class:`ListedColormap` from explicit colors;
    - 'Linear': a :class:`LinearSegmentedColormap` built by ``_build_linear``.
    """
    def __init__(self, name='unknown',
                 type='Normal',
                 base_cmap_name=None,
                 clip_min=None, clip_max=None,
                 N=None,
                 sample_points=None,
                 colors=None,
                 positions=None,
                 bad=None, over=None, under=None,
                 **kwargs
                 ):
        # NOTE: ``type`` shadows the builtin but is part of the public
        # keyword interface, so it is kept as-is.
        self.name = name
        self.type = type
        if self.type == 'Normal':
            self.base_cmap = cmap_utils.get_cmap(base_cmap_name,
                                                 clip_min=clip_min, clip_max=clip_max,
                                                 N=N,
                                                 sample_points=sample_points,
                                                 bad=bad, over=over, under=under)
        elif self.type == 'Listed':
            if colors:
                self.base_cmap = mpl_colors.ListedColormap(colors, name=self.name, N=N)
                cmap_utils.set_under_over_bad_colors(self.base_cmap, under=under, over=over, bad=bad)
            else:
                # 'Listed' without explicit colors is not supported.
                raise NotImplementedError()
        elif self.type == 'Linear':
            if colors:
                # Extra options ('padded', 'seg') are taken from **kwargs.
                self.base_cmap = self._build_linear(name=self.name, colors=colors, positions=positions, padded=kwargs.get('padded', True), seg=kwargs.get('seg', False), N=N)
                cmap_utils.set_under_over_bad_colors(self.base_cmap, under=under, over=over, bad=bad)
            else:
                raise NotImplementedError()
    def generate(self, *args):
        """Produce a concrete matplotlib colormap for the given arguments."""
        if self.type in ('Normal', 'Linear'):
            return self._gen_normal(*args)
        elif self.type == 'Listed':
            return self._gen_listed(*args)
    def _gen_normal(self, clip_min=None, clip_max=None, N=None, *args, **kwargs):
        # Delegate clipping/resampling of the base colormap to cmap_utils.
        return cmap_utils.get_cmap(self.base_cmap, clip_min=clip_min, clip_max=clip_max, N=N)
    def _gen_listed(self, *args):
        # TODO: implement
        return self.base_cmap
    def _build_linear(self, name, colors, positions=None, padded=True, seg=False, N=None):
        """Build a LinearSegmentedColormap from anchor colors.

        Each color may be a single value or a (left, right) pair to create a
        discontinuity at its anchor.  ``seg=True`` treats colors as segments
        with boundaries at the interior positions; ``padded`` centers the
        auto-generated positions inside [0, 1].
        """
        if N is None:
            N = 256
        red = []
        green = []
        blue = []
        alpha = []
        used_pos = []
        n = len(colors)
        if positions is None:
            if seg:
                # n segments need n+1 boundaries.
                positions = np.linspace(0.0, 1.0, n+1)
            else:
                if padded:
                    # Midpoints of n equal bins over [0, 1].
                    positions = np.linspace(0.0, 1.0, 2*n+1)[1::2]
                else:
                    positions = np.linspace(0.0, 1.0, n)
        if seg:
            # Interior boundaries carry a jump from color[i-1] to color[i].
            for i in range(1, n):
                pos = positions[i]
                r1, g1, b1, a1 = mpl_colors.to_rgba(colors[i-1])
                r2, g2, b2, a2 = mpl_colors.to_rgba(colors[i])
                red.append((pos, r1, r2))
                green.append((pos, g1, g2))
                blue.append((pos, b1, b2))
                alpha.append((pos, a1, a2))
                used_pos.append(pos)
        else:
            for pos, color in zip(positions, colors):
                # A (left, right) pair creates a discontinuity at ``pos``.
                if is_seq(color) and len(color) == 2:
                    r1, g1, b1, a1 = mpl_colors.to_rgba(color[0])
                    r2, g2, b2, a2 = mpl_colors.to_rgba(color[1])
                else:
                    r1, g1, b1, a1 = mpl_colors.to_rgba(color)
                    r2, g2, b2, a2 = r1, g1, b1, a1
                red.append((pos, r1, r2))
                green.append((pos, g1, g2))
                blue.append((pos, b1, b2))
                alpha.append((pos, a1, a2))
                used_pos.append(pos)
        # Segment data must span [0, 1]: pin the first color down to 0
        # and the last color up to 1 when the anchors do not reach the ends.
        if used_pos[0] > 0.001:
            first_color = colors[0]
            if is_seq(first_color) and len(first_color) == 2:
                first_color = first_color[0]
            r, g, b, a = mpl_colors.to_rgba(first_color)
            red.insert(0, (0, r, r))
            green.insert(0, (0, g, g))
            blue.insert(0, (0, b, b))
            alpha.insert(0, (0, a, a))
        if used_pos[-1] < 0.999:
            last_color = colors[-1]
            if is_seq(last_color) and len(last_color) == 2:
                last_color = last_color[1]
            r, g, b, a = mpl_colors.to_rgba(last_color)
            red.append((1, r, r))
            green.append((1, g, g))
            blue.append((1, b, b))
            alpha.append((1, a, a))
        return mpl_colors.LinearSegmentedColormap(name, {
            'red': red,
            'green': green,
            'blue': blue,
            'alpha': alpha
        }, N=N)
|
<reponame>d-amien-b/simple-getwordpress
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 2.0
# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Ansible module documentation blocks (YAML embedded in raw strings).
DOCUMENTATION = r'''
---
module: idrac_firmware
short_description: Firmware update from a repository on a network share (CIFS, NFS).
description:
    - Update the Firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of
        available updates.
    - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs.
    - All applicable updates contained in the repository are applied to the system.
    - This feature is available only with iDRAC Enterprise License.
options:
    idrac_ip:
        description: iDRAC IP Address.
        type: str
        required: True
    idrac_user:
        description: iDRAC username.
        type: str
        required: True
    idrac_password:
        description: iDRAC user password.
        type: str
        required: True
        aliases: ['idrac_pwd']
    idrac_port:
        description: iDRAC port.
        type: int
        default: 443
    share_name:
        description: CIFS or NFS Network share.
        type: str
        required: True
    share_user:
        description: Network share user in the format 'user@domain' or 'domain\\user' if user is
            part of a domain else 'user'. This option is mandatory for CIFS Network Share.
        type: str
    share_password:
        description: Network share user password. This option is mandatory for CIFS Network Share.
        type: str
        aliases: ['share_pwd']
    share_mnt:
        description: Local mount path of the network share with read-write permission for ansible user.
            This option is mandatory for Network Share.
        type: str
        required: True
    reboot:
        description: Whether to reboot after applying the updates or not.
        type: bool
        default: false
    job_wait:
        description: Whether to wait for job completion or not.
        type: bool
        default: true
    catalog_file_name:
        required: False
        description: Catalog file name relative to the I(share_name).
        type: str
        default: 'Catalog.xml'
requirements:
    - "omsdk"
    - "python >= 2.7.5"
author: "<NAME> (@rajeevarakkal)"
'''

EXAMPLES = """
---
- name: Update firmware from repository on a Network Share
  community.general.idrac_firmware:
       idrac_ip: "192.168.0.1"
       idrac_user: "user_name"
       idrac_password: "<PASSWORD>"
       share_name: "192.168.0.0:/share"
       share_user: "share_user_name"
       share_password: "<PASSWORD>"
       share_mnt: "/mnt/share"
       reboot: True
       job_wait: True
       catalog_file_name: "Catalog.xml"
"""

RETURN = """
---
msg:
  type: str
  description: Over all firmware update status.
  returned: always
  sample: "Successfully updated the firmware."
update_status:
  type: dict
  description: Firmware Update job and progress details from the iDRAC.
  returned: success
  sample: {
        'InstanceID': 'JID_XXXXXXXXXXXX',
        'JobState': 'Completed',
        'Message': 'Job completed successfully.',
        'MessageId': 'REDXXX',
        'Name': 'Repository Update',
        'JobStartTime': 'NA',
        'Status': 'Success',
  }
"""
from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
from ansible.module_utils.basic import AnsibleModule
# omsdk is an optional runtime dependency; record its availability instead of
# failing at import time.
try:
    from omsdk.sdkcreds import UserCredentials
    from omsdk.sdkfile import FileOnShare
    HAS_OMSDK = True
except ImportError:
    # NOTE(review): HAS_OMSDK is never consulted elsewhere in this module —
    # confirm whether main() was meant to fail early when omsdk is missing.
    HAS_OMSDK = False
def _validate_catalog_file(catalog_file_name):
normilized_file_name = catalog_file_name.lower()
if not normilized_file_name:
raise ValueError('catalog_file_name should be a non-empty string.')
elif not normilized_file_name.endswith("xml"):
raise ValueError('catalog_file_name should be an XML file.')
def update_firmware(idrac, module):
    """Update firmware from a network share and return the job details.

    :param idrac: connected iDRAC handle (omsdk connection object)
    :param module: AnsibleModule carrying the validated task parameters
    :return: dict with 'changed' flag and 'update_status' job details
    """
    msg = {}
    msg['changed'] = False
    msg['update_status'] = {}
    try:
        # Remote path points directly at the catalog file on the share.
        upd_share = FileOnShare(remote=module.params['share_name'] + "/" + module.params['catalog_file_name'],
                                mount_point=module.params['share_mnt'],
                                isFolder=False,
                                creds=UserCredentials(
                                    module.params['share_user'],
                                    module.params['share_password'])
                                )
        # Prefer Redfish, except on generations 12/13 which fall back to WSMan.
        idrac.use_redfish = True
        if '12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration:
            idrac.use_redfish = False
        apply_update = True
        msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share,
                                                                 apply_update,
                                                                 module.params['reboot'],
                                                                 module.params['job_wait'])
    except RuntimeError as e:
        module.fail_json(msg=str(e))
    if "Status" in msg['update_status']:
        if msg['update_status']['Status'] == "Success":
            # Only claim a change when we actually waited for the job;
            # presumably an unwaited job's outcome is unknown — TODO confirm.
            if module.params['job_wait']:
                msg['changed'] = True
        else:
            module.fail_json(msg='Failed to update firmware.', update_status=msg['update_status'])
    # NOTE(review): a response without a "Status" key is returned silently
    # with changed=False — confirm that is intended.
    return msg
def main():
    """Module entry point: parse arguments, connect to iDRAC, run the update."""
    module = AnsibleModule(
        argument_spec={
            "idrac_ip": {"required": True, "type": 'str'},
            "idrac_user": {"required": True, "type": 'str'},
            "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
            "idrac_port": {"required": False, "default": 443, "type": 'int'},

            "share_name": {"required": True, "type": 'str'},
            "share_user": {"required": False, "type": 'str'},
            "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
            "share_mnt": {"required": True, "type": 'str'},

            "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
            "reboot": {"required": False, "type": 'bool', "default": False},
            "job_wait": {"required": False, "type": 'bool', "default": True},
        },
        supports_check_mode=False)

    try:
        # Validate the catalog file
        _validate_catalog_file(module.params['catalog_file_name'])
        # Connect to iDRAC and update firmware
        with iDRACConnection(module.params) as idrac:
            update_status = update_firmware(idrac, module)
    except (ImportError, ValueError, RuntimeError) as e:
        # fail_json exits the process, so update_status cannot be unbound below.
        module.fail_json(msg=str(e))

    module.exit_json(msg='Successfully updated the firmware.', update_status=update_status)
|
import os
from abc import ABC, ABCMeta
import json
import jsonschema
from svlib.svtools import svtools as svt
log = svt.log
class SecureVisionSchemaError(Exception):
    """Error raised when a json document does not match its corresponding
    schema.

    :param message: human-readable description of the failure
    :param payload: optional validator output with the failure details
    """

    def __init__(self, message: str, payload: str = None) -> None:
        # Forward the message to Exception so str(error) is meaningful.
        super().__init__(message)
        self.message = message
        self.payload = payload

    def __repr__(self):
        # BUG FIX: the original `self.message + self.payload` raised TypeError
        # whenever payload was None (str + None).
        if self.payload is None:
            return str(self.message)
        return str(self.message) + str(self.payload)
class XRayScanSchemaError(SecureVisionSchemaError):
    """Error raised when a document that carries an X-ray image of a carried
    object does not match its defined schema.
    """
    # The base class already stores message/payload; the former __init__
    # merely forwarded to super() and has been removed as redundant.
class ThreatPredictionSchemaError(SecureVisionSchemaError):
    """Error raised when a document that carries a prediction for an X-ray
    image does not match its defined schema.
    """
    # The base class already stores message/payload; the former __init__
    # merely forwarded to super() and has been removed as redundant.
class SchemaErrorFactory(object):
    """Factory mapping error-type names to SecureVisionSchemaError subclasses."""

    builder = {
        'XRayScan': XRayScanSchemaError,
        'ThreatPrediction': ThreatPredictionSchemaError
    }

    @classmethod
    def create_error(cls, error_type: str, **kwargs):
        """Instantiate the error registered under *error_type*.

        :param error_type: key into :attr:`builder` ('XRayScan', ...)
        :raises KeyError: if *error_type* is not registered
        """
        # Use cls (not the hard-coded class name) so subclasses overriding
        # ``builder`` behave correctly.
        return cls.builder[error_type](**kwargs)
class AbstractSchemaHelper(ABC):
    """Loads a JSON schema from disk and validates documents against it.

    Subclasses bind a concrete schema file path and the error-factory key
    used when validation fails.
    """

    def __init__(
        self,
        path_schema: str,
        error: str
    ) -> None:
        # The setter below loads and checks the schema file.
        self.schema = path_schema
        self.schema_name = path_schema.split(os.sep)[-1]
        # Key passed to SchemaErrorFactory when validation fails.
        self.error = error
        self.validator = jsonschema.Draft7Validator(self.schema)
        self.sef = SchemaErrorFactory()

    @property
    def schema(self):
        return self._schema

    @schema.setter
    def schema(self, path_schema: str):
        """It reads the schema related to the class and validates its
        correctness according to its version.

        :param path_schema: Path where from where the schema can be loaded
        :raises jsonschema.exceptions.SchemaError: if the schema itself is invalid
        """
        try:
            with open(path_schema, 'r', encoding='utf-8') as file:
                schema = json.load(file)
            # Verify the schema document itself is a valid Draft-7 schema.
            jsonschema.Draft7Validator.check_schema(schema)
            log.info(
                f"Schema definition {path_schema.split(os.sep)[-1]} is loaded "
                "and it is valid!"
            )
            self._schema = schema
        except jsonschema.exceptions.SchemaError as error:
            log.error(
                "Invalid schema definition for file: "
                f"{path_schema.split(os.sep)[-1]}!"
            )
            raise error

    def validate(self, json_data: dict):
        """Validates the passed JSON data according to the corresponding
        schema. Raises an error it the document is not valid.

        :param json_data: A document of a Kafka message
        :raises SecureVisionSchemaError: subclass selected by ``self.error``
        """
        try:
            self.validator.validate(json_data)
            log.info(f"Validated the message with schema: {self.schema_name}")
        except jsonschema.exceptions.ValidationError as error:
            # Wrap the jsonschema error into the project-specific error type.
            d = {
                "message":
                    "Document is not valid according to the defined schema!",
                "payload": f"{error}"
            }
            raise self.sef.create_error(self.error, **d)
class XRayScanSchemaHelper(AbstractSchemaHelper):
    """Schema helper bound to the X-ray scan message schema."""

    def __init__(self):
        schema_path = os.path.join(os.getcwd(), 'schemas', 'xray_scan.json')
        super().__init__(schema_path, 'XRayScan')
class ThreatPredictionSchemaHelper(AbstractSchemaHelper):
    """Schema helper bound to the threat-prediction message schema."""

    def __init__(self):
        schema_path = os.path.join(
            os.getcwd(), 'schemas', 'threat_prediction.json'
        )
        super().__init__(schema_path, 'ThreatPrediction')
class SchemaHelperFactory(object):
    """Factory mapping helper-type names to AbstractSchemaHelper subclasses."""

    builder = {
        'XRayScan': XRayScanSchemaHelper,
        'ThreatPrediction': ThreatPredictionSchemaHelper
    }

    @classmethod
    def create_helper(cls, helper_type: str):
        """Instantiate the helper registered under *helper_type*.

        :raises KeyError: if *helper_type* is not registered
        """
        # Use cls so subclasses overriding ``builder`` behave correctly.
        return cls.builder[helper_type]()
|
<reponame>lrwb-aou/curation<filename>tests/integration_tests/data_steward/cdr_cleaner/cleaning_rules/cancer_concept_suppression_test.py
"""
Integration test for cancer_concept_suppression module
This rule sandboxes and suppresses reccords whose concept_codes end in
'History_WhichConditions', 'Condition_OtherCancer', ‘History_AdditionalDiagnosis’,
and 'OutsideTravel6MonthsWhere'.
Runs on the controlled tier.
Original Issue: DC-1381
"""
# Python Imports
import os
# Third party imports
from dateutil import parser
#Project imports
from app_identity import PROJECT_ID
from cdr_cleaner.cleaning_rules.cancer_concept_suppression import CancerConceptSuppression
from tests.integration_tests.data_steward.cdr_cleaner.cleaning_rules.bigquery_tests_base import BaseTest
from common import CONCEPT, OBSERVATION
class CancerConceptSuppressionTest(BaseTest.CleaningRulesTestBase):
    """Integration test harness for the CancerConceptSuppression cleaning rule."""

    @classmethod
    def setUpClass(cls):
        """Configure project/dataset/sandbox identifiers and the rule under test."""
        print('**************************************************************')
        print(cls.__name__)
        print('**************************************************************')

        super().initialize_class_vars()

        # set the test project identifier
        project_id = os.environ.get(PROJECT_ID)
        cls.project_id = project_id

        # set the expected test datasets
        dataset_id = os.environ.get('COMBINED_DATASET_ID')
        cls.dataset_id = dataset_id
        sandbox_id = dataset_id + '_sandbox'
        cls.sandbox_id = sandbox_id

        cls.rule_instance = CancerConceptSuppression(project_id, dataset_id,
                                                     sandbox_id)

        # Register the rule's sandbox table so teardown can drop it.
        sb_table_names = cls.rule_instance.sandbox_table_for(OBSERVATION)
        cls.fq_sandbox_table_names.append(
            f'{cls.project_id}.{cls.sandbox_id}.{sb_table_names}')

        cls.fq_table_names = [
            f'{project_id}.{dataset_id}.{OBSERVATION}',
            f'{project_id}.{dataset_id}.{CONCEPT}',
        ]

        # call super to set up the client, create datasets, and create
        # empty test tables
        # NOTE: does not create empty sandbox tables.
        super().setUpClass()

    def setUp(self):
        # fq_dataset_name is "<project>.<dataset>" (fq table name minus table).
        fq_dataset_name = self.fq_table_names[0].split('.')
        self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])
        self.date = parser.parse('2020-05-05').date()
        super().setUp()

    def test_cancer_concept_suppression_cleaning(self):
        """
        Tests that the specifications for QUERYNAME perform as designed.

        Validates pre conditions, tests execution, and post conditions based on the load
        statements and the tables_and_counts variable.
        """

        queries = []

        # Concepts 43529626..43529098 (first seven observation rows) carry the
        # suppressed code suffixes; the last three are expected to survive.
        create_concepts_query_tmpl = self.jinja_env.from_string("""
        INSERT INTO `{{fq_dataset_name}}.concept`
        (concept_id, concept_name, domain_id, vocabulary_id, concept_class_id, concept_code, valid_start_date, valid_end_date)
        VALUES
        (43529626, "some text", "some text", "some text", "some text", "OutsideTravel6Month_OutsideTravel6MonthWhereTravel", date('2020-05-05'), date('2020-05-05')),
        (43529099, "some text", "some text", "some text", "some text", "OutsideTravel6Month_OutsideTravel6MonthWhere", date('2020-05-05'), date('2020-05-05')),
        (43529102, "some text", "some text", "some text", "some text", "MotherDiagnosisHistory_WhichConditions", date('2020-05-05'), date('2020-05-05')),
        (43529627, "some text", "some text", "some text", "some text", "CancerCondition_OtherCancer", date('2020-05-05'), date('2020-05-05')),
        (43529625, "some text", "some text", "some text", "some text", "FatherCancerCondition_OtherCancers", date('2020-05-05'), date('2020-05-05')),
        (43529100, "some text", "some text", "some text", "some text", "SonCancerCondition_History_AdditionalDiagnosis", date('2020-05-05'), date('2020-05-05')),
        (10821410, "some text", "some text", "some text", "some text", "Sister_History_AdditionalDiagnoses", date('2020-05-05'), date('2020-05-05')),
        (42181902, "some text", "some text", "some text", "some text", "Cancer", date('2020-05-05'), date('2020-05-05')),
        (24182910, "some text", "some text", "some text", "some text", "", date('2020-05-05'), date('2020-05-05')),
        (43529098, "some text", "some text", "some text", "some text", "FatherDiagnosisHistory_WhichConditions", date('2020-05-05'), date('2020-05-05'))
        """).render(fq_dataset_name=self.fq_dataset_name)

        drop_records_query_tmpl = self.jinja_env.from_string("""
        INSERT INTO `{{fq_dataset_name}}.observation`
        (observation_id, person_id, observation_concept_id, observation_date,
        observation_type_concept_id)
        VALUES
        (1, 1, 43529626, date('2020-05-05'), 1),
        (2, 2, 43529099, date('2020-05-05'), 2),
        (3, 3, 43529102, date('2020-05-05'), 3),
        (4, 4, 43529627, date('2020-05-05'), 4),
        (5, 5, 43529625, date('2020-05-05'), 5),
        (6, 6, 43529100, date('2020-05-05'), 6),
        (7, 7, 43529098, date('2020-05-05'), 7),
        (8, 8, 10821410, date('2020-05-05'), 8),
        (9, 9, 42181902, date('2020-05-05'), 9),
        (10, 10, 24182910, date('2020-05-05'), 10)
        """).render(fq_dataset_name=self.fq_dataset_name)

        queries = [create_concepts_query_tmpl, drop_records_query_tmpl]
        self.load_test_data(queries)

        # Rows 1-7 reference suppressed concepts and must land in the sandbox;
        # rows 8-10 must remain untouched in observation.
        tables_and_counts = [{
            'fq_table_name':
                '.'.join([self.fq_dataset_name, 'observation']),
            'fq_sandbox_table_name':
                self.fq_sandbox_table_names[0],
            'loaded_ids': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            'sandboxed_ids': [1, 2, 3, 4, 5, 6, 7],
            'fields': [
                'observation_id', 'person_id', 'observation_concept_id',
                'observation_date', 'observation_type_concept_id'
            ],
            'cleaned_values': [(8, 8, 10821410, self.date, 8),
                               (9, 9, 42181902, self.date, 9),
                               (10, 10, 24182910, self.date, 10)]
        }]

        self.default_test(tables_and_counts)
__copyright__ = """
Copyright 2019 Amazon.com, Inc. or its affiliates.
Copyright 2019 Netflix Inc.
Copyright 2019 Google LLC
"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from threading import Event
from threading import Timer
from threading import Thread
from threading import Lock
from paho.mqtt.client import Client
import os
import logging
import re
import json
import traceback
class MqttClient:
    """A wrapper around the paho mqtt client specifically for device automation bus

    This wrapper is mean to handle connect/disconnect as well as sending and
    receiving messages.

    Clients can register handlers to be called when certain messages are received.
    Clients also use this class to publish messages onto the bus.
    """

    class MqttClientEvents:
        """Container for MqttClient thread events"""

        def __init__(self):
            self.connected_event = Event()
            self.disconnected_event = Event()
            self.apps_discovered_event = Event()
            self.capabilities_discovered_event = Event()

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.mqtt_client_events = MqttClient.MqttClientEvents()
        self.thread = None
        self.client = Client(userdata=self.mqtt_client_events)
        # app name -> app descriptor; populated from retained 'apps/+' messages
        self.app_registry = {}
        self.app_registry_timer = None
        self.app_registry_timer_lock = Lock()
        # capability id -> capability descriptor; from retained 'platform/+'
        self.capabilities_registry = {}
        self.capabilities_registry_timer = None
        self.capabilities_registry_timer_lock = Lock()
        self.topic_handlers = [
            {'topic': 'apps/+', 'regex': 'apps/[^/]*', 'handler': self.on_app_message},
            # BUG FIX: 'platform/+' was previously routed to on_app_message,
            # so on_platform_message never ran, the capabilities registry was
            # never populated and get_discovered_capabilities() blocked forever.
            {'topic': 'platform/+', 'regex': 'platform/[^/]*', 'handler': self.on_platform_message},
            {'topic': 'platform/telemetry/monitor/#', 'regex': 'platform/telemetry/monitor/.*', 'handler': self.on_monitor_message}
        ]
        self.logger = logging.getLogger(__name__)

    def on_connect(self, client, mqtt_client_events, flags, rc):
        """Set the connected state and subscribe to existing known topics."""
        del mqtt_client_events, flags, rc  # Delete unused parameters to prevent warnings.
        for topic_handler in self.topic_handlers:
            client.subscribe(topic_handler["topic"])
        self.mqtt_client_events.connected_event.set()
        self.mqtt_client_events.disconnected_event.clear()

    def on_disconnect(self, client, user_data, rc):
        """Set the disconnected state."""
        del client, user_data, rc  # Delete unused parameters to prevent warnings.
        self.mqtt_client_events.connected_event.clear()
        self.mqtt_client_events.disconnected_event.set()

    def on_message(self, client, user_data, packet):
        """The main message dispatcher

        This function is called for all messages on all registered topics.
        It handles dispatching messages to the registered handlers.
        """
        del client, user_data  # Delete unused parameters to prevent warnings.
        try:
            if packet:
                self.logger.info("topic: " + packet.topic)
                # Message payload can come in as a char array instead of string.
                # Normalize it to a string for easier access by handlers.
                if type(packet.payload) is str:
                    message = packet.payload
                else:
                    message = packet.payload.decode("utf-8")
                self.logger.info("message: " + message)
                # Since this function receives messages for all topics, use the specified regex
                # to call the correct handler(s).
                for topic_handler in self.topic_handlers:
                    if re.fullmatch(topic_handler["regex"], packet.topic):
                        topic_handler["handler"](packet.topic, message)
        # Since paho eats all the exceptions, catch them here beforehand
        # and make sure a stack trace is printed.
        except Exception:
            traceback.print_exc()
            raise

    def _restart_debounce_timer(self, current_timer, lock, event):
        """Debounce helper: (re)arm a 1s timer that sets *event* when no new
        message has arrived for a second. Returns the new timer.

        BUG FIX: the original code stored the result of Timer(...).start(),
        which is None, so the previous timer could never be cancelled and the
        event fired one second after the FIRST message instead of the last.
        """
        with lock:
            if current_timer:
                current_timer.cancel()
                # Wait until a concurrently-firing timer callback finishes.
                while current_timer.is_alive():
                    pass
            new_timer = Timer(1.0, event.set)
            new_timer.start()
            return new_timer

    def on_app_message(self, topic, message):
        """Local handler for handling registration of media apps.

        This function receives a message for each registered media application.
        Since these messages are published as retained, normally this will be
        called with all the messages in on batch.

        To know that we have received all of the initial batch, use a simple timeout
        to measure how long it has been since the last message. When enough time goes
        by, allow callers to get the list of apps from the registry.
        """
        app = json.loads(message)
        app["app_id"] = os.path.basename(topic)
        self.app_registry.update({app["name"]: app})
        self.app_registry_timer = self._restart_debounce_timer(
            self.app_registry_timer,
            self.app_registry_timer_lock,
            self.mqtt_client_events.apps_discovered_event)

    def on_platform_message(self, topic, message):
        """Local handler for handling platform messages

        This function receives a message for each registered platform capability.
        """
        capability = json.loads(message)
        capability_id = os.path.basename(topic)
        self.capabilities_registry.update({capability_id: capability})
        self.capabilities_registry_timer = self._restart_debounce_timer(
            self.capabilities_registry_timer,
            self.capabilities_registry_timer_lock,
            self.mqtt_client_events.capabilities_discovered_event)

    def on_monitor_message(self, topic, message):
        """Local handler for handling process monitor messages
        """
        # NOTE(review): the decoded message is currently unused — confirm
        # whether monitor handling is still pending implementation.
        message = json.loads(message)

    def get_discovered_apps(self):
        """Method to get the discovered apps.

        TODO: Get the app registry out of here into it's own class.
        """
        self.mqtt_client_events.apps_discovered_event.wait()
        with self.app_registry_timer_lock:
            discovered_apps = self.app_registry.copy().values()
        return discovered_apps

    def get_discovered_capabilities(self):
        """Method to get the discovered platform capabilities.

        TODO: Get the registry out of here into it's own class.
        """
        self.mqtt_client_events.capabilities_discovered_event.wait()
        with self.capabilities_registry_timer_lock:
            discovered_capabilities = self.capabilities_registry.copy().values()
        return discovered_capabilities

    def _start(self):
        """Private start method that does th actual work of starting the client in a new thread."""
        self.client.enable_logger(self.logger)
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.on_disconnect = self.on_disconnect
        self.client.connect(self.host, self.port)
        self.client.loop_start()
        self.mqtt_client_events.connected_event.wait(15.0)
        if not self.mqtt_client_events.connected_event.is_set():
            raise Exception("Connect timed out after 15 seconds.")
        self.mqtt_client_events.disconnected_event.wait()  # yes, forever

    def start(self):
        """Public start method used by callers to start the client."""
        self.thread = Thread(target=self._start)
        self.thread.start()

    def stop(self):
        """Stop method used to shutdown the client."""
        for topic_handler in self.topic_handlers:
            self.client.unsubscribe(topic_handler["topic"])
        self.client.disconnect()
        # Unblock any caller waiting in get_discovered_apps().
        self.mqtt_client_events.apps_discovered_event.set()

    def publish(self, topic, message):
        """Publish a message to the specified topic."""
        self.logger.info("publish: Sending '{}' to topic '{}'".format(message, topic))
        self.client.publish(topic, message)

    def subscribe(self, topic, handler, regex=None):
        """Add a handler for a particular topic

        The handler is a function that will be called when a message is received on the specified topic.
        An optional regex can be provided to filter the topic even more.
        """
        if not regex:
            regex = topic
        self.topic_handlers.append({'topic': topic, 'handler': handler, 'regex': regex})
        self.client.subscribe(topic)
|
<gh_stars>0
import random
from datetime import datetime
from functools import total_ordering
from .exceptions import DepartmentNotAvailableError, TooManyMembersOnDayError
from .constants import GRANEN_ID, TALLEN_ID
today = datetime.now().date()
@total_ordering
class Member:
    """A single member of the association with family/sponsor linkage."""

    def __init__(self, id=0, first_name="", last_name="", sos_percentage=100, family=0, sponsor_for_family=None,
                 sponsored_by_family=None, end_date=None, start_date=None, partner_id=None, sponsored_by_member=None):
        self.id = id
        self.first_name = first_name
        self.last_name = last_name
        self.sos_percentage = sos_percentage
        self.family = family
        self.sponsor_for_family = sponsor_for_family
        self.sponsored_by_family = sponsored_by_family
        # NOTE(review): start_date is stored as-is while end_date is parsed
        # from "YYYY-MM-DD" strings below; DayList date arithmetic assumes
        # start_date is already a date — confirm callers never pass a string.
        self.start_date = start_date
        self.sponsored_by_member = sponsored_by_member
        self.partner_id = partner_id
        self.family_name = None
        if isinstance(end_date, str):
            self.end_date = datetime.strptime(end_date, "%Y-%m-%d").date()
        else:
            self.end_date = end_date

    @property
    def name(self):
        return "%s %s" % (self.first_name, self.last_name)

    @property
    def is_sponsor(self):
        return self.sponsor_for_family is not None

    @property
    def is_sponsored(self):
        return self.sponsored_by_family is not None

    def __repr__(self):
        return "<%s>" % self.name

    # NOTE(review): __eq__ is deliberately commented out, so equality is
    # identity while total_ordering derives the other comparisons from
    # __lt__; two distinct Members with the same id are ordered equal but
    # compare unequal — confirm this asymmetry is intended (defining __eq__
    # would make instances unhashable unless __hash__ is also provided).
    # def __eq__(self, other):
    #     return self.id == other.id

    def __lt__(self, other):
        return self.id < other.id
class MemberList(list):
    """List of Member objects with family and sponsor post-processing."""

    @staticmethod
    def create_from_dicts(dicts):
        """Build a MemberList from raw member dicts and resolve family/sponsor links."""
        members = MemberList()
        for m in dicts:
            members.append(Member(**m))
        members._parse_families()
        members._parse_sponsors()
        return members

    def _parse_families(self):
        """Assign a family id and display name to every member.

        Partners (linked via partner_id) share a family id and a combined
        "Familjen A & B" name; single members use their own full name.
        """
        family_id = 0
        members_dict = self.__members_by_id()
        while len(members_dict) > 0:
            family_id += 1
            member1 = members_dict.popitem()[1]
            member1.family = family_id
            family_name = '%s %s' % (member1.first_name, member1.last_name)
            if member1.partner_id:
                member2 = members_dict.pop(member1.partner_id)
                member2.family = family_id
                family_name = "Familjen %s & %s %s" % (family_name, member2.first_name, member2.last_name)
                member2.family_name = family_name
            # BUG FIX: the family name is now recorded unconditionally;
            # previously it was only assigned inside the partner branch, so
            # single members were left with family_name = None.
            member1.family_name = family_name
        # (removed the unused new_members_list accumulator from the original)

    def _parse_sponsors(self):
        """Cross-link sponsored members (and their partners) with their sponsors."""
        for member in self:
            if member.sponsored_by_member:
                # Sponsorship only applies during the first 90 days of membership.
                if MemberList.__is_within_sponsor_period(member):
                    sponsor = self.get_by_id(member.sponsored_by_member)
                    member.sponsored_by_family = sponsor.family
                    if member.partner_id is not None:
                        member_partner = self.get_by_id(member.partner_id)
                        member_partner.sponsored_by_family = sponsor.family
                    sponsor.sponsor_for_family = member.family
                    if sponsor.partner_id is not None:
                        sponsor_partner = self.get_by_id(sponsor.partner_id)
                        sponsor_partner.sponsor_for_family = member.family

    @staticmethod
    def __is_within_sponsor_period(member):
        # True while the member joined less than 90 days ago.
        return member.start_date and (today - member.start_date).days < 90

    def __members_by_id(self):
        return {x.id: x for x in self}

    def get_by_id(self, id):
        """Return the member with the given id, or None if absent."""
        for member in self:
            if member.id == id:
                return member
        return None

    def get_family_name_by_family_id(self, family_id):
        """Return the display name of the family, or None if no member matches."""
        for member in self:
            if member.family == family_id:
                return member.family_name
class Day():
    """One calendar day with two duty slots: Tallen (slot 0) and Granen (slot 1)."""

    def __init__(self, date, members=None):
        # BUG FIX: the original used a mutable default ([]) and shuffled the
        # caller's list in place — the shared default and the caller's data
        # were both mutated. Copy before shuffling instead.
        members = list(members) if members else []
        random.shuffle(members)
        self.date = date
        self.members = [None] * 2
        for m in members:
            self.put(m)

    @property
    def is_full(self):
        """True when both duty slots are occupied."""
        return len(self.members) >= 2 and self.members[0] is not None and self.members[1] is not None

    @property
    def tallen(self):
        return self.members[0] if self.members else None

    @property
    def granen(self):
        return self.members[1] if self.members and len(self.members) == 2 else None

    def put(self, member, department=None):
        """Place *member* in the requested department, or the first free slot.

        :raises TooManyMembersOnDayError: if both slots are already taken
        :raises DepartmentNotAvailableError: if the requested slot is taken
        """
        if self.is_full:
            raise TooManyMembersOnDayError
        if department == TALLEN_ID:
            if not self.tallen:
                self.members[0] = member
            else:
                raise DepartmentNotAvailableError
        elif department == GRANEN_ID:
            # BUG FIX: a request for an occupied Granen slot previously fell
            # through to the generic branch and silently landed in Tallen;
            # now it raises, mirroring the Tallen handling above.
            if not self.granen:
                self.members[1] = member
            else:
                raise DepartmentNotAvailableError
        else:
            if not self.tallen:
                self.members[0] = member
            elif not self.granen:
                self.members[1] = member

    def contains_family(self, family):
        """True if any placed member belongs to *family*."""
        for m in self.members:
            if m and m.family == family:
                return True
        return False

    def __repr__(self):
        return "<%s T: %s, G: %s>" % (self.date, self.tallen, self.granen)
class DayList(list):
    """List of Day objects filled from a stream of members and work days."""

    def __init__(self, work_days_service):
        # Service yielding the next work date string via .next().
        self.work_days_service = work_days_service

    def append_member(self, member: "Member", department=None):
        """Place *member* on the last open day, creating new days as needed.

        Members inside their start/end grace periods are skipped.
        """
        if len(self) == 0:
            last_day = Day(self.work_days_service.next())
            self.append(last_day)
        else:
            last_day = self[-1]
        if last_day.is_full:
            last_day = Day(self.work_days_service.next())
            self.append(last_day)
        if DayList.is_day_within_members_end_grace_period(member, last_day) \
                or DayList.is_day_within_members_start_grace_period(member, last_day):
            return
        last_day.put(member, department)

    @staticmethod
    def is_day_within_members_end_grace_period(member, day):
        """True when *day* falls within the month before the member's end date."""
        if member.end_date:
            member_end_date = member.end_date
            prev_month_date = DayList.prev_month(member_end_date)
            day_date = datetime.strptime(day.date, "%Y-%m-%d").date()
            return prev_month_date < day_date
        return False

    @staticmethod
    def is_day_within_members_start_grace_period(member, day):
        """True when *day* falls within the month after the member's start date."""
        if member.start_date:
            member_start_date = member.start_date
            next_month_date = DayList.next_month(member_start_date)
            day_date = datetime.strptime(day.date, "%Y-%m-%d").date()
            return day_date < next_month_date
        return False

    @staticmethod
    def prev_month(date):
        """Return *date* shifted one month back, clamping to a valid day."""
        if date.month == 1:
            return date.replace(month=12, year=date.year - 1)
        else:
            try:
                return date.replace(month=date.month - 1)
            except ValueError:
                # Day does not exist in the previous month; retry one day earlier.
                return DayList.prev_month(date=date.replace(day=date.day - 1))

    @staticmethod
    def next_month(date):
        """Return *date* shifted one month forward, clamping to a valid day."""
        if date.month == 12:
            return date.replace(month=1, year=date.year + 1)
        else:
            try:
                return date.replace(month=date.month + 1)
            except ValueError:
                # BUG FIX: the original recursed into prev_month here
                # (copy-paste error), which returned a date a month EARLIER
                # for month-end inputs such as Jan 31.
                return DayList.next_month(date=date.replace(day=date.day - 1))

    @property
    def members(self):
        """All members placed across all days (including None for empty slots)."""
        members = []
        for day in self:
            members.extend(day.members)
        return members
|
<gh_stars>10-100
# ==============================================================================
# Authors: <NAME>
#
# Python functions: A streaming VHDL parser
#
# Description:
# ------------------------------------
# TODO:
#
# License:
# ==============================================================================
# Copyright 2017-2021 <NAME> - Boetzingen, Germany
# Copyright 2016-2017 <NAME> - Dresden, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from pydecor.decorators import export
from pyVHDLParser.Blocks import CommentBlock, EndOfDocumentBlock
from pyVHDLParser.Blocks.Common import LinebreakBlock, IndentationBlock
import pyVHDLParser.Blocks.InterfaceObject
from pyVHDLParser.Blocks.List import GenericList, ParameterList, PortList, SensitivityList
from pyVHDLParser.Groups import ParserState, GroupParserException, Group, EndOfDocumentGroup
from pyVHDLParser.Groups.Comment import WhitespaceGroup, CommentGroup
__all__ = []
__api__ = __all__
@export
class GenericListGroup(Group):
    """Group covering a complete generic list, including nested item groups."""

    def __init__(self, previousGroup, startBlock, endBlock=None):
        super().__init__(previousGroup, startBlock, endBlock)
        self._subGroups = {
            CommentGroup: [],
            WhitespaceGroup: [],
            GenericListItemGroup: []
        }

    @classmethod
    def stateParse(cls, parserState: ParserState):
        """Dispatch on the current block type while inside a generic list."""
        currentBlock = parserState.Block
        if isinstance(currentBlock, GenericList.OpenBlock):
            return
        elif isinstance(currentBlock, pyVHDLParser.Blocks.InterfaceObject.InterfaceConstantBlock):
            # A generic declaration: descend into an item group and re-issue
            # the current block so the item parser sees it.
            parserState.PushState = GenericListItemGroup.stateParse
            parserState.NextGroup = GenericListItemGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        elif isinstance(currentBlock, GenericList.CloseBlock):
            parserState.Pop()
            return
        elif isinstance(currentBlock, (LinebreakBlock, IndentationBlock)):
            parserState.PushState = WhitespaceGroup.stateParse
            parserState.NextGroup = WhitespaceGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        elif isinstance(currentBlock, CommentBlock):
            parserState.PushState = CommentGroup.stateParse
            parserState.NextGroup = CommentGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        if isinstance(currentBlock, EndOfDocumentBlock):
            parserState.NextGroup = EndOfDocumentGroup(currentBlock)
            return
        raise GroupParserException("End of generic list not found.", currentBlock)
@export
class GenericListItemGroup(Group):
    """Group for a single generic declaration inside a generic list."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        """Consume blocks until the generic's delimiter or the list close."""
        for block in parserState.GetBlockIterator:
            if isinstance(block, GenericList.DelimiterBlock):
                parserState.Pop()
                return
            elif isinstance(block, GenericList.CloseBlock):
                # Close belongs to the enclosing list group: pop and re-issue.
                # parserState.NextGroup = cls(parserState.LastGroup, parserState.BlockMarker, block)
                parserState.Pop()
                parserState.ReIssue = True
                return

        raise GroupParserException("End of generic not found.", block)
@export
class GenericMapGroup(Group):
    """Group for a generic map aspect. TODO: parsing not implemented yet."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        block = parserState.Block
        raise NotImplementedError("State=Parse: {0!r}".format(block))
@export
class GenericMapItemGroup(Group):
    """Group for one association in a generic map. TODO: not implemented yet."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        block = parserState.Block
        raise NotImplementedError("State=Parse: {0!r}".format(block))
@export
class PortListGroup(Group):
    """Group covering a complete port list, including nested item groups."""

    def __init__(self, previousGroup, startBlock, endBlock=None):
        super().__init__(previousGroup, startBlock, endBlock)
        self._subGroups = {
            CommentGroup: [],
            WhitespaceGroup: [],
            PortListItemGroup: []
        }

    @classmethod
    def stateParse(cls, parserState: ParserState):
        """Dispatch on the current block type while inside a port list."""
        currentBlock = parserState.Block
        if isinstance(currentBlock, PortList.OpenBlock):
            return
        elif isinstance(currentBlock, (pyVHDLParser.Blocks.InterfaceObject.InterfaceSignalBlock, PortList.DelimiterBlock)):
            return
        elif isinstance(currentBlock, PortList.CloseBlock):
            parserState.Pop()
            return
        elif isinstance(currentBlock, (LinebreakBlock, IndentationBlock)):
            parserState.PushState = WhitespaceGroup.stateParse
            parserState.NextGroup = WhitespaceGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        elif isinstance(currentBlock, CommentBlock):
            parserState.PushState = CommentGroup.stateParse
            parserState.NextGroup = CommentGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        if isinstance(currentBlock, EndOfDocumentBlock):
            parserState.NextGroup = EndOfDocumentGroup(currentBlock)
            return
        # BUG FIX: the message previously said "generic list" — a copy/paste
        # leftover from GenericListGroup; this parser handles the port list.
        raise GroupParserException("End of port list not found.", currentBlock)
@export
class PortListItemGroup(Group):
    """Group representing a single item inside a port list."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        """Consume blocks until the item's delimiter or the list's close block."""
        for block in parserState.GetBlockIterator:
            endsItem = isinstance(block, PortList.DelimiterBlock)
            endsList = isinstance(block, PortList.CloseBlock)
            if endsItem or endsList:
                parserState.Pop()
                if endsList:
                    # The close block also terminates the whole list: hand it
                    # back to the parent state for re-processing.
                    parserState.ReIssue = True
                return
        raise GroupParserException("End of port not found.", block)
@export
class PortMapGroup(Group):
    """Group for a port map aggregate; parsing is not implemented yet."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        # No grammar support yet: surface the offending block for debugging.
        raise NotImplementedError("State=Parse: {0!r}".format(parserState.Block))
@export
class PortMapItemGroup(Group):
    """Group for a single association inside a port map; not implemented yet."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        # No grammar support yet: surface the offending block for debugging.
        raise NotImplementedError("State=Parse: {0!r}".format(parserState.Block))
@export
class ParameterListGroup(Group):
    """Group covering a complete parameter list, collecting item, whitespace
    and comment sub-groups while it is on the parser stack."""

    def __init__(self, previousGroup, startBlock, endBlock=None):
        super().__init__(previousGroup, startBlock, endBlock)
        # Sub-groups gathered while parsing this parameter list.
        self._subGroups = {
            CommentGroup: [],
            WhitespaceGroup: [],
            ParameterListItemGroup: []
        }

    @classmethod
    def stateParse(cls, parserState: ParserState):
        """Dispatch on the current block; delegates whitespace and comments to
        nested group states and pops on the closing block."""
        currentBlock = parserState.Block
        if isinstance(currentBlock, ParameterList.OpenBlock):
            # Opening parenthesis of the parameter list: stay in this state.
            return
        elif isinstance(currentBlock, (ParameterList.ItemBlock, ParameterList.DelimiterBlock)):
            # Parameter items and separators are consumed without a state change.
            return
        elif isinstance(currentBlock, ParameterList.CloseBlock):
            parserState.Pop()
            return
        elif isinstance(currentBlock, (LinebreakBlock, IndentationBlock)):
            # Delegate whitespace handling to a nested WhitespaceGroup state.
            parserState.PushState = WhitespaceGroup.stateParse
            parserState.NextGroup = WhitespaceGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        elif isinstance(currentBlock, CommentBlock):
            # Delegate comment handling to a nested CommentGroup state.
            parserState.PushState = CommentGroup.stateParse
            parserState.NextGroup = CommentGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        if isinstance(currentBlock, EndOfDocumentBlock):
            parserState.NextGroup = EndOfDocumentGroup(currentBlock)
            return
        # FIX: message previously said "generic list" — copy/paste from
        # GenericListGroup; this state parses a *parameter* list.
        raise GroupParserException("End of parameter list not found.", currentBlock)
@export
class ParameterListItemGroup(Group):
    """Group representing a single item inside a parameter list."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        """Consume blocks until the item's delimiter or the list's close block."""
        for block in parserState.GetBlockIterator:
            endsItem = isinstance(block, ParameterList.DelimiterBlock)
            endsList = isinstance(block, ParameterList.CloseBlock)
            if endsItem or endsList:
                parserState.Pop()
                if endsList:
                    # The close block also terminates the whole list: hand it
                    # back to the parent state for re-processing.
                    parserState.ReIssue = True
                return
        raise GroupParserException("End of parameter not found.", block)
@export
class ParameterMapGroup(Group):
    """Group for a parameter map aggregate; parsing is not implemented yet."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        # No grammar support yet: surface the offending block for debugging.
        raise NotImplementedError("State=Parse: {0!r}".format(parserState.Block))
@export
class ParameterMapItemGroup(Group):
    """Group for a single association inside a parameter map; not implemented yet."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        # No grammar support yet: surface the offending block for debugging.
        raise NotImplementedError("State=Parse: {0!r}".format(parserState.Block))
@export
class SensitivityListGroup(Group):
    """Group covering a complete sensitivity list, collecting item, whitespace
    and comment sub-groups while it is on the parser stack."""

    def __init__(self, previousGroup, startBlock, endBlock=None):
        super().__init__(previousGroup, startBlock, endBlock)
        # Sub-groups gathered while parsing this sensitivity list.
        self._subGroups = {
            CommentGroup: [],
            WhitespaceGroup: [],
            SensitivityListItemGroup: []
        }

    @classmethod
    def stateParse(cls, parserState: ParserState):
        """Dispatch on the current block; delegates whitespace and comments to
        nested group states and pops on the closing block."""
        currentBlock = parserState.Block
        if isinstance(currentBlock, SensitivityList.OpenBlock):
            # Opening parenthesis of the sensitivity list: stay in this state.
            return
        elif isinstance(currentBlock, (SensitivityList.ItemBlock, SensitivityList.DelimiterBlock)):
            # Signal items and separators are consumed without a state change.
            return
        elif isinstance(currentBlock, SensitivityList.CloseBlock):
            parserState.Pop()
            return
        elif isinstance(currentBlock, (LinebreakBlock, IndentationBlock)):
            # Delegate whitespace handling to a nested WhitespaceGroup state.
            parserState.PushState = WhitespaceGroup.stateParse
            parserState.NextGroup = WhitespaceGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        elif isinstance(currentBlock, CommentBlock):
            # Delegate comment handling to a nested CommentGroup state.
            parserState.PushState = CommentGroup.stateParse
            parserState.NextGroup = CommentGroup(parserState.LastGroup, currentBlock)
            parserState.BlockMarker = currentBlock
            parserState.ReIssue = True
            return
        if isinstance(currentBlock, EndOfDocumentBlock):
            parserState.NextGroup = EndOfDocumentGroup(currentBlock)
            return
        # FIX: message previously said "generic list" — copy/paste from
        # GenericListGroup; this state parses a *sensitivity* list.
        raise GroupParserException("End of sensitivity list not found.", currentBlock)
@export
class SensitivityListItemGroup(Group):
    """Group for a single signal inside a sensitivity list; not implemented yet."""

    @classmethod
    def stateParse(cls, parserState: ParserState):
        # No grammar support yet: surface the offending block for debugging.
        raise NotImplementedError("State=Parse: {0!r}".format(parserState.Block))
|
<filename>EasyDraw/__init__.py
"""
EasyDraw
-------------------------------
A graphical library built for visual arts.
EasyDraw is built on top of tkinter and has more functionalities.
Author: <NAME>
https://github.com/vafakaramzadegan/EasyDraw
"""
import tkinter as tk
import time
from EasyDraw import Color
from EasyDraw import Canvas
from EasyDraw.Tools import *
from EasyDraw import Vector
class EasyDraw(object):
    """EasyDraw application: a tkinter window driven by a Processing-style
    setup()/draw() loop.

    All configuration is passed as keyword arguments: window geometry
    ('width', 'height', 'fullscreen', 'title', 'background'), loop behaviour
    ('fps', 'autoClear', 'showStats', 'showGrid'), an optional user coordinate
    system ('bounds' as (min_x, min_y, max_x, max_y)), GIF recording
    ('exportPath') and user callbacks ('setupFunc', 'drawFunc', mouse and
    keyboard handlers). Constructing the object enters the tkinter mainloop
    and blocks until the window is closed.
    """

    def __init__(self, **kwargs):
        print('Hello from EasyDraw!')
        # app clock: frame counter and start timestamp (used for the fps stat)
        self.tick = 1
        self.__start_time = time.time()
        # window size
        self.width = kwargs.get('width', 400)
        self.height = kwargs.get('height', 400)
        # run in fullscreen
        self.fullscreen = kwargs.get('fullscreen', False)
        # clear screen on each frame
        self.autoClear = kwargs.get('autoClear', True)
        # show stats on screen
        self.showStats = kwargs.get('showStats', False)
        # frames per second, converted below to an after() delay in ms
        self.interval = kwargs.get('fps', 24)
        # FIX: was range(0, 1001), which accepted fps=0 and then crashed with
        # ZeroDivisionError on the division below; the error message already
        # documented the intended 1..1000 range.
        if self.interval not in range(1, 1001):
            raise ValueError("invalid fps value should be between 1 and 1000 but '%d' was entered." % self.interval)
        self.interval = 1000 // self.interval
        # path to save rendered frames as GIF file ('' disables recording)
        self.export_path = kwargs.get('exportPath', '')
        if self.export_path != '':
            print('Recording frames...')
        # mouse position relative to origin
        self.mouse_x = 0
        self.mouse_y = 0
        # mouse distance to top left corner
        self.mouse_left = 0
        self.mouse_top = 0
        # indicates whether to use XY coordination system
        self.useBounds = False
        self.bounds = kwargs.get('bounds', None)
        self.scale_x = 0
        self.scale_y = 0
        self.bound_center = (0, 0)

        master = tk.Tk()
        self.master = master
        if self.fullscreen:
            master.attributes("-fullscreen", True)
            # update() so winfo_width/height report the real fullscreen size
            master.update()
            self.width = master.winfo_width()
            self.height = master.winfo_height()
        # bind mouse event handlers
        master.bind('<Motion>'         , self.__motion_event)
        master.bind('<Button-1>'       , self.__mouse_left_btn_click)
        master.bind('<Button-2>'       , self.__mouse_middle_btn_click)
        master.bind('<Button-3>'       , self.__mouse_right_btn_click)
        master.bind('<B1-Motion>'      , self.__left_mouse_btn_down)
        master.bind('<B3-Motion>'      , self.__right_mouse_btn_down)
        master.bind('<ButtonRelease-1>', self.__left_mouse_btn_up)
        master.bind('<ButtonRelease-3>', self.__right_mouse_btn_up)
        # bind keyboard events
        master.bind('<Escape>', self.__on_escape_key)
        master.bind('<Key>', self.__on_key_press)
        master.bind('<KeyRelease>', self.__on_key_release)
        # set window title
        master.title(kwargs.get('title', 'EasyDraw App'))
        # bind onWindowClose event
        master.protocol("WM_DELETE_WINDOW", self.__on_closing)

        self.bg_color = kwargs.get('background', 'silver')
        self.showGrid = kwargs.get('showGrid', False)
        self.canvas = Canvas.Canvas(master,
                                    app        = self,
                                    width      = self.width,
                                    height     = self.height,
                                    background = self.bg_color,
                                    showGrid   = self.showGrid)
        # deprecated -------------------------
        self.tools = Tools()
        self.color = Color.Color()
        # ------------------------------------
        # try to load 'setup' and 'draw' callback methods automatically
        try:
            from __main__ import setup
            self.setupFunction = setup
        except ImportError:
            self.setupFunction = kwargs.get('setupFunc', None)
        try:
            from __main__ import draw
            self.drawFunction = draw
        except ImportError:
            self.drawFunction = kwargs.get('drawFunc', None)
        self.keyPressFunc = kwargs.get('keyPressFunc', None)
        self.keyReleaseFunc = kwargs.get('keyReleaseFunc', None)
        # mouse event callbacks
        self.mouseMoveFunction = kwargs.get('mouseMoveFunc', None)
        self.clickFunction = kwargs.get('clickFunc', None)
        self.mouseDownFunction = kwargs.get('mouseDownFunc', None)
        self.mouseUpFunction = kwargs.get('mouseUpFunc', None)

        if isinstance(self.bounds, tuple):
            min_x = self.bounds[0]
            min_y = self.bounds[1]
            max_x = self.bounds[2]
            max_y = self.bounds[3]
            if max_x <= min_x:
                raise ValueError('Max X cannot be less than or equal to min X')
            if max_y <= min_y:
                raise ValueError('Max Y cannot be less than or equal to min Y')
            self.useBounds = True
            self.scale_x = self.width // abs(max_x - min_x)
            # FIX: scale_y was computed from self.width, which distorted the
            # user coordinate system on non-square windows.
            self.scale_y = self.height // abs(max_y - min_y)
            bx = 0
            if min_x >= 0:
                bx = -(min_x * self.scale_x)
            else:
                bx = self.width - (max_x * self.scale_x)
            by = 0
            if min_y >= 0:
                by = -(min_y * self.scale_y)
            else:
                by = (max_y * self.scale_y)
            self.bound_center = (bx, by)

        # FIX: after() was given the *result* of calling the methods
        # (master.after(100, self.__setup())), so both ran immediately and
        # tkinter scheduled None. Pass the bound methods themselves.
        master.after(100, self.__setup)
        master.after(100, self.__animate)
        master.mainloop()

    def clearBounds(self):
        """Disable the user XY coordinate system (revert to pixel coordinates)."""
        self.useBounds = False

    # mouse related methods
    def __get_mouse_positions(self, e):
        """Refresh mouse coordinates from event *e*: window-relative
        (mouse_left/mouse_top) and origin/bounds-relative (mouse_x/mouse_y)."""
        if self.useBounds:
            self.mouse_x = e.x
            self.mouse_y = e.y
            # NOTE(review): 'map' is presumably the Processing-style re-map
            # helper star-imported from EasyDraw.Tools (it shadows the
            # builtin map) — confirm against EasyDraw/Tools.py.
            self.mouse_x = map(self.mouse_x,
                               0,
                               self.width,
                               self.bounds[0],
                               self.bounds[2])
            # FIX: the y axis was re-mapped over self.width; it must use the
            # window height. The y bounds are swapped to flip the axis.
            self.mouse_y = map(self.mouse_y,
                               0,
                               self.height,
                               self.bounds[3],
                               self.bounds[1])
        else:
            self.mouse_x = e.x - self.canvas.get_center_pos()[0]
            self.mouse_y = e.y - self.canvas.get_center_pos()[1]
        self.mouse_left = e.x
        self.mouse_top = e.y

    def __motion_event(self, event):
        if self.mouseMoveFunction:
            self.mouseMoveFunction(self)

    def __mouse_left_btn_click(self, event):
        if self.clickFunction:
            self.clickFunction(self, 'left')

    def __mouse_middle_btn_click(self, event):
        if self.clickFunction:
            self.clickFunction(self, 'middle')

    def __mouse_right_btn_click(self, event):
        if self.clickFunction:
            self.clickFunction(self, 'right')

    def __left_mouse_btn_down(self, event):
        if self.mouseDownFunction:
            self.mouseDownFunction(self, 'left')

    def __right_mouse_btn_down(self, event):
        if self.mouseDownFunction:
            self.mouseDownFunction(self, 'right')

    def __left_mouse_btn_up(self, event):
        if self.mouseUpFunction:
            self.mouseUpFunction(self, 'left')

    def __right_mouse_btn_up(self, event):
        if self.mouseUpFunction:
            self.mouseUpFunction(self, 'right')

    def __show_stats(self):
        """Draw the fps/tick/window/mouse overlay in the top-left corner."""
        c = self.canvas
        c.push()
        c.translate(0, 0)
        c.font_color('white')
        c.text_anchor('nw')
        c.font_family('courier 12')
        text = (
            f'fps: {(self.tick / (time.time() - self.__start_time)):.2f}\n'\
            f'tick: {self.tick}\n\n'\
            f'win width: {self.width}\n'\
            f'win height: {self.height}\n\n'\
            f'mouse left: {self.mouse_left}\n'\
            f'mouse top: {self.mouse_top}\n'\
            f'mouse x: {self.mouse_x:.2f}\n'\
            f'mouse y: {self.mouse_y:.2f}'
        )
        obj = c.text(24, 24, text)
        bounds = c.handle.bbox(obj)
        c.fill('black')
        c.stroke('black')
        # semi-transparent backdrop behind the text, then raise the text
        c.rect(bounds[0] - 16, bounds[1] - 16, bounds[2] + 16, bounds[3] + 16, alpha = .7)
        c.bring_to_front(obj)
        c.pop()

    def __setup(self):
        """Run the user's setup callback once; raises if it is missing."""
        if callable(self.setupFunction):
            self.setupFunction(self)
            if self.showStats:
                self.__show_stats()
        else:
            raise TypeError('Setup function is either undefined or not callable!')

    def __animate(self):
        """Frame callback: clear, track the mouse, run draw(), export the
        frame, draw stats and reschedule itself."""
        if self.autoClear is True:
            self.canvas.clear('all')
            self.canvas.clear_data()
        else:
            if self.showGrid:
                self.canvas.showGrid()
        if callable(self.drawFunction):
            self.tick += 1
            # pointer position relative to the window's top-left corner
            mx = self.master.winfo_pointerx() - self.master.winfo_rootx()
            my = self.master.winfo_pointery() - self.master.winfo_rooty()
            # only update mouse state while the pointer is inside the window
            if mx > 0 and mx <= self.width and my > 0 and my <= self.height:
                self.__get_mouse_positions(Vector.Vector(mx, my))
            self.drawFunction(self)
        else:
            raise Exception('Draw function is either undefined or not callable!')
        if self.export_path != '':
            self.canvas.export_frame()
        if self.showStats:
            self.__show_stats()
        self.canvas.handle.after(self.interval, self.__animate)

    def __on_closing(self):
        """Window-close handler: optionally save the recorded GIF, then quit."""
        if self.export_path != '':
            print('Saving frames as GIF file... please wait!')
            self.canvas.save_frames(self.export_path, self.interval)
            print('Done!')
        self.master.destroy()

    def __on_escape_key(self, e):
        # Escape behaves like closing the window.
        self.__on_closing()

    def __on_key_press(self, e):
        if self.keyPressFunc:
            self.keyPressFunc(self, e)

    def __on_key_release(self, e):
        if self.keyReleaseFunc:
            self.keyReleaseFunc(self, e)
|
<reponame>liupeng678/FastAudioVisual
#!/usr/bin/env python
import os
import glob
import argparse
import fnmatch
from collections import defaultdict
__version__ = '0.1.2'
__author__ = 'hj24'
class LineCounter(object):
    """Count files and lines of code under a directory tree.

    Three modes are supported: 'normal' counts every file, 'filter' excludes
    files whose path ends with one of the given suffixes, and 'specific'
    counts only files whose path ends with one of the given suffixes.
    Hidden entries (names starting with '.') are always skipped.
    """

    def __init__(self, dir):
        # current location (root of the scan)
        self.current_dir = dir
        # Core counters for the number of lines and files
        self.line_count = 0
        self.file_count = 0
        # sets to record the filter files suffix or specific files suffix
        self.select_suffix = set()
        self.filter_suffix = set()
        # dict for detail results keyed by extension, default count = 0
        self.final_files_dict = defaultdict(int)
        self.files_lines_dict = defaultdict(int)
        # set of full paths for every file to analyze
        self.final_files = set()

    def __repr__(self):
        info = '''
        author: \t{}
        count-line version: \t{}
        '''
        # FIX: arguments were swapped — the version was shown as the author
        # and vice versa.
        return info.format(__author__, __version__)

    def filter_mod(self, filter_list):
        """Count all files EXCEPT those ending with a suffix in *filter_list*."""
        self.filter_suffix = set(filter_list)
        self.find_files(self.current_dir, mod='filter')
        for file in self.final_files:
            self.analyze_files(file)

    def specific_mod(self, suffix_list):
        """Count ONLY files ending with a suffix in *suffix_list*."""
        self.select_suffix = set(suffix_list)
        self.find_files(self.current_dir, mod='specific')
        for file in self.final_files:
            self.analyze_files(file)

    def normal_mod(self):
        """Count every (non-hidden) file under the root directory."""
        self.find_files(self.current_dir, mod='normal')
        for file in self.final_files:
            self.analyze_files(file)

    def find_files(self, path, mod):
        """Recursively collect files under *path* according to *mod*
        ('normal' | 'filter' | 'specific')."""
        root = path
        files_or_folders = os.listdir(path)
        for item in files_or_folders:
            # ignore hidden files and directories
            if item[0] != '.':
                if os.path.isdir(root + os.sep + item):
                    sub_path = root + os.sep + item
                    self.find_files(sub_path, mod=mod)
                elif os.path.isfile(root + os.sep + item):
                    if mod == 'normal':
                        self.add_file(root + os.sep, item)
                    elif mod == 'filter':
                        if self.filter_conditions(root + os.sep + item):
                            self.add_file(root + os.sep, item)
                    elif mod == 'specific':
                        if self.specific_conditions(root + os.sep + item):
                            self.add_file(root + os.sep, item)

    def filter_conditions(self, path):
        """Return True when *path* survives the filter (matches no suffix).

        NOTE(review): endswith() matches raw suffixes, so filtering 'py' also
        matches e.g. 'foo.happy' — confirm whether a leading dot should be
        enforced.
        """
        return not any(path.endswith(f) for f in self.filter_suffix)

    def specific_conditions(self, path):
        """Return True when *path* ends with one of the selected suffixes."""
        return any(path.endswith(s) for s in self.select_suffix)

    def add_file(self, path, name):
        """Register a file and bump the per-extension file counter."""
        self.final_files.add(path + name)
        file_name = name.split('.')[-1]
        self.final_files_dict[file_name] += 1

    # analyze single file: update self.line_count, self.file_count
    def analyze_files(self, file):
        file_line = self.count_lines(file)
        file_name = file.split('.')[-1]
        self.files_lines_dict[file_name] += file_line
        self.line_count += file_line
        self.file_count += 1

    # count lines of single file
    def count_lines(self, file):
        # FIX: the original iterated over open(file, 'rb') without closing
        # the handle; use a context manager so the file is always closed.
        with open(file, 'rb') as fh:
            return sum(1 for _ in fh)

    def show_results(self):
        """Print the plain totals."""
        print(f'file count: {self.file_count}')
        print(f'line count: {self.line_count}')

    def show_detail_results(self):
        """Print a per-extension table of file and line counts."""
        info = '''
        =====================================================
        \t文件后缀名\t文件数\t\t总行数
        '''
        data = '''
        \t.{}\t\t{}\t\t{}
        '''
        end = '''
        \t总文件数: {}\t总行数: {}
        =====================================================
        '''
        print(info)
        for (k, v) in self.final_files_dict.items():
            print(data.format(k, v, self.files_lines_dict[k]))
        print(end.format(self.file_count, self.line_count))
def main():
    """CLI entry point: parse arguments, run the chosen counting mode on the
    current working directory and print the (optionally detailed) results."""
    __usage__ = "count the amount of lines and files under the current directory"
    parser = argparse.ArgumentParser(description=__usage__)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-s", "--suffix", type=str,
                       help="count by suffix file name, format: .suffix1.suffix2... e.g: .cpp.py (without space)")
    group.add_argument("-f", "--filter", type=str,
                       help="count without filter name, format: .suffix1.suffix2... e.g: .cpp.py (without space)")
    parser.add_argument("-d", "--detail", action="store_true",
                        help="show detail results")
    args = parser.parse_args()

    current_dir = os.getcwd()
    counter = LineCounter(current_dir)
    print('Search in {}'.format(current_dir + os.sep))
    # FIX: the original used args_list.remove(''), which raised ValueError
    # when the argument had no leading dot (e.g. "cpp.py"). Dropping *all*
    # empty fragments is robust and also tolerates trailing dots.
    if args.filter:
        counter.filter_mod([s for s in args.filter.split('.') if s])
    elif args.suffix:
        counter.specific_mod([s for s in args.suffix.split('.') if s])
    else:
        counter.normal_mod()
    if args.detail:
        counter.show_detail_results()
    else:
        counter.show_results()


if __name__ == '__main__':
    main()
from IPython.core.display import Markdown, display
import numpy as np
def printmd(string: str):
    """Render *string* as Markdown in the current Jupyter cell output.

    The text may contain Markdown (and therefore also LaTeX math) markup.
    """
    rendered = Markdown(string)
    display(rendered)
def print_matrix(matrix: np.ndarray, decimals: int = None, name: str = None, maxSize: int = 20):
    '''
    Matrix Markdown in Jupyter
    ==========================
    Prints a ``matrix:numpy.ndarray`` as a LaTeX bmatrix in Jupyter cell output.

    ``decimals`` decimal places for each element; None = unformatted output.
    ``name`` prints ``name =`` before the matrix.
    ``maxSize`` limits the printed columns/rows; excess rows/columns are
    elided and indicated with dots.

    List use: both ``matrix`` and ``name`` may be python lists (preferably of
    equal length) to print multiple matrices at once.

    Raises:
    ~~~~~~~
    * Exception if a matrix to print has more than two dimensions
    * TypeError if a matrix is not a numpy.ndarray
    '''
    def oneMatrix(matr, name):
        # Render a single ndarray as one LaTeX bmatrix string.
        if not isinstance(matr, np.ndarray):
            raise TypeError('Wrong type of matrix: only numpy.ndarray is supported.')
        # FIX: 'name != None' -> identity comparison per PEP 8.
        if name is not None:
            mdheader = f'$ {name} = \\begin{{bmatrix}}'
        else:
            mdheader = '$ \\begin{bmatrix}'
        mstr = ''
        # Treat a vector as a single-column matrix.
        if len(matr.shape) == 1:
            matr = matr.reshape(matr.shape[0], 1)
        if len(matr.shape) > 2:
            raise Exception('cannot print more than two dimensions on a flat screen')
        # Skip windows; defaults beyond the shape mean "elide nothing".
        cskipat = matr.shape[1] + 1
        cskipto = matr.shape[1] + 1
        rskipat = matr.shape[0] + 1
        rskipto = matr.shape[0] + 1
        if matr.shape[1] > maxSize:
            cskipat = maxSize - 2
            cskipto = matr.shape[1] - 1
        if matr.shape[0] > maxSize:
            rskipat = maxSize - 2
            rskipto = matr.shape[0] - 1
        rskip = False
        for row in range(matr.shape[0]):
            if row >= rskipat and row < rskipto:
                if not rskip:
                    # First elided row: emit a single ellipsis row.
                    if cskipat != cskipto:  # columns elided too: diagonal dots
                        mstr += "\\vdots & " * (maxSize - 2) + ' \\ddots & \\vdots \\\\ '
                    else:
                        mstr += "\\vdots & " * (min(maxSize, matr.shape[1]) - 1) + ' \\vdots \\\\ '
                    rskip = True
            else:
                cskip = False
                for col in range(matr.shape[1]):
                    if col >= cskipat and col < cskipto:
                        # Elided column range: emit one horizontal ellipsis.
                        if not cskip:
                            # FIX: "\cdots" contains the invalid escape '\c'
                            # (DeprecationWarning, a future SyntaxError);
                            # "\\cdots" produces the identical text.
                            mstr += "\\cdots & "
                            cskip = True
                    else:
                        if decimals is not None:
                            mstr += "{{:.{}f}}".format(decimals).format(matr[row][col])
                        else:
                            # round() trims float noise in unformatted output
                            mstr += str(round(matr[row][col], 15))
                        if col < matr.shape[1] - 1:
                            mstr += ' & '
                if row < matr.shape[0] - 1:
                    mstr += ' \\\\ '
        # FIX: ' \end...' contained the invalid escape '\e'; '\\end' is the
        # same text with a valid escape.
        mdfooter = f' \\end{{bmatrix}}_{{Shape{matr.shape}}} $'
        return mdheader + mstr + mdfooter

    if isinstance(matrix, list):
        coll = ''
        if isinstance(name, list):
            if len(name) < len(matrix):
                # pad the name list so zip() covers every matrix
                name = name + [None] * (len(matrix) - len(name))
        else:
            name = [name] + [None] * (len(matrix) - 1)
        for m, n in zip(matrix, name):
            coll += oneMatrix(m, n)
    else:
        coll = oneMatrix(matrix, name)
    printmd(coll)
def matrixInfo(matrix:np.ndarray, name:str='A', verbose:bool=False, decimals:int=None, maxSize:int=20, surfaceGraph=False):
    '''
    Matrix quick analysis in Jupyter
    ================================
    Prints a short analysis of the matrix passed: eigenvalues/eigenvectors,
    Euclidean norm, determinant, rank and (if it exists) the inverse.

    ``decimals`` decimal places for each element. None= use unformatted output.
    ``name`` can be specified to print name= before the matrix
    ``maxSize`` can be specified to limit the columns/rows printed to maxSize elements. More rows/columns will be skipped indicated with ...
    ``verbose`` True will print more hints to the analyses, e.g. Wikipedia links.
    ``surfaceGraph`` True additionally renders a plotly surface plot (2-D matrices only).
    '''
    # Print the matrix itself only for 1-D/2-D input; higher ranks cannot be
    # rendered as a flat bmatrix.
    # NOTE(review): both branches print the same overview header — the else
    # branch looks like it was meant to differ; confirm intent.
    if len(matrix.shape) in [1,2]:
        printmd(f'## Overview for the {len(matrix.shape)}-dimensional matrix {name}')
        print_matrix(matrix, name=name, decimals=decimals)
    else:
        printmd(f'## Overview for the {len(matrix.shape)}-dimensional matrix {name}')
    # NOTE(review): np.linalg.eig requires a square 2-D matrix — non-square
    # input raises LinAlgError here; confirm that callers guarantee this.
    eigval, eigvec = np.linalg.eig(matrix)
    printmd('### Eigenvalues and corresponding eigenvectors')
    if verbose:
        printmd('Eigenvectors are the vectors (different from the nullvector) that are only _scaled_ by a transformation matrix operation _but not rotated_. ')
        printmd('The eigenvalues are the measure of scaling. Eigenvectors by numpy are normalized in length. ')
        printmd('There might not be a solution in real space, so the eigenvectors and eigenvalues can be complex vectors and numbers respectively. ')
        printmd('[Wikipedia link.](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors) ')
    # One column vector per eigenvalue, labelled v_{eigenvalue}.
    print_matrix([eigvec[:,x] for x in range(eigvec.shape[0])], name=['v_{{{}}}'.format(x) for x in list(eigval)], decimals=decimals, maxSize=maxSize)
    printmd('## Euclidian Norm (2nd)')
    if verbose:
        # https://en.wikipedia.org/wiki/Matrix_norm
        printmd('[Wikipedia link.](https://en.wikipedia.org/wiki/Matrix_norm) ')
    printmd(f'$ {{\|{name}\|_2}} =' + str(np.linalg.norm(matrix))+'$')
    printmd('### Determinant')
    if verbose:
        # https://en.wikipedia.org/wiki/Matrix_norm
        printmd('[Wikipedia link.](https://en.wikipedia.org/wiki/Determinant) ')
    printmd(f'${{det}}_{{{name}}} = $' + str(np.linalg.det(matrix)))
    printmd('### Rank')
    if verbose: #
        printmd('[Wikipedia link](https://en.wikipedia.org/wiki/Rank_(linear_algebra))')
    r = np.linalg.matrix_rank(matrix)
    printmd(f'$rank({name}) = $' + str(r))
    if r==min(matrix.shape):
        printmd('Matrix is FULL RANK')
    printmd('### Inverse')
    try:
        i = np.linalg.inv(matrix)
        print_matrix(i, name= f'{{{name}}}^{{-1}}', decimals=decimals, maxSize=maxSize)
    except Exception as exc:
        # Singular (or otherwise non-invertible) matrix: report instead of failing.
        printmd('_there is no inverse to that matrix, or at least it could not be computed._')
        print(exc)
    if surfaceGraph and len(matrix.shape)==2:
        # Local import so plotly is only required when the graph is requested.
        import plotly.graph_objects as go
        # Contour lines mark mean +/- one standard deviation of the values.
        fig = go.Figure(go.Surface(
            contours = {
                "z": {"show": True, "start": np.mean(matrix.flatten())-np.std(matrix.flatten()), "end": np.mean(matrix.flatten())+np.std(matrix.flatten())*1.01, "size": np.std(matrix.flatten())}
            },
            x = list(range(matrix.shape[0])),
            y = list(range(matrix.shape[1])),
            z = matrix))
        fig.layout.title.text = "Surface approximation (with +/- one std deviation markers"
        fig.update_layout(xaxis_title = 'column',
                          yaxis_title='row')
        fig.show()
|
<filename>RNN/DeepRNN_KERAS.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author:LXM
@file:DeepRNN_KERAS.py
@time:2020/10/13
"""
import numpy as np
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from tensorflow import keras
# RNN hyper-parameters
HIDDEN_SIZE = 30  # number of hidden units per LSTM cell
NUM_LAYERS = 2  # number of stacked LSTM layers (deep RNN)
TIMESTEPS = 10  # length of each training input sequence
TRAINING_STEPS = 10000  # number of training steps
BATCH_SIZE = 32  # batch size
TRAINING_EXAMPLES = 10000  # number of training samples
TESTING_EXAMPLES = 1000  # number of test samples
SAMPLE_GAP = 0.01  # sampling interval of the sine wave
# 正弦函数采样
def generate_data(seq):
X = []
y = []
# 序列的第i项和后面的TIMESTEPS-1项合在一起作为输入;第i + TIMESTEPS项作为输
# 出。即用sin函数前面的TIMESTEPS个点的信息,预测第i + TIMESTEPS个点的函数值。
for i in range(len(seq) - TIMESTEPS):
X.append([seq[i: i + TIMESTEPS]])
y.append([seq[i + TIMESTEPS]])
return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)
if __name__ == '__main__':
    # tf.disable_eager_execution()
    # Generate training and test sets by sampling a sine wave.
    test_start = (TRAINING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP
    test_end = test_start + (TESTING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP
    train_X, train_y = generate_data(np.sin(np.linspace(
        0, test_start, TRAINING_EXAMPLES + TIMESTEPS, dtype=np.float32)))
    test_X, test_y = generate_data(np.sin(np.linspace(
        test_start, test_end, TESTING_EXAMPLES + TIMESTEPS, dtype=np.float32)))

    # Input shape matches generate_data's (1, TIMESTEPS) sample layout.
    inputs = keras.Input(shape=(1, TIMESTEPS))
    # NOTE(review): this mixes tf.compat.v1 rnn_cell objects with the Keras
    # layers.RNN wrapper — relies on TF1-compat behaviour; confirm it works
    # with the installed TensorFlow version.
    cell = tf.nn.rnn_cell.MultiRNNCell([
        tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
        for _ in range(NUM_LAYERS)])
    output = layers.RNN(cell)(inputs)
    # Single linear output unit predicting the next sample value.
    predictions = layers.Dense(1, activation=None)(output)

    model = keras.Model(inputs=inputs, outputs=predictions, name="DeepRNN_model")
    model.summary()
    model.compile(
        loss=keras.losses.MeanSquaredError(),
        optimizer=keras.optimizers.Adagrad(learning_rate=0.01),
        metrics=["mape"]
    )
    model.fit(train_X, train_y, batch_size=BATCH_SIZE, epochs=5)
    model.evaluate(train_X, train_y,batch_size=128)
    # Collect prediction results into an array.
    # print(model.predict(train_X[10:20]))
    # print(train_y[10:20])
    # predictions = []
    # labels = []
    # for i in range(TESTING_EXAMPLES):
    #     predictions.append(model.predict(train_X[i]))
    #     labels.append(train_y[i])
    # # Compute RMSE as the evaluation metric.
    # predictions = np.array(predictions).squeeze()
    # labels = np.array(labels).squeeze()
    # Plot the predicted sine curve against the ground truth.
    plt.figure()
    plt.plot(model.predict(test_X), label='predictions')
    plt.plot(test_y, label='real_sin')
    plt.legend()
    plt.show()
|
""" Cisco_IOS_XR_subscriber_srg_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR subscriber\-srg package configuration.
This module contains definitions
for the following management objects\:
subscriber\-redundancy\: Subscriber Redundancy configuration
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
# NOTE(review): this file appears to be ydk-gen generated YANG bindings —
# prefer regenerating over hand-editing; confirm with the model source.
class SrgAddrFamily(Enum):
    """
    SrgAddrFamily (Enum Class)

    Srg addr family

    .. data:: ipv4 = 2

    	IPv4

    .. data:: ipv6 = 10

    	IPv6

    """

    # Values 2/10 presumably mirror Linux AF_INET/AF_INET6 — TODO confirm
    # against the Cisco-IOS-XR-subscriber-srg-cfg YANG model.
    ipv4 = Enum.YLeaf(2, "ipv4")

    ipv6 = Enum.YLeaf(10, "ipv6")


    @staticmethod
    def _meta_info():
        # Lazy import: avoids loading the large generated meta table at module import.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SrgAddrFamily']
class SubscriberRedundancyGroupRole(Enum):
    """
    SubscriberRedundancyGroupRole (Enum Class)

    Subscriber redundancy group role

    .. data:: master = 1

    	Master Role

    .. data:: slave = 2

    	Slave Role

    """

    master = Enum.YLeaf(1, "master")

    slave = Enum.YLeaf(2, "slave")


    @staticmethod
    def _meta_info():
        # Lazy import: avoids loading the large generated meta table at module import.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancyGroupRole']
class SubscriberRedundancyGroupSlaveMode(Enum):
    """
    SubscriberRedundancyGroupSlaveMode (Enum Class)

    Subscriber redundancy group slave mode

    .. data:: warm = 1

    	Warm Mode

    .. data:: hot = 2

    	Hot Mode

    """

    warm = Enum.YLeaf(1, "warm")

    hot = Enum.YLeaf(2, "hot")


    @staticmethod
    def _meta_info():
        # Lazy import: avoids loading the large generated meta table at module import.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancyGroupSlaveMode']
class SubscriberRedundancy(_Entity_):
"""
Subscriber Redundancy configuration
.. attribute:: groups
Table of Group
**type**\: :py:class:`Groups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups>`
.. attribute:: revertive_timer
None
**type**\: :py:class:`RevertiveTimer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.RevertiveTimer>`
.. attribute:: cpe_tracking
Enable
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: enable
Enable Subscriber Redundancy configuration. Deletion of this object also causes deletion of all associated objects under SubscriberRedundancy
**type**\: :py:class:`Empty<ydk.types.Empty>`
**mandatory**\: True
.. attribute:: virtual_mac_prefix
Virtual MAC Prefix for Subscriber Redundancy
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: preferred_role
Set preferred role
**type**\: :py:class:`SubscriberRedundancyGroupRole <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancyGroupRole>`
.. attribute:: source_interface
Source Interface for Redundancy Peer Communication
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: slave_mode
Set slave
**type**\: :py:class:`SubscriberRedundancyGroupSlaveMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancyGroupSlaveMode>`
.. attribute:: hold_timer
Set hold time (in Minutes)
**type**\: int
**range:** 1..65535
**units**\: minute
.. attribute:: sync_timer
Set sync time (in Minutes)
**type**\: int
**range:** 1..255
**units**\: minute
.. attribute:: redundancy_disable
Disable
**type**\: :py:class:`Empty<ydk.types.Empty>`
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'subscriber-srg-cfg'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(SubscriberRedundancy, self).__init__()
self._top_entity = None
self.yang_name = "subscriber-redundancy"
self.yang_parent_name = "Cisco-IOS-XR-subscriber-srg-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("groups", ("groups", SubscriberRedundancy.Groups)), ("revertive-timer", ("revertive_timer", SubscriberRedundancy.RevertiveTimer))])
self.is_presence_container = True
self._leafs = OrderedDict([
('cpe_tracking', (YLeaf(YType.empty, 'cpe-tracking'), ['Empty'])),
('enable', (YLeaf(YType.empty, 'enable'), ['Empty'])),
('virtual_mac_prefix', (YLeaf(YType.str, 'virtual-mac-prefix'), ['str'])),
('preferred_role', (YLeaf(YType.enumeration, 'preferred-role'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupRole', '')])),
('source_interface', (YLeaf(YType.str, 'source-interface'), ['str'])),
('slave_mode', (YLeaf(YType.enumeration, 'slave-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupSlaveMode', '')])),
('hold_timer', (YLeaf(YType.uint32, 'hold-timer'), ['int'])),
('sync_timer', (YLeaf(YType.uint32, 'sync-timer'), ['int'])),
('redundancy_disable', (YLeaf(YType.empty, 'redundancy-disable'), ['Empty'])),
])
self.cpe_tracking = None
self.enable = None
self.virtual_mac_prefix = None
self.preferred_role = None
self.source_interface = None
self.slave_mode = None
self.hold_timer = None
self.sync_timer = None
self.redundancy_disable = None
self.groups = SubscriberRedundancy.Groups()
self.groups.parent = self
self._children_name_map["groups"] = "groups"
self.revertive_timer = SubscriberRedundancy.RevertiveTimer()
self.revertive_timer.parent = self
self._children_name_map["revertive_timer"] = "revertive-timer"
self._segment_path = lambda: "Cisco-IOS-XR-subscriber-srg-cfg:subscriber-redundancy"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(SubscriberRedundancy, ['cpe_tracking', 'enable', 'virtual_mac_prefix', 'preferred_role', 'source_interface', 'slave_mode', 'hold_timer', 'sync_timer', 'redundancy_disable'], name, value)
class Groups(_Entity_):
"""
Table of Group
.. attribute:: group
Redundancy Group configuration
**type**\: list of :py:class:`Group <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group>`
"""
_prefix = 'subscriber-srg-cfg'
_revision = '2017-09-07'
def __init__(self):
    """Build the empty 'groups' container and register its keyed 'group' list child."""
    # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(SubscriberRedundancy.Groups, self).__init__()
    self.yang_name = "groups"
    self.yang_parent_name = "subscriber-redundancy"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # Maps the YANG child name to (python attribute name, generated class).
    self._child_classes = OrderedDict([("group", ("group", SubscriberRedundancy.Groups.Group))])
    self._leafs = OrderedDict()  # 'groups' has no leaf nodes of its own
    self.group = YList(self)  # list of Group entries, keyed by group-id
    self._segment_path = lambda: "groups"
    self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-srg-cfg:subscriber-redundancy/%s" % self._segment_path()
    # Freeze marker set last; presumably consulted by _perform_setattr in YDK core — see __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validating setter."""
    allowed_leafs = []  # 'groups' declares no leaf nodes
    self._perform_setattr(SubscriberRedundancy.Groups, allowed_leafs, name, value)
class Group(_Entity_):
"""
Redundancy Group configuration
.. attribute:: group_id (key)
Group ID
**type**\: int
**range:** 1..4000
.. attribute:: interface_list
List of Interfaces for this Group
**type**\: :py:class:`InterfaceList <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.InterfaceList>`
**presence node**\: True
.. attribute:: peer
None
**type**\: :py:class:`Peer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.Peer>`
.. attribute:: revertive_timer
None
**type**\: :py:class:`RevertiveTimer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.RevertiveTimer>`
.. attribute:: virtual_mac
Virtual MAC Address for this Group
**type**\: :py:class:`VirtualMac <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.VirtualMac>`
.. attribute:: state_control_route
None
**type**\: :py:class:`StateControlRoute <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute>`
.. attribute:: disable_tracking_object
Disable Tracking Object for this Group
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: core_tracking_object
Core Tracking Object for this Group
**type**\: str
.. attribute:: enable
Enable Redundancy Group configuration. Deletion of this object also causes deletion of all associated objects under Group
**type**\: :py:class:`Empty<ydk.types.Empty>`
**mandatory**\: True
.. attribute:: preferred_role
Set preferred role
**type**\: :py:class:`SubscriberRedundancyGroupRole <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancyGroupRole>`
.. attribute:: description
Description for this Group
**type**\: str
.. attribute:: l2tp_source_ip_address
Enter an IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: slave_mode
Set Slave Mode
**type**\: :py:class:`SubscriberRedundancyGroupSlaveMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancyGroupSlaveMode>`
.. attribute:: hold_timer
Set hold time (in Minutes)
**type**\: int
**range:** 1..65535
**units**\: minute
.. attribute:: access_tracking_object
Access Tracking Object for this Group
**type**\: str
.. attribute:: enable_fast_switchover
Enable fast switchover for this Group
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: redundancy_disable
Disable
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'subscriber-srg-cfg'
_revision = '2017-09-07'
def __init__(self):
    """Build one 'group' list entry (keyed by group_id): leaf metadata, child containers, then freeze."""
    # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(SubscriberRedundancy.Groups.Group, self).__init__()
    self.yang_name = "group"
    self.yang_parent_name = "groups"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    # group_id is the YANG list key for this entry (used in _segment_path below).
    self.ylist_key_names = ['group_id']
    # YANG child name -> (python attribute name, generated class).
    self._child_classes = OrderedDict([("interface-list", ("interface_list", SubscriberRedundancy.Groups.Group.InterfaceList)), ("peer", ("peer", SubscriberRedundancy.Groups.Group.Peer)), ("revertive-timer", ("revertive_timer", SubscriberRedundancy.Groups.Group.RevertiveTimer)), ("virtual-mac", ("virtual_mac", SubscriberRedundancy.Groups.Group.VirtualMac)), ("state-control-route", ("state_control_route", SubscriberRedundancy.Groups.Group.StateControlRoute))])
    # Leaf table: python name -> (YLeaf(YANG type, YANG name), accepted python type names).
    self._leafs = OrderedDict([
        ('group_id', (YLeaf(YType.uint32, 'group-id'), ['int'])),
        ('disable_tracking_object', (YLeaf(YType.empty, 'disable-tracking-object'), ['Empty'])),
        ('core_tracking_object', (YLeaf(YType.str, 'core-tracking-object'), ['str'])),
        ('enable', (YLeaf(YType.empty, 'enable'), ['Empty'])),
        ('preferred_role', (YLeaf(YType.enumeration, 'preferred-role'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupRole', '')])),
        ('description', (YLeaf(YType.str, 'description'), ['str'])),
        ('l2tp_source_ip_address', (YLeaf(YType.str, 'l2tp-source-ip-address'), ['str'])),
        ('slave_mode', (YLeaf(YType.enumeration, 'slave-mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SubscriberRedundancyGroupSlaveMode', '')])),
        ('hold_timer', (YLeaf(YType.uint32, 'hold-timer'), ['int'])),
        ('access_tracking_object', (YLeaf(YType.str, 'access-tracking-object'), ['str'])),
        ('enable_fast_switchover', (YLeaf(YType.empty, 'enable-fast-switchover'), ['Empty'])),
        ('redundancy_disable', (YLeaf(YType.empty, 'redundancy-disable'), ['Empty'])),
    ])
    # Leaf values start unset (None) until assigned by the user.
    self.group_id = None
    self.disable_tracking_object = None
    self.core_tracking_object = None
    self.enable = None
    self.preferred_role = None
    self.description = None
    self.l2tp_source_ip_address = None
    self.slave_mode = None
    self.hold_timer = None
    self.access_tracking_object = None
    self.enable_fast_switchover = None
    self.redundancy_disable = None
    # interface-list is a YANG presence container (see InterfaceList docstring),
    # so it stays None until the user explicitly creates it.
    self.interface_list = None
    self._children_name_map["interface_list"] = "interface-list"
    # Non-presence child containers are instantiated eagerly and parented here.
    self.peer = SubscriberRedundancy.Groups.Group.Peer()
    self.peer.parent = self
    self._children_name_map["peer"] = "peer"
    self.revertive_timer = SubscriberRedundancy.Groups.Group.RevertiveTimer()
    self.revertive_timer.parent = self
    self._children_name_map["revertive_timer"] = "revertive-timer"
    self.virtual_mac = SubscriberRedundancy.Groups.Group.VirtualMac()
    self.virtual_mac.parent = self
    self._children_name_map["virtual_mac"] = "virtual-mac"
    self.state_control_route = SubscriberRedundancy.Groups.Group.StateControlRoute()
    self.state_control_route.parent = self
    self._children_name_map["state_control_route"] = "state-control-route"
    # Path segment embeds the list-key predicate, evaluated lazily so it
    # reflects the group_id at access time.
    self._segment_path = lambda: "group" + "[group-id='" + str(self.group_id) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-srg-cfg:subscriber-redundancy/groups/%s" % self._segment_path()
    # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validating setter for this list entry."""
    leaf_names = [
        'group_id',
        'disable_tracking_object',
        'core_tracking_object',
        'enable',
        'preferred_role',
        'description',
        'l2tp_source_ip_address',
        'slave_mode',
        'hold_timer',
        'access_tracking_object',
        'enable_fast_switchover',
        'redundancy_disable',
    ]
    self._perform_setattr(SubscriberRedundancy.Groups.Group, leaf_names, name, value)
class InterfaceList(_Entity_):
    """
    List of Interfaces for this Group
    .. attribute:: interfaces
    Table of Interface
    **type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces>`
    .. attribute:: interface_ranges
    Table of InterfaceRange
    **type**\: :py:class:`InterfaceRanges <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges>`
    .. attribute:: enable
    Enable List of Interfaces for this Group. Deletion of this object also causes deletion of all associated objects under InterfaceList
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    **mandatory**\: True
    This class is a :ref:`presence class<presence-class>`
    """
    _prefix = 'subscriber-srg-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Build the 'interface-list' presence container and its two table children."""
        # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.Groups.Group.InterfaceList, self).__init__()
        self.yang_name = "interface-list"
        self.yang_parent_name = "group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child name -> (python attribute name, generated class).
        self._child_classes = OrderedDict([("interfaces", ("interfaces", SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces)), ("interface-ranges", ("interface_ranges", SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges))])
        # Presence container: its existence in the config is itself meaningful.
        self.is_presence_container = True
        self._leafs = OrderedDict([
            ('enable', (YLeaf(YType.empty, 'enable'), ['Empty'])),
        ])
        self.enable = None
        self.interfaces = SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces()
        self.interfaces.parent = self
        self._children_name_map["interfaces"] = "interfaces"
        self.interface_ranges = SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges()
        self.interface_ranges.parent = self
        self._children_name_map["interface_ranges"] = "interface-ranges"
        self._segment_path = lambda: "interface-list"
        # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(SubscriberRedundancy.Groups.Group.InterfaceList, ['enable'], name, value)


    class Interfaces(_Entity_):
        """
        Table of Interface
        .. attribute:: interface
        Interface for this Group
        **type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface>`
        """
        _prefix = 'subscriber-srg-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Build the empty 'interfaces' table and register its keyed 'interface' list child."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces, self).__init__()
            self.yang_name = "interfaces"
            self.yang_parent_name = "interface-list"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("interface", ("interface", SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface))])
            self._leafs = OrderedDict()  # no leaf nodes under 'interfaces'
            self.interface = YList(self)  # list of Interface entries, keyed by interface-name
            self._segment_path = lambda: "interfaces"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces, [], name, value)


        class Interface(_Entity_):
            """
            Interface for this Group
            .. attribute:: interface_name (key)
            Interface name
            **type**\: str
            **pattern:** [a\-zA\-Z0\-9.\_/\-]+
            .. attribute:: interface_id
            Interface Id for the interface
            **type**\: int
            **range:** 1..65535
            **mandatory**\: True
            """
            _prefix = 'subscriber-srg-cfg'
            _revision = '2017-09-07'

            def __init__(self):
                """Build one 'interface' list entry keyed by interface_name."""
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface, self).__init__()
                self.yang_name = "interface"
                self.yang_parent_name = "interfaces"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = ['interface_name']
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
                    ('interface_id', (YLeaf(YType.uint32, 'interface-id'), ['int'])),
                ])
                self.interface_name = None
                self.interface_id = None
                # List-key predicate is evaluated lazily from the current interface_name.
                self._segment_path = lambda: "interface" + "[interface-name='" + str(self.interface_name) + "']"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface, ['interface_name', 'interface_id'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import avoids loading the large _meta tables until needed.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
                return meta._meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces.Interface']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
            return meta._meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.Interfaces']['meta_info']


    class InterfaceRanges(_Entity_):
        """
        Table of InterfaceRange
        .. attribute:: interface_range
        Interface for this Group
        **type**\: list of :py:class:`InterfaceRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange>`
        """
        _prefix = 'subscriber-srg-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Build the empty 'interface-ranges' table and register its keyed list child."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges, self).__init__()
            self.yang_name = "interface-ranges"
            self.yang_parent_name = "interface-list"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("interface-range", ("interface_range", SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange))])
            self._leafs = OrderedDict()  # no leaf nodes under 'interface-ranges'
            self.interface_range = YList(self)  # list of InterfaceRange entries (3-part key)
            self._segment_path = lambda: "interface-ranges"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges, [], name, value)


        class InterfaceRange(_Entity_):
            """
            Interface for this Group
            .. attribute:: interface_name (key)
            Interface name
            **type**\: str
            **pattern:** [a\-zA\-Z0\-9.\_/\-]+
            .. attribute:: sub_interface_range_start (key)
            Sub Interface Start Range
            **type**\: int
            **range:** 0..2147483647
            .. attribute:: sub_interface_range_end (key)
            Sub Interface End Range
            **type**\: int
            **range:** 0..2147483647
            .. attribute:: interface_id_range_start
            Interface ID Start Range
            **type**\: int
            **range:** 1..65535
            .. attribute:: interface_id_range_end
            Interface ID End Range
            **type**\: int
            **range:** 1..65535
            """
            _prefix = 'subscriber-srg-cfg'
            _revision = '2017-09-07'

            def __init__(self):
                """Build one 'interface-range' entry, keyed by (interface_name, sub-range start, sub-range end)."""
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange, self).__init__()
                self.yang_name = "interface-range"
                self.yang_parent_name = "interface-ranges"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                # Composite YANG list key: all three names below.
                self.ylist_key_names = ['interface_name','sub_interface_range_start','sub_interface_range_end']
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
                    ('sub_interface_range_start', (YLeaf(YType.uint32, 'sub-interface-range-start'), ['int'])),
                    ('sub_interface_range_end', (YLeaf(YType.uint32, 'sub-interface-range-end'), ['int'])),
                    ('interface_id_range_start', (YLeaf(YType.uint32, 'interface-id-range-start'), ['int'])),
                    ('interface_id_range_end', (YLeaf(YType.uint32, 'interface-id-range-end'), ['int'])),
                ])
                self.interface_name = None
                self.sub_interface_range_start = None
                self.sub_interface_range_end = None
                self.interface_id_range_start = None
                self.interface_id_range_end = None
                # Segment path carries one predicate per key leaf, evaluated lazily.
                self._segment_path = lambda: "interface-range" + "[interface-name='" + str(self.interface_name) + "']" + "[sub-interface-range-start='" + str(self.sub_interface_range_start) + "']" + "[sub-interface-range-end='" + str(self.sub_interface_range_end) + "']"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange, ['interface_name', 'sub_interface_range_start', 'sub_interface_range_end', 'interface_id_range_start', 'interface_id_range_end'], name, value)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
                return meta._meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges.InterfaceRange']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
            return meta._meta_table['SubscriberRedundancy.Groups.Group.InterfaceList.InterfaceRanges']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.Groups.Group.InterfaceList']['meta_info']
class Peer(_Entity_):
    """
    None
    .. attribute:: ipaddress
    IPv4 or IPv6 Address of SRG Peer
    **type**\: :py:class:`Ipaddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.Peer.Ipaddress>`
    .. attribute:: route_add_disable
    Set Route add disable
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    """
    _prefix = 'subscriber-srg-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Build the 'peer' container holding the SRG peer address child."""
        # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.Groups.Group.Peer, self).__init__()
        self.yang_name = "peer"
        self.yang_parent_name = "group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("ipaddress", ("ipaddress", SubscriberRedundancy.Groups.Group.Peer.Ipaddress))])
        self._leafs = OrderedDict([
            ('route_add_disable', (YLeaf(YType.empty, 'route-add-disable'), ['Empty'])),
        ])
        self.route_add_disable = None
        self.ipaddress = SubscriberRedundancy.Groups.Group.Peer.Ipaddress()
        self.ipaddress.parent = self
        self._children_name_map["ipaddress"] = "ipaddress"
        self._segment_path = lambda: "peer"
        # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(SubscriberRedundancy.Groups.Group.Peer, ['route_add_disable'], name, value)


    class Ipaddress(_Entity_):
        """
        IPv4 or IPv6 Address of SRG Peer
        .. attribute:: address_family
        Type of IPv4/IPv6 address
        **type**\: :py:class:`SrgAddrFamily <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SrgAddrFamily>`
        .. attribute:: prefix_string
        IPv4/IPv6 address
        **type**\: union of the below types:
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        **type**\: str
        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
        """
        _prefix = 'subscriber-srg-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Build the 'ipaddress' leaf container (address family + union prefix string)."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(SubscriberRedundancy.Groups.Group.Peer.Ipaddress, self).__init__()
            self.yang_name = "ipaddress"
            self.yang_parent_name = "peer"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('address_family', (YLeaf(YType.enumeration, 'address-family'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg', 'SrgAddrFamily', '')])),
                # Two 'str' entries: the YANG type is a union of two string patterns.
                ('prefix_string', (YLeaf(YType.str, 'prefix-string'), ['str','str'])),
            ])
            self.address_family = None
            self.prefix_string = None
            self._segment_path = lambda: "ipaddress"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(SubscriberRedundancy.Groups.Group.Peer.Ipaddress, ['address_family', 'prefix_string'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the large _meta tables until needed.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
            return meta._meta_table['SubscriberRedundancy.Groups.Group.Peer.Ipaddress']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.Groups.Group.Peer']['meta_info']
class RevertiveTimer(_Entity_):
    """
    None
    .. attribute:: max_value
    Value of MAX Revertive Timer
    **type**\: int
    **range:** 1..65535
    .. attribute:: value
    Value of revertive time in minutes
    **type**\: int
    **range:** 1..65535
    **units**\: minute
    """
    _prefix = 'subscriber-srg-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Build the 'revertive-timer' container (max-value and value leafs)."""
        # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.Groups.Group.RevertiveTimer, self).__init__()
        self.yang_name = "revertive-timer"
        self.yang_parent_name = "group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('max_value', (YLeaf(YType.uint32, 'max-value'), ['int'])),
            ('value', (YLeaf(YType.uint32, 'value'), ['int'])),
        ])
        self.max_value = None
        self.value = None
        self._segment_path = lambda: "revertive-timer"
        # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(SubscriberRedundancy.Groups.Group.RevertiveTimer, ['max_value', 'value'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the large _meta tables until needed.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.Groups.Group.RevertiveTimer']['meta_info']
class VirtualMac(_Entity_):
    """
    Virtual MAC Address for this Group
    .. attribute:: address
    Virtual MAC Address for this Group
    **type**\: str
    **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
    .. attribute:: disable
    Disable Virtual MAC Address for this Group
    **type**\: :py:class:`Empty<ydk.types.Empty>`
    """
    _prefix = 'subscriber-srg-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Build the 'virtual-mac' container (address and disable leafs)."""
        # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.Groups.Group.VirtualMac, self).__init__()
        self.yang_name = "virtual-mac"
        self.yang_parent_name = "group"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('address', (YLeaf(YType.str, 'address'), ['str'])),
            ('disable', (YLeaf(YType.empty, 'disable'), ['Empty'])),
        ])
        self.address = None
        self.disable = None
        self._segment_path = lambda: "virtual-mac"
        # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(SubscriberRedundancy.Groups.Group.VirtualMac, ['address', 'disable'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import avoids loading the large _meta tables until needed.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.Groups.Group.VirtualMac']['meta_info']
class StateControlRoute(_Entity_):
"""
None
.. attribute:: ipv4_routes
Table of IPv4Route
**type**\: :py:class:`Ipv4Routes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes>`
.. attribute:: ipv6_route
None
**type**\: :py:class:`Ipv6Route <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route>`
"""
_prefix = 'subscriber-srg-cfg'
_revision = '2017-09-07'
def __init__(self):
    """Build the 'state-control-route' container with IPv4 and IPv6 route table children."""
    # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(SubscriberRedundancy.Groups.Group.StateControlRoute, self).__init__()
    self.yang_name = "state-control-route"
    self.yang_parent_name = "group"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # YANG child name -> (python attribute name, generated class).
    self._child_classes = OrderedDict([("ipv4-routes", ("ipv4_routes", SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes)), ("ipv6-route", ("ipv6_route", SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route))])
    self._leafs = OrderedDict()  # no leaf nodes under 'state-control-route'
    # Child containers are instantiated eagerly and parented here.
    self.ipv4_routes = SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes()
    self.ipv4_routes.parent = self
    self._children_name_map["ipv4_routes"] = "ipv4-routes"
    self.ipv6_route = SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route()
    self.ipv6_route.parent = self
    self._children_name_map["ipv6_route"] = "ipv6-route"
    self._segment_path = lambda: "state-control-route"
    # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validating setter."""
    allowed_leafs = []  # 'state-control-route' declares no leaf nodes
    self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute, allowed_leafs, name, value)
class Ipv4Routes(_Entity_):
    """
    Table of IPv4Route
    .. attribute:: ipv4_route
    None
    **type**\: list of :py:class:`Ipv4Route <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes.Ipv4Route>`
    """
    _prefix = 'subscriber-srg-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Build the empty 'ipv4-routes' table and register its keyed 'ipv4-route' list child."""
        # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes, self).__init__()
        self.yang_name = "ipv4-routes"
        self.yang_parent_name = "state-control-route"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("ipv4-route", ("ipv4_route", SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes.Ipv4Route))])
        self._leafs = OrderedDict()  # no leaf nodes under 'ipv4-routes'
        self.ipv4_route = YList(self)  # list of Ipv4Route entries (3-part key)
        self._segment_path = lambda: "ipv4-routes"
        # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes, [], name, value)


    class Ipv4Route(_Entity_):
        """
        None
        .. attribute:: vrfname (key)
        VRF name
        **type**\: str
        **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
        .. attribute:: prefix_length (key)
        Prefix of the IP Address
        **type**\: int
        **range:** 0..4294967295
        .. attribute:: prefix_string (key)
        IPv4 address with prefix\-length
        **type**\: union of the below types:
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        **type**\: str
        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
        .. attribute:: tagvalue
        Tag value
        **type**\: int
        **range:** 1..4294967295
        **mandatory**\: True
        """
        _prefix = 'subscriber-srg-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Build one 'ipv4-route' entry, keyed by (vrfname, prefix_length, prefix_string)."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes.Ipv4Route, self).__init__()
            self.yang_name = "ipv4-route"
            self.yang_parent_name = "ipv4-routes"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # Composite YANG list key: all three names below.
            self.ylist_key_names = ['vrfname','prefix_length','prefix_string']
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('vrfname', (YLeaf(YType.str, 'vrfname'), ['str'])),
                ('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
                # Two 'str' entries: the YANG type is a union of two string patterns.
                ('prefix_string', (YLeaf(YType.str, 'prefix-string'), ['str','str'])),
                ('tagvalue', (YLeaf(YType.uint32, 'tagvalue'), ['int'])),
            ])
            self.vrfname = None
            self.prefix_length = None
            self.prefix_string = None
            self.tagvalue = None
            # Segment path carries one predicate per key leaf, evaluated lazily.
            self._segment_path = lambda: "ipv4-route" + "[vrfname='" + str(self.vrfname) + "']" + "[prefix-length='" + str(self.prefix_length) + "']" + "[prefix-string='" + str(self.prefix_string) + "']"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes.Ipv4Route, ['vrfname', 'prefix_length', 'prefix_string', 'tagvalue'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the large _meta tables until needed.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
            return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes.Ipv4Route']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv4Routes']['meta_info']
class Ipv6Route(_Entity_):
"""
None
.. attribute:: ipv6na_routes
Table of IPv6NARoute
**type**\: :py:class:`Ipv6naRoutes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes>`
.. attribute:: ipv6pd_routes
Table of IPv6PDRoute
**type**\: :py:class:`Ipv6pdRoutes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes>`
"""
_prefix = 'subscriber-srg-cfg'
_revision = '2017-09-07'
def __init__(self):
    """Build the 'ipv6-route' container with the IPv6 NA and PD route table children."""
    # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route, self).__init__()
    self.yang_name = "ipv6-route"
    self.yang_parent_name = "state-control-route"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # YANG child name -> (python attribute name, generated class).
    self._child_classes = OrderedDict([("ipv6na-routes", ("ipv6na_routes", SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes)), ("ipv6pd-routes", ("ipv6pd_routes", SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes))])
    self._leafs = OrderedDict()  # no leaf nodes under 'ipv6-route'
    # Child containers are instantiated eagerly and parented here.
    self.ipv6na_routes = SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes()
    self.ipv6na_routes.parent = self
    self._children_name_map["ipv6na_routes"] = "ipv6na-routes"
    self.ipv6pd_routes = SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes()
    self.ipv6pd_routes.parent = self
    self._children_name_map["ipv6pd_routes"] = "ipv6pd-routes"
    self._segment_path = lambda: "ipv6-route"
    # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK's validating setter."""
    allowed_leafs = []  # 'ipv6-route' declares no leaf nodes
    self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route, allowed_leafs, name, value)
class Ipv6naRoutes(_Entity_):
    """
    Table of IPv6NARoute
    .. attribute:: ipv6na_route
    None
    **type**\: list of :py:class:`Ipv6naRoute <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes.Ipv6naRoute>`
    """
    _prefix = 'subscriber-srg-cfg'
    _revision = '2017-09-07'

    def __init__(self):
        """Build the empty 'ipv6na-routes' table and register its keyed 'ipv6na-route' list child."""
        # Python 3 vs Python 2 form of the super() call into the generated _Entity_ base.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes, self).__init__()
        self.yang_name = "ipv6na-routes"
        self.yang_parent_name = "ipv6-route"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("ipv6na-route", ("ipv6na_route", SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes.Ipv6naRoute))])
        self._leafs = OrderedDict()  # no leaf nodes under 'ipv6na-routes'
        self.ipv6na_route = YList(self)  # list of Ipv6naRoute entries (3-part key)
        self._segment_path = lambda: "ipv6na-routes"
        # Freeze marker set last; presumably consulted by _perform_setattr in YDK core.
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes, [], name, value)


    class Ipv6naRoute(_Entity_):
        """
        None
        .. attribute:: vrfname (key)
        VRF name
        **type**\: str
        **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
        .. attribute:: prefix_length (key)
        Prefix of the IP Address
        **type**\: int
        **range:** 0..4294967295
        .. attribute:: prefix_string (key)
        IPv6 address with prefix\-length
        **type**\: union of the below types:
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        **type**\: str
        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
        .. attribute:: tagvalue
        Tag value
        **type**\: int
        **range:** 1..4294967295
        **mandatory**\: True
        """
        _prefix = 'subscriber-srg-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            """Build one 'ipv6na-route' entry, keyed by (vrfname, prefix_length, prefix_string)."""
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes.Ipv6naRoute, self).__init__()
            self.yang_name = "ipv6na-route"
            self.yang_parent_name = "ipv6na-routes"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # Composite YANG list key: all three names below.
            self.ylist_key_names = ['vrfname','prefix_length','prefix_string']
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('vrfname', (YLeaf(YType.str, 'vrfname'), ['str'])),
                ('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
                # Two 'str' entries: the YANG type is a union of two string patterns.
                ('prefix_string', (YLeaf(YType.str, 'prefix-string'), ['str','str'])),
                ('tagvalue', (YLeaf(YType.uint32, 'tagvalue'), ['int'])),
            ])
            self.vrfname = None
            self.prefix_length = None
            self.prefix_string = None
            self.tagvalue = None
            # Segment path carries one predicate per key leaf, evaluated lazily.
            self._segment_path = lambda: "ipv6na-route" + "[vrfname='" + str(self.vrfname) + "']" + "[prefix-length='" + str(self.prefix_length) + "']" + "[prefix-string='" + str(self.prefix_string) + "']"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes.Ipv6naRoute, ['vrfname', 'prefix_length', 'prefix_string', 'tagvalue'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import avoids loading the large _meta tables until needed.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
            return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes.Ipv6naRoute']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6naRoutes']['meta_info']
class Ipv6pdRoutes(_Entity_):
    """
    Table of IPv6PDRoute
    .. attribute:: ipv6pd_route
    None
    **type**\: list of :py:class:`Ipv6pdRoute <ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_srg_cfg.SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes.Ipv6pdRoute>`
    """
    _prefix = 'subscriber-srg-cfg'   # YANG module prefix
    _revision = '2017-09-07'         # YANG module revision date

    def __init__(self):
        # Python-3 vs Python-2 compatible base-class initialization.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes, self).__init__()
        self.yang_name = "ipv6pd-routes"
        self.yang_parent_name = "ipv6-route"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Single child: the keyed ipv6pd-route list entries.
        self._child_classes = OrderedDict([("ipv6pd-route", ("ipv6pd_route", SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes.Ipv6pdRoute))])
        self._leafs = OrderedDict()
        self.ipv6pd_route = YList(self)
        self._segment_path = lambda: "ipv6pd-routes"
        # Must be last: freezes the entity; later writes go via _perform_setattr.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route every attribute write through YDK's validation hook.
        self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes, [], name, value)

    class Ipv6pdRoute(_Entity_):
        """
        None
        .. attribute:: vrfname (key)
        VRF name
        **type**\: str
        **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
        .. attribute:: prefix_length (key)
        Prefix of the IP Address
        **type**\: int
        **range:** 0..4294967295
        .. attribute:: prefix_string (key)
        IPv6 address with prefix\-length
        **type**\: union of the below types:
        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        **type**\: str
        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
        .. attribute:: tagvalue
        Tag value
        **type**\: int
        **range:** 1..4294967295
        **mandatory**\: True
        """
        _prefix = 'subscriber-srg-cfg'
        _revision = '2017-09-07'

        def __init__(self):
            # Python-3 vs Python-2 compatible base-class initialization.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes.Ipv6pdRoute, self).__init__()
            self.yang_name = "ipv6pd-route"
            self.yang_parent_name = "ipv6pd-routes"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # The YANG list keys, in key order.
            self.ylist_key_names = ['vrfname','prefix_length','prefix_string']
            self._child_classes = OrderedDict([])  # leaf-only node
            # Leaf descriptors: python attribute name -> (YLeaf, accepted types).
            self._leafs = OrderedDict([
                ('vrfname', (YLeaf(YType.str, 'vrfname'), ['str'])),
                ('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
                ('prefix_string', (YLeaf(YType.str, 'prefix-string'), ['str','str'])),
                ('tagvalue', (YLeaf(YType.uint32, 'tagvalue'), ['int'])),
            ])
            self.vrfname = None
            self.prefix_length = None
            self.prefix_string = None
            self.tagvalue = None
            # XPath segment including the three list-key predicates.
            self._segment_path = lambda: "ipv6pd-route" + "[vrfname='" + str(self.vrfname) + "']" + "[prefix-length='" + str(self.prefix_length) + "']" + "[prefix-string='" + str(self.prefix_string) + "']"
            # Must be last: freezes the entity against arbitrary writes.
            self._is_frozen = True

        def __setattr__(self, name, value):
            # Route every attribute write through YDK's validation hook.
            self._perform_setattr(SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes.Ipv6pdRoute, ['vrfname', 'prefix_length', 'prefix_string', 'tagvalue'], name, value)

        @staticmethod
        def _meta_info():
            # Deferred import of the generated meta table.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
            return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes.Ipv6pdRoute']['meta_info']

    @staticmethod
    def _meta_info():
        # Meta info for the Ipv6pdRoutes container itself.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route.Ipv6pdRoutes']['meta_info']
# NOTE(review): the following four _meta_info staticmethods belong, in order,
# to the successively enclosing generated classes Ipv6Route, StateControlRoute,
# Group and Groups (each one indentation level further out in the original).
@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
    return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute.Ipv6Route']['meta_info']

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
    return meta._meta_table['SubscriberRedundancy.Groups.Group.StateControlRoute']['meta_info']

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
    return meta._meta_table['SubscriberRedundancy.Groups.Group']['meta_info']

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
    return meta._meta_table['SubscriberRedundancy.Groups']['meta_info']
class RevertiveTimer(_Entity_):
    """
    None
    .. attribute:: max_value
    Value of MAX Revertive Timer
    **type**\: int
    **range:** 1..65535
    .. attribute:: value
    Value of revertive time in minutes
    **type**\: int
    **range:** 1..65535
    **units**\: minute
    """
    _prefix = 'subscriber-srg-cfg'   # YANG module prefix
    _revision = '2017-09-07'         # YANG module revision date

    def __init__(self):
        # Python-3 vs Python-2 compatible base-class initialization.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(SubscriberRedundancy.RevertiveTimer, self).__init__()
        self.yang_name = "revertive-timer"
        self.yang_parent_name = "subscriber-redundancy"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container
        # Leaf descriptors: python attribute name -> (YLeaf, accepted types).
        self._leafs = OrderedDict([
            ('max_value', (YLeaf(YType.uint32, 'max-value'), ['int'])),
            ('value', (YLeaf(YType.uint32, 'value'), ['int'])),
        ])
        self.max_value = None
        self.value = None
        self._segment_path = lambda: "revertive-timer"
        # Absolute path is available because has_list_ancestor is False.
        self._absolute_path = lambda: "Cisco-IOS-XR-subscriber-srg-cfg:subscriber-redundancy/%s" % self._segment_path()
        # Must be last: freezes the entity; later writes go via _perform_setattr.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route every attribute write through YDK's validation hook.
        self._perform_setattr(SubscriberRedundancy.RevertiveTimer, ['max_value', 'value'], name, value)

    @staticmethod
    def _meta_info():
        # Deferred import of the generated meta table.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
        return meta._meta_table['SubscriberRedundancy.RevertiveTimer']['meta_info']
# NOTE(review): these belong to the top-level SubscriberRedundancy class
# (whose header lies before this chunk).
def clone_ptr(self):
    # Create and cache a fresh top-level SubscriberRedundancy entity.
    self._top_entity = SubscriberRedundancy()
    return self._top_entity

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_subscriber_srg_cfg as meta
    return meta._meta_table['SubscriberRedundancy']['meta_info']
|
<reponame>TRASAL/ALERT_R3<filename>scripts/cumulative_distribution.py
from math import *
import numpy as np
import json, logging
import argparse
import pandas as pd
from astropy.time import Time, TimeDelta
from astropy import units as u
import datetime
import pylab as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.gridspec as gridspec
from frbpa.utils import get_phase#, get_cycle, get_params
from scipy.optimize import curve_fit
def pl(x, xmin=None):
    """Maximum-likelihood power-law index of the sample *x*.

    Implements the (n-1)/n bias-corrected MLE: (n - 1) / sum(log(x / xmin)).

    Parameters
    ----------
    x : np.ndarray of positive values
    xmin : lower cutoff; defaults to min(x)
    """
    if xmin is None:
        xmin = x.min()
    n = len(x)
    mean_log = np.sum(np.log(x / xmin)) / n
    # Algebraically identical to (n-1)/n * (mean_log)**-1.
    return ((n - 1) / float(n)) / mean_log
def sort_dict(dictionary, key_order):
    """Return a new dict with the items of *dictionary* whose keys appear
    in *key_order*, in that order (keys absent from *dictionary* are skipped).

    Fixes: the second parameter used to be named ``list``, shadowing the
    builtin; all in-file callers pass it positionally, so renaming is safe.
    """
    return {k: dictionary[k] for k in key_order if k in dictionary}
def open_json(data_json):
    """Load the observing-campaign JSON and return its per-telescope data.

    Parameters
    ----------
    data_json : path to a JSON file with keys 'bursts', 'snr',
        'obs_duration', 'obs_startmjds', 'freq_min', 'freq_max', each
        mapping telescope name -> data.

    Returns
    -------
    (burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict,
     fmin_dict, fmax_dict, fcen_dict) with observation durations divided
    by 3600 (seconds -> hours, presumably -- TODO confirm input units) and
    every dict reordered by increasing central frequency.

    Fixes: removed unused locals ``telescopes`` and
    ``new_obs_startmjds_dict``; duration conversion now a comprehension.
    """
    with open(data_json, 'r') as f:
        data = json.load(f)

    # Basic sanity checks on the input file.
    assert 'obs_duration' in data.keys()
    assert 'bursts' in data.keys()
    assert 'obs_startmjds' in data.keys()

    burst_dict = data['bursts']
    snr_dict = data['snr']
    obs_duration_dict = data['obs_duration']
    obs_startmjds_dict = data['obs_startmjds']
    fmin_dict = data['freq_min']
    fmax_dict = data['freq_max']

    assert len(obs_duration_dict.keys()) == len(obs_startmjds_dict.keys())
    assert len(obs_duration_dict.keys()) < 20
    assert len(burst_dict.keys()) < 10
    assert len(fmin_dict.keys()) == len(fmax_dict.keys())

    # Convert durations to hours (one per start time, as before) and compute
    # each telescope's central frequency.
    new_obs_duration_dict = {}
    fcen_dict = {}
    for k, start_times in obs_startmjds_dict.items():
        durations = obs_duration_dict[k]
        new_obs_duration_dict[k] = [durations[i] / (3600)
                                    for i in range(len(start_times))]
        fcen_dict[k] = (fmax_dict[k] + fmin_dict[k]) / 2
    obs_duration_dict = new_obs_duration_dict

    # Sort all dictionaries by central frequency.
    fcen_dict = {k: v for k, v in sorted(fcen_dict.items(),
                                         key=lambda item: item[1])}
    burst_dict = sort_dict(burst_dict, fcen_dict.keys())
    snr_dict = sort_dict(snr_dict, fcen_dict.keys())
    obs_duration_dict = sort_dict(obs_duration_dict, fcen_dict.keys())
    obs_startmjds_dict = sort_dict(obs_startmjds_dict, fcen_dict.keys())
    fmin_dict = sort_dict(fmin_dict, fcen_dict.keys())
    fmax_dict = sort_dict(fmax_dict, fcen_dict.keys())
    return burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict
def fluence_to_energy(fluence, d_L=149, BW=300, f_b=1):
    """
    Converting fluence (Jy ms) into energy (erg)

    Parameters
    ----------
    fluence: float or np.array in Jy ms
    d_L: luminosity distance in Mpc
    BW: bandwidth in MHz
    f_b: beaming fraction

    Returns
    -------
    energy in ergs
    """
    # Attach astropy units, then evaluate E = 4*pi * d_L^2 * f_b * F * BW.
    fluence_q = fluence * u.Jy * u.ms
    dist_q = d_L * u.Mpc
    bw_q = BW * u.MHz
    energy = 4 * pi * dist_q ** 2 * f_b * fluence_q * bw_q
    return energy.to('erg')
def func_powerlaw(x, alpha, c):
    """Simple cumulative power law: N(>x) = c * x**(alpha + 1)."""
    exponent = alpha + 1
    return x ** exponent * c
def brokenpl(x, *p):
    """Broken power law.

    Parameters
    ----------
    x : array-like of positive abscissa values (e.g. fluences)
    p : (c1, xb, a1, a2) -- low-end normalisation, break position and the
        slopes below/above the break

    Returns
    -------
    np.ndarray (float): c1*x**a1 for x < xb, continuously matched to
    c2*x**a2 above the break.

    Fixes: replaced the per-element Python loop with np.piecewise (each
    segment is evaluated only on its own elements, as before); input is
    cast to float so the output dtype matches the original np.zeros result.
    """
    (c1, xb, a1, a2) = p
    # Continuity at the break: both segments meet at x == xb.
    c2 = c1 * xb ** (a1 - a2)
    x = np.asarray(x, dtype=float)
    return np.piecewise(x, [x < xb],
                        [lambda t: c1 * t ** a1,
                         lambda t: c2 * t ** a2])
def brokenpl2(x, *p):
    """Twice-broken power law.

    Parameters
    ----------
    x : array-like of positive abscissa values
    p : (c1, xb1, xb2, a1, a2, a3) -- normalisation, the two break
        positions (xb1 < xb2) and the three segment slopes

    Returns
    -------
    np.ndarray (float): three power-law segments, matched continuously at
    both breaks.

    Fixes: replaced the per-element Python loop with np.piecewise; input is
    cast to float so the output dtype matches the original np.zeros result.
    """
    (c1, xb1, xb2, a1, a2, a3) = p
    # Continuity at each break.
    c2 = c1 * xb1 ** (a1 - a2)
    c3 = c2 * xb2 ** (a2 - a3)
    x = np.asarray(x, dtype=float)
    return np.piecewise(
        x,
        [x < xb1, (x >= xb1) & (x < xb2)],
        [lambda t: c1 * t ** a1,
         lambda t: c2 * t ** a2,
         lambda t: c3 * t ** a3])   # last callable is the default branch
# ------------------------------------------------------------------------- #
# Initial parameters
period = 16.29     # folding period passed to get_phase (days, presumably -- TODO confirm)
ref_mjd = 58369.9  # reference MJD for phase zero
d_L = 149          # luminosity distance (Mpc; matches fluence_to_energy default)
BW = 300           # bandwidth (MHz; matches fluence_to_energy default)
# Opening files
data_json = '/home/ines/Documents/projects/R3/periodicity/r3all_data.json'
# Liam edit
#data_json = './r3all_data.json'
burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_json)
fluence_fn = '/home/ines/Documents/projects/R3/arts/fluxcal/fluence_int.txt'
# Liam edit
#fluence_fn = './fluence_int.txt'
# Structured array with named columns (fint_Jyms, fint_err, width_ms, snr, MJD, ...).
fl = np.genfromtxt(fluence_fn, names=True)
arts_fluence, arts_ferr = [], []
for i in range(len(fl)):
    arts_fluence.append(fl['fint_Jyms'][i])
    arts_ferr.append(fl['fint_err'][i])
# Sorting by fluence: each companion column is reordered using the
# (unsorted) fluence list as the sort key.
arts_width = [x for _,x in sorted(zip(arts_fluence,fl['width_ms']))]
arts_snr = [x for _,x in sorted(zip(arts_fluence,fl['snr']))]
arts_mjd = [x for _,x in sorted(zip(arts_fluence,fl['MJD']))]
arts_ferr = [x for _,x in sorted(zip(arts_fluence,arts_ferr))]
arts_phase = get_phase(fl['MJD'], period, ref_mjd=ref_mjd)
arts_phase = [x for _,x in sorted(zip(arts_fluence,arts_phase))]
# Liam edit: get observing time in each phase bin
arts_time_phase_bin = get_phase(np.array(obs_startmjds_dict['Apertif']), period, ref_mjd=ref_mjd)
arts_obs_duration = np.array(obs_duration_dict['Apertif'])
print("Fluence boxcar", fl['fluence_Jyms'])
print("ARTS fluences", fl['fint_Jyms'])
# Plotting fluence vs. phase
plt.errorbar(arts_phase, arts_fluence, yerr=arts_ferr, fmt='o', color='k',
        zorder=10)
plt.ylabel('Fluence (Jy ms)')
plt.xlabel('Phase')
plt.xlim(0.35,0.6)
plt.ylim(0,1.15*max(arts_fluence))
#plt.show()
# Comparing fluence SNR-width and fluence integral:
# high-S/N bursts keep the integrated fluence, low-S/N the boxcar estimate.
arts_fluence = []
for i in range(len(arts_mjd)):
    j = i+1   # 1-based burst ID on the x axis
    if fl['snr'][i] >= 15:
        plt.errorbar(j, fl['fluence_Jyms'][i], yerr=fl['fluence_err'][i],
                marker='^', color='k', zorder=10)
        plt.errorbar(j, fl['fint_Jyms'][i], yerr=fl['fint_err'][i],
                marker='o', color='c', zorder=10)
        arts_fluence.append(fl['fint_Jyms'][i])
    else:
        plt.errorbar(j, fl['fluence_Jyms'][i], yerr=fl['fluence_err'][i],
                marker='o', color='k', zorder=10)
        plt.errorbar(j, fl['fint_Jyms'][i], yerr=fl['fint_err'][i], marker='^',
                color='c', zorder=10)
        arts_fluence.append(fl['fluence_Jyms'][i])
# Dummy artists for the legend entries.
lines = [plt.plot([], 'o', color='k')[0],
        plt.plot([], 'o', color='c')[0]]
labels=['boxcar', 'integral']
plt.legend(lines, labels)
plt.ylabel('Fluence (Jy ms)')
plt.xlabel('ID')
#plt.show()
# ------------------------------------------------------------------------- #
# Cumulative distribution function
## ARTS
csvname = '/home/ines/Documents/projects/R3/arts/arts_r3_properties.csv'
#csvname = 'arts_r3_properties.csv'
burst_data = np.genfromtxt(csvname, delimiter=',', names=True)
arts_fluence = burst_data['fluence_Jyms']
# Reorder companion columns by fluence before sorting the fluences in place.
arts_snr = [x for _,x in sorted(zip(arts_fluence,burst_data['snr']))]
arts_mjd = [x for _,x in sorted(zip(arts_fluence,burst_data['bary_mjd']))]
arts_ferr = [x for _,x in sorted(zip(arts_fluence,burst_data['fluence_err']))]
arts_phase = get_phase(burst_data['bary_mjd'], period, ref_mjd=ref_mjd)
arts_phase = [x for _,x in sorted(zip(arts_fluence,arts_phase))]
arts_fluence.sort()   # in-place: must happen AFTER the zips above
arts_obs_time = np.sum(obs_duration_dict['Apertif'])  # total Apertif hours
# N(>F) counts and rates: element i is the number of bursts at or above
# the i-th smallest fluence.
cumulative_rate = np.array([(len(arts_fluence)-i)/arts_obs_time
        for i in range(len(arts_fluence))])
cumulative_n = np.array([len(arts_fluence)-i for i in range(len(arts_fluence))])
cumulative_snr = np.array([len(arts_snr)-i for i in range(len(arts_fluence))])
## LOFAR
csvname = '/home/ines/Documents/projects/R3/lofar/lofar_r3_properties.csv'
burst_data = np.genfromtxt(csvname, delimiter=',', names=True)
Tobs_lofar = 48.3        # total LOFAR observing time (hours, presumably -- TODO confirm)
duty_cycle_lofar = 1.0
lofar_fluence = burst_data['fluence_Jyms']
lofar_snr = burst_data['detection_snr']
lofar_fluence.sort()
# do the same for LOFAR
cumulative_n_lofar = np.array([len(lofar_fluence)-i
        for i in range(len(lofar_fluence))])
# Maximum-likelihood power-law slopes for both samples.
print("LOFAR fluence slope %0.2f" % pl(np.array(lofar_fluence)))
print("ARTS fluence slope %0.2f" % pl(np.array(arts_fluence)))
print("LOFAR SNR slope %0.2f" % pl(np.array(lofar_snr)))
print("ARTS SNR slope %0.2f" % pl(np.array(arts_snr)))
# Converting fluence to energy
arts_energy = fluence_to_energy(arts_fluence)
# Fitting CFD to powerlaw and plotting
#cm = plt.cm.get_cmap('twilight')
#cm = ''
fig = plt.figure(figsize=(10,7))
plt.style.use('/home/ines/.config/matplotlib/stylelib/paper.mplstyle')
plt.rcParams.update({
        'lines.linewidth': 1,
        'legend.fontsize': 10,
        'legend.loc': 'lower left'})
gs = gridspec.GridSpec(1,1)
colors = ['#7ECCA5', '#9E0142']   # Apertif green, LOFAR red
ax1 = fig.add_subplot(gs[0, 0])
# ax1.errorbar(arts_fluence, cumulative_n, yerr=np.sqrt(cumulative_n),
#     errorevery=3, zorder=10, linestyle='-', lw=1, marker='o', color='gray',
#     label="All bursts")
ax1.plot(arts_fluence, cumulative_n/arts_obs_time, zorder=10, linestyle='-',
        lw=1, marker='o', color=colors[0], label="All Apertif bursts")
ax1.plot(lofar_fluence, cumulative_n_lofar/Tobs_lofar*duty_cycle_lofar,
        zorder=10, linestyle='-', lw=1,
        marker='s', color=colors[1], label="All LOFAR bursts")
ax1.set_xlabel('Fluence (Jy ms)')
ax1.set_ylabel(r'Rate (>F) hr$^{-1}$')
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlim(7e-1,400)
ax1.set_ylim(1e-3, 1)
# FITTING CDF
# Fitting Apertif to 2 times broken power law
c, x1, x2, a1, a2, a3 = 100, 2.7, 3.5, -0.17, -0.58, -1.38  # initial guess
p0 = [c, x1, x2, a1, a2, a3]
coeff, var = curve_fit(brokenpl2, arts_fluence, cumulative_n, p0=p0,
        sigma=np.sqrt(cumulative_n))   # Poisson-like weights
ax1.plot(np.logspace(-1,2),
        brokenpl2(np.logspace(-1,2), *coeff)/arts_obs_time,
        color='k', alpha=0.4, linestyle='-.', label='Apertif broken pl')
c, x1, x2 = coeff[0], coeff[1], coeff[2]
# Differential slopes: subtract 1 from the cumulative-fit exponents.
a1, a2, a3= coeff[3]-1, coeff[4]-1, coeff[5]-1
(c_err, x1_err, x2_err, a1_err, a2_err, a3_err) = np.sqrt(np.diag(var))
print("Apertif fit\n", coeff, "\n", np.sqrt(np.diag(var)))
# Fitting LOFAR to broken power law
cl, xl, a1l, a2l = 100, 100, -0.15, -1.4   # initial guess
p0 = [cl, xl, a1l, a2l]
coeff, var = curve_fit(brokenpl, lofar_fluence, cumulative_n_lofar, p0=p0,
        sigma=np.sqrt(cumulative_n_lofar))
ax1.plot(np.logspace(1,3),
        brokenpl(np.logspace(1,3), *coeff)/Tobs_lofar*duty_cycle_lofar,
        color='k', alpha=0.4, linestyle='dotted', label='LOFAR broken pl')
xl = coeff[1]   # fitted LOFAR break position
print("LOFAR\n", coeff, "\n", np.sqrt(np.diag(var)))
# Dividing Apertif phase range
phase_range = [0.35, 0.46, 0.51, 0.62]   # bin edges in activity phase
color_test = ['#98C56D', '#34835A', '#17343A']
for i,p in enumerate(phase_range[:-1]):
    c = color_test[i]
    flist = []
    for j,f in enumerate(arts_fluence):
        if arts_phase[j] > p and arts_phase[j] < phase_range[i+1]:
            # Liam edit: convert y-axis into a rate
            # NOTE(review): recomputed every matching burst (loop-invariant);
            # tobs_j stays unbound if a bin has no bursts -- relies on the
            # previous iteration's value. TODO confirm intended.
            arts_time_phase_bin = get_phase(
                    np.array(obs_startmjds_dict['Apertif']), period,
                    ref_mjd=ref_mjd)
            tobs_j = np.sum(arts_obs_duration[np.where(
                    (arts_time_phase_bin<phase_range[i+1]) & \
                    (arts_time_phase_bin>p))[0]])
            flist.append(f)
    leglabel="phase: %0.2f-%0.2f "%(p,phase_range[i+1])
    # NOTE(review): the comprehension's `i` shadows the outer loop index; the
    # division works because tobs_j is a numpy scalar (list / np scalar).
    ax1.plot(flist, ([len(flist)-i for i in range(len(flist))])/tobs_j,
            linestyle='-', marker='', color=c, label=leglabel, markersize=5,
            linewidth=0.8)
ax1.legend()
# Mark the fitted break positions.
ax1.axvline(x1, ymin=0, ymax=1e3, zorder=0, color='k', ls=(0, (5, 1)),
        alpha=0.3)
ax1.axvline(x2, ymin=0, ymax=1e3, zorder=0, color='k', ls=(0, (5, 1)),
        alpha=0.3)
ax1.axvline(xl, ymin=0, ymax=1e3, zorder=0, color='k', ls=(0, (5, 1)),
        alpha=0.3)
plt_fl = '/home/ines/Documents/projects/R3/arts/fluxcal/cdf_fluence.pdf'
#plt_fl = '/home/ines/Documents/PhD/meetings/20210303-Astrolunch_talk/figs/cdf_fluence.png'
#plt_fl = 'cdf_fluence.pdf'
print("Saving figure", plt_fl)
plt.savefig(plt_fl, pad_inches=0, bbox_inches='tight', dpi=200)
plt.show()
|
<reponame>thezakman/CTF-Toolz<filename>Toolz/fimap/src/singleScan.py
#
# This file is part of fimap.
#
# Copyright(c) 2009-2012 <NAME>(<EMAIL>).
# http://fimap.googlecode.com
#
# This file may be licensed under the terms of of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from baseClass import baseClass
from targetScanner import targetScanner
import sys, time
__author__="<NAME>(<EMAIL>)"
__date__ ="$03.09.2009 01:29:37$"
class singleScan(baseClass):
def _load(self):
self.URL = None
self.quite = False
def setURL(self, URL):
self.URL = URL
def setQuite(self, b):
self.quite = b
def scan(self):
try:
self.localLog("SingleScan is testing URL: '%s'" %self.URL)
t = targetScanner(self.config)
t.MonkeyTechnique = self.config["p_monkeymode"]
idx = 0
if (t.prepareTarget(self.URL)):
res = t.testTargetVuln()
if (len(res) == 0):
self.localLog("Target URL isn't affected by any file inclusion bug :(")
else:
for i in res:
report = i[0]
files = i[1]
idx = idx +1
boxarr = []
header = "[%d] Possible File Inclusion"%(idx)
if (report.getLanguage() != None):
header = "[%d] Possible %s-File Inclusion"%(idx, report.getLanguage())
boxarr.append("::REQUEST")
boxarr.append(" [URL] %s"%report.getURL())
if (report.getPostData() != None and report.getPostData() != ""): boxarr.append(" [POST] %s"%report.getPostData())
if (report.getHeader() != None and report.getHeader().keys() > 0):
modkeys = ",".join(report.getHeader().keys())
boxarr.append(" [HEAD SENT] %s"%(modkeys))
boxarr.append("::VULN INFO")
if (report.isPost == 0):
boxarr.append(" [GET PARAM] %s"%report.getVulnKey())
elif (report.isPost == 1):
boxarr.append(" [POSTPARM] %s"%report.getVulnKey())
elif (report.isPost == 2):
boxarr.append(" [VULN HEAD] %s"%report.getVulnHeader())
boxarr.append(" [VULN PARA] %s"%report.getVulnKey())
if (report.isBlindDiscovered()):
boxarr.append(" [PATH] Not received (Blindmode)")
else:
boxarr.append(" [PATH] %s"%report.getServerPath())
if (report.isUnix()):
boxarr.append(" [OS] Unix")
else:
boxarr.append(" [OS] Windows")
boxarr.append(" [TYPE] %s"%report.getType())
if (not report.isBlindDiscovered()):
if (report.isSuffixBreakable() == None):
boxarr.append(" [TRUNCATION] No Need. It's clean.")
else:
if (report.isSuffixBreakable()):
boxarr.append(" [TRUNCATION] Works with '%s'. :)" %(report.getSuffixBreakTechName()))
else:
boxarr.append(" [TRUNCATION] Doesn't work. :(")
else:
if (report.isSuffixBreakable()):
boxarr.append(" [TRUNCATION] Is needed.")
else:
boxarr.append(" [TRUNCATION] Not tested.")
boxarr.append(" [READABLE FILES]")
if (len(files) == 0):
boxarr.append(" No Readable files found :(")
else:
fidx = 0
for file in files:
payload = "%s%s%s"%(report.getPrefix(), file, report.getSurfix())
if (file != payload):
if report.isWindows() and file[1]==":":
file = file[3:]
txt = " [%d] %s -> %s"%(fidx, file, payload)
#if (fidx == 0): txt = txt.strip()
boxarr.append(txt)
else:
txt = " [%d] %s"%(fidx, file)
#if (fidx == 0): txt = txt.strip()
boxarr.append(txt)
fidx = fidx +1
self.drawBox(header, boxarr)
except KeyboardInterrupt:
if (self.quite): # We are in google mode.
print "\nCancelled current target..."
print "Press CTRL+C again in the next second to terminate fimap."
try:
time.sleep(1)
except KeyboardInterrupt:
raise
else: # We are in single mode. Simply raise the exception.
raise
def localLog(self, txt):
if (not self.quite):
print txt |
<reponame>pmulcaire/allennlp
import torch
from torch.nn.utils.rnn import pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
class PytorchSeq2VecWrapper(Seq2VecEncoder):
    """
    Pytorch's RNNs have two outputs: the hidden state for every time step, and the hidden state at
    the last time step for every layer.  We just want the second one as a single output.  This
    wrapper pulls out that output, and adds a :func:`get_output_dim` method, which is useful if you
    want to, e.g., define a linear + softmax layer on top of this to get some distribution over a
    set of labels.  The linear layer needs to know its input dimension before it is called, and you
    can get that from ``get_output_dim``.

    Also, there are lots of ways you could imagine going from an RNN hidden state at every
    timestep to a single vector - you could take the last vector at all layers in the stack, do
    some kind of pooling, take the last vector of the top layer in a stack, or many other options.
    We just take the final hidden state vector, or in the case of a bidirectional RNN cell, we
    concatenate the forward and backward final states together. TODO(mattg): allow for other ways
    of wrapping RNNs.

    In order to be wrapped with this wrapper, a class must have the following members:

    - ``self.input_size: int``
    - ``self.hidden_size: int``
    - ``def forward(inputs: PackedSequence, hidden_state: torch.autograd.Variable) ->
      Tuple[PackedSequence, torch.autograd.Variable]``.
    - ``self.bidirectional: bool`` (optional)

    This is what pytorch's RNN's look like - just make sure your class looks like those, and it
    should work.

    Note that we *require* you to pass sequence lengths when you call this module, to avoid subtle
    bugs around masking.  If you already have a ``PackedSequence`` you can pass ``None`` as the
    second parameter.
    """
    def __init__(self, module: torch.nn.modules.RNNBase) -> None:
        """Wrap *module*, which must be batch-first (checked if the attribute exists)."""
        super(PytorchSeq2VecWrapper, self).__init__()
        self._module = module
        try:
            if not self._module.batch_first:
                raise ConfigurationError("Our encoder semantics assumes batch is always first!")
        except AttributeError:
            # Module has no batch_first attribute; nothing to validate.
            pass

    def get_input_dim(self) -> int:
        """Expected size of the last dimension of the input tensor."""
        return self._module.input_size

    def get_output_dim(self) -> int:
        """Size of the returned vector: hidden_size, doubled if bidirectional."""
        try:
            is_bidirectional = self._module.bidirectional
        except AttributeError:
            is_bidirectional = False
        return self._module.hidden_size * (2 if is_bidirectional else 1)

    def forward(self,  # pylint: disable=arguments-differ
                inputs: torch.Tensor,
                mask: torch.Tensor,
                hidden_state: torch.Tensor = None) -> torch.Tensor:
        """Run the wrapped RNN and return the final hidden state as a
        (batch_size, get_output_dim()) tensor, respecting *mask*."""
        if mask is None:
            # If a mask isn't passed, there is no padding in the batch of instances, so we can just
            # return the last sequence output as the state.  This doesn't work in the case of
            # variable length sequences, as the last state for each element of the batch won't be
            # at the end of the max sequence length, so we have to use the state of the RNN below.
            return self._module(inputs, hidden_state)[0][:, -1, :]

        # In some circumstances you may have sequences of zero length. For example, if this is the
        # embedding layer for a character-based encoding, the original input will have size
        # (batch_size, max_sentence_length, max_word_length, encoding_dim)
        # and then ``TimeDistributed`` will reshape it to
        # (batch_size * max_sentence_length, max_word_length, encoding_dim)
        # in which case all the rows corresponding to word padding will be
        # "empty character sequences".
        #
        # ``pack_padded_sequence`` requires all sequence lengths to be > 0, so here we
        # adjust the ``mask`` so that every sequence has length at least 1. Then after
        # running the RNN we zero out the corresponding rows in the result.

        # First count how many sequences are empty.
        batch_size = mask.size()[0]
        # NOTE(review): `.data[0]` is pre-0.4 PyTorch; modern torch uses
        # `.item()` -- confirm the pinned torch version before changing.
        num_valid = torch.sum(mask[:, 0]).int().data[0]
        # Force every sequence to be length at least one. Need to `.clone()` the mask
        # to avoid a RuntimeError from shared storage.
        if num_valid < batch_size:
            mask = mask.clone()
            mask[:, 0] = 1
        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        sorted_inputs, sorted_sequence_lengths, restoration_indices = sort_batch_by_length(inputs,
                                                                                          sequence_lengths)
        packed_sequence_input = pack_padded_sequence(sorted_inputs,
                                                     sorted_sequence_lengths.data.tolist(),
                                                     batch_first=True)
        # Actually call the module on the sorted PackedSequence.
        _, state = self._module(packed_sequence_input, hidden_state)
        # Deal with the fact the LSTM state is a tuple of (state, memory).
        if isinstance(state, tuple):
            state = state[0]
        # We sorted by length, so if there are invalid rows that need to be zeroed out
        # they will be at the end.  Note that at this point batch_size is the second
        # dimension of state.
        if num_valid < batch_size:
            state[:, num_valid:, :] = 0.
        # Restore the original indices and return the final state of the
        # top layer. Pytorch's recurrent layers return state in the form
        # (num_layers * num_directions, batch_size, hidden_size) regardless
        # of the 'batch_first' flag, so we transpose, extract the relevant
        # layer state (both forward and backward if using bidirectional layers)
        # and return them as a single (batch_size, self.get_output_dim()) tensor.

        # now of shape: (batch_size, num_layers * num_directions, hidden_size).
        unsorted_state = state.transpose(0, 1).index_select(0, restoration_indices)

        # Extract the last hidden vector, including both forward and backward states
        # if the cell is bidirectional. Then reshape by concatenation (in the case
        # we have bidirectional states) or just squash the 1st dimension in the non-
        # bidirectional case. Return tensor has shape (batch_size, hidden_size * num_directions).
        try:
            last_state_index = 2 if self._module.bidirectional else 1
        except AttributeError:
            last_state_index = 1
        last_layer_state = unsorted_state[:, -last_state_index:, :]
        return last_layer_state.contiguous().view([-1, self.get_output_dim()])
|
<reponame>anuragpeshne/voyager<filename>server/main.py<gh_stars>0
# -*- coding: utf-8 -*-
import json
import os
import uuid

from flask import Flask, render_template, redirect, request, url_for
from flask import send_from_directory

import map_generator
# Flask application; static assets are served from /static.
app = Flask(__name__, static_url_path='/static')
# In-memory game sessions, keyed by uuid.UUID (lost on restart).
state = {}
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static directory.

    Fix: ``os`` and ``send_from_directory`` were never imported, so this
    endpoint raised NameError on every request; both are now imported at
    module level.
    """
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route("/")
def index():
    # Landing page: player login / map selection form.
    return render_template('login.html')
@app.route("/maps", methods = ['GET'])
def get_maps():
    """Return the list of selectable map names as a JSON array."""
    available_maps = ['nile_s', 'himalaya_s']
    return json.dumps(available_maps)
@app.route("/game", methods = ['POST'])
def game():
    """Create a new game session from the submitted form and render the board."""
    form = request.form
    print(form)
    selected_map = form['map']
    name = form['name']
    session_id = uuid.uuid4()
    generated = map_generator.generate(selected_map)
    # Register the fresh session before rendering the page.
    state[session_id] = {
        'map_data': generated,
        'cur_pos': generated['start'],
        'energy_spent': 0,
        'pname': name,
        'explored_cell': {}
    }
    grid = generated['map']
    board_dims = [len(grid), len(grid[0])]
    return render_template('game.html',
                           map_dim=board_dims,
                           start=generated['start'],
                           dest=generated['dest'],
                           player_name=name,
                           uuid=session_id)
@app.route("/move", methods = ['POST'])
def move():
    """Validate and apply a player's move.

    Expects JSON ``{'key': session-uuid-string, 'to': [row, col]}``.
    Returns JSON with the peeked neighbour cells and total energy spent,
    or 401 when the target cell is not reachable.

    Fixes: the ``key_ is None`` guard used to run *after*
    ``uuid.UUID(key_)``, which raises TypeError on None, so a missing key
    produced a 500 instead of a redirect; also removed the unused local
    ``explored_neighbours``.
    """
    print("in move", request.json)
    move_data = request.json
    key_ = move_data['key']
    to_cell = move_data['to']
    # Validate the key before attempting to parse it as a UUID.
    if key_ is None:
        return redirect(url_for('index'))
    key_uuid = uuid.UUID(key_)
    if key_uuid not in state:
        return redirect(url_for('index'))
    current_state = state[key_uuid]
    map_data = current_state['map_data']
    map_ = map_data['map']
    map_dim = [len(map_), len(map_[0])]
    neighbours = __get_neighbours(to_cell, map_dim)
    if __validate_move(to_cell, current_state):
        # On a non-winning move, charge the cell's energy cost and mark it
        # explored; either way return the neighbour peek and the total.
        if not __check_victory(to_cell, current_state):
            current_state['energy_spent'] += map_[to_cell[0]][to_cell[1]]
            current_state['explored_cell'][tuple(to_cell)] = True
        return json.dumps({
            'peek_cells': [[[row, col], map_[row][col]] for [row, col] in neighbours],
            'energy_spent' : current_state['energy_spent']
        })
    else:
        msg = "Cell [%d, %d] not reachable" % (to_cell[0], to_cell[1])
        print (msg)
        return msg, 401
def __get_neighbours(cell, dim):
    """Return the 4-connected neighbours of *cell* that lie inside a
    dim[0] x dim[1] grid, as [row, col] lists."""
    row, col = cell[0], cell[1]
    candidates = [[row + 1, col],
                  [row, col + 1],
                  [row - 1, col],
                  [row, col - 1]]
    return [[r, c] for r, c in candidates
            if 0 <= r < dim[0] and 0 <= c < dim[1]]
def __validate_move(dest_cell, current_state):
    """A move is legal on the very first step (player still on the start
    cell) or when the destination is adjacent to the current position."""
    map_data = current_state['map_data']
    grid = map_data['map']
    dims = [len(grid), len(grid[0])]
    if current_state['cur_pos'] == map_data['start']:
        return True
    return dest_cell in __get_neighbours(current_state['cur_pos'], dims)
def __check_victory(to_cell, current_state):
    """Record and report whether the game is won: either *to_cell* is the
    destination (which also sets the sticky 'is_victorious' flag) or the
    session was already marked victorious."""
    reached_dest = to_cell == current_state['map_data']['dest']
    if reached_dest:
        current_state['is_victorious'] = True
    return reached_dest or 'is_victorious' in current_state
if __name__ == "__main__":
    # Development entry point; listens on all interfaces.
    app.run(host='0.0.0.0')
|
<reponame>omari-funzone/commcare-hq
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-04 00:23
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create SQL mirror tables for the commtrack configuration models.

    One table per config section. Each child table uses its OneToOneField to
    SQLCommtrackConfig as the primary key, so a config owns at most one row of
    each section. Table names are pinned via db_table to match the legacy names.
    """

    dependencies = [
        ('commtrack', '0002_stockstate_last_modified_form_id'),
    ]

    operations = [
        # Per-action configuration rows; FK to the parent config is added below.
        migrations.CreateModel(
            name='SQLActionConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=40, null=True)),
                ('subaction', models.CharField(max_length=40, null=True)),
                ('_keyword', models.CharField(max_length=40, null=True)),
                ('caption', models.CharField(max_length=40, null=True)),
            ],
            options={
                'db_table': 'commtrack_actionconfig',
            },
        ),
        # Parent config table: one row per domain (domain is unique);
        # couch_id links back to the original couch document.
        migrations.CreateModel(
            name='SQLCommtrackConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(db_index=True, max_length=126, unique=True)),
                ('couch_id', models.CharField(db_index=True, max_length=126, null=True)),
                ('use_auto_emergency_levels', models.BooleanField(default=False)),
                ('sync_consumption_fixtures', models.BooleanField(default=False)),
                ('use_auto_consumption', models.BooleanField(default=False)),
                ('individual_consumption_defaults', models.BooleanField(default=False)),
            ],
            options={
                'db_table': 'commtrack_commtrackconfig',
            },
        ),
        # Alert settings; the OneToOneField doubles as the primary key.
        migrations.CreateModel(
            name='SQLAlertConfig',
            fields=[
                ('stock_out_facilities', models.BooleanField(default=False)),
                ('stock_out_commodities', models.BooleanField(default=False)),
                ('stock_out_rates', models.BooleanField(default=False)),
                ('non_report', models.BooleanField(default=False)),
                ('commtrack_config', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,
                                                          primary_key=True, serialize=False,
                                                          to='commtrack.SQLCommtrackConfig')),
            ],
            options={
                'db_table': 'commtrack_alertconfig',
            },
        ),
        # Consumption-rate calculation settings.
        migrations.CreateModel(
            name='SQLConsumptionConfig',
            fields=[
                ('min_transactions', models.IntegerField(default=2, null=True)),
                ('min_window', models.IntegerField(default=10, null=True)),
                ('optimal_window', models.IntegerField(null=True)),
                ('use_supply_point_type_default_consumption', models.BooleanField(default=False)),
                ('exclude_invalid_periods', models.BooleanField(default=False)),
                ('commtrack_config', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,
                                                          primary_key=True, serialize=False,
                                                          to='commtrack.SQLCommtrackConfig')),
            ],
            options={
                'db_table': 'commtrack_consumptionconfig',
            },
        ),
        # Stock-level thresholds, expressed as multiples of monthly consumption.
        migrations.CreateModel(
            name='SQLStockLevelsConfig',
            fields=[
                ('emergency_level', models.DecimalField(decimal_places=2, default=0.5, max_digits=4)),
                ('understock_threshold', models.DecimalField(decimal_places=2, default=1.5, max_digits=4)),
                ('overstock_threshold', models.DecimalField(decimal_places=2, default=3, max_digits=4)),
                ('commtrack_config', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,
                                                          primary_key=True, serialize=False,
                                                          to='commtrack.SQLCommtrackConfig')),
            ],
            options={
                'db_table': 'commtrack_stocklevelsconfig',
            },
        ),
        # OTA-restore behaviour for stock data; JSON fields hold free-form mappings.
        migrations.CreateModel(
            name='SQLStockRestoreConfig',
            fields=[
                ('section_to_consumption_types',
                 django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True)),
                ('force_consumption_case_types',
                 django.contrib.postgres.fields.jsonb.JSONField(default=list, null=True)),
                ('use_dynamic_product_list', models.BooleanField(default=False)),
                ('commtrack_config', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,
                                                          primary_key=True, serialize=False,
                                                          to='commtrack.SQLCommtrackConfig')),
            ],
            options={
                'db_table': 'commtrack_stockrestoreconfig',
            },
        ),
        # Many actions per config, added after both tables exist.
        migrations.AddField(
            model_name='sqlactionconfig',
            name='commtrack_config',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
                                    to='commtrack.SQLCommtrackConfig'),
        ),
        # Preserve action ordering per parent config (adds an _order column).
        migrations.AlterOrderWithRespectTo(
            name='sqlactionconfig',
            order_with_respect_to='commtrack_config',
        ),
    ]
|
<gh_stars>0
import random
from time import time
from matplotlib import pyplot as plt
# Number of faces on the die used by the simulations and solvers below.
DICE_SIDES = 6
# Rolls <= FAILS end the turn with zero banked points (classic pig: rolling a 1 busts).
FAILS = 1
def simulation(turns=2 * 10**6, max_strat=70, dice_sides=None, fails=None):
    """
    Monte-Carlo simulation for the game pig, for strategies of the form "stop
    bidding once the round score reaches a particular goal".

    :param turns: int, number of turns to simulate; must be positive.
    :param max_strat: int, expected returns are produced for each goal score up to this number.
    :param dice_sides: int or None, sides per die; defaults to the module-level DICE_SIDES.
    :param fails: int or None, highest roll that busts the turn; defaults to the module-level FAILS.
    :return: list of max_strat floats; entry i is the estimated expected single-turn
        return for the goal score i + 1.
    :raises ValueError: if turns is not positive.
    """
    if turns <= 0:
        # Previously this fell through to a NameError on the loop variable.
        raise ValueError("turns must be a positive integer")
    if dice_sides is None:
        dice_sides = DICE_SIDES
    if fails is None:
        fails = FAILS
    hist = [0] * max_strat
    for turn in range(1, turns+1):
        if not turn % 10**4:
            print("{} turns finished {:.2f}% done".format(turn, (turn/turns)*100), end="\r")
        turn_score = 0
        while turn_score < max_strat:
            roll = random.randint(1, dice_sides)
            if roll <= fails:
                break
            # Every goal in (turn_score, turn_score + roll] would have banked
            # turn_score + roll points on this turn; credit those strategies.
            for ndx in range(turn_score, min(turn_score + roll, max_strat)):
                hist[ndx] += (turn_score + roll)
            turn_score += roll
    print("")
    # BUG FIX: previously divided by the leaked loop variable `turn` instead of
    # the intended total `turns` (equal only by accident after a full loop).
    return [x / turns for x in hist]
def solver(max_strat=70, dice_sides=None, fails=None):
    """
    Exact solver for the game pig, for strategies of the form "stop bidding once
    the round score reaches a particular goal".

    :param max_strat: int, expected returns are produced for each goal score up to this number.
    :param dice_sides: int or None, sides per die; defaults to the module-level DICE_SIDES.
    :param fails: int or None, highest roll that busts the turn; defaults to the module-level FAILS.
    :return: list of max_strat floats; entry i is the exact expected single-turn
        return for the goal score i + 1.
    :raises ValueError: if max_strat < dice_sides.
    """
    if dice_sides is None:
        dice_sides = DICE_SIDES
    if fails is None:
        fails = FAILS
    if max_strat < dice_sides:
        raise ValueError("need to increase max strategy for this one, fam.")
    # hist[s]: probability of ever sitting on exactly s points during a turn
    # (before busting), built up by dynamic programming over the safe rolls.
    hist = [0] * (max_strat + 1)
    hist[0] = 1
    solved = [0] * (max_strat + 1)
    for ndx in range(1, len(hist)):
        for roll in range(fails + 1, dice_sides + 1):
            # BUG FIX: ndx - roll can be negative, which in Python silently
            # indexes from the END of the list. That was only accidentally
            # harmless (the tail is still zero at that point, given
            # max_strat >= dice_sides); make the bound explicit.
            if ndx - roll >= 0:
                hist[ndx] += hist[ndx - roll] / dice_sides
    for ndx in range(1, len(hist)):
        for offset in range(1, dice_sides + 1):
            if ndx - offset >= 0:
                for roll in range(fails + 1, dice_sides + 1):
                    # Only rolls that carry the score from below the goal to at
                    # or above it (offset <= roll) bank points for this goal.
                    if offset <= roll:
                        solved[ndx] += hist[ndx - offset] * (ndx - offset + roll) / dice_sides
    return solved[1:]
def solver2(max_strat=70, dice_sides=None, fails=None):
    """
    Exact solver for the game pig that returns the full outcome distribution
    (not just the expectation) of a single turn for each goal-score strategy.

    :param max_strat: int, distributions are produced for each goal score up to this number.
    :param dice_sides: int or None, sides per die; defaults to the module-level DICE_SIDES.
    :param fails: int or None, highest roll that busts the turn; defaults to the module-level FAILS.
    :return: list of max_strat dicts; entry i maps each possible banked point total to
        its probability for the goal score i + 1, with key 0 holding the probability
        of busting (scoring nothing).
    :raises ValueError: if max_strat < dice_sides.
    """
    if dice_sides is None:
        dice_sides = DICE_SIDES
    if fails is None:
        fails = FAILS
    if max_strat < dice_sides:
        raise ValueError("need to increase max strategy for this one, fam.")
    # hist[s]: probability of ever sitting on exactly s points during a turn.
    hist = [0] * (max_strat + 1)
    hist[0] = 1
    # BUG FIX: was [{}] * (max_strat + 1), which aliases ONE shared dict into
    # every slot; only harmless because used slots were rebound below.
    solved = [{} for _ in range(max_strat + 1)]
    for ndx in range(1, len(hist)):
        for roll in range(fails + 1, dice_sides + 1):
            # BUG FIX: guard against negative indices wrapping to the list tail.
            if ndx - roll >= 0:
                hist[ndx] += hist[ndx - roll] / dice_sides
    for ndx in range(1, len(hist)):
        outcomes = {0: 0}
        for offset in range(1, dice_sides + 1):
            if ndx - offset >= 0:
                for roll in range(fails + 1, dice_sides + 1):
                    if offset <= roll:
                        total = ndx - offset + roll
                        outcomes[total] = outcomes.get(total, 0) + hist[ndx - offset] / dice_sides
        # Whatever probability mass is unaccounted for is the chance of busting.
        outcomes[0] = 1. - sum(v for k, v in outcomes.items() if k != 0)
        solved[ndx] = outcomes
    return solved[1:]
def game_strat_approximater(chances, goal=200, scans=30):
    """
    Fixed-iteration value-iteration over the pig game states.

    For every pair (my score, opponent score) this picks the single-turn goal
    strategy that maximises the chance of winning, given the opponent plays the
    same table, and refines the whole table a fixed number of times.

    :param chances: output of solver2 for the dice configuration in play.
    :param goal: int, total points needed to win.
    :param scans: int, number of full passes over the state table.
    :return: (expected, strats) — two goal x goal lists; expected[x][y] is the
        estimated win chance with x points against an opponent on y points, and
        strats[x][y] is the 1-based index of the best strategy there.
    """
    expected = [[.25] * goal for _ in range(goal)]
    strats = [[0.] * goal for _ in range(goal)]
    for scan in range(scans):
        print("{} scans complete {:.2f}% done".format(scan, scan / scans * 100), end="\r")
        for my_score in range(goal - 1, -1, -1):
            for opp_score in range(goal - 1, -1, -1):
                best_ndx = None
                best_val = 0.  # win chance is never negative, so 0. is a safe floor
                for ndx, strat in enumerate(chances):
                    val = 0.
                    for pts in strat:
                        if pts + my_score >= goal:
                            val += strat[pts]
                        else:
                            val += strat[pts] * (1. - expected[opp_score][my_score + pts])
                    if val > best_val:
                        best_val = val
                        best_ndx = ndx + 1
                expected[my_score][opp_score] = best_val
                strats[my_score][opp_score] = best_ndx
    print("{} scans complete {:.2f}% done".format(scan + 1, (scan + 1) / scans * 100))
    return expected, strats
def game_strat_approximater2(chances, goal=200, threshold=10**-3):
    """
    Convergence-driven value-iteration over the pig game states.

    Same computation as game_strat_approximater, but instead of a fixed number
    of passes it keeps sweeping until no state's win chance moved by more than
    *threshold* during a full pass.

    :param chances: output of solver2 for the dice configuration in play.
    :param goal: int, total points needed to win.
    :param threshold: float, maximum allowed per-state change in the final sweep.
    :return: (expected, strats) — two goal x goal lists; expected[x][y] is the
        estimated win chance with x points against an opponent on y points, and
        strats[x][y] is the 1-based index of the best strategy there.
    """
    expected = [[.25] * goal for _ in range(goal)]
    strats = [[0] * goal for _ in range(goal)]
    delta = 1
    sweeps = 0
    while delta >= threshold:
        print("{} scans complete delta = {:.6f}".format(sweeps, delta), end="\r")
        delta = 0
        sweeps += 1
        for my_score in range(goal - 1, -1, -1):
            for opp_score in range(goal - 1, -1, -1):
                best_ndx = None
                best_val = 0.  # win chance is never negative, so 0. is a safe floor
                for ndx, strat in enumerate(chances):
                    val = 0.
                    for pts in strat:
                        if pts + my_score >= goal:
                            val += strat[pts]
                        else:
                            val += strat[pts] * (1. - expected[opp_score][my_score + pts])
                    if val >= best_val:
                        best_val = val
                        best_ndx = ndx + 1
                delta = max(delta, abs(expected[my_score][opp_score] - best_val))
                expected[my_score][opp_score] = best_val
                strats[my_score][opp_score] = best_ndx
    print("{} scans complete delta = {:.6f}".format(sweeps, delta))
    return expected, strats
def main():
    """Time both strategy approximators on a 100-point game and plot the
    strategy table produced by the convergence-based one."""
    goal = 100
    chances = solver2(max_strat=goal)
    start = time()
    expected1, strat1 = game_strat_approximater(chances, goal=goal, scans=30)
    mid = time()
    expected2, strat2 = game_strat_approximater2(chances, goal=goal, threshold=10 ** -3)
    end = time()
    print("approx. 1:\t{:.3f}s\napprox. 2:\t{:.3f}s".format(mid - start, end - mid))
    plt.imshow(strat2)
    plt.show()
# Entry point: run the timing comparison when executed as a script.
if __name__ == "__main__":
    main()
|
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponseForbidden
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.utils.safestring import mark_safe
from apps.home.forms import CustomUserCreationForm, AvatarChangeForm, CustomUserChangeForm
# TODO: add class creation request
from apps.public_api.models import Post, Comment
# Create your views here.
def index(request):
    """Home page; authenticated users also get their course list in the context."""
    if not request.user.is_authenticated:
        return render(request, "home/index.html")
    courses = [str(course) for course in request.user.profile.courses.all()]
    return render(request, "home/index.html", {'courses': courses})
def register(request):
    """Sign-up view.

    GET renders an empty registration form; POST validates it, creates the
    user, logs them in and redirects home. Other methods are forbidden.
    An invalid POST falls through to re-render the bound form with errors.
    """
    if request.method == "GET":
        # BUG FIX: the form CLASS (not an instance) was passed to the template.
        # It only rendered because Django templates call callables; instantiate
        # it explicitly so the unbound form is what the template receives.
        form = CustomUserCreationForm()
    elif request.method == "POST":
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            return redirect(reverse("homePage"))
    else:
        return HttpResponseForbidden()
    return render(
        request, "registration/register.html",
        {"form": form}
    )
# DONE: ability to see others profiles
@login_required
def view_profile(request, username=None):
    """Show a user's profile; with no username, show the requester's own."""
    if username:
        profile_owner = get_object_or_404(User, username=username)
        is_own = False
    else:
        profile_owner = request.user
        is_own = True
    course_names = ", ".join(str(course) for course in profile_owner.profile.courses.all())
    return render(request, 'home/profile.html',
                  {'user': profile_owner, 'courses': course_names, 'current': is_own})
@login_required
def edit_profile(request):
    """Edit the logged-in user's account details.

    The avatar form is rendered alongside but submitted via change_avatar.
    An invalid POST falls through to re-render the bound form with errors.
    """
    avatar_form = AvatarChangeForm(instance=request.user.profile)
    if request.method == 'POST':
        form = CustomUserChangeForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect(reverse('profile'))
    else:
        form = CustomUserChangeForm(instance=request.user)
    return render(request, 'home/edit_profile.html',
                  {'form': form, 'second_form': avatar_form, 'files': True})
@login_required
def change_avatar(request):
    """Form to change the user's avatar/profile image (rename needed).

    An invalid POST falls through to re-render the bound form with errors.
    """
    if request.method != 'POST':
        form = AvatarChangeForm(instance=request.user.profile)
        return render(request, 'home/avatar_change.html', {'form': form})
    form = AvatarChangeForm(request.POST, request.FILES,
                            instance=request.user.profile)
    if form.is_valid():
        form.save()
        return redirect(reverse('profile'))
    return render(request, 'home/avatar_change.html', {'form': form})
def view_post_detail(request, pk=1):
    """Render a single post in detail, with its comments and author."""
    post = get_object_or_404(Post, pk=pk)  # 404 on unknown post id
    post_comments = Comment.objects.all().filter(post__id=post.id)
    context = {
        # NOTE(review): pk comes from the URL; mark_safe is only safe if the
        # URLconf guarantees it is an int — confirm against urls.py.
        "pk": mark_safe(pk),
        'post': post,
        'user': request.user,
        'comments': post_comments,
        'author': post.author,
    }
    return render(request, 'home/posts_detail.html', context)
def view_posts(request):
    """List all posts; include the viewer's courses when authenticated."""
    if not request.user.is_authenticated:
        return render(request, "home/posts.html")
    courses = [str(course) for course in request.user.profile.courses.all()]
    return render(request, "home/posts.html", {'courses': courses})
def view_user_posts(request, pk=None):
    """Show the posts belonging to one user; defaults to the requester."""
    owner_pk = request.user.pk if pk is None else pk
    owner = get_object_or_404(User, pk=owner_pk)
    return render(request, 'home/view_user_posts.html',
                  {"pk": owner.username, "user": owner})
|
<gh_stars>0
from PyQt6 import QtCore, QtGui, QtWidgets
from look_password import PasswordEdit
import sqlite3
class Ui_EditLibrarian(object):
    """Qt-Designer-style UI class for the "edit librarian" window.

    setupUi builds the widget tree onto the EditLibrarian widget; the other
    methods implement the search / edit / cancel behaviour against the
    sqlite LIBRARIAN table. *Login* is the login window to return to.
    """

    # Shared style sheets — these identical strings were previously duplicated
    # on every line edit and every push button.
    LINE_EDIT_STYLE = ("QLineEdit {\n"
                       " color: #000000;\n"
                       " font: 15pt \"Verdana\";\n"
                       " border: None;\n"
                       " border-bottom-color: white;\n"
                       " border-radius: 10px;\n"
                       " padding: 0 8px;\n"
                       " background: #CBB1A0;\n"
                       " selection-background-color: darkgray;\n"
                       "}")
    BUTTON_STYLE = ("QPushButton{\n"
                    " color: #842a2d;\n"
                    " font: 17pt \"Franklin Gothic Book\";\n"
                    " border: 2px solid #842a2d;\n"
                    " padding: 2px;\n"
                    " border-radius: 10px;\n"
                    " opacity: 100;\n"
                    "}\n"
                    "\n"
                    "QPushButton:hover{\n"
                    " background-color: #842a2d;\n"
                    " color: #CBB1A0;\n"
                    "}\n"
                    "QPushButton:pressed{\n"
                    " background-color: #b34044;\n"
                    " border: 5px solid #b34044;\n"
                    "}")

    # --- small construction helpers (factor out repeated Designer boilerplate) ---

    def _make_font(self, point_size, bold=False, italic=False):
        """Build a QFont with the given size and emphasis."""
        font = QtGui.QFont()
        font.setPointSize(point_size)
        font.setBold(bold)
        font.setItalic(italic)
        return font

    def _make_separator(self, name):
        """Create one of the horizontal rule frames used between form rows."""
        line = QtWidgets.QFrame(self.border)
        line.setStyleSheet("border: 2px solid #842a2d;")
        line.setFrameShape(QtWidgets.QFrame.Shape.HLine)
        line.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
        line.setObjectName(name)
        return line

    def _make_field_label(self, name):
        """Create a right-aligned 12pt field label."""
        label = QtWidgets.QLabel(self.border)
        label.setFont(self._make_font(12))
        label.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight |
                           QtCore.Qt.AlignmentFlag.AlignTrailing |
                           QtCore.Qt.AlignmentFlag.AlignVCenter)
        label.setObjectName(name)
        return label

    def _style_line_edit(self, widget, name, max_length=15):
        """Apply the shared font/stylesheet/length settings to a line edit."""
        widget.setFont(self._make_font(15))
        widget.setStyleSheet(self.LINE_EDIT_STYLE)
        widget.setMaxLength(max_length)
        widget.setClearButtonEnabled(True)
        widget.setObjectName(name)

    def _make_button(self, name):
        """Create a push button with the shared style and pointing-hand cursor."""
        button = QtWidgets.QPushButton(self.border)
        button.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
        button.setStyleSheet(self.BUTTON_STYLE)
        button.setObjectName(name)
        return button

    def setupUi(self, EditLibrarian, Login):
        """Build the widget tree onto *EditLibrarian* and wire up signals."""
        EditLibrarian.setObjectName("EditLibrarian")
        EditLibrarian.resize(518, 516)
        EditLibrarian.setStyleSheet(
            ".QWidget{background-color: #CBB1A0;border-radius: 10px}")
        EditLibrarian.setWindowFlags(QtCore.Qt.WindowType.FramelessWindowHint)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(EditLibrarian)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.border = QtWidgets.QFrame(EditLibrarian)
        self.border.setStyleSheet("#border{\n"
                                  " color: #842a2d;\n"
                                  "}")
        self.border.setFrameShape(QtWidgets.QFrame.Shape.Box)
        self.border.setLineWidth(5)
        self.border.setMidLineWidth(5)
        self.border.setObjectName("border")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.border)
        self.verticalLayout_4.setObjectName("verticalLayout_4")

        # Title.
        self.label_title = QtWidgets.QLabel(self.border)
        self.label_title.setFont(self._make_font(20, bold=True))
        self.label_title.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
        self.label_title.setObjectName("label_title")
        self.verticalLayout_4.addWidget(self.label_title)
        self.verticalLayout_4.addItem(QtWidgets.QSpacerItem(
            20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding))

        # Username search row (label + line edit + SEARCH button between rules).
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.line_8 = self._make_separator("line_8")
        self.verticalLayout_3.addWidget(self.line_8)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label_name_2 = self._make_field_label("label_name_2")
        self.horizontalLayout.addWidget(self.label_name_2)
        self.input_search_username = QtWidgets.QLineEdit(self.border)
        self._style_line_edit(self.input_search_username, "input_search_username")
        self.horizontalLayout.addWidget(self.input_search_username)
        self.search_username_button = self._make_button("search_username_button")
        self.search_username_button.setEnabled(True)
        self.search_username_button.clicked.connect(self.search_for_username)
        self.horizontalLayout.addWidget(self.search_username_button)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        self.line_9 = self._make_separator("line_9")
        self.verticalLayout_3.addWidget(self.line_9)
        self.verticalLayout_4.addLayout(self.verticalLayout_3)
        self.verticalLayout_4.addItem(QtWidgets.QSpacerItem(
            17, 19, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding))

        # Editable fields grid (grid positions preserved from the generated UI).
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.label_name = self._make_field_label("label_name")
        self.gridLayout.addWidget(self.label_name, 1, 0, 1, 1)
        self.line_5 = self._make_separator("line_5")
        self.gridLayout.addWidget(self.line_5, 6, 0, 1, 2)
        self.input_password = PasswordEdit(self.border)
        self._style_line_edit(self.input_password, "input_password")
        self.input_password.setText("")
        self.gridLayout.addWidget(self.input_password, 5, 1, 1, 1)
        self.input_password_confirm = PasswordEdit(self.border)
        self._style_line_edit(self.input_password_confirm, "input_password_confirm")
        self.input_password_confirm.setText("")
        self.gridLayout.addWidget(self.input_password_confirm, 7, 1, 1, 1)
        self.line_2 = self._make_separator("line_2")
        self.gridLayout.addWidget(self.line_2, 2, 0, 1, 2)
        self.label_username = self._make_field_label("label_username")
        self.gridLayout.addWidget(self.label_username, 3, 0, 1, 1)
        self.input_username = QtWidgets.QLineEdit(self.border)
        self._style_line_edit(self.input_username, "input_username")
        self.gridLayout.addWidget(self.input_username, 3, 1, 1, 1)
        self.line_4 = self._make_separator("line_4")
        self.gridLayout.addWidget(self.line_4, 4, 0, 1, 2)
        self.label_pass = self._make_field_label("label_pass")
        self.gridLayout.addWidget(self.label_pass, 5, 0, 1, 1)
        self.input_fullname = QtWidgets.QLineEdit(self.border)
        self._style_line_edit(self.input_fullname, "input_fullname", max_length=50)
        self.gridLayout.addWidget(self.input_fullname, 1, 1, 1, 1)
        self.label_retype_pass = self._make_field_label("label_retype_pass")
        self.gridLayout.addWidget(self.label_retype_pass, 7, 0, 1, 1)
        self.line_6 = self._make_separator("line_6")
        self.gridLayout.addWidget(self.line_6, 8, 0, 1, 2)
        self.line_7 = self._make_separator("line_7")
        self.gridLayout.addWidget(self.line_7, 0, 0, 1, 2)
        self.verticalLayout_4.addLayout(self.gridLayout)

        # Red italic validation-message label.
        self.label = QtWidgets.QLabel(self.border)
        self.label.setFont(self._make_font(12, bold=True, italic=True))
        self.label.setStyleSheet("color: rgb(255, 0, 0);")
        self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
        self.label.setObjectName("label")
        self.verticalLayout_4.addWidget(self.label)
        self.verticalLayout_4.addItem(QtWidgets.QSpacerItem(
            20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding))

        # Action buttons.
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.edit_librarian = self._make_button("edit_librarian")
        self.edit_librarian.setEnabled(True)
        self.edit_librarian.clicked.connect(
            lambda: self.validate_names(EditLibrarian, Login))
        self.verticalLayout.addWidget(self.edit_librarian)
        self.cancel_button = self._make_button("cancel_button")
        self.cancel_button.clicked.connect(
            lambda: self.close_actions(EditLibrarian, Login))
        self.verticalLayout.addWidget(self.cancel_button)
        self.verticalLayout_4.addLayout(self.verticalLayout)
        self.verticalLayout_2.addWidget(self.border)

        self.retranslateUi(EditLibrarian)
        QtCore.QMetaObject.connectSlotsByName(EditLibrarian)
        self.init_disabled_inputs()

    def close_actions(self, EditLibrarian, Login):
        """Close this window and return to the login window."""
        EditLibrarian.close()
        Login.show()

    def search_for_username(self):
        """Look the typed username up in the LIBRARIAN table.

        On a hit, enable the edit fields (username itself stays locked — it is
        the update key) and pre-fill them; otherwise show a not-found dialog.
        """
        username_to_search = self.input_search_username.text()
        # BUG FIX: the connection was previously never closed; also dropped the
        # pointless enumerate() wrapping around the result rows.
        con = sqlite3.connect('./db/library.db')
        try:
            query = "SELECT Librarian_Username, Librarian_Name FROM LIBRARIAN;"
            rows = list(con.execute(query))
        finally:
            con.close()
        match = next((row for row in rows if row[0] == username_to_search), None)
        if match is not None:
            self.enable_inputs()
            self.input_username.setDisabled(True)
            self.populate_data(username=match[0], fullname=match[1])
        else:
            self.informative_message(
                text="Username was not found in the database!",
                subtext="Check for typographical errors or data doesn't exist in the database at all.",
                window_title="Username Not Found"
            )

    def validate_names(self, EditLibrarian, Login):
        """Validate the form fields, then hand off to update_data."""
        username_search = self.input_search_username.text()
        fullname = self.input_fullname.text()
        username = self.input_username.text()
        init_password = self.input_password.text()
        chck_password = self.input_password_confirm.text()
        if (len(fullname) == 0 or len(username) == 0 or len(init_password) == 0 or len(chck_password) == 0):
            self.label.setText("Input field/s are empty!")
        elif (init_password != chck_password):
            self.label.setText("Passwords do not match!")
        elif (len(init_password) < 8 or len(chck_password) < 8):
            self.label.setText("Passwords must be at least 8 characters!")
        else:
            self.label.setText("")
            # BUG FIX: the password keyword previously referenced the undefined
            # placeholder name `<PASSWORD>` (a redaction artifact), which is a
            # syntax error; pass the validated password.
            self.update_data(fullname=fullname, username=username, password=init_password,
                             EditLibrarian=EditLibrarian, username_search=username_search, Login=Login)

    def update_data(self, fullname, username, password, EditLibrarian, username_search, Login):
        """Confirm with the user, then write the new name/password to sqlite.

        *username_search* is accepted for interface compatibility but unused:
        the username field is locked during editing, so *username* is the key.
        NOTE(review): the password is stored (and shown in the dialog) in
        plaintext — consider hashing before persisting.
        """
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Icon.Information)
        msg.setText(
            "You'll be updating the following information in the database: ")
        msg.setInformativeText(
            f"Name:\t\t{fullname}\nUsername:\t{username}\nPassword:\t{password}\n\nAre you sure?")
        msg.setStandardButtons(
            QtWidgets.QMessageBox.StandardButton.Yes | QtWidgets.QMessageBox.StandardButton.No)
        msg.setWindowTitle("Confirmation Check")
        result = msg.exec()
        if (result == QtWidgets.QMessageBox.StandardButton.Yes):
            # Parameterized UPDATE keyed on the (locked) username.
            con = sqlite3.connect('./db/library.db')
            cur = con.cursor()
            query = """
                    UPDATE LIBRARIAN
                    SET Librarian_Name = ?, Librarian_Password = ?
                    WHERE Librarian_Username = ?;
                    """
            interpolate_data = [fullname, password, username]
            cur.execute(query, interpolate_data)
            con.commit()
            con.close()
            self.informative_message(
                text="Data Updated Successfully!",
                subtext="You'll be redirected now to the Login Window...",
                window_title="Updated Successfully",
                icon_type="information"
            )
            self.close_actions(EditLibrarian, Login)
        elif (result == QtWidgets.QMessageBox.StandardButton.No):
            pass

    def informative_message(self, text, subtext, window_title, icon_type="critical"):
        """Show a modal message box; icon is critical unless icon_type == "information"."""
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Icon.Critical)
        if icon_type == "information":
            msg.setIcon(QtWidgets.QMessageBox.Icon.Information)
        msg.setText(text)
        msg.setInformativeText(subtext)
        msg.setWindowTitle(window_title)
        msg.exec()

    def init_disabled_inputs(self):
        """Start with every edit field and the submit button disabled (until a search hit)."""
        self.input_username.setEnabled(False)
        self.input_fullname.setEnabled(False)
        self.input_password.setEnabled(False)
        self.input_password_confirm.setEnabled(False)
        self.edit_librarian.setEnabled(False)

    def enable_inputs(self):
        """Enable the edit fields and the submit button after a successful search."""
        self.input_username.setEnabled(True)
        self.input_fullname.setEnabled(True)
        self.input_password.setEnabled(True)
        self.input_password_confirm.setEnabled(True)
        self.edit_librarian.setEnabled(True)

    def populate_data(self, username, fullname):
        """Pre-fill the username and full-name fields from a database row."""
        self.input_username.setText(username)
        self.input_fullname.setText(fullname)

    def retranslateUi(self, EditLibrarian):
        """Set all user-visible strings (Designer-style translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        EditLibrarian.setWindowTitle(_translate(
            "EditLibrarian", "Sign Up Librarian"))
        self.label_title.setText(_translate(
            "EditLibrarian", "Change Librarian Information"))
        self.label_name_2.setText(_translate(
            "EditLibrarian", "Search Username:"))
        self.search_username_button.setText(
            _translate("EditLibrarian", "SEARCH"))
        self.label_name.setText(_translate("EditLibrarian", "Full Name:"))
        self.label_username.setText(_translate("EditLibrarian", "Username:"))
        self.label_pass.setText(_translate(
            "EditLibrarian", "Preferred Password:"))
        self.label_retype_pass.setText(
            _translate("EditLibrarian", "Retype Password:"))
        self.label.setText(_translate("EditLibrarian", ""))
        self.edit_librarian.setText(_translate(
            "EditLibrarian", "Edit Librarian"))
        self.cancel_button.setText(_translate("EditLibrarian", "Cancel"))
|
"""Compare soil-moisture availability between two GroIMP runs and plot the
two interpolated fields side by side (run_1 on x>0, run_1c on x<0).

Fixes vs. original: Python-2 `print` statements converted to py3 calls,
`np.float` (removed in NumPy 1.24) replaced with `float`, files closed via
`with`, and the builtin name `list` is no longer shadowed.
"""
import numpy as np
import matplotlib.pyplot as plt
import os, sys
# NOTE(review): scipy.interpolate.interp2d is deprecated (removed in
# SciPy 1.14) — consider RegularGridInterpolator when upgrading SciPy.
from scipy.interpolate import interp2d
from pylab import *

filelist = []
#error = []
biomrsd1 = []
biomrsd2 = []
dirname1 = "/home/renato/groimp_efficient/run_1/"
dirname2 = "/home/renato/groimp_efficient/run_1c/jules/"
day_list = [94]  # renamed from 'list' to avoid shadowing the builtin
for i in range(1, 2):
    #for i in day_list:
    filelist.append("feddes.ivs")
#print(filelist)
for fname in filelist:
    counter = filelist.index(fname)
    #counter = 93
    # Read the same output file from both runs; 'with' guarantees the
    # handles are closed (the original leaked them).
    with open(os.path.join(dirname1, fname), "r") as f1:
        data1 = f1.readlines()
    with open(os.path.join(dirname2, fname), "r") as f2:
        data2 = f2.readlines()
    x1 = []
    y1 = []
    z1 = []
    h_w1 = []
    p_w1 = []
    s_w1 = []
    theta_w1 = []
    transp1 = []
    evap1 = []
    x2 = []
    y2 = []
    z2 = []
    h_w2 = []
    p_w2 = []
    s_w2 = []
    theta_w2 = []
    transp2 = []
    evap2 = []
    # First three lines are headers; parse the whitespace-separated columns.
    for i in range(3, len(data1)):
        #print(data1[i])
        line = data1[i].strip()
        columns = data1[i].split()
        x1.append(str(columns[0]))
        y1.append(str(columns[1]))
        z1.append(str(columns[2]))
        h_w1.append(str(columns[3]))
        p_w1.append(str(columns[4]))
        s_w1.append(str(columns[5]))
        theta_w1.append(str(columns[6]))
        transp1.append(str(columns[7]))
        evap1.append(str(columns[8]))
    for i in range(3, len(data2)):
        #print(data2[i])
        line = data2[i].strip()
        columns = data2[i].split()
        x2.append(str(columns[0]))
        y2.append(str(columns[1]))
        z2.append(str(columns[2]))
        h_w2.append(str(columns[3]))
        p_w2.append(str(columns[4]))
        s_w2.append(str(columns[5]))
        theta_w2.append(str(columns[6]))
        transp2.append(str(columns[7]))
        evap2.append(str(columns[8]))
    ndata = 50
    # Regular grids for the two runs (positive and negative x half-planes).
    x1 = np.linspace(0., 2., ndata)
    x2 = np.linspace(-2., 0., ndata)
    z = np.linspace(0., 2., ndata)
    # NOTE(review): despite the names, the plotted quantity is theta
    # (soil moisture), reshaped from column 6 — confirm intentional.
    transp1 = np.reshape(theta_w1, (-1, ndata))
    transp2 = np.reshape(theta_w2, (-1, ndata))
    X1, Y = meshgrid(x1, z)
    X2, Y = meshgrid(x2, z)
    # simple fast plot
    #plt.pcolor(X, Y, RSD, vmin=0, vmax=20)
    #plt.colorbar()
    #plt.savefig('images/RSD_%s.png' %counter)
    #plt.close("all")
    output_array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 30, 40, 50, 60]
    # scipy interp. cubic
    f1 = interp2d(x1, z, transp1, kind='cubic')
    f2 = interp2d(x2, z, transp2, kind='cubic')
    xnew1 = np.arange(0, 2., .01)
    xnew2 = np.arange(-2., 0., .01)
    ynew = np.arange(0, 2., .01)
    data1 = f1(xnew1, ynew)
    data2 = f2(xnew2, ynew)
    Xn1, Yn = np.meshgrid(xnew1, ynew)
    Xn2, Yn = np.meshgrid(xnew2, ynew)
    #cs = plt.pcolormesh(Xn1, Yn, data1, cmap='jet', vmin=min(data1.min(),data2.min()), vmax=max(data1.max(),data2.max()))
    #cs = plt.pcolormesh(Xn2, Yn, data2, cmap='jet', vmin=min(data1.min(),data2.min()), vmax=max(data1.max(),data2.max()))
    cs = plt.pcolormesh(Xn1, Yn, data1, cmap='jet', vmin=0.15, vmax=0.25)
    cs = plt.pcolormesh(Xn2, Yn, data2, cmap='jet', vmin=0.15, vmax=0.25)
    print(min(data1.min(), data2.min()), max(data1.max(), data2.max()))
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('Soil moisture availability', rotation=270, labelpad=20)
    #cbar.ax.set_ylabel('Root water uptake (m$^{-2}$m$^{-3}$)', rotation=270, labelpad=20)
    #plt.xlabel("x (m)", labelpad=20)
    plt.ylabel("z (m)", labelpad=20)
    xname = [-1.0, 1.0]
    labels = ['hydraulic head = -100', 'hydraulic head = -5']
    plt.xticks(xname, labels)
    plt.ylim(0., 2.)
    plt.xlim(-2., 2.)
    #plt.title('DAY = %d'%output_array[counter])
    plt.title('DAY = %d' % (counter + 1))
    plt.tight_layout()
    #plt.savefig('/home/renato/groimp_efficient/run_1c/paper_fig/transp_%02d.png' %(counter + 1),dpi = 300)
    plt.savefig('/home/renato/groimp_efficient/run_1c/paper_fig/feddes_ivs_minus_100.png')
    #plt.show()
    print('Figure transp_%02d.png saved sucessfully!' % (counter + 1))
    plt.close("all")
    #error.append(np.sum(RSD))
    # np.float was removed from NumPy; builtin float is the drop-in.
    biomrsd1.append(np.sum(np.array(transp1).astype(float)))
    biomrsd2.append(np.sum(np.array(transp2).astype(float)))
    sys.exit()
# NOTE(review): unreachable while the sys.exit() above is active
# (single-day mode) — kept for the multi-day workflow.
biomrsd2 = np.array(biomrsd2) / (100 * 100)
biomrsd1 = np.array(biomrsd1) / (100 * 100)
plt.plot(biomrsd2, label='Beta factor')
plt.plot(biomrsd1, label='No limitation')
plt.xlabel("Time (days)", labelpad=20)
plt.ylabel("Integrated soil moisture availability", labelpad=20)
#plt.ylabel("Integrated root water uptake (m$^{-2}$m$^{-3}$)", labelpad=20)
# plt.title('DAY = 94')
plt.legend()
plt.title('Cereal')
plt.tight_layout()
plt.savefig('/home/renato/groimp_efficient/run_1c/paper_fig/theta_total.png')
plt.show()
sys.exit()
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Module for frame handling classes.
"""
from abc import ABC, abstractmethod
from typing import Union, List, Optional, Tuple
import numpy as np
from qiskit import QiskitError
from qiskit.quantum_info.operators import Operator
from qiskit.quantum_info.operators.predicates import is_hermitian_matrix
from qiskit_dynamics.dispatch import Array
from qiskit_dynamics.type_utils import to_array
class BaseFrame(ABC):
    r"""Abstract base class for core frame handling functionality.
    A 'frame' is given by an anti-Hermitian matrix :math:`F`, specified
    either directly, or in terms of a Hermitian matrix :math:`H` with
    :math:`F = -iH`. Frames have relevance within the context of linear
    matrix differential equations (LMDEs), which are of the form:
    .. math::
        \dot{y}(t) = G(t)y(t).
    For the above DE, 'entering the frame' specified by :math:`F`
    corresponds to a change of state variable
    :math:`y(t) \mapsto z(t) = e^{-tF}y(t)`.
    Using the definition, we may write down a differential equation for
    :math:`z(t)`:
    .. math::
        \dot{z}(t) &= -F z(t) + e^{-tF}G(t)y(t) \\
        &= (e^{-tF}G(t)e^{tF} - F)z(t)
    In some cases it is computationally easier to solve for :math:`z(t)`
    than it is to solve for :math:`y(t)`.
    While entering a frame is mathematically well-defined for arbitrary
    matrices :math:`F`, this interface assumes that :math:`F` is
    anti-Hermitian, ensuring beneficial properties:
    - :math:`F` is unitarily diagonalizable.
    - :math:`e^{\pm tF}` is easily inverted by taking the adjoint.
    - The frame transformation is norm preserving.
    That :math:`F` is diagonalizable is especially important, as :math:`e^{tF}`
    will need repeated evaluation for different :math:`t` (e.g. at every RHS
    sample point when solving a DE), so it is useful to work in a basis in
    which :math:`F` is diagonal to minimize the cost of this.
    Given an anti-Hermitian matrix :math:`F`, this class offers functions
    for:
    - Bringing a "state" into/out of the frame:
      :math:`t, y \mapsto e^{\mp tF}y`
    - Bringing an "operator" into/out of the frame:
      :math:`t, A \mapsto e^{\mp tF}Ae^{\pm tF}`
    - Bringing a generator for a BMDE into/out of the frame:
      :math:`t, G \mapsto e^{\mp tF}Ge^{\pm tF} - F`
    It also contains functions for bringing states/operators into/out of
    the basis in which :math:`F` is diagonalized, which we refer to as the
    "frame basis". All previously mentioned functions also include optional
    arguments specifying whether the input/output are meant to be in the
    frame basis. This is to facilitate use in solvers in which working
    completely in the frame basis is beneficial to minimize costs associated
    with evaluation of :math:`e^{tF}`.
    Finally, this class offers support for evaluating linear combinations of
    operators with coefficients with carrier frequencies, along with frequency
    cutoffs for implementing the Rotating Wave Approximation (RWA). Frame
    information and carrier frequency information are intrinsically tied
    together in this context.
    Note: all abstract doc strings are written in a `numpy` style
    """
    @property
    @abstractmethod
    def frame_operator(self) -> Union[Operator, Array]:
        """The original frame operator."""
    @property
    @abstractmethod
    def frame_diag(self) -> Array:
        """Diagonal of the frame operator as a 1d array."""
    @property
    @abstractmethod
    def frame_basis(self) -> Array:
        r"""Array containing the unitary :math:`U` that diagonalizes the
        frame operator, i.e. :math:`U` such that :math:`F = U D U^\dagger`.
        """
    @property
    @abstractmethod
    def frame_basis_adjoint(self) -> Array:
        r"""Adjoint of ``self.frame_basis``."""
    @abstractmethod
    def state_into_frame_basis(self, y: Array) -> Array:
        r"""Take a state into the frame basis, i.e. return
        ``self.frame_basis_adjoint @ y``.
        Args:
            y: the state
        Returns:
            Array: the state in the frame basis
        """
    @abstractmethod
    def state_out_of_frame_basis(self, y: Array) -> Array:
        r"""Take a state out of the frame basis, i.e.
        ``return self.frame_basis @ y``.
        Args:
            y: the state
        Returns:
            Array: the state out of the frame basis
        """
    @abstractmethod
    def operator_into_frame_basis(self, op: Union[Operator, Array]) -> Array:
        r"""Take an operator into the frame basis, i.e. return
        ``self.frame_basis_adjoint @ A @ self.frame_basis``
        Args:
            op: the operator or array of operators.
        Returns:
            Array: the operator in the frame basis
        """
    @abstractmethod
    def operator_out_of_frame_basis(self, op: Union[Operator, Array]) -> Array:
        r"""Take an operator out of the frame basis, i.e. return
        ``self.frame_basis @ to_array(op) @ self.frame_basis_adjoint``.
        Args:
            op: the operator or array of operators.
        Returns:
            Array: the operator out of the frame basis
        """
    @abstractmethod
    def state_into_frame(
        self,
        t: float,
        y: Array,
        y_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ) -> Array:
        r"""Take a state into the frame, i.e. return ``exp(-tF) @ y``.
        Args:
            t: time
            y: state (array of appropriate size)
            y_in_frame_basis: whether or not the array y is already in
                              the basis in which the frame is diagonal
            return_in_frame_basis: whether or not to return the result
                                   in the frame basis
        Returns:
            Array: state in frame
        """
    def state_out_of_frame(
        self,
        t: float,
        y: Array,
        y_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ) -> Array:
        r"""Take a state out of the frame, i.e. ``return exp(tF) @ y``.
        Default implementation is to call ``self.state_into_frame``
        with the time negated, since :math:`e^{tF} = (e^{-tF})^{-1}`.
        Args:
            t: time
            y: state (array of appropriate size)
            y_in_frame_basis: whether or not the array y is already in
                              the basis in which the frame is diagonal
            return_in_frame_basis: whether or not to return the result
                                   in the frame basis
        Returns:
            Array: state out of frame
        """
        return self.state_into_frame(-t, y, y_in_frame_basis, return_in_frame_basis)
    @abstractmethod
    def _conjugate_and_add(
        self,
        t: float,
        operator: Array,
        op_to_add_in_fb: Optional[Array] = None,
        operator_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ) -> Array:
        r"""Generalized helper function for taking operators and generators
        into/out of the frame.
        Given operator :math:`G`, and ``op_to_add_in_fb`` :math:`B`, returns
        :math:`exp(-tF)Gexp(tF) + B`, where :math:`B` is assumed to be
        specified in the frame basis.
        Args:
            t: time.
            operator: The operator G above.
            op_to_add_in_fb: The operator B above.
            operator_in_frame_basis: Whether G is specified in the frame basis.
            return_in_frame_basis: Whether the returned result should be in the
                                   frame basis.
        Returns:
            Array:
        """
    def operator_into_frame(
        self,
        t: float,
        operator: Union[Operator, Array],
        operator_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ) -> Array:
        r"""Bring an operator into the frame, i.e. return
        ``exp(-tF) @ operator @ exp(tF)``
        Default implementation is to use ``self._conjugate_and_add``
        with no additive term.
        Args:
            t: time
            operator: array of appropriate size
            operator_in_frame_basis: whether or not the operator is already in
                                     the basis in which the frame is diagonal
            return_in_frame_basis: whether or not to return the result
                                   in the frame basis
        Returns:
            Array: operator in frame
        """
        return self._conjugate_and_add(
            t,
            operator,
            operator_in_frame_basis=operator_in_frame_basis,
            return_in_frame_basis=return_in_frame_basis,
        )
    def operator_out_of_frame(
        self,
        t: float,
        operator: Union[Operator, Array],
        operator_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ):
        r"""Bring an operator out of the frame, i.e. return
        ``exp(tF) @ operator @ exp(-tF)``.
        Default implementation is to use `self.operator_into_frame`
        with the time negated.
        Args:
            t: time
            operator: array of appropriate size
            operator_in_frame_basis: whether or not the operator is already in
                                     the basis in which the frame is diagonal
            return_in_frame_basis: whether or not to return the result
                                   in the frame basis
        Returns:
            Array: operator out of frame
        """
        return self.operator_into_frame(
            -t,
            operator,
            operator_in_frame_basis=operator_in_frame_basis,
            return_in_frame_basis=return_in_frame_basis,
        )
    def generator_into_frame(
        self,
        t: float,
        operator: Union[Operator, Array],
        operator_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ):
        r"""Take a generator into the frame, i.e. return
        ``exp(-tF) @ operator @ exp(tF) - F``.
        Default implementation is to use `self._conjugate_and_add`.
        Args:
            t: time
            operator: generator (array of appropriate size)
            operator_in_frame_basis: whether or not the generator is already in
                                     the basis in which the frame is diagonal
            return_in_frame_basis: whether or not to return the result
                                   in the frame basis
        Returns:
            Array: generator in frame
        """
        if self.frame_operator is None:
            return to_array(operator)
        else:
            # conjugate and subtract the frame diagonal
            return self._conjugate_and_add(
                t,
                operator,
                op_to_add_in_fb=-np.diag(self.frame_diag),
                operator_in_frame_basis=operator_in_frame_basis,
                return_in_frame_basis=return_in_frame_basis,
            )
    def generator_out_of_frame(
        self,
        t: float,
        operator: Union[Operator, Array],
        operator_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ) -> Array:
        r"""Take a generator out of the frame, i.e. return
        ``exp(tF) @ operator @ exp(-tF) + F``.
        Default implementation is to use `self._conjugate_and_add`
        with the time negated.
        Args:
            t: time
            operator: generator (array of appropriate size)
            operator_in_frame_basis: whether or not the operator is already in
                                     the basis in which the frame is diagonal
            return_in_frame_basis: whether or not to return the result
                                   in the frame basis
        Returns:
            Array: generator out of frame
        """
        if self.frame_operator is None:
            return to_array(operator)
        else:
            # conjugate and add the frame diagonal
            return self._conjugate_and_add(
                -t,
                operator,
                op_to_add_in_fb=Array(np.diag(self.frame_diag)),
                operator_in_frame_basis=operator_in_frame_basis,
                return_in_frame_basis=return_in_frame_basis,
            )
    @abstractmethod
    def operators_into_frame_basis_with_cutoff(
        self,
        operators: Union[Array, List[Operator]],
        cutoff_freq: Optional[float] = None,
        carrier_freqs: Optional[Array] = None,
    ) -> Tuple[Array]:
        r"""Transform operators into the frame basis, and return two lists of
        operators: one with the 'frequency cutoff' and one with 'conjugate
        frequency cutoff' (explained below). This serves as a helper function
        for evaluating a time-dependent operator :math:`A(t)` specified as a
        linear combination of terms with carrier frequencies, in the frame
        :math:`F` with a cutoff frequency (in the frame basis).
        In particular, this function assumes the operator :math:`A(t)` is
        specified as:
        .. math::
            A(t) = \sum_j Re[f_j(t) e^{i 2 \pi \nu_j t}] A_j
        For some functions :math:`f_j`, carrier frequencies :math:`\nu_j`,
        and operators :math:`A_j`.
        Assume we are already in a basis in which :math:`F` is diagonal, and
        let :math:`D=F`. As described elsewhere in the docstrings for this
        class, evaluating :math:`A(t)` in this frame at a time :math:`t`
        means computing :math:`\exp(-t D)A(t)\exp(tD)`. The benefit of working
        in the basis in which the frame is diagonal is that this computation
        simplifies to:
        .. math::
            [\exp( (-d_j + d_k) t)] \odot A(t),
        where above :math:`[\exp( (-d_j + d_k) t)]` denotes the matrix whose
        :math:`(j,k)` entry is :math:`\exp( (-d_j + d_k) t)`, and :math:`\odot`
        denotes entrywise multiplication.
        Evaluating the above with 'frequency cutoffs' requires expanding
        :math:`A(t)` into its linear combination. A single term in the sum
        (dropping the summation subscript) is:
        .. math::
            Re[f(t) e^{i 2 \pi \nu t}] [\exp( (-d_j + d_k) t)] \odot A.
        Next, we expand this further using
        .. math::
            Re[f(t) e^{i 2 \pi \nu t}] =
            \frac{1}{2}(f(t) e^{i 2 \pi \nu t} +
            \overline{f(t)} e^{-i 2 \pi \nu t})
        to get:
        .. math::
            \frac{1}{2}f(t) e^{i 2 \pi \nu t} [\exp( (-d_j + d_k) t)] \odot A +
            \frac{1}{2}\overline{f(t)} e^{-i 2 \pi \nu t}
            [\exp( (-d_j + d_k) t)] \odot A
        Examining the first term in the sum, the 'frequency' associated with
        matrix element :math:`(j,k)` is
        :math:`\nu + \frac{Im[-d_j + d_k]}{2 \pi}`, and similarly for the
        second term: :math:`-\nu + \frac{Im[-d_j + d_k]}{2 \pi}`.
        Evaluating the above expression with a 'frequency cutoff' :math:`\nu_*`
        means computing it, but setting all matrix elements in either term
        with a frequency above :math:`\nu_*` to zero. This can be achieved
        by defining two matrices :math:`A^\pm` to be equal to :math:`A`,
        except the :math:`(j,k)` is set to zero if
        :math:`\pm\nu + \frac{Im[-d_j + d_k]}{2 \pi} \geq \nu_*`.
        Thus, the above expression is evaluated with frequency cutoff via
        .. math::
            \frac{1}{2}f(t) e^{i 2 \pi \nu t} [\exp( (-d_j + d_k) t)] \odot A^+
            + \frac{1}{2}\overline{f(t)} e^{-i 2 \pi \nu t}
            [\exp( (-d_j + d_k) t)] \odot A^-
        Relative to the initial list of operators :math:`A_j`, this function
        returns two lists of matrices as a 3d array: :math:`A_j^+` and
        :math:`A_j^-`, corresponding to :math:`A_j` with frequency cutoffs and
        'conjugate' frequency cutoffs, in the basis in which the frame has
        been diagonalized.
        To use the output of this function to evaluate the original operator
        :math:`A(t)` in the frame, compute the linear combination
        .. math::
            \frac{1}{2} \sum_j f_j(t) e^{i 2 \pi \nu t} A_j^+
            + \overline{f(t)} e^{-i 2 \pi \nu t} A_j^-
        then use `self.operator_into_frame` or `self.generator_into_frame`
        to apply the frame transformation as required, using
        `operator_in_frame=True`.
        Args:
            operators: list of operators
            cutoff_freq: cutoff frequency
            carrier_freqs: list of carrier frequencies
        Returns:
            Tuple[Array, Array]: The operators with frequency cutoff
                                 and conjugate frequency cutoff.
        """
class Frame(BaseFrame):
    """Concrete implementation of `BaseFrame` implemented
    using `Array`.
    """
    def __init__(
        self,
        frame_operator: Union[BaseFrame, Operator, Array],
        atol: float = 1e-10,
        rtol: float = 1e-10,
    ):
        """Initialize with a frame operator.
        Args:
            frame_operator: the frame operator, must be either
                            Hermitian or anti-Hermitian.
            atol: absolute tolerance when verifying that the frame_operator is
                  Hermitian or anti-Hermitian.
            rtol: relative tolerance when verifying that the frame_operator is
                  Hermitian or anti-Hermitian.
        """
        # unwrap a frame object into its underlying operator
        if issubclass(type(frame_operator), BaseFrame):
            frame_operator = frame_operator.frame_operator
        # keep the user-supplied operator; None means "no frame" and all
        # transformations below become identity maps
        self._frame_operator = frame_operator
        frame_operator = to_array(frame_operator)
        if frame_operator is None:
            self._dim = None
            self._frame_diag = None
            self._frame_basis = None
            self._frame_basis_adjoint = None
        # if frame_operator is a 1d array, assume already diagonalized
        elif frame_operator.ndim == 1:
            # verify Hermitian or anti-Hermitian
            # if Hermitian convert to anti-Hermitian
            frame_operator = _is_herm_or_anti_herm(frame_operator, atol=atol, rtol=rtol)
            self._frame_diag = Array(frame_operator)
            self._frame_basis = Array(np.eye(len(frame_operator)))
            self._frame_basis_adjoint = self.frame_basis
            self._dim = len(self._frame_diag)
        # if not, diagonalize it
        else:
            # verify Hermitian or anti-Hermitian
            # if Hermitian convert to anti-Hermitian
            frame_operator = _is_herm_or_anti_herm(frame_operator, atol=atol, rtol=rtol)
            # diagonalize with eigh, utilizing assumption of anti-hermiticity
            frame_diag, frame_basis = np.linalg.eigh(1j * frame_operator)
            self._frame_diag = Array(-1j * frame_diag)
            self._frame_basis = Array(frame_basis)
            # NOTE(review): unlike _frame_basis this is a raw ndarray, not
            # wrapped in Array — confirm downstream code accepts both.
            self._frame_basis_adjoint = frame_basis.conj().transpose()
            self._dim = len(self._frame_diag)
    @property
    def dim(self) -> int:
        """The dimension of the frame."""
        return self._dim
    @property
    def frame_operator(self) -> Array:
        """The original frame operator."""
        return self._frame_operator
    @property
    def frame_diag(self) -> Array:
        """Diagonal of the frame operator."""
        return self._frame_diag
    @property
    def frame_basis(self) -> Array:
        """Array containing diagonalizing unitary."""
        return self._frame_basis
    @property
    def frame_basis_adjoint(self) -> Array:
        """Adjoint of the diagonalizing unitary."""
        return self._frame_basis_adjoint
    def state_into_frame_basis(self, y: Array) -> Array:
        """Take a state into the frame basis; identity map when no frame."""
        if self._frame_operator is None:
            return to_array(y)
        return self.frame_basis_adjoint @ y
    def state_out_of_frame_basis(self, y: Array) -> Array:
        """Take a state out of the frame basis; identity map when no frame."""
        if self._frame_operator is None:
            return to_array(y)
        return self.frame_basis @ y
    def operator_into_frame_basis(self, op: Union[Operator, List[Operator], Array]) -> Array:
        """Conjugate an operator (or array of operators) into the frame
        basis; identity map when no frame."""
        op = to_array(op)
        if self._frame_operator is None:
            return op
        return self.frame_basis_adjoint @ op @ self.frame_basis
    def operator_out_of_frame_basis(self, op: Union[Operator, Array]) -> Array:
        """Conjugate an operator (or array of operators) out of the frame
        basis; identity map when no frame."""
        op = to_array(op)
        if self._frame_operator is None:
            return op
        return self.frame_basis @ op @ self.frame_basis_adjoint
    def state_into_frame(
        self,
        t: float,
        y: Array,
        y_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ):
        """Take a state into the frame, i.e. return exp(-tF) @ y.
        Args:
            t: time
            y: state (array of appropriate size)
            y_in_frame_basis: whether or not the array y is already in
                              the basis in which the frame is diagonal
            return_in_frame_basis: whether or not to return the result
                                   in the frame basis
        Returns:
            Array: state in frame
        """
        if self._frame_operator is None:
            return to_array(y)
        out = y
        # if not in frame basis convert it
        if not y_in_frame_basis:
            out = self.state_into_frame_basis(out)
        # go into the frame: exp(-tF) is diagonal in this basis
        out = np.diag(np.exp(-t * self.frame_diag)) @ out
        # if output is requested to not be in the frame basis, convert it
        if not return_in_frame_basis:
            out = self.state_out_of_frame_basis(out)
        return out
    def _conjugate_and_add(
        self,
        t: float,
        operator: Array,
        op_to_add_in_fb: Optional[Array] = None,
        operator_in_frame_basis: Optional[bool] = False,
        return_in_frame_basis: Optional[bool] = False,
    ):
        r"""Concrete implementation of general helper function for computing
        exp(-tF)Gexp(tF) + B
        Note: B is added in the frame basis before any potential final change
        out of the frame basis.
        """
        if self._frame_operator is None:
            if op_to_add_in_fb is None:
                return to_array(operator)
            else:
                return to_array(operator + op_to_add_in_fb)
        out = to_array(operator)
        # if not in frame basis convert it
        if not operator_in_frame_basis:
            out = self.operator_into_frame_basis(out)
        # get frame transformation matrix in diagonal basis
        # assumption that F is anti-Hermitian implies conjugation of
        # diagonal gives inversion
        exp_freq = np.exp(t * self.frame_diag)
        frame_mat = np.outer(exp_freq.conj(), exp_freq)
        out = frame_mat * out
        if op_to_add_in_fb is not None:
            out = out + op_to_add_in_fb
        # if output is requested to not be in the frame basis, convert it
        if not return_in_frame_basis:
            out = self.operator_out_of_frame_basis(out)
        return out
    def operators_into_frame_basis_with_cutoff(
        self,
        operators: Union[Array, List[Operator]],
        cutoff_freq: Optional[float] = None,
        carrier_freqs: Optional[Array] = None,
    ):
        """Concrete implementation; see the `BaseFrame` docstring for the
        full mathematical description of the cutoff construction."""
        ops_in_frame_basis = self.operator_into_frame_basis(operators)
        # if no cutoff freq is specified, the two arrays are the same
        if cutoff_freq is None:
            return ops_in_frame_basis, ops_in_frame_basis
        # if no carrier frequencies set, set to 0
        if carrier_freqs is None:
            carrier_freqs = np.zeros(len(operators))
        carrier_freqs = Array(carrier_freqs)
        # create difference matrix for diagonal elements
        dim = len(ops_in_frame_basis[0])
        freq_diffs = None
        if self._frame_operator is None:
            freq_diffs = Array(np.zeros((1, dim, dim)))
        else:
            # broadcast the diagonal into rows, then subtract the transpose
            # to get the (j,k) entry d_k - d_j
            freq_diffs = Array(np.ones((1, dim, dim))) * self.frame_diag
            freq_diffs = freq_diffs - np.transpose(freq_diffs, (0, 2, 1))
        # set up matrix encoding frequencies
        im_angular_freqs = 1j * 2 * np.pi * np.reshape(carrier_freqs, (len(carrier_freqs), 1, 1))
        freq_array = im_angular_freqs + freq_diffs
        # entries below the cutoff keep their value; all others are zeroed
        cutoff_array = ((np.abs(freq_array.imag) / (2 * np.pi)) < cutoff_freq).astype(int)
        return (
            cutoff_array * ops_in_frame_basis,
            cutoff_array.transpose([0, 2, 1]) * ops_in_frame_basis,
        )
def _is_herm_or_anti_herm(mat: Array, atol: Optional[float] = 1e-10, rtol: Optional[float] = 1e-10):
    r"""Given `mat`, the logic of this function is:
        - if `mat` is hermitian, return `-1j * mat`
        - if `mat` is anti-hermitian, return `mat`
        - otherwise:
            - if `mat.backend == 'jax'` return `jnp.nan * mat`
              (jax conditional branches cannot raise, so NaN propagation
              signals the failure instead)
            - otherwise raise an error
    The main purpose of this function is to hide the peculiarities of
    implementing the above logic in a compileable way in `jax`.
    Args:
        mat: array to check
        atol: absolute tolerance
        rtol: relative tolerance
    Returns:
        Array: anti-hermitian version of `mat` if applicable
    Raises:
        ImportError: if backend is jax and jax is not installed.
        QiskitError: if `mat` is not Hermitian or anti-Hermitian
    """
    mat = to_array(mat)
    mat = Array(mat, dtype=complex)
    if mat.backend == "jax":
        from jax.lax import cond
        import jax.numpy as jnp
        mat = mat.data
        if mat.ndim == 1:
            # this function checks if pure imaginary. If yes it returns the
            # array, otherwise it multiplies it by jnp.nan to raise an error
            # Note: pathways in conditionals in jax cannot raise Exceptions
            def anti_herm_conditional(b):
                aherm_pred = jnp.allclose(b, -b.conj(), atol=atol, rtol=rtol)
                return cond(aherm_pred, lambda A: A, lambda A: jnp.nan * A, b)
            # Check if it is purely real, if not apply anti_herm_conditional
            herm_pred = jnp.allclose(mat, mat.conj(), atol=atol, rtol=rtol)
            return Array(cond(herm_pred, lambda A: -1j * A, anti_herm_conditional, mat))
        else:
            # this function checks if anti-hermitian, if yes returns the array,
            # otherwise it multiplies it by jnp.nan
            def anti_herm_conditional(b):
                aherm_pred = jnp.allclose(b, -b.conj().transpose(), atol=atol, rtol=rtol)
                return cond(aherm_pred, lambda A: A, lambda A: jnp.nan * A, b)
            # the following lines check if a is hermitian, otherwise it feeds
            # it into the anti_herm_conditional
            herm_pred = jnp.allclose(mat, mat.conj().transpose(), atol=atol, rtol=rtol)
            return Array(cond(herm_pred, lambda A: -1j * A, anti_herm_conditional, mat))
    else:
        if mat.ndim == 1:
            # 1d case: Hermitian means purely real, anti-Hermitian means
            # purely imaginary
            if np.allclose(mat, mat.conj(), atol=atol, rtol=rtol):
                return -1j * mat
            elif np.allclose(mat, -mat.conj(), atol=atol, rtol=rtol):
                return mat
        else:
            if is_hermitian_matrix(mat, rtol=rtol, atol=atol):
                return -1j * mat
            elif is_hermitian_matrix(1j * mat, rtol=rtol, atol=atol):
                return mat
        # raise error if execution has made it this far
        raise QiskitError(
            """frame_operator must be either a Hermitian or
            anti-Hermitian matrix."""
        )
|
#
# Author: <NAME> <<EMAIL>
# Version: 1.0
#
import logging
import socket
import re
from importlib import import_module
from .exception import GrumpyException, GrumpyRuntimeException
from .config import GrumpyConfig
class Grumpy:
    """Minimal IRC bot: connects to a server, registers, logs in via
    userserv/nickserv, joins channels and dispatches PRIVMSGs to plugins."""
    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; real values are set per instance.
    config = None
    plugins = {}
    connection = None
    def __init__(self, config_name='config.ini'):
        """Load configuration, then initialize logging and plugins.
        Raises:
            GrumpyException: propagated from config/plugin initialization.
        """
        self.config = GrumpyConfig(config_name)
        # Fresh per-instance dict so instances do not share the class-level
        # mutable default (a latent cross-instance bug in the original).
        self.plugins = {}
        self.init_logging()
        self.init_plugins()
    def init_logging(self):
        """Configure root logging; DEBUG when the config flag is truthy."""
        # NOTE(review): if the ini value is the *string* 'False' this is
        # still truthy — confirm GrumpyConfig converts it to bool.
        level = logging.DEBUG if self.config['main']['debug'] else logging.INFO
        logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                            level=level)
    def init_plugins(self):
        """Import and instantiate every plugin listed in the config.
        Raises:
            GrumpyRuntimeException: when a plugin module cannot be imported.
        """
        logging.info('Loading plugins')
        for name in self.config['main']['plugins']:
            try:
                logging.info('Loading plugin: {}'.format(name))
                module = import_module('grumpy.plugins.{}'.format(name))
                plugin = getattr(module, 'GrumpyPlugin')
                # NOTE(review): self.connection is still None at this point;
                # plugins that store this reference never see the live socket.
                self.plugins[name] = plugin(self.config, self.connection)
            except ImportError:
                raise GrumpyRuntimeException('Cannot load plugin: {}'.format(name)) from None
    def run(self):
        """Connect, register with the server and loop over incoming data."""
        logging.info('Starting bot')
        conf = self.config['main']
        self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        logging.info('Connecting to: {}:{}'.format(conf['server'], conf['port']))
        self.connection.connect((conf['server'], conf['port']))
        user = 'USER {} {} {}: {}'.format(conf['nick'],
                                          conf['hostname'],
                                          conf['servername'],
                                          conf['realname'])
        nick = 'NICK {}'.format(conf['nick'])
        self._send_raw_message(user)
        self._send_raw_message(nick)
        buffer = ''
        while True:
            chunk = self.connection.recv(1024)
            if not chunk:
                # recv() returning b'' means the server closed the socket;
                # the original looped forever at 100% CPU here.
                raise GrumpyRuntimeException('Connection closed by server')
            buffer += chunk.decode()
            buffer = self._handle_response(buffer)
    def _send_raw_message(self, message):
        """Send one raw IRC line, terminated with CRLF per RFC 2812."""
        logging.info('======> "{}"'.format(message))
        # RFC 2812 requires CR-LF message termination (original sent '\n').
        self.connection.send('{}\r\n'.format(message).encode())
    def _send_message(self, dest, message):
        """Send a PRIVMSG to a nick or channel."""
        self._send_raw_message('PRIVMSG {} :{}'.format(dest, message))
    def _handle_response(self, buffer):
        """Process all complete lines in *buffer*; return the trailing
        partial line (if any) so the caller can prepend the next recv()."""
        conf = self.config['main']
        uconf = self.config['userserv']
        lines = buffer.splitlines(keepends=True)
        for line in lines:
            # We process just complete lines
            if not line.endswith('\n'):
                return line
            line = line.strip()
            if line == '':
                continue
            # NOTICE messages
            if re.match('^NOTICE AUTH', line):
                logging.info('<====== "{}"'.format(line))
            # End of MOTD: registration complete — log in and join channels
            elif re.search('^:{} 376 {} :End of /MOTD command.'.format(conf['server'], conf['nick']), line):
                logging.info('<====== "{}"'.format(line))
                # if userserv is defined
                if all([uconf['username'], uconf['password']]):
                    self._send_message('userserv', 'login {} {}'.format(uconf['username'], uconf['password']))
                # regain nick using nickserv
                if uconf['nickserv']:
                    self._send_message('nickserv', 'REGAIN {}'.format(conf['nick']))
                # join all channels
                for channel in conf['channels']:
                    self._send_message('chanserv', 'INVITE {}'.format(channel))
                    self._send_raw_message('JOIN {}'.format(channel))
            # INVITES from chanserv
            elif re.match('^:CHANSERV!<EMAIL> INVITE', line):
                logging.info('<====== "{}"'.format(line))
                m = re.match('^:CHANSERV!<EMAIL> INVITE .+:(.+)$', line)
                if not m:
                    continue
                self._send_raw_message('JOIN {}'.format(m.group(1)))
            # Status messages
            elif re.match('^:{} [0-9]+ {} '.format(conf['server'], conf['nick']), line):
                logging.info('<====== "{}"'.format(line))
            # PING messages
            elif re.match('^PING :', line):
                logging.info('<====== "{}"'.format(line))
                self._send_raw_message('PONG :{}'.format(conf['server']))
            # PRIVMSG
            elif re.match('^:.+ PRIVMSG .+ :', line):
                logging.info('<====== "{}"'.format(line))
                # (original called .format(conf['nick']) on a pattern with no
                # placeholders — a no-op, removed)
                m = re.match('^:(.+)!~.+ PRIVMSG (.+) :(.+)$', line)
                if not m:
                    continue
                self._handle_message(m.group(1), m.group(2), m.group(3))
            # Unhandled messages
            else:
                logging.warning('<=== "{}"'.format(line))
        # In case all lines were processed empty the buffer
        return ''
    def _handle_message(self, sender, destination, message):
        """Run every plugin on the incoming message and send its replies."""
        for name, plugin in self.plugins.items():
            try:
                logging.debug('{} | {} | {} | {}'.format(name, sender, destination, message))
                # distinct name for replies so the *message* parameter is not
                # shadowed (latent bug in the original inner loop)
                for reply in plugin.run(sender, destination, message):
                    self._send_message(reply['destination'], reply['message'])
            except GrumpyRuntimeException as e:
                logging.error(e)
|
import datetime
import boto.ec2
import boto.ec2.cloudwatch
import boto.ec2.autoscale
import boto.ses
from boto.ec2.autoscale import LaunchConfiguration, AutoScalingGroup
from boto.ec2.autoscale.tag import Tag
import boto.utils
from juliabox.plugins.compute_ec2 import CompEC2
from juliabox.jbox_util import LoggerMixin
class Cluster(LoggerMixin):
@staticmethod
def get_spot_price(inst_type, minutes=60):
conn = Cluster._ec2()
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=minutes)
next_token = None
avzone_pricevals = {}
avzone_pricestats = {}
def median(lst):
lst = sorted(lst)
if len(lst) < 1:
return None
if len(lst) %2 == 1:
return lst[((len(lst)+1)/2)-1]
else:
return float(sum(lst[(len(lst)/2)-1:(len(lst)/2)+1]))/2.0
def add_price(az, price):
if az in avzone_pricevals:
pricevals = avzone_pricevals[az]
else:
avzone_pricevals[az] = pricevals = []
pricevals.append(price)
while True:
prices = conn.get_spot_price_history(instance_type=inst_type,
start_time=start.isoformat(), end_time=end.isoformat(),
next_token=next_token)
for p in prices:
add_price(p.availability_zone, p.price)
next_token = prices.next_token
if (next_token is None) or (len(next_token) == 0):
break
for avzone, prices in avzone_pricevals.iteritems():
avzone_pricestats[avzone] = {
'count': len(prices),
'min': min(prices),
'avg': sum(prices)/float(len(prices)),
'median': median(prices),
'max': max(prices)
}
return avzone_pricestats
@staticmethod
def terminate_by_placement_group(gname):
conn = Cluster._ec2()
instances = conn.get_only_instances(filters={"placement-group-name": gname, "instance-state-name": "running"})
conn.terminate_instances(instance_ids=[i.id for i in instances])
@staticmethod
def get_placement_group(gname):
existing = Cluster.get_placement_groups(gname)
return existing if (existing is None) else existing[0]
@staticmethod
def get_placement_groups(gname=None):
conn = Cluster._ec2()
try:
existing = conn.get_all_placement_groups(gname)
except boto.exception.EC2ResponseError as ex:
#print("\t%s" % (repr(ex),))
return None
if len(existing) == 0:
return None
return existing
@staticmethod
def create_placement_group(gname):
if Cluster.get_placement_group(gname) is None:
conn = Cluster._ec2()
return conn.create_placement_group(gname, strategy='cluster')
return True
@staticmethod
def delete_placement_group(gname):
pgrp = Cluster.get_placement_group(gname)
if pgrp is not None:
pgrp.delete()
Cluster.log_info("Deleted placement group %s", gname)
else:
Cluster.log_info("Placement group %s does not exist", gname)
@staticmethod
def get_launch_config(lconfig_name):
auto_scale_conn = Cluster._autoscale()
configs = auto_scale_conn.get_all_launch_configurations(names=[lconfig_name])
if len(configs) > 0:
return configs[0]
return None
@staticmethod
def create_launch_config(lconfig_name, image_id, inst_type, key_name, security_groups,
spot_price=0,
user_data_file=None,
user_data=None,
block_dev_mappings=None,
ebs_optimized=False,
overwrite=False):
existing_config = Cluster.get_launch_config(lconfig_name)
if existing_config is not None:
if overwrite:
existing_config.delete()
Cluster.log_info("Deleted launch config %s to overwrite new config", lconfig_name)
else:
Cluster.log_error("Launch config %s already exists.", lconfig_name)
raise Exception("Launch configuration already exists")
auto_scale_conn = Cluster._autoscale()
if user_data is None:
if user_data_file is not None:
with open(user_data_file, 'r') as udf:
user_data = udf.read()
lconfig = LaunchConfiguration()
lconfig.instance_type = inst_type
lconfig.name = lconfig_name
lconfig.image_id = image_id
lconfig.key_name = key_name
lconfig.security_groups = security_groups
lconfig.user_data = user_data
if spot_price > 0:
lconfig.spot_price = spot_price
if block_dev_mappings is not None:
lconfig.block_device_mappings = block_dev_mappings
if ebs_optimized:
lconfig.ebs_optimized = True
auto_scale_conn.create_launch_configuration(lconfig)
Cluster.log_info("Created launch configuration %s", lconfig.name)
@staticmethod
def delete_launch_config(lconfig_name):
existing_config = Cluster.get_launch_config(lconfig_name)
if existing_config is not None:
existing_config.delete()
Cluster.log_info("Deleted launch config %s", lconfig_name)
else:
Cluster.log_info("Launch config %s does not exist", lconfig_name)
@staticmethod
def create_autoscale_group(gname, lconfig_name, placement_group, size, zones=None):
existing_group = CompEC2._get_autoscale_group(gname)
if existing_group is not None:
Cluster.log_error("Autoscale group %s already exists!", gname)
return None
tags = [Tag(key='Name', value=gname, propagate_at_launch=True, resource_id=gname)]
if zones is None:
zones = [x.name for x in Cluster._ec2().get_all_zones()]
Cluster.log_info("zones: %r", zones)
ag = AutoScalingGroup(group_name=gname, availability_zones=zones,
launch_config=lconfig_name,
placement_group=placement_group,
tags=tags,
desired_capacity=0, min_size=0, max_size=size)
conn = Cluster._autoscale()
return conn.create_auto_scaling_group(ag)
@staticmethod
def delete_autoscale_group(gname, force=False):
existing_group = CompEC2._get_autoscale_group(gname)
if existing_group is not None:
existing_group.delete(force_delete=force)
Cluster.log_error("Autoscale group %s deleted (forced=%r)", gname, force)
else:
Cluster.log_info("Autoscale group %s does not exist", gname)
return None
# @staticmethod
# def launch_into_placement_group(gname, ami_name, key, inst_type, num_inst, sec_grp, spot_price=None):
# conn = CloudHost.connect_ec2()
#
# ami = CloudHost.get_image(ami_name)
# if ami is None:
# CloudHost.log_error("Image with name %s not found.", ami_name)
# return None
#
# ami_id = ami.id
#
# if spot_price is None:
# resev = conn.run_instances(ami_id, min_count=num_inst, max_count=num_inst,
# key_name=key, instance_type=inst_type, security_groups=[sec_grp],
# placement=CloudHost.REGION, placement_group=gname)
# else:
# resev = conn.request_spot_instances(spot_price, ami_id, count=num_inst,
# launch_group=gname,
# key_name=key, instance_type=inst_type, security_groups=[sec_grp],
# placement=CloudHost.REGION, placement_group=gname)
# return resev.id
#
# # @staticmethod
# # def get_spot_request(gname):
# # conn = CloudHost.connect_ec2()
# # conn.get_all_spot_instance_requests()
#
# @staticmethod
# def wait_for_placement_group(gname, num_inst):
# if Cluster.get_placement_group(gname) is None:
# return False, -1
# count = len(CloudHost.get_public_addresses_by_placement_group(gname))
# return (num_inst == count), count
# @staticmethod
# def get_public_hostnames_by_tag(tag, value):
# conn = CompEC2._connect_ec2()
# instances = conn.get_only_instances(filters={"tag:"+tag: value, "instance-state-name": "running"})
# return [i.public_dns_name for i in instances]
#
# @staticmethod
# def get_private_hostnames_by_tag(tag, value):
# conn = CompEC2._connect_ec2()
# instances = conn.get_only_instances(filters={"tag:"+tag: value, "instance-state-name": "running"})
# return [i.private_dns_name for i in instances]
@staticmethod
def get_public_hostnames_by_placement_group(gname):
conn = Cluster._ec2()
instances = conn.get_only_instances(filters={"placement-group-name": gname, "instance-state-name": "running"})
return [i.public_dns_name for i in instances]
@staticmethod
def get_public_ips_by_placement_group(gname):
conn = Cluster._ec2()
instances = conn.get_only_instances(filters={"placement-group-name": gname, "instance-state-name": "running"})
return [i.ip_address for i in instances]
@staticmethod
def get_private_hostnames_by_placement_group(gname):
conn = Cluster._ec2()
instances = conn.get_only_instances(filters={"placement-group-name": gname, "instance-state-name": "running"})
return [i.private_dns_name for i in instances]
@staticmethod
def get_private_ips_by_placement_group(gname):
conn = Cluster._ec2()
instances = conn.get_only_instances(filters={"placement-group-name": gname, "instance-state-name": "running"})
return [i.private_ip_address for i in instances]
@staticmethod
def _ec2():
return CompEC2._connect_ec2()
@staticmethod
def _autoscale():
return CompEC2._connect_autoscale()
@staticmethod
def get_autoscale_group(gname):
return CompEC2._get_autoscale_group(gname)
@staticmethod
def get_autoscaled_instances(gname=None):
return CompEC2.get_all_instances(gname) |