input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
"""Colormaps."""
# --- import --------------------------------------------------------------------------------------
import collections
import numpy as np
from numpy import r_
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
import matplotlib.gridspec as grd
# --- define -------------------------------------------------------------------------------------
__all__ = [
"colormaps",
"get_color_cycle",
"grayify_cmap",
"overline_colors",
"plot_colormap_components",
]
# --- functions ----------------------------------------------------------------------------------
def make_cubehelix(name="WrightTools", gamma=0.5, s=0.25, r=-1, h=1.3, reverse=False, darkest=0.7):
    """Create a cubehelix-style colormap.

    Based on the scheme of D. A. Green — see `here`__ for details.

    __ http://arxiv.org/abs/1108.5083

    Parameters
    ----------
    name : string (optional)
        Name of new cmap. Default is WrightTools.
    gamma : number (optional)
        Intensity factor. Default is 0.5
    s : number (optional)
        Start color factor. Default is 0.25
    r : number (optional)
        Number and direction of rotations. Default is -1
    h : number (option)
        Hue factor. Default is 1.3
    reverse : boolean (optional)
        Toggle reversal of output colormap. By default (Reverse = False),
        colormap goes from light to dark.
    darkest : number (optional)
        Default is 0.7

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap

    See Also
    --------
    plot_colormap_components
        Displays RGB components of colormaps.
    """
    # per-channel scale factors: ratio of ITU-R BT.709 luma weights to the
    # coefficients used by the classic cubehelix formulation
    scale_r = 0.213 / 0.30
    scale_g = 0.715 / 0.99
    scale_b = 0.072 / 0.11

    def channel(p0, p1):
        # Build the callable matplotlib evaluates for one RGB channel.
        def color(x):
            # deviation from the black-white diagonal in the plane of
            # constant perceived intensity
            lum = 1 - darkest * x ** gamma  # lightness, starts at 1
            if reverse:
                lum = lum[::-1]
            amp = np.where(lum < 0.5, h * lum / 2., h * (1 - lum) / 2.)
            phi = 2 * np.pi * (s / 3 + r * x)
            return lum + amp * (p0 * np.cos(phi) + p1 * np.sin(phi))
        return color

    rgb_dict = {
        "red": channel(-0.14861 * scale_r, 1.78277 * scale_r),
        "green": channel(-0.29227 * scale_g, -0.90649 * scale_g),
        "blue": channel(1.97294 * scale_b, 0.0),
    }
    return matplotlib.colors.LinearSegmentedColormap(name, rgb_dict)
def make_colormap(seq, name="CustomMap", plot=False):
    """Generate a LinearSegmentedColormap.

    Parameters
    ----------
    seq : list of tuples
        A sequence of floats and RGB-tuples. The floats should be increasing
        and in the interval (0,1).
    name : string (optional)
        A name for the colormap
    plot : boolean (optional)
        Use to generate a plot of the colormap (Default is False).

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap

    `Source`__

    __ http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
    """
    # pad with sentinel endpoints so every float has a neighbor on each side
    padded = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    channels = ("red", "green", "blue")
    cdict = {channel: [] for channel in channels}
    for position, entry in enumerate(padded):
        if not isinstance(entry, float):
            continue
        below = padded[position - 1]  # color approaching this anchor
        above = padded[position + 1]  # color leaving this anchor
        for index, channel in enumerate(channels):
            cdict[channel].append([entry, below[index], above[index]])
    cmap = mplcolors.LinearSegmentedColormap(name, cdict)
    if plot:
        plot_colormap_components(cmap)
    return cmap
def nm_to_rgb(nm):
    """Convert a wavelength to corresponding RGB values [0.0-1.0].

    Parameters
    ----------
    nm : int or float
        The wavelength of light, in nanometers.

    Returns
    -------
    List of [R,G,B] values between 0 and 1

    `original code`__

    __ http://www.physics.sfasu.edu/astro/color/spectra.html
    """
    w = int(nm)
    # color ---------------------------------------------------------------------------------------
    if w >= 380 and w < 440:
        # BUGFIX: denominator was (440. - 350.), but the visible band starts
        # at 380 nm (see the condition above and the original SFASU code), so
        # red never reached full amplitude at the violet end.
        R = -(w - 440.) / (440. - 380.)
        G = 0.0
        B = 1.0
    elif w >= 440 and w < 490:
        R = 0.0
        G = (w - 440.) / (490. - 440.)
        B = 1.0
    elif w >= 490 and w < 510:
        R = 0.0
        G = 1.0
        B = -(w - 510.) / (510. - 490.)
    elif w >= 510 and w < 580:
        R = (w - 510.) / (580. - 510.)
        G = 1.0
        B = 0.0
    elif w >= 580 and w < 645:
        R = 1.0
        G = -(w - 645.) / (645. - 580.)
        B = 0.0
    elif w >= 645 and w <= 780:
        R = 1.0
        G = 0.0
        B = 0.0
    else:
        # outside the visible range: black
        R = 0.0
        G = 0.0
        B = 0.0
    # intensity correction ------------------------------------------------------------------------
    # intensity falls off smoothly near both edges of the visible band
    if w >= 380 and w < 420:
        # BUGFIX: ramp anchored at 380 nm (was 350) to match the band edge
        SSS = 0.3 + 0.7 * (w - 380) / (420 - 380)
    elif w >= 420 and w <= 700:
        SSS = 1.0
    elif w > 700 and w <= 780:
        SSS = 0.3 + 0.7 * (780 - w) / (780 - 700)
    else:
        SSS = 0.0
    SSS *= 255
    return [float(int(SSS * R) / 256.), float(int(SSS * G) / 256.), float(int(SSS * B) / 256.)]
def plot_colormap_components(cmap):
    """Plot the components of a given colormap.

    Draws three stacked panels: the colormap itself, its R/G/B channel
    curves plus estimated perceived luminance (black), and a grayscale
    rendering of the map (via ``grayify_cmap``).

    Parameters
    ----------
    cmap : matplotlib colormap
        Colormap to inspect.
    """
    from ._helpers import set_ax_labels  # recursive import protection
    plt.figure(figsize=[8, 4])
    gs = grd.GridSpec(3, 1, height_ratios=[1, 10, 1], hspace=0.05)
    # colorbar
    ax = plt.subplot(gs[0])
    gradient = np.linspace(0, 1, 256)
    gradient = np.vstack((gradient, gradient))
    ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.)
    ax.set_title(cmap.name, fontsize=20)
    ax.set_axis_off()
    # components
    ax = plt.subplot(gs[1])
    x = np.arange(cmap.N)
    colors = cmap(x)
    r = colors[:, 0]
    g = colors[:, 1]
    b = colors[:, 2]
    # perceived-luminance estimate, cf. http://alienryderflex.com/hsp.html
    RGB_weight = [0.299, 0.587, 0.114]
    k = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
    # clip in place so out-of-gamut values do not distort the plot
    r.clip(0, 1, out=r)
    g.clip(0, 1, out=g)
    b.clip(0, 1, out=b)
    xi = np.linspace(0, 1, x.size)
    plt.plot(xi, r, "r", linewidth=5, alpha=0.6)
    plt.plot(xi, g, "g", linewidth=5, alpha=0.6)
    plt.plot(xi, b, "b", linewidth=5, alpha=0.6)
    plt.plot(xi, k, "k", linewidth=5, alpha=0.6)
    ax.set_xlim(0, 1)
    ax.set_ylim(-0.1, 1.1)
    set_ax_labels(ax=ax, xlabel=None, xticks=False, ylabel="intensity")
    # grayified colorbar
    cmap = grayify_cmap(cmap)
    ax = plt.subplot(gs[2])
    gradient = np.linspace(0, 1, 256)
    gradient = np.vstack((gradient, gradient))
    ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.)
    ax.set_axis_off()
def grayify_cmap(cmap):
    """Build a luminance-only (grayscale) version of *cmap*.

    Perceived brightness is estimated with the HSP model
    (http://alienryderflex.com/hsp.html).

    `Source`__

    __ https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/
    """
    source = plt.cm.get_cmap(cmap)
    rgba = source(np.arange(source.N))
    # weighted quadratic mean of R, G, B approximates perceived luminance
    weights = [0.299, 0.587, 0.114]
    gray = np.sqrt(np.dot(rgba[:, :3] ** 2, weights))
    # overwrite all three color channels with the luminance, keep alpha
    rgba[:, :3] = gray[:, np.newaxis]
    return mplcolors.LinearSegmentedColormap.from_list(source.name + "_grayscale", rgba, source.N)
def get_color_cycle(n, cmap="rainbow", rotations=3):
    """Get a list of RGBA colors following a colormap.

    Useful for plotting lots of elements, keeping the color of each unique.

    Parameters
    ----------
    n : integer
        The number of colors to return.
    cmap : string (optional)
        The colormap to use in the cycle. Default is rainbow.
    rotations : integer (optional)
        The number of times to repeat the colormap over the cycle. Default is 3.

    Returns
    -------
    list
        List of RGBA lists.
    """
    chosen = colormaps[cmap]
    # ceiling division: how many distinct colors each rotation needs
    per_rotation = -(-n // rotations)
    positions = list(np.linspace(0, 1, per_rotation)) * rotations
    # trim to exactly n samples, then evaluate the colormap
    return chosen(positions[:n])
# --- color maps ----------------------------------------------------------------------------------
# default cubehelix colormap instance for this package
cubehelix = make_cubehelix()
# white -> rainbow -> dark red anchor list (consumed elsewhere as a colormap
# definition -- presumably via make_colormap or from_list; TODO confirm)
experimental = [
    "#FFFFFF",
    "#0000FF",
    "#0080FF",
    "#00FFFF",
    "#00FF00",
    "#FFFF00",
    "#FF8000",
    "#FF0000",
    "#881111",
]
# simple two-anchor ramps
greenscale = ["#000000", "#00FF00"]  # black # green
greyscale = ["#FFFFFF", "#000000"]  # white # black
invisible = ["#FFFFFF", "#FFFFFF"]  # white # white
# isoluminant colorbar based on the research of Kindlmann et al.
# http://dx.doi.org/10.1109/VISUAL.2002.1183788
c = mplcolors.ColorConverter().to_rgb  # shorthand: color spec -> RGB tuple
# BUGFIX: the registered name contained a stray backtick ("isoluminant`");
# renamed to "isoluminant1" to match the isoluminant2 / isoluminant3 scheme.
isoluminant1 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.847, 0.057, 0.057]),
        1 / 6.,
        c(r_[0.847, 0.057, 0.057]),
        c(r_[0.527, 0.527, 0.000]),
        2 / 6.,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.000, 0.592, 0.000]),
        3 / 6.,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.000, 0.559, 0.559]),
        4 / 6.,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.316, 0.316, 0.991]),
        5 / 6.,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.718, 0.000, 0.718]),
    ],
    name="isoluminant1",
)
# same anchors as isoluminant1 but with the hue ramp reversed
# (white, then purple -> blue -> teal -> green -> olive -> red)
isoluminant2 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.718, 0.000, 0.718]),
        1 / 6.,
        c(r_[0.718, 0.000, 0.718]),
        c(r_[0.316, 0.316, 0.991]),
        2 / 6.,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.000, 0.559, 0.559]),
        3 / 6.,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.000, 0.592, 0.000]),
        4 / 6.,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.527, 0.527, 0.000]),
        5 / 6.,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.847, 0.057, 0.057]),
    ],
    name="isoluminant2",
)
# five-segment variant: drops the purple anchor of isoluminant2
# (white, then blue -> teal -> green -> olive -> red)
isoluminant3 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.316, 0.316, 0.991]),
        1 / 5.,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.000, 0.559, 0.559]),
        2 / 5.,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.000, 0.592, 0.000]),
        3 / 5.,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.527, 0.527, 0.000]),
        4 / 5.,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.847, 0.057, 0.057]),
    ],
    name="isoluminant3",
)
# diverging anchor list: blue through white to red, in even hue steps
signed = [
    "#0000FF",  # blue
    "#002AFF",
    "#0055FF",
    "#007FFF",
    "#00AAFF",
    "#00D4FF",
    "#00FFFF",
    "#FFFFFF",  # white
    "#FFFF00",
    "#FFD400",
    "#FFAA00",
    "#FF7F00",
    "#FF5500",
    "#FF2A00",
    "#FF0000",
]  # red
# coarser legacy version of the diverging map above
signed_old = [
    "#0000FF",  # blue
    "#00BBFF",  # blue-aqua
    "#00FFFF",  # aqua
    "#FFFFFF",  # white
    "#FFFF00",  # yellow
    "#FFBB00",  # orange
    "#FF0000",
]  # red
# high-contrast sequential map starting from white through black
skyebar = [
    "#FFFFFF",  # white
    "#000000",  # black
    "#0000FF",  # blue
    "#00FFFF",  # cyan
    "#64FF00",  # light green
    "#FFFF00",  # yellow
    "#FF8000",  # orange
    "#FF0000",  # red
    "#800000",
]  # dark red
skyebar_d = [
"#000000", # | |
import numpy as np
import pydart2 as pydart
import QPsolver
import IKsolve_one
import momentum_con
import motionPlan
from scipy import optimize
import yulTrajectoryOpt
from fltk import *
from PyCommon.modules.GUI import hpSimpleViewer as hsv
from PyCommon.modules.Renderer import ysRenderer as yr
from PyCommon.modules.Simulator import hpDartQpSimulator as hqp
# Module-level buffers for debug/render geometry. Presumably filled during
# simulation steps and consumed by the viewer's render callbacks -- TODO
# confirm against the (not shown) viewer setup code.
render_vector = []
render_vector_origin = []
push_force = []
push_force_origin = []
blade_force = []
blade_force_origin = []
rd_footCenter = []
class State(object):
    """One node of the pose state machine: a named target pose held for a
    fixed duration, with two controller gains.

    Attributes mirror the constructor arguments:
    name   -- state identifier string (e.g. "state0")
    dt     -- how long this state is held, in seconds
    c_d    -- controller gain; semantics not visible here (presumably
              position-feedback weight -- TODO confirm against controller)
    c_v    -- controller gain; presumably velocity-feedback weight -- TODO
    angles -- target joint angles, one per skeleton dof
    """

    def __init__(self, name, dt, c_d, c_v, angles):
        self.name = name
        self.dt = dt
        self.c_d = c_d
        self.c_v = c_v
        self.angles = angles
class MyWorld(pydart.World):
def __init__(self, ):
    """Build the skating world.

    Loads the blade-skeleton scene, lowers ground friction (ice), defines
    the sequence of target poses (State objects) that drives the motion,
    and creates the QP tracking and momentum controllers.
    """
    pydart.World.__init__(self, 1.0 / 1000.0, './data/skel/cart_pole_blade_3dof.skel')
    # pydart.World.__init__(self, 1.0 / 1000.0, './data/skel/cart_pole_blade.skel')
    # pydart.World.__init__(self, 1.0 / 2000.0, './data/skel/cart_pole.skel')
    self.force = None  # external push force; applied in step() when set
    self.duration = 0
    # very low friction to mimic ice
    self.skeletons[0].body('ground').set_friction_coeff(0.02)
    skel = self.skeletons[2]  # the controlled character
    # print("mass: ", skel.m, "kg")
    # print('[Joint]')
    # for joint in skel.joints:
    #     print("\t" + str(joint))
    #     print("\t\tparent = " + str(joint.parent_bodynode))
    #     print("\t\tchild = " + str(joint.child_bodynode))
    #     print("\t\tdofs = " + str(joint.dofs))
    # skel.joint("j_abdomen").set_position_upper_limit(10, 0.0)
    # skel.joint("j_heel_left").set_position_upper_limit(0, 0.0)
    # skel.joint("j_heel_left").set_position_lower_limit(0, -0.0)
    # dof index groups used to address slices of the pose vectors below
    pelvis_x = skel.dof_indices((["j_pelvis_rot_x"]))
    pelvis = skel.dof_indices((["j_pelvis_rot_y", "j_pelvis_rot_z"]))
    upper_body = skel.dof_indices(["j_abdomen_x", "j_abdomen_y", "j_abdomen_z"])
    spine = skel.dof_indices(["j_spine_x", "j_spine_y", "j_spine_z"])
    right_leg = skel.dof_indices(["j_thigh_right_x", "j_thigh_right_y", "j_thigh_right_z", "j_shin_right_z"])
    left_leg = skel.dof_indices(["j_thigh_left_x", "j_thigh_left_y", "j_thigh_left_z", "j_shin_left_z"])
    knee = skel.dof_indices(["j_shin_left_x", "j_shin_right_x"])
    arms = skel.dof_indices(["j_bicep_left_x", "j_bicep_right_x"])
    foot = skel.dof_indices(["j_heel_left_x", "j_heel_left_y", "j_heel_left_z", "j_heel_right_x", "j_heel_right_y", "j_heel_right_z"])
    leg_y = skel.dof_indices(["j_thigh_right_y", "j_thigh_left_y"])
    # blade = skel.dof_indices(["j_heel_right_2"])
    # #----------------------------------
    # # pushing side to side new (180718)
    # #----------------------------------
    #
    # state0: neutral stance, arms down
    s0q = np.zeros(skel.ndofs)
    # s0q[pelvis] = 0., -0.
    # s0q[upper_body] = 0.0, -0.5
    s0q[right_leg] = -0., -0., -0.0, -0.0
    s0q[left_leg] = 0., 0., 0.0, -0.0
    # s0q[leg_y] = -0.785, 0.785
    s0q[arms] = 1.5, -1.5
    # s0q[foot] = 0.1, 0.0, 0.1, -0.0
    state0 = State("state0", 0.2, 0.0, 0.2, s0q)
    # s01q = np.zeros(skel.ndofs)
    # # s01q[pelvis] = 0., -0.3
    # s01q[upper_body] = 0.0, 0., -0.2
    # s01q[left_leg] = 0.1, 0.3, 0.3, -0.3
    # # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
    # s01q[right_leg] = -0.2, -0.9, 0.2, -0.5
    # s01q[arms] = 1.5, -1.5
    # # s01q[blade] = -0.3
    # s01q[foot] = -0.0, 0.0, 0., 0.0, 0.0, 0.
    # state01 = State("state01", 0.5, 2.2, 0.0, s01q)
    # state01: crouch with both knees bent, torso pitched forward
    s01q = np.zeros(skel.ndofs)
    # s01q[pelvis] = 0., -0.3
    s01q[upper_body] = 0.0, 0., -0.5
    s01q[spine] = 0.0, 0., 0.5
    s01q[left_leg] = -0., 0., 0.3, -0.5
    # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
    s01q[right_leg] = -0., -0., 0.3, -0.5
    s01q[arms] = 1.5, -1.5
    # s01q[blade] = -0.3
    s01q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
    state01 = State("state01", 0.5, 2.2, 0.0, s01q)
    # s011q = np.zeros(skel.ndofs)
    # # s01q[pelvis] = 0., -0.3
    # s011q[upper_body] = 0.0, 0., -0.5
    # s011q[spine] = 0.0, 0., 0.5
    # s011q[left_leg] = -0.1, 0., 0.3, -0.5
    # # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
    # s011q[right_leg] = -0., -0.785, 0.0, -0.5
    # s011q[arms] = 1.5, -1.5
    # # s01q[blade] = -0.3
    # s011q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
    # state011 = State("state011", 0.5, 2.2, 0.0, s011q)
    #
    # s1q = np.zeros(skel.ndofs)
    # # s1q[pelvis] = 0., -0.1
    # s1q[upper_body] = 0.0, 0., -0.5
    # s1q[spine] = 0.0, 0., 0.5
    # s1q[left_leg] = -0.1, 0., 0.3, -0.5
    # # s1q[right_leg] = -0.0, -0.785, -0.66, -0.0
    # s1q[right_leg] = -0., -0.785, 0., -0.5
    # s1q[arms] = 1.5, -1.5
    # # s1q[blade] = -0.3
    # s1q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
    # state1 = State("state1", 0.5, 2.2, 0.0, s1q)
    #
    # s2q = np.zeros(skel.ndofs)
    # # s2q[pelvis] = -0.3, -0.0
    # s2q[upper_body] = -0., 0, -0.
    # s2q[left_leg] = 0., 0., 0., -0.17
    # s2q[right_leg] = -0.4, -0.785, -0.2, -0.17
    # s2q[arms] = 1.5, -1.5
    # s2q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, -0.
    # state2 = State("state2", 0.5, 0.0, 0.2, s2q)
    #
    # s3q = np.zeros(skel.ndofs)
    # # s3q[upper_body] = 0.0, 0., 0.3
    # s3q[left_leg] = -0.1, -0., 0., -0.
    # s3q[right_leg] = 0.0, 0., 0.5, -1.5
    # s3q[arms] = 1.5, -1.5
    # # s3q[foot] = -0.0, 0.0, 0.3, 0.0, 0.0, -0.
    # state3 = State("state3", 1.0, 0.0, 0.2, s3q)
    # #s1q[pelvis] = -0.3", 0.2, 2.2, 0.0, s3q)
    # self.state_list = [state0, state01, state011, state1, state2, state3]
    # state011: same crouch as state01 (duplicate pose, separate node)
    s011q = np.zeros(skel.ndofs)
    # s01q[pelvis] = 0., -0.3
    s011q[upper_body] = 0.0, 0., -0.5
    s011q[spine] = 0.0, 0., 0.5
    s011q[left_leg] = -0., 0., 0.3, -0.5
    # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
    s011q[right_leg] = -0., -0., 0.3, -0.5
    s011q[arms] = 1.5, -1.5
    # s01q[blade] = -0.3
    s011q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
    state011 = State("state011", 0.5, 2.2, 0.0, s011q)
    # state1: right leg lifted/flexed (presumably the pushing stride)
    s1q = np.zeros(skel.ndofs)
    # s1q[pelvis] = 0., -0.1
    s1q[upper_body] = 0., 0., -0.2
    s1q[spine] = 0.0, 0., 0.2
    s1q[left_leg] = -0.1, 0., 0.3, -0.5
    # s1q[right_leg] = -0.0, -0.785, -0.66, -0.0
    s1q[right_leg] = 0.1, -0., 0.9, -1.2
    s1q[arms] = 1.5, -1.5
    # s1q[blade] = -0.3
    s1q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
    state1 = State("state1", 0.8, 2.2, 0.0, s1q)
    # state2: transition back toward double support
    s2q = np.zeros(skel.ndofs)
    # s2q[pelvis] = -0.3, -0.0
    s2q[upper_body] = -0., 0, -0.3
    s2q[left_leg] = 0., 0., 0.3, -0.5
    s2q[right_leg] = -0., -0.2, 0.3, -0.2
    s2q[arms] = 1.5, -1.5
    s2q[foot] = -0.0, 0.0, 0.2, 0.0, -0., 0.2
    # state2 = State("state2", 0.25, 0.0, 0.2, s2q)
    state2 = State("state2", 0.3, 0.0, 0.2, s2q)
    # s02q = np.zeros(skel.ndofs)
    # # s02q[pelvis] = -0.3, -0.0
    # s02q[upper_body] = -0., 0, -0.3
    # s02q[left_leg] = 0., 0., 0.2, -0.5
    # s02q[right_leg] = -0., -0., -0.2, -0.2
    # s02q[arms] = 1.5, -1.5
    # s02q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
    # # state02 = State("state02", 0.25, 0.0, 0.2, s02q)
    # state02 = State("state02", 0.3, 0.0, 0.2, s02q)
    # state3: mirror stride -- left leg lifted/flexed
    s3q = np.zeros(skel.ndofs)
    s3q[upper_body] = 0.0, 0., -0.3
    s3q[spine] = 0.0, 0., 0.3
    s3q[left_leg] = 0.1, -0., 0.9, -1.2
    s3q[right_leg] = -0., -0., 0.3, -0.3
    s3q[arms] = 1.5, -1.5
    s3q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.
    state3 = State("state3", 0.8, 0.0, 0.2, s3q)
    #s1q[pelvis] = -0.3", 0.2, 2.2, 0.0, s3q)
    # state03: recovery pose after the left-leg stride
    s03q = np.zeros(skel.ndofs)
    s03q[upper_body] = 0.0, 0., -0.3
    # s03q[spine] = 0.0, 0., 0.3
    s03q[left_leg] = 0.1, 0.3, 0.7, -0.3
    s03q[right_leg] = -0., 0., 0.3, -0.3
    s03q[arms] = 1.5, -1.5
    s03q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.
    state03 = State("state03", 0.5, 0.0, 0.2, s03q)
    # s1q[pelvis] = -0.3", 0.2, 2.2, 0.0, s3q)
    # state4 / state04: near-neutral rest poses (arms only)
    s4q = np.zeros(skel.ndofs)
    s4q[arms] = 1.5, -1.5
    # s4q[upper_body] = 0., 0., 0.
    # s4q[left_leg] = 0.1, -0., 0., -0.
    # s4q[right_leg] = 0., -0., 0.5, -0.5
    # s4q[knee] = 0., -0.2
    state4 = State("state4", 0.5, 0.0, 0.2, s4q)
    s04q = np.zeros(skel.ndofs)
    s04q[arms] = 1.5, -1.5
    # s04q[left_leg] = 0.2, -0., 0., -0.
    state04 = State("state04", 10.0, 0.0, 0.2, s04q)
    # two stride cycles: crouch, right push, recover, left push, recover
    self.state_list = [state0, state01, state011, state1, state2, state3, state03, state1, state2, state3, state03]
    # self.state_list = [state0, state01, state011, state1, state2, state3, state03, state4, state04]
    # self.state_list = [state0, state1]
    state_num = len(self.state_list)
    self.state_num = state_num
    # print("state_num: ", state_num)
    self.curr_state = self.state_list[0]
    self.elapsedTime = 0.0  # time spent in the current state
    self.curr_state_index = 0
    # print("backup angle: ", backup_q)
    # print("cur angle: ", self.curr_state.angles)
    # skeletons[3] appears to serve as the reference/target skeleton
    self.controller = QPsolver.Controller(skel, self.skeletons[3], self.dt, self.curr_state.name)
    self.mo_con = momentum_con.momentum_control(self.skeletons[2], self.skeletons[3], self.time_step())
    self.skeletons[3].set_positions(self.curr_state.angles)
    # self.skeletons[3].set_positions(np.zeros(skel.ndofs))
    # self.ik = IKsolve_one.IKsolver(self.skeletons[2], self.dt)
    # merged_target = self.curr_state.angles
    # self.ik.update_target(self.curr_state.name)
    # merged_target = np.zeros(skel.ndofs)
    # merged_target[:6] = self.curr_state.angles[:6]
    # merged_target[6:18] = self.ik.solve()
    # merged_target[18:] = self.curr_state.angles[18:]
    # print("ik res: ", self.ik.solve())
    # print("merged_target: ", merged_target)
    # self.controller.target = merged_target
    self.controller.target = self.curr_state.angles
    # self.controller.target = skel.q
    # skel.set_controller(self.controller)
    print('create controller OK')
    # per-step contact bookkeeping, filled during simulation
    self.contact_force = []
    self.contactPositionLocals = []
    self.bodyIDs = []
    # print("dof: ", skel.ndofs)
def step(self):
# print("self.curr_state: ", self.curr_state.name)
# if self.curr_state.name == "state2" or self.curr_state.name == "state3":
# if self.curr_state.name == "state1":
# if self.time() > 1.0 and self.time() < 2.0:
# self.force = np.array([20.0, 0.0, 0.0])
# else:
# self.force = None
# print("left foot pos:", self.skeletons[2].body('h_blade_left').to_world([0.0, 0.0, 0.0]))
# self.force = np.array([20.0, 0.0, 0.0])
# self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# if self.curr_state.name == "state1":
# self.force = np.array([10.0, 0.0, 0.0])
# else:
# self.force = None
self.controller.cur_state = self.curr_state.name
if self.force is not None:
self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# self.skeletons[2].body('h_spine').add_ext_force(self.force)
# if self.curr_state.name == "state2":
# self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# if self.curr_state.name == "state3":
# self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# if | |
dataf_r = open(self.filename, 'rb')
dataread = csv.reader(dataf_r)
except:
pass
else:
try:
header_file = dataread.next()
except: # empty file
write_head = True
else:
if header == header_file:
write_head = False
else:
write_head = True
dataf_r.close()
if write_head:
self.datawriter.writerow(header)
self._header_written = True
class Experiment(ExperimentHandler, Task):
def __init__(self,
             name='',
             version='0.1',
             info=None,
             rp=None,
             actions=None,
             computer=default_computer,
             paths=None,
             data_fname=None,
             **kwargs
             ):
    """
    An extension of ExperimentHandler and TrialHandler with many
    useful functions.

    .. note:: When you inherit this class, you must have at least
              ``info`` and ``rp`` (or simply ``**kwargs``) keywords
              because :class:`~psychopy.ui.Control` expects them.

    :Kwargs:
        - name (str, default: '')
            Name of the experiment. It will be used to call the
            experiment from the command-line.
        - version (str, default: '0.1')
            Version of your experiment.
        - info (tuple, list of tuples, or dict, default: None)
            Information about the experiment that you want to see in the
            output file. This is equivalent to PsychoPy's ``extraInfo``.
            It will contain at least ``('subjid', 'subj')`` even if a
            user did not specify that.
        - rp (tuple, list of tuples, or dict, default: None)
            Run parameters that apply for this particular run but need
            not be stored in the data output. It will contain at least
            the following::

                [('no_output', False),  # do you want output? or just playing around?
                 ('debug', False),  # not fullscreen presentation etc
                 ('autorun', 0),  # if >0, will autorun at the specified speed
                 ('unittest', False),  # like autorun but no breaks at show_instructions
                 ('repository', ('do nothing', 'commit and push', 'only commit')),  # add, commit and push to a hg repo?
                                # add and commit changes, like new data files?
                 ]
        - actions (list of function names, default: None)
            A list of function names (as ``str``) that can be called from
            GUI.
        - computer (module, default: ``default_computer``)
            Computer parameter module.
        - paths (dict, default: None)
            A dictionary of paths where to store different outputs.
            If None, :func:`~psychopy_ext.exp.set_paths()` is called.
        - data_fname (str, default=None)
            The name of the main data file for storing output. If None,
            becomes ``self.paths['data'] + self.info['subjid'] + '.csv'``.
            Then a :class:`~psychopy_ext.exp.Datafile` instance is
            created in ``self.datafile`` for easy writing to a csv
            format.
        - \*\*kwargs
    """
    ExperimentHandler.__init__(self,
        name=name,
        version=version,
        extraInfo=info,
        dataFileName='.data'  # for now so that PsychoPy doesn't complain
        )
    self.computer = computer
    if paths is None:
        self.paths = set_paths()
    else:
        self.paths = paths
    self._initialized = False  # flipped to True by setup()
    # minimal parameters that Experiment expects in info and rp
    self.info = OrderedDict([('subjid', 'subj')])
    if info is not None:
        # accept tuple / list-of-tuples / dict and normalize to OrderedDict
        if isinstance(info, (list, tuple)):
            try:
                info = OrderedDict(info)
            except:
                info = OrderedDict([info])
        self.info.update(info)
    self.rp = OrderedDict([  # these control how the experiment is run
        ('no_output', False),  # do you want output? or just playing around?
        ('debug', False),  # not fullscreen presentation etc
        ('autorun', 0),  # if >0, will autorun at the specified speed
        ('unittest', False),  # like autorun but no breaks when instructions shown
        ('repository', ('do nothing', 'commit & push', 'only commit')),  # add, commit and push to a hg repo?
                        # add and commit changes, like new data files?
        ])
    if rp is not None:
        # same normalization as for info above
        if isinstance(rp, (tuple, list)):
            try:
                rp = OrderedDict(rp)
            except:
                rp = OrderedDict([rp])
        self.rp.update(rp)
    #if not self.rp['notests']:
        #run_tests(self.computer)
    self.actions = actions
    if data_fname is None:
        filename = self.paths['data'] + self.info['subjid'] + '.csv'
        self.datafile = Datafile(filename, writeable=not self.rp['no_output'])
    else:
        self.datafile = Datafile(data_fname, writeable=not self.rp['no_output'])
    if self.rp['unittest']:
        self.rp['autorun'] = 100  # unittest implies fast autorun
    self.tasks = []  # a list to store all tasks for this exp
    Task.__init__(self,
                  self,
                  #name=name,
                  version=version,
                  **kwargs
                  )
def __str__(self, **kwargs):
    """Canonical name of this class, used in string contexts."""
    label = 'psychopy_ext.exp.Experiment'
    return label
#def add_tasks(self, tasks):
#if isinstance(tasks, str):
#tasks = [tasks]
#for task in tasks:
#task = task()
#task.computer = self.computer
#task.win = self.win
#if task.info is not None:
#task.info.update(self.info)
#if task.rp is not None:
#task.rp.update(self.rp)
#self.tasks.append(task)
def set_logging(self, logname='log.log', level=logging.WARNING):
    """Setup files for saving logging information.

    New folders might be created. Does nothing file-related when
    ``self.rp['no_output']`` is set; the console level is adjusted
    either way.

    :Kwargs:
        - logname (str, default: 'log.log')
            The log file name.
        - level (default: logging.WARNING)
            Minimum severity shown on the console / written to file.
    """
    if not self.rp['no_output']:
        # add .log if no extension given
        if not logname.endswith('.log'): logname += '.log'
        # Setup logging file
        try_makedirs(os.path.dirname(logname))
        if os.path.isfile(logname):
            writesys = False  # we already have sysinfo there
        else:
            writesys = True
        self.logfile = logging.LogFile(logname, filemode='a', level=level)
        # Write system information first
        if writesys:
            self.logfile.write('%s' % self.runtime_info)
        # separator plus the exact command line and start time of this run
        self.logfile.write('\n\n\n' + '#'*40 + '\n\n')
        self.logfile.write('$ python %s\n\n' % ' '.join(sys.argv))
        self.logfile.write('Start time: %s\n\n' % data.getDateStr(format="%Y-%m-%d %H:%M"))
    else:
        self.logfile = None
    # output to the screen
    logging.console.setLevel(level)
def create_seed(self, seed=None):
    """
    SUPERSEDED by `psychopy.info.RunTimeInfo`

    Creates or assigns a seed for a reproducible randomization.
    When a seed is set, you can, for example, rerun the experiment with
    trials in exactly the same order as before.

    :Kwargs:
        seed (int, default: None)
            Pass a seed if you already have one.

    :Returns:
        self.seed (int)
    """
    if seed is None:
        try:
            # derive a deterministic seed from the session date string
            self.seed = np.sum([ord(d) for d in self.info['date']])
        # BUGFIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing 'date' entry or a
        # non-string value are expected here.
        except (KeyError, TypeError):
            self.seed = 1
            logging.warning('No seed provided. Setting seed to 1.')
    else:
        self.seed = seed
    return self.seed
def _guess_participant(self, data_path, default_subjid='01'):
    """Attempt to guess the participant ID (it must be an int).

    .. :Warning:: Not usable yet

    Scans ``data_path`` for csv files whose names end in a number and
    returns the maximum found plus one (zero-padded to two digits), or
    ``default_subjid`` when no valid ID is present.
    """
    found = []
    #import pdb; pdb.set_trace()
    for path in glob.glob(data_path+'*.csv'):
        stem = os.path.split(path)[1]  # drop the directory part
        stem = stem.split('.')[0]  # drop the extension(s)
        candidate = stem.split('_')[-1]  # trailing number in the name
        try:
            found.append(int(candidate))
        except:
            logging.warning('Participant ID %s is invalid.' %candidate)
    if found:
        return '%02d' %(max(found) + 1)
    return default_subjid
def _guess_runno(self, data_path, default_runno = 1):
    """Attempt to guess the run number.

    .. :Warning:: Not usable yet

    Lists csv files under ``data_path`` (names shaped like
    ``data_<number>_runtype.csv``) and returns max+1, or
    ``default_runno`` when the directory or the files are absent.
    """
    if not os.path.isdir(data_path):
        return default_runno
    # file names split into ['data', <number>, 'runtype.csv']
    numbers = [int(os.path.basename(name).split('_')[1])
               for name in glob.glob(data_path + '*.csv')]
    if not numbers:  # no data files yet
        return default_runno
    return max(numbers) + 1
def get_mon_sizes(self, screen=None):
    # Deprecated shim: forwards to the module-level get_mon_sizes and
    # warns the caller to switch.
    warnings.warn('get_mon_sizes is deprecated; '
                  'use exp.get_mon_sizes instead')
    return get_mon_sizes(screen=screen)
def create_win(self, debug=False, color='DimGray', units='deg',
               winType='pyglet', **kwargs):
    """Generates a :class:`psychopy.visual.Window` for presenting stimuli.

    :Kwargs:
        - debug (bool, default: False)
            - If True, then the window is half the screen size.
            - If False, then the window is full screen.
        - color (str, str with a hexadecimal value, or a tuple of 3 values, default: "DimGray')
            Window background color. Default is dark gray. (`See accepted
            color names <http://www.w3schools.com/html/html_colornames.asp>`_
        - units (str, default: 'deg')
            Units for the window.
        - winType (str, default: 'pyglet')
            Window backend.
        - \*\*kwargs
            Forwarded to :class:`psychopy.visual.Window`; keys that this
            method sets itself are silently dropped.
    """
    # temporarily silence the console so Monitor creation doesn't warn
    current_level = logging.getLevel(logging.console.level)
    logging.console.setLevel(logging.ERROR)
    monitor = monitors.Monitor(self.computer.name,
        distance=self.computer.distance,
        width=self.computer.width)
    logging.console.setLevel(current_level)
    res = get_mon_sizes(self.computer.screen)
    monitor.setSizePix(res)
    if 'size' not in kwargs:
        try:
            kwargs['size'] = self.computer.win_size
        except AttributeError:  # no preferred size defined for this computer
            if not debug:
                kwargs['size'] = tuple(res)
            else:
                kwargs['size'] = (res[0]/2, res[1]/2)
    # BUGFIX: the original iterated over kwargs while deleting from it,
    # which raises "RuntimeError: dictionary changed size during iteration"
    # whenever a caller passes one of these reserved keys. Iterate over a
    # fixed tuple instead.
    for key in ('monitor', 'fullscr', 'allowGUI', 'screen', 'viewScale'):
        kwargs.pop(key, None)
    self.win = visual.Window(
        monitor=monitor,
        units=units,
        fullscr=not debug,
        allowGUI=debug, # mouse will not be seen unless debugging
        color=color,
        winType=winType,
        screen=self.computer.screen,
        viewScale=self.computer.view_scale,
        **kwargs
    )
    # keep a handle on the raw flip so it can be wrapped elsewhere
    self.win.flip_orig = self.win.flip
    self.win.flipnames = []
def setup(self):
    """
    Initializes the experiment.

    A random seed is set for `random` and `numpy.random`. The seed
    is set using the 'set:time' option.

    Also, runtime information is fully recorded, log file is set
    and a window is created.
    """
    # NOTE(review): 'lines' is read but never used -- author/version
    # extraction from the script source appears unfinished.
    try:
        with open(sys.argv[0], 'r') as f: lines = f.read()
    except:
        author = 'None'
        version = 'None'
    else:
        author = None
        version = None
    #if not self.rp['no_output']:
    # record platform / package versions for the log header
    self.runtime_info = psychopy.info.RunTimeInfo(author=author,
            version=version, verbose=True, win=False)
    key, value = get_version()
    self.runtime_info[key] = value  # updates with psychopy_ext version
    self._set_keys_flat()
    # seed numpy's RNG from this run's epoch time so trial order is
    # reproducible given the recorded runtime info
    self.seed = int(self.runtime_info['experimentRunTime.epoch'])
    np.random.seed(self.seed)
    #else:
        #self.runtime_info = None
        #self.seed = None
    self.set_logging(self.paths['logs'] + self.info['subjid'])
    self.create_win(debug=self.rp['debug'])
    self.mouse = event.Mouse(win=self.win)
    self._initialized = True
    #if len(self.tasks) == 0:
        ##self.setup = Task.setup
        #Task.setup(self)
def before_exp(self, text=None, wait=.5, wait_stim=None, **kwargs):
"""
Instructions at the beginning of the experiment.
:Kwargs:
- text (str, default: None)
Text to show.
- wait (float, | |
+ self.rest
raw_data = self.dec_TY5(raw8 + self.raw16 + self.raw32)
else:
dtype = numpy.dtype(numpy.int32)
nbytes = dim1 * dim2 * dtype.itemsize
raw_data = numpy.frombuffer(infile.read(nbytes), dtype).copy()
# Always assume little-endian on the disk
if not numpy.little_endian:
raw_data.byteswap(True)
logger.debug('OVER_SHORT2: %s', raw_data.dtype)
logger.debug("%s" % (raw_data < 0).sum())
logger.debug("BYTECODE: %s", raw_data.dtype.type)
self.data = raw_data.reshape((dim2, dim1))
self._dtype = None
return self
def _writeheader(self):
    """
    Assemble the full Oxford image header: a 256-byte ASCII block
    followed by the binary General, Special, KM4, Statistic and
    History sections.

    Fix: ``numpy.float`` (an alias for the builtin ``float``, removed in
    NumPy 1.24) is replaced by the dtype-identical ``numpy.float64``.

    :return: bytes containing the complete header for Oxford images
    """
    linesep = "\r\n"
    # Backfill any missing keys so the formatting below cannot KeyError.
    for key in DEFAULT_HEADERS:
        if key not in self.header:
            self.header[key] = DEFAULT_HEADERS[key]
    if "NX" not in self.header or "NY" not in self.header:
        dim2, dim1 = self.shape
        self.header['NX'] = dim1
        self.header['NY'] = dim2
    ascii_headers = [self.header['Header Version'],
                     "COMPRESSION=%s (%5.1f)" % (self.header["Compression"], self.getCompressionRatio()),
                     "NX=%4i NY=%4i OI=%7i OL=%7i " % (self.header["NX"], self.header["NY"], self.header["OI"], self.header["OL"]),
                     "NHEADER=%7i NG=%7i NS=%7i NK=%7i NS=%7i NH=%7i" % (self.header['Header Size In Bytes'],
                                                                         self.header['General Section size in Byte'],
                                                                         self.header['Special Section size in Byte'],
                                                                         self.header['KM4 Section size in Byte'],
                                                                         self.header['Statistic Section in Byte'],
                                                                         self.header['History Section in Byte']),
                     "NSUPPLEMENT=%7i" % (self.header["NSUPPLEMENT"])]
    if "Time" in self.header:
        ascii_headers.append("TIME=%s" % self.header["Time"])
    else:
        ascii_headers.append("TIME=%s" % time.ctime())
    # ASCII part is padded to exactly 256 bytes.
    header = (linesep.join(ascii_headers)).ljust(256).encode("ASCII")

    # --- General section: binning and detector geometry -------------------
    NG = Section(self.header['General Section size in Byte'], self.header)
    NG.setData('Binning in x', 0, numpy.uint16)
    NG.setData('Binning in y', 2, numpy.uint16)
    NG.setData('Detector size x', 22, numpy.uint16)
    NG.setData('Detector size y', 24, numpy.uint16)
    NG.setData('Pixels in x', 26, numpy.uint16)
    NG.setData('Pixels in y', 28, numpy.uint16)
    NG.setData('No of pixels', 36, numpy.uint32)
    header += NG.__repr__()

    # --- Special section: gain, overflow and monitor statistics -----------
    NS = Section(self.header['Special Section size in Byte'], self.header)
    NS.setData('Gain', 56, numpy.float64)
    NS.setData('Overflows flag', 464, numpy.int16)
    NS.setData('Overflow after remeasure flag', 466, numpy.int16)
    NS.setData('Overflow threshold', 472, numpy.int32)
    NS.setData('Exposure time in sec', 480, numpy.float64)
    NS.setData('Overflow time in sec', 488, numpy.float64)
    NS.setData('Monitor counts of raw image 1', 528, numpy.int32)
    NS.setData('Monitor counts of raw image 2', 532, numpy.int32)
    NS.setData('Monitor counts of overflow raw image 1', 536, numpy.int32)
    NS.setData('Monitor counts of overflow raw image 2', 540, numpy.int32)
    NS.setData('Unwarping', 544, numpy.int32)
    if 'Detector type' in self.header:
        # Store the numeric detector-type code matching the textual name.
        for key, value in DETECTOR_TYPES.items():
            if value == self.header['Detector type']:
                NS.setData(None, 548, numpy.int32, default=key)
    NS.setData('Real pixel size x (mm)', 568, numpy.float64)
    NS.setData('Real pixel size y (mm)', 576, numpy.float64)
    header += NS.__repr__()

    # --- KM4 section: goniometer angles and beam geometry ------------------
    KM = Section(self.header['KM4 Section size in Byte'], self.header)
    KM.setData('Spatial correction file date', 0, "|S26")
    KM.setData('Spatial correction file', 26, "|S246")
    # Angles are in steps due to stepper motors - conversion factor RAD
    # angle[0] = omega, angle[1] = theta, angle[2] = kappa, angle[3] = phi,
    if self.header.get('Omega step in deg', None):
        KM.setData(None, 368, numpy.float64, deg2rad(self.header["Omega step in deg"]))
    if self.header.get('Omega start in deg', None):
        KM.setData(None, 284, numpy.int32, self.header["Omega start in deg"] / self.header["Omega step in deg"])
    if self.header.get('Omega end in deg', None):
        KM.setData(None, 324, numpy.int32, self.header["Omega end in deg"] / self.header["Omega step in deg"])
    if self.header.get('Omega zero corr. in deg', None):
        KM.setData(None, 512, numpy.int32, self.header['Omega zero corr. in deg'] / self.header["Omega step in deg"])
    if self.header.get('Theta step in deg', None):
        KM.setData(None, 368 + 8, numpy.float64, deg2rad(self.header["Theta step in deg"]))
    if self.header.get('Theta start in deg', None):
        KM.setData(None, 284 + 4, numpy.int32, self.header["Theta start in deg"] / self.header["Theta step in deg"])
    if self.header.get('Theta end in deg', None):
        KM.setData(None, 324 + 4, numpy.int32, self.header["Theta end in deg"] / self.header["Theta step in deg"])
    if self.header.get('Theta zero corr. in deg', None):
        KM.setData(None, 512 + 4, numpy.int32, self.header['Theta zero corr. in deg'] / self.header["Theta step in deg"])
    if self.header.get('Kappa step in deg', None):
        KM.setData(None, 368 + 16, numpy.float64, deg2rad(self.header["Kappa step in deg"]))
    if self.header.get('Kappa start in deg', None):
        KM.setData(None, 284 + 8, numpy.int32, self.header["Kappa start in deg"] / self.header["Kappa step in deg"])
    if self.header.get('Kappa end in deg', None):
        KM.setData(None, 324 + 8, numpy.int32, self.header["Kappa end in deg"] / self.header["Kappa step in deg"])
    if self.header.get('Kappa zero corr. in deg', None):
        KM.setData(None, 512 + 8, numpy.int32, self.header['Kappa zero corr. in deg'] / self.header["Kappa step in deg"])
    if self.header.get('Phi step in deg', None):
        KM.setData(None, 368 + 24, numpy.float64, deg2rad(self.header["Phi step in deg"]))
    if self.header.get('Phi start in deg', None):
        KM.setData(None, 284 + 12, numpy.int32, self.header["Phi start in deg"] / self.header["Phi step in deg"])
    if self.header.get('Phi end in deg', None):
        KM.setData(None, 324 + 12, numpy.int32, self.header["Phi end in deg"] / self.header["Phi step in deg"])
    if self.header.get('Phi zero corr. in deg', None):
        KM.setData(None, 512 + 12, numpy.int32, self.header['Phi zero corr. in deg'] / self.header["Phi step in deg"])
    # Beam rotation about e2,e3
    KM.setData('Beam rot in deg (e2)', 552, numpy.float64)
    KM.setData('Beam rot in deg (e3)', 560, numpy.float64)
    # Wavelenghts alpha1, alpha2, beta
    KM.setData('Wavelength alpha1', 568, numpy.float64)
    KM.setData('Wavelength alpha2', 576, numpy.float64)
    KM.setData('Wavelength alpha', 584, numpy.float64)
    KM.setData('Wavelength beta', 592, numpy.float64)
    # Detector tilts around e1,e2,e3 in deg
    KM.setData('Detector tilt e1 in deg', 640, numpy.float64)
    KM.setData('Detector tilt e2 in deg', 648, numpy.float64)
    KM.setData('Detector tilt e3 in deg', 656, numpy.float64)
    # Beam center
    KM.setData('Beam center x', 664, numpy.float64)
    KM.setData('Beam center y', 672, numpy.float64)
    # Angle (alpha) between kappa rotation axis and e3 (ideally 50 deg)
    KM.setData('Alpha angle in deg', 680, numpy.float64)
    # Angle (beta) between phi rotation axis and e3 (ideally 0 deg)
    KM.setData('Beta angle in deg', 688, numpy.float64)
    # Detector distance
    KM.setData('Distance in mm', 712, numpy.float64)
    header += KM.__repr__()

    # --- Statistic section -------------------------------------------------
    SS = Section(self.header['Statistic Section in Byte'], self.header)
    SS.setData('Stat: Min ', 0, numpy.int32)
    SS.setData('Stat: Max ', 4, numpy.int32)
    SS.setData('Stat: Average ', 24, numpy.float64)
    if self.header.get('Stat: Stddev ', None):
        # The file format stores the variance (stddev squared) at offset 32.
        SS.setData(None, 32, numpy.float64, self.header['Stat: Stddev '] ** 2)
    SS.setData('Stat: Skewness ', 40, numpy.float64)
    header += SS.__repr__()

    # --- History section ---------------------------------------------------
    HS = Section(self.header['History Section in Byte'], self.header)
    HS.setData('Flood field image', 99, "|S27")
    header += HS.__repr__()
    return header
def write(self, fname):
    """Write Oxford diffraction images: this is still beta.

    Only TY1 compressed images are currently possible; any other
    compression setting in the header is overridden.

    :param fname: output filename
    """
    if self.header.get("Compression") != "TY1":
        logger.warning("Enforce TY1 compression")
        self.header["Compression"] = "TY1"
    datablock8, datablock16, datablock32 = compTY1(self.data)
    # OI/OL are element counts (2 bytes per 16-bit entry, 4 per 32-bit);
    # use integer division so the header fields stay ints under Python 3.
    self.header["OI"] = len(datablock16) // 2
    self.header["OL"] = len(datablock32) // 4
    with self._open(fname, mode="wb") as outfile:
        outfile.write(self._writeheader())
        outfile.write(datablock8)
        outfile.write(datablock16)
        outfile.write(datablock32)
def getCompressionRatio(self):
    """Return the achieved compression as a percentage of the raw size.

    Compressed stream = 1 byte per pixel, plus 2 bytes per 16-bit
    overflow (OI) and 4 bytes per 32-bit overflow (OL); the raw image
    is assumed to be 4 bytes per pixel.
    """
    npix = self.data.size
    compressed = npix + 2 * self.header["OI"] + 4 * self.header["OL"]
    return 100.0 * compressed / (npix * 4)
@staticmethod
def checkData(data=None):
if data is None:
return None
else:
return data.astype(int)
def dec_TY5(self, stream):
    """
    Attempt to decode TY5 compression scheme.

    Pixels are delta-encoded: each output value is the previous value in
    the row plus a signed delta read from the stream.  A single byte
    < 254 encodes a delta biased by 127; marker bytes 254 and 255 escape
    to 16-bit and 32-bit little-endian deltas respectively.

    :param stream: input stream (compressed bytes)
    :return: 1D array with data
    """
    logger.info("TY5 decompression is slow for now")
    array_size = self._shape[0] * self._shape[1]
    stream_size = len(stream)
    data = numpy.zeros(array_size)
    raw = numpy.frombuffer(stream, dtype=numpy.uint8)
    # pos_inp: read cursor in `raw`; pos_out: write cursor in `data`;
    # ex1/ex2 count the 16-bit/32-bit escapes (logging only).
    pos_inp = pos_out = current = ex1 = ex2 = 0
    # NOTE(review): used below as the row length so the delta predictor
    # resets at each row start.  Elsewhere in this class the data is
    # reshaped to (dim2, dim1) with dim1 the row length, which suggests
    # this should be self._shape[1] for non-square images -- confirm
    # against the TY5 format specification.
    dim2 = self._shape[0]
    while pos_inp < stream_size and pos_out < array_size:
        if pos_out % dim2 == 0:
            # first pixel of a row has no predecessor
            last = 0
        else:
            last = current
        value = raw[pos_inp]
        if value < 254:
            # this is the normal case:
            # 1 byte encodes one pixel as a delta biased by 127
            current = last + value - 127
            pos_inp += 1
        elif value == 254:
            ex1 += 1
            # this is the special case 1:
            # if the marker 254 is found the next 2 bytes encode one pixel
            value = raw[pos_inp + 1:pos_inp + 3].view(numpy.int16)
            if not numpy.little_endian:
                # stream is little-endian on disk; swap on big-endian hosts
                value = value.byteswap(True)
            current = last + value[0]
            pos_inp += 3
        elif value == 255:
            # this is the special case 2:
            # if the marker 255 is found the next 4 bytes encode one pixel
            ex2 += 1
            logger.info('special case 32 bits.')
            value = raw[pos_inp + 1:pos_inp + 5].view(numpy.int32)
            if not numpy.little_endian:
                value = value.byteswap(True)
            current = last + value[0]
            pos_inp += 5
        data[pos_out] = current
        pos_out += 1
    logger.info("TY5: Exception: 16bits: %s, 32bits: %s", ex1, ex2)
    return data
# Alias with the historical capitalisation, presumably kept so existing
# code importing `OXDimage` keeps working -- verify before removing.
OXDimage = OxdImage
class Section(object):
"""
Small helper class for writing binary headers
"""
def __init__(self, size, dictHeader):
    """Prepare a zero-filled binary section buffer.

    :param size: size of the header section in bytes
    :param dictHeader: headers of the image (values are copied into the
        buffer later via :meth:`setData`)
    """
    self.size = size
    self.header = dictHeader
    # Mutable, zero-initialized byte buffer that setData() patches in place.
    self.lstChr = bytearray(size)
    # Cache mapping dtype -> element size in bytes (filled by getSize()).
    self._dictSize = {}
def __repr__(self):
    # NOTE(review): returns *bytes*, not str -- calling repr()/print on a
    # Section would raise TypeError under Python 3.  It works here only
    # because _writeheader() invokes sec.__repr__() directly to obtain the
    # raw packed section; consider renaming to tobytes() or __bytes__.
    return bytes(self.lstChr)
def getSize(self, dtype):
    """Return the size in bytes of one element of ``dtype``.

    Results are memoized in ``self._dictSize``.

    :param dtype: numpy dtype (or dtype-like, e.g. "|S26")
    :return: element size in bytes
    """
    if dtype not in self._dictSize:
        # numpy.dtype(...).itemsize gives the element size directly,
        # replacing the deprecated zeros(1).tostring() round-trip
        # (ndarray.tostring was removed from modern numpy).
        self._dictSize[dtype] = numpy.dtype(dtype).itemsize
    return self._dictSize[dtype]
def setData(self, key, offset, dtype, default=None):
"""
:param offset: int, starting position in the section
:param key: name of the header key
:param dtype: type of the data to insert (defines the size!)
"""
if key in | |
#!/usr/bin/python
# Copyright (c) 2018
# United States Government as represented by <NAME> <<EMAIL>>
# No copyright is claimed in the United States under Title 17, U.S.Code. All Other Rights Reserved.
# This program attempts to forward tunneled traffic and send it over multiple
# interfaces that can reach the same destination. One use case is when sending
# data through multiple PPP links. The performance of this solution will be
# better than multi-link PPP (ML-PPP) for links that are lossy or unreliable.
# The name udp_proxy comes from original intent of this program to supplement
# Multipath TCP (MPTCP). UDP traffic would be forwarded to the tun interface,
# and this proxy would service any packets placing them on the PPP interfaces.
# Currently transmission of a packet is tried N number of times, where N is the
# number of expected interfaces. Future versions of this code may attempt to
# leverage traffic priority classes or other more advanced features.
import os, sys, socket, time, signal
import struct, logging, math
from argparse import ArgumentParser, ArgumentTypeError, SUPPRESS
import fcntl
import select
def sigint_handler(signal, stackframe):
    # Handler for Ctrl-C (SIGINT): print a newline so the shell prompt
    # stays clean, release the global resources, and exit with status 2.
    # NOTE: the parameter name `signal` shadows the imported `signal`
    # module inside this function (harmless here, but easy to trip on).
    print ""
    logging.debug('Caught Interrupt, Exiting!')
    cleanup()
    sys.exit(2)
def main_exit(msg="", val=1):
    # Log `msg` (warning when exiting with status 0, critical otherwise),
    # release the global resources, and terminate with exit status `val`.
    if (val == 0):
        logging.warn(msg)
    else:
        logging.critical(msg)
    cleanup()
    sys.exit(val)
def cleanup():
    # Close the global raw socket, the tunnel/stdio stream and the trace
    # file, skipping any that were never opened (still None).  Called from
    # both the SIGINT handler and main_exit().
    global sock
    global fd_stream
    global fd_trace
    if sock is not None:
        sock.close()
    if fd_stream is not None:
        fd_stream.close()
    if fd_trace is not None:
        fd_trace.close()
class tunnel():
    """Thin wrapper around a Linux TUN device (/dev/net/tun).

    Opens the device, attaches it to the named interface in IFF_TUN |
    IFF_NO_PI mode (layer-3 frames, no packet-info prefix) and switches
    it to non-blocking I/O.  Requires root (or CAP_NET_ADMIN).
    """
    tun = None  # underlying file object for /dev/net/tun

    def __init__(self, name, mode='r+b'):
        # ioctl constants from linux/if_tun.h
        TUNSETIFF = 0x400454ca
        TUNSETOWNER = TUNSETIFF + 2  # kept for the commented ioctl below
        IFF_TUN = 0x0001             # layer-3 TUN device (no Ethernet header)
        IFF_NO_PI = 0x1000           # do not prepend the packet-info struct
        self.tun = open('/dev/net/tun', mode)
        ifr = struct.pack('16sH', str(name), IFF_TUN | IFF_NO_PI)
        fcntl.ioctl(self.tun, TUNSETIFF, ifr)
        #fcntl.ioctl(self.tun, TUNSETOWNER, 1000)
        fcntl.fcntl(self.tun, fcntl.F_SETFL, os.O_NONBLOCK)
        self.fileno = self.tun.fileno()

    def read(self, size):
        # Read one packet (up to `size` bytes) from the TUN device.
        return os.read(self.fileno, size)

    def write(self, buff):
        # Write one packet to the TUN device; returns bytes written.
        return os.write(self.fileno, buff)

    def close(self):
        # BUG FIX: previously returned True without ever closing the
        # device, leaking the /dev/net/tun file descriptor.
        self.tun.close()
        return True
if __name__ == '__main__':
# Administrative Stuff
signal.signal(signal.SIGINT, sigint_handler)
# Globals that will require "cleanup"
sock = None
sock_list = []
fd_stream = None
fd_trace = None
# Parse Arguments
parser = ArgumentParser(description="This program will forward UDP traffic to a set of interfaces. It uses raw sockets and thus requires elevated (root) permissions.")
parser.add_argument("-V", "--version", action="version", version='%(prog)s version 1.00, <NAME> (<EMAIL>)')
parser.add_argument("-D", "--debug", action="store_true", dest="debug", default=False, help="Extra debugging information")
parser.add_argument("-v", "--verbose", action="count", dest="verbose", help="Increase verboseness of messages")
parser.add_argument("-q", "--quiet", action="store_const", dest="verbose", const=0, help="Disable any extra dialog")
parser.add_argument("-f", "--force", action="store_true", dest="force", default=False, help="Ignore sanity checks")
parser.add_argument("-b", "--blocking", action="store_true", dest="blocking", default=False, help="Enable blocking. Block input until SDU bytes are received")
parser.add_argument("-d", "--discard", action="store_true", dest="discard", default=False, help="If an interface is down, discard the data instead of retrying on another interface.")
parser.add_argument("-P", "--pcap",
action="store", type=str, dest="pcap", default=None,
help="Read input from the specified pcap file instead of standard in.", metavar="FILE")
parser.add_argument("-p", "--prefix",
action="store", type=str, dest="prefix", default="ppp",
help="Interface PREFIX to search for outbound traffic. (Default: %(default)s)", metavar="PREFIX")
parser.add_argument("-n", "--number-of-interfaces",
action="store", type=int, dest="niface", default=4,
help="Number of interfaces to try and use. (Default: %(default)s)", metavar="NUM")
parser.add_argument("-i", "--index",
action="store", type=int, dest="index", default=range(4), nargs="*",
help="Specific INDEX(es) to use for the interface. NOTE: Order is maintained. (Default is to use %(default)s)", metavar="INDEX")
parser.add_argument("-s", "--size",
action="store", type=int, dest="sdu", default=1500,
help="Max packet SIZE in bytes (SDU) (Default %(default)s)", metavar="SIZE")
parser.add_argument("-r", "--rate",
action="store", type=int, dest="rate", default=0,
help="Limit data to RATE bits/second (Default Unlimited)", metavar="RATE")
parser.add_argument("-T", "--tunnel",
action="store", type=str, dest="tunnel_dev", default=None,
help="Attach to and Send/Receive to TUN device.", metavar="TUN")
parser.add_argument("-t", "--timestamp",
action="store", type=str, dest="trace",
help="Record a timestamped log of outbound and inbound data packets in FILE.", metavar="FILE")
parser.add_argument("-w", "--wait",
action="store", type=int, dest="wait", default=0,
help="Connections which are idle after timeout SEC are terminated. Useful for piping from files.", metavar="SEC")
options = parser.parse_args()
# Option Checking
if (options.sdu <= 0):
parser.error("Packet size too small!")
if (options.pcap is not None):
# TODO - pcap support?
parser.error("Sorry this feature is not implemented yet.")
if (options.rate > 0):
delay = (8.0*float(options.sdu))/float(options.rate)
else:
delay = 0
# Establish logging
# Set log level here in "basicConfig", levels are NOTSET, DEBUG, INFO, WARNING, ERROR and CRITICAL
logging_format = "%(asctime)s; %(levelname)s; %(funcName)s; %(lineno)d; %(message)s"
if options.debug:
logging.basicConfig(level=logging.DEBUG, format=logging_format)
elif (options.verbose >= 2):
logging.basicConfig(level=logging.INFO, format=logging_format)
elif (options.verbose == 1):
logging.basicConfig(level=logging.ERROR, format=logging_format)
else:
logging.basicConfig(level=logging.CRITICAL, format=logging_format)
# Check index values
if (len(options.index) < options.niface):
if (options.force):
while (len(options.index) < options.niface):
options.index.append(options.index[-1]+1)
logging.warn("Not enough indexes given, extrapolating to: {}".format(options.index[:options.niface]))
else:
parser.error("Not enough interfaces identified!")
elif (len(options.index) > options.niface):
if (options.force):
options.index = options.index[:options.niface]
logging.warn("Full index will not be used! Only using: {}".format(options.index[:options.niface]))
else:
parser.error("Too many interfaces identified!")
for i in options.index:
if (i<0):
parser.error("An index cannot be negative!")
# Establish Input Timeout Timer if needed
if (options.wait > 0):
input_to = 0
fd_in_empty = False
# If we are tracing the data, check that we have write access.
if (options.trace is not None):
try: fd_trace = open(options.trace, 'wb')
except IOError, e:
main_exit('Cannot write to %s: %s'%(options.trace,e),1)
# Show all set options for Debugging output
if (options.debug):
print "Set Options:", options
# Check for root
if os.geteuid() != 0:
if (options.force):
logging.warning("You need to have root privileges to run this script.")
else:
main_exit("You need to have root privileges to run this script.")
# Establish a RAW socket - Requires ROOT PRIVILEGES
try:
sock = socket.socket(socket.AF_PACKET, socket.SOCK_DGRAM)
except socket.error, err:
main_exit("Unable to create a socket!: {}".format(err))
if (options.tunnel_dev is not None):
# Tunnel Support
try:
fd_stream = tunnel(options.tunnel_dev)
fd_in = fd_stream
fd_out = fd_stream
fd_in_ready = fd_stream.tun
except IOError, e:
main_exit('Error accessing tunnel: %s'%(e),1)
else:
# Normal Input/Output
# This is Handy for just typing in input and sending it, but that's not terribly useful
# TODO - Make this dump into a packet? Will the raw bits to the ppp interface work? (ie show up on the other side?)
try:
fd_in = os.fdopen(sys.stdin.fileno(),'rb',0)
fd_out = os.fdopen(sys.stdout.fileno(),'wb', 0)
fd_stream = fd_out
fd_in_ready = fd_in
if (options.blocking is False):
fcntl.fcntl(fd_in, fcntl.F_SETFL, os.O_NONBLOCK)
except IOError,e:
main_exit('Broken Pipe',1)
sock_list.append(fd_in_ready)
# Don't block on the network interface, but instead raise errors
sock.setblocking(0)
# Message Handling
last_tx = 0
msg = None
msg_part = None
cur_iface = 0
# Error Handling
tx_fail={}
tx_fail_reasons = [None, 19, 100]
# Main Service Loop
while 1:
recv_r, send_r, error_r = select.select(sock_list, [], [sock], 0.2)
# Sending Data
if (fd_in_ready in recv_r):
# Postpone our read if we are rate limiting
if ( (delay == 0) or ((time.time() - last_tx) > delay) ):
# If tracing, store the time before any other processing.
if (fd_trace is not None):
start_tx = time.time()
# Test to see if raw socket is ready for data
_,send_r,_ = select.select([], [sock], [], 0)
if (sock in send_r):
tx_data = ''
try: tx_data = fd_in.read(options.sdu)
except EOFError, e: pass
except IOError, e: pass
except OSError, e: pass
data_len = len(tx_data)
if (options.wait > 0):
if (data_len > 0):
fd_in_empty = False
else:
fd_in_empty = True
if (data_len > 0):
# Setup a loop here that will retry each interface
# Leveraging: Socket Error: [Errno 19] No such device
# Also Errno 100 when the interface exists but it not established yet
tx_success=False
tx_fail['cnt'] = 0
tx_fail['reason'] = None
while ((not tx_success) and (tx_fail['reason'] in tx_fail_reasons) and (tx_fail['cnt'] < options.niface)):
iface = options.prefix+str(options.index[cur_iface])
cur_iface = (cur_iface + 1)%options.niface
try:
sock.sendto(tx_data, (iface, 0x0800))
logging.info("Data SENT on {}".format(iface))
tx_success=True
except socket.error, err:
tx_fail['cnt'] += 1
tx_fail['reason'] = err.errno
logging.debug("Socket Error: {}".format(err))
if (options.discard or (tx_fail['reason'] not in tx_fail_reasons) or (tx_fail['cnt'] == options.niface)):
logging.info("Data DROP on {}".format(iface))
else:
logging.info("Data SKIP on {}".format(iface))
tx_success=False
if ((options.trace) or (delay > 0)):
last_tx = time.time()
if (fd_trace is not None):
fd_trace.write("Tx, {:f}, {:f}, {}, {}, {}, {}, {}\n".format(last_tx,start_tx,data_len,iface,tx_success,tx_fail['reason'],str(tx_data).encode('hex')))
fd_trace.flush()
if (options.discard):
# Don't ever retry if discard is set
break
else:
logging.debug("Failed to write to interface: Nothing to Send")
else:
logging.debug("Interface blocked write!")
# Receive Data
if (sock in recv_r):
# For this code, we are not expecting to receive anything on this socket... warn if it happens
logging.warn("Raw socket has data pending??")
# Erred Socks
if (sock in error_r):
# Not expecting this either...
main_exit("Got note that raw sock is broken... | |
x = x + 1
# if constructed firelines are on then
# initialize constructed fire lines on day 6th
if t == 6 and typeOfFireline == 2:
# loop through every cell in grid except boundaries
for row in range(1,rows - 1):
for col in range(1, cols - 1):
# check where bruning fire front (state 10) is
if grid[row][col] == 10:
# construct fireline of shape 1
if shapeConstructedFireline == 1:
# construct fireline at distance '1'
if distanceConstructedFireline == 1:
# set chosen gridcells to fire line state
grid[row-2][col] = 13
grid[row-2][col-1] = 13
grid[row-2][col+1] = 13
grid[row-2][col-2] = 13
grid[row-2][col+2] = 13
grid[row-1][col+2] = 13
grid[row-1][col-2] = 13
grid[row-1][col-3] = 13
grid[row-1][col+3] = 13
grid[row][col-3] = 13
grid[row][col+3] = 13
# construct fireline at distance '2'
if distanceConstructedFireline == 2:
# set chosen gridcells to fire line state
grid[row-3][col] = 13
grid[row-3][col-1] = 13
grid[row-3][col+1] = 13
grid[row-3][col-2] = 13
grid[row-3][col+2] = 13
grid[row-2][col+2] = 13
grid[row-2][col-1] = 13
grid[row-2][col-3] = 13
grid[row-2][col+3] = 13
grid[row-1][col-3] = 13
grid[row-1][col+3] = 13
# constructing the fireline at distance '3'
if distanceConstructedFireline == 3:
# set chosen gridcells to fire line state
grid[row-4][col] = 13
grid[row-4][col-1] = 13
grid[row-4][col+1] = 13
grid[row-4][col-2] = 13
grid[row-4][col+2] = 13
grid[row-3][col+2] = 13
grid[row-3][col-2] = 13
grid[row-3][col-3] = 13
grid[row-3][col+3] = 13
grid[row-2][col-3] = 13
grid[row-2][col+3] = 13
# construct fire line of shape 2
if shapeConstructedFireline == 2:
# constructing the fireline at distance '1'
if distanceConstructedFireline == 1:
# set chosen gridcells to fire line state
grid[row-2][col] = 13
grid[row-2][col-1] = 13
grid[row-2][col+1] = 13
grid[row-2][col+2] = 13
grid[row-2][col-2] = 13
grid[row-1][col-2] = 13
grid[row-1][col+2] = 13
# constructing the fireline at distance '2'
if distanceConstructedFireline == 2:
# set chosen gridcells to fire line state
grid[row-3][col] = 13
grid[row-3][col-1] = 13
grid[row-3][col+1] = 13
grid[row-3][col+2] = 13
grid[row-3][col-2] = 13
grid[row-2][col-2] = 13
grid[row-2][col+2] = 13
# constructing the fireline at distance '3'
if distanceConstructedFireline == 3:
# set chosen gridcells to fire line state
grid[row-4][col] = 13
grid[row-4][col-1] = 13
grid[row-4][col+1] = 13
grid[row-4][col+2] = 13
grid[row-4][col-2] = 13
grid[row-3][col-2] = 13
grid[row-3][col+2] = 13
# construct fire line of shape 3
if shapeConstructedFireline == 3:
# constructing the fireline at distance '3'
if distanceConstructedFireline == 1:
# set chosen gridcells to fire line state
grid[row-2][col] = 13
grid[row-2][col-1] = 13
grid[row-2][col+1] = 13
# constructing the fireline at distance '3'
if distanceConstructedFireline == 2:
# set chosen gridcells to fire line state
grid[row-3][col] = 13
grid[row-3][col-1] = 13
grid[row-3][col+1] = 13
# constructing the fireline at distance '3'
if distanceConstructedFireline == 3:
# set chosen gridcells to fire line state
grid[row-4][col] = 13
grid[row-4][col-1] = 13
grid[row-4][col+1] = 13
# loop through every gridcell except at the boundaries
for row in range(1,rows-1):
for col in range(1,cols-1):
# keep track of how long a cell has been burning
if grid[row][col] == 10:
timeBurning[row][col] += 1
# allocate Pden and Pveg values depending
# on state of cell (see state list above)
# sparse vegetation
if grid[row][col] == 1:
Pden = -0.4
Pveg = -0.3
elif grid[row][col] == 2:
Pden = -0.4
Pveg = 0
elif grid[row][col] == 3:
Pden = -0.4
Pveg = 0.4
# normal vegetation
elif grid[row][col] == 4:
Pden = 0
Pveg = -0.3
elif grid[row][col] == 5:
Pden = 0
Pveg = 0
elif grid[row][col] == 6:
Pden = 0
Pveg = 0.4
# dense vegetation
elif grid[row][col] == 7:
Pden = 0.3
Pveg = -0.3
elif grid[row][col] == 8:
Pden = 0.3
Pveg = 0
elif grid[row][col] == 9:
Pden = 0.3
Pveg = 0.4
# compute Pburn for burning neighbour cells
# using the a Moore neighborhood (8 neighbours)
# and the location/angle (theta) of neighbour cell
# relative to cell and the North
if grid[row+1][col-1] == 10:
theta = 45
Pburn_1 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
if grid[row+1][col+1] == 10:
theta = 45
Pburn_2 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
if grid[row+1][col] == 10:
theta = 0
Pburn_3 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
if grid[row][col-1] == 10:
theta = 90
Pburn_4 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
if grid[row][col+1] == 10:
theta = 90
Pburn_5 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
if grid[row-1][col-1] == 10:
theta = 135
Pburn_6 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
if grid[row-1][col+1] == 10:
theta = 135
Pburn_7 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
if grid[row-1][col] == 10:
theta = 180
Pburn_8 = burning_probability(theta, V, c2, c1, Ph, Pden, Pveg)
# Pburn = 0 if neighbour is not burning
if grid[row+1][col-1] != 10:
Pburn_1 = 0
if grid[row+1][col+1] != 10:
Pburn_2 = 0
if grid[row+1][col] != 10:
Pburn_3 = 0
if grid[row][col-1] != 10:
Pburn_4 = 0
if grid[row][col+1] != 10:
Pburn_5 = 0
if grid[row-1][col-1] != 10:
Pburn_6 = 0
if grid[row-1][col+1] != 10:
Pburn_7 = 0
if grid[row-1][col] != 10:
Pburn_8 = 0
# compute chance of current cell catching fire
# cell can only catch fire if state is 1-9
# chance of neighbour setting cell on fire
# is independent of other neighbour cells
if (grid[row][col] != 0 and grid[row][col] != 10 and grid[row][col] != 11 and grid[row][col] != 13) and \
(np.random.uniform() < Pburn_1 or np.random.uniform() < Pburn_2 or np.random.uniform() < Pburn_3 or\
np.random.uniform() < Pburn_4 or np.random.uniform() < Pburn_5 or np.random.uniform() < Pburn_6 or \
np.random.uniform() < Pburn_7 or np.random.uniform() < Pburn_8):
# set cell to burning
grid[row][col] = 10
# start count of number of time steps
# cell is burning
timeBurning[row][col] += 1
# after 2 timesteps (and if cell is not a fireline (13))
# turn burning state (10) to burnt state (11)
if timeBurning[row][col] == 2 and grid[row][col] != 13:
grid[row][col] = 11
# keep track of number of burnt cells for statistics
if grid[row][col] == 11:
burnt += 1
# create visualisation of simulation
if repeats == 1:
# create barplot once
if t == 0:
# specify colorbar characteristics
cmap = colors.ListedColormap(['aqua', 'goldenrod', 'gold', 'yellow', 'greenyellow', 'lawngreen', 'olivedrab', 'green', 'limegreen', 'darkgreen', 'orangered', 'red', 'dimgray', 'brown'])
colormap = plt.imshow(grid, cmap= cmap)
cbar = plt.colorbar(colormap)
cbar.set_ticks([0,1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
cbar.set_ticklabels(["Water", "Fuel (Sparse/-0.3)", "Fuel (Sparse/0)", "Fuel (Sparse/0.4)", "Fuel (Normal/-0.3)", "Fuel (Normal/0)",
"Fuel(Normal/0.4)", "Fuel (Dense/-0.3)", "Fuel (Dense/0)", "Fuel (Dense/0.4)", "Burning", "Burnt", "City", "Fire lines"])
# draw grid for every time step
plt.imshow(grid, cmap=cmap)
plt.draw()
plt.pause(0.2)
# statistics
# 50 times the end state of fraction burnt cells
burnt_list.append(burnt/total_vegetation)
# filenames for storing data
if noFireLines == 0:
if typeOfFireline == 1 and distanceTemporaryFireline == 1:
filename = "temporary_distance_1"
if typeOfFireline == 1 and distanceTemporaryFireline == 2:
filename = "temporary_distance_2"
if typeOfFireline == 1 and distanceTemporaryFireline == 3:
filename = "temporary_distance_3"
if typeOfFireline == 2 and distanceConstructedFireline == 1 and shapeConstructedFireline == 1:
filename = "constructed_distance_1_shape_1"
if typeOfFireline == 2 and distanceConstructedFireline == 1 and shapeConstructedFireline == 2:
filename = "constructed_distance_1_shape_2"
if typeOfFireline == 2 and distanceConstructedFireline == 1 and shapeConstructedFireline == 3:
filename = "constructed_distance_1_shape_3"
if typeOfFireline == 2 and distanceConstructedFireline == 2 and shapeConstructedFireline == 1:
filename = "constructed_distance_2_shape_1"
if typeOfFireline == 2 and distanceConstructedFireline == 2 and shapeConstructedFireline == 2:
filename = "constructed_distance_2_shape_2"
if typeOfFireline == 2 and distanceConstructedFireline == 2 and shapeConstructedFireline == 3:
filename = "constructed_distance_2_shape_3"
if typeOfFireline == 2 and distanceConstructedFireline == 3 and shapeConstructedFireline == 1:
filename = "constructed_distance_3_shape_1"
if typeOfFireline == 2 and distanceConstructedFireline == 3 and shapeConstructedFireline == 2:
| |
<gh_stars>10-100
# ------------------------------------------------------------------------
# BEAUTY DETR
# Copyright (c) 2022 <NAME> & <NAME>
# Licensed under CC-BY-NC [see LICENSE for details]
# All Rights Reserved
# ------------------------------------------------------------------------
# Parts adapted from Group-Free
# Copyright (c) 2021 <NAME>. All Rights Reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------
"""Shared utilities for all main scripts."""
import argparse
import json
import os
import random
import time
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from models import HungarianMatcher, SetCriterion, compute_hungarian_loss
from utils import get_scheduler, setup_logger
def parse_option():
    """Parse command-line arguments and return the resulting namespace.

    Unknown arguments are silently ignored (parse_known_args), and
    ``args.eval`` is forced on whenever ``--eval_train`` is given.
    """
    ap = argparse.ArgumentParser()

    # --- Model -----------------------------------------------------------
    ap.add_argument('--num_target', type=int, default=256,
                    help='Proposal number')
    ap.add_argument('--sampling', default='kps', type=str,
                    help='Query points sampling method (kps, fps)')

    # --- Transformer ----------------------------------------------------
    ap.add_argument('--num_encoder_layers', default=3, type=int)
    ap.add_argument('--num_decoder_layers', default=6, type=int)
    ap.add_argument('--self_position_embedding', default='loc_learned',
                    type=str, help='(none, xyz_learned, loc_learned)')
    ap.add_argument('--self_attend', action='store_true')

    # --- Loss ------------------------------------------------------------
    ap.add_argument('--query_points_obj_topk', default=4, type=int)
    ap.add_argument('--use_contrastive_align', action='store_true')
    ap.add_argument('--use_soft_token_loss', action='store_true')
    ap.add_argument('--detect_intermediate', action='store_true')
    ap.add_argument('--joint_det', action='store_true')

    # --- Data ------------------------------------------------------------
    ap.add_argument('--batch_size', type=int, default=8,
                    help='Batch Size during training')
    ap.add_argument('--dataset', type=str, default=['sr3d'],
                    nargs='+', help='list of datasets to train on')
    ap.add_argument('--test_dataset', default='sr3d')
    ap.add_argument('--data_root', default='./')
    ap.add_argument('--use_height', action='store_true',
                    help='Use height signal in input.')
    ap.add_argument('--use_color', action='store_true',
                    help='Use RGB color in input.')
    ap.add_argument('--use_multiview', action='store_true')
    ap.add_argument('--butd', action='store_true')
    ap.add_argument('--butd_gt', action='store_true')
    ap.add_argument('--butd_cls', action='store_true')
    ap.add_argument('--augment_det', action='store_true')
    ap.add_argument('--num_workers', type=int, default=4)

    # --- Training --------------------------------------------------------
    ap.add_argument('--start_epoch', type=int, default=1)
    ap.add_argument('--max_epoch', type=int, default=400)
    ap.add_argument('--optimizer', type=str, default='adamW')
    ap.add_argument('--weight_decay', type=float, default=0.0005)
    ap.add_argument("--lr", default=1e-3, type=float)
    ap.add_argument("--lr_backbone", default=1e-4, type=float)
    ap.add_argument("--text_encoder_lr", default=1e-5, type=float)
    ap.add_argument('--lr-scheduler', type=str, default='step',
                    choices=["step", "cosine"])
    ap.add_argument('--lr_decay_epochs', type=int, default=[280, 340],
                    nargs='+', help='when to decay lr, can be a list')
    ap.add_argument('--lr_decay_rate', type=float, default=0.1,
                    help='for step scheduler. decay rate for lr')
    ap.add_argument('--clip_norm', default=0.1, type=float,
                    help='gradient clipping max norm')
    ap.add_argument('--bn_momentum', type=float, default=0.1)
    ap.add_argument('--syncbn', action='store_true')
    ap.add_argument('--warmup-epoch', type=int, default=-1)
    ap.add_argument('--warmup-multiplier', type=int, default=100)

    # --- IO --------------------------------------------------------------
    ap.add_argument('--checkpoint_path', default=None,
                    help='Model checkpoint path')
    ap.add_argument('--log_dir', default='log',
                    help='Dump dir to save model checkpoint')
    ap.add_argument('--print_freq', type=int, default=10)  # batch-wise
    ap.add_argument('--save_freq', type=int, default=10)   # epoch-wise
    ap.add_argument('--val_freq', type=int, default=5)     # epoch-wise

    # --- Others ----------------------------------------------------------
    ap.add_argument("--local_rank", type=int,
                    help='local rank for DistributedDataParallel')
    ap.add_argument('--ap_iou_thresholds', type=float, default=[0.25, 0.5],
                    nargs='+', help='A list of AP IoU thresholds')
    ap.add_argument("--rng_seed", type=int, default=0, help='manual seed')
    ap.add_argument("--debug", action='store_true',
                    help="try to overfit few samples")
    ap.add_argument('--eval', default=False, action='store_true')
    ap.add_argument('--eval_train', action='store_true')
    ap.add_argument('--pp_checkpoint', default=None)
    ap.add_argument('--reduce_lr', action='store_true')

    args, _ = ap.parse_known_args()
    # Evaluating on the train split still counts as evaluation mode.
    args.eval = args.eval or args.eval_train
    return args
def load_checkpoint(args, model, optimizer, scheduler):
    """Restore training state from ``args.checkpoint_path``.

    Model weights are always restored (strict key matching).  Optimizer and
    scheduler state are restored only when resuming a real training run,
    i.e. not in eval mode and not when an explicit lr reduction was asked.
    Side effect: sets ``args.start_epoch`` to the saved epoch + 1, or to 0
    when the stored epoch tag is not an int (e.g. the 'last' checkpoint).
    """
    print("=> loading checkpoint '{}'".format(args.checkpoint_path))
    # Deserialize onto CPU; the caller is responsible for device placement.
    ckpt = torch.load(args.checkpoint_path, map_location='cpu')
    try:
        args.start_epoch = int(ckpt['epoch']) + 1
    except Exception:
        # Non-numeric epoch tag (e.g. 'last'): restart epoch counting at 0.
        args.start_epoch = 0
    model.load_state_dict(ckpt['model'], strict=True)
    resume_training_state = not (args.eval or args.reduce_lr)
    if resume_training_state:
        optimizer.load_state_dict(ckpt['optimizer'])
        scheduler.load_state_dict(ckpt['scheduler'])
    print("=> loaded successfully '{}' (epoch {})".format(
        args.checkpoint_path, ckpt['epoch']
    ))
    # Drop the host-side copy of the weights immediately.
    del ckpt
    torch.cuda.empty_cache()
def save_checkpoint(args, epoch, model, optimizer, scheduler, save_cur=False):
    """Persist the full training state into ``args.log_dir``.

    A checkpoint is written when ``save_cur`` is set or on every
    ``args.save_freq``-th epoch; otherwise only a notice is printed.
    """
    if not save_cur and epoch % args.save_freq != 0:
        print("not saving checkpoint")
        return
    spath = os.path.join(args.log_dir, f'ckpt_epoch_{epoch}.pth')
    state = {
        'config': args,  # keep the run configuration next to the weights
        'save_path': spath,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
    }
    torch.save(state, spath)
    print("Saved in {}".format(spath))
class BaseTrainTester:
"""Basic train/test class to be inherited."""
def __init__(self, args):
"""Initialize."""
name = args.log_dir.split('/')[-1]
# Create log dir
args.log_dir = os.path.join(
args.log_dir,
','.join(args.dataset),
f'{int(time.time())}'
)
os.makedirs(args.log_dir, exist_ok=True)
# Create logger
self.logger = setup_logger(
output=args.log_dir, distributed_rank=dist.get_rank(),
name=name
)
# Save config file and initialize tb writer
if dist.get_rank() == 0:
path = os.path.join(args.log_dir, "config.json")
with open(path, 'w') as f:
json.dump(vars(args), f, indent=2)
self.logger.info("Full config saved to {}".format(path))
self.logger.info(str(vars(args)))
@staticmethod
def get_datasets(args):
"""Initialize datasets."""
train_dataset = None
test_dataset = None
return train_dataset, test_dataset
def get_loaders(self, args):
"""Initialize data loaders."""
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
np.random.seed(np.random.get_state()[1][0] + worker_id)
# Datasets
train_dataset, test_dataset = self.get_datasets(args)
# Samplers and loaders
g = torch.Generator()
g.manual_seed(0)
train_sampler = DistributedSampler(train_dataset)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
worker_init_fn=seed_worker,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
generator=g
)
test_sampler = DistributedSampler(test_dataset, shuffle=False)
test_loader = DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
worker_init_fn=seed_worker,
pin_memory=True,
sampler=test_sampler,
drop_last=False,
generator=g
)
return train_loader, test_loader
@staticmethod
def get_model(args):
"""Initialize the model."""
return None
@staticmethod
def get_criterion(args):
"""Get loss criterion for training."""
matcher = HungarianMatcher(1, 0, 2, args.use_soft_token_loss)
losses = ['boxes', 'labels']
if args.use_contrastive_align:
losses.append('contrastive_align')
set_criterion = SetCriterion(
matcher=matcher,
losses=losses, eos_coef=0.1, temperature=0.07
)
criterion = compute_hungarian_loss
return criterion, set_criterion
@staticmethod
def get_optimizer(args, model):
"""Initialize optimizer."""
param_dicts = [
{
"params": [
p for n, p in model.named_parameters()
if "backbone_net" not in n and "text_encoder" not in n
and p.requires_grad
]
},
{
"params": [
p for n, p in model.named_parameters()
if "backbone_net" in n and p.requires_grad
],
"lr": args.lr_backbone
},
{
"params": [
p for n, p in model.named_parameters()
if "text_encoder" in n and p.requires_grad
],
"lr": args.text_encoder_lr
}
]
optimizer = optim.AdamW(param_dicts,
lr=args.lr,
weight_decay=args.weight_decay)
return optimizer
    def main(self, args):
        """Run main training/testing pipeline.

        Builds loaders, model, losses, optimizer and scheduler, wraps the
        model in DistributedDataParallel, optionally resumes from a
        checkpoint, then either evaluates once (``args.eval``) or trains to
        ``args.max_epoch`` with periodic validation/checkpointing.
        Returns the path of the final 'last' checkpoint (None in eval mode).
        """
        # Get loaders
        train_loader, test_loader = self.get_loaders(args)
        n_data = len(train_loader.dataset)
        self.logger.info(f"length of training dataset: {n_data}")
        n_data = len(test_loader.dataset)
        self.logger.info(f"length of testing dataset: {n_data}")
        # Get model
        model = self.get_model(args)
        # Get criterion
        criterion, set_criterion = self.get_criterion(args)
        # Get optimizer
        optimizer = self.get_optimizer(args, model)
        # Get scheduler (stepped per *batch* in train_one_epoch, hence the
        # len(train_loader) argument)
        scheduler = get_scheduler(optimizer, len(train_loader), args)
        # Move model to devices
        if torch.cuda.is_available():
            model = model.cuda()
        # NOTE(review): DDP wrapping happens even without CUDA; device_ids
        # assumes one GPU per local rank -- confirm on CPU-only hosts.
        model = DistributedDataParallel(
            model, device_ids=[args.local_rank],
            broadcast_buffers=False  # , find_unused_parameters=True
        )
        # Check for a checkpoint (this also sets args.start_epoch)
        if args.checkpoint_path:
            assert os.path.isfile(args.checkpoint_path)
            load_checkpoint(args, model, optimizer, scheduler)
        # Just eval and end execution
        # NOTE(review): args.start_epoch appears to be set only by
        # load_checkpoint -- confirm a default exists when starting fresh.
        if args.eval:
            print("Testing evaluation.....................")
            self.evaluate_one_epoch(
                args.start_epoch, test_loader,
                model, criterion, set_criterion, args
            )
            return
        # Training loop
        for epoch in range(args.start_epoch, args.max_epoch + 1):
            # Re-seed the distributed sampler so shards differ per epoch.
            train_loader.sampler.set_epoch(epoch)
            tic = time.time()
            self.train_one_epoch(
                epoch, train_loader, model,
                criterion, set_criterion,
                optimizer, scheduler, args
            )
            self.logger.info(
                'epoch {}, total time {:.2f}, '
                'lr_base {:.5f}, lr_pointnet {:.5f}'.format(
                    epoch, (time.time() - tic),
                    optimizer.param_groups[0]['lr'],
                    optimizer.param_groups[1]['lr']
                )
            )
            # Periodic checkpoint (rank 0 only) and validation (all ranks).
            if epoch % args.val_freq == 0:
                if dist.get_rank() == 0:  # save model
                    save_checkpoint(args, epoch, model, optimizer, scheduler)
                print("Test evaluation.......")
                self.evaluate_one_epoch(
                    epoch, test_loader,
                    model, criterion, set_criterion, args
                )
        # Training is over, evaluate
        save_checkpoint(args, 'last', model, optimizer, scheduler, True)
        saved_path = os.path.join(args.log_dir, 'ckpt_epoch_last.pth')
        self.logger.info("Saved in {}".format(saved_path))
        self.evaluate_one_epoch(
            args.max_epoch, test_loader,
            model, criterion, set_criterion, args
        )
        return saved_path
@staticmethod
def _to_gpu(data_dict):
if torch.cuda.is_available():
for key in data_dict:
if isinstance(data_dict[key], torch.Tensor):
data_dict[key] = data_dict[key].cuda(non_blocking=True)
return data_dict
@staticmethod
def _get_inputs(batch_data):
return {
'point_clouds': batch_data['point_clouds'].float(),
'text': batch_data['utterances']
}
@staticmethod
def _compute_loss(end_points, criterion, set_criterion, args):
loss, end_points = criterion(
end_points, args.num_decoder_layers,
set_criterion,
query_points_obj_topk=args.query_points_obj_topk
)
return loss, end_points
@staticmethod
def _accumulate_stats(stat_dict, end_points):
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict:
stat_dict[key] = 0
if isinstance(end_points[key], (float, int)):
stat_dict[key] += end_points[key]
else:
stat_dict[key] += end_points[key].item()
return stat_dict
    def train_one_epoch(self, epoch, train_loader, model,
                        criterion, set_criterion,
                        optimizer, scheduler, args):
        """
        Run a single epoch.

        Forward/backward over every batch, with gradient clipping, a
        per-batch scheduler step, and periodic averaged-loss logging.

        Some of the args:
            model: a nn.Module that returns end_points (dict)
            criterion: a function that returns (loss, end_points)
        """
        stat_dict = {}  # collect statistics
        model.train()  # set model to training mode
        # Loop over batches
        for batch_idx, batch_data in enumerate(train_loader):
            # Move to GPU
            batch_data = self._to_gpu(batch_data)
            inputs = self._get_inputs(batch_data)
            # Forward pass
            end_points = model(inputs)
            # Compute loss and gradients, update parameters.
            # Merge ground truth into end_points so the criterion sees both;
            # the assert guards against the model reusing a batch key.
            for key in batch_data:
                assert (key not in end_points)
                end_points[key] = batch_data[key]
            loss, end_points = self._compute_loss(
                end_points, criterion, set_criterion, args
            )
            optimizer.zero_grad()
            loss.backward()
            if args.clip_norm > 0:
                grad_total_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(), args.clip_norm
                )
                stat_dict['grad_norm'] = grad_total_norm
            optimizer.step()
            # Scheduler is stepped per batch, not per epoch.
            scheduler.step()
            # Accumulate statistics and print out
            stat_dict = self._accumulate_stats(stat_dict, end_points)
            if (batch_idx + 1) % args.print_freq == 0:
                # Terminal logs: averages over the last print_freq batches
                # (stats are zeroed below after each report).
                self.logger.info(
                    f'Train: [{epoch}][{batch_idx + 1}/{len(train_loader)}] '
                )
                # Only top-level losses; per-layer (proposal_/last_/head_)
                # entries are suppressed to keep the log readable.
                self.logger.info(''.join([
                    f'{key} {stat_dict[key] / args.print_freq:.4f} \t'
                    for key in sorted(stat_dict.keys())
                    if 'loss' in key and 'proposal_' not in key
                    and 'last_' not in key and 'head_' not in key
                ]))
                for key in sorted(stat_dict.keys()):
                    stat_dict[key] = 0
@torch.no_grad()
def _main_eval_branch(self, batch_idx, batch_data, test_loader, model,
stat_dict,
criterion, set_criterion, args):
# Move to GPU
batch_data = self._to_gpu(batch_data)
inputs = self._get_inputs(batch_data)
if "train" not in inputs:
inputs.update({"train": False})
else:
inputs["train"] = False
# Forward pass
end_points = model(inputs)
# Compute loss
for key in batch_data:
assert (key not in end_points)
end_points[key] = batch_data[key]
_, end_points = self._compute_loss(
end_points, criterion, set_criterion, args
)
for key in end_points:
if 'pred_size' in key:
end_points[key] = torch.clamp(end_points[key], min=1e-6)
# Accumulate statistics and print out
stat_dict = self._accumulate_stats(stat_dict, end_points)
if (batch_idx + 1) % args.print_freq == 0:
self.logger.info(f'Eval: [{batch_idx + 1}/{len(test_loader)}] ')
self.logger.info(''.join([
f'{key} {stat_dict[key] / (float(batch_idx + 1)):.4f} \t'
for key | |
from __future__ import print_function
# -*- coding: utf-8 -*-
import base64
from importlib import import_module
import unittest
import socket
import json
import os
from time import sleep
import pytest
try:
from sauceclient import SauceClient
USE_SAUCE = True
WAIT_TIMEOUT = 20
except ImportError:
USE_SAUCE = False
WAIT_TIMEOUT = 10
from django.test import TestCase as DjangoTestCase, TransactionTestCase
from django.test.testcases import LiveServerThread as DjangoLiveServerThread, _MediaFilesHandler
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.contrib.auth.models import User as DjangoUser
from django.core.urlresolvers import reverse
from anaf.identities.models import Contact, ContactType
from anaf.core.models import Group
import datetime
from django.conf import settings
from django.contrib.auth import get_user_model, authenticate, login
from django.contrib.auth.models import Permission
from django.core.cache import cache
from django.test.utils import override_settings
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException, ElementNotVisibleException
# from cms.api import create_page, create_title, add_plugin
# from cms.apphook_pool import apphook_pool
# from cms.exceptions import AppAlreadyRegistered
# from cms.models import CMSPlugin, Page, Placeholder
# from cms.test_utils.project.placeholderapp.cms_apps import Example1App
# from cms.test_utils.project.placeholderapp.models import Example1
# from cms.utils.conf import get_cms_setting
from django.db import connections
from django.utils import six
from django.core.exceptions import ImproperlyConfigured
from django.core.handlers.wsgi import WSGIHandler
from django.core.servers.basehttp import WSGIServer, WSGIRequestHandler
import sys
import errno
def form_to_dict(form):
    """Helper function useful for tests, receives a django form and returns a dict so it can be used in client.post

    Falsy initial values (None, '', 0, False) are normalized to ''.
    :type form: django.forms.ModelForm
    :rtype dict
    """
    data = {}
    for name, field in form.fields.items():
        data[name] = field.initial if field.initial else ''
    return data
# NOTE(review): os.environ.get('SELENIUM', False) returns a *string* when the
# variable is set, so SELENIUM=0 still skips -- confirm this is intended
# (the mirror condition on LiveTestCase uses `not ...` the same way).
@pytest.mark.skipif(os.environ.get('SELENIUM', False), reason='Selenium env is set to 1')
class AnafTestCase(DjangoTestCase):
    """
    Base class for tests, common functionality will be here
    """
    def cmpDataApi(self, old, new, fieldname='root'):
        """
        Compares data using the old API with data retrieved with the new
        They don't need to be equivalent, the new API may return at least the data the old API was able to and may add
        :param str or dict or list old: content retrieved using the old API
        :param str or dict or list new: content retrieved using the new DRF API
        :return bool: is it kosher?
        """
        # JSON strings are decoded first so the rest works on dicts/lists.
        if isinstance(old, six.string_types):
            old = json.loads(old)
        if isinstance(new, six.string_types):
            new = json.loads(new)
        if isinstance(old, dict) and isinstance(new, dict):
            # Every old key except 'resource_uri' (tastypie-specific) must
            # exist in the new payload with the same type and value.
            for k, v in sorted(old.items()):
                if k == 'resource_uri':
                    continue
                assert k in new, 'Field {}.{} not found on new.\nold:{}\nnew:{}'.format(fieldname, k, old, new)
                assert isinstance(v, type(new[k])),\
                    'Field {}.{} exists but have different content type.\nold:{}\nnew:{}'.format(fieldname, k, v, new[k])
                if isinstance(v, dict):
                    # Recurse, extending the dotted field path for messages.
                    self.cmpDataApi(v, new[k], '{}.{}'.format(fieldname, k))
                elif isinstance(v, six.string_types):
                    assert v == new[k], 'Field {}.{} exists but have different value.\nold:{}\nnew:{}'.format(fieldname, k,
                                                                                                             v, new[k])
                else:
                    assert v == new[k]
        elif isinstance(old, list) and isinstance(new, list):
            # Sorts both lists *in place* by 'id' so order differences between
            # the two APIs don't matter; assumes every element has an 'id'.
            old.sort(key=lambda x: x['id'])
            new.sort(key=lambda x: x['id'])
            for i, v in enumerate(old):
                self.cmpDataApi(v, new[i], str(i))
        else:
            # Mixed types (dict vs list etc.) are never acceptable.
            assert False, 'old and new have different types'
class MyStaticFilesHandler(StaticFilesHandler):
    """Static files handler that refuses to serve the favicon during tests."""

    def serve(self, request):
        # Browsers request /static/favicon.ico on their own; answering 403
        # keeps missing-file noise out of the test-server logs.
        if request.path != '/static/favicon.ico':
            return super(MyStaticFilesHandler, self).serve(request)
        from django.http import HttpResponseForbidden
        return HttpResponseForbidden()  # I'm not serving favicon during test
class WSGITestRequestHandler(WSGIRequestHandler):
    """
    Just a regular WSGIRequestHandler except it doesn't log to the standard
    output any of the requests received, so as to not clutter the output for
    the tests results, instead it saves to a given list, useful to print the logs only when a test fails
    """
    def log_message(self, *args):
        # print(args)
        # `logs` is not defined here: LiveServerThread.run() injects it as a
        # *class* attribute (WSGITestRequestHandler.logs = self.logs) before
        # the server starts, so all handler instances share one list.
        self.logs.append(args)
class LiveServerThread(DjangoLiveServerThread):
    """
    Almost a copy of django's LiveServerThread, but using the custom
    WSGITestRequestHandler so request logs are captured into a list
    instead of being printed.
    """
    def __init__(self, *args, **kwargs):
        # `logs`: shared list into which WSGITestRequestHandler appends
        # request log tuples; popped here so the parent never sees it.
        self.logs = kwargs.pop('logs', None)
        super(LiveServerThread, self).__init__(*args, **kwargs)
    def run(self):
        """
        Sets up the live server and databases, and then loops over handling
        http requests.
        """
        if self.connections_override:
            # Override this thread's database connections with the ones
            # provided by the main thread.
            for alias, conn in self.connections_override.items():
                connections[alias] = conn
        try:
            # Create the handler for serving static and media files
            handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
            # Go through the list of possible ports, hoping that we can find
            # one that is free to use for the WSGI server.
            for index, port in enumerate(self.possible_ports):
                try:
                    # Hand the shared log list to the handler class *before*
                    # the server is created (class attribute, see handler).
                    WSGITestRequestHandler.logs = self.logs
                    self.httpd = WSGIServer(
                        (self.host, port), WSGITestRequestHandler)
                except socket.error as e:
                    if (index + 1 < len(self.possible_ports) and
                            e.errno == errno.EADDRINUSE):
                        # This port is already in use, so we go on and try with
                        # the next one in the list.
                        continue
                    else:
                        # Either none of the given ports are free or the error
                        # is something else than "Address already in use". So
                        # we let that error bubble up to the main thread.
                        raise
                else:
                    # A free port was found.
                    self.port = port
                    break
            self.httpd.set_app(handler)
            # Signal the main thread (blocked in is_ready.wait()) that the
            # server is up, then serve until terminate() is called.
            self.is_ready.set()
            self.httpd.serve_forever()
        except Exception as e:
            # Surface the failure to the main thread via self.error, and
            # release it from is_ready.wait() so it can re-raise.
            self.error = e
            self.is_ready.set()
class LiveServerTestCase(TransactionTestCase):
    """
    This is almost a copy of django's LiveServerTestCase; setUpClass is
    overridden so the custom LiveServerThread (with log capture) is used.
    Captured request logs are printed only when a test fails.
    """
    static_handler = MyStaticFilesHandler
    @property
    def live_server_url(self):
        # Base URL of the server started in setUpClass.
        return 'http://%s:%s' % (self.server_thread.host, self.server_thread.port)
    @classmethod
    def setUpClass(cls):
        connections_override = {}
        for conn in connections.all():
            # If using in-memory sqlite databases, pass the connections to
            # the server thread.
            if (conn.vendor == 'sqlite'
                    and conn.settings_dict['NAME'] == ':memory:'):
                # Explicitly enable thread-shareability for this connection
                conn.allow_thread_sharing = True
                connections_override[conn.alias] = conn
        # Launch the live server's thread
        specified_address = os.environ.get(
            'DJANGO_LIVE_TEST_SERVER_ADDRESS', '127.0.0.1:8080-8090')
        # The specified ports may be of the form '8000-8010,8080,9200-9300'
        # i.e. a comma-separated list of ports or ranges of ports, so we break
        # it down into a detailed list of all possible ports.
        possible_ports = []
        try:
            host, port_ranges = specified_address.split(':')
            for port_range in port_ranges.split(','):
                # A port range can be of either form: '8000' or '8000-8010'.
                extremes = list(map(int, port_range.split('-')))
                assert len(extremes) in [1, 2]
                if len(extremes) == 1:
                    # Port range of the form '8000'
                    possible_ports.append(extremes[0])
                else:
                    # Port range of the form '8000-8010'
                    for port in range(extremes[0], extremes[1] + 1):
                        possible_ports.append(port)
        except Exception:
            msg = 'Invalid address ("%s") for live server.' % specified_address
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
        # `logs` is shared with the server thread and, through it, with the
        # request handler; tearDown prints it when a test fails.
        cls.logs = []
        cls.server_thread = LiveServerThread(host, possible_ports,
                                             cls.static_handler,
                                             connections_override=connections_override, logs=cls.logs)
        cls.server_thread.daemon = True
        cls.server_thread.start()
        # Wait for the live server to be ready
        cls.server_thread.is_ready.wait()
        if cls.server_thread.error:
            # Clean up behind ourselves, since tearDownClass won't get called in
            # case of errors.
            cls._tearDownClassInternal()
            raise cls.server_thread.error
        super(LiveServerTestCase, cls).setUpClass()
    @classmethod
    def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() for some
        # reasons has raised an exception.
        if hasattr(cls, 'server_thread'):
            # Terminate the live server's thread
            cls.server_thread.terminate()
            cls.server_thread.join()
        # Restore sqlite connections' non-shareability
        for conn in connections.all():
            if (conn.vendor == 'sqlite'
                    and conn.settings_dict['NAME'] == ':memory:'):
                conn.allow_thread_sharing = False
    @classmethod
    def tearDownClass(cls):
        cls._tearDownClassInternal()
        super(LiveServerTestCase, cls).tearDownClass()
    def _ifailed(self):
        """Call this on tearDown, check if it was the last run test that failed
        NOTE(review): relies on sys.exc_info() still holding the test's
        exception during tearDown -- true for the py2/old-runner setup this
        targets; confirm before reusing on modern pytest.
        """
        return not sys.exc_info() == (None, None, None)
    def tearDown(self):
        super(LiveServerTestCase, self).tearDown()
        # On failure, dump the captured request logs for debugging, then
        # clear the shared list for the next test.
        if self._ifailed():
            for log in self.logs:
                print(*log)
        del self.logs[:]
class AttributeObject(object):
    """Tiny attribute bag for tests.

    mock = AttributeObject(hello='world')
    mock.hello  # 'world'
    """
    def __init__(self, **kwargs):
        # Keep the raw mapping (for repr) and expose each entry as an
        # attribute; a `kwargs=` keyword would overwrite the mapping itself,
        # matching the original setattr-loop behavior.
        self.kwargs = kwargs
        self.__dict__.update(kwargs)

    def __repr__(self):
        return '<AttributeObject: %r>' % self.kwargs
@pytest.mark.skipif(not os.environ.get('SELENIUM', False), reason='Selenium env is set to 0')
class LiveTestCase(LiveServerTestCase):
username = "fronttestuser"
password = "password"
static_handler = MyStaticFilesHandler
driver = None
    @classmethod
    def setUpClass(cls):
        """Start the live server (via parent) and reset shared state."""
        super(LiveTestCase, cls).setUpClass()
        # Start each test class from a cold cache.
        cache.clear()
        # Default for the alert-confirmation helpers used by selenium tests.
        cls.accept_next_alert = True
    @classmethod
    def tearDownClass(cls):
        """Stop the live server thread."""
        super(LiveTestCase, cls).tearDownClass()
        # NOTE(review): the parent tearDownClass already terminates/joins the
        # thread via _tearDownClassInternal(); this repeats it -- presumably
        # harmless, but confirm before removing.
        cls.server_thread.terminate()
        cls.server_thread.join()
    def setUp(self):
        """Create DB fixtures and a webdriver (SauceLabs remote or local)."""
        super(LiveTestCase, self).setUp()
        # Baseline fixtures: a group, a known user, two contact types and
        # two contacts (one linked to the user's profile).
        self.group, created = Group.objects.get_or_create(name='test_group')
        self.user, created = DjangoUser.objects.get_or_create(username=self.username)
        self.user.set_password(self.password)
        self.user.save()
        self.contact_type = ContactType(name='front_test_contacttype')
        self.contact_type.set_default_user()
        self.contact_type.save()
        self.contact_type2 = ContactType(name='front_test second contacttype')
        self.contact_type2.set_default_user()
        self.contact_type2.save()
        self.contact = Contact(name='front_test_contact', contact_type=self.contact_type, related_user=self.user.profile)
        self.contact.set_default_user()
        self.contact.save()
        self.contact2 = Contact(name='front_test second contact', contact_type=self.contact_type2)
        self.contact2.set_default_user()
        self.contact2.save()
        if USE_SAUCE:
            # Remote browser on SauceLabs, wired to the Travis job via the
            # tunnel identifier so it can reach the local live server.
            capabilities = webdriver.DesiredCapabilities.CHROME
            # If this capability is null, an empty string, or omitted altogether, the latest version of the browser will be used automatically. # noqa
            # capabilities['version'] = '45'
            capabilities['platform'] = 'Windows 7'
            capabilities['screenResolution'] = '1920x1080'
            capabilities['name'] = self.id()
            capabilities['build'] = os.environ.get("TRAVIS_BUILD_NUMBER")
            capabilities['tags'] = [os.environ.get("TRAVIS_PYTHON_VERSION"), "CI"]
            username = os.environ.get("SAUCE_USERNAME")
            access_key = os.environ.get("SAUCE_ACCESS_KEY")
            capabilities["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER")
            hub_url = "http://%s:%s@ondemand.saucelabs.com/wd/hub" % (username, access_key)
            self.driver = webdriver.Remote(desired_capabilities=capabilities, command_executor=hub_url)
            # Remote sessions are slower; use a generous implicit wait.
            self.driver.implicitly_wait(30)
        else:
            # Local browser for developer runs.
            # self.driver = webdriver.Firefox()
            self.driver = webdriver.Chrome()
            self.driver.set_window_size(1366, 768)
            self.driver.implicitly_wait(5)
    def tearDown(self):
        """Shut the browser down and report the result to SauceLabs."""
        super(LiveTestCase, self).tearDown()
        if self.driver:
            # Refresh first so pending alerts/requests settle before quit.
            self.driver.refresh()
            if not USE_SAUCE:
                # Local browsers sometimes race quit(); give them a moment.
                sleep(1)
            self.driver.quit()
        if USE_SAUCE:
            # Tell SauceLabs whether this test passed (shows in their UI).
            self._report_pass_fail()
        cache.clear()
    def _report_pass_fail(self):
        """Mark the SauceLabs job passed/failed and print its session id."""
        session_id = self.driver.session_id
        job_name = self.id()
        sauce_client = SauceClient(os.environ.get("SAUCE_USERNAME"), os.environ.get("SAUCE_ACCESS_KEY"))
        # Same exc_info trick as _ifailed(): empty exc_info means success.
        status = (sys.exc_info() == (None, None, None))
        sauce_client.jobs.update_job(session_id, passed=status)
        # This line is parsed by the Travis/Sauce integration; keep format.
        print("SauceOnDemandSessionID=%s job-name=%s" % (session_id, job_name))
def get(self, url_name):
"""Get the | |
<reponame>tsnouidui/energyplustofmu
#!/usr/bin/env python
#--- Purpose.
#
# Export an EnergyPlus model as a Functional Mockup Unit (FMU) for co-simulation.
#--- Note on directory location.
#
# This script uses relative paths to locate some of the files it needs.
# Therefore it should not be moved from its default directory.
# However, this script can be run from a different working directory.
#--- Running this script.
#
# To run this script from the command line:
# > python [python options] <this-file-name> <arguments>
#
# On unix-like systems, this command-line invocation should work as well:
# > ./<this-file-name> <arguments>
#
# To call this script from the Python interpreter, or from another Python script:
# >>> import <this-file-base-name>
# >>> <this-file-base-name>.exportEnergyPlusAsFMU(arguments)
#--- Runtime help.
#
def printCmdLineUsage():
  #
  # Print the command-line usage summary for this script (Python 2 syntax).
  print 'USAGE:', os.path.basename(__file__), \
    '-i <path-to-idd-file> [-w <path-to-weather-file>] [-d] [-L] <path-to-idf-file>'
  #
  print '-- Export an EnergyPlus model as a Functional Mockup Unit (FMU) for co-simulation'
  print '-- Input -i, use the named Input Data Dictionary (required)'
  print '-- Option -w, use the named weather file'
  print '-- Option -d, print diagnostics'
  print '-- Option -L, litter, that is, do not clean up intermediate files'
  # TODO: Add -V to set version number of FMI standard. Currently 1.0 is only one supported.
  #
# End fcn printCmdLineUsage().
#--- Ensure access.
#
import os
import subprocess
import sys
import zipfile
#--- Fcn to print diagnostics.
#
def printDiagnostic(messageStr):
  #
  # Echo {messageStr} to stdout, prefixed with '!' and this script's file
  # name, so diagnostic chatter is distinguishable from real output.
  print '!', os.path.basename(__file__), '--', messageStr
  #
# End fcn printDiagnostic().
#--- Fcn to quit due to an error.
#
def quitWithError(messageStr, showCmdLine):
  #
  # Report {messageStr} (if any), optionally show command-line usage, then
  # terminate the script with exit status 1.
  print 'ERROR from script file {' +os.path.basename(__file__) +'}'
  #
  if( messageStr is not None ):
    print messageStr
  #
  if( showCmdLine ):
    print
    printCmdLineUsage()
  #
  sys.exit(1)
  #
# End fcn quitWithError().
#--- Fcn to verify a file exists.
#
# If file exists, return its absolute path.  Otherwise, quit.
#
def findFileOrQuit(fileDesc, fileName):
  #
  # Success path first: a real file resolves to its absolute path.
  if( os.path.isfile(fileName) ):
    return( os.path.abspath(fileName) )
  #
  # Failure: report the most specific problem (missing directory vs file).
  (dirName, baseName) = os.path.split(os.path.abspath(fileName))
  if( not os.path.isdir(dirName) ):
    quitWithError('Missing directory {' +dirName +'} for ' +fileDesc +' file {' +baseName +'}', False)
  quitWithError('Missing ' +fileDesc +' file {' +baseName +'} in directory {' +dirName +'}', False)
  #
# End fcn findFileOrQuit().
#--- Fcn to delete a file.
#
# OK if file does not exist.
#
def deleteFile(fileName):
  #
  # Remove {fileName} if it is a regular file; a missing file is fine, but a
  # directory under that name is a caller error.
  if( os.path.isfile(fileName) ):
    try:
      os.remove(fileName)
    except OSError:
      # Narrowed from a bare except: only trap filesystem errors, so that
      # KeyboardInterrupt/SystemExit still propagate normally.
      quitWithError('Unable to delete file {' +fileName +'}', False)
  elif( os.path.isdir(fileName) ):
    quitWithError('Expecting {' +fileName +'} to be a file; found a directory', False)
  #
# End fcn deleteFile().
#--- Fcn to add a file to a zip file.
#
def addToZipFile(theZipFile, addFileName, toDir, addAsName):
  #
  # Store {addFileName} in {theZipFile} under {toDir}/{addAsName}.
  # Defaults: {addAsName} falls back to the file's base name; {toDir} of
  # None means the archive root.
  if( addAsName is None ):
    addAsName = os.path.basename(addFileName)
  if( toDir is not None ):
    addAsName = os.path.join(toDir, addAsName)
  #
  try:
    theZipFile.write(addFileName, addAsName)
  except Exception:
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    # Diagnose error if possible.
    if( theZipFile.__class__ != zipfile.ZipFile ):
      quitWithError('Expecting a zip file, got {' +theZipFile.__class__.__name__ +'}', False)
    # Here, {theZipFile} is a zip file.  Won't be using it any further.
    theZipFile.close()
    # Check whether {addFileName} exists.
    addFileName = findFileOrQuit('zip member', addFileName)
    # Here, {addFileName} is a good file.
    quitWithError('Failed to add file {' +addFileName +'} to zip file; reason unknown', False)
  #
  # Here, successfully added {addFileName} to {theZipFile}.
  #
# End fcn addToZipFile().
#--- Fcn to export an EnergyPlus IDF file as an FMU.
#
def exportEnergyPlusAsFMU(showDiagnostics, litter, iddFileName, wthFileName, idfFileName):
  #
  # Orchestrate the export: validate inputs, run the export-prep app to
  # extract FMU metadata from the IDF, build the model shared library, then
  # assemble everything into a {modelIdName}.fmu zip archive.
  if( showDiagnostics ):
    printDiagnostic('Begin exporting IDF file {' +idfFileName +'} as an FMU')
  #
  # Check file names passed as arguments, and get absolute paths.
  idfFileName = findFileOrQuit('IDF', idfFileName)
  iddFileName = findFileOrQuit('IDD', iddFileName)
  if( wthFileName is None ):
    if( showDiagnostics ):
      printDiagnostic('Note no WTH file given')
  else:
    wthFileName = findFileOrQuit('WTH', wthFileName)
  #
  # Get directory of this script file.
  scriptDirName = os.path.abspath(os.path.dirname(__file__))
  #
  # Load modules expect to find in same directory as this script file.
  if( scriptDirName not in sys.path ):
    sys.path.append(scriptDirName)
  #
  # NOTE(review): the two bare excepts below hide the real import error;
  # consider logging sys.exc_info() in a future change.
  findFileOrQuit('utility script', os.path.join(scriptDirName, 'makeFMULib.py'))
  try:
    import makeFMULib
  except:
    quitWithError('Unable to import {makeFMULib.py}', False)
  #
  findFileOrQuit('utility script', os.path.join(scriptDirName, 'makeExportPrepApp.py'))
  try:
    import makeExportPrepApp
  except:
    quitWithError('Unable to import {makeExportPrepApp.py}', False)
  #
  # Get valid model identifier (IDF base name, sanitized for use as a C
  # identifier by makeFMULib).
  modelIdName = os.path.basename(idfFileName)
  if( modelIdName.endswith('.idf') or modelIdName.endswith('.IDF') ):
    modelIdName = modelIdName[:-4]
  modelIdName = makeFMULib.sanitizeIdentifier(modelIdName)
  if( showDiagnostics ):
    printDiagnostic('Using model identifier {' +modelIdName +'}')
  #
  # Delete expected outputs if they already exist.
  # To prevent confusion in case of an error.
  OUT_modelDescFileName = 'modelDescription.xml'
  deleteFile(OUT_modelDescFileName)
  #
  OUT_variablesFileName = 'variables.cfg'
  deleteFile(OUT_variablesFileName)
  #
  OUT_workZipFileName = modelIdName +'.zip'
  deleteFile(OUT_workZipFileName)
  #
  OUT_fmuFileName = modelIdName +'.fmu'
  deleteFile(OUT_fmuFileName)
  #
  # Create export-prep application.
  # The resulting executable will extract FMU-related information from an
  # EnergyPlus IDF file.
  # Do not force a rebuild.
  if( showDiagnostics ):
    printDiagnostic('Checking for export-prep application')
  exportPrepExeName = makeExportPrepApp.makeExportPrepApp(showDiagnostics, litter, False)
  #
  # Run the export-prep application.
  # It writes modelDescription.xml and variables.cfg into the working dir.
  if( showDiagnostics ):
    printDiagnostic('Running export-prep application {' +exportPrepExeName +'}')
  runList = [os.path.join(os.path.curdir, exportPrepExeName)]
  if( wthFileName is not None ):
    runList.extend(['-w', wthFileName])
  runList.extend([iddFileName, idfFileName])
  subprocess.call(runList)
  if( (not os.path.isfile(OUT_modelDescFileName)) or (not os.path.isfile(OUT_variablesFileName)) ):
    quitWithError('Failed to extract FMU information from IDF file {' +idfFileName +'}', False)
  #
  # Create the shared library.
  (OUT_fmuSharedLibName, fmuBinDirName) = makeFMULib.makeFmuSharedLib(showDiagnostics, litter, modelIdName)
  findFileOrQuit('shared library', OUT_fmuSharedLibName)
  #
  # Create zip file that will become the FMU.
  # Note to get compression, need zlib, but can proceed without it.
  try:
    import zlib
    if( showDiagnostics ):
      printDiagnostic('Creating zip file {' +OUT_workZipFileName +'}, with compression on')
    workZipFile = zipfile.ZipFile(OUT_workZipFileName, 'w', zipfile.ZIP_DEFLATED)
  except:
    # Here, either didn't find zlib, or couldn't create zip file.
    if( showDiagnostics ):
      printDiagnostic('Creating zip file {' +OUT_workZipFileName +'}, without compression')
    try:
      workZipFile = zipfile.ZipFile(OUT_workZipFileName, 'w', zipfile.ZIP_STORED)
    except:
      quitWithError('Failed to create zip file {' +OUT_workZipFileName +'}', False)
  #
  # Populate zip file following the FMU layout: modelDescription.xml at the
  # root, inputs under resources/, the shared library under binaries/<platform>.
  # Note fcn addToZipFile() closes the zip file if it encounters an error.
  addToZipFile(workZipFile, OUT_modelDescFileName, None, None)
  addToZipFile(workZipFile, idfFileName, 'resources', modelIdName+'.idf')
  addToZipFile(workZipFile, OUT_variablesFileName, 'resources', None)
  addToZipFile(workZipFile, iddFileName, 'resources', None)
  addToZipFile(workZipFile, exportPrepExeName, 'resources', None)
  if( wthFileName is not None ):
    addToZipFile(workZipFile, wthFileName, 'resources', None)
  addToZipFile(workZipFile, OUT_fmuSharedLibName, os.path.join('binaries',fmuBinDirName), None)
  #
  # Finish up zip file.
  if( showDiagnostics ):
    printDiagnostic('Renaming completed zip file {' +OUT_workZipFileName +'} to {' +OUT_fmuFileName +'}')
  workZipFile.close()
  findFileOrQuit('zip', OUT_workZipFileName)
  os.rename(OUT_workZipFileName, OUT_fmuFileName)
  #
  # Clean up intermediates.
  if( not litter ):
    if( showDiagnostics ):
      printDiagnostic('Cleaning up intermediate files')
    # deleteFile(exportPrepExeName) # Keep this executable, since it does not vary from run to run (i.e., not really intermediate).
    deleteFile(OUT_modelDescFileName)
    deleteFile(OUT_variablesFileName)
    deleteFile(OUT_fmuSharedLibName)
  #
# End fcn exportEnergyPlusAsFMU().
#--- Run if called from command line.
#
# If called from command line, {__name__} is "__main__". Otherwise,
# {__name__} is base name of the script file, without ".py".
#
if __name__ == '__main__':
  #
  # Set defaults for command-line options.
  iddFileName = None
  wthFileName = None
  showDiagnostics = False
  litter = False
  #
  # Get command-line options.
  # The IDF file is expected to be the *last* argument, so the option scan
  # deliberately stops before {lastIdx}.
  lastIdx = len(sys.argv) - 1
  currIdx = 1
  while( currIdx < lastIdx ):
    currArg = sys.argv[currIdx]
    if( currArg.startswith('-i') ):
      currIdx += 1
      iddFileName = sys.argv[currIdx]
      if( showDiagnostics ):
        printDiagnostic('Setting IDD file to {' +iddFileName +'}')
    elif( currArg.startswith('-w') ):
      currIdx += 1
      wthFileName = sys.argv[currIdx]
      if( showDiagnostics ):
        printDiagnostic('Setting WTH file to {' +wthFileName +'}')
    elif( currArg.startswith('-d') ):
      # NOTE: -d only affects diagnostics for options that appear *after* it.
      showDiagnostics = True
    elif( currArg.startswith('-L') ):
      litter = True
    else:
      quitWithError('Bad command-line option {' +currArg +'}', True)
    # Here, processed option at {currIdx}.
    currIdx += 1
  #
  # Get {idfFileName}.
  if( currIdx != lastIdx ):
    # Here, either an option like {-i} consumed the entry at {lastIdx}, or had
    # no options or arguments at all.
    quitWithError('Require exactly one command-line argument, <path-to-idf-file>', True)
  idfFileName = sys.argv[lastIdx]
  if( showDiagnostics ):
    printDiagnostic('Setting IDF file to {' +idfFileName +'}')
  if( idfFileName.startswith('-') and len(idfFileName)==2 ):
    quitWithError('Expecting IDF file name, got what looks like a command-line option {' +idfFileName +'}', True)
  #
  # Get {iddFileName}.
  if( iddFileName is None ):
    quitWithError('Missing required input, <path-to-idd-file>', True)
  #
  # Run.
  exportEnergyPlusAsFMU(showDiagnostics, litter, iddFileName, wthFileName, idfFileName)
#--- Copyright notice.
#
# Functional Mock-up Unit Export of EnergyPlus (C)2013, The Regents of
# the University of California, through Lawrence Berkeley National
# Laboratory (subject to receipt of any required approvals from
# the U.S. Department of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Technology Transfer Department at
# <EMAIL>, referring to "Functional Mock-up Unit Export
# of | |
<reponame>ysadamori/-GiNZA<gh_stars>1-10
import importlib
import re
import sys
# Traverse policy definition (needed for non top-level elements)
# Element name for traverse type elements
ARC = 'arc'
# for parents within max_hop: stop at the first matching ancestor (match only once)
PARENT = 'parent'
# for descendants within max_hop: collect every match (match all)
CHILDREN = 'children'
# for parents within max_hop: collect every match (match all)
ANCESTORS = 'ancestors'
# for descendants within max_hop: stop at the first match (match only once)
DESCENDANT = 'descendant'
# Element name for max_hop elements (default=1)
MAX_HOP = 'max_hop'
# Matching regexp pattern definition
# Element name for dependency label (token.dep_) patterns (default='.*')
LABEL = 'label'
# Element name for word (token.orth_) patterns (default='.*')
WORD = 'word'
# Element name for stop-word patterns applied to word (token.orth_) (default=None)
SW = 'sw'
# Element name for lemma (token.lemma_) patterns (default='.*')
LEMMA = 'lemma'
# Element name for part-of-speech (token.pos_) patterns (default='.*')
POS = 'pos'
# Sub rule definition
# Element name for lists which have recursive sub rule list
DEPS = 'deps'
# Candidate generation action definition
# Element name for action set definition
ACTION = 'action'
# use current token as one of the candidates if the rule matches the current token
USE = {'USE'}
# generate candidates if the rule matches the current token
POP = {'POP'}
# cancel traversing if the rule matches the current token
FAIL = {'FAIL'}
# use current token as one of the candidates if the rule does not match the current token
USE_ON_FAIL = {'USE_ON_FAIL'}
# generate candidates if the rule does not match the current token
POP_ON_FAIL = {'POP_ON_FAIL'}
# continue traversing if the rule does not match the current token
CONTINUE_ON_FAIL = {'CONTINUE_ON_FAIL'}
# Convenience combinations of the primitive actions above.
GOAL = USE | POP
GREEDY = USE | POP_ON_FAIL
USE_ALWAYS = USE | USE_ON_FAIL
POP_ALWAYS = POP | POP_ON_FAIL
GOAL_ALWAYS = USE_ALWAYS | POP_ALWAYS
# Identity mapping of every valid action name; used to validate rule input.
actions = {
    v: v for v in USE | POP | FAIL | USE_ON_FAIL | POP_ON_FAIL | CONTINUE_ON_FAIL | {
        'GOAL', 'GREEDY', 'USE_ALWAYS', 'POP_ALWAYS', 'GOAL_ALWAYS',
    }
}
# Element name for enabling debug mode (default=False)
DEBUG = 'debug'
# Element name for debug information (default='')
INFO = 'info'
class DependencyRule:
    """A recursive matching rule applied to a spaCy dependency tree.

    A rule is built from a (possibly nested) dict of the module-level element
    names (ARC, MAX_HOP, LABEL, WORD, LEMMA, POS, SW, DEPS, ACTION, DEBUG,
    INFO).  The top-level rule matches a token by its word/lemma/pos regexps;
    nested rules under DEPS are matched by traversing the dependency arcs
    (parent/ancestors or children/descendant) within ``max_hop`` hops.  The
    ACTION set controls which tokens are emitted as candidates.
    """

    @staticmethod
    def _get_re(data, field, count):
        """Compile the (lower-cased) regexp for ``field`` if present.

        Returns a ``(compiled_pattern, updated_field_count)`` tuple; when the
        field is absent, a match-anything pattern is returned and the count is
        left unchanged.
        """
        if field in data:
            return re.compile('^({})$'.format(data[field].lower())), count + 1
        else:
            return re.compile(r'^.*$'), count

    def __init__(self, data=None, nest_level=0):
        """Build a rule (and its nested sub-rules) from the dict ``data``.

        :param data: rule definition dict; see module-level element names.
        :param nest_level: 0 for a top-level rule; >0 for rules under DEPS.
        :raises Exception: on structurally invalid rule definitions.
        """
        is_top = nest_level == 0
        # Number of recognized keys consumed from {data}; compared with
        # len(data) at the end to warn about unknown fields.
        field_count = 0
        self.nest_level = nest_level
        if ARC in data:
            if is_top:
                raise Exception('arc not allowed in top rule')
            v = data[ARC]
            if v not in [ANCESTORS, DESCENDANT, PARENT, CHILDREN]:
                raise Exception('arc must be ancestors, descendants, parent, or children: {}'.format(v))
            field_count += 1
            self.arc = v
        elif is_top:
            self.arc = None
        else:
            raise Exception('arc must be specified')
        if MAX_HOP in data:
            if is_top:
                raise Exception('max_hop not allowed in top rule')
            field_count += 1
            v = data[MAX_HOP]
            if isinstance(v, int):
                if v > 0:
                    self.max_hop = v
                else:
                    # BUGFIX: was Exception('...: {}', format(v)) -- a stray
                    # comma called the builtin format() and never interpolated.
                    raise Exception('hop must be > 0: {}'.format(v))
            else:
                # BUGFIX: same stray-comma formatting defect as above.
                raise Exception('hop must be int: {}'.format(v))
        else:
            self.max_hop = 1
        if LABEL in data and is_top:
            raise Exception('label not allowed in top rule')
        self.label, field_count = DependencyRule._get_re(data, LABEL, field_count)
        self.word, field_count = DependencyRule._get_re(data, WORD, field_count)
        self.lemma, field_count = DependencyRule._get_re(data, LEMMA, field_count)
        self.pos, field_count = DependencyRule._get_re(data, POS, field_count)
        if DEPS in data:
            field_count += 1
            # Recursively build the nested sub-rules.
            self.deps = [DependencyRule(sub_data, nest_level + 1) for sub_data in data[DEPS]]
        elif ACTION not in data:
            raise Exception('action must be specified if no deps')
        else:
            self.deps = None
        if ACTION in data:
            v = data[ACTION]
            if isinstance(v, list):
                self.actions = set(v)
            elif isinstance(v, set):
                self.actions = v
            else:
                # BUGFIX: was {actions[v]}, which raised a raw KeyError for an
                # unknown action name before the uniform validation below ran.
                self.actions = {v}
            for v in self.actions:
                if v not in actions:
                    raise Exception('invalid action: {}'.format(v))
            field_count += 1
        else:
            self.actions = set()
        if SW in data:
            self.stop_word, field_count = DependencyRule._get_re(data, SW, field_count)
        else:
            self.stop_word = None
        if DEBUG in data:
            self.debug = data[DEBUG]
            field_count += 1
        else:
            self.debug = False
        if INFO in data:
            self.info = data[INFO]
            field_count += 1
        else:
            self.info = None
        # Warn (but do not fail) when the definition contains unknown keys.
        if field_count != len(data.keys()):
            print('invalid field(s) contained: {}'.format(data.keys() - {
                ARC,
                MAX_HOP,
                LABEL,
                WORD,
                LEMMA,
                POS,
                DEPS,
                ACTION,
                SW,
                DEBUG,
                INFO,
            }), file=sys.stderr)

    @staticmethod
    def _content(regexp):
        """Extract the user-supplied pattern text from a compiled regexp.

        Relies on repr() having the fixed shape "re.compile('^(...)$')"; the
        slice strips the surrounding boilerplate.
        """
        return str(regexp)[14:-4]

    def __str__(self):
        # Render only the fields that are set (kv[1] truthy), labeled where
        # the bare value would be ambiguous.
        return 'DependencyRule({})'.format(','.join([
            '{}:{}'.format(f, v) if f else str(v) for f, v in [
                kv for kv in [
                    ('', self.info),
                    ('', self.arc),
                    ('max_hop', self.max_hop),
                    ('', DependencyRule._content(self.label)),
                    ('word', DependencyRule._content(self.word)),
                    ('lemma', DependencyRule._content(self.lemma)),
                    ('pos', DependencyRule._content(self.pos)),
                    ('', str(self.actions) if self.actions else ''),
                ] if kv[1]
            ]
        ]))

    def extract_candidates(self, utterance, debug=False):
        """Apply this rule to each token of ``utterance`` (a spaCy Doc/Span).

        Returns the candidate tokens of the first matching token, sorted by
        document position, or None when no token matches.
        """
        for token in utterance:
            matched_tokens = self.check_token(token, 1, {token.i}, debug)
            if matched_tokens is not None:
                return sorted(matched_tokens, key=lambda t: t.i)
        return None

    def _indent(self):
        # Debug-print indentation proportional to the rule nesting depth.
        return ' ' * ((self.nest_level + 1) * 4)

    def filter_stop_words(self, tokens, debug):
        """Drop tokens whose surface form matches the stop-word pattern.

        NOTE(review): matches against token.orth_ without lower-casing, while
        match_stop_words() lower-cases first -- confirm which is intended.
        """
        if not self.stop_word:
            return tokens
        else:
            if debug:
                indent = self._indent()
            else:
                indent = None
            debug and print(indent + 'apply stop_word')
            result = [token for token in tokens if not self.stop_word.match(token.orth_)]
            if len(result) == len(tokens):
                return tokens
            else:
                debug and print(indent + 'stop_word matched: {}'.format(DependencyRule._content(self.stop_word)))
                return result

    def match_stop_words(self, token, debug):
        """Return True when ``token``'s lower-cased surface form is a stop word."""
        if self.stop_word:
            indent = self._indent() if debug else None
            debug and print(indent + 'apply stop_word')
            if self.stop_word.match(token.orth_.lower()):
                debug and print(indent + 'stop_word matched: {}'.format(DependencyRule._content(self.stop_word)))
                return True
        return False

    def check_token(self, token, hop, token_history, debug=False):
        """Match this rule (word/lemma/pos, stop words, then DEPS) at ``token``.

        Returns the list of candidate tokens on success or None on failure.
        """
        debug = debug or self.debug
        indent = self._indent() if debug else None
        # BUGFIX: original format string had 5 placeholders for 6 arguments,
        # silently dropping token_history from the debug output.
        debug and print(indent[2:] + 'check_token({}, {}:{}, {}, {}, {})'.format(
            str(self), token.i, token.orth_, token.lemma_, hop, token_history))
        # check word-level rules
        if (
            not self.word.match(token.orth_.lower())
        ) or (
            not self.lemma.match(token.lemma_.lower())
        ) or (
            not self.pos.match(token.pos_.lower())
        ) or (
            self.match_stop_words(token, debug=debug)
        ):
            debug and print(indent + 'word not matched')
            # CONTINUE_ON_FAIL lets traversal proceed despite the word mismatch.
            if not CONTINUE_ON_FAIL <= self.actions:
                debug and print(indent + 'failed')
                return None
        debug and print(indent + 'word matched')
        # For a leaf rule (no DEPS) sub_result stays [] which counts as
        # "matched" below; nested rules overwrite it with the traversal result.
        sub_result = []
        if self.deps:
            # dive into sub rules; first sub-rule that yields a result wins
            for i, sub in enumerate(self.deps):
                sub_result = sub.traverse_dependency(token, 0, token_history, debug)
                if sub_result is not None:
                    debug and print(indent + 'traverse_dependency({}, {}) -> {}'.format(
                        self,
                        token.lemma_,
                        [t.lemma_ for t in sub_result],
                    ))
                    break
        if sub_result is not None:
            debug and print(indent + 'deps matched')
            if FAIL <= self.actions:
                debug and print(indent + 'fail')
                return None
            if USE <= self.actions:
                # USE adds the current token itself to the candidates.
                debug and print(indent + 'use: {}'.format(token.lemma_))
                return self.filter_stop_words([token] + sub_result, debug)
            else:
                return self.filter_stop_words(sub_result, debug)
        else:
            debug and print(indent + 'deps not matched')
        result = [token]
        if POP_ON_FAIL <= self.actions:
            debug and print(indent + 'pop_on_fail: {}'.format([t.lemma_ for t in result]))
            return self.filter_stop_words(result, debug)
        debug and print(indent + 'failed')
        return None

    def traverse_dependency(self, token, hop, token_history, debug=False):
        """Walk the dependency arc(s) from ``token`` per this rule's ARC type.

        ``token_history`` holds already-visited token indices to avoid cycles.
        Returns candidate tokens or None.
        """
        debug = debug or self.debug
        indent = self._indent() if debug else None
        debug and print(indent[2:] + 'traverse_dependency({}, {}, {}, {})'.format(
            self, token.lemma_, hop, token_history))
        hop += 1
        # over max_hop
        if hop > self.max_hop:
            debug and print(indent + 'hop overs {}'.format(self.max_hop))
            return None
        if self.arc in [ANCESTORS, PARENT]:
            target = token.head
            debug and print(indent + 'target: {}'.format(target.lemma_))
            # in history
            if target.i in token_history:
                debug and print(indent + 'in history')
                return None
            dep = token.dep_.lower()
            debug and print(indent + '{}>{}>{}'.format(token.lemma_, dep, target.lemma_))
            result = None
            if self.label.match(dep):
                debug and print(indent + 'label matched')
                result = self.check_token(target, hop, token_history | {target.i}, debug)
                if result is None:
                    debug and print(indent + 'no result')
            else:
                debug and print(indent + 'label not matched')
            if result is None:
                # No match here: keep climbing toward the root.
                return self.traverse_dependency(target, hop, token_history | {target.i}, debug)
            if self.arc == PARENT:
                # PARENT stops at the first match.
                return result
            # ANCESTORS keeps collecting matches further up the tree.
            sub_result = self.traverse_dependency(target, hop, token_history | {target.i}, debug)
            if sub_result is None:
                return result
            else:
                return result + sub_result
        elif self.arc in [DESCENDANT, CHILDREN]:
            # NOTE(review): only direct children are visited here; there is no
            # recursive descent, so max_hop > 1 has no effect on this branch --
            # confirm whether deeper traversal was intended.
            all_result = None
            for target in token.children:
                debug and print(indent + 'target: {}'.format(target.lemma_))
                # in history
                if target.i in token_history:
                    debug and print(indent + 'in history')
                    continue
                dep = target.dep_.lower()
                debug and print(indent + '{}<{}<{}'.format(token.lemma_, dep, target.lemma_))
                if self.label.match(dep):
                    debug and print(indent + 'label matched')
                    result = self.check_token(target, hop, token_history | {target.i}, debug)
                    if result is None:
                        debug and print(indent + 'no result')
                    else:
                        if self.arc == CHILDREN:
                            # CHILDREN accumulates every matching child.
                            if all_result is None:
                                all_result = result
                            else:
                                all_result += result
                            debug and print(indent + 'continue')
                        else:
                            # DESCENDANT stops at the first match.
                            return result
                else:
                    debug and print(indent + 'label not matched')
            return all_result
        else:
            raise Exception('arc must be ancestors, descendants, parent, or children: {}'.format(self.arc))
def parse_rule_maps(json):
    """Build a top-level DependencyRule for every rule dict in *json*."""
    return list(map(DependencyRule, json))
def import_from_module(module_name):
    """Import *module_name* and parse its DEPENDENCY_RULES attribute into rules."""
    rules_module = importlib.import_module(module_name)
    rule_definitions = rules_module.DEPENDENCY_RULES
    return parse_rule_maps(rule_definitions)
__all__ = [
'DependencyRule',
'parse_rule_maps',
'import_from_module',
'ARC',
'ANCESTORS',
'DESCENDANT',
'PARENT',
'CHILDREN',
| |
# File: src/pyon/container/procs.py
#!/usr/bin/env python
"""
Component of the container that manages ION processes etc.
The ProcManager keeps an IonProcessThreadManager as proc_sup (supervisor) to spawn
the ION process threads.
It also instantiates the BaseService instance with the app business logic,
and it registers the process listeners for a new ION process depending on process type.
An ION process is an IonProcessThread instance with a main greenlet, referenced
by the proc attribute, and a BaseService instance, referenced by the service attribute.
It has a control thread that processes all the incoming requests sequentially.
An ION process has a thread manager that spawns PyonThread greenlets for any
listeners and manages their lifecycle and termination.
New processes register in the RR. The first process of a service registers in the RR.
Agents register in the directory. Registration is removed when the process is terminated.
"""
__author__ = '<NAME>'
from copy import deepcopy
import multiprocessing
import os
import re
import time
import socket
import sys
from gevent.lock import RLock
from pyon.core import (PROCTYPE_SERVICE, PROCTYPE_AGENT, PROCTYPE_IMMEDIATE, PROCTYPE_SIMPLE, PROCTYPE_STANDALONE,
PROCTYPE_STREAMPROC)
from pyon.core.bootstrap import CFG
from pyon.core.exception import ContainerConfigError, BadRequest, NotFound
from pyon.core.thread import ThreadManager
from pyon.ion.endpoint import ProcessRPCServer
from pyon.ion.event import EventPublisher
from pyon.ion.process import IonProcessThreadManager, IonProcessError
from pyon.ion.resource import OT, PRED, RT
from pyon.ion.service import BaseService
from pyon.ion.stream import StreamPublisher, StreamSubscriber
from pyon.net.messaging import IDPool
from pyon.util.containers import DotDict, for_name, named_any, dict_merge, get_safe, is_valid_identifier
from pyon.util.log import log
from interface.objects import ProcessStateEnum, CapabilityContainer, Service, Process, ServiceStateEnum
class ProcManager(object):
    def __init__(self, container):
        """Create the process manager for the given container (call start() to run it)."""
        self.container = container
        # Define the callables that can be added to Container public API, and add
        self.container_api = [self.spawn_process, self.terminate_process]
        for call in self.container_api:
            setattr(self.container, call.__name__, call)
        # Pool that hands out locally unique numeric ids for new processes.
        self.proc_id_pool = IDPool()
        # Registry of running processes
        self.procs = {}
        self.procs_by_name = {}  # BAD: This is not correct if procs have the same name
        # mapping of greenlets we spawn to process_instances for error handling
        self._spawned_proc_to_process = {}
        # Effective execution engine config (after merging in child process overrides)
        self.ee_cfg = self._get_execution_engine_config()
        # Process dispatcher (if configured/enabled and not a child container process)
        self.pd_cfg = CFG.get_safe("service.process_dispatcher") or {}
        self.pd_enabled = self.pd_cfg.get("enabled", False) is True and not self.ee_cfg["container"]["is_child"]
        self.pd_core = None
        self.event_pub = EventPublisher()
        self.publish_events = CFG.get_safe("container.process.publish_events") is True
        # Passive manager for simple threads/greenlets, to keep them registered (these are not OS threads)
        # Note that each ION process has its own thread manager, so this is for container level threads
        self.thread_manager = ThreadManager(heartbeat_secs=None, failure_notify_callback=None)
        # Active supervisor for ION processes
        self.proc_sup = IonProcessThreadManager(heartbeat_secs=CFG.get_safe("container.timeout.heartbeat"),
                                                failure_notify_callback=self._spawned_proc_failed)
        # list of callbacks for process state changes
        self._proc_state_change_callbacks = []
    def start(self):
        """Start the process supervisor (and process dispatcher if enabled) and
        register this container in the resource registry when that capability exists."""
        log.debug("ProcManager starting ...")
        if self.pd_enabled:
            self._start_process_dispatcher()
        self.proc_sup.start()
        if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):
            # Register container as resource object
            cc_obj = self._get_capability_container_object()
            self.cc_id, _ = self.container.resource_registry.create(cc_obj)
            # Create an association to an Org object if not the root ION org and only if found
            if CFG.get_safe("container.org_name") != CFG.get_safe("system.root_org"):
                org, _ = self.container.resource_registry.find_resources(
                    restype=RT.Org, name=CFG.get_safe("container.org_name"), id_only=True)
                if org:
                    self.container.resource_registry.create_association(org[0], PRED.hasResource, self.cc_id)  # TODO - replace with proper association
        log.debug("ProcManager started, OK.")
    def stop(self):
        """Terminate all running processes (newest first), shut down the
        supervisor, and remove this container's registry registration."""
        log.debug("ProcManager stopping ...")
        # Call quit on procs to give them ability to clean up in reverse order
        procs_list = sorted(self.procs.values(), key=lambda proc: proc._proc_start_time, reverse=True)
        for proc in procs_list:
            try:
                self.terminate_process(proc.id)
            except Exception as ex:
                # Best-effort shutdown: log and keep terminating the rest.
                log.warn("Failed to terminate process (%s): %s", proc.id, ex)
        # TODO: Have a choice of shutdown behaviors for waiting on children, timeouts, etc
        self.proc_sup.shutdown(CFG.get_safe("container.timeout.shutdown"))
        # Any leftovers indicate processes that did not clean up properly.
        if self.procs:
            log.warn("ProcManager procs not empty: %s", self.procs)
        if self.procs_by_name:
            log.warn("ProcManager procs_by_name not empty: %s", self.procs_by_name)
        # Remove Resource registration
        if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):
            try:
                self.container.resource_registry.delete(self.cc_id, del_associations=True)
            except NotFound:
                # already gone, this is ok
                pass
        if self.pd_enabled:
            self._stop_process_dispatcher()
        log.debug("ProcManager stopped, OK.")
    def _get_execution_engine_config(self):
        """Return the effective execution engine config: the base config from CFG
        with any per-child-process override merged in for child containers.

        Sets ee_cfg["container"]["is_child"] based on the process name and
        strips the child_configs section from the returned copy.
        """
        ee_base_cfg = CFG.get_safe("container.execution_engine") or {}
        if ee_base_cfg.get("type", None) != "scioncc":
            # NOTE(review): printf-style args are passed to the exception
            # constructor; verify pyon exceptions interpolate them.
            raise ContainerConfigError("Execution engine config invalid: %s", ee_base_cfg)
        ee_cfg = deepcopy(ee_base_cfg)
        # If we are a child process, merge in child config override
        proc_name = multiprocessing.current_process().name
        ee_cfg["container"] = dict(child_proc_name=proc_name, is_child=False)
        child_cfgs = ee_base_cfg.get("child_configs", None) or {}
        if proc_name.startswith("Container-child-"):
            ee_cfg["container"]["is_child"] = True
            if proc_name in child_cfgs:
                # Exact name override wins.
                log.info("Applying execution engine config override for child: %s", proc_name)
                dict_merge(ee_cfg, child_cfgs[proc_name], inplace=True)
            else:
                # Otherwise first regex name_pattern match wins.
                # NOTE: dict.iteritems() implies this module targets Python 2.
                for cfg_name, ch_cfg in child_cfgs.iteritems():
                    pattern = ch_cfg.get("name_pattern", None)
                    if pattern and re.match(pattern, proc_name):
                        log.info("Applying execution engine config override %s for child: %s", cfg_name, proc_name)
                        dict_merge(ee_cfg, ch_cfg, inplace=True)
                        break
        ee_cfg.pop("child_configs", None)
        return ee_cfg
    def _get_capability_container_object(self):
        """Build the CapabilityContainer resource object describing this
        container (process, host, Python and environment details) for the
        resource registry."""
        container_info = dict(proc_name=multiprocessing.current_process().name,
                              process_id=os.getpid(),
                              parent_process_id=os.getppid(),
                              hostname=socket.gethostname(),
                              host=socket.gethostbyname(socket.gethostname()),
                              platform=sys.platform,
                              argv=sys.argv,
                              python_version=sys.version,
                              cwd=os.getcwd(),
                              start_time=self.container.start_time,
                              )
        # Other possibilities: username, os package versions, IP address
        host_info = {k: v for (k, v) in zip(("os_sysname", "os_nodename", "os_release", "os_version", "os_machine"), os.uname())}
        container_info.update(host_info)
        # NOTE: iteritems() implies Python 2; env values stringified for storage.
        container_info["env"] = {k: str(v) for (k, v) in os.environ.iteritems()}
        container_info["python_path"] = sys.path
        cc_obj = CapabilityContainer(name=self.container.id, version=self.container.version,
                                     cc_agent=self.container.name,
                                     container_info=container_info,
                                     execution_engine_config=self.ee_cfg)
        return cc_obj
# -----------------------------------------------------------------
    def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):
        """
        Spawn a process within the container. Processes can be of different type.

        :param name: process name (also used for service registration).
        :param module: dotted module path containing the process class.
        :param cls: class name of the process within the module.
        :param config: optional config dict/DotDict overriding system config.
        :param process_id: optional explicit process id (must be a valid identifier).
        :raises BadRequest: invalid process_id or unknown process type.
        :return: the new process id, or None on IonProcessError.
        """
        if process_id and not is_valid_identifier(process_id, ws_sub='_'):
            raise BadRequest("Given process_id %s is not a valid identifier" % process_id)
        # PROCESS ID. Generate a new process id if not provided
        # TODO: Ensure it is system-wide unique
        process_id = process_id or "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
        log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s, config=%s) as pid=%s", name, module, cls, config, process_id)
        # CONFIG
        process_cfg = self._create_process_config(config)
        try:
            service_cls = named_any("%s.%s" % (module, cls))
        except AttributeError as ae:
            # Try to nail down the error: re-importing the module alone raises
            # a clearer error when the module itself is broken.
            import importlib
            importlib.import_module(module)
            raise
        # PROCESS TYPE. Determines basic process context (messaging, service interface)
        process_type = get_safe(process_cfg, "process.type") or getattr(service_cls, "process_type", PROCTYPE_SERVICE)
        # NOTE(review): start_mode appears unused in this method; verify.
        process_start_mode = get_safe(config, "process.start_mode")
        process_instance = None
        # alert we have a spawning process, but we don't have the instance yet, so give the class instead (more accurate than name)
        # Note: this uses a str as first argument instead of a process instance
        self._call_proc_state_changed("%s.%s" % (module, cls), ProcessStateEnum.PENDING)
        try:
            # Additional attributes to set with the process instance
            proc_attr = {"_proc_type": process_type,
                         "_proc_spawn_cfg": config
                         }
            # SPAWN. Determined by type
            if process_type == PROCTYPE_SERVICE:
                process_instance = self._spawn_service_process(process_id, name, module, cls, process_cfg, proc_attr)
            elif process_type == PROCTYPE_STREAMPROC:
                process_instance = self._spawn_stream_process(process_id, name, module, cls, process_cfg, proc_attr)
            elif process_type == PROCTYPE_AGENT:
                process_instance = self._spawn_agent_process(process_id, name, module, cls, process_cfg, proc_attr)
            elif process_type == PROCTYPE_STANDALONE:
                process_instance = self._spawn_standalone_process(process_id, name, module, cls, process_cfg, proc_attr)
            elif process_type == PROCTYPE_IMMEDIATE:
                process_instance = self._spawn_immediate_process(process_id, name, module, cls, process_cfg, proc_attr)
            elif process_type == PROCTYPE_SIMPLE:
                process_instance = self._spawn_simple_process(process_id, name, module, cls, process_cfg, proc_attr)
            else:
                raise BadRequest("Unknown process type: %s" % process_type)
            # REGISTER.
            self._register_process(process_instance, name)
            process_instance.errcause = "OK"
            log.info("ProcManager.spawn_process: %s.%s -> pid=%s OK", module, cls, process_id)
            if process_type == PROCTYPE_IMMEDIATE:
                # Immediate processes run once and are torn down right away.
                log.debug('Terminating immediate process: %s', process_instance.id)
                self.terminate_process(process_instance.id)
                # Terminate process also triggers TERMINATING/TERMINATED
                self._call_proc_state_changed(process_instance, ProcessStateEnum.EXITED)
            else:
                # Update local policies for the new process
                if self.container.has_capability(self.container.CCAP.GOVERNANCE_CONTROLLER):
                    self.container.governance_controller.update_process_policies(
                        process_instance, safe_mode=True, force_update=False)
            return process_instance.id
        except IonProcessError:
            errcause = process_instance.errcause if process_instance else "instantiating process"
            log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
            return None
        except Exception:
            errcause = process_instance.errcause if process_instance else "instantiating process"
            log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
            # trigger failed notification - catches problems in init/start
            self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)
            raise
def _create_process_config(self, config):
""" Prepare the config for the new process. Clone system config and apply process overrides.
Support including config by reference of a resource attribute or object from object store.
"""
process_cfg = deepcopy(CFG)
if config:
# Use provided config. Must be dict or DotDict
if not isinstance(config, DotDict):
config = DotDict(config)
if config.get_safe("process.config_ref"):
# Use a reference
config_ref = config.get_safe("process.config_ref")
log.info("Enhancing new process spawn config from ref=%s" % config_ref)
matches = re.match(r'^([A-Za-z]+):([A-Za-z0-9_\.]+)/(.*)$', config_ref)
if matches:
ref_type, ref_id, ref_ext = matches.groups()
if ref_type == "resources":
if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):
try:
obj = self.container.resource_registry.read(ref_id)
if obj and hasattr(obj, ref_ext):
ref_config = getattr(obj, ref_ext)
if isinstance(ref_config, dict):
dict_merge(process_cfg, ref_config, inplace=True)
else:
raise BadRequest("config_ref %s exists but not dict" % config_ref)
else:
raise BadRequest("config_ref %s - attribute not found" % config_ref)
except NotFound as nf:
log.warn("config_ref %s - object not found" % config_ref)
raise
else:
log.error("Container missing RESOURCE_REGISTRY capability to resolve process config ref %s" % config_ref)
elif ref_type == "objects":
if self.container.has_capability(self.container.CCAP.OBJECT_STORE):
try:
obj = self.container.object_store.read_doc(ref_id)
ref_config = obj
if ref_ext:
ref_config = get_safe(obj, ref_ext, None)
if ref_config is None:
raise BadRequest("config_ref %s - attribute not found" % config_ref)
if isinstance(ref_config, dict):
| |
import logging
from typing import Optional, Dict, Any
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction, IntegrityError
from django.db.models import QuerySet
from django.utils import timezone
from baserow.core.exceptions import (
ApplicationNotInGroup,
GroupDoesNotExist,
ApplicationDoesNotExist,
TrashItemDoesNotExist,
)
from baserow.core.models import TrashEntry, Application, Group
from baserow.core.trash.exceptions import (
CannotRestoreChildBeforeParent,
ParentIdMustBeProvidedException,
ParentIdMustNotBeProvidedException,
CannotDeleteAlreadyDeletedItem,
)
from baserow.core.trash.registries import TrashableItemType, trash_item_type_registry
from baserow.core.trash.signals import permanently_deleted
# Module-level logger and the project's active Django user model.
logger = logging.getLogger(__name__)
User = get_user_model()
class TrashHandler:
    @staticmethod
    def trash(
        requesting_user: User,
        group: Group,
        application: Optional[Application],
        trash_item,
        parent_id=None,
    ) -> TrashEntry:
        """
        Marks the provided trashable item as trashed meaning it will no longer be
        visible or usable in Baserow. However any user with access to its group can
        restore the item after it is trashed to make it visible and usable again. After
        a configurable timeout period or when a user explicitly empties the
        trash, trashed items will be permanently deleted.

        :param parent_id: The id of the parent object if known.
        :param requesting_user: The user who is requesting that this item be trashed.
        :param group: The group the trashed item is in.
        :param application: If the item is in an application the application.
        :param trash_item: The item to be trashed.
        :raises CannotDeleteAlreadyDeletedItem: If a TrashEntry already exists
            for this item (unique constraint violation).
        :return: A newly created entry in the TrashEntry table for this item.
        """
        # Check if the parent has a trash entry, if so link this new entry to it
        # via a cascading on delete FK to ensure if the parent entry is deleted then
        # this one is also deleted. We do this as say if a table is perm deleted,
        # we don't then want to
        with transaction.atomic():
            trash_item_type = trash_item_type_registry.get_by_model(trash_item)
            # Validate that parent_id is (or is not) supplied as the type requires.
            _check_parent_id_valid(parent_id, trash_item_type)
            items_to_trash = trash_item_type.get_items_to_trash(trash_item)
            # Flag the item plus any dependent items as trashed in the database.
            for item in items_to_trash:
                item.trashed = True
                item.save()
            parent = trash_item_type.get_parent(trash_item, parent_id)
            if parent is not None:
                parent_type = trash_item_type_registry.get_by_model(parent)
                parent_name = parent_type.get_name(parent)
            else:
                parent_name = None
            try:
                return TrashEntry.objects.create(
                    user_who_trashed=requesting_user,
                    group=group,
                    application=application,
                    trash_item_type=trash_item_type.type,
                    trash_item_id=trash_item.id,
                    name=trash_item_type.get_name(trash_item),
                    parent_name=parent_name,
                    parent_trash_item_id=parent_id,
                    # If we ever introduce the ability to trash many rows at once this
                    # call will generate a model per row currently, instead a model
                    # cache should be added so generated models can be shared.
                    extra_description=trash_item_type.get_extra_description(
                        trash_item, parent
                    ),
                )
            except IntegrityError as e:
                # Only translate the duplicate-entry case; re-raise anything else.
                if "unique constraint" in e.args[0]:
                    raise CannotDeleteAlreadyDeletedItem()
                else:
                    raise e
    @staticmethod
    def restore_item(user, trash_item_type, trash_item_id, parent_trash_item_id=None):
        """
        Restores an item from the trash re-instating it back in Baserow exactly how it
        was before it was trashed.

        :param user: The user requesting to restore trashed item.
        :param trash_item_type: The trashable item type of the item to restore.
        :param parent_trash_item_id: The parent id of the item to restore.
        :param trash_item_id: The trash item id of the item to restore.
        :raises CannotRestoreChildBeforeParent: Raised if the item being restored has
            any parent, or parent of a parent etc which is trashed as that item should
            be restored first.
        """
        with transaction.atomic():
            trashable_item_type = trash_item_type_registry.get(trash_item_type)
            _check_parent_id_valid(parent_trash_item_id, trashable_item_type)
            trash_entry = _get_trash_entry(
                user, trash_item_type, parent_trash_item_id, trash_item_id
            )
            trash_item = trashable_item_type.lookup_trashed_item(trash_entry, {})
            items_to_restore = trashable_item_type.get_items_to_trash(trash_item)
            # Refuse restores whose ancestors are still trashed.
            if TrashHandler.item_has_a_trashed_parent(
                trash_item,
                parent_id=trash_entry.parent_trash_item_id,
            ):
                raise CannotRestoreChildBeforeParent()
            trash_entry.delete()
            # Restore everything in the database first before we run any restored
            # hooks otherwise signals etc might try to be sent when dependent items are
            # still trashed in the database.
            for item in items_to_restore:
                item.trashed = False
                item.save()
            for item in items_to_restore:
                restore_type = trash_item_type_registry.get_by_model(item)
                restore_type.trashed_item_restored(item, trash_entry)
@staticmethod
def get_trash_structure(user: User) -> Dict[str, Any]:
"""
Returns the structure of the trash available to the user. This consists of the
groups and their applications the user has access to. Each group and application
indicates whether it itself has been trashed.
:param user: The user to return the trash structure for.
:return: An ordered list of groups and their applications which could possibly
have trash contents.
"""
structure = {"groups": []}
groups = _get_groups_excluding_perm_deleted(user)
for group in groups:
applications = _get_applications_excluding_perm_deleted(group)
structure["groups"].append(
{
"id": group.id,
"trashed": group.trashed,
"name": group.name,
"applications": applications,
}
)
return structure
@staticmethod
def mark_old_trash_for_permanent_deletion():
"""
Updates all trash entries which are older than a django setting for permanent
deletion. Does not perform the deletion itself.
"""
now = timezone.now()
hours = settings.HOURS_UNTIL_TRASH_PERMANENTLY_DELETED
cutoff = now - timezone.timedelta(hours=hours)
updated_count = TrashEntry.objects.filter(trashed_at__lte=cutoff).update(
should_be_permanently_deleted=True
)
logger.info(
f"Successfully marked {updated_count} old trash items for deletion as they "
f"were older than {hours} hours."
)
    @staticmethod
    def empty(requesting_user: User, group_id: int, application_id: Optional[int]):
        """
        Marks all items in the selected group (or application in the group if
        application_id is provided) as should be permanently deleted.

        :param requesting_user: The user emptying the trash (must have group access).
        :param group_id: The group whose trash contents should be emptied.
        :param application_id: Optional application to restrict the emptying to.
        """
        with transaction.atomic():
            # get_trash_contents also performs the permission checks.
            trash_contents = TrashHandler.get_trash_contents(
                requesting_user, group_id, application_id
            )
            trash_contents.update(should_be_permanently_deleted=True)
    @staticmethod
    def permanently_delete_marked_trash():
        """
        Looks up every trash item marked for permanent deletion and removes them
        irreversibly from the database along with their corresponding trash entries.
        """
        # Shared cache passed into lookup_trashed_item across iterations.
        trash_item_lookup_cache = {}
        deleted_count = 0
        while True:
            with transaction.atomic():
                # Perm deleting a group or application can cause cascading deletion of
                # other trash entries hence we only look up one a time. If we instead
                # looped over a single queryset lookup of all TrashEntries then we could
                # end up trying to delete TrashEntries which have already been deleted
                # by a previous cascading delete of a group or application.
                trash_entry = TrashEntry.objects.filter(
                    should_be_permanently_deleted=True
                ).first()
                if not trash_entry:
                    break
                trash_item_type = trash_item_type_registry.get(
                    trash_entry.trash_item_type
                )
                try:
                    to_delete = trash_item_type.lookup_trashed_item(
                        trash_entry, trash_item_lookup_cache
                    )
                    TrashHandler._permanently_delete_and_signal(
                        trash_item_type,
                        to_delete,
                        trash_entry.parent_trash_item_id,
                        trash_item_lookup_cache,
                    )
                except TrashItemDoesNotExist:
                    # When a parent item is deleted it should also delete all of it's
                    # children. Hence we expect that many of these TrashEntries to no
                    # longer point to an existing item. In such a situation we just want
                    # to delete the entry as the item itself has been correctly deleted.
                    pass
                trash_entry.delete()
                deleted_count += 1
        logger.info(
            f"Successfully deleted {deleted_count} trash entries and their associated "
            "trashed items."
        )
    @staticmethod
    def _permanently_delete_and_signal(
        trash_item_type: Any,
        to_delete: Any,
        parent_id: Optional[int],
        trash_item_lookup_cache: Optional[Dict[str, Any]] = None,
    ):
        """
        Internal method which actually permanently deletes the provided to_delete object
        and also triggers the correct signal so plugins can do appropriate clean-up.

        :param trash_item_type: The trashable item type of the item being deleted.
        :param to_delete: The actual instance of the thing to delete.
        :param parent_id: If required for the trashable item type then the id of the
            parent of to_delete.
        :param trash_item_lookup_cache: An optional dictionary used for caching during
            many different invocations of permanently_delete.
        """
        _check_parent_id_valid(parent_id, trash_item_type)
        # Capture the id before deletion so the signal still carries it.
        trash_item_id = to_delete.id
        trash_item_type.permanently_delete_item(
            to_delete,
            trash_item_lookup_cache,
        )
        permanently_deleted.send(
            sender=trash_item_type.type,
            trash_item_id=trash_item_id,
            trash_item=to_delete,
            parent_id=parent_id,
        )
@staticmethod
def permanently_delete(trashable_item, parent_id=None):
"""
Actually removes the provided trashable item from the database irreversibly.
:param trashable_item: An instance of a TrashableItemType model_class to delete.
:param parent_id: If required to look-up the item to delete or related items
this should be set to the parent id of the item to delete.
"""
trash_item_type = trash_item_type_registry.get_by_model(trashable_item)
TrashHandler._permanently_delete_and_signal(
trash_item_type, trashable_item, parent_id
)
@staticmethod
def get_trash_contents(
user: User, group_id: int, application_id: Optional[int]
) -> QuerySet:
"""
Looks up the trash contents for a particular group optionally filtered by
the provided application id.
:param user: The user who is requesting to see the trash contents.
:param group_id: The group to lookup trash contents inside of.
:param application_id: The optional application to filter down the trash
contents to only this group.
:raises GroupDoesNotExist: If the group_id is for an non
existent group.
:raises ApplicationDoesNotExist: If the application_id is for an non
existent application.
:raises ApplicationNotInGroup: If the application_id is for an application
not in the requested group.
:raises UserNotInGroup: If the user does not belong to the group.
:return: a queryset of the trash items in the group optionally filtered by
the provided application.
"""
group = _get_group(group_id, user)
application = _get_application(application_id, group, user)
trash_contents = TrashEntry.objects.filter(
group=group, should_be_permanently_deleted=False
)
if application:
trash_contents = trash_contents.filter(application=application)
return trash_contents.order_by("-trashed_at")
@staticmethod
def item_has_a_trashed_parent(item, parent_id=None, check_item_also=False):
"""
Given an instance of a model which is trashable (item) checks if it has a parent
which is trashed. Returns True if it's parent, or parent's parent (and so on)
is trashed, False if | |
# Repository: McZazz/PythonPrettyPrint
import re

# read the input .txt and strip trailing whitespace from each line
with open('prettyPY_input.txt', 'r') as f:
    linesInList = [line.rstrip() for line in f.readlines()]

# rejoin the lines with ' \n' separators, then explode the whole text into a
# list holding one character per entry
newlinesinlist = ' \n'.join(linesInList)
splitList = list(newlinesinlist)

# areaList runs parallel to splitList: every slot starts unlabelled ('---')
# and the labelling passes below overwrite slots with region tags.
# splitList itself stays untouched.
areaList = ['---'] * len(splitList)

lasti = len(splitList) - 1
# pass 1: label '#' line comments. This pass MUST run before the others so
# that quote characters inside comments are never mistaken for strings.
flag = False
for i, ch in enumerate(splitList):
    # only characters not already claimed by a previous pass are considered
    openn = areaList[i] == '---'
    if openn:
        if not flag and ch == '#':
            # a '#' in unlabelled territory opens a comment
            flag = True
        elif flag and ch == '\n':
            # the newline ends the comment: tag it and reset the state
            areaList[i] = 'inHashComm'
            flag = False
        if flag:
            # everything from the '#' up to the newline gets tagged
            areaList[i] = 'inHashComm'
# label """ comment areas
flag = False
for i, _ in enumerate(splitList):
if areaList[i] == '---':
openn = True
else:
openn = False
if openn and i+2 <= lasti and flag == False and _ == '"' and splitList[i+1] == '"' and splitList[i+2] == '"':
flag = True
elif openn and i+2 <= lasti and flag and _ == '"' and splitList[i+1] == '"' and splitList[i+2] == '"':
areaList[i] = 'inDbleComm'
areaList[i+1] = 'inDbleComm'
areaList[i+2] = 'inDbleComm'
flag = False
if openn and flag:
areaList[i] = 'inDbleComm'
# pass 3: label triple-single-quote (docstring) areas
flag = False
for i, ch in enumerate(splitList):
    openn = areaList[i] == '---'
    # a triple quote only counts when all three chars fit before the end
    if openn and i + 2 <= lasti and splitList[i:i + 3] == ["'", "'", "'"]:
        if not flag:
            # opening ''' found
            flag = True
        else:
            # closing ''': tag all three quote characters and reset
            areaList[i] = "inSnglComm"
            areaList[i + 1] = "inSnglComm"
            areaList[i + 2] = "inSnglComm"
            flag = False
    if openn and flag:
        # inside an open docstring: tag the character
        areaList[i] = "inSnglComm"
# label " string areas
# only works if it comes after the comment areas!!!
flag1 = False
flag2 = False
for i, _ in enumerate(splitList):
if areaList[i] == '---':
openn = True
else:
openn = False
# doing double quote strings
if openn and flag1 == False and flag2 == False and _ == '"':
flag1 = True
elif openn and i+1 <= lasti and flag1 and\
(_ == '"' or _ == '\"' or areaList[i+1] == "inSnglComm" or areaList[i+1] == 'inDbleComm' or areaList[i+1] == "inHashComm"):
areaList[i] = 'inDoubleStr'
flag1 = False
if openn and flag1:
areaList[i] = 'inDoubleStr'
# doing single quote strings
if openn and flag2 == False and flag1 == False and _ == "'":
flag2 = True
elif openn and i+1 <= lasti and flag2 and\
(_ == "'" or _ == "\'" or areaList[i+1] == "inSnglComm" or areaList[i+1] == 'inDbleComm' or areaList[i+1] == "inHashComm"):
areaList[i] = "inSingleStr"
flag2 = False
if openn and flag2:
areaList[i] = "inSingleStr"
# labeling python reserved words
flag = False
# Character-class regexes used to ensure a keyword match is a whole word:
# the char before a keyword must not be a letter, and the char after must
# not be a letter or digit.
befores = "[^a-zA-Z]" # anything but letters
afters = "[^a-zA-Z0-9]" # anything but letters and nums
for i, _ in enumerate(splitList):
if areaList[i] == '---':
openn = True
else:
openn = False
# 2 char words
if openn and i>0 and i+2 <= lasti and\
((re.search(befores,splitList[i-1]) and _ == "a" and splitList[i+1] == "s" and re.search(afters,splitList[i+2]))\
or(re.search(befores,splitList[i-1]) and _ == "i" and splitList[i+1] == "f" and re.search(afters,splitList[i+2]))\
or(re.search(befores,splitList[i-1]) and _ == "i" and splitList[i+1] == "n" and re.search(afters,splitList[i+2]))\
or(re.search(befores,splitList[i-1]) and _ == "i" and splitList[i+1] == "s" and re.search(afters,splitList[i+2]))\
or(re.search(befores,splitList[i-1]) and _ == "o" and splitList[i+1] == "r" and re.search(afters,splitList[i+2]))):
areaList[i-1] = 'pyWord'
areaList[i] = 'pyWord'
areaList[i+1] = 'pyWord'
areaList[i+2] = 'pyWord'
if openn and i==0 and i+2 <= lasti and\
((_ == "a" and splitList[i+1] == "s" and re.search(afters,splitList[i+2]))\
or(_ == "i" and splitList[i+1] == "f" and re.search(afters,splitList[i+2]))\
or(_ == "i" and splitList[i+1] == "n" and re.search(afters,splitList[i+2]))\
or(_ == "i" and splitList[i+1] == "s" and re.search(afters,splitList[i+2]))\
or(_ == "o" and splitList[i+1] == "r" and re.search(afters,splitList[i+2]))):
areaList[i] = 'pyWord'
areaList[i+1] = 'pyWord'
areaList[i+2] = 'pyWord'
# as
# if
# in
# is
# or
elif openn and i>0 and i+3 <= lasti and\
((re.search(befores,splitList[i-1]) and _ == "a" and splitList[i+1] == "n" and splitList[i+2] == "d" and re.search(afters,splitList[i+3]))\
or(re.search(befores,splitList[i-1]) and _ == "t" and splitList[i+1] == "r" and splitList[i+2] == "y" and re.search(afters,splitList[i+3]))\
or(re.search(befores,splitList[i-1]) and _ == "d" and splitList[i+1] == "e" and splitList[i+2] == "f" and re.search(afters,splitList[i+3]))\
or(re.search(befores,splitList[i-1]) and _ == "d" and splitList[i+1] == "e" and splitList[i+2] == "l" and re.search(afters,splitList[i+3]))\
or(re.search(befores,splitList[i-1]) and _ == "f" and splitList[i+1] == "o" and splitList[i+2] == "r" and re.search(afters,splitList[i+3]))\
or(re.search(befores,splitList[i-1]) and _ == "n" and splitList[i+1] == "o" and splitList[i+2] == "t" and re.search(afters,splitList[i+3]))):
areaList[i-1] = 'pyWord'
areaList[i] = 'pyWord'
areaList[i+1] = 'pyWord'
areaList[i+2] = 'pyWord'
areaList[i+3] = 'pyWord'
elif openn and i==0 and i+3 <= lasti and\
((_ == "a" and splitList[i+1] == "n" and splitList[i+2] == "d" and re.search(afters,splitList[i+3]))\
or(_ == "t" and splitList[i+1] == "r" and splitList[i+2] == "y" and re.search(afters,splitList[i+3]))\
or(_ == "d" and splitList[i+1] == "e" and splitList[i+2] == "f" and re.search(afters,splitList[i+3]))\
or(_ == "d" and splitList[i+1] == "e" and splitList[i+2] == "l" and re.search(afters,splitList[i+3]))\
or(_ == "f" and splitList[i+1] == "o" and splitList[i+2] == "r" and re.search(afters,splitList[i+3]))\
or(_ == "n" and splitList[i+1] == "o" and splitList[i+2] == "t" and re.search(afters,splitList[i+3]))):
areaList[i] = 'pyWord'
areaList[i+1] = 'pyWord'
areaList[i+2] = 'pyWord'
areaList[i+3] = 'pyWord'
# and
# try
# def
# del
# for
# not
elif openn and i>0 and i+4 <= lasti and\
((re.search(befores,splitList[i-1]) and _ == "e" and splitList[i+1] == "l" and splitList[i+2] == "i" and splitList[i+3] == "f" and re.search(afters,splitList[i+4]))\
or(re.search(befores,splitList[i-1]) and _ == "e" and splitList[i+1] == "l" and splitList[i+2] == "s" and splitList[i+3] == "e" and re.search(afters,splitList[i+4]))\
or(re.search(befores,splitList[i-1]) and _ == "f" and splitList[i+1] == "r" and splitList[i+2] == "o" and splitList[i+3] == "m" and re.search(afters,splitList[i+4]))\
or(re.search(befores,splitList[i-1]) and _ == "N" and splitList[i+1] == "o" and splitList[i+2] == "n" and splitList[i+3] == "e" and re.search(afters,splitList[i+4]))\
or(re.search(befores,splitList[i-1]) and _ == "p" and splitList[i+1] == "a" and splitList[i+2] == "s" and splitList[i+3] == "s" and re.search(afters,splitList[i+4]))\
or(re.search(befores,splitList[i-1]) and _ == "T" and splitList[i+1] == "r" and splitList[i+2] == "u" and splitList[i+3] == "e" and re.search(afters,splitList[i+4]))\
or(re.search(befores,splitList[i-1]) and _ == "w" and splitList[i+1] == "i" and splitList[i+2] == "t" and splitList[i+3] == "h" and re.search(afters,splitList[i+4]))):
areaList[i-1] = 'pyWord'
areaList[i] = 'pyWord'
areaList[i+1] = 'pyWord'
areaList[i+2] = 'pyWord'
areaList[i+3] = 'pyWord'
areaList[i+4] = 'pyWord'
elif openn and i==0 and i+4 <= lasti and\
((_ == "e" and splitList[i+1] == "l" and splitList[i+2] == "i" and splitList[i+3] == "f" and re.search(afters,splitList[i+4]))\
or(_ == "e" and splitList[i+1] == "l" and splitList[i+2] == "s" and splitList[i+3] == "e" and re.search(afters,splitList[i+4]))\
or(_ == "f" and splitList[i+1] == "r" and splitList[i+2] == "o" and splitList[i+3] == "m" and re.search(afters,splitList[i+4]))\
or(_ == "N" and splitList[i+1] == "o" and splitList[i+2] == "n" and splitList[i+3] == "e" and re.search(afters,splitList[i+4]))\
or(_ == "p" and splitList[i+1] == "a" and splitList[i+2] == "s" and splitList[i+3] == "s" and re.search(afters,splitList[i+4]))\
or(_ == "T" and splitList[i+1] == "r" and splitList[i+2] == "u" and splitList[i+3] == "e" and re.search(afters,splitList[i+4]))\
or(_ == "w" and splitList[i+1] == "i" and splitList[i+2] == "t" and splitList[i+3] == "h" and re.search(afters,splitList[i+4]))):
areaList[i] = 'pyWord'
areaList[i+1] = 'pyWord'
areaList[i+2] = 'pyWord'
areaList[i+3] = 'pyWord'
areaList[i+4] = 'pyWord'
# elif
# else
# from
# None
# pass
# True
# with
elif openn and i>0 and i+5 <= lasti and\
((re.search(befores,splitList[i-1]) and _ == "r" and splitList[i+1] == "a" and splitList[i+2] == "i" and splitList[i+3] == "s" and splitList[i+4] == "e" and re.search(afters,splitList[i+5]))\
or(re.search(befores,splitList[i-1]) and _ == "F" and splitList[i+1] == "a" and splitList[i+2] == "l" and splitList[i+3] == "s" and splitList[i+4] == "e" and re.search(afters,splitList[i+5]))\
or(re.search(befores,splitList[i-1]) and _ == "b" and splitList[i+1] == "r" and splitList[i+2] == "e" and splitList[i+3] == "a" and splitList[i+4] == "k" and re.search(afters,splitList[i+5]))\
or(re.search(befores,splitList[i-1]) and _ == "c" and splitList[i+1] == "l" and splitList[i+2] == "a" and splitList[i+3] == "s" and splitList[i+4] | |
return f"async hello: {hello} {world}"
>>> get_async_type(async_func)
'coro func'
>>> get_async_type(async_func(5))
'coro'
>>> get_async_type(sync_func)
'sync func'
>>> get_async_type(sync_func(10))
'unknown'
:param Any obj: Object to check for async type
:return str async_type: Either ``'coro func'``, ``'coro'``, ``'awaitable'``, ``'sync func'`` or ``'unknown'``
"""
if _is_coro(obj):
if asyncio.iscoroutinefunction(obj): return "coro func"
if asyncio.iscoroutine(obj): return "coro"
if isinstance(obj, Awaitable): return "awaitable"
if callable(obj): return "sync func"
return "unknown"
AWAITABLE_BLACKLIST_FUNCS: List[str] = []
"""
A list of plain function names - for which :func:`.awaitable` decorated function/methods should always
run synchronously.
"""
AWAITABLE_BLACKLIST_MODS: List[str] = []
"""
A list of fully qualified module names - for which :func:`.awaitable` decorated function/methods should always
run synchronously.
"""
AWAITABLE_BLACKLIST: List[str] = []
"""
A list of fully qualified module paths to functions/methods, if any of these functions/methods call an :func:`.awaitable`
decorated function/method, then the awaitable will be ran synchronously regardless of whether there's an active
AsyncIO context or not.
"""
def is_async_context() -> bool:
    """
    Returns ``True`` if currently in an async context, otherwise ``False``.

    Relies on :mod:`sniffio` to detect a running async library; raises
    :class:`ImportError` if ``sniffio`` is not installed.
    """
    try:
        import sniffio
    except ImportError as e:
        raise ImportError(f"is_async_context / @awaitable unavailable - 'sniffio' not installed. Exc: {type(e)} {str(e)} ")

    # sniffio raises AsyncLibraryNotFoundError when no async library is running.
    try:
        sniffio.current_async_library()
    except sniffio.AsyncLibraryNotFoundError:
        return False
    return True
def _awaitable_blacklisted(skip=3) -> bool:
    """
    Returns ``True`` if the caller of the function calling ``_awaitable_blacklisted`` is present in the awaitable
    blacklists such as :attr:`.AWAITABLE_BLACKLIST` - otherwise ``False`` if they're not blacklisted.

    :param int skip: Scan the caller function this far up the stack
                     (2 = callee of _awaitable_blacklisted, 3 = callee of callee #2, 4 = callee of #3, 5 = callee of #4 etc.)
    :return bool is_blacklisted: ``True`` if the calling method/module/function is blacklisted, otherwise ``False``.
    """
    try:
        from privex.helpers.black_magic import calling_module, calling_function, caller_name

        # Exact "module.Class.method" path match
        if caller_name(skip=skip) in AWAITABLE_BLACKLIST:
            return True
        # Plain function name match
        if calling_function(skip=skip) in AWAITABLE_BLACKLIST_FUNCS:
            return True

        mod = calling_module(skip=skip)
        # Exact module match, or sub-module match (a blacklisted 'hello.world'
        # also blacklists 'hello.world.example').
        if mod in AWAITABLE_BLACKLIST_MODS:
            return True
        if any(mod.startswith(f"{m}.") for m in AWAITABLE_BLACKLIST_MODS):
            return True
    except Exception:
        log.exception("Failed to check blacklist for awaitable function. Falling back to standard async sniffing.")
    return False
class AwaitableMixin:
    """
    Mixin which makes coroutine attributes of a class callable from synchronous
    code: attribute access returns a wrapper which, when called, either invokes
    the attribute normally (async context) or resolves it in an event loop via
    ``loop_run`` (sync context, or when the caller is blacklisted).
    """

    def __getattribute__(self, item):
        a = object.__getattribute__(self, item)
        if not _is_coro(a):
            return a

        def _wrp(*args, **kwargs):
            if is_async_context() and not _awaitable_blacklisted():
                # BUGFIX: previously this branch returned ``a`` itself, silently
                # discarding args/kwargs. Now the attribute is called with them,
                # matching the behaviour of ``awaitable_class``'s wrapper.
                return a(*args, **kwargs)
            return loop_run(a, *args, **kwargs)

        return _wrp
def awaitable_class(cls: Type[T]) -> Type[T]:
    """
    Wraps a class, allowing all async methods to be used in non-async code as if they were normal synchronous methods.

    **Example Usage**

    Simply decorate your class with ``@awaitable_class`` (no brackets! takes no arguments), and once you create an instance of your
    class, all of your async methods can be used by synchronous code as-if they were plain functions::

        >>> from privex.helpers import awaitable_class
        >>>
        >>> @awaitable_class
        >>> class ExampleAsyncCls:
        >>>     async def example_async(self):
        >>>         return "hello async world"
        >>>
        >>>     def example_sync(self):
        >>>         return "hello non-async world"

    NOTE - You can also wrap a class without using a decorator - just pass the class as the first argument like so::

        >>> class _OtherExample:
        ...     async def hello(self):
        ...         return 'world'
        >>> OtherExample = awaitable_class(_OtherExample)

    If we call ``.example_async()`` on the above class from a synchronous REPL, it will return ``'hello async world'`` as if it were a
    normal synchronous method. The non-async ``.example_sync()`` works like normal. However, inside an async context
    (e.g. an async function), the methods return coroutines, so they should be ``await``-ed::

        >>> async def test_async():
        >>>     exmp = ExampleAsyncCls()
        >>>     return await exmp.example_async()

    :param type cls: The class to wrap
    :return type wrapped_class: The class after being wrapped
    """

    class _AwaitableClass(cls):
        __AWAITABLE_CLS = True
        """
        This sub-class is modified to appear as if it were the original class being sub-classed, unfortunately this means
        it would be difficult to check whether or not a class has been wrapped with _AwaitableClass.
        To allow you to check whether or not a class has been sub-classed by _AwaitableClass, this class private attribute
        is present on the returned class and any instances of it.
        """

        def __getattribute__(self, item):
            a: Union[Coroutine, callable, Any] = super().__getattribute__(item)
            cls_name = super().__getattribute__('__class__').__name__
            full_attr = f"{cls_name}.{item}"
            if not _is_coro(a):
                log.debug("Attribute %s is not a coroutine or coro function. Returning normally.", full_attr)
                return a

            def _wrp(*args, **kwargs):
                if is_async_context() and not _awaitable_blacklisted():
                    log.debug("Currently in async context. Returning %s as coroutine", full_attr)
                    return a(*args, **kwargs)
                # BUGFIX: this log line previously claimed the attribute was being
                # "returned as coroutine", even though this branch resolves it
                # synchronously inside an event loop via loop_run.
                log.debug("Not in async context or attribute is blacklisted. Running %s in event loop", full_attr)
                return loop_run(a, *args, **kwargs)

            return _wrp

    # Masquerade as the original class (name / qualname / module copied from cls).
    _AwaitableClass.__name__ = cls.__name__
    _AwaitableClass.__qualname__ = cls.__qualname__
    _AwaitableClass.__module__ = cls.__module__
    return _AwaitableClass
def awaitable(func: Callable) -> Callable:
    """
    Decorator which helps with creation of async wrapper functions.

    **Usage**

    Define your async function as normal, then create a standard python function using this decorator - the function
    should just call your async function and return it.

        >>> async def some_func_async(a: str, b: str):
        ...     c = a + b
        ...     return c
        ...
        >>> @awaitable
        >>> def some_func(a, b) -> Union[str, Coroutine[Any, Any, str]]:
        ...     return some_func_async(a, b)
        ...

    Inside of async functions, we just ``await`` the wrapper function as if it were the original async function.
    Inside of synchronous functions, we call the wrapper function as if it were a normal synchronous function -
    the decorator runs the coroutine in an asyncio event loop and returns the result, transparent to the caller.

    **Blacklists**

    If you mix a lot of synchronous and asynchronous code, :mod:`sniffio` may return coroutines to synchronous functions
    that were called from asynchronous functions, which can of course cause problems.

    Three blacklists are available in this module, which allow you to specify caller functions/methods, modules, or
    fully qualified module paths to functions/methods for which :func:`.awaitable` wrapped functions/methods
    should **always** execute in an event loop and return synchronously. Example::

        >>> from privex.helpers import asyncx
        >>> # All code within 'some.module' and its sub-modules always runs awaitables synchronously.
        >>> asyncx.AWAITABLE_BLACKLIST_MODS += ['some.module']
        >>> # Any function named 'example_func' (in any module) always runs awaitables synchronously.
        >>> asyncx.AWAITABLE_BLACKLIST_FUNCS += ['example_func']
        >>> # The specific method 'other.module.SomeClass.some_sync' always runs awaitables synchronously.
        >>> asyncx.AWAITABLE_BLACKLIST += ['other.module.SomeClass.some_sync']

    Original source: https://github.com/encode/httpx/issues/572#issuecomment-562179966

    :param Callable func: A non-async function which returns the result of calling an async function.
    :return Callable wrapper: The wrapped function, preserving ``func``'s metadata.
    """
    # Local import keeps this decorator self-contained within the module's style.
    from functools import wraps

    # IMPROVEMENT: wraps() preserves func's __name__ / __doc__ / signature metadata,
    # which the previous bare wrapper destroyed (breaking introspection and docs).
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Union[Any, Coroutine[Any, Any, Any]]:
        coroutine = func(*args, **kwargs)
        # The wrapped function isn't a coroutine function, nor a coroutine. This may be caused by an adapter
        # wrapper class which deals with both synchronous and asynchronous adapters.
        # Since it doesn't appear to be a coroutine, just return the result.
        if not _is_coro(coroutine):
            return coroutine
        # Always run the coroutine in an event loop if the caller function is blacklisted in the AWAITABLE_BLACKLIST* lists
        if _awaitable_blacklisted():
            return asyncio.get_event_loop().run_until_complete(coroutine)
        if is_async_context():   # We're in async context, return the coroutine for await usage
            return coroutine
        # Not in async context: resolve the coroutine in an event loop.
        return asyncio.get_event_loop().run_until_complete(coroutine)
    return wrapper
# noinspection All
class aobject(object):
"""
Inheriting this class allows you to define an async __init__.
To | |
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eval utils."""
# pylint: disable=g-bare-generic
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
# pylint: disable=g-importing-member
# pylint: disable=g-no-space-after-docstring-summary
# pylint: disable=g-short-docstring-punctuation
# pylint: disable=logging-format-interpolation
# pylint: disable=logging-fstring-interpolation
# pylint: disable=missing-function-docstring
import collections
import time
from typing import Any, Dict
from absl import logging
import numpy as np
import robustness_metrics as rm
from sklearn.metrics import accuracy_score
from sklearn.metrics import auc
from sklearn.metrics import log_loss
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import check_array
from sklearn.utils import check_consistent_length
import tensorflow as tf
from . import metric_utils # local file import
from . import results_storage_utils # local file import
@tf.function
def eval_step_tf(dataset_iterator, dataset_steps, strategy, estimator,
                 estimator_args, uncertainty_estimator_fn, is_deterministic):
  """Eval step.

  Run TensorFlow model evaluation, using an `uncertainty_estimator_fn`
  to produce predictions and decomposed uncertainty estimates for each example.

  Args:
    dataset_iterator: tf.data.Dataset, dataset on which we will evaluate the
      model.
    dataset_steps: int, number of gradient steps in the dataset.
    strategy: tf.distribute strategy, used to distribute datasets.
    estimator: model wrapped to produce a `tf.Tensor`, predictive mean, with
      shape [B].
    estimator_args: Dict, extra args for the `uncertainty_estimator_fn`, such as
      the number of MC samples, `num_samples`.
    uncertainty_estimator_fn: Callable, method to produce predictive means along
      with various metrics of uncertainty, e.g., predictive_entropy, epistemic
      uncertainty (mutual information).
    is_deterministic: bool, is the model a single deterministic network. In this
      case, we cannot capture epistemic uncertainty.

  Returns:
    Dict, contains `tf.Tensor` predictions, ground truth,
    and uncertainty estimates.
  """
  # This print fires once per retrace of the tf.function, useful to spot
  # unwanted retracing.
  print('Tracing in `eval_utils.eval_step_tf`.')

  def step_fn(inputs):
    # Per-replica computation: runs the estimator on one batch and returns a
    # flat tuple (required so results survive the strategy.run boundary).
    print('Tracing in `eval_utils.eval_step_tf.step_fn`.')
    images = inputs['features']
    labels = inputs['labels']
    # Compute prediction, total, aleatoric, and epistemic uncertainty estimates
    pred_and_uncert = uncertainty_estimator_fn(
        images, estimator, training_setting=False, **estimator_args)
    # Return a tuple
    y_true = labels
    y_pred = pred_and_uncert['prediction']
    y_pred_entropy = pred_and_uncert['predictive_entropy']
    if not is_deterministic:
      y_pred_variance = pred_and_uncert['predictive_variance']
      y_aleatoric_uncert = pred_and_uncert['aleatoric_uncertainty']
      y_epistemic_uncert = pred_and_uncert['epistemic_uncertainty']
    else:
      # Deterministic models provide no uncertainty decomposition; use empty
      # placeholders so the returned tuple shape stays uniform.
      y_pred_variance = tf.zeros(0)
      y_aleatoric_uncert = tf.zeros(0)
      y_epistemic_uncert = tf.zeros(0)
    return (y_true, y_pred, y_pred_entropy, y_pred_variance, y_aleatoric_uncert,
            y_epistemic_uncert)

  # Containers for storage of
  # predictions, ground truth, uncertainty estimates
  # Construct tf.TensorArrays to store model results
  # One TensorArray slot per (step, replica) pair.
  n_per_core_batches = dataset_steps * strategy.num_replicas_in_sync
  y_true = tf.TensorArray(tf.int32, size=n_per_core_batches)
  y_pred = tf.TensorArray(tf.float32, size=n_per_core_batches)
  y_pred_entropy = tf.TensorArray(tf.float32, size=n_per_core_batches)
  y_pred_variance = tf.TensorArray(tf.float32, size=n_per_core_batches)
  y_aleatoric_uncert = tf.TensorArray(tf.float32, size=n_per_core_batches)
  y_epistemic_uncert = tf.TensorArray(tf.float32, size=n_per_core_batches)

  for i in tf.range(dataset_steps):
    result = strategy.run(step_fn, args=(next(dataset_iterator),))

    # Parse results tuple
    (y_true_, y_pred_, y_pred_entropy_, y_pred_variance_, y_aleatoric_uncert_,
     y_epistemic_uncert_) = result

    # Convert from Per-Replica object to tuple
    if strategy.num_replicas_in_sync > 1:
      y_true_ = y_true_.values
      y_pred_ = y_pred_.values
      y_pred_entropy_ = y_pred_entropy_.values

      if not is_deterministic:
        y_pred_variance_ = y_pred_variance_.values
        y_aleatoric_uncert_ = y_aleatoric_uncert_.values
        y_epistemic_uncert_ = y_epistemic_uncert_.values

      # Iterate through per-batch results
      # This is written in a very un-Pythonic manner to have updates only
      # rely on arguments successfully passed to TPU scope
      # NOTE(review): inside each replica_id iteration the inner loops write
      # EVERY per-replica tensor to the same `index`, so later replicas appear
      # to overwrite earlier ones within this step's group of slots. Confirm
      # whether this is intentional (graph-mode indexing of PerReplica tuples
      # with a tf scalar is restricted, which may explain this shape).
      for replica_id in tf.range(strategy.num_replicas_in_sync):
        index = (strategy.num_replicas_in_sync * i) + replica_id
        for batch_result in y_true_:
          y_true = y_true.write(index, batch_result)
        for batch_result in y_pred_:
          y_pred = y_pred.write(index, batch_result)
        for batch_result in y_pred_entropy_:
          y_pred_entropy = y_pred_entropy.write(index, batch_result)

        if not is_deterministic:
          for batch_result in y_pred_variance_:
            y_pred_variance = y_pred_variance.write(index, batch_result)
          for batch_result in y_aleatoric_uncert_:
            y_aleatoric_uncert = y_aleatoric_uncert.write(index, batch_result)
          for batch_result in y_epistemic_uncert_:
            y_epistemic_uncert = y_epistemic_uncert.write(index, batch_result)
    else:
      # Single replica: results are plain tensors; one slot per step.
      y_true = y_true.write(i, y_true_)
      y_pred = y_pred.write(i, y_pred_)
      y_pred_entropy = y_pred_entropy.write(i, y_pred_entropy_)
      if not is_deterministic:
        y_pred_variance = y_pred_variance.write(i, y_pred_variance_)
        y_aleatoric_uncert = y_aleatoric_uncert.write(i, y_aleatoric_uncert_)
        y_epistemic_uncert = y_epistemic_uncert.write(i, y_epistemic_uncert_)

  # Stack the TensorArrays into dense tensors for return.
  results_arrs = {
      'y_true': y_true.stack(),
      'y_pred': y_pred.stack(),
      'y_pred_entropy': y_pred_entropy.stack(),
  }
  if not is_deterministic:
    results_arrs['y_pred_variance'] = y_pred_variance.stack()
    results_arrs['y_aleatoric_uncert'] = y_aleatoric_uncert.stack()
    results_arrs['y_epistemic_uncert'] = y_epistemic_uncert.stack()

  return results_arrs
def evaluate_model_on_datasets(strategy,
                               datasets,
                               steps,
                               estimator,
                               estimator_args,
                               uncertainty_estimator_fn,
                               eval_batch_size,
                               call_dataset_iter,
                               is_deterministic=False,
                               backend='tf',
                               eval_step_jax=None,
                               verbose=False):
  """Evaluate a model on each provided dataset split.

  Runs model evaluation on all provided datasets, using an
  `uncertainty_estimator_fn` to produce predictions and decomposed
  uncertainty estimates for each example. Also constructs joint dataset
  predictions, composed of predictions on both in-domain and OOD datasets.

  Args:
    strategy: tf.distribute strategy, used to distribute datasets.
    datasets: Dict[str, tf.data.Dataset], datasets on which we evaluate the
      model.
    steps: Dict[str, int], number of gradient steps in each dataset.
    estimator: model wrapped to produce a `tf.Tensor` (if `backend`=='tf') or
      `np.ndarray` (if `backend`=='jax'), predictive mean, with shape [B].
    estimator_args: Dict, extra args for the `uncertainty_estimator_fn`, such as
      the number of MC samples, `num_samples`.
    uncertainty_estimator_fn: Callable, method to produce predictive means along
      with various metrics of uncertainty, e.g., predictive_entropy, epistemic
      uncertainty (mutual information).
    eval_batch_size: int, size of evaluation minibatches.
    call_dataset_iter: bool, if True, should call `iter()` on each dataset. May
      not need if evaluation datasets have been repeated.
    is_deterministic: bool, is the model a single deterministic network. In this
      case, we cannot capture epistemic uncertainty.
    backend: str, in {'tf', 'jax'}, specifies the evaluation method.
    eval_step_jax: Callable, evaluation method used for Jax model.
    verbose: bool, extra logging.

  Returns:
    Dict, for each dataset, contains `np.array` predictions, ground truth,
    and uncertainty estimates.
  """
  # Collected per split so joint datasets can be formed afterwards:
  # e.g., joint_test = in_domain_test UNION ood_test
  results_by_split = {}

  for dataset_split, dataset in datasets.items():
    start_time = time.time()
    dataset_iterator = iter(dataset) if call_dataset_iter else dataset
    logging.info(f'Creating iterator took {time.time() - start_time} seconds.')

    dataset_steps = steps[dataset_split]
    logging.info(f'Evaluating split {dataset_split}.')
    if backend == 'jax':
      eval_epoch_arrs = eval_step_jax(dataset_iterator, dataset_steps,
                                      is_deterministic, **estimator_args)
    elif backend == 'tf':
      eval_epoch_arrs = eval_step_tf(dataset_iterator,
                                     tf.convert_to_tensor(dataset_steps),
                                     strategy, estimator, estimator_args,
                                     uncertainty_estimator_fn, is_deterministic)
    else:
      raise NotImplementedError(f'Backend {backend} is not supported yet.')

    # Record timing / size metadata for this split.
    time_elapsed = time.time() - start_time
    split_dict = {
        'total_ms_elapsed': time_elapsed * 1e6,
        'dataset_size': dataset_steps * eval_batch_size,
    }
    results_by_split[dataset_split] = split_dict

    # Convert eval results into flat, vectorized NumPy containers.
    for eval_key, eval_arr in eval_epoch_arrs.items():
      arr = eval_arr if backend == 'jax' else eval_arr.numpy()
      if arr.ndim > 1:
        arr = np.concatenate(arr).flatten()
      split_dict[eval_key] = arr
      if verbose:
        print(f'Concatenated {eval_key} into shape '
              f'{split_dict[eval_key].shape}')

    split_dict['y_pred'] = split_dict['y_pred'].astype('float64')

  # Add Joint Dicts
  results_by_split = results_storage_utils.add_joint_dicts(
      results_by_split, is_deterministic=is_deterministic)

  return results_by_split
def evaluate_model_and_compute_metrics(
strategy,
eval_datasets,
steps,
metrics,
eval_estimator,
uncertainty_estimator_fn,
eval_batch_size,
available_splits,
estimator_args,
call_dataset_iter,
is_deterministic=False,
num_bins=15,
use_tpu=True,
return_per_pred_results=False,
backend='tf',
eval_step_jax=None):
"""Main.
Main method for evaluation and computing metrics using TF or Jax
models. Usable for evaluation during tuning.
Args:
strategy: tf.distribute strategy, used to distribute datasets.
eval_datasets: Dict[str, tf.data.Dataset], datasets on which we evaluate the
model.
steps: Dict[str, int], number of gradient steps in each dataset.
metrics: metrics.
eval_estimator: model wrapped to produce a `tf.Tensor` (if `backend`=='tf')
or `np.ndarray` (if `backend`=='jax'), predictive mean, with shape [B].
uncertainty_estimator_fn: Callable, method to produce predictive means along
with various metrics of uncertainty, e.g., predictive_entropy, epistemic
uncertainty (mutual information).
eval_batch_size: int, size of evaluation minibatches.
available_splits: List[str], names of the evaluation datasets provided, used
to log results only for these splits.
estimator_args: Dict, extra args for the `uncertainty_estimator_fn`, such as
the number of MC samples, `num_samples`.
call_dataset_iter: bool, if True, should call `iter()` on each dataset. May
not need if evaluation datasets have been repeated.
is_deterministic: bool, is the model a single deterministic network. In this
case, we cannot capture epistemic uncertainty.
num_bins: int, number of bins to use with expected calibration error.
use_tpu: bool, currently exists a bug that disallows collecting ECE during
training with TPU, this is used to avoid logging that metric.
return_per_pred_results: bool,
backend: str, in {'tf', 'jax'}, specifies the evaluation method.
eval_step_jax: Callable, evaluation method used for Jax model.
Returns:
Union[Tuple[Dict, Dict], Dict]
If return_per_pred_results, return two Dicts. Else, return only the
second.
first Dict:
for each dataset, per-prediction results (e.g., each prediction,
ground-truth, loss, retention arrays).
second Dict:
for each dataset, contains `np.array` predictions, ground truth,
and uncertainty estimates.
"""
# Compute predictions on all evaluation datasets
# When we eval during training we don't need to re-iterate the
# evaluation datasets
eval_results = evaluate_model_on_datasets(
strategy=strategy,
datasets=eval_datasets,
steps=steps,
estimator=eval_estimator,
estimator_args=estimator_args,
uncertainty_estimator_fn=uncertainty_estimator_fn,
eval_batch_size=eval_batch_size,
call_dataset_iter=call_dataset_iter,
is_deterministic=is_deterministic,
backend=backend,
eval_step_jax=eval_step_jax)
# | |
import datetime
import re
import uuid
from functools import partial
from unittest.mock import ANY, call
import pytest
from bs4 import BeautifulSoup
from flask import url_for
from freezegun import freeze_time
from app.main.views.platform_admin import (
create_global_stats,
format_stats_by_service,
get_tech_failure_status_box_data,
is_over_threshold,
sum_service_usage,
)
from tests import service_json
from tests.conftest import SERVICE_ONE_ID, SERVICE_TWO_ID, normalize_spaces
@pytest.mark.parametrize('endpoint', [
    'main.platform_admin',
    'main.live_services',
    'main.trial_services',
])
def test_should_redirect_if_not_logged_in(
    client,
    endpoint
):
    """Anonymous users are redirected to sign-in for every admin endpoint."""
    target = url_for(endpoint)
    response = client.get(target)
    expected_location = url_for('main.sign_in', next=target, _external=True)
    assert response.status_code == 302
    assert response.location == expected_location
@pytest.mark.parametrize('endpoint', [
    'main.platform_admin',
    'main.platform_admin_splash_page',
    'main.live_services',
    'main.trial_services',
])
def test_should_403_if_not_platform_admin(
    client_request,
    endpoint,
):
    # client_request is authenticated as an ordinary (non-platform-admin)
    # user, so every platform admin endpoint must respond 403 Forbidden.
    client_request.get(endpoint, _expected_status=403)
@pytest.mark.parametrize('endpoint, expected_services_shown', [
    ('main.live_services', 1),
    ('main.trial_services', 1),
])
def test_should_render_platform_admin_page(
    platform_admin_client,
    mock_get_detailed_services,
    endpoint,
    expected_services_shown
):
    """The services table renders zero-count stats for each message channel."""
    response = platform_admin_client.get(url_for(endpoint))
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    second_row_cells = page.select('tbody tr')[1].select('td')
    cell_texts = [normalize_spaces(cell.text) for cell in second_row_cells]
    assert cell_texts == [
        '0 emails sent', '0 text messages sent', '0 letters sent',
    ]
    mock_get_detailed_services.assert_called_once_with({
        'detailed': True,
        'include_from_test_key': True,
        'only_active': False,
    })
@pytest.mark.parametrize('endpoint', [
    'main.live_services',
    'main.trial_services',
])
@pytest.mark.parametrize('partial_url_for, inc', [
    (partial(url_for), True),
    (partial(url_for, include_from_test_key='y', start_date='', end_date=''), True),
    (partial(url_for, start_date='', end_date=''), False),
])
def test_live_trial_services_toggle_including_from_test_key(
    partial_url_for,
    platform_admin_client,
    mock_get_detailed_services,
    endpoint,
    inc
):
    """The include_from_test_key query arg controls what the API is asked for."""
    # With no query args at all, test-key traffic is included by default.
    response = platform_admin_client.get(partial_url_for(endpoint))
    assert response.status_code == 200
    expected_api_args = {
        'detailed': True,
        'only_active': False,
        'include_from_test_key': inc,
    }
    mock_get_detailed_services.assert_called_once_with(expected_api_args)
@pytest.mark.parametrize('endpoint', [
    'main.live_services',
    'main.trial_services'
])
def test_live_trial_services_with_date_filter(
    platform_admin_client,
    mock_get_detailed_services,
    endpoint
):
    """Date query strings are parsed into datetime.date objects for the API."""
    url = url_for(endpoint, start_date='2016-12-20', end_date='2016-12-28')
    response = platform_admin_client.get(url)
    assert response.status_code == 200
    assert 'Platform admin' in response.get_data(as_text=True)
    mock_get_detailed_services.assert_called_once_with({
        'include_from_test_key': False,
        'end_date': datetime.date(2016, 12, 28),
        'start_date': datetime.date(2016, 12, 20),
        'detailed': True,
        'only_active': False,
    })
@pytest.mark.parametrize('endpoint, expected_big_numbers', [
    (
        'main.live_services', (
            '55 emails sent 5 failed – 5.0%',
            '110 text messages sent 10 failed – 5.0%',
            '15 letters sent 3 failed – 20.0%'
        ),
    ),
    (
        'main.trial_services', (
            '6 emails sent 1 failed – 10.0%',
            '11 text messages sent 1 failed – 5.0%',
            '30 letters sent 10 failed – 33.3%'
        ),
    ),
])
def test_should_show_total_on_live_trial_services_pages(
    platform_admin_client,
    mock_get_detailed_services,
    endpoint,
    fake_uuid,
    expected_big_numbers,
):
    """Per-channel totals and failure rates are aggregated in the big numbers."""
    live_service = service_json(fake_uuid, 'My Service 1', [], restricted=False)
    trial_service = service_json(fake_uuid, 'My Service 2', [], restricted=True)
    live_service['statistics'] = create_stats(
        emails_requested=100,
        emails_delivered=50,
        emails_failed=5,
        sms_requested=200,
        sms_delivered=100,
        sms_failed=10,
        letters_requested=15,
        letters_delivered=12,
        letters_failed=3,
    )
    trial_service['statistics'] = create_stats(
        emails_requested=10,
        emails_delivered=5,
        emails_failed=1,
        sms_requested=20,
        sms_delivered=10,
        sms_failed=1,
        letters_requested=30,
        letters_delivered=20,
        letters_failed=10,
    )
    mock_get_detailed_services.return_value = {'data': [live_service, trial_service]}
    response = platform_admin_client.get(url_for(endpoint))
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    big_numbers = tuple(
        normalize_spaces(element.text)
        for element in page.select('.big-number-with-status')[:3]
    )
    assert big_numbers == expected_big_numbers
def test_create_global_stats_sets_failure_rates(fake_uuid):
    """Failure rate is failed/requested summed over services; no traffic gives '0'."""
    service_a = service_json(fake_uuid, 'a', [])
    service_b = service_json(fake_uuid, 'b', [])
    service_a['statistics'] = create_stats(
        emails_requested=1,
        emails_delivered=1,
        emails_failed=0,
    )
    service_b['statistics'] = create_stats(
        emails_requested=2,
        emails_delivered=1,
        emails_failed=1,
    )
    stats = create_global_stats([service_a, service_b])
    # sms and letter saw no traffic at all, so both get an all-zero summary.
    idle_channel = {
        'delivered': 0,
        'failed': 0,
        'requested': 0,
        'failure_rate': '0',
    }
    assert stats == {
        'email': {
            'delivered': 2,
            'failed': 1,
            'requested': 3,
            'failure_rate': '33.3',
        },
        'sms': idle_channel,
        'letter': idle_channel,
    }
def create_stats(
    emails_requested=0,
    emails_delivered=0,
    emails_failed=0,
    sms_requested=0,
    sms_delivered=0,
    sms_failed=0,
    letters_requested=0,
    letters_delivered=0,
    letters_failed=0
):
    """Build a service `statistics` dict with per-channel counters.

    Every counter defaults to zero so tests only need to name the
    values they care about.
    """
    def channel(requested, delivered, failed):
        # One channel's counters, in the shape the views expect.
        return {'requested': requested, 'delivered': delivered, 'failed': failed}

    return {
        'sms': channel(sms_requested, sms_delivered, sms_failed),
        'email': channel(emails_requested, emails_delivered, emails_failed),
        'letter': channel(letters_requested, letters_delivered, letters_failed),
    }
def test_format_stats_by_service_returns_correct_values(fake_uuid):
    """format_stats_by_service passes per-channel counts through unchanged."""
    services = [service_json(fake_uuid, 'a', [])]
    services[0]['statistics'] = create_stats(
        emails_requested=10,
        emails_delivered=3,
        emails_failed=5,
        sms_requested=50,
        sms_delivered=7,
        sms_failed=11,
        letters_requested=40,
        letters_delivered=20,
        letters_failed=7,
    )
    ret = list(format_stats_by_service(services))
    assert len(ret) == 1
    expected_counts = {
        'email': {'requested': 10, 'delivered': 3, 'failed': 5},
        'sms': {'requested': 50, 'delivered': 7, 'failed': 11},
        'letter': {'requested': 40, 'delivered': 20, 'failed': 7},
    }
    for channel, counts in expected_counts.items():
        for key, value in counts.items():
            assert ret[0]['stats'][channel][key] == value
@pytest.mark.parametrize('endpoint, restricted, research_mode', [
    ('main.trial_services', True, False),
    ('main.live_services', False, False)
])
def test_should_show_email_and_sms_stats_for_all_service_types(
    endpoint,
    restricted,
    research_mode,
    platform_admin_client,
    mock_get_detailed_services,
    fake_uuid,
):
    """Requested email/SMS counts show in the service row's big numbers."""
    service = service_json(
        fake_uuid, 'My Service', [], restricted=restricted, research_mode=research_mode)
    service['statistics'] = create_stats(
        emails_requested=10,
        emails_delivered=3,
        emails_failed=5,
        sms_requested=50,
        sms_delivered=7,
        sms_failed=11,
    )
    mock_get_detailed_services.return_value = {'data': [service]}
    response = platform_admin_client.get(url_for(endpoint))
    assert response.status_code == 200
    mock_get_detailed_services.assert_called_once_with({
        'detailed': True,
        'include_from_test_key': True,
        'only_active': ANY,
    })
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    table_body = page.find_all('table')[0].find_all('tbody')[0]
    service_row_group = table_body.find_all('tbody')[0].find_all('tr')
    big_numbers = service_row_group[1].select('.big-number-number')
    assert normalize_spaces(big_numbers[0].text) == '10'
    assert normalize_spaces(big_numbers[1].text) == '50'
@pytest.mark.parametrize('endpoint, restricted', [
    ('main.live_services', False),
    ('main.trial_services', True)
], ids=['live', 'trial'])
def test_should_show_archived_services_last(
    endpoint,
    platform_admin_client,
    mock_get_detailed_services,
    restricted,
):
    """Archived (inactive) services are listed after all active ones."""
    services = [
        service_json(name='C', restricted=restricted, active=False, created_at='2002-02-02 12:00:00'),
        service_json(name='B', restricted=restricted, active=True, created_at='2001-01-01 12:00:00'),
        service_json(name='A', restricted=restricted, active=True, created_at='2003-03-03 12:00:00'),
    ]
    for service in services:
        service['statistics'] = create_stats()
    mock_get_detailed_services.return_value = {'data': services}
    response = platform_admin_client.get(url_for(endpoint))
    assert response.status_code == 200
    mock_get_detailed_services.assert_called_once_with({
        'detailed': True,
        'include_from_test_key': True,
        'only_active': ANY,
    })
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    table_body = page.find_all('table')[0].find_all('tbody')[0]
    rows = [group.tr for group in table_body.find_all('tbody')]
    assert [normalize_spaces(row.td.text) for row in rows] == [
        'A', 'B', 'C Archived',
    ]
@pytest.mark.parametrize('endpoint, restricted, research_mode', [
    ('main.trial_services', True, False),
    ('main.live_services', False, False)
])
def test_should_order_services_by_usage_with_inactive_last(
    endpoint,
    restricted,
    research_mode,
    platform_admin_client,
    mock_get_detailed_services,
    fake_uuid,
):
    """Services sort by total usage (descending); inactive ones come last."""
    low_usage = service_json(
        fake_uuid, 'My Service 1', [], restricted=restricted, research_mode=research_mode)
    high_usage = service_json(
        fake_uuid, 'My Service 2', [], restricted=restricted, research_mode=research_mode)
    inactive = service_json(
        fake_uuid, 'My Service 3', [], restricted=restricted, research_mode=research_mode,
        active=False)
    low_usage['statistics'] = create_stats(
        emails_requested=100,
        emails_delivered=25,
        emails_failed=25,
        sms_requested=100,
        sms_delivered=25,
        sms_failed=25,
    )
    # The inactive service has the same (high) usage as service 2, but must
    # still be listed last because it is archived.
    high_usage['statistics'] = create_stats(
        emails_requested=200,
        emails_delivered=50,
        emails_failed=50,
        sms_requested=200,
        sms_delivered=50,
        sms_failed=50,
    )
    inactive['statistics'] = create_stats(
        emails_requested=200,
        emails_delivered=50,
        emails_failed=50,
        sms_requested=200,
        sms_delivered=50,
        sms_failed=50,
    )
    mock_get_detailed_services.return_value = {'data': [low_usage, high_usage, inactive]}
    response = platform_admin_client.get(url_for(endpoint))
    assert response.status_code == 200
    mock_get_detailed_services.assert_called_once_with({
        'detailed': True,
        'include_from_test_key': True,
        'only_active': ANY,
    })
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    table_body = page.find_all('table')[0].find_all('tbody')[0]
    rows = [group.tr for group in table_body.find_all('tbody')]
    assert [normalize_spaces(row.td.text) for row in rows] == [
        'My Service 2', 'My Service 1', 'My Service 3 Archived',
    ]
def test_sum_service_usage_is_sum_of_all_activity(fake_uuid):
    """Usage is the total of requested messages across channels."""
    service = service_json(fake_uuid, 'My Service 1')
    service['statistics'] = create_stats(
        emails_requested=100,
        emails_delivered=25,
        emails_failed=25,
        sms_requested=100,
        sms_delivered=25,
        sms_failed=25,
    )
    # 100 emails requested + 100 sms requested
    assert sum_service_usage(service) == 200
def test_sum_service_usage_with_zeros(fake_uuid):
    """Only requested counts contribute to usage; failures alone count for nothing."""
    service = service_json(fake_uuid, 'My Service 1')
    service['statistics'] = create_stats(
        emails_requested=0,
        emails_delivered=0,
        emails_failed=25,
        sms_requested=0,
        sms_delivered=0,
        sms_failed=0,
    )
    assert sum_service_usage(service) == 0
def test_platform_admin_list_complaints(
    platform_admin_client,
    mocker
):
    """The complaints page renders and fetches complaints from the API client."""
    complaint = {
        'id': str(uuid.uuid4()),
        'notification_id': str(uuid.uuid4()),
        'service_id': str(uuid.uuid4()),
        'service_name': 'Sample service',
        'ses_feedback_id': 'Some ses id',
        'complaint_type': 'abuse',
        'complaint_date': '2018-06-05T13:50:30.012354',
        'created_at': '2018-06-05T13:50:30.012354',
    }
    mock = mocker.patch(
        'app.complaint_api_client.get_all_complaints',
        return_value={'complaints': [complaint], 'links': {}},
    )
    response = platform_admin_client.get(url_for('main.platform_admin_list_complaints'))
    assert response.status_code == 200
    assert 'Email complaints' in response.get_data(as_text=True)
    assert mock.called
def test_should_show_complaints_with_next_previous(platform_admin_client, mocker, service_one, fake_uuid):
    """Pagination links point at the neighbouring complaint pages."""
    api_response = {
        'complaints': [{
            'complaint_date': None,
            'complaint_type': None,
            'created_at': '2017-12-18T05:00:00.000000Z',
            'id': fake_uuid,
            'notification_id': fake_uuid,
            'service_id': service_one['id'],
            'service_name': service_one['name'],
            'ses_feedback_id': 'None',
        }],
        'links': {'last': '/complaint?page=3', 'next': '/complaint?page=3', 'prev': '/complaint?page=1'},
    }
    mocker.patch('app.complaint_api_client.get_all_complaints', return_value=api_response)
    response = platform_admin_client.get(url_for('main.platform_admin_list_complaints', page=2))
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    next_link = page.find('a', {'rel': 'next'})
    prev_link = page.find('a', {'rel': 'previous'})
    assert url_for('main.platform_admin_list_complaints', page=3) in next_link['href']
    next_text = next_link.text.strip()
    assert 'Next page' in next_text
    assert 'page 3' in next_text
    assert url_for('main.platform_admin_list_complaints', page=1) in prev_link['href']
    prev_text = prev_link.text.strip()
    assert 'Previous page' in prev_text
    assert 'page 1' in prev_text
def test_platform_admin_list_complaints_returns_404_with_invalid_page(platform_admin_client, mocker):
    # A non-numeric `page` query argument should produce a 404, not a 500.
    mocker.patch('app.complaint_api_client.get_all_complaints', return_value={'complaints': [], 'links': {}})
    response = platform_admin_client.get(url_for('main.platform_admin_list_complaints', page='invalid'))
    assert response.status_code == 404
@pytest.mark.parametrize('number, total, threshold, result', [
    (0, 0, 0, False),
    (1, 1, 0, True),
    (2, 3, 66, True),
    (2, 3, 67, False),
])
def test_is_over_threshold(number, total, threshold, result):
    # `threshold` is a percentage of `total`; the zero-total case is never
    # considered over threshold (first parametrize case).
    assert is_over_threshold(number, total, threshold) is result
def test_get_tech_failure_status_box_data_removes_percentage_data():
    """The tech-failure status box data must not carry a 'percentage' key."""
    stats = {
        'failures': {
            'permanent-failure': 0,
            'technical-failure': 0,
            'temporary-failure': 1,
            'virus-scan-failed': 0,
        },
        'test-key': 0,
        'total': 5589,
    }
    assert 'percentage' not in get_tech_failure_status_box_data(stats)
def test_platform_admin_splash_doesnt_talk_to_api(
    client_request,
    platform_admin_user,
):
    """The splash page renders a link through to the full admin page."""
    client_request.login(platform_admin_user)
    page = client_request.get('main.platform_admin_splash_page')
    link = page.select_one('main .govuk-body a')
    assert link['href'] == url_for(
        'main.platform_admin',
    )
def test_platform_admin_with_start_and_end_dates_provided(mocker, platform_admin_client):
    """Explicit start/end dates are parsed and forwarded to both API clients."""
    expected_api_args = {
        'start_date': datetime.date(2018, 1, 1),
        'end_date': datetime.date(2018, 6, 1),
    }
    mocker.patch('app.main.views.platform_admin.make_columns')
    aggregate_stats_mock = mocker.patch(
        'app.main.views.platform_admin.platform_stats_api_client.get_aggregate_platform_stats')
    complaint_count_mock = mocker.patch(
        'app.main.views.platform_admin.complaint_api_client.get_complaint_count')
    platform_admin_client.get(
        url_for('main.platform_admin', start_date='2018-01-01', end_date='2018-06-01')
    )
    aggregate_stats_mock.assert_called_with(expected_api_args)
    complaint_count_mock.assert_called_with(expected_api_args)
@freeze_time('2018-6-11')
def test_platform_admin_with_only_a_start_date_provided(mocker, platform_admin_client):
    """With only a start date, the end date defaults to today (frozen time)."""
    expected_api_args = {
        'start_date': datetime.date(2018, 1, 1),
        'end_date': datetime.datetime.utcnow().date(),
    }
    mocker.patch('app.main.views.platform_admin.make_columns')
    aggregate_stats_mock = mocker.patch(
        'app.main.views.platform_admin.platform_stats_api_client.get_aggregate_platform_stats')
    complaint_count_mock = mocker.patch(
        'app.main.views.platform_admin.complaint_api_client.get_complaint_count')
    platform_admin_client.get(url_for('main.platform_admin', start_date='2018-01-01'))
    aggregate_stats_mock.assert_called_with(expected_api_args)
    complaint_count_mock.assert_called_with(expected_api_args)
def test_platform_admin_without_dates_provided(mocker, platform_admin_client):
    """No date filters means the API clients are called with empty args."""
    mocker.patch('app.main.views.platform_admin.make_columns')
    aggregate_stats_mock = mocker.patch(
        'app.main.views.platform_admin.platform_stats_api_client.get_aggregate_platform_stats')
    complaint_count_mock = mocker.patch(
        'app.main.views.platform_admin.complaint_api_client.get_complaint_count')
    platform_admin_client.get(url_for('main.platform_admin'))
    aggregate_stats_mock.assert_called_with({})
    complaint_count_mock.assert_called_with({})
def test_platform_admin_displays_stats_in_right_boxes_and_with_correct_styling(
mocker,
platform_admin_client,
):
platform_stats = {
'email': {'failures':
{'permanent-failure': 3, 'technical-failure': 0, 'temporary-failure': 0, 'virus-scan-failed': 0},
'test-key': 0,
'total': 145},
'sms': {'failures':
{'permanent-failure': 0, 'technical-failure': 1, 'temporary-failure': 0, 'virus-scan-failed': 0},
'test-key': 5,
'total': 168},
'letter': {'failures':
{'permanent-failure': 0, 'technical-failure': 0, 'temporary-failure': 1, 'virus-scan-failed': 1},
'test-key': 0,
'total': 500}
}
mocker.patch('app.main.views.platform_admin.platform_stats_api_client.get_aggregate_platform_stats',
return_value=platform_stats)
mocker.patch('app.main.views.platform_admin.complaint_api_client.get_complaint_count', return_value=15)
response = platform_admin_client.get(url_for('main.platform_admin'))
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
# Email permanent failure status box - number is correct
assert '3 permanent failures' in page.find_all(
'div', class_='govuk-grid-column-one-third'
)[0].find(string=re.compile('permanent'))
# Email complaints status box - link exists and number is correct
assert page.find('a', string='15 complaints')
# SMS total box - number is correct
assert page.find_all('span', class_='big-number-number')[1].text.strip() == '168'
# Test SMS box - number is correct
assert '5' in page.find_all('div', class_='govuk-grid-column-one-third')[4].text
# SMS technical failure status box - number is correct and failure class is used
assert '1 technical failures' in page.find_all('div', class_='govuk-grid-column-one-third')[1].find(
'div', class_='big-number-status-failing').text
# Letter virus scan failure status box - number is correct and failure class is used
assert '1 | |
\x00 single null byte
:raises PaddingError: pad is less than 1 bytes in length
Example::
>>> d = NullStripped(Byte)
>>> d.parse(b'\xff\x00\x00')
255
>>> d.build(255)
b'\xff'
"""
    def __init__(self, subcon, pad=b"\x00"):
        # pad may be multi-byte; parsing then strips it in pad-sized units.
        super(NullStripped, self).__init__(subcon)
        self.pad = pad
    def _parse(self, stream, context, path):
        """Read the whole stream, strip trailing pad units, then parse subcon."""
        pad = self.pad
        unit = len(pad)
        if unit < 1:
            raise PaddingError("NullStripped pad must be at least 1 byte")
        # Stripping operates on in-memory data, so consume everything first.
        data = stream_read_entire(stream)
        if unit == 1:
            # Single-byte pad: bytes.rstrip handles it in one C-level pass.
            data = data.rstrip(pad)
        else:
            # Multi-byte pad: remove whole pad units from the tail. A trailing
            # partial unit that matches a prefix of the pad is removed first,
            # then complete units are peeled off one at a time.
            tailunit = len(data) % unit
            end = len(data)
            if tailunit and data[-tailunit:] == pad[:tailunit]:
                end -= tailunit
            while end-unit >= 0 and data[end-unit:end] == pad:
                end -= unit
            data = data[:end]
        # Fast paths for greedy terminal subcons avoid a BytesIO round-trip.
        if self.subcon is GreedyBytes:
            return data
        if type(self.subcon) is GreedyString:
            return data.decode(self.subcon.encoding)
        return self.subcon._parsereport(io.BytesIO(data), context, path)
    def _build(self, obj, stream, context, path):
        # Building adds no padding; the subcon writes straight to the stream.
        return self.subcon._build(obj, stream, context, path)
    def _sizeof(self, context, path):
        # The amount of stripped padding is data-dependent, so size is unknown.
        raise SizeofError
    def _emitfulltype(self, ksy, bitwise):
        # Kaitai's pad_right attribute takes a single byte value, so
        # multi-byte pads cannot be compiled to a .ksy schema.
        if len(self.pad) > 1:
            raise NotImplementedError
        return dict(pad_right=byte2int(self.pad), **self.subcon._compilefulltype(ksy, bitwise))
class RestreamData(Subconstruct):
    r"""
    Parses a field on external data (but does not build).
    Parsing defers to subcon, but provides it a separate BytesIO stream based on data provided by datafunc (a bytes literal or another BytesIO stream or Construct instances that returns bytes or context lambda). Building does nothing. Size is 0 because as far as other fields see it, this field does not produce or consume any bytes from the stream.
    :param datafunc: bytes or BytesIO or Construct instance (that parses into bytes) or context lambda, provides data for subcon to parse from
    :param subcon: Construct instance
    Can propagate any exception from the lambdas, possibly non-ConstructError.
    Example::
        >>> d = RestreamData(b"\x01", Int8ub)
        >>> d.parse(b"")
        1
        >>> d.build(0)
        b''
        >>> d = RestreamData(NullTerminated(GreedyBytes), Int16ub)
        >>> d.parse(b"\x01\x02\x00")
        258
        >>> d = RestreamData(FixedSized(2, GreedyBytes), Int16ub)
        >>> d.parse(b"\x01\x02\x00")
        258
    """
    def __init__(self, datafunc, subcon):
        super(RestreamData, self).__init__(subcon)
        self.datafunc = datafunc
        # Building consumes no value, so this field can be built from None.
        self.flagbuildnone = True
    def _parse(self, stream, context, path):
        data = evaluate(self.datafunc, context)
        # Sequential type dispatch (deliberately plain `if`s, not elif);
        # exactly one branch should match a supported datafunc result.
        # NOTE(review): an unsupported type leaves stream2 unbound and
        # raises NameError -- confirm callers only supply bytes/BytesIO/Construct.
        if isinstance(data, bytestringtype):
            stream2 = io.BytesIO(data)
        if isinstance(data, io.BytesIO):
            stream2 = data
        if isinstance(data, Construct):
            # A Construct datafunc extracts its bytes from the *real* stream.
            stream2 = io.BytesIO(data._parsereport(stream, context, path))
        return self.subcon._parsereport(stream2, context, path)
    def _build(self, obj, stream, context, path):
        # Building writes nothing; the value is passed through unchanged.
        return obj
    def _sizeof(self, context, path):
        # Consumes no bytes from the enclosing stream.
        return 0
    def _emitparse(self, code):
        return "restream(%r, lambda io: %s)" % (self.datafunc, self.subcon._compileparse(code), )
class Transformed(Subconstruct):
    r"""
    Transforms bytes between the underlying stream and the (fixed-sized) subcon.
    Parsing reads a specified amount (or till EOF), processes data using a bytes-to-bytes decoding function, then parses subcon using those data. Building does build subcon into separate bytes, then processes it using encoding bytes-to-bytes function, then writes those data into main stream. Size is reported as `decodeamount` or `encodeamount` if those are equal, otherwise its SizeofError.
    Used internally to implement :class:`~construct.core.Bitwise` :class:`~construct.core.Bytewise` :class:`~construct.core.ByteSwapped` :class:`~construct.core.BitsSwapped` .
    Possible use-cases include encryption, obfuscation, byte-level encoding.
    .. warning:: Remember that subcon must consume (or produce) an amount of bytes that is same as `decodeamount` (or `encodeamount`).
    .. warning:: Do NOT use seeking/telling classes inside Transformed context.
    :param subcon: Construct instance
    :param decodefunc: bytes-to-bytes function, applied before parsing subcon
    :param decodeamount: integer, amount of bytes to read
    :param encodefunc: bytes-to-bytes function, applied after building subcon
    :param encodeamount: integer, amount of bytes to write
    :raises StreamError: requested reading negative amount, could not read enough bytes, requested writing different amount than actual data, or could not write all bytes
    :raises StreamError: subcon build and encoder transformed more or less than `encodeamount` bytes, if amount is specified
    :raises StringError: building from non-bytes value, perhaps unicode
    Can propagate any exception from the lambdas, possibly non-ConstructError.
    Example::
        >>> d = Transformed(Bytes(16), bytes2bits, 2, bits2bytes, 2)
        >>> d.parse(b"\x00\x00")
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        >>> d = Transformed(GreedyBytes, bytes2bits, None, bits2bytes, None)
        >>> d.parse(b"\x00\x00")
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    """
    def __init__(self, subcon, decodefunc, decodeamount, encodefunc, encodeamount):
        super(Transformed, self).__init__(subcon)
        self.decodefunc = decodefunc
        self.decodeamount = decodeamount
        self.encodefunc = encodefunc
        self.encodeamount = encodeamount
    def _parse(self, stream, context, path):
        # decodeamount of None means greedy: consume the rest of the stream.
        if isinstance(self.decodeamount, type(None)):
            data = stream_read_entire(stream)
        if isinstance(self.decodeamount, integertypes):
            data = stream_read(stream, self.decodeamount)
        data = self.decodefunc(data)
        # Fast paths for greedy terminal subcons avoid a BytesIO round-trip.
        if self.subcon is GreedyBytes:
            return data
        if type(self.subcon) is GreedyString:
            return data.decode(self.subcon.encoding)
        return self.subcon._parsereport(io.BytesIO(data), context, path)
    def _build(self, obj, stream, context, path):
        # Build into a scratch buffer so the full payload can be transformed
        # before any bytes reach the real stream.
        stream2 = io.BytesIO()
        buildret = self.subcon._build(obj, stream2, context, path)
        data = stream2.getvalue()
        data = self.encodefunc(data)
        if isinstance(self.encodeamount, integertypes):
            if len(data) != self.encodeamount:
                raise StreamError("encoding transformation produced wrong amount of bytes, %s instead of expected %s" % (len(data), self.encodeamount, ))
        stream_write(stream, data)
        return buildret
    def _sizeof(self, context, path):
        # Size is only well-defined when both amounts are known and agree.
        if self.decodeamount is None or self.encodeamount is None:
            raise SizeofError
        if self.decodeamount == self.encodeamount:
            return self.encodeamount
        raise SizeofError
class Restreamed(Subconstruct):
    r"""
    Transforms bytes between the underlying stream and the (variable-sized) subcon.
    Used internally to implement :class:`~construct.core.Bitwise` :class:`~construct.core.Bytewise` :class:`~construct.core.ByteSwapped` :class:`~construct.core.BitsSwapped` .
    .. warning:: Remember that subcon must consume or produce an amount of bytes that is a multiple of encoding or decoding units. For example, in a Bitwise context you should process a multiple of 8 bits or the stream will fail during parsing/building.
    .. warning:: Do NOT use seeking/telling classes inside Restreamed context.
    :param subcon: Construct instance
    :param decoder: bytes-to-bytes function, used on data chunks when parsing
    :param decoderunit: integer, decoder takes chunks of this size
    :param encoder: bytes-to-bytes function, used on data chunks when building
    :param encoderunit: integer, encoder takes chunks of this size
    :param sizecomputer: function that computes amount of bytes outputed
    Can propagate any exception from the lambda, possibly non-ConstructError.
    Can also raise arbitrary exceptions in RestreamedBytesIO implementation.
    Example::
        Bitwise <--> Restreamed(subcon, bits2bytes, 8, bytes2bits, 1, lambda n: n//8)
        Bytewise <--> Restreamed(subcon, bytes2bits, 1, bits2bytes, 8, lambda n: n*8)
    """
    def __init__(self, subcon, decoder, decoderunit, encoder, encoderunit, sizecomputer):
        super(Restreamed, self).__init__(subcon)
        self.decoder = decoder
        self.decoderunit = decoderunit
        self.encoder = encoder
        self.encoderunit = encoderunit
        self.sizecomputer = sizecomputer
    def _parse(self, stream, context, path):
        # Wrap the real stream so the subcon reads decoded chunks; close()
        # flushes/validates any leftover buffered data.
        stream2 = RestreamedBytesIO(stream, self.decoder, self.decoderunit, self.encoder, self.encoderunit)
        obj = self.subcon._parsereport(stream2, context, path)
        stream2.close()
        return obj
    def _build(self, obj, stream, context, path):
        stream2 = RestreamedBytesIO(stream, self.decoder, self.decoderunit, self.encoder, self.encoderunit)
        buildret = self.subcon._build(obj, stream2, context, path)
        stream2.close()
        # NOTE(review): returns obj rather than buildret (unlike
        # Transformed._build); looks deliberate but worth confirming.
        return obj
    def _sizeof(self, context, path):
        # Size in outer-stream bytes is derived from the subcon's size via
        # the user-supplied sizecomputer (e.g. bits -> bytes).
        if self.sizecomputer is None:
            raise SizeofError("Restreamed cannot calculate size without a sizecomputer")
        else:
            return self.sizecomputer(self.subcon._sizeof(context, path))
class ProcessXor(Subconstruct):
    r"""
    Transforms bytes between the underlying stream and the subcon.
    Used internally by KaitaiStruct compiler, when translating `process: xor` tags.
    Parsing reads till EOF, xors data with the pad, then feeds that data into subcon. Building first builds the subcon into separate BytesIO stream, xors data with the pad, then writes that data into the main stream. Size is the same as subcon, unless it raises SizeofError.
    :param padfunc: integer or bytes or context lambda, single or multiple bytes to xor data with
    :param subcon: Construct instance
    :raises StringError: pad is not integer or bytes
    Can propagate any exception from the lambda, possibly non-ConstructError.
    Example::
        >>> d = ProcessXor(0xf0 or b'\xf0', Int16ub)
        >>> d.parse(b"\x00\xff")
        0xf00f
        >>> d.sizeof()
        2
    """
    def __init__(self, padfunc, subcon):
        super(ProcessXor, self).__init__(subcon)
        self.padfunc = padfunc
    def _parse(self, stream, context, path):
        pad = evaluate(self.padfunc, context)
        if not isinstance(pad, (integertypes, bytestringtype)):
            raise StringError("ProcessXor needs integer or bytes pad")
        # Normalize a single-byte pad to an int so the cheaper path is taken.
        if isinstance(pad, bytestringtype) and len(pad) == 1:
            pad = byte2int(pad)
        data = stream_read_entire(stream)
        if isinstance(pad, integertypes):
            # XOR with 0 is a no-op, so skip the per-byte loop entirely.
            if not (pad == 0):
                data = integers2bytes( (b ^ pad) for b in iterateints(data) )
        if isinstance(pad, bytestringtype):
            # Likewise skip an all-zero pad (checked only up to 64 bytes).
            if not (len(pad) <= 64 and pad == bytes(len(pad))):
                data = integers2bytes( (b ^ p) for b,p in zip(iterateints(data), itertools.cycle(iterateints(pad))) )
        # Fast paths for greedy terminal subcons avoid a BytesIO round-trip.
        if self.subcon is GreedyBytes:
            return data
        if type(self.subcon) is GreedyString:
            return data.decode(self.subcon.encoding)
        return self.subcon._parsereport(io.BytesIO(data), context, path)
    def _build(self, obj, stream, context, path):
        pad = evaluate(self.padfunc, context)
        if not isinstance(pad, (integertypes, bytestringtype)):
            raise StringError("ProcessXor needs integer or bytes pad")
        if isinstance(pad, bytestringtype) and len(pad) == 1:
            pad = byte2int(pad)
        # Build into a scratch buffer first, XOR, then write to the stream.
        stream2 = io.BytesIO()
        buildret = self.subcon._build(obj, stream2, context, path)
        data = stream2.getvalue()
        if isinstance(pad, integertypes):
            if not (pad == 0):
                data = integers2bytes( (b ^ pad) for b in iterateints(data) )
        if isinstance(pad, bytestringtype):
            if not (len(pad) <= 64 and pad == bytes(len(pad))):
                data = integers2bytes( (b ^ p) for b,p in zip(iterateints(data), itertools.cycle(iterateints(pad))) )
        stream_write(stream, data)
        return buildret
| |
to CUDA in the future
"""
if type(M)==type(1) or type(M)==type(1.1) or type(M)==type(self.matrix) :
matrix = self.matrix * M
name=self.name+"_times_"+ str(M)
if type(M)==type(self):
matrix = self.matrix * M.matrix
name=self.name+"_times_"+ M.name
return DBZ(dataTime=self.dataTime, matrix=matrix,\
dt=self.dt, dx=self.dx, dy=self.dy,\
name =name,
dataPath =self.outputPath+"_times_"+str(M)+".dat",\
outputPath=self.outputPath+"_times_"+str(M)+".dat",\
imagePath =self.imagePath +"_times_"+str(M)+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
# end basic operator overloads
##################################
############################################################
# basic i/o's
    def load(self):
        """
        DBZ.load - load into DBZ.matrix
        adapted from basics.readToArray(path)
        """
        m = np.loadtxt(self.dataPath)
        self.matrix = ma.array(m)
        # setting the mask
        # fill_value is what toArray()/ma.filled() substitutes for masked cells
        self.matrix.fill_value = -999  # -999 for missing values
        # self.matrix.fill_value = -20.1 # -20 for missing values
        self.matrix.mask = (m < -20)  # smaller than -20 considered no echo
        # 1 March 2013
        ##
        # THE FOLLOWING IS SKIPPED TO SAVE MEMORY
        # loading coastal data
        #try:
        #    self.coastData = np.loadtxt(self.coastDataPath)
        #except:
        #    print "Cannot load coast data from the path: ", self.coastDataPath
    def loadCoast(self):
        # Coastline point list; loaded lazily by showWithCoast()/drawCoast().
        self.coastData = np.loadtxt(self.coastDataPath)
    def load100(self):
        # NOTE(review): the 100m relief contour is loaded into self.coastData
        # (not a separate attribute), replacing any loaded coastline -- confirm intended.
        self.coastData = np.loadtxt(self.relief100DataPath)
    def load1000(self):
        # 1000m relief contour; overwrites self.coastData like load100().
        self.coastData = np.loadtxt(self.relief1000DataPath)
    def load2000(self):
        # 2000m relief contour; overwrites self.coastData like load100().
        self.coastData = np.loadtxt(self.relief2000DataPath)
    def load3000(self):
        # 3000m relief contour; overwrites self.coastData like load100().
        self.coastData = np.loadtxt(self.relief3000DataPath)
def toArray(self):
"""convert return a normal array filled with -999 for missing values for other uses
"""
return ma.filled(self.matrix)
    def save(self):
        """
        * We convert the masked array into a standard array with masked data filled by -999
        * adapted from basics.writeArrayToTxtFile(arr, path, as_integer=False):
            if as_integer:
                np.savetxt(path, arr, fmt='%.0f') # 0 decimal place
            else:
                np.savetxt(path, arr, fmt='%.2f') # two decimal places as default
        """
        # unlike the helper quoted above, no fmt= is passed here, so numpy's
        # default float formatting is used; masked cells become -999 via toArray()
        np.savetxt(self.outputPath, self.toArray())
def saveMatrix(self):
""" alias for self.save()
"""
self.save()
    def makeImage(self, matrix="", vmin=99999, vmax=-99999, cmap="", title="",\
                  showColourbar=True, closeAll=True):
        """
        requires: matplotlib
        to make the plot before you save/print it to screen
        *adapted from basics.printToScreen(m,cmap='gray'):
            which was in turn adapted from stackoverflow:
            http://stackoverflow.com/questions/7875688/how-can-i-create-a-standard-colorbar-for-a-series-of-plots-in-python
        def printToScreen(m,cmap='gray'):
            fig, axes = plt.subplots(nrows=1, ncols=1)
            # The vmin and vmax arguments specify the color limits
            im = axes.imshow(m, vmin=-20, vmax=100, cmap=cmap)
            cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
            fig.colorbar(im, cax=cax)
            plt.show()
        !!! TO DO: FIX THE AXES !!!
        """
        # sentinel defaults: "" / 99999 / -99999 mean "use the value from self"
        if isinstance(matrix, str):
            matrix = self.matrix
        if title =="":
            title = self.name
        if cmap == "":
            cmap = self.cmap
        if vmin == 99999:
            vmin = self.vmin
        if vmax == -99999:
            vmax = self.vmax
        # clear the canvass
        if closeAll:
            #plt.clf()
            plt.close()
        # make the image
        fig, axes = plt.subplots(nrows=1, ncols=1)
        im = axes.imshow(matrix,  # or np.flipud(self.matrix)?
                         vmin=vmin, vmax=vmax, cmap=cmap)  # The vmin and vmax arguments
                                                           # specify the color limits
        plt.title(title)
        if showColourbar :
            cax = fig.add_axes([0.9, 0.1, 0.01, 0.8])
            fig.colorbar(im,cax=cax)
        #plt.show()  # wait, don't show!
def saveImage(self):
self.makeImage()
plt.savefig(self.imagePath, dpi=200)
def printToScreen(self, matrix="", cmap=""):
self.makeImage(matrix=matrix, cmap=cmap)
plt.show()
def show(self, matrix="", cmap=""):
"""alias to printToScreen()
"""
self.printToScreen(matrix=matrix, cmap=cmap)
    def showWithFlip(self, cmap=""):
        """flip it upside down and show it
        """
        # NOTE(review): the flip is applied in place and never undone, so each
        # call toggles the stored orientation of self.matrix (cf. showInverted,
        # which flips back after printing) -- confirm this is intended.
        self.matrix = np.flipud(self.matrix)
        self.printToScreen(cmap=cmap)
    def showWithCoast(self, matrix="", cmap='', intensity=9999):
        """Display the pattern with the coastline overlaid.

        Coastline points are ADDED (+= intensity) into self.matrix in place;
        the original matrix/cmap are kept in self.matrix_backup/self.cmap_backup
        so showWithoutCoast()/show3() can restore them.
        """
        if matrix=="":
            matrix=self.matrix
        try:
            if self.showingWithCoast:  # if already showing coast: do nothing
                self.show(matrix=matrix)
                return None  # just show and go
        except AttributeError:  # if it didn't happen before: default = False
            self.showingWithCoast = False  # just do something
        self.showingWithCoast = True
        self.matrix_backup = self.matrix.copy()
        if cmap != '':
            self.cmap_backup = self.cmap
            self.cmap = cmap
        else:
            self.cmap_backup = self.cmap
        try:
            if self.coastData == "" : print "haha" #test for existence
        except AttributeError:
            self.loadCoast()
            print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
        # `matrix` (by default) aliases self.matrix, so the in-place overlay
        # below is visible in the matrix handed to show()
        for v in self.coastData:
            self.matrix[v[0], v[1]] += intensity
        self.show(matrix=matrix)
def show2(self, cmap='', intensity=99999):
""" adding the coastline and then flip it
"""
try:
if self.showingWithCoast: # if already showing coast: do nothing
self.show()
return None # just show and go
except AttributeError: # if it didn't happen before: default = False
self.showingWithCoast = False # just do something
self.showingWithCoast = True
self.matrix_backup = self.matrix.copy()
if cmap != '':
self.cmap_backup = self.cmap.copy()
self.cmap = cmap
else:
self.cmap_backup = self.cmap
try:
if self.coastData == "" : print "haha" #test for existence
except AttributeError:
self.loadCoast()
print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
for v in self.coastData:
self.matrix[v[0], v[1]] = intensity
self.matrix = np.flipud(self.matrix)
self.printToScreen(cmap=cmap)
def showWithoutCoast(self):
"""resetting
"""
self.showingWithCoast = False
self.cmap = self.cmap_backup
self.matrix = self.matrix_backup
self.show()
def show3(self):
"""alias
"""
self.showWithoutCoast()
def showInverted(self):
self.matrix = np.flipud(self.matrix)
self.printToScreen()
self.matrix = np.flipud(self.matrix)
def show0(self):
"""alias
"""
self.showInverted()
def show4(self):
"""alias
"""
self.showInverted()
def backupMatrix(self, name=""):
"""backing up self.matrix for analysis
paired with self.restoreMatrix()
"""
try:
self.backupCount += 1
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
except AttributeError:
self.backupCount = 0
self.matrix_backups = {}
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
def restoreMatrix(self, name =""):
"""see self.backupMatrix() for comments
"""
if name =="":
name = self.backupCount
self.matrix = self.matrix_backups[name].copy()
# end basic i/o's
############################################################
#############################################################
# new objects from old
    def copy(self):
        """returning a copy of itself
        9 March 2013
        """
        # the matrix is deep-copied; paths, cmap, database etc. are shared
        # references (strings / intentionally common objects)
        return DBZ(dataTime =self.dataTime,
                   matrix =self.matrix.copy(),
                   name =self.name,
                   dt =self.dt,
                   dx =self.dx,
                   dy =self.dy,
                   dataPath =self.dataPath,
                   outputPath=self.outputPath,
                   imagePath=self.imagePath,
                   coastDataPath=self.coastDataPath,
                   database =self.database,
                   cmap =self.cmap,
                   vmin =self.vmin,
                   vmax =self.vmax,
                   coordinateOrigin= self.coordinateOrigin,
                   lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
                   upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
                   verbose =self.verbose)
    def drawCross(self, i="", j="", radius=5, intensity=9999):
        """to draw a cross (+) at the marked point
        """
        # i is the row, j the column; both default to the coordinate origin
        if i=="" or j=="":
            i=self.coordinateOrigin[0]
            j=self.coordinateOrigin[1]
        # draw on a copy so self.matrix is untouched
        matrix=self.matrix.copy()
        matrix[i-radius:i+radius+1, j ] = intensity
        matrix[i , j-radius:j+radius+1] = intensity
        return DBZ(dataTime =self.dataTime,
                   matrix = matrix,
                   name =self.name + \
                        ", cross at x,y=(%d,%d), radius=%d" %\
                        (j, i, radius),
                   dt =self.dt,
                   dx =self.dx,
                   dy =self.dy,
                   dataPath =self.dataPath,
                   outputPath=self.outputPath,
                   imagePath=self.imagePath,
                   coastDataPath=self.coastDataPath,
                   database =self.database,
                   cmap =self.cmap,
                   vmin =self.vmin,
                   vmax =self.vmax,
                   coordinateOrigin= self.coordinateOrigin,
                   lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
                   upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
                   verbose =self.verbose)
    def drawCoast(self, intensity=9999, newCopy=False):
        """
        adapted from DBZ.show2()
        """
        # by default the coastline is burned into self.matrix IN PLACE;
        # pass newCopy=True to draw on (and return) a copy instead
        if newCopy:
            a = self.copy() # no need for this i guess!!!
        else:
            a = self
        try:
            if a.coastData == "" : print "haha" #test for existence
        except AttributeError:
            a.loadCoast()
            print "\n... coast data loaded from ", a.coastDataPath, "for ", a.name
        for v in a.coastData:
            a.matrix[v[0], v[1]] = intensity
        return a
    def recentreTaichungPark(self):
        """
        2013-08-27
        use:
            a = pattern.a
            a.showTaichungPark()
        takes as input
            attributes:
                lowerLeftCornerLatitudeLongitude
                upperRightCornerLatitudeLongitude
            constants:
                taichung park coordinates (24.145056°N 120.683329°E)
        changes:
            self.coordinateOrigin
            self.O
        returns:
            grid square for taichung park
        """
        #global taichungParkLatitude, taichungParkLongitude
        height, width = self.matrix.shape
        i0 = taichungParkLatitude  #defined in defaultParameters.py
        j0 = taichungParkLongitude
        # the above two lines dont work, here's a hack fix
        #import defaultParameters
        #j0 = defaultParameters.taichungParkLongitude
        #i0 = defaultParameters.taichungParkLatitude
        i1, j1 = self.lowerLeftCornerLatitudeLongitude
        i2, j2 = self.upperRightCornerLatitudeLongitude
        # linear interpolation of the park's lat/long into grid coordinates
        i3 = 1.*(i0-i1)*height/(i2-i1)  # (latitudeTCP-latLowerleft) * grid per latitude
        j3 = 1.*(j0-j1)*width/(j2-j1)   # ditto for longitude
        self.coordinateOrigin = (i3,j3)
        self.O = (i3,j3)  # short alias kept in sync with coordinateOrigin
        return i3, j3
def recentre(self):
"""alias for recentreTaichungPark(self)
"""
return self.recentreTaichungPark()
def recenter(self):
"""alias for recentreTaichungPark(self)
"""
return self.recentreTaichungPark()
def drawRectangle(self, bottom=0, left=0, height=100, width=100, intensity=9999):
""" return a copy with a rectangle on the image
"""
vmax = self.vmax
matrix = self.matrix.copy()
for i in range(bottom, bottom+height):
matrix[i , left:left+2] = intensity
matrix[i , left+width] = intensity
for j in range(left, left+width):
matrix[bottom:bottom+2, j] = intensity
matrix[bottom+height, j] = intensity
return DBZ(dataTime =self.dataTime,
matrix = matrix,
name =self.name + \
", rectangle at x,y=(%d,%d), width=%d, height=%d" %\
(left, bottom, width, height),
dt =self.dt,
dx =self.dx,
dy =self.dy,
dataPath =self.dataPath,
outputPath=self.outputPath,
imagePath=self.imagePath,
coastDataPath=self.coastDataPath,
database =self.database,
cmap =self.cmap,
vmin =self.vmin,
vmax =self.vmax,
coordinateOrigin= self.coordinateOrigin,
verbose =self.verbose)
    def getWindow(self, bottom=0, left=0, height=100, width=100):
        """return a dbz object, a window view of itself
        """
        name = self.name +'_windowed' + '_bottom' + str(bottom) +\
               '_left' + str(left) + '_height' + str(height) + '_width' + str(width)
        # despite the name "view", this is a copy: edits to the window do not
        # write back into self.matrix
        matrix = self.matrix.copy()
        matrix = matrix[bottom:bottom+height, left:left+width]
        return DBZ(dataTime =self.dataTime,
                   matrix = matrix,
                   name = name,
                   dt =self.dt,
                   dx =self.dx,
                   dy =self.dy,
                   dataPath =self.dataPath,
                   outputPath=self.outputPath,
                   imagePath=self.imagePath,
                   coastDataPath=self.coastDataPath,
                   database =self.database,
                   cmap =self.cmap,
                   vmin =self.vmin,
                   vmax =self.vmax,
                   coordinateOrigin = (height//2, width//2) , #hack: window centre
                   verbose =self.verbose)
def shiftMatrix(self,i,j):
"""shifting the array/dbz pattern; masking the edge
codes migrated from shiiba.py (now armor.shiiba.regression) to here
i = shift in | |
#!/usr/bin/env python3
import argparse
import datetime
import json
import os
import requests
import time
from pathlib import Path
DATA_DIR = "./data/"
class Leaderboard:
    """Detailed view of a private Advent of Code leaderboard.

    Downloads the leaderboard JSON (cached on disk for 15 minutes), turns each
    player's star timestamps into per-day durations (S1 = time to first star,
    S2 = time to second star, Δt = S2 - S1) and prints a table sorted by one
    of the SORTBYS strategies, either as plain text or as HTML.
    """

    # Eight-blank placeholder for an unearned star (width of "hh:mm:ss").
    NO_TIME = '        '

    # Sort strategies: each maps a player record to an ascending sort key.
    # 'local'/'stars' mirror the official site; 'dtlocal' ranks each day's
    # results by Δt only (key built in __process as player['dt_sortkey']).
    SORTBYS = {
        'local': lambda p: (-p['local_score'], p['last_star_ts']),
        'stars': lambda p: (-p['stars'], p['last_star_ts']),
        'dtlocal': lambda p: p['dt_sortkey']
    }

    def __init__(self, year, code, sortby, sortlink):
        # year/code identify the leaderboard; sortby is a SORTBYS key;
        # sortlink is None for plain text output, or a [prefix, slot, suffix]
        # template used to build the HTML sort links.
        self.year = year
        self.code = code
        self.sortname = sortby
        self.sortby = Leaderboard.SORTBYS[sortby]
        self.usehtml = sortlink is not None
        self.sortlink = sortlink
        self.__process()
        # layout knob used only at print time, so setting it after __process is safe
        self.repeat_name_every_n_columns = 4

    def __process(self):
        """Fetch the data and precompute durations, ranks and column widths."""
        data = self.__get_json()
        self.players = [p for p in data['members'].values()]
        self.ndays = max(len(p['completion_day_level']) for p in self.players)
        # per-day, per-column (1=S1, 2=S2, 3=Δt) [min, max] durations, used
        # for the HTML opacity scaling in __printing_time_field
        self.minmaxts = {
            d: {s: [float('inf'), 0]
                for s in range(1, 4)}
            for d in range(1, self.ndays + 1)
        }
        for player in self.players:
            last_star_ts = player['last_star_ts']
            if last_star_ts == "0":
                # API reports "0" for players with no stars -- sort them last
                last_star_ts = float('inf')
            player['dt_sortkey'] = [0, 0, last_star_ts]
            for day, stars in player['completion_day_level'].items():
                # puzzles unlock at 05:00 UTC (midnight US/Eastern)
                release_time = datetime.datetime(
                    year=self.year,
                    month=12,
                    day=int(day),
                    hour=5,
                    tzinfo=datetime.timezone.utc).timestamp()
                # int keys 1/2 hold raw second-offsets from release time ...
                for k in list(stars.keys()):
                    stars[int(k)] = stars[k]['get_star_ts'] - release_time
                if '2' in stars:
                    stars[3] = stars[2] - stars[1]  # Δt
                # ... while str keys '1'/'2'/'3' hold the formatted display text
                for k in [1, 2, 3]:
                    if k in stars:
                        minmax = self.minmaxts[int(day)][k]
                        minmax[0] = min(minmax[0], stars[k])
                        minmax[1] = max(minmax[1], stars[k])
                        td = datetime.timedelta(seconds=stars[k])
                        m, s = divmod(td.seconds, 60)
                        h, m = divmod(m, 60)
                        h += td.days * 24
                        stars[str(
                            k
                        )] = ">=100hrs" if h >= 100 else f"{h:0>2}:{m:0>2}:{s:0>2}"
                    else:
                        stars[str(k)] = Leaderboard.NO_TIME
        # dtlocal scoring: per day, rank players by Δt; best earns N points
        # (N = player count), the next N-1, and so on
        for d in range(1, self.ndays + 1):
            for sub, player in enumerate(
                    sorted(self.players,
                           key=lambda p: p['completion_day_level'].get(
                               str(d), {}).get(3, float('inf')))):
                if (str(d) not in player['completion_day_level']) or (
                        3 not in player['completion_day_level'][str(d)]):
                    break  # players without a Δt sort to the end; stop there
                player['dt_sortkey'][0] -= len(self.players) - sub
                player['dt_sortkey'][1] -= 1  # tie-break: days completed
        self.index_width = len(str(len(self.players)))
        self.score_width = max(len(str(self.__score(p))) for p in self.players)
        self.name_width = max(len(p['name']) for p in self.players)
        self.players.sort(key=self.sortby)

    def __score(self, player):
        # the first element of every sort key is the negated score
        return -self.sortby(player)[0]

    def __print_sort_header(self):
        """Print the 'Sort by:' line; as <a> links when HTML output is on."""
        print("Sort by:", end='')
        if self.usehtml:
            for k in Leaderboard.SORTBYS:
                print(' ', end='')
                if k == self.sortname:
                    print("<strong>", end='')
                # drop the sort key into the template's middle slot
                self.sortlink[1] = k
                url = ''.join(self.sortlink)
                print(f'<a href="{url}">[{k}]</a>', end='')
                if k == self.sortname:
                    print("</strong>", end='')
            print()
        else:
            print(self.sortname)

    def __print_table_headers(self):
        """Print the four header rows: day numbers, underline, part labels
        (S1/S2/Δt) and their underline, skipping the repeated name columns."""
        # idx) sc Longest Name --:--:-- ...
        #   ^^  ^ # 3 extra spaces before first name field
        init_width = self.index_width + self.score_width + 3
        # --:--:-- --:--:-- --:--:--
        #          11111111112222222
        # 12345678901234567890123456
        day_width = 26
        ndays = self.ndays

        def print_row(day_str_generator):
            print(" " * (init_width), end='')
            for d in range(ndays):
                if not (d % self.repeat_name_every_n_columns):
                    # leave room for the periodically repeated name column
                    print(" " * (self.name_width + 1), end='')
                print(day_str_generator(d), end='')
            print()

        # Day number row
        print_row(lambda d: f"{d+1:^{day_width}} ")
        # Day underline row
        print_row(lambda _: '=' * day_width + ' ')
        # Parts labels row
        print_row(lambda _: "   S1       S2       Δt     ")
        # Parts underline row
        print_row(lambda _: f"{' '.join(['='*8]*3)} ")

    def __printing_time_field(self, player_starval_map, day, star):
        """Return the display text for one time cell; in HTML mode wrap it in
        a span whose opacity scales with relative speed (fastest = opaque)."""
        base_str = player_starval_map[star]
        MIN_OPACITY, MAX_OPACITY = 0.15, 1.0
        mint, maxt = self.minmaxts[day][int(star)]
        # linear map: fastest time -> MAX_OPACITY, slowest -> MIN_OPACITY;
        # a degenerate range (single data point) renders fully opaque
        calc_opacity = ((lambda x: MAX_OPACITY - (MAX_OPACITY - MIN_OPACITY) *
                         (x - mint) / (maxt - mint)) if
                        (maxt - mint) else lambda x: MAX_OPACITY)
        NO_TIME_OPACITY = 0.05
        if self.usehtml:
            opacity = NO_TIME_OPACITY
            if int(star) in player_starval_map:
                t = player_starval_map[int(star)]
                opacity = calc_opacity(t)
            return f'<span class="time" data-opacity="{opacity}" style="filter: opacity({opacity});">{base_str}</span>'
        else:
            return base_str

    def __print_table(self):
        """Print one ranked row per player: rank, score, names and times."""
        ndays = self.ndays
        noattempt = {str(i): Leaderboard.NO_TIME for i in range(1, 4)}
        for i, player in enumerate(self.players):
            print(f"{i+1:>{self.index_width}}) ", end='')
            print(f"{self.__score(player):>{self.score_width}} ", end='')
            for d in range(ndays):
                if not (d % self.repeat_name_every_n_columns):
                    # repeat the name every few day-columns for readability
                    print(
                        f"{player['name']:{'^' if d else '>'}{self.name_width}} ",
                        end='')
                times = player['completion_day_level'].get(
                    str(d + 1), noattempt)
                for k in "123":
                    print(self.__printing_time_field(times, d + 1, k),
                          '',
                          end='')
            print('', player['name'])
        print()

    def print(self):
        """Print the complete leaderboard (sort header, headers, table)."""
        self.__print_sort_header()
        self.__print_table_headers()
        self.__print_table()

    def __get_raw_from_source(self):
        """Fetch the leaderboard JSON from adventofcode.com, authenticating
        with the session cookie stored at DATA_DIR/cookie."""
        url = (f"https://adventofcode.com/{self.year}/"
               f"leaderboard/private/view/{self.code}.json")
        with open(DATA_DIR + 'cookie', 'r') as cookie_in:
            session = cookie_in.read()
        jar = requests.cookies.RequestsCookieJar()
        jar.set('session', session)
        r = requests.get(url, cookies=jar)
        return r.json()

    def __get_json(self):
        """Return the leaderboard JSON, honouring a 15-minute (900 s) disk
        cache to respect AoC's requested polling limit.

        NOTE(review): assumes DATA_DIR/.lbcache/ already exists -- confirm,
        otherwise the cache write raises FileNotFoundError.
        """
        cache_path = Path(DATA_DIR + f'.lbcache/{self.year}.{self.code}.json')
        use_cache = (cache_path.exists()
                     and cache_path.stat().st_mtime + 900 > time.time())
        if use_cache:
            with open(cache_path, 'r') as fin:
                return json.load(fin)
        else:
            res = self.__get_raw_from_source()
            with open(cache_path, 'w') as fout:
                json.dump(res, fout)
            return res
def read_boards_from_config_file():
    """Read (code, name) pairs from DATA_DIR/leaderboards.

    Each non-empty line has the form ``<code>=<display name>``; only the
    first ``=`` splits, so names may contain ``=``.  Blank lines (e.g. a
    trailing newline) are skipped -- previously they produced a one-element
    entry that crashed display_boards() on unpacking.

    Returns a list of [code, name] pairs.
    """
    boards = []
    with open(DATA_DIR + 'leaderboards', 'r') as fin:
        for line in fin:  # iterate lazily instead of readlines()
            line = line.strip()
            if not line:
                continue
            boards.append(line.split('=', 1))
    return boards
def display_boards(boards):
    """Print one numbered `index: name` line per configured leaderboard."""
    for idx, board in enumerate(boards):
        print(f'{idx}: {board[1]}')
def get_board_id_from_user(boards):
    """Interactively pick a leaderboard; return its code (first element).

    Loops until the user enters an int that indexes `boards`.  The bare
    `boards[board_idx]` lookup forces an IndexError for too-large values;
    negative ints index from the end without raising, but the while
    condition still rejects them.
    """
    display_boards(boards)
    board_idx = -1
    while not (0 <= board_idx < len(boards)):
        try:
            board_idx = int(
                input("Select the index of the leaderboard you want to see: "))
            boards[board_idx]  # raises IndexError for out-of-range input
        except ValueError:
            print("You must enter an int")
        except IndexError:
            print("The index must be in range")
    return boards[board_idx][0]
printerrs = False
def get_args():
    """Parse CLI options; return (readme_flag, (year, code, sortby, sortlink)).

    Side effects: sets the global `printerrs` when --debug is given, and may
    prompt interactively (get_board_id_from_user) when --code is omitted.
    Raises on an invalid year/code/sort so the top-level handler can report.
    """
    global printerrs
    SORTKEY_FILLER = '~~'
    parser = argparse.ArgumentParser(
        description="generate more detailed advent of code leaderboard")
    parser.add_argument("-y", "--year", help="year of the leaderboard to use")
    parser.add_argument("-c",
                        "--code",
                        help="code of the leaderboard to use"
                        " (found at the end of the url for the leaderboard)")
    parser.add_argument(
        "-s",
        "--sort",
        default='local',
        help="what to sort the players by."
        " `local` and `stars` are the same as on the official site."
        " `dtlocal` is like `local`, but orders the ranking for each daily star only by the delta time"
    )
    parser.add_argument(
        "-w",
        "--web-sortlink-template",
        metavar=f'LI{SORTKEY_FILLER}NK',
        dest='web_sortlink_template',
        help=
        "a template sortlink to provide for the link tags in a sort header. omit for a plain text sort header."
        f" use `{SORTKEY_FILLER}` in the link to show where the sortkey belongs, `e.g. foo.bar/aoc?sortby={SORTKEY_FILLER}&board=12345`."
        f" if you want to use a {SORTKEY_FILLER} in the link otherwise, tough luck for now. if you ping me, maybe I'll do something about it"
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help=
        "suppress the suppression of errors (by default, all exceptions are caught and a web-friendly message presented)"
    )
    parser.add_argument(
        "-r",
        "--readme",
        action="store_true",
        help="prior to all other output, spit out the readme (html)")
    args = parser.parse_args()
    if args.debug:
        printerrs = True
    year = args.year
    code = args.code
    sortby = args.sort
    # must stay in sync with Leaderboard.SORTBYS
    sort_choices = ['local', 'stars', 'dtlocal']
    if sortby not in sort_choices:
        print(f"Invalid sort identifier - must be in {sort_choices}")
        raise Exception("")
    sortlink = args.web_sortlink_template
    if sortlink is not None:
        # split into [prefix, sortkey-slot, suffix]; a template without exactly
        # one filler raises ValueError, which propagates exactly as the old
        # redundant `try: ... except: raise` did
        l, r = sortlink.split(SORTKEY_FILLER)
        sortlink = [l, '', r]
    if year is None:
        year = datetime.datetime.now().year
    try:
        year = int(year)
    except:
        print("Invalid year - must be an int")
        raise
    if code is None:
        board_list = read_boards_from_config_file()
        code = get_board_id_from_user(board_list)
        print()
    try:
        code = int(code)
    except:
        print("Invalid leaderboard id - must be an int")
        raise
    return args.readme, (year, code, sortby, sortlink)
if __name__ == '__main__':
try:
readme, args = get_args()
year, code, _, _ = args
linkify = lambda text: f'[<a href="https://adventofcode.com/{year}/leaderboard/private/view/{code}">{text}</a>]'
if readme:
print("<div id='readme'>")
print(
f"this is a different view of an {linkify('Advent of Code leaderboard')}.\n"
"rather than just displaying stars, the time it took to solve each problem is shown\n"
"\n"
"note that in keeping with the rate limit request tied to the endpoint this page scrapes from,\n"
"this page takes a minimum of 15 minutes between pulling updates from the Advent of Code servers.\n"
"as such, this page does not update in real time and will not always have the most recent results\n"
"\n"
"for each day (up to the most recent any competitor has completed at least one star on), there are three columns:\n"
" -S1: the time it took from the release of the puzzle to when the first star was earned\n"
" -S2: the time it took from the release of the puzzle to when the second star was earned\n"
" -Δt: S2 - S1, or the time it took to finish Part Two after finishing Part One\n"
"\n"
"additionally, another sort option is presented.\n"
f"'local' and 'stars' are the same as on {linkify('the official site')} - click 'Ordering' to see how those are calculated.\n"
"the new option, 'dtlocal', is calculated much like 'local' with the highest ranking earning N points, etc.\n"
"however, the initial ranking is done once per day, and is ordered by lowest Δt"
)
print("</div>")
lb = Leaderboard(*args)
lb.print()
except:
if printerrs:
raise
RUH_ROH = "somethings not working right now, please ping/dm me to let me know (unless it's your fault -- if so, then fix yourself, child!)."
RUH_ROH = "I'm working on this, sorry for the problems"
try:
with open(DATA_DIR + | |
table, but direct
assignment to the "__dict__" attribute is not possible (you can write
"m.__dict__['a'] = 1", which defines "m.a" to be "1", but you can't
write "m.__dict__ = {}"). Modifying "__dict__" directly is not
recommended.
Modules built into the interpreter are written like this: "<module
'sys' (built-in)>". If loaded from a file, they are written as
"<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>".
"""
, 'typesseq':
"""Sequence Types --- "list", "tuple", "range"
*******************************************
There are three basic sequence types: lists, tuples, and range
objects. Additional sequence types tailored for processing of binary
data and text strings are described in dedicated sections.
Common Sequence Operations
==========================
The operations in the following table are supported by most sequence
types, both mutable and immutable. The "collections.abc.Sequence" ABC
is provided to make it easier to correctly implement these operations
on custom sequence types.
This table lists the sequence operations sorted in ascending priority.
In the table, *s* and *t* are sequences of the same type, *n*, *i*,
*j* and *k* are integers and *x* is an arbitrary object that meets any
type and value restrictions imposed by *s*.
The "in" and "not in" operations have the same priorities as the
comparison operations. The "+" (concatenation) and "*" (repetition)
operations have the same priority as the corresponding numeric
operations. [3]
+----------------------------+----------------------------------+------------+
| Operation | Result | Notes |
+============================+==================================+============+
| "x in s" | "True" if an item of *s* is | (1) |
| | equal to *x*, else "False" | |
+----------------------------+----------------------------------+------------+
| "x not in s" | "False" if an item of *s* is | (1) |
| | equal to *x*, else "True" | |
+----------------------------+----------------------------------+------------+
| "s + t" | the concatenation of *s* and *t* | (6)(7) |
+----------------------------+----------------------------------+------------+
| "s * n" or "n * s" | equivalent to adding *s* to | (2)(7) |
| | itself *n* times | |
+----------------------------+----------------------------------+------------+
| "s[i]" | *i*th item of *s*, origin 0 | (3) |
+----------------------------+----------------------------------+------------+
| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |
+----------------------------+----------------------------------+------------+
| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |
| | with step *k* | |
+----------------------------+----------------------------------+------------+
| "len(s)" | length of *s* | |
+----------------------------+----------------------------------+------------+
| "min(s)" | smallest item of *s* | |
+----------------------------+----------------------------------+------------+
| "max(s)" | largest item of *s* | |
+----------------------------+----------------------------------+------------+
| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |
| | *x* in *s* (at or after index | |
| | *i* and before index *j*) | |
+----------------------------+----------------------------------+------------+
| "s.count(x)" | total number of occurrences of | |
| | *x* in *s* | |
+----------------------------+----------------------------------+------------+
Sequences of the same type also support comparisons. In particular,
tuples and lists are compared lexicographically by comparing
corresponding elements. This means that to compare equal, every
element must compare equal and the two sequences must be of the same
type and have the same length. (For full details see Comparisons in
the language reference.)
Notes:
1. While the "in" and "not in" operations are used only for simple
containment testing in the general case, some specialised sequences
(such as "str", "bytes" and "bytearray") also use them for
subsequence testing:
>>> "gg" in "eggs"
True
2. Values of *n* less than "0" are treated as "0" (which yields an
empty sequence of the same type as *s*). Note that items in the
sequence *s* are not copied; they are referenced multiple times.
This often haunts new Python programmers; consider:
>>> lists = [[]] * 3
>>> lists
[[], [], []]
>>> lists[0].append(3)
>>> lists
[[3], [3], [3]]
What has happened is that "[[]]" is a one-element list containing
an empty list, so all three elements of "[[]] * 3" are references
to this single empty list. Modifying any of the elements of
"lists" modifies this single list. You can create a list of
different lists this way:
>>> lists = [[] for i in range(3)]
>>> lists[0].append(3)
>>> lists[1].append(5)
>>> lists[2].append(7)
>>> lists
[[3], [5], [7]]
Further explanation is available in the FAQ entry How do I create a
multidimensional list?.
3. If *i* or *j* is negative, the index is relative to the end of
sequence *s*: "len(s) + i" or "len(s) + j" is substituted. But
note that "-0" is still "0".
4. The slice of *s* from *i* to *j* is defined as the sequence of
items with index *k* such that "i <= k < j". If *i* or *j* is
greater than "len(s)", use "len(s)". If *i* is omitted or "None",
use "0". If *j* is omitted or "None", use "len(s)". If *i* is
greater than or equal to *j*, the slice is empty.
5. The slice of *s* from *i* to *j* with step *k* is defined as the
sequence of items with index "x = i + n*k" such that "0 <= n <
(j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",
"i+3*k" and so on, stopping when *j* is reached (but never
including *j*). When *k* is positive, *i* and *j* are reduced to
"len(s)" if they are greater. When *k* is negative, *i* and *j* are
reduced to "len(s) - 1" if they are greater. If *i* or *j* are
omitted or "None", they become "end" values (which end depends on
the sign of *k*). Note, *k* cannot be zero. If *k* is "None", it
is treated like "1".
6. Concatenating immutable sequences always results in a new
object. This means that building up a sequence by repeated
concatenation will have a quadratic runtime cost in the total
sequence length. To get a linear runtime cost, you must switch to
one of the alternatives below:
* if concatenating "str" objects, you can build a list and use
"str.join()" at the end or else write to an "io.StringIO"
instance and retrieve its value when complete
* if concatenating "bytes" objects, you can similarly use
"bytes.join()" or "io.BytesIO", or you can do in-place
concatenation with a "bytearray" object. "bytearray" objects are
mutable and have an efficient overallocation mechanism
* if concatenating "tuple" objects, extend a "list" instead
* for other types, investigate the relevant class documentation
7. Some sequence types (such as "range") only support item
sequences that follow specific patterns, and hence don't support
sequence concatenation or repetition.
8. "index" raises "ValueError" when *x* is not found in *s*. When
supported, the additional arguments to the index method allow
efficient searching of subsections of the sequence. Passing the
extra arguments is roughly equivalent to using "s[i:j].index(x)",
only without copying any data and with the returned index being
relative to the start of the sequence rather than the start of the
slice.
Immutable Sequence Types
========================
The only operation that immutable sequence types generally implement
that is not also implemented by mutable sequence types is support for
the "hash()" built-in.
This support allows immutable sequences, such as "tuple" instances, to
be used as "dict" keys and stored in "set" and "frozenset" instances.
Attempting to hash an immutable sequence that contains unhashable
values will result in "TypeError".
Mutable Sequence Types
======================
The operations in the following table are defined on mutable sequence
types. The "collections.abc.MutableSequence" ABC is provided to make
it easier to correctly implement these operations on custom sequence
types.
In the table *s* is an instance of a mutable sequence type, *t* is any
iterable object and *x* is an arbitrary object that meets any type and
value restrictions imposed by *s* (for example, "bytearray" only
accepts integers that meet the value restriction "0 <= x <= 255").
+--------------------------------+----------------------------------+-----------------------+
| Operation | Result | Notes |
+================================+==================================+=======================+
| "s[i] = x" | item *i* of *s* is replaced by | |
| | *x* | |
+--------------------------------+----------------------------------+-----------------------+
| "s[i:j] = t" | slice of *s* from *i* to *j* is | |
| | replaced by the contents of the | |
| | iterable *t* | |
+--------------------------------+----------------------------------+-----------------------+
| "del s[i:j]" | same as "s[i:j] = []" | |
+--------------------------------+----------------------------------+-----------------------+
| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |
| | replaced by those of *t* | |
+--------------------------------+----------------------------------+-----------------------+
| "del s[i:j:k]" | removes the elements of | |
| | "s[i:j:k]" from the list | |
+--------------------------------+----------------------------------+-----------------------+
| "s.append(x)" | appends *x* to the end of the | |
| | sequence (same as | | |
<gh_stars>1-10
# Copyright 2013-2015 STACKOPS TECHNOLOGIES S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sample chargeback <-> invoice system integration
# used in https://cirrusflex.com
#
# python create_invoice.py ADMIN_PASSWORD DEBITOOR_TOKEN dd-mm-yyyy
#
import sys
from keystoneclient.v2_0 import client
import logging
import requests
import json
import time
from subprocess import call
from hashlib import sha1
import hmac
import os
# Root logger: everything from DEBUG up goes to a local log file.
logging.basicConfig(filename='create_invoices.log', level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
# Deployment endpoints and credentials are injected via the environment;
# fail fast, naming the missing variable, if any of them is absent.
try:
    url = os.environ["PUBLIC_KEYSTONE_URL"]
    chargeback_url = os.environ["PUBLIC_CHARGEBACK_URL"]
    usr = os.environ["ADMIN_USER"]
    admin_tenant_name = os.environ["ADMIN_TENANT"]
    # Swift object-storage settings (not used in the visible portion of
    # this script -- presumably for publishing invoice files; confirm).
    os_auth_url = os.environ["PUBLIC_KEYSTONE_URL_FOR_SWIFT"]
    os_region_name = os.environ["SWIFT_NAME"]
    os_username = os.environ["SWIFT_USER"]
    os_password = os.environ["SWIFT_PASSWORD"]
    os_tenant_id = os.environ["SWIFT_TENANT_NAME"]
    os_container = os.environ["SWIFT_CONTAINER_NAME"]
    invoice_site_prefix = os.environ["SWIFT_PUBLIC_URL_PREFIX"]
    default_country = os.environ["DEFAULT_COUNTRY"]
except KeyError as e:
    # Python 2 print statement -- this script targets Python 2.
    print "No variable found: {0}".format(e)
    sys.exit()
# Command line (see header): create_invoice.py ADMIN_PASSWORD DEBITOOR_TOKEN dd-mm-yyyy
total = len(sys.argv)
cmdargs = str(sys.argv)
pasw = str(sys.argv[1])
debitoor_token = sys.argv[2]
# Unix timestamp 24h (86400 s) before the requested date's midnight; used
# below to select the billing cycle whose [start, end] window contains it.
invoice_range = int(time.mktime(time.strptime(sys.argv[3], "%d-%m-%Y")))-86400
invoice_date = time.strftime( "%Y-%m-%d", time.strptime(sys.argv[3], "%d-%m-%Y"))
multiple_zones = False
paymentTermsId = 1  # Debitoor paymentTermsId sent with every customer payload
max_debug = -1  # Number of testing customers (NOTE(review): unused in visible code)
default_tax_rate = 21
custom_account_id = None  # When set, restrict the run to this one account id
total = len(sys.argv)  # NOTE(review): duplicate of the assignment above
# European countries
EUROPEAN_COUNTRIES = ["FR", "UK", "DE", "IT"]
EUROPEAN_VAT = {'FR': 21, 'UK': 20, 'DE': 19, 'IT': 22}
# Product-type ids excluded from itemized invoice lines (used further down)
HIDDEN_PRODUCTS = [1]
next_account_id = 0  # Only accounts with id >= this value are billed
# Parameters for the HMAC/base58-style encode() helper below.
SEED = "ENTER_YOUR_OWN_ENCRYPTION_SEED".encode("utf8")
LAST_CHAR = 24
FIRST_CHAR = 0
ALPHABET = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
BASE_COUNT = len(ALPHABET)
def encode(secret):
    """Return a base58-style HMAC-SHA1 digest of *secret*.

    *secret* is HMAC-SHA1 hashed against the module-level SEED, the
    digest is read as a big integer, re-encoded with the 58-character
    ALPHABET, and truncated to LAST_CHAR - FIRST_CHAR characters.
    """
    key = secret.encode("utf8")
    raw = SEED
    hashed = hmac.new(key, raw, sha1)
    num = int(hashed.hexdigest(), 16)
    # Renamed from `encode` (which shadowed this function's own name).
    encoded = ''
    if num < 0:
        # int(hexdigest, 16) is always >= 0; kept as a safety net.
        return ''
    while num >= BASE_COUNT:
        # BUG FIX: the original used `num / BASE_COUNT`, which is float
        # division under Python 3 and makes `mod` a float (TypeError on
        # ALPHABET[mod]). Floor division behaves identically on Python 2.
        num, mod = divmod(num, BASE_COUNT)
        encoded = ALPHABET[mod] + encoded
    if num:
        encoded = ALPHABET[num] + encoded
    return encoded[FIRST_CHAR:LAST_CHAR]
def non_exp_repr(x):
    """Return a floating point representation without exponential notation.

    The result satisfies ``float(result) == float(x)`` and contains no
    'e'/'E'.

    >>> non_exp_repr(-1.234e+18)
    '-1234000000000000000.0'
    """
    text = repr(float(x))
    e_pos = text.lower().find('e')
    if e_pos < 0:
        return text  # already positional notation
    # Mantissa digits with the decimal point removed, and the exponent.
    digits = text[:e_pos].replace('.', '')
    exponent = int(text[e_pos + 1:])
    sign = ''
    if digits.startswith('-'):
        sign, digits = '-', digits[1:]
    frac_len = len(digits) - 1  # digit count after the point in the mantissa
    if exponent >= frac_len:
        # Point moves right past every digit; pad with trailing zeros.
        return '{0}{1}{2}.0'.format(sign, digits, '0' * (exponent - frac_len))
    if exponent <= -1:
        # Point moves left of the first digit; pad with leading zeros.
        return '{0}0.{1}{2}'.format(sign, '0' * (-exponent - 1), digits)
    # Point lands inside the digit run.
    cut = exponent + 1
    return '{0}{1}.{2}'.format(sign, digits[:cut], digits[cut:])
def get_auth_token():
    """Authenticate against Keystone and return a scoped auth token.

    Uses the module-level admin credentials: ``usr``,
    ``admin_tenant_name`` and ``url`` from the environment, ``pasw``
    from argv[1].
    """
    logging.info("Init connection: %s" % url)
    # BUG FIX: the checked-in code contained the literal placeholder
    # "<PASSWORD>" here (a credential-scrubbing artifact), which is a
    # syntax error. The admin password is `pasw`, read from argv[1].
    keystone = client.Client(username=usr, password=pasw, tenant_name=admin_tenant_name, auth_url=url)
    auth_token = keystone.auth_token
    logging.info("Auth token: %s" % auth_token)
    return auth_token
def get_billable_accounts(token):
    """Return the chargeback accounts that should be invoiced.

    Keeps only ACTIVE/SUSPENDED accounts whose id is >= the module-level
    ``next_account_id``, is not blacklisted, and -- when
    ``custom_account_id`` is set -- equals that single id.
    """
    headers = {"X-Auth-Token": "%s" % token, "Content-Type": "application/json"}
    r = requests.get("%s/api/account" % chargeback_url, headers=headers)
    data = r.json()
    logging.info("size=%s" % len(data["accounts"]))
    # BUG FIX: `account_black_list` is referenced but never defined in
    # this script, so the original raised NameError on the first account.
    # Fall back to an empty blacklist when the name is absent.
    black_list = globals().get("account_black_list", [])
    accounts = []
    for account in data["accounts"]:
        if int(next_account_id) <= int(account["id"]):
            if account["status"] == "ACTIVE" or account["status"] == "SUSPENDED":
                if int(account["id"]) not in black_list:
                    if custom_account_id is None or int(custom_account_id) == int(account["id"]):
                        accounts.append(account)
                        logging.info("Account billable=%s" % account)
    return accounts
def get_billable_account_cycle(token, account_id):
    """Return the highest cycle id covering ``invoice_range``, or 0 if none."""
    headers = {"X-Auth-Token": "%s" % token, "Content-Type": "application/json"}
    response = requests.get("%s/api/account/%s/cycle" % (chargeback_url, account_id), headers=headers)
    best_cycle_id = 0
    for cycle in response.json()["cycles"]:
        # Keep the newest cycle whose [start, end] window contains the
        # requested invoicing timestamp.
        if cycle["start"] <= invoice_range <= cycle["end"] and cycle["id"] > best_cycle_id:
            best_cycle_id = cycle["id"]
    return best_cycle_id
def get_cycle(token, cycle_id):
    """Fetch one billing cycle document from the chargeback API."""
    headers = {"X-Auth-Token": "%s" % token, "Content-Type": "application/json"}
    response = requests.get("%s/api/cycle/%s" % (chargeback_url, cycle_id), headers=headers)
    return response.json()
def get_projects(token, cycle_id):
    """Fetch the projects belonging to one billing cycle."""
    headers = {"X-Auth-Token": "%s" % token, "Content-Type": "application/json"}
    response = requests.get("%s/api/cycle/%s/project" % (chargeback_url, cycle_id), headers=headers)
    return response.json()
def get_products(token, project_id):
    """Fetch the billed products of one project."""
    headers = {"X-Auth-Token": "%s" % token, "Content-Type": "application/json"}
    response = requests.get("%s/api/project/%s/product" % (chargeback_url, project_id), headers=headers)
    return response.json()
def update_account_external_id(token, id, external_id):
    """Store the invoicing system's customer id on a chargeback account.

    Read-modify-write: fetch the account document, patch its
    ``externalId`` and PUT the whole document back.
    """
    headers = {"X-Auth-Token": "%s" % token, "Content-Type": "application/json"}
    account_url = "%s/api/account/%s" % (chargeback_url, id)
    document = requests.get(account_url, headers=headers).json()
    document["account"]["externalId"] = external_id
    result = requests.put(account_url, headers=headers, data=json.dumps(document))
    return result.json()
def consume_account_invoice(token, id, payment, description, transactionId, invoice_url):
    """Register an invoice payment transaction against an account."""
    headers = {"X-Auth-Token": "%s" % token, "Content-Type": "application/json"}
    transaction = {
        "amount": payment,
        "description": description,
        "transactionId": transactionId,
        "invoice": invoice_url,
    }
    r = requests.post("%s/api/account/%s/consume" % (chargeback_url, id),
                      headers=headers, data=json.dumps({"transaction": transaction}))
    return r.json()
def create_item(product, description, quantity, unitPrice, tax_rate, country):
    """Build one Debitoor invoice line-item dict.

    European customers get ``productOrService="service"``; everyone else
    gets None.  Net and gross unit prices are both the rounded unit
    price; tax is carried separately via ``tax_rate``.
    """
    is_european = country.upper() in EUROPEAN_COUNTRIES
    price = round(float(unitPrice), 2)
    return {
        "description": description,
        "productOrService": "service" if is_european else None,
        "quantity": quantity,
        "unitNetPrice": price,
        "unitGrossPrice": price,
        "unitId": None,
        "productId": None,
        "productName": product,
        "taxEnabled": True,
        "taxRate": tax_rate,
        "incomeTaxDeductionRate": None,
    }
def create_customer_debitdoor(name, address, country, vat, email, phone):
    """Create a Debitoor customer; return the API response as a dict.

    On HTTP 200 returns the created customer document.  Otherwise logs
    the failure, raises for 4xx/5xx statuses, and returns a synthetic
    error dict for any remaining non-200 status.
    """
    logging.info("Creating customer:%s" % name)
    payload = {
        "name": name,
        "address": address,
        "phone": phone,
        "email": email,
        "ciNumber": vat,
        "vatNumber": vat,
        "paymentTermsId": paymentTermsId,
        "countryCode": country,
    }
    r = requests.post(
        "https://api.debitoor.com/api/v1.0/customers?autonumber=true&token=%s" % debitoor_token,
        headers={"Content-Type": "application/json"},
        data=json.dumps(payload))
    if r.status_code == 200:
        logging.info("Created customer:%s" % name)
        return r.json()
    logging.error("Cannot create customer:%s error: %i" % (name, r.status_code))
    r.raise_for_status()  # raises HTTPError for 4xx/5xx responses
    return {"message": "Cannot create customer", "code": r.status_code, "type": "ERROR", "success": False}
def update_customer_debitdoor(externalId, name, address, country, vat, email, phone):
    """Update an existing Debitoor customer identified by ``externalId``.

    Fetches the current record to preserve its Debitoor-assigned customer
    number, then PUTs the refreshed profile.  Returns the API response
    dict, or a synthetic error dict when the initial fetch fails.
    """
    headers = {"Content-Type": "application/json"}
    customer_url = "https://api.debitoor.com/api/v1.0/customers/%s?token=%s" % (externalId, debitoor_token)
    r = requests.get(customer_url, headers=headers)
    if r.status_code != 200:
        logging.error("Cannot update customer:%i" % r.status_code)
        return {"message": "Cannot update customer", "code": r.status_code, "type": "ERROR", "success": False}
    # The customer number is assigned by Debitoor and must be echoed back
    # unchanged on update.
    payload = {
        "name": name,
        "number": r.json()["number"],
        "address": address,
        "phone": phone,
        "email": email,
        "ciNumber": vat,
        "vatNumber": vat,
        "paymentTermsId": paymentTermsId,
        "countryCode": country,
    }
    r = requests.put(customer_url, headers=headers, data=json.dumps(payload))
    logging.info("Updated customer:%s" % name)
    return r.json()
auth_token = get_auth_token()
billable_accounts = get_billable_accounts(auth_token)
invoice_list = []
for billable_account in billable_accounts:
billable_cycle_id = get_billable_account_cycle(auth_token, billable_account["id"])
account_tax_rate = 21
billable_country="ES"
if billable_account["accountBilling"] is not None:
if billable_account["accountBilling"]["country"]!="ES":
account_tax_rate = 0
try:
account_tax_rate = EUROPEAN_VAT[billable_account["accountBilling"]["country"].upper()]
except:
pass
billable_country=billable_account["accountBilling"]["country"]
if billable_cycle_id > 0:
data = get_cycle(auth_token, billable_cycle_id)
projectsTotal = float(data["cycle"]["projectsTotal"])
projects = get_projects(auth_token, billable_cycle_id)["projects"]
multiple_projects = len(projects) > 1
if len(projects) > 0 and projectsTotal> 0 :
items = []
for project in projects:
tenantName = project["tenant"]["name"]
zoneName = project["tenant"]["zone"]["name"]
products = get_products(auth_token, project["id"])["products"]
for product in products:
productDescription = product["productType"]["description"]
resources = product["resources"]
productBaseFee = product["baseFee"]
zone_and_tenant = ""
zone_and_tenant = "Zona: " + zoneName + "\n"
zone_and_tenant = zone_and_tenant + "Tenant: " + tenantName + "\n"
item = create_item(productDescription + " tarifa mensual",
zone_and_tenant, 1, productBaseFee, account_tax_rate, billable_country)
if productBaseFee > 0:
items.append(item)
for resource in resources:
resourceDescription = resource["resourceType"]["description"]
resourceUnits = str(resource["ammount"])
resourceAccumulatedFee = str(resource["accumulatedFee"])
resourceUnitFee = str(non_exp_repr(resource["unitFee"]))
resourceFixedFee = str(resource["fixedFee"])
resourceFreeTier = str(resource["freeUnitsPerCycle"])
zone_and_tenant = ""
zone_and_tenant = "- Zona: " + zoneName + " "
zone_and_tenant = zone_and_tenant + " Tenant: " + tenantName + "\n"
if product["productType"]["id"] not in HIDDEN_PRODUCTS:
productLine = productDescription + " - " + resourceDescription
detailLine = "- Unidades consumidas: " + resourceUnits + "\n"
if float(resourceFreeTier)>0.0:
detailLine = detailLine + "- Unidades gratuitas: " + resourceFreeTier + "\n"
billable = int(resourceUnits) - int(resourceFreeTier)
if billable<0:
billable=0
detailLine = detailLine + "- Total unidades facturables: " + str(billable) + "\n"
detailLine = detailLine + "- Coste por unidad: " + resourceUnitFee + " EUR\n"
if float(resourceFixedFee) > 0.0:
detailLine = detailLine + "- Coste fijo mensual: " + resourceFixedFee + " EUR\n"
item = create_item(productLine, zone_and_tenant + detailLine, 1, resourceAccumulatedFee, account_tax_rate, billable_country)
items.append(item)
externalId = billable_account["externalId"]
name_ = ""
address_ = ""
country_ = "ES"
taxId_ = ""
if billable_account["accountBilling"] is not None:
name_ = billable_account["accountBilling"]["companyName"]
address_ = billable_account["accountBilling"]["address"] + "\n" + \
billable_account["accountBilling"]["zipCode"] + " - " + \
billable_account["accountBilling"]["city"] + "\n" + \
billable_account["accountBilling"]["state"]
country_ = billable_account["accountBilling"]["country"]
taxId_ = billable_account["accountBilling"]["taxId"]
if len(externalId) == 0:
# Create account
customer = create_customer_debitdoor(name_,
address_,
country_,
taxId_,
billable_account["accountBilling"]["contactEmail"],
billable_account["accountBilling"]["contactPhone"])
externalId = customer["id"]
id = billable_account["id"]
update_account_external_id(auth_token, id, externalId)
| |
help message for usage of the $rquote command
Parameters
==========
channel : discord.Channel
Channel to send the help message to.
"""
embed = discord.Embed(
title='How to Quote!',
color=discord.Color.red()
)
embed.set_author(name=CLIENT.user, icon_url=CLIENT.user.avatar_url)
embed.add_field(name='Adding a quote', inline=True,
value='React to a message with the `:speech_balloon:` emoji (💬)')
embed.add_field(name='Removing a quote', inline=True,
value='React to a message with the `:x:` emoji (❌)')
embed.add_field(name='Picking a quote', inline=False,
value='`$rquote` without mentions for a random quote')
embed.add_field(name='Picking a quote from a user', inline=False,
value='`$rquote @user` to pick a random quote from `user`')
embed.set_footer(text='Run `$rquote help` to display this message again')
await channel.send(embed=embed)
async def rquote(message):
    """Handle a user's request to use the $rquote command

    Picks a random saved quote -- optionally restricted to one mentioned
    user and, unless cross-channel quoting is enabled, to the current
    channel -- avoiding entries in the recent-repeat buffer, then reposts
    it to the channel.

    Parameters
    ==========
    message : discord.Message
        User message that triggered the command.
    """
    log('$rquote request from {}'.format(message.author.name))
    # Asking for help will override any tokens
    if 'help' in message.content.split():
        await rquote_help(message.channel)
        return
    # Look to see who was tagged, if any
    tagged_member = None
    mentions = message.mentions
    if len(mentions) > 1:
        log(' ERROR: more than one user is tagged')
        await message.delete()
        await message.channel.send(
            'You cannot tag more than one user for `$rquote`!')
        return
    elif len(mentions) == 1:
        tagged_member = mentions[0]
    # Filter by channel ID, if cross-channel setting is disabled
    if ALLOW_XCHAN:
        # Idiom fix: `is not None` instead of `!= None` identity tests.
        if tagged_member is not None:
            where = 'author_id = {}'.format(tagged_member.id)
        else:
            where = None
    else:
        where = 'channel_id = {}'.format(message.channel.id)
        if tagged_member is not None:
            where += ' AND author_id = {}'.format(tagged_member.id)
    # Grab all results that match our criteria
    try:
        results = db.select(CONN, QUOTES_TABLE, '*', where)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Assume the SQL connection went
        # stale, rebuild it, and retry once.
        reset_sql_conn()
        results = db.select(CONN, QUOTES_TABLE, '*', where)
    if len(results) == 0:
        log(' No quotes found.')
        await message.channel.send(
            'No quotes found! Use `$rquote help` for usage information.')
        return
    # Pick a random quote from the bunch
    result = random.choice(results)
    # Reroll if the result is in the repeat buffer
    # Message ID is Index 2 of the results tuple
    # If there is only one result remaining, then pick that anyway
    while (len(results) > 1 and result[2] in REPEAT_BUF):
        results.remove(result)
        result = random.choice(results)
    # If the final chosen one isn't in repeat buffer, then add it
    if result[2] not in REPEAT_BUF:
        add_to_repeat_buf(result[2])
    quote = Quote()
    await quote.fill_from_entry(result)
    log(' Author :{}'.format(quote.author.name))
    log(' Channel :#{}'.format(quote.message.channel.name))
    log(' Message :{}'.format(quote.message.content))
    await repeat_quote(message.channel, quote)
async def remindme_help(channel):
    """Send a help message for usage of the $remindme command

    Parameters
    ==========
    channel : discord.Channel
        Channel to send the help message to.
    """
    embed = discord.Embed(
        title='How to use Reminders!',
        color=discord.Color.red()
    )
    embed.set_author(name=CLIENT.user, icon_url=CLIENT.user.avatar_url)
    # (field name, inline flag, field value), in display order.
    help_fields = (
        ('Usage', False, '`$remindme <time> <memo>`'),
        ('What it does', False,
         'Get a reminder in the channel some time later'),
        ('Memo', False,
         '**[Optional]** Memo is the message that will be repeated to you'),
        ('Valid time units', False, '`weeks`, `days`, `hours`, `minutes`'),
        ('Example', False,
         '`$remindme 1 minute A reminder 1 minute from now!`'),
        ('Notes', False,
         'Note that if the bot dies, all pending reminders are lost!'),
    )
    for field_name, inline, value in help_fields:
        embed.add_field(name=field_name, inline=inline, value=value)
    embed.set_footer(text='Run `$remindme help` to display this message again')
    await channel.send(embed=embed)
async def remindme_errmsg(message):
    """Send an error message to the channel.

    Parameters
    ==========
    message : discord.Message
        The calling message.
    """
    log(' ERROR: invalid args for {}'.format(message.content))
    reply = 'Invalid arguments for `$remindme`! Use `$remindme help` for help.'
    await message.channel.send(reply)
async def remindme(message):
    """Set and send a reminder for a user.

    Parses "<number> <unit>" pairs (weeks/days/hours/minutes) followed by
    an optional memo, confirms in the channel, sleeps until the target
    time, then pings the requester with the memo as an embed.

    Parameters
    ==========
    message : discord.Message
        The calling message, starting with `$remindme`
    """
    log('$remindme request from {}'.format(message.author.name))
    # Asking for help will override any tokens
    if 'help' in message.content.split():
        await remindme_help(message.channel)
        return
    token_arr = message.content.split()
    # Error out if there are no arguments
    if len(token_arr) <= 1:
        await remindme_errmsg(message)
        return
    # Accepted unit spellings, mapped to timedelta keyword names.
    unit_aliases = {
        'week': 'weeks', 'weeks': 'weeks',
        'day': 'days', 'days': 'days',
        'hour': 'hours', 'hours': 'hours', 'hr': 'hours', 'hrs': 'hours',
        'minute': 'minutes', 'minutes': 'minutes',
        'min': 'minutes', 'mins': 'minutes',
    }
    amounts = {'weeks': 0, 'days': 0, 'hours': 0, 'minutes': 0}
    memo = ''
    was_number = False
    set_time = False
    # 0th index of this array should be '$remindme'
    for i in range(1, len(token_arr)):
        token = token_arr[i]
        if token.isnumeric():
            # Two numbers in a row is malformed input
            if was_number:
                await remindme_errmsg(message)
                return
            was_number = True
            continue
        if token in unit_aliases:
            # A unit must immediately follow its number
            if not was_number:
                await remindme_errmsg(message)
                return
            amounts[unit_aliases[token]] = int(token_arr[i - 1])
            was_number = False
            set_time = True
        else:
            # The rest of the message is the memo. Any time units after the
            # start of the memo are deliberately ignored.
            # Error if no time was set before the memo started.
            if not set_time:
                await remindme_errmsg(message)
                return
            memo = ' '.join(token_arr[i:])
            break
    # BUG FIX: input consisting only of numbers (e.g. "$remindme 5") used
    # to fall through with no unit parsed and schedule an immediate
    # zero-delay reminder; reject it instead.
    if not set_time:
        await remindme_errmsg(message)
        return
    weeks = amounts['weeks']
    days = amounts['days']
    hours = amounts['hours']
    minutes = amounts['minutes']
    # If the memo is empty, then make it '`<none>`'
    if len(memo) == 0:
        memo = '`<none>`'
    # Log the operation
    log(' wk|d|h|m: {}|{}|{}|{}'.format(weeks, days, hours, minutes))
    log(' Memo: {}'.format(memo))
    # Construct the confirmation message, pluralizing each nonzero unit
    conf = 'Okay! I\'ll remind you in this channel in**'
    for amount, singular in ((weeks, 'week'), (days, 'day'),
                             (hours, 'hour'), (minutes, 'minute')):
        if amount > 0:
            unit = singular if amount == 1 else singular + 's'
            conf += ' {} {}'.format(amount, unit)
    conf += '**.'
    await message.channel.send(conf)
    curr_time = datetime.datetime.now()
    target_time = curr_time + datetime.timedelta(
        weeks=weeks, days=days, hours=hours, minutes=minutes)
    # NOTE: If the bot is killed, all pending reminders are lost...at this point
    # I have no obvious solution so consider it a beta
    log(' Awaiting time...')
    await discord.utils.sleep_until(target_time)
    # Send the reminder as an embed
    embed = discord.Embed(title='Your reminder!', color=discord.Color.red())
    embed.set_author(name=CLIENT.user, icon_url=CLIENT.user.avatar_url)
    embed.add_field(name='Requestor', inline=False, value=message.author.mention)
    embed.add_field(name='Reminder', inline=False, value=memo)
    embed.add_field(name='Jump to message', inline=False,
                    value='[{}]({})'.format('Click here', message.jump_url))
    await message.channel.send(content=message.author.mention, embed=embed)
    log(' Reminder sent!')
async def helpcmd(channel):
    """List all of the available commands.

    Parameters
    ==========
    channel : discord.Channel
        The channel to send the help message to.
    """
    embed = discord.Embed(
        title='Available commands',
        color=discord.Color.red(),
        description=', '.join(BOT_COMMAND_NAMES),
    )
    embed.add_field(
        name='Further usage',
        inline=False,
        value='If the command has extra arguments, add `help` after the command')
    await channel.send(embed=embed)
################################################################################
# Discord event functions
################################################################################
@CLIENT.event
async def on_ready():
    """Bot routines to run once it's up and ready"""
    # Announce the successful login, then pick an initial random presence.
    log('BEEP BEEP. Logged in as <{0.user}>'.format(CLIENT))
    await set_rand_status()
@CLIENT.event
async def on_message(message):
    """Bot routines to run whenever a new message is sent

    Basically just check for target keywords.

    Parameters
    ==========
    message : discord.Message
        The message that was just sent.
    """
    # Never respond to this bot's own messages
    if message.author == CLIENT.user:
        return
    content = message.content
    if startswith_word(content, '$help'):
        await helpcmd(message.channel)
    if startswith_word(content, '$hello'):
        await message.channel.send('ぉぁ~ょ')
    # Commands whose handler takes the whole message
    for keyword, handler in (('$rquote', rquote), ('$remindme', remindme)):
        if startswith_word(content, keyword):
            await handler(message)
    # Chance to change the bot status on new message
    await roll_rand_status()
@CLIENT.event
async def on_raw_reaction_add(payload):
"""Bot routine to run whenever a reaction is added to any message
We use the raw event handler since we can't rely on the bot's cache (as this
has to work for ANY message, not just the cached ones).
Parameters
==========
payload : discord.RawReactionActionEvent
The payload of the reaction event.
"""
# Need to cast to string, since Discord emoji not really an emoji
emoji = str(payload.emoji)
# Exit early if not reacting with what we want
if emoji not in KEY_REACTS:
return
# Need these for future ops
guild = CLIENT.get_guild(payload.guild_id)
channel = CLIENT.get_channel(payload.channel_id)
# Get message, quoter, and quote author
message = await channel.fetch_message(payload.message_id)
member_saver = payload.member
user_author = message.author
member_author = await guild.fetch_member(user_author.id)
# Construct new quote object
quote = Quote(member_author, member_saver, message)
# Ugh, why doesn't Python have switch statements...?
if (emoji == EMOJI_QUOTE):
await quote.save_to_db()
elif (emoji == | |
next-segment matches..."
.format(key, parentref))
next_translated_path = (
translated_path + YAMLPath.escape_path_section(
key, translated_path.seperator))
next_ancestry = ancestry + [(data, key)]
for node_coord in self._get_nodes_by_traversal(
val, yaml_path, segment_index,
parent=data, parentref=key,
translated_path=next_translated_path,
ancestry=next_ancestry
):
self.logger.debug(
"Yielding filtered indirect Hash value from KEY"
" '{}' at ref '{}':".format(key, parentref),
prefix="Processor::_get_nodes_by_traversal: ",
data=node_coord.node)
yield node_coord
elif isinstance(data, list):
for idx, ele in enumerate(data):
self.logger.debug(
"Processor::_get_nodes_by_traversal: Recursing into"
" INDEX '{}' at ref '{}' for next-segment matches..."
.format(idx, parentref))
next_translated_path = translated_path + "[{}]".format(idx)
next_ancestry = ancestry + [(data, idx)]
for node_coord in self._get_nodes_by_traversal(
ele, yaml_path, segment_index,
parent=data, parentref=idx,
translated_path=next_translated_path,
ancestry=next_ancestry
):
self.logger.debug(
"Yielding filtered indirect Array value from INDEX"
" {} at {}:".format(idx, parentref),
prefix="Processor::_get_nodes_by_traversal: ",
data=node_coord)
yield node_coord
    def _get_required_nodes(
        self, data: Any, yaml_path: YAMLPath, depth: int = 0, **kwargs: Any
    ) -> Generator[NodeCoords, None, None]:
        """
        Generate pre-existing NodeCoords from YAML data matching a YAML Path.

        Unlike ``_get_optional_nodes``, this never creates missing nodes;
        it only yields nodes that already exist in *data*.

        Parameters:
        1. data (Any) The parsed YAML data to process
        2. yaml_path (YAMLPath) The pre-parsed YAML Path to follow
        3. depth (int) Index within yaml_path to process; default=0

        Keyword Arguments:
        * parent (ruamel.yaml node) The parent node from which this query
          originates
        * parentref (Any) The Index or Key of data within parent
        * relay_segment (PathSegment) YAML Path segment presently under
          evaluation
        * translated_path (YAMLPath) YAML Path indicating precisely which node
          is being evaluated
        * ancestry (List[AncestryEntry]) Stack of ancestors preceding the
          present node under evaluation

        Returns: (Generator[NodeCoords, None, None]) The requested NodeCoords
        as they are matched

        Raises: N/A
        """
        parent: Any = kwargs.pop("parent", None)
        parentref: Any = kwargs.pop("parentref", None)
        relay_segment: PathSegment = kwargs.pop("relay_segment", None)
        translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
        ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
        segments = yaml_path.escaped
        if segments and len(segments) > depth:
            # At least one path segment remains; resolve it against the
            # present data node, then recurse into every match.
            pathseg: PathSegment = yaml_path.unescaped[depth]
            (segment_type, unstripped_attrs) = pathseg
            except_segment = str(unstripped_attrs)
            self.logger.debug(
                "Seeking segment <{}>{} in data of type {}:"
                .format(segment_type, except_segment, type(data)),
                prefix="Processor::_get_required_nodes: ",
                data=data, footer=" ")
            for segment_node_coords in self._get_nodes_by_path_segment(
                data, yaml_path, depth, parent=parent, parentref=parentref,
                translated_path=translated_path, ancestry=ancestry
            ):
                self.logger.debug(
                    "Got data of type {} at <{}>{} in the data."
                    .format(
                        type(segment_node_coords.node
                             if hasattr(segment_node_coords, "node")
                             else segment_node_coords),
                        segment_type,
                        except_segment),
                    prefix="Processor::_get_required_nodes: ",
                    data=segment_node_coords)
                if isinstance(segment_node_coords, list):
                    # Most likely the output of a Collector, this list will be
                    # of NodeCoords rather than an actual DOM reference. As
                    # such, it must be treated as a virtual DOM element that
                    # cannot itself be parented to the real DOM, though each
                    # of its elements has a real parent.
                    self.logger.debug(
                        "Got a list:",
                        prefix="Processor::_get_required_nodes: ",
                        data=segment_node_coords)
                    for subnode_coord in self._get_required_nodes(
                            segment_node_coords, yaml_path, depth + 1,
                            parent=parent, parentref=parentref,
                            translated_path=translated_path,
                            ancestry=ancestry, relay_segment=pathseg):
                        yield subnode_coord
                else:
                    # A real DOM node: descend with the match's own lineage
                    # (parent, parentref, path, ancestry) so yielded
                    # NodeCoords can be written back to the DOM.
                    self.logger.debug(
                        "Recursing into the retrieved data...",
                        prefix="Processor::_get_required_nodes: ")
                    for subnode_coord in self._get_required_nodes(
                            segment_node_coords.node, yaml_path, depth + 1,
                            parent=segment_node_coords.parent,
                            parentref=segment_node_coords.parentref,
                            translated_path=segment_node_coords.path,
                            ancestry=segment_node_coords.ancestry,
                            relay_segment=pathseg):
                        self.logger.debug(
                            "Finally returning segment data of type {} at"
                            " parentref {}:"
                            .format(type(subnode_coord.node),
                                    subnode_coord.parentref),
                            prefix="Processor::_get_required_nodes: ",
                            data=subnode_coord, footer=" ")
                        yield subnode_coord
        else:
            # Every segment has been consumed: *data* is the addressed
            # node; emit it along with its full lineage.
            self.logger.debug(
                "Finally returning data of type {} at parentref {}:"
                .format(type(data), parentref),
                prefix="Processor::_get_required_nodes: ",
                data=data, footer=" ")
            yield NodeCoords(
                data, parent, parentref, translated_path, ancestry,
                relay_segment)
# pylint: disable=locally-disabled,too-many-statements
def _get_optional_nodes(
self, data: Any, yaml_path: YAMLPath, value: Any = None,
depth: int = 0, **kwargs: Any
) -> Generator[NodeCoords, None, None]:
"""
Return zero or more pre-existing NodeCoords matching a YAML Path.
Will create nodes that are missing, as long as any missing segments are
deterministic (SEARCH and COLLECTOR segments are non-deterministic).
Parameters:
1. data (Any) The parsed YAML data to process
2. yaml_path (YAMLPath) The pre-parsed YAML Path to follow
3. value (Any) The value to assign to the element
4. depth (int) For recursion, this identifies which segment of
yaml_path to evaluate; default=0
Keyword Arguments:
* parent (ruamel.yaml node) The parent node from which this query
originates
* parentref (Any) The Index or Key of data within parent
* relay_segment (PathSegment) YAML Path segment presently under
evaluation
* translated_path (YAMLPath) YAML Path indicating precisely which node
is being evaluated
* ancestry (List[AncestryEntry]) Stack of ancestors preceding the
present node under evaluation
Returns: (Generator[NodeCoords, None, None]) The requested NodeCoords
as they are matched
Raises:
- `YAMLPathException` when the YAML Path is invalid.
- `NotImplementedError` when a segment of the YAML Path indicates
an element that does not exist in data and this code isn't
yet prepared to add it.
"""
parent: Any = kwargs.pop("parent", None)
parentref: Any = kwargs.pop("parentref", None)
relay_segment: PathSegment = kwargs.pop("relay_segment", None)
translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
segments = yaml_path.escaped
# pylint: disable=locally-disabled,too-many-nested-blocks
if segments and len(segments) > depth:
pathseg: PathSegment = yaml_path.unescaped[depth]
(segment_type, unstripped_attrs) = pathseg
stripped_attrs: PathAttributes = segments[depth][1]
except_segment = str(unstripped_attrs)
self.logger.debug(
"Seeking element <{}>{} in data of type {}:"
.format(segment_type, except_segment, type(data)),
prefix="Processor::_get_optional_nodes: ",
data=data, footer=" ")
# The next element may not exist; this method ensures that it does
matched_nodes = 0
for next_coord in self._get_nodes_by_path_segment(
data, yaml_path, depth, parent=parent, parentref=parentref,
translated_path=translated_path, ancestry=ancestry
):
matched_nodes += 1
if isinstance(next_coord, list):
# Drill into Collector results
for node_coord in self._get_optional_nodes(
next_coord, yaml_path, value, depth + 1,
parent=parent, parentref=parentref,
translated_path=translated_path,
ancestry=ancestry,
relay_segment=pathseg
):
self.logger.debug((
"Relaying a drilled-into Collector node:"),
prefix="Processor::_get_optional_nodes: ",
data={
"node": node_coord,
"parent": parent,
"parentref": parentref
}
)
yield node_coord
continue
if next_coord.node is None:
self.logger.debug((
"Relaying a None element <{}>{} from the data."
).format(segment_type, except_segment),
prefix="Processor::_get_optional_nodes: ",
data=next_coord
)
yield next_coord
continue
self.logger.debug((
"Found element <{}>{} in the data; recursing into it..."
).format(segment_type, except_segment),
prefix="Processor::_get_optional_nodes: ",
data=next_coord
)
for node_coord in self._get_optional_nodes(
next_coord.node, yaml_path, value, depth + 1,
parent=next_coord.parent,
parentref=next_coord.parentref,
translated_path=next_coord.path,
ancestry=next_coord.ancestry,
relay_segment=pathseg
):
yield node_coord
if (
matched_nodes < 1
and segment_type is not PathSegmentTypes.SEARCH
and segment_type is not PathSegmentTypes.KEYWORD_SEARCH
):
# Add the missing element
self.logger.debug(
("Processor::_get_optional_nodes: Element <{}>{} is"
" unknown in the data! Applying default, <{}>{} to"
" data:"
).format(segment_type, except_segment, type(value), value),
data=data
)
if isinstance(data, list):
self.logger.debug(
"Processor::_get_optional_nodes: Dealing with a list"
)
if (
segment_type is PathSegmentTypes.ANCHOR
and isinstance(stripped_attrs, str)
):
next_node = Nodes.build_next_node(
yaml_path, depth + 1, value
)
new_ele = Nodes.append_list_element(
data, next_node, stripped_attrs
)
new_idx = len(data) - 1
next_translated_path = translated_path + "[{}]".format(
new_idx)
next_ancestry = ancestry + [(data, new_idx)]
for node_coord in self._get_optional_nodes(
new_ele, yaml_path, value, depth + 1,
parent=data, parentref=new_idx,
translated_path=next_translated_path,
ancestry=next_ancestry, relay_segment=pathseg
):
matched_nodes += 1
yield node_coord
elif (
segment_type in [
PathSegmentTypes.INDEX,
PathSegmentTypes.KEY]
):
if isinstance(stripped_attrs, int):
newidx = stripped_attrs
else:
try:
newidx = int(str(stripped_attrs))
except ValueError as wrap_ex:
raise YAMLPathException(
("Cannot add non-integer {} subreference"
+ " to lists")
.format(str(segment_type)),
str(yaml_path),
except_segment
) from wrap_ex
for _ in range(len(data) - 1, newidx):
next_node = Nodes.build_next_node(
yaml_path, depth + 1, value
)
Nodes.append_list_element(data, next_node)
next_translated_path = translated_path + "[{}]".format(
newidx)
next_ancestry = ancestry + [(data, newidx)]
for node_coord in self._get_optional_nodes(
data[newidx], yaml_path, value,
depth + 1, parent=data, parentref=newidx,
translated_path=next_translated_path,
ancestry=next_ancestry, relay_segment=pathseg
):
matched_nodes += 1
yield node_coord
else:
raise YAMLPathException(
"Cannot add {} subreference to lists"
.format(str(segment_type)),
str(yaml_path),
except_segment
)
elif isinstance(data, dict):
self.logger.debug(
"Processor::_get_optional_nodes: Dealing with a"
+ " dictionary"
)
if segment_type is PathSegmentTypes.ANCHOR:
raise YAMLPathException(
"Cannot add ANCHOR keys",
str(yaml_path),
str(unstripped_attrs)
)
if segment_type is PathSegmentTypes.KEY:
data[stripped_attrs] = Nodes.build_next_node(
yaml_path, depth + 1, value
)
next_translated_path = (
translated_path + YAMLPath.escape_path_section(
str(stripped_attrs),
translated_path.seperator))
next_ancestry = ancestry + [(data, stripped_attrs)]
for node_coord in self._get_optional_nodes(
data[stripped_attrs], yaml_path, value,
depth + 1, parent=data,
parentref=stripped_attrs,
translated_path=next_translated_path,
ancestry=next_ancestry, relay_segment=pathseg
):
matched_nodes += 1
yield node_coord
else:
raise YAMLPathException(
"Cannot add {} subreference to dictionaries"
.format(str(segment_type)),
str(yaml_path),
except_segment
)
elif isinstance(data, (CommentedSet, set)):
self.logger.debug(
"Processor::_get_optional_nodes: Dealing with a set"
)
if segment_type is not PathSegmentTypes.KEY:
raise YAMLPathException(
"Cannot add {} subreference to sets"
.format(str(segment_type)),
str(yaml_path),
except_segment
)
data.add(stripped_attrs)
yield NodeCoords(
data, parent, parentref,
translated_path, ancestry,
relay_segment)
else:
self.logger.debug(
"Assuming data is scalar and cannot receive a {}"
" subreference at {} ({}/{}):".format(
str(segment_type), str(yaml_path), str(depth + 1),
str(len(yaml_path))),
| |
<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Service']
class Service(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 hosting_mode: Optional[pulumi.Input['HostingMode']] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 partition_count: Optional[pulumi.Input[int]] = None,
                 replica_count: Optional[pulumi.Input[int]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 search_service_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Describes an Azure Cognitive Search service and its current state.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input['HostingMode'] hosting_mode: Applicable only for the standard3 SKU. You can set this property to enable up to 3 high density partitions that allow up to 1000 indexes, which is much higher than the maximum indexes allowed for any other SKU. For the standard3 SKU, the value is either 'default' or 'highDensity'. For all other SKUs, this value must be 'default'.
        :param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.
        :param pulumi.Input[str] location: The geographic location of the resource. This must be one of the supported and registered Azure Geo Regions (for example, West US, East US, Southeast Asia, and so forth). This property is required when creating a new resource.
        :param pulumi.Input[int] partition_count: The number of partitions in the Search service; if specified, it can be 1, 2, 3, 4, 6, or 12. Values greater than 1 are only valid for standard SKUs. For 'standard3' services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
        :param pulumi.Input[int] replica_count: The number of replicas in the Search service. If specified, it must be a value between 1 and 12 inclusive for standard SKUs or between 1 and 3 inclusive for basic SKU.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] search_service_name: The name of the Azure Cognitive Search service to create or update. Search service names must only contain lowercase letters, digits or dashes, cannot use dash as the first two or last one characters, cannot contain consecutive dashes, and must be between 2 and 60 characters in length. Search service names must be globally unique since they are part of the service URI (https://<name>.search.windows.net). You cannot change the service name after the service is created.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The SKU of the Search Service, which determines price tier and capacity limits. This property is required when creating a new Search Service.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags to help categorize the resource in the Azure portal.
        """
        # Legacy keyword arguments __name__/__opts__ predate the current SDK
        # surface: honor them for backward compatibility but warn the caller.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource": the property bag is
        # supplied by the engine, not constructed here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            # Apply server-side defaults locally so previews are accurate.
            if hosting_mode is None:
                hosting_mode = 'default'
            __props__['hosting_mode'] = hosting_mode
            __props__['identity'] = identity
            __props__['location'] = location
            if partition_count is None:
                partition_count = 1
            __props__['partition_count'] = partition_count
            if replica_count is None:
                replica_count = 1
            __props__['replica_count'] = replica_count
            # resource_group_name is only optional when the resource is being
            # looked up by URN rather than created.
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['search_service_name'] = search_service_name
            __props__['sku'] = sku
            __props__['tags'] = tags
            # Output-only properties: resolved by the provider after creation.
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['status'] = None
            __props__['status_details'] = None
            __props__['type'] = None
        # Register aliases for every API-version-specific type token so state
        # migrates cleanly between versions of this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:search:Service"), pulumi.Alias(type_="azure-nextgen:search/latest:Service"), pulumi.Alias(type_="azure-nextgen:search/v20191001preview:Service"), pulumi.Alias(type_="azure-nextgen:search/v20200313:Service"), pulumi.Alias(type_="azure-nextgen:search/v20200801:Service"), pulumi.Alias(type_="azure-nextgen:search/v20200801preview:Service")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Service, __self__).__init__(
            'azure-nextgen:search/v20150819:Service',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':
"""
Get an existing Service resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Service(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hostingMode")
def hosting_mode(self) -> pulumi.Output[Optional[str]]:
"""
Applicable only for the standard3 SKU. You can set this property to enable up to 3 high density partitions that allow up to 1000 indexes, which is much higher than the maximum indexes allowed for any other SKU. For the standard3 SKU, the value is either 'default' or 'highDensity'. For all other SKUs, this value must be 'default'.
"""
return pulumi.get(self, "hosting_mode")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The geographic location of the resource. This must be one of the supported and registered Azure Geo Regions (for example, West US, East US, Southeast Asia, and so forth). This property is required when creating a new resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> pulumi.Output[Optional[int]]:
"""
The number of partitions in the Search service; if specified, it can be 1, 2, 3, 4, 6, or 12. Values greater than 1 are only valid for standard SKUs. For 'standard3' services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
"""
return pulumi.get(self, "partition_count")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The state of the last provisioning operation performed on the Search service. Provisioning is an intermediate state that occurs while service capacity is being established. After capacity is set up, provisioningState changes to either 'succeeded' or 'failed'. Client applications can poll provisioning status (the recommended polling interval is from 30 seconds to one minute) by using the Get Search Service operation to see when an operation is completed. If you are using the free service, this value tends to come back as 'succeeded' directly in the call to Create Search service. This is because the free service uses capacity that is already set up.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> pulumi.Output[Optional[int]]:
"""
The number of replicas in the Search service. If specified, it must be a value between 1 and 12 inclusive for standard SKUs or between 1 and 3 inclusive for basic SKU.
"""
return pulumi.get(self, "replica_count")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The SKU of the Search Service, which determines price tier and capacity limits. This property is required when creating a new Search Service.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the Search service. Possible values include: 'running': The Search service is running and no provisioning operations are underway. 'provisioning': The Search service is being provisioned or scaled up or down. 'deleting': The Search service is being deleted. 'degraded': The Search service is degraded. This can occur when the underlying search units are not healthy. The Search service is most likely operational, but performance might be slow and some requests might be dropped. 'disabled': The Search service is disabled. In this state, the service will reject all API requests. 'error': The Search service is in an error state. If your service is in the degraded, disabled, or error states, it means the Azure Cognitive Search team is actively investigating the underlying issue. Dedicated services in these states are still chargeable based on the number of search units provisioned.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="statusDetails")
def | |
# self.ut_self.assertEqual(response.status_code, 400)
#
# def api_user_layer_update_error_401_unauthorized(self, feature_json):
# response = self.session.put(self.URL + '/api/user_layer',
# data=dumps(feature_json), headers=self.headers)
#
# self.ut_self.assertEqual(response.status_code, 401)
# user layer errors - delete
def api_user_layer_delete_error_400_bad_request(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/user_layer/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_user_layer_delete_error_401_unauthorized(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/user_layer/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_user_layer_delete_error_403_forbidden(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/user_layer/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_user_layer_delete_error_404_not_found(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/user_layer/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
##################################################
# REFERENCE
##################################################
def api_reference(self, expected=None, expected_at_least=None, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/reference/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
if expected is not None:
self.ut_self.assertEqual(expected, resulted)
elif expected_at_least is not None:
self.compare_expected_at_least_with_resulted(expected_at_least, resulted)
def api_reference_create(self, resource_json, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.post(self.URL + '/api/reference/create/{0}'.format(arguments),
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
self.ut_self.assertIn("reference_id", resulted)
self.ut_self.assertNotEqual(resulted["reference_id"], -1)
# put the id received in the original JSON
resource_json["properties"]["reference_id"] = resulted["reference_id"]
return resource_json
def api_reference_update(self, resource_json):
response = self.session.put(self.URL + '/api/reference/',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
def api_reference_delete(self, feature_id):
response = self.session.delete(self.URL + '/api/reference/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
# reference errors - get
def api_reference_error_400_bad_request(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/reference/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 400)
# reference errors - create
def api_reference_create_error_400_bad_request(self, resource_json):
response = self.session.post(self.URL + '/api/reference/create/',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_reference_create_error_401_unauthorized(self, feature_json):
response = self.session.post(self.URL + '/api/reference/create/',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
# reference errors - update
def api_reference_update_error_400_bad_request(self, resource_json):
response = self.session.put(self.URL + '/api/reference',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_reference_update_error_401_unauthorized(self, resource_json):
response = self.session.put(self.URL + '/api/reference',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_reference_update_error_403_forbidden(self, resource_json):
response = self.session.put(self.URL + '/api/reference',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_reference_update_error_404_not_found(self, resource_json):
response = self.session.put(self.URL + '/api/reference',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
# reference errors - delete
def api_reference_delete_error_400_bad_request(self, feature_id):
response = self.session.delete(self.URL + '/api/reference/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_reference_delete_error_401_unauthorized(self, feature_id):
response = self.session.delete(self.URL + '/api/reference/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_reference_delete_error_403_forbidden(self, feature_id):
response = self.session.delete(self.URL + '/api/reference/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_reference_delete_error_404_not_found(self, feature_id):
response = self.session.delete(self.URL + '/api/reference/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
##################################################
# KEYWORD
##################################################
def api_keyword(self, expected=None, expected_at_least=None, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/keyword/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
if expected is not None:
self.ut_self.assertEqual(expected, resulted)
elif expected_at_least is not None:
self.compare_expected_at_least_with_resulted(expected_at_least, resulted)
def api_keyword_create(self, resource_json, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.post(self.URL + '/api/keyword/create/{0}'.format(arguments),
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
self.ut_self.assertIn("keyword_id", resulted)
self.ut_self.assertNotEqual(resulted["keyword_id"], -1)
# put the id received in the original JSON
resource_json["properties"]["keyword_id"] = resulted["keyword_id"]
return resource_json
def api_keyword_update(self, resource_json):
response = self.session.put(self.URL + '/api/keyword/',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
def api_keyword_delete(self, feature_id):
response = self.session.delete(self.URL + '/api/keyword/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
# keyword errors - get
def api_keyword_error_400_bad_request(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/keyword/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 400)
# keyword errors - create
def api_keyword_create_error_400_bad_request(self, resource_json):
response = self.session.post(self.URL + '/api/keyword/create/',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_keyword_create_error_401_unauthorized(self, feature_json):
response = self.session.post(self.URL + '/api/keyword/create/',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
# keyword errors - update
def api_keyword_update_error_400_bad_request(self, resource_json):
response = self.session.put(self.URL + '/api/keyword',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_keyword_update_error_401_unauthorized(self, feature_json):
response = self.session.put(self.URL + '/api/keyword',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_keyword_update_error_403_forbidden(self, feature_json):
response = self.session.put(self.URL + '/api/keyword',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_keyword_update_error_404_not_found(self, feature_json):
response = self.session.put(self.URL + '/api/keyword',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
# keyword errors - delete
def api_keyword_delete_error_400_bad_request(self, feature_id):
response = self.session.delete(self.URL + '/api/keyword/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_keyword_delete_error_401_unauthorized(self, feature_id):
response = self.session.delete(self.URL + '/api/keyword/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_keyword_delete_error_403_forbidden(self, feature_id):
response = self.session.delete(self.URL + '/api/keyword/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_keyword_delete_error_404_not_found(self, feature_id):
response = self.session.delete(self.URL + '/api/keyword/{0}'.format(feature_id),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
##################################################
# CHANGESET
##################################################
def api_changeset(self, expected, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/changeset/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
self.ut_self.assertEqual(expected, resulted)
# if expected_at_least is not None:
# """
# Test Case: Changesets can not be removed, because of this, the result of the returned
# changesets may be larger than expected (because there are other tests that create
# changesets). Because of this I pass a subset of minimum changesets that have to exist.
# """
#
# """ Explanation: Generator creating booleans by looping through list
# 'expected_at_least["features"]', checking if that item is in list 'resulted["features"]'.
# all() returns True if every item is truthy, else False.
# https://stackoverflow.com/questions/16579085/python-verifying-if-one-list-is-a-subset-of-the-other
# """
# __set__ = resulted["features"] # set returned
# __subset__ = expected_at_least["features"] # subset expected
#
# # check if the elements of a subset is in a set, if OK, return True, else False
# resulted_bool = all(element in __set__ for element in __subset__)
#
# self.ut_self.assertTrue(resulted_bool)
def api_changeset_create(self, resource_json):
# do a GET call, sending a changeset to add in DB
response = self.session.post(self.URL + '/api/changeset/create',
data=dumps(resource_json), headers=self.headers)
id = int(response.text)
self.ut_self.assertEqual(response.status_code, 200)
self.ut_self.assertGreater(id, 0)
return id
def api_changeset_close(self, resource_json):
response = self.session.post(self.URL + '/api/changeset/close',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
def api_changeset_delete(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/changeset/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
# changeset errors - get
def api_changeset_error_400_bad_request(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/changeset/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 400)
# changeset errors - create
def api_changeset_create_error_400_bad_request(self, feature_json):
response = self.session.post(self.URL + '/api/changeset/create/',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_changeset_create_error_401_unauthorized(self, feature_json):
response = self.session.post(self.URL + '/api/changeset/create/',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
# changeset errors - close
def api_changeset_close_error_400_bad_request(self, resource_json):
response = self.session.post(self.URL + '/api/changeset/close',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_changeset_close_error_401_unauthorized(self, resource_json):
response = self.session.post(self.URL + '/api/changeset/close',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_changeset_close_error_404_not_found(self, resource_json):
response = self.session.post(self.URL + '/api/changeset/close',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
def api_changeset_close_error_409_conflict(self, resource_json):
response = self.session.post(self.URL + '/api/changeset/close',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 409)
# changeset errors - delete
def api_changeset_delete_error_400_bad_request(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/changeset/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_changeset_delete_error_401_unauthorized(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/changeset/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_changeset_delete_error_403_forbidden(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/changeset/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_changeset_delete_error_404_not_found(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/changeset/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
##################################################
# NOTIFICATION
##################################################
def api_notification(self, expected=None, expected_at_least=None, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/notification/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
if expected is not None:
self.ut_self.assertEqual(expected, resulted)
elif expected_at_least is not None:
self.compare_expected_at_least_with_resulted(expected_at_least, resulted)
def api_notification_create(self, resource_json):
response = self.session.post(self.URL + '/api/notification/create',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
self.ut_self.assertIn("notification_id", resulted)
self.ut_self.assertNotEqual(resulted["notification_id"], -1)
# put the id received in the original JSON
resource_json["properties"]["notification_id"] = resulted["notification_id"]
return resource_json
def api_notification_update(self, resource_json):
response = self.session.put(self.URL + '/api/notification/',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
def api_notification_delete(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/notification/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 200)
# notification errors - get
def api_notification_error_400_bad_request(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/notification/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 400)
# notification errors - create
def api_notification_create_error_400_bad_request(self, resource_json):
response = self.session.post(self.URL + '/api/notification/create',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_notification_create_error_401_unauthorized(self, feature_json):
response = self.session.post(self.URL + '/api/notification/create',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
# notification errors - update
def api_notification_update_error_400_bad_request(self, resource_json):
response = self.session.put(self.URL + '/api/notification',
data=dumps(resource_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_notification_update_error_401_unauthorized(self, feature_json):
response = self.session.put(self.URL + '/api/notification',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_notification_update_error_403_forbidden(self, feature_json):
response = self.session.put(self.URL + '/api/notification',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_notification_update_error_404_not_found(self, feature_json):
response = self.session.put(self.URL + '/api/notification',
data=dumps(feature_json), headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
# notification errors - delete
def api_notification_delete_error_400_bad_request(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/notification/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 400)
def api_notification_delete_error_401_unauthorized(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/notification/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 401)
def api_notification_delete_error_403_forbidden(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/notification/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 403)
def api_notification_delete_error_404_not_found(self, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.delete(self.URL + '/api/notification/{0}'.format(arguments),
headers=self.headers)
self.ut_self.assertEqual(response.status_code, 404)
##################################################
# NOTIFICATION RELATED TO USER
##################################################
def api_notification_related_to_user(self, expected=None, expected_at_least=None, **arguments):
arguments = get_url_arguments(**arguments)
response = self.session.get(self.URL + '/api/notification_related_to_user/{0}'.format(arguments))
self.ut_self.assertEqual(response.status_code, 200)
resulted = loads(response.text) # convert string to dict/JSON
if expected is not None:
self.ut_self.assertEqual(expected, | |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 05 11:55:37 2018
@author: hugonnet
DDEM LIBRARY:
Library of Python functions for manipulating DEM differences
"""
import os, sys, shutil
import csv
import numpy as np
import pandas as pd
from numpy.polynomial.polynomial import polyfit, polyval
import random
from vectlib import simplify_shp_fn, buffer_shp_fn, inters_shp_fn, union_shp_fn, poi_polygon, isempty_firstfeat, copy_shp_fn, extent_shp_ref
from rastlib import rasterize_shp, proximity_shp, polygonize_fn, write_nanarray, read_nanarray, proximity_rast_fn, pixel_size, create_mem_raster_on_ref
from shlib import create_tmp_dir_for_outfile, remove_tmp_dir_for_outfile
from scipy.ndimage.filters import convolve
import scipy.stats as st
from fillalglib import floodfill_discontinuous
import matplotlib.pylab as plt
from matplotlib.backends.backend_pdf import PdfPages
from subprocess import Popen
from pybob.GeoImg import GeoImg
from pybob.plot_tools import plot_ddem_results, plot_polygon_df
import geopandas as gpd
def ddem_discrete_hypso(ddem, dem, mask, gsd, proxi=None, bin_type='fixed', bin_val=50., filt='5NMAD'):
    """Bin elevation change over a mask into discrete hypsometric (elevation) bands.

    Parameters
    ----------
    ddem : np.ndarray
        Elevation-difference (dh) array.
    dem : np.ndarray
        Reference elevation array, same shape as ddem.
    mask : np.ndarray of bool
        Area of interest (e.g. a glacier outline rasterized).
    gsd : float
        Ground sampling distance (pixel size); areas are counts * gsd**2.
    proxi : np.ndarray, optional
        Proximity array; if given, the max finite proximity per bin is returned.
    bin_type : str
        'fixed' (bin width = bin_val) or 'percentage' (bin width = bin_val %
        of the masked elevation range).
    bin_val : float
        Bin width in elevation units, or percentage (see bin_type).
    filt : str or falsy
        If truthy, dh values deviating more than 5*NMAD from the bin median
        are set to NaN in the returned ddem.

    Returns
    -------
    tuple
        (ddem_out, elev_bin, med_bin, std_bin, nmad_bin, area_tot_bin,
        area_meas_bin, prox), each per-bin array of length nb_bin.

    Raises
    ------
    ValueError
        If bin_type is not 'fixed' or 'percentage'.
    """
    # keep only pixels that are finite in both rasters AND inside the mask
    final_mask = np.logical_and(np.logical_and(np.isfinite(ddem), np.isfinite(dem)), mask)
    dem_on_mask = dem[final_mask]
    ddem_on_mask = ddem[final_mask]
    if proxi is not None:
        proxi_on_mask = proxi[final_mask]
    ddem_out = np.copy(ddem)

    # first bin edge snaps down to a multiple of bin_val; +1 so the max
    # elevation falls inside the last bin
    min_elev = np.min(dem_on_mask) - (np.min(dem_on_mask) % bin_val)
    max_elev = np.max(dem_on_mask) + 1
    if bin_type == 'fixed':
        bin_final = bin_val
    elif bin_type == 'percentage':
        bin_final = np.ceil(bin_val / 100. * (max_elev - min_elev))
    else:
        # was sys.exit(): a library function should raise, not kill the interpreter
        raise ValueError('Bin type not recognized.')
    bins_on_mask = np.arange(min_elev, max_elev, bin_final)
    nb_bin = len(bins_on_mask)

    # per-bin outputs, NaN where a bin has no valid data
    elev_bin = np.full(nb_bin, np.nan)
    nmad_bin = np.full(nb_bin, np.nan)
    med_bin = np.full(nb_bin, np.nan)
    std_bin = np.full(nb_bin, np.nan)
    area_tot_bin = np.full(nb_bin, np.nan)
    area_meas_bin = np.full(nb_bin, np.nan)
    prox = np.full(nb_bin, np.nan)

    for i in range(nb_bin):
        # valid (finite, masked) pixels of this elevation band
        idx_bin = np.array(dem_on_mask >= bins_on_mask[i]) & np.array(
            dem_on_mask < (bins_on_mask[i] + bin_final))
        # all masked pixels of this band, valid or not (for total area)
        idx_orig = np.array(dem >= bins_on_mask[i]) & np.array(
            dem < (bins_on_mask[i] + bin_final)) & mask
        area_tot_bin[i] = np.count_nonzero(idx_orig) * gsd ** 2
        area_meas_bin[i] = np.count_nonzero(idx_bin) * gsd ** 2
        elev_bin[i] = bins_on_mask[i] + bin_final / 2.
        dh_bin = ddem_on_mask[idx_bin]
        if proxi is not None:
            proxi_bin = proxi_on_mask[idx_bin]
            if len(proxi_bin[~np.isnan(proxi_bin)]) > 0:
                prox[i] = np.nanmax(proxi_bin)
        if len(dh_bin[~np.isnan(dh_bin)]) > 0:
            std_bin[i] = np.nanstd(dh_bin)
            med_bin[i] = np.nanmedian(dh_bin)
            if filt:
                # normalized median absolute deviation; 1.4826 scales MAD to
                # the std of a normal distribution
                median_temp = np.nanmedian(dh_bin)
                MAD_temp = np.nanmedian(np.absolute(dh_bin[~np.isnan(dh_bin)] - median_temp))
                NMAD_temp = 1.4826 * MAD_temp
                nmad_bin[i] = NMAD_temp
                # mask outliers beyond 5*NMAD of the bin median in the output
                ddem_out[idx_orig & np.array(np.absolute(ddem_out - median_temp) > 5 * NMAD_temp)] = np.nan

    return ddem_out, elev_bin, med_bin, std_bin, nmad_bin, area_tot_bin, area_meas_bin, prox
def plot_hypso_fit(ddem_masked,dem_masked,elev,med,nmad,std,elevfit,poly_order,pp):
    """Plot the hypsometric dh distribution, its polynomial fit and the fit derivatives.

    Appends three figures to the PdfPages object *pp*: (1) dh-vs-elevation
    scatter with median/nmad/std curves and the polynomial fit, (2) the first
    derivative of the fit, (3) the second derivative.

    Parameters
    ----------
    ddem_masked, dem_masked : masked elevation-change / elevation arrays
    elev, med, nmad, std : per-bin statistics (e.g. from ddem_discrete_hypso)
    elevfit : polynomial coefficients (highest power first, np.polyfit convention)
    poly_order : order of the fit; used only in the plot label
    pp : matplotlib PdfPages object figures are saved into
    """
    mykeep = np.logical_and(np.isfinite(ddem_masked),np.isfinite(dem_masked))
    H = dem_masked[mykeep]
    dH = ddem_masked[mykeep]
    # Subsample at most 25000 points for the raw scatter (random with
    # replacement when more samples are available).
    sampsize = 25000
    if H.size > sampsize:
        mysamp = np.random.randint(0, H.size, sampsize)
    else:
        mysamp = np.arange(0, H.size)
    # Evaluate the fit and its derivatives on a 1-unit elevation grid.
    newelev=np.arange(min(elev),max(elev),1)
    interp_dH = polyval(newelev,elevfit)
    first_der=np.polyder(elevfit)
    second_der=np.polyder(first_der)
    der1_dH = polyval(newelev,first_der)
    der2_dH = polyval(newelev,second_der)
    # Figure 1: raw samples, binned statistics and polynomial fit.
    fig = plt.figure(figsize=(7, 5), dpi=300)
    plt.title('Hypsometric distribution of elevation change', fontsize=14)
    plt.plot(H[mysamp], dH[mysamp], '^', ms=0.75, color='0.5', rasterized=True, fillstyle='full',
             label="Raw [samples]")
    plt.plot(elev, med, '-', ms=2, color='0.15', label='median')
    plt.plot(elev, nmad, 'r--', ms=2, label="nmad")
    plt.plot(elev, std, 'm--', ms=2, label="std")
    plt.plot(newelev,interp_dH,'b--',ms=2,label='polynomial fit: order '+str(poly_order))
    plt.xlim(np.min(elev), np.max(elev))
    plt.ylim(np.min(med) - np.max(std),np.max(std))
    plt.xlabel(' Elevation [m]')
    plt.ylabel('dH [m]')
    plt.legend(loc='lower right')
    pp.savefig(fig, dpi=300)
    # Figure 2: first derivative of the fit.
    fig2 = plt.figure(figsize=(7, 5), dpi=300)
    plt.title('Hypsometric distribution of elevation change', fontsize=14)
    plt.plot(newelev,der1_dH,'g--',ms=2,label='first derivative')
    plt.xlim(np.min(elev), np.max(elev))
    plt.ylim(np.min(der1_dH),np.max(der1_dH))
    plt.xlabel(' Elevation [m]')
    plt.ylabel('dH [m]')
    plt.legend(loc='lower right')
    pp.savefig(fig2, dpi=300)
    # Figure 3: second derivative of the fit.
    fig3 = plt.figure(figsize=(7, 5), dpi=300)
    plt.title('Hypsometric distribution of elevation change', fontsize=14)
    plt.plot(newelev,der2_dH,'c--',ms=2,label='second derivative')
    plt.xlim(np.min(elev), np.max(elev))
    plt.ylim(np.min(der2_dH),np.max(der2_dH))
    plt.xlabel(' Elevation [m]')
    plt.ylabel('dH [m]')
    plt.legend(loc='lower right')
    pp.savefig(fig3, dpi=300)
def ddem_poly_hypso(ddem,dem,mask,gsd,poly_order=5,pp=None,filter_3nmad=True,get_gap_filled=False,get_elev_residual=False):
    """Fit a polynomial to the hypsometric median of elevation change.

    Parameters
    ----------
    ddem, dem : elevation-change and elevation rasters (same shape)
    mask : boolean area-of-interest mask
    gsd : pixel size, forwarded to ddem_discrete_hypso
    poly_order : order of the polynomial fitted to the binned medians
    pp : optional PdfPages object; when given, diagnostic figures are appended
    filter_3nmad : if True, select valid pixels from the NMAD-filtered ddem
    get_gap_filled : if True, fill data gaps of ddem with the polynomial fit
    get_elev_residual : if True, subtract the fit from ddem and standardize by
        a cubic fit of the per-bin dispersion

    Returns
    -------
    (ddem_out, elev, med, std, nmad, elevfit)
    """
    # BUG FIX: ddem_discrete_hypso returns 8 values (the last is the per-bin
    # proximity); the original unpacked only 7 targets, which raised
    # ValueError at runtime. The proximity array is unused here.
    ddem_filtered, elev, med, std, nmad, area_tot, area_meas, _prox = ddem_discrete_hypso(
        ddem, dem, mask, gsd=gsd, bin_val=10.)
    if filter_3nmad:
        final_mask = np.logical_and(np.logical_and(np.isfinite(ddem_filtered), np.isfinite(dem)),mask)
    else:
        final_mask = np.logical_and(np.logical_and(np.isfinite(ddem), np.isfinite(dem)), mask)
    # Exclude the lowest 10% of the masked elevation range from the fit
    # (presumably the noisiest part of the glacier tongue -- TODO confirm).
    perc_mask=10.
    critic_elev=np.nanmin(dem[mask])+perc_mask*(np.nanmax(dem[mask])-np.nanmin(dem[mask]))/100
    final_mask = np.logical_and(final_mask,(dem>critic_elev))
    ddem_data = ddem[final_mask]
    dem_data = dem[final_mask]
    # Polynomial fit of the binned medians above the critical elevation.
    elevfit = polyfit(elev[elev>critic_elev], med[elev>critic_elev], poly_order)
    # Data gaps: masked pixels with a valid DEM but no dh measurement.
    hole_inds = np.where(np.logical_and(np.logical_and(np.isnan(ddem),
                                                       np.isfinite(dem)),mask))
    ddem_out = np.copy(ddem)
    if get_gap_filled:
        hole_elevs = dem[hole_inds]
        interp_points = polyval(hole_elevs, elevfit)
        ddem_out[hole_inds] = interp_points
    if get_elev_residual:
        # Polynomial fit of the dispersion-vs-elevation trend.
        # NOTE(review): this fits std, not nmad, despite the variable name.
        nmadfit = polyfit(elev,std,3)
        apply_mask = np.logical_and(np.logical_and(np.isfinite(ddem), np.isfinite(dem)), mask)
        dem_apply = dem[apply_mask]
        # Subtract the elevation trend from each pixel...
        interp_elev = polyval(dem_apply, elevfit)
        ddem_out[apply_mask] -= interp_elev
        # ...and standardize by the fitted elevation-dependent dispersion.
        interp_nmad = polyval(dem_apply, nmadfit)
        ddem_out[apply_mask] /= interp_nmad
    if pp is not None:
        plot_hypso_fit(ddem_data,dem_data,elev,med,nmad,std,elevfit,poly_order,pp)
    return ddem_out, elev, med, std, nmad, elevfit
def plot_med_hypso_fit(ddem_masked,dem_masked,elev,med,nmad,std,pp):
    """Plot the hypsometric distribution of dh with its binned statistics.

    Appends one scatter figure (raw samples plus median/nmad/std curves) to
    the PdfPages object *pp*.
    """
    finite = np.logical_and(np.isfinite(ddem_masked), np.isfinite(dem_masked))
    elev_pts = dem_masked[finite]
    dh_pts = ddem_masked[finite]
    # Cap the scatter at 25000 points; sample randomly when there are more.
    max_samples = 25000
    if elev_pts.size > max_samples:
        sample_idx = np.random.randint(0, elev_pts.size, max_samples)
    else:
        sample_idx = np.arange(0, elev_pts.size)
    fig = plt.figure(figsize=(7, 5), dpi=300)
    plt.title('Hypsometric distribution of elevation change', fontsize=14)
    plt.plot(elev_pts[sample_idx], dh_pts[sample_idx], '^', ms=0.75, color='0.5', rasterized=True, fillstyle='full',
             label="Raw [samples]")
    plt.plot(elev, med, '-', ms=2, color='0.15', label='median')
    plt.plot(elev, nmad, 'r--', ms=2, label="nmad")
    plt.plot(elev, std, 'm--', ms=2, label="std")
    plt.xlim(np.nanmin(elev), np.nanmax(elev))
    plt.ylim(np.nanmin(med) - np.nanmax(std), np.nanmax(std))
    plt.xlabel(' Elevation [m]')
    plt.ylabel('dH [m]')
    plt.legend(loc='lower right')
    pp.savefig(fig, dpi=300)
def ddem_med_hypso(ddem,dem,mask,gsd,pp=None,proxi=None,filter_3nmad=True,get_gap_filled=False,get_elev_residual=False):
    """Compute hypsometric-median residuals of elevation change.

    Parameters
    ----------
    ddem, dem : elevation-change and elevation rasters (same shape)
    mask : boolean area-of-interest mask
    gsd : pixel size, forwarded to ddem_discrete_hypso
    pp : optional PdfPages object; when given, a diagnostic figure is appended
    proxi : optional proximity raster, forwarded to ddem_discrete_hypso
    filter_3nmad : if True, select valid pixels from the NMAD-filtered ddem
    get_gap_filled : currently unused (gap filling is commented out below)
    get_elev_residual : if True, subtract the interpolated per-bin median from
        ddem (res) and additionally standardize by the per-bin std (res_stdized)

    Returns
    -------
    (res, res_stdized, elev, med, std, nmad, area_tot, area_meas, prox)
    """
    ddem_filtered, elev, med, std, nmad, area_tot, area_meas, prox = ddem_discrete_hypso(
        ddem, dem, mask, gsd=gsd, proxi=proxi, bin_val=50.)
    if filter_3nmad:
        final_mask = np.logical_and(np.logical_and(np.isfinite(ddem_filtered), np.isfinite(dem)),mask)
    else:
        final_mask = np.logical_and(np.logical_and(np.isfinite(ddem), np.isfinite(dem)), mask)
    ddem_data = ddem[final_mask]
    dem_data = dem[final_mask]
    res = np.copy(ddem)
    res_stdized = np.copy(ddem)
    if get_elev_residual:
        apply_mask = np.logical_and(np.logical_and(np.isfinite(ddem), np.isfinite(dem)), mask)
        dem_apply = dem[apply_mask]
        # Subtract the per-bin median interpolated at each pixel's elevation.
        interp_elev = np.interp(dem_apply, elev, med)
        res[apply_mask] -= interp_elev
        # BUG FIX: the original did `res_stdized = res`, aliasing both output
        # arrays so the in-place division below also corrupted `res`. Copy the
        # de-trended residual instead.
        res_stdized = np.copy(res)
        # Standardize by the elevation-dependent dispersion.
        # NOTE(review): interpolates std, not nmad, despite the original comment.
        interp_nmad = np.interp(dem_apply, elev, std)
        res_stdized[apply_mask] /= interp_nmad
    if pp is not None:
        plot_med_hypso_fit(ddem_data, dem_data, elev, med, nmad, std, pp)
    return res, res_stdized, elev, med, std, nmad, area_tot, area_meas, prox
def get_geophys_var_hypso(fn_ddem,fn_dem,fn_shp,out_dir,path_to_r_geophys):
    """Derive hypsometric residuals of elevation change for one glacier outline.

    Reads an elevation-change raster (fn_ddem), a DEM (fn_dem) and an outline
    shapefile (fn_shp); computes hypsometric median/std/nmad curves, raw and
    standardized residual rasters, a multi-page diagnostic PDF, and
    normalized + raw CSV summaries, all written into out_dir.

    NOTE(review): path_to_r_geophys is only referenced by the commented-out
    Rscript call near the end, so it is currently unused.
    """
    pp = PdfPages(os.path.join(out_dir, 'hypsometric_fit_results.pdf'))
    ddem=read_nanarray(fn_ddem)
    # Remove gross outliers (|dh| > 60 m) -- threshold presumably tuned to the
    # dataset; TODO confirm.
    ddem[np.absolute(ddem)>60]=np.nan
    # ddem = ddem*12.
    dem=read_nanarray(fn_dem)
    mask = rasterize_shp(fn_shp, fn_dem)
    gsd = pixel_size(fn_ddem)
    # NOTE(review): fn_proxi is assigned but never used below.
    fn_proxi=os.path.join(out_dir,'proxi.tif')
    # Proximity to the outline, computed on the interior of the polygon.
    proxi = proximity_shp(fn_shp,fn_ddem,type_prox='interior')
    #first get residuals of poly fit
    res, res_stdized, elev, med, std, nmad, area_tot, area_meas, prox = ddem_med_hypso(ddem,dem,mask,gsd,pp=pp,proxi=proxi,get_elev_residual=True)
    plt.close('all')
    # Write intermediate rasters so they can be re-read as GeoImg objects.
    fn_mask=os.path.join(out_dir,'mask.tif')
    write_nanarray(fn_mask,fn_ddem,mask)
    fn_res=os.path.join(out_dir,'residual.tif')
    fn_res_stdized = os.path.join(out_dir,'residual_standardized.tif')
    write_nanarray(fn_res,fn_ddem,res)
    write_nanarray(fn_res_stdized,fn_ddem,res_stdized)
    mask_geo = GeoImg(fn_mask)
    res_geo = GeoImg(fn_res)
    res_stdized_geo = GeoImg(fn_res_stdized)
    ddem_geo = GeoImg(fn_ddem)
    dem_geo=GeoImg(fn_dem)
    # res_geo.img[np.invert(mask)] = np.nan
    # Crop every raster to the outline extent (note the axis reordering of the
    # extent tuple expected by crop_to_extent).
    extent = extent_shp_ref(fn_shp,fn_dem)
    crop_res = res_geo.crop_to_extent([extent[0],extent[2],extent[1],extent[3]])
    crop_res_stdized = res_stdized_geo.crop_to_extent([extent[0],extent[2],extent[1],extent[3]])
    crop_ddem = ddem_geo.crop_to_extent([extent[0],extent[2],extent[1],extent[3]])
    crop_mask = mask_geo.crop_to_extent([extent[0],extent[2],extent[1],extent[3]])
    crop_dem = dem_geo.crop_to_extent([extent[0],extent[2],extent[1],extent[3]])
    fn_crop_res_stdized = os.path.join(out_dir,'res_stdized_cropped.tif')
    fn_crop_mask = os.path.join(out_dir,'mask_cropped.tif')
    fn_crop_dem = os.path.join(out_dir,'dem_cropped.tif')
    # Mask pixels outside the outline before writing.
    crop_res_stdized.img[crop_mask.img != 1]=np.nan
    crop_res_stdized.write(fn_crop_res_stdized)
    crop_mask.write(fn_crop_mask)
    crop_dem.write(fn_crop_dem)
    crop_res.img[crop_mask.img != 1]=np.nan
    # NOTE(review): the next line repeats the masking already applied above
    # (harmless, in-place on the same array).
    crop_res_stdized.img[crop_mask.img != 1]=np.nan
    # crop_ddem.img = 12*crop_ddem.img
    # Diagnostic figures: outline, dh at two color scales, raw and
    # standardized residuals.
    clim_ddem_raw = np.nanmax(np.absolute(med))
    outline_gla = gpd.read_file(fn_shp)
    fig, _ = plot_polygon_df(outline_gla, edgecolor='k', lw=2, alpha=0.5)
    plt.title('Outline')
    pp.savefig(fig, dpi=300)
    fig = plot_ddem_results(crop_ddem, clim=(-clim_ddem_raw,clim_ddem_raw), colormap='Spectral')[0]
    plt.title('Elevation change [m] (Large scale)')
    pp.savefig(fig, dpi=300)
    fig = plot_ddem_results(crop_ddem, clim=(-3, 3), colormap='Spectral')[0]
    plt.title('Elevation change [m] (Thin scale)')
    pp.savefig(fig, dpi=300)
    clim_res = np.nanmean(np.absolute(nmad))
    fig = plot_ddem_results(crop_res, clim=(-clim_res,clim_res),colormap='Spectral')[0]
    plt.title('Hypsometric residual of elevation change [m] \n (Elevation change minus hypsometric median)')
    pp.savefig(fig, dpi=300)
    fig = plot_ddem_results(crop_res_stdized, clim=(-1, 1), colormap='Spectral')[0]
    plt.title('Standardized hypsometric residual of elevation change [no unit] \n (Elevation change minus hypsometric median divided by hypsometric nmad)')
    pp.savefig(fig, dpi=300)
    pp.close()
    plt.close('all')
    # Remove intermediate (uncropped) rasters.
    os.remove(fn_res)
    os.remove(fn_mask)
    os.remove(fn_res_stdized)
    #normalize elevation
    max_elev=np.nanmax(elev)
    min_elev=np.nanmin(elev)
    elev_n = (elev - min_elev) / (max_elev - min_elev)
    #normalize dh
    max_dh=np.nanmax(med)
    min_dh=np.nanmin(med)
    # Use the mean dh of the accumulation area (top 20% of the elevation
    # range) as the upper normalization bound when it is close enough to the
    # raw maximum -- more robust than a single-bin max.
    accu_elev = min_elev + 80*(max_elev - min_elev)/100
    tmp_max_dh=np.nanmean(med[elev>accu_elev]) #use mean of accumulation instead of max
    if np.abs((np.nanmax(med)-tmp_max_dh)/(max_dh-min_dh))<0.3:
        max_dh = tmp_max_dh
    # NOTE(review): (min_dh - med) flips the sign of the normalized dh curve
    # (thinning becomes positive) -- confirm this convention is intended.
    med_n = (min_dh - med) / (max_dh - min_dh)
    std_n = std / (max_dh - min_dh)
    nmad_n = nmad / (max_dh - min_dh)
    #write normalized data
    # Resample the normalized curves on a regular 0..1 grid (step 0.01).
    elev_rs = np.arange(0,1,0.01)
    med_rs = np.interp(elev_rs,elev_n,med_n)
    std_rs = np.interp(elev_rs,elev_n,std_n)
    nmad_rs = np.interp(elev_rs,elev_n,nmad_n)
    area_rs = np.interp(elev_rs,elev_n,area_tot)
    df = pd.DataFrame()
    df= df.assign(norm_elev=elev_rs,norm_med_dh=med_rs,norm_std_dh=std_rs,norm_nmad_rs=nmad_rs,area_rs=area_rs)
    df_raw = pd.DataFrame()
    df_raw =df_raw.assign(elev=elev,med_dh=med,std_dh=std,nmad_dh=nmad,area_tot=area_tot,area_meas=area_meas,prox=prox)
    df.to_csv(os.path.join(out_dir,'df_norm_dh_elev.csv'))
    df_raw.to_csv(os.path.join(out_dir,'df_raw_dh_elev.csv'))
    # Drop references to the large arrays to release memory.
    ddem = dem = mask = res = res_stdized = crop_mask = crop_res_stdized = crop_res = crop_ddem = crop_dem = ddem_geo = dem_geo = res_geo = res_stdized_geo = None
    #get variogram with moving elevation window from R
    # cmd = 'Rscript '+path_to_r_geophys+' -d '+fn_crop_dem+' -r '+fn_crop_res_stdized+' -m '+fn_crop_mask+' -v Exp -o '+out_dir
    # fn_log = os.path.join(out_dir,'r_geophys.log')
    # log=open(fn_log,'w')
    # p=Popen(cmd,stdout=log,stderr=log,shell=True)
    # p.wait()
    # log.close()
    os.remove(fn_crop_dem)
    os.remove(fn_crop_res_stdized)
    os.remove(fn_crop_mask)
def kernel_tvol_outline_adjust(fn_ddem,fn_dem,fn_outline_in,fn_outline_out,fn_mask_stable,fn_mask_gla,tolerance_factor=5.,kernel_size=9,only_thinning=True):
def gkern(kernlen=21):
#source: https://stackoverflow.com/questions/29731726/how-to-calculate-a-gaussian-kernel-matrix-efficiently-in-numpy
"""Returns a 2D Gaussian kernel."""
lim = kernlen // 2 + (kernlen % 2) / 2
x = np.linspace(-lim, lim, kernlen + 1)
kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d / kern2d.sum()
#inside parameters based on prior knowledge:
tmp_dir=create_tmp_dir_for_outfile(fn_outline_out)
#read
ddem = read_nanarray(fn_ddem)
ddem[np.absolute(ddem)>60]=np.nan
dem = read_nanarray(fn_dem)
mask_other_gla = (read_nanarray(fn_mask_stable) == 1)
mask_gla = (read_nanarray(fn_mask_gla) == 1)
mask_stable = np.invert(np.logical_or(mask_gla,mask_other_gla))
res = pixel_size(fn_dem)
elev, med, std, nmad = ddem_discrete_hypso(ddem, dem, mask_gla,gsd=res,bin_val=10)[1:5]
#first, keep stable terrain
keep_stable = np.logical_and(mask_stable,np.isfinite(ddem))
ddem_on_stable=ddem[keep_stable]
#get nmad as representative statistic of stable terrain
median_stable = np.nanmedian(ddem_on_stable)
mad_stable = np.nanmedian(np.absolute(ddem_on_stable[~np.isnan(ddem_on_stable)] - median_stable))
nmad_stable = 1.4826 * mad_stable
# #filter heavy outliers to get a fair representation of overall stable terrain (without unmapped glaciers, landslides, etc...)
# ddem_on_stable[np.array(np.absolute(ddem_on_stable - median_stable) > 3 * nmad_stable)] = np.nan
#rasterize outline
# mask_outline = rasterize_shp(fn_outline_in,fn_ddem)
#get an idea of thinning rate and amplitude of adjustment
# dh_on_outline = ddem_poly_elev(ddem,dem,5,mask_outline)
| |
i1IIi / I1ii11iIi11i - O0
def lisp_get_lookup_string(input_str):
    """Parse a lookup string of the form "<eid>" or "<source-eid>-><dest-eid>".

    Each side may be a plain address ("1.2.3.4") or a prefix ("10.0.0.0/8").

    Returns a list [source, source_is_prefix, dest, dest_is_prefix] where
    source/dest are lisp.lisp_address objects and the booleans tell whether
    the corresponding string carried a mask length.
    """
    source_str = input_str
    dest_str = None
    if (input_str.find("->") != -1):
        parts = input_str.split("->")
        source_str = parts[0]
        dest_str = parts[1]

    source = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    dest = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)

    # Source side: a "/" means it is a prefix, otherwise a host address.
    fields = source_str.split("/")
    if (len(fields) == 1):
        source.store_address(fields[0])
        source_is_prefix = False
    else:
        source.store_prefix(source_str)
        source_is_prefix = True

    # Destination side (only present for "source->dest" lookups).
    dest_is_prefix = source_is_prefix
    if (dest_str):
        fields = dest_str.split("/")
        if (len(fields) == 1):
            dest.store_address(fields[0])
            dest_is_prefix = False
        else:
            # BUG FIX: the original stored the *source* string here, so a
            # "a->b/len" lookup stored prefix "a" into the dest address.
            dest.store_prefix(dest_str)
            dest_is_prefix = True

    return ([source, source_is_prefix, dest, dest_is_prefix])
if 68 - 68: OoO0O00 % O0 * iIii1I11I1II1 / oO0o * o0oOOo0O0Ooo + OOooOOo
if 89 - 89: ooOoO0o * I1IiiI . oO0o
if 75 - 75: ooOoO0o - iII111i % iII111i + ooOoO0o * o0oOOo0O0Ooo - I1ii11iIi11i
if 26 - 26: I11i * Ii1I % I1IiiI + iII111i
if 38 - 38: iII111i - Oo0Ooo / Ii1I + oO0o . iII111i + IiII
if 19 - 19: Ii1I
if 51 - 51: iIii1I11I1II1
def lisp_show_map_cache_lookup(eid_str):
    """Return an HTML fragment describing a map-cache lookup for eid_str."""
    source, source_is_prefix, dest, dest_is_prefix = lisp_get_lookup_string(eid_str)

    output = "<br>"

    # For "source->dest" lookups the cache is keyed on the dest EID.
    lookup_eid = source if (dest.is_null()) else dest
    exact = source_is_prefix if (dest.is_null()) else dest_is_prefix

    entry = lisp.lisp_map_cache.lookup_cache(lookup_eid, exact)
    if (entry == None):
        output += "{} {}".format(lisp.lisp_print_sans("Lookup not found for"),
            lisp.lisp_print_cour(eid_str))
    else:
        # When we looked up on the dest, refine with the source sub-cache.
        if (lookup_eid == dest):
            source_entry = entry.lookup_source_cache(source, source_is_prefix)
            if (source_entry): entry = source_entry

        uptime = lisp.lisp_print_elapsed(entry.uptime)
        output += "{} {} {} {} {} {} {}".format(
            lisp.lisp_print_sans("Exact" if source_is_prefix else "Longest"),
            lisp.lisp_print_sans("match lookup for"),
            lisp.lisp_print_cour(eid_str),
            lisp.lisp_print_sans("found"),
            lisp.lisp_print_cour(entry.print_eid_tuple()),
            lisp.lisp_print_sans("with uptime"),
            lisp.lisp_print_cour(uptime))

    output += "<br>"
    return (output)
if 20 - 20: Ii1I . I1Ii111 % Ii1I
if 5 - 5: OOooOOo + iII111i
if 23 - 23: I1Ii111 % iIii1I11I1II1 . I11i
if 95 - 95: Oo0Ooo + i11iIiiIii % OOooOOo - oO0o
if 11 - 11: I1ii11iIi11i / O0 + II111iiii
if 95 - 95: I1Ii111 + IiII * iIii1I11I1II1
if 17 - 17: OoO0O00 - Oo0Ooo * O0 / Ii1I
if 19 - 19: i1IIi - iIii1I11I1II1 . I11i
def lisp_get_clause_for_api ( command ) :
OO0O0OO = open ( "./lisp.config" , "r" )
Oo000 = { command : [ ] }
iiIIi1i111i = { }
iII = [ ]
if 55 - 55: iIii1I11I1II1 . IiII - o0oOOo0O0Ooo . I1ii11iIi11i * i1IIi
i1IiiI = 0
iiii11IiIiI = False
for oOO in OO0O0OO :
if ( lisp_end_file ( oOO ) ) : break
if ( lisp_comment ( oOO ) ) : continue
if 76 - 76: i1IIi + O0 / IiII + i11iIiiIii % I1Ii111 % Oo0Ooo
if 61 - 61: iIii1I11I1II1 % Ii1I - oO0o * OoooooooOO % II111iiii - Ii1I
if 44 - 44: O0
if 9 - 9: oO0o . Oo0Ooo + iII111i + I1IiiI * I1IiiI - I1IiiI
if 95 - 95: IiII + OOooOOo % oO0o * OOooOOo
if 58 - 58: OoOoOO00 . o0oOOo0O0Ooo + oO0o
if ( oOO . find ( command + " {" ) != - 1 ) :
i1IiiI += 1
iiii11IiIiI = True
continue
if 26 - 26: II111iiii / o0oOOo0O0Ooo
if ( iiii11IiIiI == False ) : continue
if 32 - 32: I1ii11iIi11i * I1IiiI + o0oOOo0O0Ooo % II111iiii + OOooOOo + Ii1I
if ( lisp_begin_clause ( oOO ) ) :
i1IiiI += 1
oo0OOo0Oo00 = oOO . replace ( " " , "" )
oo0OOo0Oo00 = oo0OOo0Oo00 . replace ( "\t" , "" )
oo0OOo0Oo00 = oo0OOo0Oo00 . replace ( "\n" , "" )
oo0OOo0Oo00 = oo0OOo0Oo00 . replace ( "{" , "" )
iiIIi1i111i = { oo0OOo0Oo00 : { } }
continue
if 64 - 64: iII111i % OoooooooOO
if 65 - 65: OoOoOO00 / I1ii11iIi11i / o0oOOo0O0Ooo
if 15 - 15: ooOoO0o / ooOoO0o % OoooooooOO . I1Ii111
if 93 - 93: I1ii11iIi11i * I1ii11iIi11i / OoooooooOO
if 6 - 6: I1ii11iIi11i * Oo0Ooo + iIii1I11I1II1
if 19 - 19: O0 % II111iiii * o0oOOo0O0Ooo
if ( lisp_end_clause ( oOO ) ) :
i1IiiI -= 1
if ( i1IiiI ) :
Oo000 [ command ] . append ( iiIIi1i111i )
iiIIi1i111i = { }
continue
if 27 - 27: OOooOOo * IiII / i11iIiiIii - oO0o + II111iiii
iII . append ( Oo000 )
Oo000 = { command : [ ] }
iiii11IiIiI = False
continue
if 43 - 43: I1ii11iIi11i - II111iiii
if 56 - 56: I1ii11iIi11i . i1IIi / iII111i % oO0o / O0 * I11i
oOO = oOO . replace ( " " , "" )
oOO = oOO . replace ( "\t" , "" )
oOO = oOO . replace ( "\n" , "" )
oOO = oOO . replace ( "{" , "" )
oOO = oOO . split ( "=" )
oo00oO0O0 = "" if len ( oOO ) == 1 else oOO [ 1 ]
oOOOooOo0O = oOO [ 0 ]
if 98 - 98: O0 + iII111i
if ( len ( iiIIi1i111i ) == 0 ) :
Oo000 [ command ] . append ( { oOOOooOo0O : oo00oO0O0 } )
else :
iiIIi1i111i [ oo0OOo0Oo00 ] [ oOOOooOo0O ] = oo00oO0O0
if 23 - 23: OoooooooOO . iIii1I11I1II1 / i1IIi
if 31 - 31: Oo0Ooo - iIii1I11I1II1 / I11i . OoO0O00
if 74 - 74: Oo0Ooo - | |
# new data set which are not classified correctly. To get the *next* sample we simply call the `next` method on our generator.
# %%
# Pull the next misclassified sample from the generator.
print(next(gn))
# %% [markdown]
# After looking at a few examples, maybe we decide to look at the most frequently appearing `5000` words in each data set, the original training data set and the new data set. The reason for looking at this might be that we expect the frequency of use of different words to have changed, maybe there is some new slang that has been introduced or some other artifact of popular culture that has changed the way that people write movie reviews.
#
# To do this, we start by fitting a `CountVectorizer` to the new data.
# %%
new_vectorizer = CountVectorizer(max_features=5000,
        preprocessor=lambda x: x, tokenizer=lambda x: x)
new_vectorizer.fit(new_X)
# %% [markdown]
# Now that we have this new `CountVectorizer` object, we can check to see if the corresponding vocabulary has changed between the two data sets.
# %%
original_vocabulary = set(vocabulary.keys())
new_vocabulary = set(new_vectorizer.vocabulary_.keys())
# %% [markdown]
# We can look at the words that were in the original vocabulary but not in the new vocabulary.
# %%
print(original_vocabulary - new_vocabulary)
# %% [markdown]
# And similarly, we can look at the words that are in the new vocabulary but which were not in the original vocabulary.
# %%
print(new_vocabulary - original_vocabulary)
# %% [markdown]
# These words themselves don't tell us much, however if one of these words occurred with a large frequency, that might tell us something. In particular, we wouldn't really expect any of the words above to appear with too much frequency.
#
# **Question** What exactly is going on here? Not only what (if any) words appear with a larger than expected frequency but also, what does this mean? What has changed about the world that our original model no longer takes into account?
#
# **NOTE:** This is meant to be a very open ended question. To investigate you may need more cells than the one provided below. Also, there isn't really a *correct* answer, this is meant to be an opportunity to explore the data.
# %%
# Print each symmetric-difference word with its column index in the
# respective vocabulary (CountVectorizer maps word -> column index).
print("Words that were in the original vocabulary but not in the new vocabulary: ")
for key in (original_vocabulary - new_vocabulary):
    print(f"{key}: {vocabulary[key]}")
print("\n")
print("Words that were in the new vocabulary but not in the original vocabulary: ")
for key in (new_vocabulary - original_vocabulary):
    print(f"{key}: {new_vectorizer.vocabulary_[key]}")
# %% [markdown]
# ### (TODO) Build a new model
#
# Supposing that we believe something has changed about the underlying distribution of the words that our reviews are made up of, we need to create a new model. This way our new model will take into account whatever it is that has changed.
#
# To begin with, we will use the new vocabulary to create a bag of words encoding of the new data. We will then use this data to train a new XGBoost model.
#
# **NOTE:** Because we believe that the underlying distribution of words has changed it should follow that the original vocabulary that we used to construct a bag of words encoding of the reviews is no longer valid. This means that we need to be careful with our data. If we send a bag of words encoded review using the *original* vocabulary we should not expect any sort of meaningful results.
#
# In particular, this means that if we had deployed our XGBoost model like we did in the Web App notebook then we would need to implement this vocabulary change in the Lambda function as well.
# %%
new_XV = new_vectorizer.transform(new_X).toarray()
# %% [markdown]
# And a quick check to make sure that the newly encoded reviews have the correct length, which should be the size of the new vocabulary which we created.
# %%
len(new_XV[0])
# %% [markdown]
# Now that we have our newly encoded, newly collected data, we can split it up into a training and validation set so that we can train a new XGBoost model. As usual, we first split up the data, then save it locally and then upload it to S3.
# %%
import pandas as pd
# Earlier we shuffled the training dataset so to make things simple we can just assign
# the first 10 000 reviews to the validation set and use the remaining reviews for training.
new_val_X = pd.DataFrame(new_XV[:10000])
new_train_X = pd.DataFrame(new_XV[10000:])
new_val_y = pd.DataFrame(new_Y[:10000])
new_train_y = pd.DataFrame(new_Y[10000:])
# %% [markdown]
# In order to save some memory we will effectively delete the `new_X` variable. Remember that this contained a list of reviews and each review was a list of words. Note that once this cell has been executed you will need to read the new data in again if you want to work with it.
# %%
new_X = None
# %% [markdown]
# Next we save the new training and validation sets locally. Note that we overwrite the training and validation sets used earlier. This is mostly because the amount of space that we have available on our notebook instance is limited. Of course, you can increase this if you'd like but to do so may increase the cost of running the notebook instance.
# %%
# new_data.csv carries no label column; the validation/train files put the
# label first, the layout expected by the built-in XGBoost algorithm.
pd.DataFrame(new_XV).to_csv(os.path.join(data_dir, 'new_data.csv'), header=False, index=False)
pd.concat([new_val_y, new_val_X], axis=1).to_csv(os.path.join(data_dir, 'new_validation.csv'), header=False, index=False)
pd.concat([new_train_y, new_train_X], axis=1).to_csv(os.path.join(data_dir, 'new_train.csv'), header=False, index=False)
# %% [markdown]
# Now that we've saved our data to the local instance, we can safely delete the variables to save on memory.
# %%
new_val_y = new_val_X = new_train_y = new_train_X = new_XV = None
# %% [markdown]
# Lastly, we make sure to upload the new training and validation sets to S3.
#
# **TODO:** Upload the new data as well as the new training and validation data sets to S3.
# %%
# TODO: Upload the new data and the new validation.csv and train.csv files in the data_dir directory to S3.
new_data_location = session.upload_data(os.path.join(data_dir, "new_data.csv"), key_prefix=prefix)
new_val_location = session.upload_data(os.path.join(data_dir, "new_validation.csv"), key_prefix=prefix)
new_train_location = session.upload_data(os.path.join(data_dir, "new_train.csv"), key_prefix=prefix)
# %% [markdown]
# Once our new training data has been uploaded to S3, we can create a new XGBoost model that will take into account the changes that have occurred in our data set.
#
# **TODO:** Create a new XGBoost estimator object.
# %%
# TODO: First, create a SageMaker estimator object for our model.
# NOTE: sagemaker.s3_input and the train_instance_* parameter names below are
# SageMaker Python SDK v1 APIs (renamed in SDK v2).
new_xgb = sagemaker.estimator.Estimator(container,
                                        role,
                                        train_instance_count=1,
                                        train_instance_type="ml.m4.xlarge",
                                        output_path=f"s3://{session.default_bucket()}/{prefix}/output",
                                        sagemaker_session=session)
# TODO: Then set the algorithm specific parameters. You may wish to use the same parameters that were
# used when training the original model.
new_xgb.set_hyperparameters(max_depth=5,
                            eta=0.2,
                            gamma=4,
                            min_child_weight=6,
                            subsample=0.8,
                            silent=0,
                            objective='binary:logistic',
                            early_stopping_rounds=10,
                            num_round=500)
# %% [markdown]
# Once the model has been created, we can train it with our new data.
#
# **TODO:** Train the new XGBoost model.
# %%
# TODO: First, make sure that you create s3 input objects so that SageMaker knows where to
# find the training and validation data.
s3_new_input_train = sagemaker.s3_input(s3_data=new_train_location, content_type="text/csv")
s3_new_input_validation = sagemaker.s3_input(s3_data=new_val_location, content_type="text/csv")
# %%
# TODO: Using the new validation and training data, 'fit' your new model.
new_xgb.fit({"train": s3_new_input_train, "validation": s3_new_input_validation})
# %% [markdown]
# ### (TODO) Check the new model
#
# So now we have a new XGBoost model that we believe more accurately represents the state of the world at this time, at least in how it relates to the sentiment analysis problem that we are working on. The next step is to double check that our model is performing reasonably.
#
# To do this, we will first test our model on the new data.
#
# **Note:** In practice this is a pretty bad idea. We already trained our model on the new data, so testing it shouldn't really tell us much. In fact, this is sort of a textbook example of leakage. We are only doing it here so that we have a numerical baseline.
#
# **Question:** How might you address the leakage problem?
# %% [markdown]
# First, we create a new transformer based on our new XGBoost model.
#
# **TODO:** Create a transformer object from the newly created XGBoost model.
# %%
# TODO: Create a transformer object from the new_xgb model
new_xgb_transformer = new_xgb.transformer(instance_count=1, instance_type="ml.m4.xlarge")
# %% [markdown]
# Next we test our model on the new data.
#
# **TODO:** Use the transformer object to transform the new data (stored in the `new_data_location` variable)
# %%
# TODO: Using new_xgb_transformer, transform the new_data_location data. You
# Exercise-Sheet-01/Planetary_Evolution.py
#!/usr/bin/env python3
"""
@author: <NAME>
@email: <EMAIL>
Albert-Ludwigs-Universität Freiburg
Computational Physics: Material Science
Exercise Sheet 01 - Planetary Evolution
The following are the referenced equations in the code:
Eq(1), Eq(2) : Euler algorithm for r(t+dt) and v(t+dt)
Eq(3), Eq(3) : Verlet algorithm for r(t+dt) and v(t)
Eq(4), Eq(5) : Velocity-Verlet algorithm for r(t+dt) and v(t+dt)
Please see the report for an in-depth formulation.
The code consists in two main classes: PARTICLE and SYSTEM. Planets are modelled as particles
and the Solar System as a system. The class particle contains all the informations about a given
particle (mass, position, velocity, net_force). The class system contains all the particles and
meaningful informations about the system, such as potential and kinetic energy of the whole
ensemble of particles.
"""
import numpy as np
import sys
import itertools
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Global constant definition
# DT: simulation timestep in days
DT = 0.5
# ITERATIONS: number of timesteps; 2 steps/day * 365 days * 10 -> with
# DT = 0.5 this spans 10 years (the original "(default is 500 years)"
# comment was stale)
ITERATIONS = 2*365*10
# //the following are constant and conversions useful
# //for astronomical simulations (e.g. normalized Gravitational constant)
# AU: astronomical unit in metres; AUday rescales it per day of simulation.
AU = 1.49597870691E+11
AUday = 1.49597870691E+11/86400.
# G_CONST: SI gravitational constant rescaled to AU/day units.
G_CONST = 6.67408E-11/(AUday**2)/AU
UNITS_OF_MASS = 1E29 #mass in kg
# Routine for loading planets mass and initial conditions from file
# //note that the initial files mass.dat and planets.dat have been re-formatted
def load_planets():
    """Load planet names, masses, and initial state vectors from disk.

    planets.txt columns: x y z vx vy vz name; mass.txt holds one mass per row.
    Returns (names, mass, position, velocity) with position/velocity of shape (n, 3).
    """
    state = np.loadtxt("planets.txt", usecols=range(6))
    names = np.loadtxt("planets.txt", dtype=object, usecols=(6))
    mass = np.loadtxt("mass.txt", usecols=(0))
    return names, mass, state[:, :3], state[:, 3:]
# Particle CLASS. Contains all the information on a given particle
# //practical for large number of particles
class Particle:
    """A point mass with a three-timestep position history and a net force.

    Planets are modelled as particles; positions at t-dt, t and t+dt are kept
    so that Euler/Verlet/velocity-Verlet integrators can be implemented.
    """

    def __init__(self, ID, name, mass, position, velocity):
        # particle ID: useful if the index is more significant than the name
        # particle name: relevant only for planets
        self.ID = ID
        self.name = name
        self.mass = mass
        # _position has shape (3, 3) and holds x(t), x(t+dt), x(t-dt) in rows
        # 0, 1, 2; pos()/set_pos() address them with timestamps {0, 1, -1}.
        self._position = np.stack((position, np.zeros(3), np.zeros(3)), axis=0)
        self._velocity = velocity
        # Net force on the particle; an additional force slot is kept for the
        # velocity-Verlet scheme.
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        self.net_force = np.zeros(3, dtype=float)
        self.v_verlet_net_force = None

    # Accessors for _position and _velocity.
    # //position requires the timestamp {-1, 0, 1}
    def pos(self, timestamp):
        """Return the position stored for timestamp index {-1, 0, 1}."""
        return self._position[timestamp, :]

    def vel(self):
        """Return the current velocity vector."""
        return self._velocity

    def set_vel(self, velocity):
        """Overwrite the current velocity vector."""
        self._velocity = velocity

    def set_pos(self, position, timestamp):
        """Store *position* at timestamp index {-1, 0, 1}."""
        self._position[timestamp, :] = position

    def update_coord(self):
        """Shift position buffers after an iteration: new->current, current->previous.

        Called by System.update_coordinates after each step; the net force is
        reset to zero for the next force accumulation.
        """
        self._position = np.stack((self.pos(1), np.zeros(3), self.pos(0)), axis=0)
        self.net_force = np.zeros(3, dtype=float)

    def print_current_coordinates(self, precision=5):
        """Print the current (t) coordinates rounded to *precision* digits (default 5)."""
        print(np.around(self.pos(0)[0], precision), np.around(self.pos(0)[1], precision), np.around(self.pos(0)[2], precision))
# System CLASS. Contains all the particles of a given system. Implements as methods all the various
# integrator, and includes routines for energy evaluation .
class System:
def __init__(self, name):
# system ID: useful if more system are needed in the same simulation
# planets: list containing all the planets
# time: by accessing self.time the time elapsed (in days) is given
self.ID = name
self.planets = []
self.time = 0
# the energy values are instantaneous values
# //history values are not saved on memory
self.k_energy = None
self.u_energy = None
# Routine for adding the system planets
def add_planet(self,ID, name, mass, position, velocity):
self.planets.append(Particle(ID,name, mass, position, velocity))
# Routine for updating the planets coordinates. It is foundamental for optimal handling
# of positions and velocities at different timestamp (t-dt, t, t+dt)
# //the routine itself calls a routine implemented in the class particle
# //the sun coordinates are not updated. It is assumed stationary (sun planet_ID = 0)
def update_coordinates(self):
for planet in self.planets:
if(planet.ID > 0):
planet.update_coord()
# Routine for computing the force between two planets
# //the function adds the force contribution for both planet_1 and planet_2 (saves a little computational time)
# //the function is called by compute_system_force which computes the force for all planets
# //F_12 = G m_1 m_2 (r1-r2)/(|r1-r2|)^3
def compute_force(self, planet_1, planet_2, t_stamp):
distance_cubed = (np.linalg.norm(planet_1.pos(t_stamp) - planet_2.pos(t_stamp)))**3
force = G_CONST*planet_1.mass*planet_2.mass * np.reciprocal(distance_cubed) * (planet_1.pos(t_stamp) - planet_2.pos(t_stamp))
planet_1.net_force += -force
planet_2.net_force += +force
# Routine for computing the force between all planets in the system
# //t_stamp {-1,0,1} represents the position stored at t-dt, t and t+dt
# //itertools_combinations() returns all the combination of planets
# //avoiding duplicates (e.g. AB and BA) and self-combinations (e.g. AA, BB)
def compute_system_force(self, t_stamp):
for planet_1, planet_2 in itertools.combinations(self.planets, 2):
self.compute_force(planet_1, planet_2, t_stamp)
# Routine for computing the kinetic energy of the system (evaluated at timestamp t)
# //no need to store k_energy for each individual planet
def compute_k_energy(self):
# kinetic energy is set to zero each time it is required
# to avoid unexpected values
self.k_energy = 0
for planet in self.planets:
self.k_energy += 0.5*planet.mass*np.linalg.norm(planet.vel())
# Routine for computing the potential energy of the system (evaluated at timestamp t)
# //as for compute_system_force the routine is inefficient for large number of particles
def compute_u_energy(self):
# potential energy is set to zero each time it is required
# to avoid unexpected values
self.u_energy = 0
for planet_1, planet_2 in itertools.combinations(self.planets, 2):
distance = np.linalg.norm(planet_1.pos(0) - planet_2.pos(0))
self.u_energy += -G_CONST*planet_1.mass*planet_2.mass * np.reciprocal(distance)
# Routine for printing on standard output the current coordinates and the system energies
# //the values are printed with precision to the 5th digit (by default)
# //although can be overwritten by passing precision argument
def print_system_coordinates(self, precision = 5):
for planet in self.planets:
print(np.around(planet.pos(0)[0], precision), np.around(planet.pos(0)[1], precision), np.around(planet.pos(0)[2], precision), end=' ')
print(np.around(self.k_energy,precision), np.around(self.u_energy,precision), np.around(self.k_energy+self.u_energy,precision))
print("")
# Routine that implements the Euler integrator
# //the number of iterations is set globally
# //the routine is essentially a double for loop -> slow for large number of particles
# //a routine for saving computational results on an output file is called for each iteration
#
# The Euler integrator is given by the following equations for position and velocities
# //note that in practice this equations are vectorial
# Eq(1) r(t+dt) = r(t) + v(t) * dt + 1/2m * dt^2 * f(t)
# Eq(2) v(t+dt) = v(t) + dt/m * f(t)
def evolve_euler(self):
iter = 0
while iter < ITERATIONS:
# energies are computed at the beginning of each iterations
self.compute_k_energy()
self.compute_u_energy()
# force computation at current coordinates (timestamp = 0)
self.compute_system_force(0)
for planet in self.planets:
# remember that the sun (ID=0) is supposed stationary
if(planet.ID != 0):
new_pos = planet.pos(0) + DT*planet.vel() + planet.net_force*DT*DT/planet.mass
new_vel = planet.vel() + DT*planet.net_force/planet.mass
planet.set_pos(new_pos, 1)
planet.set_vel(new_vel)
iter += 1
self.time += DT
# after each iterations the coordinates need to be updated
# and printed to standard output (thinned down by a factor of 100 (once every 50 days))
self.update_coordinates()
self.print_system_coordinates()
# Routine that implements the Verlet integrator
# //the number of iterations is set globally
# //the routine is essentially a double for loop -> slow for large number of particles
# //a routine for saving computational results on an output file is called for each iteration
#
# The Verlet integrator is given by the following equations for position and velocities
# //note that in practice this equations are vectorial
# Eq(3) r(t+dt) = 2r(t) -r(t-dt) + 1/2m * dt^2 * f(t)
# Eq(4) v(t) = [r(t+dt) - r(t-dt)] / 2dt
def evolve_verlet(self):
iter = 0
while iter<ITERATIONS:
# energies are computed at the beginning of each iteration
self.compute_k_energy()
self.compute_u_energy()
# force computation at current coordinates (timestamp = 0)
self.compute_system_force(0)
# If the current iteration is the first iteration it is necessary to
# propagate the positions to the new "mid-positions", i.e shift the
# initial conditions to first iteration. This is done with a simple Euler propagation of
# the coordinates. Although the Euler scheme has larger errors a single iterations
# has no influence on | |
# <gh_stars>100-1000
import numpy as np
from ukfm import SO3, SEK3
import matplotlib.pyplot as plt
class INERTIAL_NAVIGATION:
"""3D inertial navigation on flat Earth, where the vehicle obtains
observations of known landmarks. See a text description in
:cite:`barrauInvariant2017`, Section V.
:arg T: sequence time (s).
:arg imu_freq: IMU frequency (Hz).
"""
g = np.array([0, 0, -9.82])
"gravity vector (m/s^2) :math:`\\mathbf{g}`."
N_ldk = 3
ldks = np.zeros((3, 3))
"known landmarks :math:`\\mathbf{p}_i^l,~i=1,\\ldots,L`."
ldks[0] = np.array([0, 2, 2])
ldks[1] = np.array([-2, -2, -2])
ldks[2] = np.array([2, -2, -2])
class STATE:
"""State of the system.
It represents the state of the vehicle.
.. math::
\\boldsymbol{\\chi} \in \\mathcal{M} = \\left\\{ \\begin{matrix}
\\mathbf{C} \in SO(3),
\\mathbf{v} \in \\mathbb R^3,
\\mathbf{p} \in \\mathbb R^3
\\end{matrix} \\right\\}
:ivar Rot: rotation matrix :math:`\mathbf{C}`.
:ivar v: velocity vector :math:`\mathbf{v}`.
:ivar p: position vector :math:`\mathbf{p}`.
"""
def __init__(self, Rot, v, p):
self.Rot = Rot
self.v = v
self.p = p
class INPUT:
"""Input of the propagation model.
The input is a measurement from an Inertial Measurement Unit (IMU).
.. math::
\\boldsymbol{\\omega} \in \\mathcal{U} = \\left\\{ \\begin{matrix}
\\mathbf{u} \in \\mathbb R^3,
\\mathbf{a}_b \in \\mathbb R^3
\\end{matrix} \\right\\}
:ivar gyro: 3D gyro :math:`\mathbf{u}`.
:ivar acc: 3D accelerometer (measurement in body frame)
:math:`\mathbf{a}_b`.
"""
def __init__(self, gyro, acc):
self.gyro = gyro
self.acc = acc
    def __init__(self, T, imu_freq):
        # sequence time (s)
        self.T = T
        # IMU frequency (Hz)
        self.imu_freq = imu_freq
        # total number of timestamps
        # NOTE(review): assumes T * imu_freq is an integer -- self.N is used
        # later as an array length and range bound; confirm callers pass ints.
        self.N = T * imu_freq
        # integration step (s)
        self.dt = 1 / imu_freq
@classmethod
def f(cls, state, omega, w, dt):
""" Propagation function.
.. math::
\\mathbf{C}_{n+1} &= \\mathbf{C}_{n} \\exp\\left(\\left(\\mathbf{u}
+ \\mathbf{w}^{(0:3)} \\right) dt\\right), \\\\
\\mathbf{v}_{n+1} &= \\mathbf{v}_{n} + \\mathbf{a} dt, \\\\
\\mathbf{p}_{n+1} &= \\mathbf{p}_{n} + \\mathbf{v}_{n} dt +
\mathbf{a} dt^2/2,
where
.. math::
\\mathbf{a} = \\mathbf{C}_{n} \\left( \\mathbf{a}_b +
\\mathbf{w}^{(3:6)} \\right) + \\mathbf{g}
:var state: state :math:`\\boldsymbol{\\chi}`.
:var omega: input :math:`\\boldsymbol{\\omega}`.
:var w: noise :math:`\\mathbf{w}`.
:var dt: integration step :math:`dt` (s).
"""
acc = state.Rot.dot(omega.acc + w[3:6]) + cls.g
new_state = cls.STATE(
Rot=state.Rot.dot(SO3.exp((omega.gyro + w[:3])*dt)),
v=state.v + acc*dt,
p=state.p + state.v*dt + 1/2*acc*dt**2
)
return new_state
@classmethod
def h(cls, state):
""" Observation function.
.. math::
h\\left(\\boldsymbol{\\chi}\\right) = \\begin{bmatrix}
\\mathbf{C}^T \\left( \\mathbf{p} - \\mathbf{p}^l_1\\right) \\\\
\\vdots \\\\
\\mathbf{C}^T \\left( \\mathbf{p} - \\mathbf{p}^l_L\\right)
\\end{bmatrix}
where :math:`\\mathbf{p}^l_i \in \\mathbb R^3,~i=1,\\ldots,L` are known
landmarks.
:var state: state :math:`\\boldsymbol{\\chi}`.
"""
y = np.zeros(cls.N_ldk*3)
for i in range(cls.N_ldk):
y[3*i: 3*(i+1)] = state.Rot.T.dot(cls.ldks[i] - state.p)
return y
    @classmethod
    def phi(cls, state, xi):
        """Retraction.

        .. math::
            \\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
            \\left( \\begin{matrix}
            \\exp\\left(\\boldsymbol{\\xi}^{(0:3)}\\right) \\mathbf{C} \\\\
            \\mathbf{v} + \\boldsymbol{\\xi}^{(3:6)} \\\\
            \\mathbf{p} + \\boldsymbol{\\xi}^{(6:9)}
            \\end{matrix} \\right)

        Note: the rotation update is a *left* multiplication
        :math:`\\exp(\\xi)\\mathbf{C}`, matching the code and the inverse in
        :meth:`~ukfm.INERTIAL_NAVIGATION.phi_inv` (the previous docstring
        showed :math:`\\mathbf{C}\\exp(\\xi)`, which did not match the code).

        The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)
        \\times \\mathbb R^6`.

        :var state: state :math:`\\boldsymbol{\\chi}`.
        :var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
        """
        new_state = cls.STATE(
            Rot=SO3.exp(xi[:3]).dot(state.Rot),
            v=state.v + xi[3:6],
            p=state.p + xi[6:9]
        )
        return new_state
@classmethod
def phi_inv(cls, state, hat_state):
"""Inverse retraction.
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
\\right) = \\left( \\begin{matrix}
\\log\\left(\\mathbf{C} \\mathbf{\\hat{C}}^T \\right)\\\\
\\mathbf{v} - \\mathbf{\\hat{v}} \\\\
\\mathbf{p} - \\mathbf{\\hat{p}}
\\end{matrix} \\right)
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)
\\times \\mathbb R^6`.
Its corresponding retraction is :meth:`~ukfm.INERTIAL_NAVIGATION.phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
xi = np.hstack([SO3.log(state.Rot.dot(hat_state.Rot.T)),
state.v - hat_state.v,
state.p - hat_state.p])
return xi
@classmethod
def left_phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\left( \\begin{matrix}
\\mathbf{C} \\mathbf{C}_\\mathbf{T} \\\\
\\mathbf{v} + \\mathbf{C} \\mathbf{r_1} \\\\
\\mathbf{p} + \\mathbf{C} \\mathbf{r_2}
\\end{matrix} \\right)
where
.. math::
\\mathbf{T} = \\exp\\left(\\boldsymbol{\\xi}\\right) =
\\begin{bmatrix}
\\mathbf{C}_\\mathbf{T} & \\mathbf{r_1} &\\mathbf{r}_2 \\\\
\\mathbf{0}^T & & \\mathbf{I}
\\end{bmatrix}
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SE_2(3)`
with left multiplication.
Its corresponding inverse operation is
:meth:`~ukfm.INERTIAL_NAVIGATION.left_phi_inv`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
T = SEK3.exp(xi)
new_state = cls.STATE(
Rot=state.Rot.dot(T[:3, :3]),
v=state.Rot.dot(T[:3, 3]) + state.v,
p=state.Rot.dot(T[:3, 4]) + state.p
)
return new_state
@classmethod
def left_phi_inv(cls, state, hat_state):
"""Inverse retraction.
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}
\\left(\\boldsymbol{\\chi}\\right) =
\\log\\left(
\\boldsymbol{\chi}^{-1} \\boldsymbol{\\hat{\\chi}} \\right)
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SE_2(3)`
with left multiplication.
Its corresponding retraction is
:meth:`~ukfm.INERTIAL_NAVIGATION.left_phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
chi = cls.state2chi(state)
hat_chi = cls.state2chi(hat_state)
xi = SEK3.log(SEK3.inv(chi).dot(hat_chi))
return xi
@classmethod
def right_phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\left( \\begin{matrix}
\\mathbf{C}_\\mathbf{T} \\mathbf{C} \\\\
\\mathbf{C}_\\mathbf{T}\\mathbf{v} + \\mathbf{r_1} \\\\
\\mathbf{C}_\\mathbf{T}\\mathbf{p} + \\mathbf{r_2}
\\end{matrix} \\right)
where
.. math::
\\mathbf{T} = \\exp\\left(\\boldsymbol{\\xi}\\right) =
\\begin{bmatrix}
\\mathbf{C}_\\mathbf{T} & \\mathbf{r_1} &\\mathbf{r}_2 \\\\
\\mathbf{0}^T & & \\mathbf{I}
\\end{bmatrix}
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SE_2(3)`
with right multiplication.
Its corresponding inverse operation is
:meth:`~ukfm.INERTIAL_NAVIGATION.right_phi_inv`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
chi = SEK3.exp(xi)
new_state = cls.STATE(
Rot=chi[:3, :3].dot(state.Rot),
v=chi[:3, :3].dot(state.v) + chi[:3, 3],
p=chi[:3, :3].dot(state.p) + chi[:3, 4]
)
return new_state
    @classmethod
    def right_phi_inv(cls, state, hat_state):
        """Inverse retraction.

        .. math::
            \\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
            \\right) = \\log\\left(
            \\boldsymbol{\\hat{\\chi}}^{-1} \\boldsymbol{\\chi} \\right)

        The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SE_2(3)`
        with right multiplication.
        Its corresponding retraction is
        :meth:`~ukfm.INERTIAL_NAVIGATION.right_phi`.

        :var state: state :math:`\\boldsymbol{\\chi}`.
        :var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
        """
        # NOTE(review): the formula above reads log(hat_chi^{-1} chi), but the
        # code computes log(hat_chi . chi^{-1}) -- confirm which convention is
        # intended and align docstring/code accordingly.
        chi = cls.state2chi(state)
        hat_chi = cls.state2chi(hat_state)
        xi = SEK3.log(hat_chi.dot(SEK3.inv(chi)))
        return xi
@classmethod
def state2chi(cls, state):
chi = np.eye(5)
chi[:3, :3] = state.Rot
chi[:3, 3] = state.v
chi[:3, 4] = state.p
return chi
@classmethod
def ekf_FG_ana(cls, state, omega, dt):
F = np.eye(9)
F[3:6, :3] = -SO3.wedge(state.Rot.dot(omega.acc)*dt)
F[6:9, :3] = F[3:6, :3]*dt/2
F[6:9, 3:6] = dt*np.eye(3)
G = np.zeros((9, 6))
G[:3, :3] = state.Rot*dt
G[3:6, 3:6] = state.Rot*dt
return F, G
@classmethod
def ekf_H_ana(cls, state):
H = np.zeros((3 * cls.N_ldk, 9))
for i in range(cls.N_ldk):
H[3*i: 3*(i+1), :3] = state.Rot.T.dot(SO3.wedge(cls.ldks[i] -
state.p))
H[3*i: 3*(i+1), 6:9] = -state.Rot.T
return H
@classmethod
def iekf_FG_ana(cls, state, omega, dt):
F = np.eye(9)
F[3:6, :3] = SO3.wedge(cls.g)*dt
F[6:9, :3] = F[3:6, :3]*dt/2
F[6:9, 3:6] = dt*np.eye(3)
G = np.zeros((9, 6))
G[:3, :3] = state.Rot*dt
G[3:6, 3:6] = state.Rot*dt
G[3:6, :3] = SO3.wedge(state.v).dot(state.Rot)*dt
G[6:9, :3] = SO3.wedge(state.p).dot(state.Rot)*dt
return F, G
@classmethod
def iekf_H_ana(cls, state):
H = np.zeros((3 * cls.N_ldk, 9))
for i in range(cls.N_ldk):
H[3*i: 3*(i+1), :3] = state.Rot.T.dot(SO3.wedge(cls.ldks[i]))
H[3*i: 3*(i+1), 6:9] = -state.Rot.T
return H
    def simu_f(self, imu_std):
        """Simulate true states and noisy IMU inputs along a circular trajectory.

        :var imu_std: (gyro_std, acc_std) standard deviations of the IMU noise.

        Returns (states, omegas): N true states and N-1 IMU inputs.  Noise is
        added to each input only *after* the noise-free propagation, so the
        returned states are the exact trajectory.
        """
        # radius (m)
        r = 5
        # set noise to zero to compute true trajectory
        w = np.zeros(6)
        # compute acceleration from trajectory
        t = np.linspace(0, self.T, self.N)
        p = r * np.vstack([np.sin(t / self.T * 2 * np.pi),
                           np.cos(t / self.T * 2 * np.pi), np.zeros(self.N)])
        # finite-difference velocity and acceleration (first sample padded with 0)
        v = np.hstack([np.zeros((3, 1)), np.diff(p)]) / self.dt
        acc = np.hstack([np.zeros((3, 1)), np.diff(v)]) / self.dt
        # init variables at zero and do for loop
        omegas = []
        states = [self.STATE(np.eye(3), v[:, 0], p[:, 0])]
        for n in range(1, self.N):
            # true input: required world acceleration rotated into the body
            # frame, gravity compensated
            omegas.append(self.INPUT(
                gyro=np.zeros(3),
                acc=states[n-1].Rot.T.dot(acc[:, n-1] - self.g)
            ))
            # propagate state with zero noise
            states.append(self.f(states[n-1], omegas[n-1], w, self.dt))
            # noisy input (deliberately added only after propagation)
            omegas[n-1].gyro = omegas[n-1].gyro + \
                imu_std[0] * np.random.randn(3)
            omegas[n-1].acc = omegas[n-1].acc + imu_std[1] * np.random.randn(3)
        return states, omegas
def simu_h(self, states, obs_freq, obs_std):
# vector to know where measurement happen
one_hot_ys = np.zeros(self.N)
# imu_freq/obs_freq must be integer
one_hot_ys[::int(self.imu_freq / obs_freq)] = 1
idxs = np.where(one_hot_ys == 1)[0] # indexes where measurement happen
# total number of measurements
K = idxs.shape[0]
# measurement iteration number
ys = np.zeros((K, 3*self.N_ldk))
for k in range(K):
ys[k] = self.h(states[idxs[k]]) + obs_std * \
np.random.randn(3*self.N_ldk)
return ys, one_hot_ys
def errors(self, Rots, vs, ps, hat_Rots, hat_vs, hat_ps):
errors = np.zeros((self.N, 9))
for n in range(self.N):
errors[n, :3] = SO3.log(Rots[n].T.dot(hat_Rots[n]))
errors[:, 3:6] = vs - hat_vs
errors[:, 6:9] = ps - hat_ps
return errors
def plot_results(self, hat_states, hat_P, states):
Rots, vs, ps = self.get_states(states, self.N)
hat_Rots, hat_vs, hat_ps = self.get_states(hat_states, self.N)
errors = self.errors(Rots, vs, ps, hat_Rots, hat_vs, hat_ps)
errors[:, 0] = np.sqrt(errors[:, 0]**2 + errors[:, 1]**2 +
errors[:, 2]**2)
errors[:, 1] = np.sqrt(errors[:, 3]**2 + errors[:, 4]**2 +
errors[:, 5]**2)
errors[:, 2] = np.sqrt(errors[:, 6]**2 + errors[:, 7]**2 +
errors[:, 8]**2)
t = np.linspace(0, self.T, self.N)
fig, | |
str(eachReadLineAL)
stringArrayAL = str.split(unicodeReadLineAL)
agentMemePath = stringArrayAL[0]
agentID = rmlEngine.api.createEntityFromMeme(agentMemePath)
agents.append(agentID)
n = 0
for eachReadLineT in allLinesT:
unicodeReadLineT = str(eachReadLineT)
stringArray = str.split(unicodeReadLineT)
stimulusMemePath = stringArray[0]
stimulusID = rmlEngine.api.createEntityFromMeme(stimulusMemePath)
column = 0
for agentID in agents:
n = n + 1
column = column + 1
expectedResult = stringArray[column]
argumentMap = {}
argumentMap["controllerID"] = None
stimulusMessage = Engine.StimulusMessage(stimulusID, argumentMap, [agentID])
Engine.siQ.put(stimulusMessage)
timeout = 10.0
time.sleep(timeout)
resultList = []
while True:
testResult = None
try:
report = Engine.broadcasterRegistrar.broadcasterIndex['test'].get_nowait()
testResult = report.resolvedDescriptor
resultList.append(testResult)
#Clear the queue
while not Engine.broadcasterRegistrar.broadcasterIndex['test'].empty():
try:
unusedReport = Engine.broadcasterRegistrar.broadcasterIndex['test'].get_nowait()
except queue.Empty:
#ok. Concurrency is being squirrelly. The queue tests as not empty, but ends up empty.
# Let's not burn the world down over this
break
except Exception as e:
break
if len(resultList) < 1:
resultList.append("***")
testResult = ""
for result in resultList:
if (len(resultList) > 1) and (result == "***"):
#if we have more than one result, then ignore any *** values that get tacked on when resultQueue is emptied
pass
else:
testResult = "%s%s" %(testResult, result)
agentMemePath = rmlEngine.api.getEntityMemeType(agentID)
testcase = "Stimulus= %s, Agent=%s" %(stimulusMemePath, agentMemePath)
results = [n, testcase, testResult, expectedResult, ""]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testStimulusEngine2(filenameAgentList, filenameTestcase, restrictAgents=True):
    """
    Create the usual set of four agents.
    For each stimulus
        Create a stimulus request and drop it into the SiQ.
        Then look in the broadcast queue:
        Wait for the trailer report (or timeout)
        Then compare the results:
        When the stimulus engine returns a stimulus report, it includes a set of all agents for which it is relevant.
        All agents in the test go into one of two buckets (sets), either the set for which no return is expected,
        or the set for which we expect the stimulus engine to return in the report.

        Each line in the test definition tells us which "bucket" (rendered descriptor)
        that each agent belongs to.  E.g. if the agent should see 'HelloAgent', then it will
        be in the agent list for the report object that has 'HelloAgent' as the report.resolvedDescriptor

    if restrictAgents == True, then the agents defined in filenameAgentList will be the targets.
        otherwise, no targets will be defined and all possible agents will be checked
    """
    method = moduleName + '.' + 'testStimulusEngine2'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    resultSet = []
    # filenameAgentList has a list of the agents.
    # BUG FIX: the original called `readLoc.close` without parentheses, so the
    # handles were never actually closed; `with` closes them deterministically.
    testFileNameAL = os.path.join(testDirPath, filenameAgentList)
    with codecs.open(testFileNameAL, "r", "utf-8") as readLocAL:
        allLinesAL = readLocAL.readlines()
    # filenameTestcase has a list of the test cases
    testFileNameT = os.path.join(testDirPath, filenameTestcase)
    with codecs.open(testFileNameT, "r", "utf-8") as readLocT:
        allLinesT = readLocT.readlines()
    # Stimuli are singletons, but we still need to aquire the UUID of the trailer
    stimulusTrailerPath = "TestPackageStimulusEngine.SimpleStimuli.Stimulus_Trailer"
    stimulusID = rmlEngine.api.createEntityFromMeme(stimulusTrailerPath)
    agents = []
    for eachReadLineAL in allLinesAL:
        unicodeReadLineAL = str(eachReadLineAL)
        stringArrayAL = str.split(unicodeReadLineAL)
        agentMemePath = stringArrayAL[0]
        agentID = rmlEngine.api.createEntityFromMeme(agentMemePath)
        agents.append(agentID)
    '''
    If the stimulus engine never returns anything (because there are no agents for which it is relevant), then we'll
    need a set listing all agents used in the test run.
    '''
    standingBadResultSet = set(agents)
    n = 0
    for eachReadLineT in allLinesT:
        n = n + 1
        unicodeReadLineT = str(eachReadLineT)
        stringArray = str.split(unicodeReadLineT)
        stimulusMemePath = stringArray[0]
        stimulusID = rmlEngine.api.createEntityFromMeme(stimulusMemePath)
        # Sort each agent into the bucket (expected rendered descriptor) named
        # by its column of the testcase line.
        column = 0
        expectedBuckets = {}
        for agentID in agents:
            column = column + 1
            expectedColumnResult = stringArray[column]
            if expectedColumnResult in expectedBuckets:
                expectedBuckets[expectedColumnResult].append(agentID)
            else:
                expectedBuckets[expectedColumnResult] = [agentID]
        testResult = True
        expectedResult = True
        argumentMap = {"controllerID": None}
        if restrictAgents:
            stimulusMessage = Engine.StimulusMessage(stimulusID, argumentMap, agents)
        else:
            stimulusMessage = Engine.StimulusMessage(stimulusID, argumentMap)
        Engine.siQ.put(stimulusMessage)
        timeout = 5.0
        time.sleep(timeout)
        resultList = []
        while True:
            try:
                report = Engine.broadcasterRegistrar.broadcasterIndex['test'].get_nowait()
                resultList.append(report)
                # Clear the queue
                while not Engine.broadcasterRegistrar.broadcasterIndex['test'].empty():
                    try:
                        unusedReport = Engine.broadcasterRegistrar.broadcasterIndex['test'].get_nowait()
                    except queue.Empty:
                        # ok. Concurrency is being squirrelly. The queue tests as not empty,
                        # but ends up empty. Let's not burn the world down over this.
                        break
            except Exception:
                break
        if len(resultList) < 1:
            # nothing came back from the stimulus engine, but some tests expect this
            # situation. Create a dummy report.
            emptyAgentSet = set([])
            emptyReport = Engine.StimulusReport(None, None, emptyAgentSet, "***", False, [], [])
            resultList.append(emptyReport)
        notes = ""
        for result in resultList:
            if result is not None:
                try:
                    resultDescriptor = result.resolvedDescriptor
                    try:
                        # The agentIDs in expectedBuckets should be a subset of that in the report's agent set
                        myAgentList = expectedBuckets[resultDescriptor]
                        myAgentSet = set(myAgentList)
                        # If the stimulus engine never responded (because none of the agents
                        # were supposed to be in a report), then result.agentSet will be empty.
                        if standingBadResultSet == myAgentSet:
                            badResultList = expectedBuckets["***"]
                            badResultset = set(badResultList)
                            if result.agentSet:
                                # if we are expecting an empty result set (the expected bucket for
                                # *** matches standingBadResultSet), result.agentSet should be empty
                                testResult = False
                                notes = "%s\nAgents with descriptor %s,Should be empty, but contain %s" % (notes, resultDescriptor, list(result.agentSet))
                        elif myAgentSet.issubset(result.agentSet) == False:
                            testResult = False
                            notes = "%s\nAgents with descriptor %s,%snot a subset of %s" % (notes, resultDescriptor, myAgentList, list(result.agentSet))
                        # We must also ensure that the "***" results don't get into another bucket.
                        # If the resultDescriptor was "***" (meaning that we timed out waiting for
                        # a response from the stimulus engine), then we already tested for it.
                        if resultDescriptor != "***":
                            try:
                                badResultList = expectedBuckets["***"]
                                badResultset = set(badResultList)
                                if badResultset.issubset(result.agentSet) == True:
                                    testResult = False
                                    notes = "%s\nAgents with descriptor %s,%snot a subset of %s" % (notes, resultDescriptor, badResultset, list(result.agentSet))
                            except KeyError:
                                # this is ok; not every testcase has a "***" bucket
                                pass
                            except Exception as e:
                                testResult = False
                                notes = "%s %s" % (notes, e)
                    except KeyError:
                        # not every testcase will have agents for every possible returned descriptor
                        pass
                    except Exception as e:
                        notes = "%s %s" % (notes, e)
                except AttributeError:
                    testResult = False
        agentMemePath = rmlEngine.api.getEntityMemeType(agentID)
        testcase = "Stimulus= %s" % (stimulusMemePath)
        results = [n, testcase, str(testResult), str(expectedResult), notes]
        resultSet.append(results)
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    return resultSet
def testStimulusEngine(filename):
    """
    Create a set of four agents.
    For each agent/stimulus combination,
        Create a stimulus request and drop it into the SiQ.
        Create a trailer request and drop it into the SiQ.
    Then look in the broadcast queue:
        Wait for the trailer report (or timeout)
    Then compare the pre-trailer report results.
    """
    # BUG FIX: the log label previously read 'testStimulusEngineTrailer',
    # which does not match this function's actual name.
    method = moduleName + '.' + 'testStimulusEngine'
    Graph.logQ.put([logType, logLevel.DEBUG, method, "entering"])
    testResult = False
    # Let's create a Hello Agent
    agentPath = "AgentTest.HelloAgent"
    agentID = rmlEngine.api.createEntityFromMeme(agentPath)
    # Stimuli are singletons, but we still need to aquire the UUID of the trailer
    stimulusTrailerPath = "TestPackageStimulusEngine.SimpleStimuli.Stimulus_Trailer"
    stimulusID = rmlEngine.api.createEntityFromMeme(stimulusTrailerPath)
    # Create the message and put it into the SiQ
    try:
        argumentMap = {"controllerID": None}
        stimulusMessage = Engine.StimulusMessage(stimulusID, argumentMap, [agentID])
        Engine.siQ.put(stimulusMessage)
    except Exception as e:
        Graph.logQ.put([logType, logLevel.DEBUG, method, "Error testing trailer. Traceback = %s" % e])
    timeout = 10.0
    time.sleep(timeout)
    try:
        report = responseQueue.get_nowait()
        if report.stimulusID == stimulusID:
            testResult = True
        else:
            Graph.logQ.put([logType, logLevel.WARNING, method, "Trailer Stimulus Failed"])
    except Exception:
        Graph.logQ.put([logType, logLevel.WARNING, method, "Trailer Stimulus Timed Out."])
        testResult = False
    Graph.logQ.put([logType, logLevel.DEBUG, method, "exiting"])
    resultSet = []
    testcase = "Trailer Stimulus"
    results = [1, testcase, str(testResult), str(True), ""]
    resultSet.append(results)
    return resultSet
def testDescriptorSimpleDirect():
"""
Runs the I18N descriptor testcases in RMLDescriptor_Simple.atestby directly calling their executors.
"""
method = moduleName + '.' + | |
Add line if the file is already created
else:
# Add line when the new session is detect
if add_line :
with open(path_script+'Results'+'/'+Patient_First_Name+'_'+Patient_Name+'/'+Report_Number +
'_'+'following.csv', 'ab') as fp:
# Define format as excel format
a = csv.writer(fp, delimiter=';')
# Create row of new session
a.writerow("---------------------------------------------------------")
# Close file
fp.close()
add_line = False
#== FUNCTION: Tracking hands ===========================================================================================
def tracking():
    """Launch the Leap Motion Visualizer so the tracked hands can be observed."""
    # BUG FIX: raw string -- the path contains backslash sequences (\P, \L, \C,
    # \V) that are invalid escape sequences in a normal string literal
    # (SyntaxWarning on modern Python); the runtime value is unchanged.
    subprocess.Popen(r'C:\Program Files (x86)\Leap Motion\Core Services\VisualizerApp.exe')
#=======================================================================================================================
#== CLASS: SampleListener; receives events from controller and executes accordingly ====================================
class SampleListener(Leap.Listener):
    """Leap Motion listener tracking min/max/current joint flexion angles.

    For every frame it walks hands -> fingers -> consecutive bone pairs and
    stores the angle between bone direction vectors into the module-level
    Hands_Angle[hand][finger][joint][{Max, Min, Value}] table.
    """

    # Function executed for each frame:
    def on_frame(self, controller):
        # Get the most recent frame
        frame = controller.frame()
        # For each hand in the current frame (h = 0 for left, 1 for right)
        for hand in frame.hands:
            h = 0 if hand.is_left else 1
            # For each finger of each hand...
            for finger in hand.fingers:
                # The thumb (type 0) has no usable Metacarpal joint, so it
                # iterates Proximal->Distal (bones 2..3); every other finger
                # iterates bones 1..3.  This single loop replaces the two
                # previously duplicated thumb/non-thumb branches.
                first_bone = 2 if finger.type() == 0 else 1
                for b in range(first_bone, 4):
                    # zero-based joint index within this finger
                    j = b - first_bone
                    # previous and current bone of the pair
                    bone1 = finger.bone(b - 1)
                    bone2 = finger.bone(b)
                    # scalar product of the two unit direction vectors
                    sp = dot_product(bone1.direction, bone2.direction)
                    # guard the acos domain against numerical drift
                    if -1 <= sp <= 1:
                        # joint angle in degrees
                        angle_in_degrees = math.degrees(math.acos(sp))
                        # Detect hyper-extension with direction bone1 and
                        # normal bone2 (see testAngleDirection.py)
                        if dot_product(bone1.direction, -bone2.basis.y_basis) < 0:
                            angle_in_degrees = -angle_in_degrees
                        # -------------------------------------------------
                        # Update running minimum / maximum and store value
                        # -------------------------------------------------
                        joint = Hands_Angle[h][finger.type()][j]
                        if joint[Max] < angle_in_degrees:
                            joint[Max] = angle_in_degrees
                        if joint[Min] > angle_in_degrees:
                            joint[Min] = angle_in_degrees
                        joint[Value] = angle_in_degrees
#=======================================================================================================================
#== FUNCTION: acquisition_angle; calculate angle each time =============================================================
def acquisition_angle():
    # Purpose: wire a SampleListener to a Leap controller so frame events
    # update the shared Hands_Angle table.
    # Create controller
    # NOTE(review): this first Controller() is immediately overwritten below
    # and looks redundant -- confirm it has no required side effect (e.g.
    # service connection) before removing it.
    controller = Leap.Controller()
    # Create a sample listener
    listener = SampleListener()
    # Have the sample listener receive events from the controller
    controller = Leap.Controller(listener)
#=======================================================================================================================
#== FUNCTION THREAD #1 : kill_handler ; exit program at any time when user press enter =================================
def kill_handler():
    """Block until the user presses a key, then terminate the whole program."""
    # Wait for a single character on stdin (e.g. the Enter key)
    sys.stdin.read(1)
    # Clear the terminal, then interrupt this very process
    clear_cmd = 'cls' if os.name == 'nt' else 'clear'
    os.system(clear_cmd)
    os.kill(os.getpid(), signal.SIGINT)
#=======================================================================================================================
#== FUNCTION THREAD #2 : consumer_display ; implement display as a consumer in multi-threading==========================
def consumer_display():
    """Consumer thread: pop each angle snapshot off the queue and display it."""
    while True:
        # Block until the producer publishes a fresh set of angles
        snapshot = data_queue.get()
        # Serialize terminal access with the other threads
        with lock:
            display(snapshot)
#=======================================================================================================================
#== FUNCTION THREAD #3 : producer_acquisition_angle ; implement acquisition_angle as a producer in multi-threading =====
def producer_acquisition_angle():
    """Producer thread: acquire angles and publish them on the shared queue."""
    while True:
        # Hold the lock for both the acquisition and the publication,
        # exactly as the consumers expect
        with lock:
            acquisition_angle()
            data_queue.put(Hands_Angle)
#=======================================================================================================================
#== FUNCTION THREAD #4 : consumer_save ; implement save as a consumer in multi-threading================================
def consumer_save():
    """Consumer thread: pop each angle snapshot off the queue and persist it."""
    while True:
        # Block until the producer publishes a fresh set of angles
        snapshot = data_queue.get()
        # Serialize file access with the other threads
        with lock:
            save(snapshot)
#=======================================================================================================================
#== FUNCTION MULTI THREADING : multi_threading ; implement multi-threading as producer-consumer pattern ================
# Queue through which the producer publishes each Hands_Angle snapshot
# to the display and save consumer threads (Python 2 `Queue` module).
data_queue = Queue.Queue()
# Lock serializing the acquisition / display / save critical sections.
lock = threading.Lock()
def multi_threading():
    """Wire up the producer/consumer threads and keep the main thread alive.

    Thread layout (started in this order):
      1. kill_handler               -- exits the program on a keypress
      2. consumer_display           -- prints each published angle snapshot
      3. producer_acquisition_angle -- acquires angles and publishes them
      4. consumer_save              -- appends each snapshot to the .csv file
    All workers are daemons, so they die with the process.
    """
    workers = [
        threading.Thread(target=kill_handler),                # THREAD 1
        threading.Thread(target=consumer_display),            # THREAD 2
        threading.Thread(target=producer_acquisition_angle),  # THREAD 3
        threading.Thread(target=consumer_save),               # THREAD 4
    ]
    for worker in workers:
        worker.daemon = True  # killed automatically when the program ends
        worker.start()
    # The producer loops forever, so join() blocks here and keeps the
    # process running until kill_handler terminates it.
    workers[2].join()
    # Wait until every queued job has been processed (only reached if the
    # producer ever stops).
    data_queue.join()
#=======================================================================================================================
#== FUNCTION: main; executes program ===================================================================================
def main():
    """Program entry point: draw the static hand template, then start the
    acquisition/display/save threads."""
    # Display only template of hands without angles
    display_template()
    # Apply multi-threading for acquisition, display angles calculated and save results
    multi_threading()
#=======================================================================================================================
#== FUNCTION: usage; displays user guide ===============================================================================
def usage():
    """Print the command-line user guide (Python 2 print statement)."""
    print \
        '=============================================================================================================\n'\
        '                                              LEAP MOTION APP                                                \n'\
        '=============================================================================================================\n'\
        '\nDESCRIPTION: \n\n'\
        '   Articular measurements. \n'\
        '\nUSAGE: \n\n'\
        '   python leapMotionAngleAttemp_2.py -h <hand> -f <finger> -j <joint> \n'\
        '\n' \
        'OPTIONAL ARGUMENTS: \n\n'\
        '   -h  hand    | <r> for right hand, <l> for left hand \n'\
        '   -f  finger  | <t> for thumb, <i> for index, <m> for middle, <r> for ring, <p> for pinky \n'\
        '   -j  joint   | <mp>, <ipp> or <ipd> \n'\
        '   -a  help \n'\
        '\n' \
        'NOTE: \n\n'\
        '   If no arguments are supplied, the program will launch by selecting all angles for both hands (default). \n\n' \
        'EXAMPLE: \n\n'\
        '   python leapMotionAngle.py -h r -f i -j ipp \n'
#=======================================================================================================================
#== FUNCTION: usage_help; displays user help banner ====================================================================
def usage_help():
    """Print the interactive help banner (Python 2 print statement)."""
    print '\n-------------------------------------------------------------------------------------------------------------\n'\
          '                                                    HELP                                                     \n'\
          '------------------------------------------------------------------------------------------------------------- \n'\
          '   Follow instructions below. '
#=======================================================================================================================
#== START PROGRAM ======================================================================================================
if __name__ == "__main__":
#-------------------------------------------------------------------------------------------------------------------
# Global variables
#-------------------------------------------------------------------------------------------------------------------
# Index of data, joints, fingers and hands
Min, Value, Max = 0, 1, 2 # Define type of data
MP, IPP, IPD = 0, 1, 2 # Define type of each joint
Thumb, Index, Middle, Ring, Pinky = 0, 1, 2, 3, 4 # Define type of each finger
Left, Right = 0, 1 # Define type of each hand
# List of finger extrema, joints, fingers and hands
Hands_Angle = [[[[180, 0, 0], [180, 0, 0]], # [Min, Value, Max]--> [MP][IPP] --> [Thumb]
[[180, 0, 0], [180, 0, 0], [180, 0, 0]], # [Min, Value, Max]--> [[MP][IPP][IPD]] | |
'حساب',
'Current Address': 'العنوان الحالي',
'Current Beneficiaries': 'المستفيدين الحاليين',
'Current community priorities': 'أولويات المجتمع الحالي',
'Current greatest needs of vulnerable groups': 'أكبر الاحتياجات الحالية للفئات الضعيفة',
'Current Group Members': 'أعضاء الفريق الحالي',
'Current health problems': 'المشاكل الصحية الحالية',
'Current Home Address': 'عنوان السكن الحالي',
'Current Indicators Status': 'حالة المؤشرات الحالية ',
'Current Location': 'الموقع الحالي',
'Current Log Entries': 'إدخالات السجل الحالي',
'Current Occupation': 'المهنة الحالية',
'Current problems, details': 'المشاكل الحالية، تفاصيل',
'Current response': 'إستجابة حالية',
'Current Status': 'الحالة الحالية',
'Current Status of Project': 'الوضع الحالي للمشروع',
'Current Team Members': 'أعضاء الفريق الحالي',
'Current Twitter account': 'حساب twitter الحالي',
'Current Weather': 'الطقس الحالي',
"Current Year's Actual Progress": 'التقدم الفعلي السنة الحالية',
"Current Year's Planned Progress": 'التقدم المخطط للسنة الحالية',
'Current?': 'الحالية ؟',
'Currently no Appraisals entered': 'لا يوجد حاليا أي تقييم دخل',
'Currently no awards registered': 'حاليا لا يوجد أي جوائز مسجلة',
'Currently no Certifications registered': 'حاليا لا توجد شهادات مسجلة',
'Currently no Course Certificates registered': 'لا يوجد حاليا شهادات شهادة مسجلة',
'Currently no Credentials registered': 'حاليًا لم تسجل بيانات اعتماد',
'Currently no entries in the catalog': 'حاليا لا يوجد إدخالات في الكاتالوج',
'Currently no hours recorded for this volunteer': 'حاليا لا يوجد ساعات سجلت لهذا المتطوع',
'Currently no Participants registered': 'لا يوجد حاليا مشاركين مسجلين',
'Currently no Professional Experience entered': 'لا يوجد حاليا خبرة فنية تم ادخالها',
'Currently no programs registered': 'حاليا لا توجد برامج مسجلة',
'Currently no recommendation letter types registered': 'لا توجد حاليا أنواع رسائل توصية مسجلة',
'Currently no recommendation letters registered': 'حاليا لا توجد أي رسائل توصية مسجلة',
'Currently no salary registered': 'حاليا لا يوجد أي راتب مسجل',
'Currently no Skill Equivalences registered': 'حاليا لا يوجد أي مكافئات مهارة مسجلة',
'Currently no Skills registered': 'حاليا لا يوجد أي مهارات مسجلة',
'Currently no staff assigned': 'حاليا لا يوجد أي موظف تعيين',
'Currently no training events registered': 'حاليا لا يوجد أي دورات تدريبية مسجلة',
'CV': 'السيرة الذاتية',
'Cyclone': 'الإعصار',
'Daily': 'اليومي',
'dark': 'داكن',
'Dashboard': 'لوحة التحكم',
'data uploaded': 'تم رفع البيانات',
'Data uploaded': 'تم رفع البيانات',
'database %s select': 'اختر قاعدة البيانات بـ%s',
'Database Development': 'تطوير قاعدة بيانات',
'Date': '',
'Date & Time': 'التاريخ والوقت',
'Date and time this report relates to.': 'تاريخ و وقت هذا التقرير يتعلق بـ.',
'Date Created': 'تاريخ الإنشاء',
'Date Due': 'تاريخ الاستحقاق',
'Date Exported': 'تاريخ تصديرها',
'Date for Follow-up': 'تاريخ المتابعة',
'Date is required!': 'التأريخ مطلوب',
'Date Modified': 'تأريخ التعديل',
'Date must be %(max)s or earlier!': 'يجب أن يكون التاريخ %(max)s أو في وقت سابق!',
'Date must be %(min)s or later!': 'يجب أن يكون التاريخ %(min)s أو في وقت لاحق!',
'Date must be between %(min)s and %(max)s!': 'يجب أن يكون التاريخ بين %(min)s و %(max)s المفضل',
'Date Needed By': 'تاريخ الوصول',
'Date of Birth': 'تاريخ الميلاد',
'Date of Birth is Required': 'تاريخ الميلاد مطلوب',
'Date of Dismissal': 'تاريخ الفصل',
'Date of Latest Information on Beneficiaries Reached': 'لقد وصل تاريخ آخر المعلومات عن المستفيدين',
'Date of Re-recruitment': 'تاريخ إعادة التوظيف',
'Date of Recruitment': 'تاريخ التوظيف',
'Date Printed': 'تاريخ الطبع',
'Date Question': 'تاريخ السؤال',
'Date Received': 'تاريخ استلامه',
'Date Repacked': 'تاريخ إعادة التعبئة',
'Date Requested': 'الموعد المطلوب',
'Date Responded': 'تاريخ الرد',
'Date Sent': 'تاريخ الإرسال',
'Date/Time': 'التاريخ / الوقت',
'Date/Time is required!': 'التأريخ / الوقت مطلوب',
'Date/Time must be %(max)s or earlier!': 'يجب أن يكون تاريخ / وقت %(max)s الصورة أو في وقت سابق!',
'Date/Time must be %(min)s or later!': 'ويجب أن يكون تاريخ / وقت %(min)s الصورة أو في وقت لاحق!',
'Date/Time must be between %(min)s and %(max)s!': 'يجب أن يكون تاريخ / وقت بين %(min)s و %(max)s المفضل',
'Day': 'يوم',
'Dead Body Details': 'تفاصيل الجثة الميت',
'Dead Body Reports': 'تقارير الجثة الميتة',
'Dear %(person_name)s': 'عزيزي %(person_name)s',
'Dear Brother %(person_name)s': 'أخي العزيز %(person_name)s',
'Dear Sister %(person_name)s': 'عزيزتي الأخت %(person_name)s',
'deceased': 'متوفي',
'Deceased': 'متوفى',
'Decimal Degrees': 'الدرجات العشرية',
'Decomposed': 'متحللة',
'default': 'الإفتراضي',
'Default': 'اساسي',
'Default Answer': 'الإجابة الإفتراضية',
'Default Language': 'اللغة الافتراضية',
'Default Width of the map window.': 'العرض الافتراضي لإطار الخريطة.',
'Defines the icon used for display of features on interactive map & KML exports.': 'يعرف الرمز المستخدم لعرض ملامح من الخريطة التفاعلية وتصدير KML.',
'Defines the marker used for display & the attributes visible in the popup.': 'يحدد العلامة المستخدمة للعرض والسمات الظاهرة للتوضيح',
'Definition': 'تعريف',
'Degrees in a latitude must be between -90 to 90.': 'ويجب أن تكون درجة في خط العرض بين -90 إلى 90.',
'Degrees in a longitude must be between -180 to 180.': 'ويجب أن تكون درجة في الطول بين -180 إلى 180.',
'Degrees must be a number.': 'ويجب أن تكون درجة عددا.',
'Delete': 'مسح',
'Delete Activity': 'حذف النشاط',
'Delete Activity Type': 'حذف نوع النشاط',
'Delete Affiliation': 'حذف الانتساب',
'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'احذف كافة البيانات من هذا النوع التي يملك المستخدم الإذن قبل تحميلها. تم تصميم هذا الأمر لمهام سير العمل حيث يتم الاحتفاظ بالبيانات في جدول بيانات غير متصل ويتم تحميلها فقط للقراء.',
'Delete Appraisal': 'حذف تقييم',
'Delete Assessment': 'حذف التقييم',
'Delete Assessment Summary': 'حذف ملخص التقييم',
'Delete Award': 'حذف جائزة',
'Delete Baseline': 'حذف الخط القاعدي',
'Delete Branch': 'حذف فرع',
'Delete Certificate': 'حذف شهادة',
'Delete Certification': 'حذف شهادة',
'Delete Cluster': 'حذف الكتلة',
'Delete Commitment': 'حذف الالتزام',
'Delete Competency Rating': 'حذف تصنيف الكفاءات',
'Delete Contact': 'حذف اتصال',
'Delete Contact Information': 'حذف معلومات الشخص المراد الاتصال به',
'Delete Course': 'حذف الدورة',
'Delete Course Certificate': 'حذف شهادة المقرر',
'Delete Credential': 'حذف الاعتمادات',
'Delete Department': 'حذف القسم',
'Delete Deployment': 'حذف النشر',
'Delete Donor': 'حذف مانح',
'Delete Emergency Contact': 'حذف الاتصال في حالات الطوارئ',
'Delete Entry': 'حذف الإدخال ',
'Delete Event': 'حذف الحدث',
'Delete Facility': 'حذف مرفق',
'Delete Facility Type': 'حذف نوع مرفق',
'Delete Feature Layer': 'حذف خاصية الطبقة',
'Delete Group': 'حذف المجموعة',
'Delete Group Member Role': 'حذف دور عضو المجموعة',
'Delete Group Status': 'حذف حالة المجموعة',
'Delete Hazard': 'حذف المخاطر',
'Delete Hours': 'حذف ساعات',
'Delete Image': 'حذف صورة',
'Delete Job Title': 'حذف المسمى الوظيفي',
'Delete Kit': 'حذف طقم أدوات',
'Delete Layer': 'حذف طبقة',
'Delete Level 2 Assessment': 'حذف تقييم المستوى 2',
'Delete Location': 'حذف الموقع',
'Delete Map Profile': 'حذف تكوين خريطة',
'Delete Membership': 'حذف العضوية',
'Delete Message': 'حذف الرسالة',
'Delete Mission': 'حذف المهمة',
'Delete National Society': 'حذف الجمعية الوطنية',
'Delete Need': 'حذف الحاجة',
'Delete Need Type': 'حذف نوع الحاجة',
'Delete Occupation Type': 'حذف نوع الوظيفة ',
'Delete Office': 'حذف مكتب',
'Delete Office Type': 'حذف نوع المكتب ',
'Delete Organization': 'حذف المنظمة',
'Delete Organization Type': 'حذف نوع المنظمة',
'Delete Participant': 'حذف مشارك',
'Delete Partner Organization': 'حذف المنظمة الشريكة',
'Delete Person': 'حذف شخص',
'Delete Person Subscription': 'حذف اشتراك الشخص',
'Delete Photo': 'حذف الصورة',
'Delete Population Statistic': 'حذف إحصاء السكان',
'Delete Professional Experience': 'حذف الخبرة المهنية',
'Delete Program': 'حذف برنامج',
'Delete Project': 'حذف مشروع',
'Delete Projection': 'حذف التخطيط',
'Delete Rapid Assessment': 'حذف التقييم السريع',
'Delete Received Item': 'تم حذف العنصر المستلم',
'Delete Received Shipment': 'حذف الشحنة المستلمة',
'Delete Recommendation Letter': 'حذف رسالة التوصية',
'Delete Recommendation Letter Type': 'حذف نوع رسالة التوصية',
'Delete Record': 'حذف سجل',
'Delete Region': 'حذف منطقة',
'Delete Religion': 'حذف الدين',
'Delete Request': 'حذف الطلب',
'Delete Request Item': 'حذف عنصرالطلب',
'Delete Resource': 'حذف الموارد',
'Delete Resource Type': 'حذف نوع المورد',
'Delete Response': 'حذف الرد',
'Delete Role': 'حذف دور',
'Delete Room': 'حذف غرفة',
'Delete Salary': 'حذف الراتب',
'Delete Scenario': 'حذف السيناريو',
'Delete Sector': 'حذف قطاع',
'Delete Service': 'حذف خدمة',
'Delete Setting': 'حذف إعداد',
'Delete Skill': 'حذف مهارة',
'Delete Skill Equivalence': 'حذف مكافئ المهارة',
'Delete Skill Type': 'حذف نوع المهارة',
'Delete Staff Assignment': 'حذف تعيين الموظفين',
'Delete Staff Member': 'حذف موظف',
'Delete Status': 'حذف حالة',
'Delete Strategy': 'حذف استراتيجية',
'Delete Supplier': 'حذف المورد',
'Delete Survey': 'حذف مسح',
'Delete Survey Series': 'حذف سلاسل المسح',
'Delete Theme': 'حذف النسق',
'Delete this Filter': 'حذف هذا المرشح',
'Delete Training': 'حذف التدريب',
'Delete Training Center': 'حذف مركز التدريب',
'Delete Training Event': 'حذف حدث التدريب',
'Delete Unit': ' حذف الوحدة',
'Delete Volunteer': 'حذف المتطوع',
'Delete Volunteer Role': 'حذف نوع المتطوع',
'Delete Warehouse': 'حذف المستودع',
'Delete Warehouse Type': 'حذف نوع المستودع',
'deleted': 'محذوف',
'Deletion failed': 'فشل الحذف',
'Deletion Failed': 'فشل الحذف',
'Deliver To': 'يسلم إلى',
'Delphi Decision Maker': 'صانع قرار دلفي',
'Demographic': 'السكانية',
'Demographics': 'السكانية',
'Demonstrations': 'مظاهرات',
'Denominator': 'وسيط',
'Department / Unit': 'القسم / الوحدة',
'Department added': ' تم إضافة قسم',
'Department Catalog': 'لائحة الاقسام',
'Department deleted': 'تم حذف قسم',
'Department Details': 'تفاصيل القسم',
'Department updated': 'قسم تم تحديثه',
'Deployables': 'قابل للنشر',
'Deployed': 'نشر',
'Deploying NS': 'نشر NS',
'Deployment': 'نشر',
'Deployment added': 'تم إضافة النشر',
'Deployment Alert': 'تنبيه النشر',
'Deployment Date': 'تاريخ النشر',
'Deployment deleted': 'تم حذف النشر',
'Deployment Details': 'تفاصيل النشر',
'Deployment Details updated': 'تفاصيل النشر تم تحديثه',
'Deployment Team': 'فريق للنشر',
'Deployments': 'عمليات النشر',
'Describe the condition of the roads to your hospital.': 'وصف حالة الطرق الى المستشفى الخاص بك.',
"Describe the procedure which this record relates to (e.g. 'medical examination')": "وصف الإجراء الذي يتعلق به هذا السجل (مثل 'الفحص الطبي')",
'Description': 'الوصف',
'Description of defecation area': 'وصف منطقة التغوط',
'Description of drinking water source': 'وصف مصدر مياه الشرب',
'Deselect All': 'الغاء تحديد الكل',
'Design, deploy & analyze surveys.': 'تصميم-نشر-تحليل المسوحات',
'Designation(s)': 'التعيين',
'Desluding ': 'إنهاء',
'Destination': 'الوجهة',
'Detailed Description': 'الوصف التفصيلي',
'Details': 'التفاصيل',
'Dialysis': 'غسيل الكلى',
'Diarrhea': 'إسهال',
'Died': 'مات',
'Direct Date': 'التأريخ المباشر',
'Direct Number': 'رقم المباشرة',
'Disable': 'تعطيل',
'Disabled': 'معطل',
'Disabled participating in coping activities': 'تعطيل المشاركة في أنشطة المواجهة',
'Disabled?': 'معاق؟',
'Disaster Assessments': 'تقييمات الكوارث',
'Disaster clean-up/repairs': 'الإصلاحات/التنظيف بعد الكارثة',
'Disaster Law': 'قانون الكوارث',
'Disaster Management': 'ادارة الكوارث',
'Disaster Preparedness & Risk Reduction': | |
"""Test the search module"""
from collections.abc import Iterable, Sized
from io import StringIO
from itertools import chain, product
from functools import partial
import pickle
import sys
from types import GeneratorType
import re
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils.fixes import sp_version
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.base import clone
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import fit_grid_point
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier:
    """Minimal estimator stub used to exercise the parameter-search code.

    ``foo_param`` is the single tunable: ``score`` returns 1.0 whenever it
    is greater than 1 and 0.0 otherwise, giving searches a known optimum.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # Same contract as a real estimator: record the classes, return self.
        assert len(X) == len(Y)
        self.classes_ = np.unique(Y)
        return self

    def predict(self, T):
        # The "prediction" is simply the number of samples handed in.
        return T.shape[0]

    def transform(self, X):
        return X + self.foo_param

    def inverse_transform(self, X):
        return X - self.foo_param

    # The probabilistic / decision interfaces all alias predict.
    predict_proba = predict
    predict_log_proba = predict
    decision_function = predict

    def score(self, X=None, Y=None):
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """An LinearSVC classifier that has no score method."""
    @property
    def score(self):
        # Shadow the inherited score() so that attribute access raises,
        # emulating an estimator that provides no score method at all.
        raise AttributeError
# Tiny linearly separable two-class dataset shared by the tests below.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Check that iterating *grid* yields the same candidates as indexing it."""
    by_index = [grid[idx] for idx in range(len(grid))]
    assert list(grid) == by_index
@pytest.mark.parametrize("klass", [ParameterGrid,
                                   partial(ParameterSampler, n_iter=10)])
@pytest.mark.parametrize(
    "input, error_type, error_message",
    [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'),
     ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'),
     ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*"
                             r"\(key='foo', value=0\)")]
)
def test_validate_parameter_input(klass, input, error_type, error_message):
    """Invalid parameter specifications must raise TypeError with a
    descriptive message, for both ParameterGrid and ParameterSampler."""
    with pytest.raises(error_type, match=error_message):
        klass(input)
def test_parameter_grid():
    """ParameterGrid is a sized, indexable, re-iterable collection covering
    the Cartesian product of the supplied parameter values."""
    # Test basic properties of ParameterGrid.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert isinstance(grid1, Iterable)
    assert isinstance(grid1, Sized)
    assert len(grid1) == 3
    assert_grid_iter_equals_getitem(grid1)
    # Two keys: the grid is the full cross product (2 * 3 candidates).
    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert len(grid2) == 6
    # loop to assert we can iterate over the grid multiple times
    for i in range(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert (points ==
                set(("bar", x, "foo", y)
                    for x, y in product(params2["bar"], params2["foo"])))
    assert_grid_iter_equals_getitem(grid2)
    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert len(empty) == 1
    assert list(empty) == [{}]
    assert_grid_iter_equals_getitem(empty)
    assert_raises(IndexError, lambda: empty[1])
    # A list of grids: the total length is the sum of the parts.
    has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
    assert len(has_empty) == 4
    assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}]
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    """GridSearchCV on MockClassifier selects the smallest tied foo_param,
    exposes the refit estimator's methods, and rejects unknown scorers."""
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    old_stdout = sys.stdout
    sys.stdout = StringIO()  # swallow the verbose=3 output
    try:
        grid_search.fit(X, y)
    finally:
        # BUGFIX: restore stdout even if fit() raises; previously a failure
        # here left sys.stdout redirected for the rest of the test session.
        sys.stdout = old_stdout
    assert grid_search.best_estimator_.foo_param == 2
    assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
                       [1, 2, 3])
    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)
    # Test exception handling on scoring
    grid_search.scoring = 'sklearn'
    assert_raises(ValueError, grid_search.fit, X, y)
def test_grid_search_pipeline_steps():
    """Estimator-valued parameters must appear in cv_results_ as unfitted
    clones, without mutating the caller's param_grid."""
    # check that parameters that are estimators are cloned before fitting
    pipe = Pipeline([('regressor', LinearRegression())])
    param_grid = {'regressor': [LinearRegression(), Ridge()]}
    grid_search = GridSearchCV(pipe, param_grid, cv=2)
    grid_search.fit(X, y)
    regressor_results = grid_search.cv_results_['param_regressor']
    assert isinstance(regressor_results[0], LinearRegression)
    assert isinstance(regressor_results[1], Ridge)
    # unfitted clones: cv_results_ must not hold fitted estimators
    assert not hasattr(regressor_results[0], 'coef_')
    assert not hasattr(regressor_results[1], 'coef_')
    assert regressor_results[0] is not grid_search.best_estimator_
    assert regressor_results[1] is not grid_search.best_estimator_
    # check that we didn't modify the parameter grid that was passed
    assert not hasattr(param_grid['regressor'][0], 'coef_')
    assert not hasattr(param_grid['regressor'][1], 'coef_')
@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV])
def test_SearchCV_with_fit_params(SearchCV):
    """fit params must be forwarded to the estimator; missing or wrongly
    sized ones surface as AssertionError from CheckingClassifier."""
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
    searcher = SearchCV(
        clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise"
    )
    # The CheckingClassifier generates an assertion error if
    # a parameter is missing or has length != len(X).
    err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen."
    with pytest.raises(AssertionError, match=err_msg):
        searcher.fit(X, y, spam=np.ones(10))
    err_msg = "Fit parameter spam has length 1; expected"
    with pytest.raises(AssertionError, match=err_msg):
        searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10))
    # Correctly shaped fit params: must not raise.
    searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
@ignore_warnings
def test_grid_search_no_score():
    """With an explicit scoring, an estimator without score() behaves like
    one with it; omitting scoring for such an estimator raises TypeError."""
    # Test grid-search on classifier that has no score function.
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    clf_no_score = LinearSVCNoScore(random_state=0)
    grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
    grid_search.fit(X, y)
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
                                        scoring='accuracy')
    # smoketest grid search
    grid_search_no_score.fit(X, y)
    # check that best params are equal
    assert grid_search_no_score.best_params_ == grid_search.best_params_
    # check that we can call score and that it gives the correct result
    assert grid_search.score(X, y) == grid_search_no_score.score(X, y)
    # giving no scoring function raises an error
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
    assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
                         [[1]])
def test_grid_search_score_method():
    """search.score must use the scoring parameter when one is given, and
    fall back to the estimator's own score method otherwise."""
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
                               random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {'C': [.1]}
    search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
    search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
    search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
                                              scoring='roc_auc'
                                              ).fit(X, y)
    search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
    # Check warning only occurs in situation where behavior changed:
    # estimator requires score method to compete with scoring parameter
    score_no_scoring = search_no_scoring.score(X, y)
    score_accuracy = search_accuracy.score(X, y)
    score_no_score_auc = search_no_score_method_auc.score(X, y)
    score_auc = search_auc.score(X, y)
    # ensure the test is sane
    assert score_auc < 1.0
    assert score_accuracy < 1.0
    assert score_auc != score_accuracy
    assert_almost_equal(score_accuracy, score_no_scoring)
    assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
    """Group-based CV splitters must require (and receive) the groups
    argument through GridSearchCV.fit; non-group splitters must not."""
    # Check if ValueError (when groups is None) propagates to GridSearchCV
    # And also check if groups is correctly passed to the cv object
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 3, 15)
    clf = LinearSVC(random_state=0)
    grid = {'C': [1]}
    group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2),
                 GroupKFold(n_splits=3), GroupShuffleSplit()]
    for cv in group_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # Without groups the splitter must complain...
        assert_raise_message(ValueError,
                             "The 'groups' parameter should not be None.",
                             gs.fit, X, y)
        # ...and with groups the fit must succeed.
        gs.fit(X, y, groups=groups)
    non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
    for cv in non_group_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # Should not raise an error
        gs.fit(X, y)
def test_classes__property():
    """classes_ mirrors best_estimator_.classes_ and exists only for
    fitted, refit classifiers (not regressors, unfitted or refit=False)."""
    # Test that classes_ property matches best_estimator_.classes_
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    Cs = [.1, 1, 10]
    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    grid_search.fit(X, y)
    assert_array_equal(grid_search.best_estimator_.classes_,
                       grid_search.classes_)
    # Test that regressors do not have a classes_ attribute
    grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_')
    # Test that the grid searcher has no classes_ attribute before it's fit
    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    assert not hasattr(grid_search, 'classes_')
    # Test that the grid searcher has no classes_ attribute without a refit
    grid_search = GridSearchCV(LinearSVC(random_state=0),
                               {'C': Cs}, refit=False)
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_')
def test_trivial_cv_results_attr():
    """Both search classes must expose cv_results_ after fitting, even when
    the "grid" contains a single candidate."""
    # Test search over a "grid" with only one point.
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3)
    grid_search.fit(X, y)
    assert hasattr(grid_search, "cv_results_")
    random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3)
    random_search.fit(X, y)
    # BUGFIX: the original asserted on grid_search again here, leaving the
    # RandomizedSearchCV result unverified.
    assert hasattr(random_search, "cv_results_")
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
for scoring in [None, ['accuracy', 'precision']]:
grid_search = GridSearchCV(
clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3
)
grid_search.fit(X, y)
assert not hasattr(grid_search, "best_estimator_") and \
hasattr(grid_search, "best_index_") and \
hasattr(grid_search, "best_params_")
# Make sure the functions predict/transform etc raise meaningful
# error messages
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters'
% fn_name), getattr(grid_search, fn_name), X)
# | |
bool
:param use_user_defaults: If ``True`` and a user configuration is found, this will be used as a default for all non
set arguments. So the value will be determined according to the first found instance of: argument,
user default, tool default
:type user: :class:`evaluation_system.model.user.User`
:param user: The user for whom this arguments are parsed.
:type config_file: str
:param config_file: path to a file from where the setup will read a configuration. If None, the default
user dependent one will be used. This will be completely skipped if ``use_user_defaults`` is ``False``.
:return: A dictionary with the parsed configuration."""
plugin_name = plugin_name.lower()
if user is None:
user = User()
p = getPluginInstance(plugin_name, user)
complete_conf = p.__parameters__.parseArguments(arguments, use_defaults=True, check_errors=False)
# if we are using user defaults then load them first
if use_user_defaults:
user_config_file = user.getUserToolConfig(plugin_name)
if os.path.isfile(user_config_file):
with open(user_config_file, 'r') as f:
complete_conf.update(p.readConfiguration(f))
# now if we still have a config file update what the configuration with it
if isinstance(config_file, basestring):
if config_file == '-':
# reading from stdin
complete_conf.update(p.readConfiguration(sys.stdin))
elif config_file is not None:
with open(config_file, 'r') as f:
complete_conf.update(p.readConfiguration(f))
elif config_file is not None:
# if it's not a string and is something, we assume is something that can be read from
complete_conf.update(p.readConfiguration(config_file))
# update with user defaults if desired
complete_conf.update(p.__parameters__.parseArguments(arguments, check_errors=False))
# we haven't check for errors because we might have a half implemented configuration
# some required field might have already been setup (user/system defaults, files, etc)
# but better if we check them
if check_errors:
p.__parameters__.validate_errors(complete_conf, raise_exception=True)
return complete_conf
def writeSetup(plugin_name, config_dict=None, user=None, config_file=None):
    """Writes the plug-in setup to disk. This is the configuration for the plug-in itself and not that
    of the tool (which is what normally the plug-in encapsulates). The plug-in is not required to write anything to
    disk when running the tool; it might instead configure it from the command line, environmental variables or
    any other method.

    :type plugin_name: str
    :param plugin_name: name of the referred plugin.
    :type config_dict: dict or metadict
    :param config_dict: The configuration being stored. If None, the default configuration
        will be stored; this might be incomplete.
    :type user: :class:`evaluation_system.model.user.User`
    :param user: The user for whom the setup is written. Defaults to the current user.
    :type config_file: str
    :param config_file: path to a file where the setup will be stored. If None, the default
        user-dependent file will be used. If ``'-'``, the setup is written to stdout instead.
    :returns: The path to the configuration file that was written (``'-'`` when writing to stdout)."""
    plugin_name = plugin_name.lower()
    if user is None:
        user = User()
    p = getPluginInstance(plugin_name, user, user.getName())
    # check_cfg=False: the stored setup may legitimately be incomplete at this point
    complete_conf = p.setupConfiguration(config_dict=config_dict, check_cfg=False, substitute=False)
    if config_file is None:
        # make sure the required directory structure and data is in place
        user.prepareDir()
        config_file = user.getUserToolConfig(plugin_name, create=True)
    if config_file == '-':
        p.saveConfiguration(sys.stdout, config_dict=complete_conf)
    else:
        with open(config_file, 'w') as f:
            p.saveConfiguration(f, config_dict=complete_conf)
    return config_file
def _preview_copy(source_path, dest_path):
    """
    Copy a result file into the preview area, downscaling images on the way.

    :type source_path: str
    :param source_path: the source
    :type dest_path: str
    :param dest_path: the destination
    """
    if source_path.split('.')[-1] in ['pdf', 'zip']:
        # pdf/zip cannot be resized by ImageMagick -> plain copy
        shutil.copyfile(source_path, dest_path)
    else:
        # '800x>' shrinks only images wider than 800px, smaller ones are kept as-is
        command = ['convert', '-resize', '800x>', source_path, dest_path]
        sub.call(command)
    # 509 == 0o775: previews must be readable by the web server (see _preview_convert).
    # Previously chmod was applied only on the convert branch, leaving copied
    # pdf/zip previews with the default (possibly unreadable) permissions.
    os.chmod(dest_path, 509)
def _preview_convert(source_path, dest_path):
    """
    Convert an image to a resized preview via ImageMagick.

    :type source_path: str
    :param source_path: the file name of the file to convert
    :type dest_path: str
    :param dest_path: The file name of the converted file
    """
    # Shell out to ImageMagick and scale the image to 800px width.
    # (A PIL-based Image.open/save pipeline would be preferable once the
    # installed PIL version supports it.)
    sub.call(['convert', '-resize', '800x', source_path, dest_path])
    # 509 == 0o775: on mistral apache cannot read the files otherwise
    os.chmod(dest_path, 509)
def _preview_generate_name(plugin_name, file_name, metadata):
"""
Creates a filename according to the plugin_name, timestamp and
an eight character random string
:type plugin_name: str
:param plugin_name: name of the referred plugin.
:type file_name: str
:param file_name: the file to create a preview name for
:type ext: str
:param ext: the extension of the file to be created
:type metadata: dict
:param metadata: the meta-data for the file, to access timestamp
"""
random_suffix = ''.join(random.choice(string.letters) for i in xrange(8))
ctime = metadata.get('timestamp', '')
if ctime:
time_string = datetime.datetime.fromtimestamp(ctime).strftime('%Y%m%d_%H%M%S')
ctime = '%s_' % time_string
return plugin_name + '_' + ctime + random_suffix
def _preview_unique_file(plugin_name, file_name, ext, metadata):
    """
    Create a unique (not yet existing) target path for a preview file,
    located in a per-day subdirectory of the configured preview area.

    :type plugin_name: str
    :param plugin_name: name of the referred plugin.
    :type file_name: str
    :param file_name: the file to create a preview name for
    :type ext: str
    :param ext: the extension of the file to be created
    :type metadata: dict
    :param metadata: the meta-data for the file, to access timestamp
    :returns: a full path that does not collide with an existing file
    """
    path = config.get(config.PREVIEW_PATH)
    subdir = datetime.datetime.now().strftime('%Y%m%d')
    name = _preview_generate_name(plugin_name, file_name, metadata)
    name += ext
    full_path = os.path.join(path, subdir)
    full_name = os.path.join(full_path, name)
    if path.strip() and not os.path.isdir(full_path):
        # 0o777 (was the Python-2-only literal 0777): the preview tree must be
        # accessible to the web server
        utils.supermakedirs(full_path, 0o777)
    if os.path.isfile(full_name):
        # name collision: recurse to draw a fresh random suffix
        return _preview_unique_file(plugin_name, file_name, ext, metadata)
    return full_name
def _preview_create(plugin_name, result):
    """
    Create previews for all result files and record each preview path in the
    corresponding file's meta-data.

    :type plugin_name: str
    :param plugin_name: name of the referred plugin.
    :type result: meta_dict
    :param result: a meta dictionary describing the result files
    :returns: list of the preview file paths that were scheduled for creation
    """
    todo_list = []
    result_list = []
    for file_name in result:
        metadata = result[file_name]
        action = metadata.get('todo', '')
        worker = None
        if action == 'copy':
            # keep the original extension when copying
            extension = os.path.splitext(file_name)[-1]
            worker = _preview_copy
            target_name = _preview_unique_file(plugin_name, file_name, extension, metadata)
        elif action == 'convert':
            # converted previews always become PNGs
            worker = _preview_convert
            target_name = _preview_unique_file(plugin_name, file_name, '.png', metadata)
        if worker is not None:
            todo_list.append((worker, file_name, target_name))
            metadata['preview_path'] = target_name
            result_list.append(target_name)
        result[file_name] = metadata
    preview_path = config.get(config.PREVIEW_PATH)
    if preview_path.strip() and todo_list:
        # fan the copy/convert jobs out over a worker pool
        pool = Pool(config.NUMBER_OF_PROCESSES)
        pool.map(utils.mp_wrap_fn, todo_list)
    return result_list
def generateCaption(caption, toolname):
    """
    Generates a standardized caption including the toolname.

    :type caption: str
    :param caption: The caption to be standardized
    :type toolname: str
    :param toolname: the toolname
    :return: String containing the standardized caption
    """
    import re
    caption = caption.strip()
    toolname = toolname.strip().upper()
    retval = toolname
    if caption.lower() != toolname.lower():
        # strip a single leading '*' marker
        if caption.startswith('*'):
            caption = caption[1:]
        # append ' (TOOLNAME)' unless the caption already ends with it;
        # re.escape guards against regex metacharacters in the tool name
        # (e.g. 'C++' previously crashed the regex compiler)
        pattern = r'\(' + re.escape(toolname) + r'\)$'
        if re.search(pattern, caption, re.IGNORECASE) is None:
            retval = caption + ' (' + toolname + ')'
        else:
            retval = caption
    else:
        # this assures that the toolname appears in the user preferred case
        retval = caption
    return retval
def runTool(plugin_name, config_dict=None, user=None, scheduled_id=None,
caption=None, unique_output=True):
"""Runs a tool and stores this "run" in the :class:`evaluation_system.model.db.UserDB`.
:type plugin_name: str
:param plugin_name: name of the referred plugin.
:type config_dict: dict or metadict
:param config_dict: The configuration used for running the tool. If is None, the default configuration will be stored,
this might be incomplete.
:type user: :class:`evaluation_system.model.user.User`
:param user: The user starting the tool
:type scheduled_id: int
:param scheduled_id: if the process is already scheduled then put the row id here
:type caption: str
:param caption: the caption to set.
"""
plugin_name = plugin_name.lower()
if user is None:
user = User()
p = getPluginInstance(plugin_name, user)
complete_conf = None
# check whether a scheduled id is given
if scheduled_id:
config_dict = loadScheduledConf(plugin_name, scheduled_id, user)
if config_dict is None:
conf_file = user.getUserToolConfig(plugin_name)
if os.path.isfile(conf_file):
log.debug('Loading config file %s', conf_file)
with open(conf_file, 'r') as f:
complete_conf = p.readConfiguration(f)
else:
log.debug('No config file was found in %s', conf_file)
if complete_conf is None:
# at this stage we want to resolve or tokens and perform some kind of sanity check before going further
complete_conf = p.setupConfiguration(config_dict=config_dict, recursion=True)
log.debug('Running %s with %s', plugin_name, complete_conf)
rowid = 0
if scheduled_id:
user.getUserDB().upgradeStatus(scheduled_id,
user.getName(),
History.processStatus.running)
rowid = scheduled_id
elif user:
version_details = getVersion(plugin_name)
rowid = user.getUserDB().storeHistory(p,
complete_conf,
user.getName(),
History.processStatus.running,
version_details=version_details,
caption=caption)
# follow the notes
followHistoryTag(rowid, user.getName(), 'Owner')
try:
# we want that the rowid is visible to the tool
p.rowid = rowid
# In any case we have now a complete setup in complete_conf
result = p._runTool(config_dict=complete_conf,
unique_output=unique_output)
# save results when | |
self.INIT_transition = 10
#Initialize the fields
bsz = 1
if self.resume:
#Import the saved fields
velx = np.load(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_velocity_x_field.npy')[-1,0,:,:]
vely = np.load(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_velocity_y_field.npy')[-1,0,:,:]
velx = torch.from_numpy(velx).cuda()
vely = torch.from_numpy(vely).cuda()
velx = velx[None, None, :, :]
vely = vely[None, None, :, :]
velocity_big = torch.cat((velx, vely), dim=1)
tensor_U = math.wrap(velocity_big.squeeze(2), 'batch,vector,x,y')
tensor_U_unstack = unstack_staggered_tensor(tensor_U)
self.velocity = StaggeredGrid(tensor_U_unstack, self.DOMAIN.bounds)
try:
velmaskx = np.load(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_vel_mask_x_field.npy')[-1,0,:,:]
velmasky = np.load(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_vel_mask_y_field.npy')[-1,0,:,:]
velmaskx = torch.from_numpy(velmaskx).cuda()
velmasky = torch.from_numpy(velmasky).cuda()
velmaskx = velmaskx[None, None, :, :]
velmasky = velmasky[None, None, :, :]
velmaskbig = torch.cat((velmaskx, velmasky), dim=1)
tensor_U_mask = math.wrap(velmaskbig.squeeze(2), 'batch,vector,x,y')
tensor_U_mask_unstack = unstack_staggered_tensor(tensor_U_mask)
self.vel_mask = StaggeredGrid(tensor_U_mask_unstack, self.DOMAIN.bounds)
except:
self.vel_mask = ((self.DOMAIN.staggered_grid(Noise(batch=bsz)) * 0 )+1)
print('the vel mask was not imported in resume')
pfield = np.load(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_pressure_field.npy')[-1,:,:,:]
self.pressure = CenteredGrid(tensor(torch.from_numpy(pfield).cuda(), names=['batch', 'x', 'y']), self.DOMAIN.bounds)
self.density = CenteredGrid(tensor(torch.zeros((bsz, self.Nx, self.Ny)).cuda(), names=['batch', 'x', 'y']), self.DOMAIN.bounds)
else:
#Create the fields
self.velocity = ((self.DOMAIN.staggered_grid(Noise(batch=bsz)) * 0 )+1) *(1,0)
self.vel_mask = ((self.DOMAIN.staggered_grid(Noise(batch=bsz)) * 0 )+1)
self.pressure = CenteredGrid(tensor(torch.zeros((bsz, self.Nx, self.Ny)), names=['batch', 'x', 'y']), self.DOMAIN.bounds)
self.density = CenteredGrid(tensor(torch.zeros((bsz, self.Nx, self.Ny)), names=['batch', 'x', 'y']), self.DOMAIN.bounds)
self.time_recorder.record(point_name='end_define_simulation_fields')
def initialize_aux_variables(self):
    """Initialize (or restore, when resuming) the output bookkeeping variables.

    On resume, the previously saved probe/force arrays and field histories are
    reloaded from ``self.out_dir``; otherwise they start zeroed/empty.  Also
    sets up the GIF writers (when field plotting is enabled) and the progress bar.
    """
    self.time_recorder.record(point_name='init_initialize_aux_variables')
    # Output Variables Initialization
    # common prefix of every output file of this run
    base = f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}'
    if self.resume:
        if self.post_computations:
            self.velocity_probe = np.load(base + '_velocity_probe.npy')
            self.vforce = np.load(base + '_vforce.npy')
            self.hforce = np.load(base + '_hforce.npy')
        self.velocity_x_field = np.load(base + '_velocity_x_field.npy').tolist()
        self.velocity_y_field = np.load(base + '_velocity_y_field.npy').tolist()
        self.pressure_field = np.load(base + '_pressure_field.npy').tolist()
        self.iteration_field = np.load(base + '_iteration_field.npy').tolist()
        try:
            self.vel_mask_x_field = np.load(base + '_vel_mask_x_field.npy').tolist()
            self.vel_mask_y_field = np.load(base + '_vel_mask_y_field.npy').tolist()
        except (OSError, ValueError):
            # mask files may be missing/corrupt in runs saved by older versions;
            # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
            self.vel_mask_x_field = []
            self.vel_mask_y_field = []
    else:
        if self.post_computations:
            self.velocity_probe = np.zeros(self.Nt)
            self.vforce = np.zeros(self.Nt)
            self.hforce = np.zeros(self.Nt)
        self.velocity_x_field = []
        self.velocity_y_field = []
        self.pressure_field = []
        self.vel_mask_x_field = []
        self.vel_mask_y_field = []
        self.iteration_field = []
    if self.plot_field:
        self.gif_pressure = GIF(gifname=base + '_pressure', total_frames=self.Nt)
        self.gif_vorticity = GIF(gifname=base + '_vorticity', total_frames=self.Nt)
        self.gif_velocity = GIF(gifname=base + '_velocity', total_frames=self.Nt)
        self.gif_distribution = GIF(gifname=base + '_distribution', total_frames=self.Nt)
    self.bar = IncrementalBar(f' [RE={self.Re}, Nx={self.Nx}]', max=self.Nt,
                              suffix='%(percent)d%% [%(index)d/%(max)d] | %(eta_td)s remaining')
    self.time_recorder.record(point_name='end_initialize_aux_variables')
def solve_poisson(self):
    """Pressure-Poisson solve / incompressibility projection for one timestep.

    Dispatches on ``self.sim_method``:
      * 'CG'      - conjugate-gradient solve via ``fluid.make_incompressible_BC``
      * 'PHI'     - PhiFlow's built-in solver with the rotating cylinder obstacle
      * 'convnet' - CG for the first ``self.ite_transition`` iterations (warm-up),
                    then neural-network inference
    Updates ``self.velocity``, ``self.pressure``, ``self.div_in``/``self.div_out``
    and the timing recorder.

    NOTE(review): the original indentation of this block was lost; the nesting
    below is reconstructed from the control-flow keywords — confirm upstream.
    """
    self.time_recorder.record(point_name=f'ite_{self.ite}_>init_poisson')
    if self.sim_method == 'CG':
        self.time_recorder.record(point_name=f'ite_{self.ite}_>init_poisson__CG')
        # obstacle tuple is empty: boundaries are imposed separately via masks
        self.velocity, self.pressure, self._iterations, self.div_in, time_CG = fluid.make_incompressible_BC(self.velocity, self.DOMAIN, (), pressure_guess=self.pressure,
            solve_params=math.LinearSolve(absolute_tolerance = self.precision, max_iterations = self.max_iterations ), solver=self.sim_method)
        self.time_recorder.add_single_interval(time_CG, interval_name = f'ite_{self.ite}_>CG_inference_interval')
        # residual divergence after the projection
        self.div_out = divergence(self.velocity)
        self.time_recorder.record(point_name=f'ite_{self.ite}_>end_poisson__CG')
    elif self.sim_method == 'PHI':
        self.time_recorder.record(point_name=f'ite_{self.ite}_>init_poisson__PHI')
        # advance the obstacle rotation by one timestep
        self.cylinder = self.cylinder.copied_with(geometry=self.cylinder.geometry.rotated(-self.cylinder.angular_velocity * self.dt))
        self.velocity, self.pressure, self._iterations, self.div_in = fluid.make_incompressible(self.velocity, self.DOMAIN, (self.cylinder, ), pressure_guess=self.pressure,
            solve_params=math.LinearSolve(absolute_tolerance = self.precision, max_iterations = self.max_iterations ))
        self.div_out = divergence(self.velocity)
        self.time_recorder.record(point_name=f'ite_{self.ite}_>end_poisson__PHI')
    elif self.sim_method == 'convnet':
        self.time_recorder.record(point_name=f'ite_{self.ite}_>init_poisson__convnet')
        if self.ite<int(self.ite_transition):
            # warm-up phase: fall back to the CG solver until the transition iteration
            self.velocity, self.pressure, self._iterations, self.div_in, time_CG = fluid.make_incompressible_BC(self.velocity, self.DOMAIN, (), pressure_guess=self.pressure,
                solve_params=math.LinearSolve(absolute_tolerance = self.precision, max_iterations = self.max_iterations ), solver=self.sim_method)
            self.time_recorder.add_single_interval(time_CG, interval_name = f'ite_{self.ite}_>CG_inference_interval')
            self.div_out = divergence(self.velocity)
        else:
            # assemble the network input; transpose(-1, -2) swaps the last two
            # axes — presumably to the (y, x) layout the network expects, TODO confirm
            in_density_t = self.density.values._native.transpose(-1, -2)
            in_U_t = torch.cat((self.velocity.staggered_tensor().tensors[0]._native.transpose(-1, -2).unsqueeze(1),
                self.velocity.staggered_tensor().tensors[1]._native.transpose(-1, -2).unsqueeze(1)), dim=1)
            # force the first/last two columns of the x-velocity to the freestream value 1
            in_U_t[:,0, :, :2] = 1
            in_U_t[:,0, :, -2:] = 1
            data = torch.cat((in_density_t.unsqueeze(1).unsqueeze(1),
                in_U_t[:,0,:-1,:-1].unsqueeze(1).unsqueeze(1),
                in_U_t[:,1,:-1,:-1].unsqueeze(1).unsqueeze(1),
                (self.flags+1),
                in_density_t.unsqueeze(1).unsqueeze(1)), dim = 1)
            with torch.no_grad():
                if self.new_train:
                    # Apply input/output BC
                    # new-style model: operates directly on PhiFlow fields
                    _, _, UDiv_CG = convert_phi_to_torch(self.velocity, self.pressure, self.pressure)
                    UDiv_CG = UDiv_CG.unsqueeze(2)
                    # re-impose inlet/outlet values on the torch tensor before inference
                    UDiv_CG[:, 0, :, :, -2:] = 1.0
                    UDiv_CG[:, 0, :, :, :2] = 1.0
                    self.velocity, _ = load_values(UDiv_CG, 1-self.flags, self.DOMAIN)
                    self.pressure, self.velocity, self.div_out, self.div_in, time_Unet = self.model(self.velocity, 1-self.flags,
                        self.DOMAIN, self.config_norm, self.ite, 0, 'vk_inside')
                    time_Unet = float(time_Unet[0]) #to pick the total the rest are steps
                    self.time_recorder.add_single_interval(time_Unet, interval_name = f'ite_{self.ite}_>UNET_inference_interval')
                else:
                    # legacy model: consumes the channel-stacked tensor built above
                    p, U_torch, self.time = self.model(data, self.ite, self.out_dir)
                    self.pressure, self.velocity, self.vel_mask, self.div_out, self.div_in = convert_torch_to_phi(p, U_torch, in_U_t, self.flags, self.DOMAIN)
                    #Net scale prediction correction
                    # NOTE(review): placement reconstructed — looks like it applies
                    # only to the legacy-model path; confirm
                    self.pressure = self.pressure *self.dx
                    #Center aproximation to account for pressure zero
                    self.pressure = self.pressure-2
        self.time_recorder.record(point_name=f'ite_{self.ite}_>end_poisson__convnet')
    #Correct pseudo-pressure to pressure
    # NOTE(review): reconstructed at method level, i.e. applied to every
    # sim_method's result — confirm intended for PHI/CG too
    self.pressure = self.pressure / self.dt
    self.time_recorder.record(point_name=f'ite_{self.ite}_>end_poisson')
def plot_pressure(self, zoom_pos = []):
    """Plot the pressure field as a GIF frame and/or a per-timestep PNG.

    :param zoom_pos: zoom-window position forwarded to the plot helpers.
        NOTE(review): mutable default argument — harmless here since it is
        only read, never mutated.
    """
    # animated GIF frame
    if self.plot_field_gif:
        self.gif_pressure.add_frame(self.ite, self.pressure,
            plot_type=['surface'],
            options=[ ['limits', [-0.5, 0.5]],
                      ['full_zoom', True],
                      ['zoom_position', zoom_pos],
                      ['aux_contourn', True],
                      ['indeces', False],
                      ['grid', False]
                    ],
            Lx=self.Lx, Ly=self.Ly, dx=self.dx, dy=self.dy,
            lx='x', ly='y',lbar='pressure []',
            ltitle=f'VK @ t={np.round(self.dt*self.ite, decimals=1)} s [ A={self.alpha}, Re={self.Re}, N=[{self.Nx}x{self.Ny}] ]')
    # standalone PNG snapshot for this timestep
    if self.plot_field_steps:
        plot_field(self.pressure,
            plot_type=['surface'],
            options=[ ['limits', [-0.5, 0.5]],
                      ['full_zoom', False],
                      ['zoom_position', zoom_pos],
                      ['aux_contourn', True],
                    ],
            Lx=self.Lx, Ly=self.Ly, dx=self.dx, dy=self.dy,
            lx='x', ly='y',lbar='pressure []',
            ltitle=f'VK @ t={np.round(self.dt*self.ite, decimals=1)} s [ A={self.alpha}, Re={self.Re}, N=[{self.Nx}x{self.Ny}] ]',
            save=True, filename=f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_pressure_timestep_{self.ite}.png')
def plot_vorticity(self, zoom_pos = []):
    """Plot the vorticity field as a GIF frame and/or a per-timestep PNG.

    :param zoom_pos: zoom-window position forwarded to the PNG plot helper
        (not used for the GIF frame).
        NOTE(review): mutable default argument — only read, never mutated.
    """
    # derive vorticity from the current velocity field
    vorticity = calculate_vorticity(self.Lx,self.Ly,self.dx,self.dy,self.velocity)
    if self.plot_field_gif:
        self.gif_vorticity.add_frame(self.ite, vorticity,
            plot_type=['surface'],
            options=[ ['limits', [-0.2, 0.5]],
                      ['full_zoom', False],
                      ['aux_contourn', True],
                      ['indeces', False],
                      ['grid', False]
                    ],
            Lx=self.Lx, Ly=self.Ly, dx=self.dx, dy=self.dy,
            lx='x', ly='y',lbar='vorticity []',
            ltitle=f'VK @ t={np.round(self.dt*self.ite, decimals=1)} s [ A={self.alpha}, Re={self.Re}, N=[{self.Nx}x{self.Ny}] ]')
    if self.plot_field_steps:
        plot_field(vorticity,
            plot_type=['surface'],
            options=[ ['limits', [-0.2, 0.5]],
                      ['full_zoom', False],
                      ['zoom_position', zoom_pos],
                      ['aux_contourn', True],
                    ],
            Lx=self.Lx, Ly=self.Ly, dx=self.dx, dy=self.dy,
            lx='x', ly='y',lbar='vorticity []',
            ltitle=f'VK @ t={np.round(self.dt*self.ite, decimals=1)} s [ A={self.alpha}, Re={self.Re}, N=[{self.Nx}x{self.Ny}] ]',
            save=True, filename=f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_vorticity_timestep_{self.ite}.png')
def plot_velocity_norm(self, zoom_pos = []):
    """Add a GIF frame of the velocity-magnitude field.

    :param zoom_pos: accepted for interface symmetry with the other plot
        methods but unused here.
    """
    norm_velocity = calculate_norm_velocity(self.velocity)
    if self.plot_field_gif:
        self.gif_velocity.add_frame(self.ite, norm_velocity,
            plot_type=['surface'],
            options=[ ['limits', [0, 0.8]],
                      ['full_zoom', False],
                      ['aux_contourn', True],
                      ['indeces', False],
                      ['grid', False]
                    ],
            Lx=self.Lx, Ly=self.Ly, dx=self.dx, dy=self.dy,
            lx='x', ly='y',lbar='norm velocity []',
            ltitle=f'VK @ t={np.round(self.dt*self.ite, decimals=1)} s [ A={self.alpha}, Re={self.Re}, N=[{self.Nx}x{self.Ny}] ]')
def plot_cp(self):
    """Add a GIF frame of the pressure-coefficient (Cp) distribution around the cylinder."""
    # reference pressure: mean of grid row 4
    # NOTE(review): presumably a near-inlet/far-field row — confirm
    pressure_ref = np.mean(self.pressure.values._native.cpu().numpy()[0][4,:])
    cp = calculate_cp(self.pressure, pressure_ref=pressure_ref, rho_ref=1, vel_ref=1)
    if self.plot_field_gif:
        self.gif_distribution.add_frame2(self.ite, cp, self.CYLINDER_2, plot_type=['full'],
            options=[['limits', [-2, 1.2] ]
                    ],
            lx='angle', ly='pressure coeficient', ltitle=f'Cp @ t={np.round(self.dt*self.ite, decimals=1)} s [ A={self.alpha}, Re={self.Re}, N=[{self.Nx}x{self.Ny}] ]')
def reconstruct_velocity_probe(self):
    """Rebuild the velocity-probe time series from the saved y-velocity field file.

    Only iterations recorded in the saved ``iteration_field`` are filled;
    every other entry stays 0 (as warned on stdout).  The result is written
    back to ``*_velocity_probe.npy``.
    """
    # probe window: 2D..2.5D downstream of the cylinder, +/-0.25D around the centerline
    xp1 = int((self.xD + self.D*2)/self.dx)
    xp2 = int((self.xD + self.D*2.5)/self.dx)
    yp1 = int((self.Ly/2 - self.D*0.25)/self.dy)
    yp2 = int((self.Ly/2 + self.D*0.25)/self.dy)
    print('calculate_velocity_probe>> read file')
    print('BE AWARE THAT ONLY THE SAVE ITERATIONS WILL BE CALCULATED-> THE OTHERS 0')
    velocity_y_field = np.load(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_velocity_y_field.npy')
    iteration_field = np.load(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_iteration_field.npy')
    velocity_probe=np.zeros(self.Nt)
    for i, ite in enumerate(iteration_field):
        velocity_probe[ite] = np.mean(velocity_y_field[i][0][xp1:xp2,yp1:yp2]) #to squeeze
    np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_velocity_probe.npy', velocity_probe)
def calculate_velocity_probe(self):
    """Sample the mean y-velocity in a small window behind the cylinder and
    store it at the current iteration of ``self.velocity_probe``."""
    # probe window: 2D..2.5D downstream of the cylinder, +/-0.25D around the centerline
    x_lo = int((self.xD + self.D * 2) / self.dx)
    x_hi = int((self.xD + self.D * 2.5) / self.dx)
    y_lo = int((self.Ly / 2 - self.D * 0.25) / self.dy)
    y_hi = int((self.Ly / 2 + self.D * 0.25) / self.dy)
    vy = self.velocity.staggered_tensor().tensors[1]._native.cpu().squeeze().numpy()
    self.velocity_probe[self.ite] = np.mean(vy[x_lo:x_hi, y_lo:y_hi])
def plot_geometry(self, zoom_pos = []):
    """Save a PNG of the simulation geometry with the probe window drawn as a square.

    :param zoom_pos: zoom-window position forwarded to the plot helper.
    NOTE(review): unlike the probe routines the window corners are not divided
    by dx/dy here — presumably plot_field works in physical coordinates; confirm.
    """
    xp1 = int(self.xD + self.D*2)
    xp2 = int(self.xD + self.D*2.5)
    yp1 = int(self.Ly/2 - self.D*0.25)
    yp2 = int(self.Ly/2 + self.D*0.25)
    plot_field(self.CYLINDER,
        plot_type=['surface'],
        options=[ ['limits', [-1, 1]],
                  ['full_zoom', False],
                  ['zoom_position', zoom_pos],
                  ['aux_contourn', False],
                  ['square', [xp1,xp2,yp1,yp2]]
                ],
        Lx=self.Lx, Ly=self.Ly, dx=self.dx, dy=self.dy,
        lx='x', ly='y',lbar='geometry',
        ltitle=f'VK @ [ N=[{self.Nx}x{self.Ny}] ]',
        save=True, filename=f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_geometry.png')
def save_variables(self):
    """Persist post-processing arrays and field histories as .npy files.

    Intermediate saves (every ``save_post_x_ite`` / ``save_field_x_ite``
    iterations) happen only when ``self.DEBUG`` is set; otherwise everything
    is written once, at the final iteration.
    """
    #3.1.SAVE INTERMIDATE RESULTS OF POST-PROCESS
    if self.post_computations and ( ( self.ite%self.save_post_x_ite == 0 if self.DEBUG else False) or self.ite == self.Nt-1 ):
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_velocity_probe.npy', self.velocity_probe)
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_vforce.npy', self.vforce)
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_hforce.npy', self.hforce)
    #3.2.SAVE FIELDS
    if self.save_field and ( ( self.ite%self.save_field_x_ite == 0 if self.DEBUG else False ) or self.ite == self.Nt-1 ):
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_velocity_x_field.npy', self.velocity_x_field)
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_velocity_y_field.npy', self.velocity_y_field)
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_pressure_field.npy', self.pressure_field)
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_vel_mask_x_field.npy', self.vel_mask_x_field)
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_vel_mask_y_field.npy', self.vel_mask_y_field)
        np.save(f'{self.out_dir}A_{self.alpha}_RE_{self.Re}_dx_{self.Nx}_{self.Ny}_iteration_field.npy', self.iteration_field)
    #export_csv('./results',pressure, self.Lx, self.Ly, self.dx, self.dy)
def run(self):
_init_sim = torch.cuda.Event(enable_timing=True) #Internal timer, to check simulation time, in order to avoid 24h max
_now_sim = torch.cuda.Event(enable_timing=True)
_init_sim.record()
self.time_recorder.record(point_name='run_init')
if self.FP64:
print('FP64 ACTIVE!!!!!!')
set_global_precision(64) #PhiFlow
torch.set_default_dtype(torch.float64) #Torch
'''NOTICE: since both backends (phiflow & torch) are defined as default here. It is not necessary nor recommended
to define its precision on other parts of the code. Except for very specific purpouses.
'''
if self.GPU == True:
TORCH_BACKEND.set_default_device('GPU') #PhiFlow
torch.set_default_tensor_type('torch.cuda.FloatTensor') #Torch
'''NOTICE: since both backends (phiflow & torch) are defined as default here. It is not necessary nor recommended
to use .cuda() in other parts of the software. Since then, will probably create internal conflicts when using
multiple tensors located in different devices.
Another thing, is the .cpu() used for instance, in the plots since this doesn't provoke any conflict since always
will be required to pass it to cpu. And if it was already on cpu it doesn't bring any problem.
'''
self.time_recorder.record(point_name='loading_gpu')
# Initialize network
if self.sim_method == 'convnet':
self.load_model()
self.time_recorder.record(point_name='loading_network')
#0.PREPARE SIMULATION
self.define_simulation_geometry()
self.define_simulation_fields()
self.initialize_aux_variables()
#1.COMPUTATIONS ITERATIONs OVER TIME
if self.resume:
ite_init = self.iteration_field[-1]
else:
ite_init = 0
self.time_recorder.record(point_name='init_iterations')
for self.ite in range(ite_init, self.Nt):
#1.0.Check if simulation time exceeded maximum allocation (24h gpu on pando -> 23h)
_now_sim.record()
torch.cuda.synchronize()
if _init_sim.elapsed_time(_now_sim) >= self.MAX_TIME:
print(f'the simulation took more than {_init_sim.elapsed_time(_now_sim)/(1000*60)} min, so a new job will be launched to proceed.')
self.prepare_resume()
exit()
self.time_recorder.record(point_name=f'init_iteration_{self.ite}')
if True: #try:
#1.1.Diffuse Velocity
if not self.Re_INF:
self.velocity_free = diffuse.explicit(self.velocity, self.viscosity, self.dt)
self.time_recorder.record(point_name=f'ite_{self.ite}_>diffuse')
else:
print('DIFFUSION STEP AVOIDED SINCE RE >= 50000')
self.velocity_free = self.velocity
#1.2.Advect Velocity
self.velocity = advect.semi_lagrangian(self.velocity_free, self.velocity, self.dt)
if torch.isnan(self.velocity.staggered_tensor().tensors[0]._native).any():
print('Nan in Domain')
self.velocity = change_nan_zero(self.velocity, self.DOMAIN)
self.time_recorder.record(point_name=f'ite_{self.ite}_>advect')
#1.3.Apply Boundary Conditions
self.velocity = self.velocity * (1 - self.INFLOW) + self.INFLOW * (1, 0) + self.INIT*(0,0.5) if self.ite<int(self.INIT_transition) else self.velocity * (1 - self.INFLOW) + self.INFLOW * (1, 0)
if self.sim_method == 'CG' or self.sim_method == 'convnet':
self.velocity = apply_boundaries(self.velocity, self.bc_mask, self.bc_value)
self.time_recorder.record(point_name=f'ite_{self.ite}_>apply_bc')
#1.4.Solve Poisson Equation
self.solve_poisson()
#1.5.Reenforce Boundary Conditions
if self.sim_method == 'CG' or self.sim_method == 'convnet':
self.velocity = apply_boundaries(self.velocity, self.bc_mask, self.bc_value)
self.time_recorder.record(point_name=f'ite_{self.ite}_>reinforce_bc')
#2.POST-PROCESSING
self.time_recorder.record(point_name=f'ite_{self.ite}_>init_post')
if True: #try:
if self.post_computations: # and self.ite%self.save_post_x_ite == 0:
#2.1.VELOCITY PROBE
self.calculate_velocity_probe()
self.time_recorder.record(point_name=f'ite_{self.ite}_>probe')
#2.2.CALCULATE FORCES
self.hforce[self.ite], self.vforce[self.ite] = calculate_forces(self.pressure, self.CYLINDER_2, self.dx, self.dy)
#hforce[ite], vforce[ite] = calculate_forces_with_momentum(pressure, velocity, FORCES_MASK, factor=1, | |
"[V][Conc][Trns][A_4Pl][P_1Du]": [
"@–nger+megtekuk"
],
"[V][Conc][Trns][A_4Pl][P_1Pl]": [
"@–nger+megtekut"
],
"[V][Conc][Trns][A_4Pl][P_1Sg]": [
"@–nger+megtenga"
],
"[V][Conc][Trns][A_4Pl][P_2Du]": [
"@–nger+megcetek"
],
"[V][Conc][Trns][A_4Pl][P_2Pl]": [
"@–nger+megceci"
],
"[V][Conc][Trns][A_4Pl][P_2Sg]": [
"@–nger+megteggen"
],
"[V][Conc][Trns][A_4Pl][P_3Du]": [
"@–nger+megtekek"
],
"[V][Conc][Trns][A_4Pl][P_3Pl]": [
"@–nger+megteki"
],
"[V][Conc][Trns][A_4Pl][P_3Sg]": [
"@–nger+megteggu"
],
"[V][Conc][Trns][A_4Sg][P_1Du]": [
"@–nger+mikuk"
],
"[V][Conc][Trns][A_4Sg][P_1Pl]": [
"@–nger+mikut"
],
"[V][Conc][Trns][A_4Sg][P_1Sg]": [
"@–nger+mia"
],
"[V][Conc][Trns][A_4Sg][P_2Du]": [
"@–nger+mitek"
],
"[V][Conc][Trns][A_4Sg][P_2Pl]": [
"@–nger+mici"
],
"[V][Conc][Trns][A_4Sg][P_2Sg]": [
"@–nger+miten"
],
"[V][Conc][Trns][A_4Sg][P_3Du]": [
"@–nger+mikek"
],
"[V][Conc][Trns][A_4Sg][P_3Pl]": [
"@–nger+miki"
],
"[V][Conc][Trns][A_4Sg][P_3Sg]": [
"@–nger+miu"
],
"[V][Cond][Intr][S_1Du]": [
"@~–ku–megnuk"
],
"[V][Cond][Intr][S_1Pl]": [
"@~–ku–mta"
],
"[V][Cond][Intr][S_1Sg]": [
"@~–ku+ma"
],
"[V][Cond][Intr][S_2Du]": [
"@~–ku.vtek"
],
"[V][Cond][Intr][S_2Pl]": [
"@~–ku.veci"
],
"[V][Cond][Intr][S_2Sg]": [
"@~–ku.vet"
],
"[V][Cond][Intr][S_3Du]": [
"@~–ku:agnek"
],
"[V][Cond][Intr][S_3Pl]": [
"@~–ku:ata"
],
"[V][Cond][Intr][S_3Sg]": [
"@~–ku:an"
],
"[V][Cond][Intr][S_4Du]": [
"@~–ku+nek"
],
"[V][Cond][Intr][S_4Pl]": [
"@~–ku+neng"
],
"[V][Cond][Intr][S_4Sg]": [
"@~–ku+ni"
],
"[V][Cond][Trns][A_1Du][P_2Du]": [
"@~–ku–megtek"
],
"[V][Cond][Trns][A_1Du][P_2Pl]": [
"@~–ku–megci"
],
"[V][Cond][Trns][A_1Du][P_2Sg]": [
"@~–ku–megten"
],
"[V][Cond][Trns][A_1Du][P_3Du]": [
"@~–ku–megkek"
],
"[V][Cond][Trns][A_1Du][P_3Pl]": [
"@~–ku–megki"
],
"[V][Cond][Trns][A_1Du][P_3Sg]": [
"@~–ku–megnegu"
],
"[V][Cond][Trns][A_1Du][P_4Du]": [
"@~–ku–megtek"
],
"[V][Cond][Trns][A_1Du][P_4Pl]": [
"@~–ku–megteng"
],
"[V][Cond][Trns][A_1Du][P_4Sg]": [
"@~–ku–megni"
],
"[V][Cond][Trns][A_1Pl][P_2Du]": [
"@~–ku–mcetek"
],
"[V][Cond][Trns][A_1Pl][P_2Pl]": [
"@~–ku–mceci"
],
"[V][Cond][Trns][A_1Pl][P_2Sg]": [
"@~–ku–mteggen"
],
"[V][Cond][Trns][A_1Pl][P_3Du]": [
"@~–ku–mtekek"
],
"[V][Cond][Trns][A_1Pl][P_3Pl]": [
"@~–ku–mteki"
],
"[V][Cond][Trns][A_1Pl][P_3Sg]": [
"@~–ku–mteggu"
],
"[V][Cond][Trns][A_1Pl][P_4Du]": [
"@~–ku–mcetek"
],
"[V][Cond][Trns][A_1Pl][P_4Pl]": [
"@~–ku–mceteng"
],
"[V][Cond][Trns][A_1Pl][P_4Sg]": [
"@~–ku–mten̄i"
],
"[V][Cond][Trns][A_1Sg][P_2Du]": [
"@~–ku–mtek"
],
"[V][Cond][Trns][A_1Sg][P_2Pl]": [
"@~–ku–mci"
],
"[V][Cond][Trns][A_1Sg][P_2Sg]": [
"@~–ku–mken"
],
"[V][Cond][Trns][A_1Sg][P_3Du]": [
"@~–ku–mkek"
],
"[V][Cond][Trns][A_1Sg][P_3Pl]": [
"@~–ku–mki"
],
"[V][Cond][Trns][A_1Sg][P_3Sg]": [
"@~–ku–mku"
],
"[V][Cond][Trns][A_1Sg][P_4Du]": [
"@~–ku–mtek"
],
"[V][Cond][Trns][A_1Sg][P_4Pl]": [
"@~–ku–mteng"
],
"[V][Cond][Trns][A_1Sg][P_4Sg]": [
"@~–ku–mni"
],
"[V][Cond][Trns][A_2Du][P_1Du]": [
"@~–ku.vtegkuk"
],
"[V][Cond][Trns][A_2Du][P_1Pl]": [
"@~–ku.vtegkut"
],
"[V][Cond][Trns][A_2Du][P_1Sg]": [
"@~–ku.vtegnga"
],
"[V][Cond][Trns][A_2Du][P_3Du]": [
"@~–ku.vtegkek"
],
"[V][Cond][Trns][A_2Du][P_3Pl]": [
"@~–ku.vtegki"
],
"[V][Cond][Trns][A_2Du][P_3Sg]": [
"@~–ku.vtegnegu"
],
"[V][Cond][Trns][A_2Du][P_4Du]": [
"@~–ku.vtegtek"
],
"[V][Cond][Trns][A_2Du][P_4Pl]": [
"@~–ku.vtegteng"
],
"[V][Cond][Trns][A_2Du][P_4Sg]": [
"@~–ku.vtegni"
],
"[V][Cond][Trns][A_2Pl][P_1Du]": [
"@~–ku.vcikuk"
],
"[V][Cond][Trns][A_2Pl][P_1Pl]": [
"@~–ku.vcikut"
],
"[V][Cond][Trns][A_2Pl][P_1Sg]": [
"@~–ku.vcia"
],
"[V][Cond][Trns][A_2Pl][P_3Du]": [
"@~–ku.vcikek"
],
"[V][Cond][Trns][A_2Pl][P_3Pl]": [
"@~–ku.vciki"
],
"[V][Cond][Trns][A_2Pl][P_3Sg]": [
"@~–ku.vciu"
],
"[V][Cond][Trns][A_2Pl][P_4Du]": [
"@~–ku.vcitek"
],
"[V][Cond][Trns][A_2Pl][P_4Pl]": [
"@~–ku.vciteng"
],
"[V][Cond][Trns][A_2Pl][P_4Sg]": [
"@~–ku.vcini"
],
"[V][Cond][Trns][A_2Sg][P_1Du]": [
"@~–ku.vkuk"
],
"[V][Cond][Trns][A_2Sg][P_1Pl]": [
"@~–ku.vkut"
],
"[V][Cond][Trns][A_2Sg][P_1Sg]": [
"@~–ku.vnga"
],
"[V][Cond][Trns][A_2Sg][P_3Du]": [
"@~–ku.vkek"
],
"[V][Cond][Trns][A_2Sg][P_3Pl]": [
"@~–ku.vki"
],
"[V][Cond][Trns][A_2Sg][P_3Sg]": [
"@~–ku.vgu"
],
"[V][Cond][Trns][A_2Sg][P_4Du]": [
"@~–ku.vtek"
],
"[V][Cond][Trns][A_2Sg][P_4Pl]": [
"@~–ku.vteng"
],
"[V][Cond][Trns][A_2Sg][P_4Sg]": [
"@~–ku.vni"
],
"[V][Cond][Trns][A_3Du][P_1Du]": [
"@~–ku:agkuk"
],
"[V][Cond][Trns][A_3Du][P_1Pl]": [
"@~–ku:agkut"
],
"[V][Cond][Trns][A_3Du][P_1Sg]": [
"@~–ku:agnga"
],
"[V][Cond][Trns][A_3Du][P_2Du]": [
"@~–ku:agtek"
],
"[V][Cond][Trns][A_3Du][P_2Pl]": [
"@~–ku:agci"
],
"[V][Cond][Trns][A_3Du][P_2Sg]": [
"@~–ku:agten"
],
"[V][Cond][Trns][A_3Du][P_3Du]": [
"@~–ku:agkek"
],
"[V][Cond][Trns][A_3Du][P_3Pl]": [
"@~–ku:agki"
],
"[V][Cond][Trns][A_3Du][P_3Sg]": [
"@~–ku:agnegu"
],
"[V][Cond][Trns][A_3Du][P_4Du]": [
"@~–ku:agtek"
],
"[V][Cond][Trns][A_3Du][P_4Pl]": [
"@~–ku:agteng"
],
"[V][Cond][Trns][A_3Du][P_4Sg]": [
"@~–ku:agni"
],
"[V][Cond][Trns][A_3Pl][P_1Du]": [
"@~–ku:atkuk"
],
"[V][Cond][Trns][A_3Pl][P_1Pl]": [
"@~–ku:atkut"
],
"[V][Cond][Trns][A_3Pl][P_1Sg]": [
"@~–ku:atnga"
],
"[V][Cond][Trns][A_3Pl][P_2Du]": [
"@~–ku:acetek"
],
"[V][Cond][Trns][A_3Pl][P_2Pl]": [
"@~–ku:aceci"
],
"[V][Cond][Trns][A_3Pl][P_2Sg]": [
"@~–ku:atgen"
],
"[V][Cond][Trns][A_3Pl][P_3Du]": [
"@~–ku:atkek"
],
"[V][Cond][Trns][A_3Pl][P_3Pl]": [
"@~–ku:atki"
],
"[V][Cond][Trns][A_3Pl][P_3Sg]": [
"@~–ku:atgu"
],
"[V][Cond][Trns][A_3Pl][P_4Du]": [
"@~–ku:acetek"
],
"[V][Cond][Trns][A_3Pl][P_4Pl]": [
"@~–ku:aceteng"
],
"[V][Cond][Trns][A_3Pl][P_4Sg]": [
"@~–ku:atni"
],
"[V][Cond][Trns][A_3Sg][P_1Du]": [
"@~–ku:akuk"
],
"[V][Cond][Trns][A_3Sg][P_1Pl]": [
"@~–ku:akut"
],
"[V][Cond][Trns][A_3Sg][P_1Sg]": [
"@~–ku:anga"
],
"[V][Cond][Trns][A_3Sg][P_2Du]": [
"@~–ku:atek"
],
"[V][Cond][Trns][A_3Sg][P_2Pl]": [
"@~–ku:aci"
],
"[V][Cond][Trns][A_3Sg][P_2Sg]": [
"@~–ku:aten"
],
"[V][Cond][Trns][A_3Sg][P_3Du]": [
"@~–ku:akek"
],
"[V][Cond][Trns][A_3Sg][P_3Pl]": [
"@~–ku:aki"
],
"[V][Cond][Trns][A_3Sg][P_3Sg]": [
"@~–ku:aku"
],
"[V][Cond][Trns][A_3Sg][P_4Du]": [
"@~–ku:atek"
],
"[V][Cond][Trns][A_3Sg][P_4Pl]": [
"@~–ku:ateng"
],
"[V][Cond][Trns][A_3Sg][P_4Sg]": [
"@~–ku:ani"
],
"[V][Cond][Trns][A_4Du][P_1Du]": [
"@~–ku+negnekuk"
],
"[V][Cond][Trns][A_4Du][P_1Pl]": [
"@~–ku+negnekut"
],
"[V][Cond][Trns][A_4Du][P_1Sg]": [
"@~–ku+negnenga"
],
"[V][Cond][Trns][A_4Du][P_2Du]": [
"@~–ku+negnetek"
],
"[V][Cond][Trns][A_4Du][P_2Pl]": [
"@~–ku+negneci"
],
"[V][Cond][Trns][A_4Du][P_2Sg]": [
"@~–ku+negnegen"
],
"[V][Cond][Trns][A_4Du][P_3Du]": [
"@~–ku+negnekek"
],
"[V][Cond][Trns][A_4Du][P_3Pl]": [
"@~–ku+negneki"
],
"[V][Cond][Trns][A_4Du][P_3Sg]": [
"@~–ku+negnegu"
],
"[V][Cond][Trns][A_4Pl][P_1Du]": [
"@~–ku+negtekuk"
],
"[V][Cond][Trns][A_4Pl][P_1Pl]": [
"@~–ku+negtekut"
],
"[V][Cond][Trns][A_4Pl][P_1Sg]": [
"@~–ku+negtenga"
],
"[V][Cond][Trns][A_4Pl][P_2Du]": [
"@~–ku+negcetek"
],
"[V][Cond][Trns][A_4Pl][P_2Pl]": [
"@~–ku+negceci"
],
"[V][Cond][Trns][A_4Pl][P_2Sg]": [
"@~–ku+negteggen"
],
"[V][Cond][Trns][A_4Pl][P_3Du]": [
"@~–ku+negtekek"
],
"[V][Cond][Trns][A_4Pl][P_3Pl]": [
"@~–ku+negteki"
],
"[V][Cond][Trns][A_4Pl][P_3Sg]": [
"@~–ku+negteggu"
],
"[V][Cond][Trns][A_4Sg][P_1Du]": [
"@~–ku+nikuk"
],
"[V][Cond][Trns][A_4Sg][P_1Pl]": [
"@~–ku+nikut"
],
"[V][Cond][Trns][A_4Sg][P_1Sg]": [
"@~–ku+nia"
],
"[V][Cond][Trns][A_4Sg][P_2Du]": [
"@~–ku+nitek"
],
"[V][Cond][Trns][A_4Sg][P_2Pl]": [
"@~–ku+nici"
],
"[V][Cond][Trns][A_4Sg][P_2Sg]": [
"@~–ku+niten"
],
"[V][Cond][Trns][A_4Sg][P_3Du]": [
"@~–ku+nikek"
],
"[V][Cond][Trns][A_4Sg][P_3Pl]": [
"@~–ku+niki"
],
"[V][Cond][Trns][A_4Sg][P_3Sg]": [
"@~–ku+niu"
],
"[V][Cont][Intr][S_1Du]": [
"+'(g)aqa–megnuk"
],
"[V][Cont][Intr][S_1Pl]": [
"+'(g)aqa–mta"
],
"[V][Cont][Intr][S_1Sg]": [
"+'(g)aqa+ma"
],
"[V][Cont][Intr][S_2Du]": [
"+'(g)aqa.vtek"
],
"[V][Cont][Intr][S_2Pl]": [
"+'(g)aqa.veci"
],
"[V][Cont][Intr][S_2Sg]": [
"+'(g)aqa.vet"
],
"[V][Cont][Intr][S_3Du]": [
"+'(g)aqa:agnek"
],
"[V][Cont][Intr][S_3Pl]": [
"+'(g)aqa:ata"
],
"[V][Cont][Intr][S_3Sg]": [
"+'(g)aqa:an"
],
"[V][Cont][Intr][S_4Du]": [
"+'(g)aqa+mek"
],
"[V][Cont][Intr][S_4Pl]": [
"+'(g)aqa+meng"
],
"[V][Cont][Intr][S_4Sg]": [
"+'(g)aqa+mi"
],
"[V][Cont][Trns][A_1Du][P_2Du]": [
"+'(g)aqa–megtek"
],
"[V][Cont][Trns][A_1Du][P_2Pl]": [
"+'(g)aqa–megci"
],
"[V][Cont][Trns][A_1Du][P_2Sg]": [
"+'(g)aqa–megten"
],
"[V][Cont][Trns][A_1Du][P_3Du]": [
"+'(g)aqa–megkek"
],
"[V][Cont][Trns][A_1Du][P_3Pl]": [
"+'(g)aqa–megki"
],
"[V][Cont][Trns][A_1Du][P_3Sg]": [
"+'(g)aqa–megnegu"
],
"[V][Cont][Trns][A_1Du][P_4Du]": [
"+'(g)aqa–megtek"
],
"[V][Cont][Trns][A_1Du][P_4Pl]": [
"+'(g)aqa–megteng"
],
"[V][Cont][Trns][A_1Du][P_4Sg]": [
"+'(g)aqa–megni"
],
"[V][Cont][Trns][A_1Pl][P_2Du]": [
"+'(g)aqa–mcetek"
],
"[V][Cont][Trns][A_1Pl][P_2Pl]": [
"+'(g)aqa–mceci"
],
"[V][Cont][Trns][A_1Pl][P_2Sg]": [
"+'(g)aqa–mteggen"
],
"[V][Cont][Trns][A_1Pl][P_3Du]": [
"+'(g)aqa–mtekek"
],
"[V][Cont][Trns][A_1Pl][P_3Pl]": [
"+'(g)aqa–mteki"
],
"[V][Cont][Trns][A_1Pl][P_3Sg]": [
"+'(g)aqa–mteggu"
],
"[V][Cont][Trns][A_1Pl][P_4Du]": [
"+'(g)aqa–mcetek"
],
"[V][Cont][Trns][A_1Pl][P_4Pl]": [
"+'(g)aqa–mceteng"
],
"[V][Cont][Trns][A_1Pl][P_4Sg]": [
"+'(g)aqa–mten̄i"
],
"[V][Cont][Trns][A_1Sg][P_2Du]": [
"+'(g)aqa–mtek"
],
"[V][Cont][Trns][A_1Sg][P_2Pl]": [
"+'(g)aqa–mci"
],
"[V][Cont][Trns][A_1Sg][P_2Sg]": [
"+'(g)aqa–mken"
],
"[V][Cont][Trns][A_1Sg][P_3Du]": [
"+'(g)aqa–mkek"
],
"[V][Cont][Trns][A_1Sg][P_3Pl]": [
"+'(g)aqa–mki"
],
"[V][Cont][Trns][A_1Sg][P_3Sg]": [
"+'(g)aqa–mku"
],
"[V][Cont][Trns][A_1Sg][P_4Du]": [
"+'(g)aqa–mtek"
],
"[V][Cont][Trns][A_1Sg][P_4Pl]": [
"+'(g)aqa–mteng"
],
"[V][Cont][Trns][A_1Sg][P_4Sg]": [
"+'(g)aqa–mni"
],
"[V][Cont][Trns][A_2Du][P_1Du]": [
"+'(g)aqa.vtegkuk"
],
"[V][Cont][Trns][A_2Du][P_1Pl]": [
"+'(g)aqa.vtegkut"
],
"[V][Cont][Trns][A_2Du][P_1Sg]": [
"+'(g)aqa.vtegnga"
],
"[V][Cont][Trns][A_2Du][P_3Du]": [
"+'(g)aqa.vtegkek"
],
"[V][Cont][Trns][A_2Du][P_3Pl]": [
"+'(g)aqa.vtegki"
],
"[V][Cont][Trns][A_2Du][P_3Sg]": [
"+'(g)aqa.vtegnegu"
],
"[V][Cont][Trns][A_2Du][P_4Du]": [
"+'(g)aqa.vtegtek"
],
"[V][Cont][Trns][A_2Du][P_4Pl]": [
"+'(g)aqa.vtegteng"
],
"[V][Cont][Trns][A_2Du][P_4Sg]": [
"+'(g)aqa.vtegni"
],
"[V][Cont][Trns][A_2Pl][P_1Du]": [
"+'(g)aqa.vcikuk"
],
"[V][Cont][Trns][A_2Pl][P_1Pl]": [
"+'(g)aqa.vcikut"
],
"[V][Cont][Trns][A_2Pl][P_1Sg]": [
"+'(g)aqa.vcia"
],
"[V][Cont][Trns][A_2Pl][P_3Du]": [
"+'(g)aqa.vcikek"
],
"[V][Cont][Trns][A_2Pl][P_3Pl]": [
"+'(g)aqa.vciki"
],
"[V][Cont][Trns][A_2Pl][P_3Sg]": [
"+'(g)aqa.vciu"
],
"[V][Cont][Trns][A_2Pl][P_4Du]": [
"+'(g)aqa.vcitek"
],
"[V][Cont][Trns][A_2Pl][P_4Pl]": [
"+'(g)aqa.vciteng"
],
"[V][Cont][Trns][A_2Pl][P_4Sg]": [
"+'(g)aqa.vcini"
],
"[V][Cont][Trns][A_2Sg][P_1Du]": [
"+'(g)aqa.vkuk"
],
"[V][Cont][Trns][A_2Sg][P_1Pl]": [
"+'(g)aqa.vkut"
],
"[V][Cont][Trns][A_2Sg][P_1Sg]": [
"+'(g)aqa.vnga"
],
"[V][Cont][Trns][A_2Sg][P_3Du]": [
"+'(g)aqa.vkek"
],
"[V][Cont][Trns][A_2Sg][P_3Pl]": [
"+'(g)aqa.vki"
],
"[V][Cont][Trns][A_2Sg][P_3Sg]": [
"+'(g)aqa.vgu"
],
"[V][Cont][Trns][A_2Sg][P_4Du]": [
"+'(g)aqa.vtek"
],
"[V][Cont][Trns][A_2Sg][P_4Pl]": [
"+'(g)aqa.vteng"
],
"[V][Cont][Trns][A_2Sg][P_4Sg]": [
"+'(g)aqa.vni"
],
"[V][Cont][Trns][A_3Du][P_1Du]": [
"+'(g)aqa:agkuk"
],
"[V][Cont][Trns][A_3Du][P_1Pl]": [
"+'(g)aqa:agkut"
],
"[V][Cont][Trns][A_3Du][P_1Sg]": [
"+'(g)aqa:agnga"
],
"[V][Cont][Trns][A_3Du][P_2Du]": [
"+'(g)aqa:agtek"
],
"[V][Cont][Trns][A_3Du][P_2Pl]": [
"+'(g)aqa:agci"
],
"[V][Cont][Trns][A_3Du][P_2Sg]": [
"+'(g)aqa:agten"
],
"[V][Cont][Trns][A_3Du][P_3Du]": [
"+'(g)aqa:agkek"
],
"[V][Cont][Trns][A_3Du][P_3Pl]": [
"+'(g)aqa:agki"
],
"[V][Cont][Trns][A_3Du][P_3Sg]": [
"+'(g)aqa:agnegu"
],
"[V][Cont][Trns][A_3Du][P_4Du]": [
"+'(g)aqa:agtek"
],
"[V][Cont][Trns][A_3Du][P_4Pl]": [
"+'(g)aqa:agteng"
],
"[V][Cont][Trns][A_3Du][P_4Sg]": [
"+'(g)aqa:agni"
],
"[V][Cont][Trns][A_3Pl][P_1Du]": [
"+'(g)aqa:atkuk"
],
"[V][Cont][Trns][A_3Pl][P_1Pl]": [
"+'(g)aqa:atkut"
],
"[V][Cont][Trns][A_3Pl][P_1Sg]": [
"+'(g)aqa:atnga"
],
"[V][Cont][Trns][A_3Pl][P_2Du]": [
"+'(g)aqa:acetek"
],
"[V][Cont][Trns][A_3Pl][P_2Pl]": [
"+'(g)aqa:aceci"
],
"[V][Cont][Trns][A_3Pl][P_2Sg]": [
"+'(g)aqa:atgen"
],
"[V][Cont][Trns][A_3Pl][P_3Du]": [
"+'(g)aqa:atkek"
],
"[V][Cont][Trns][A_3Pl][P_3Pl]": [
"+'(g)aqa:atki"
],
"[V][Cont][Trns][A_3Pl][P_3Sg]": [
"+'(g)aqa:atgu"
],
"[V][Cont][Trns][A_3Pl][P_4Du]": [
"+'(g)aqa:acetek"
],
"[V][Cont][Trns][A_3Pl][P_4Pl]": [
"+'(g)aqa:aceteng"
],
"[V][Cont][Trns][A_3Pl][P_4Sg]": [
"+'(g)aqa:atni"
],
"[V][Cont][Trns][A_3Sg][P_1Du]": [
"+'(g)aqa:akuk"
],
"[V][Cont][Trns][A_3Sg][P_1Pl]": [
"+'(g)aqa:akut"
],
"[V][Cont][Trns][A_3Sg][P_1Sg]": [
"+'(g)aqa:anga"
],
"[V][Cont][Trns][A_3Sg][P_2Du]": [
"+'(g)aqa:atek"
],
"[V][Cont][Trns][A_3Sg][P_2Pl]": [
"+'(g)aqa:aci"
],
"[V][Cont][Trns][A_3Sg][P_2Sg]": [
"+'(g)aqa:aten"
],
"[V][Cont][Trns][A_3Sg][P_3Du]": [
"+'(g)aqa:akek"
],
"[V][Cont][Trns][A_3Sg][P_3Pl]": [
"+'(g)aqa:aki"
],
"[V][Cont][Trns][A_3Sg][P_3Sg]": [
"+'(g)aqa:aku"
],
"[V][Cont][Trns][A_3Sg][P_4Du]": [
"+'(g)aqa:atek"
],
"[V][Cont][Trns][A_3Sg][P_4Pl]": [
"+'(g)aqa:ateng"
],
"[V][Cont][Trns][A_3Sg][P_4Sg]": [
"+'(g)aqa:ani"
],
"[V][Cont][Trns][A_4Du][P_1Du]": [
"+'(g)aqa+megnekuk"
],
"[V][Cont][Trns][A_4Du][P_1Pl]": [
"+'(g)aqa+megnekut"
],
"[V][Cont][Trns][A_4Du][P_1Sg]": [
"+'(g)aqa+megnenga"
],
"[V][Cont][Trns][A_4Du][P_2Du]": [
"+'(g)aqa+megnetek"
],
"[V][Cont][Trns][A_4Du][P_2Pl]": [
"+'(g)aqa+megneci"
],
"[V][Cont][Trns][A_4Du][P_2Sg]": [
"+'(g)aqa+megnegen"
],
"[V][Cont][Trns][A_4Du][P_3Du]": [
"+'(g)aqa+megnekek"
],
"[V][Cont][Trns][A_4Du][P_3Pl]": [
"+'(g)aqa+megneki"
],
"[V][Cont][Trns][A_4Du][P_3Sg]": [
"+'(g)aqa+megnegu"
],
"[V][Cont][Trns][A_4Pl][P_1Du]": [
"+'(g)aqa+megtekuk"
],
"[V][Cont][Trns][A_4Pl][P_1Pl]": [
"+'(g)aqa+megtekut"
],
"[V][Cont][Trns][A_4Pl][P_1Sg]": [
"+'(g)aqa+megtenga"
],
"[V][Cont][Trns][A_4Pl][P_2Du]": [
"+'(g)aqa+megcetek"
],
"[V][Cont][Trns][A_4Pl][P_2Pl]": [
"+'(g)aqa+megceci"
],
"[V][Cont][Trns][A_4Pl][P_2Sg]": [
"+'(g)aqa+megteggen"
],
"[V][Cont][Trns][A_4Pl][P_3Du]": [
"+'(g)aqa+megtekek"
],
"[V][Cont][Trns][A_4Pl][P_3Pl]": [
"+'(g)aqa+megteki"
],
"[V][Cont][Trns][A_4Pl][P_3Sg]": [
"+'(g)aqa+megteggu"
],
"[V][Cont][Trns][A_4Sg][P_1Du]": [
"+'(g)aqa+mikuk"
],
"[V][Cont][Trns][A_4Sg][P_1Pl]": [
"+'(g)aqa+mikut"
],
"[V][Cont][Trns][A_4Sg][P_1Sg]": [
"+'(g)aqa+mia"
],
"[V][Cont][Trns][A_4Sg][P_2Du]": [
"+'(g)aqa+mitek"
],
"[V][Cont][Trns][A_4Sg][P_2Pl]": [
"+'(g)aqa+mici"
],
"[V][Cont][Trns][A_4Sg][P_2Sg]": [
"+'(g)aqa+miten"
],
"[V][Cont][Trns][A_4Sg][P_3Du]": [
"+'(g)aqa+mikek"
],
"[V][Cont][Trns][A_4Sg][P_3Pl]": [
"+'(g)aqa+miki"
],
"[V][Cont][Trns][A_4Sg][P_3Sg]": [
"+'(g)aqa+miu"
],
"[V][CtmpII][Intr][S_1Du]": [
"@%:(ng)inaner–megni",
"@~+nginaner–megni"
],
"[V][CtmpII][Intr][S_1Pl]": [
"@%:(ng)inaner–mten̄i",
"@~+nginaner–mten̄i"
],
"[V][CtmpII][Intr][S_1Sg]": [
"@%:(ng)inaner–mni",
"@~+nginaner–mni"
],
"[V][CtmpII][Intr][S_2Du]": [
"@%:(ng)inaner+petegni",
"@~+nginaner+petegni"
],
"[V][CtmpII][Intr][S_2Pl]": [
"@%:(ng)inaner+pecen̄i",
"@~+nginaner+pecen̄i"
],
"[V][CtmpII][Intr][S_2Sg]": [
"@%:(ng)inaner+peni",
"@~+nginaner+peni"
],
"[V][CtmpII][Intr][S_3Du]": [
"@%:(ng)inaner:agni",
"@~+nginaner:agni"
],
"[V][CtmpII][Intr][S_3Pl]": [
"@%:(ng)inaner:atni",
"@~+nginaner:atni"
],
"[V][CtmpII][Intr][S_3Sg]": [
"@%:(ng)inaner:ani",
"@~+nginaner:ani"
],
"[V][CtmpII][Intr][S_4Du]": [
"@%:(ng)inaner+megni",
"@~+nginaner+megni"
],
"[V][CtmpII][Intr][S_4Pl]": [
"@%:(ng)inaner+meggni",
"@~+nginaner+meggni"
],
"[V][CtmpII][Intr][S_4Sg]": [
"@%:(ng)inaner+mini",
"@~+nginaner+mini"
],
"[V][CtmpII][Trns][A_1Du][P_2Du]": [
"@%:(ng)inaner–megtek",
"@~+nginaner–megtek"
],
"[V][CtmpII][Trns][A_1Du][P_2Pl]": [
"@%:(ng)inaner–megci",
"@~+nginaner–megci"
],
"[V][CtmpII][Trns][A_1Du][P_2Sg]": [
"@%:(ng)inaner–megten",
"@~+nginaner–megten"
],
"[V][CtmpII][Trns][A_1Du][P_3Du]": [
"@%:(ng)inaner–megkek",
"@~+nginaner–megkek"
],
"[V][CtmpII][Trns][A_1Du][P_3Pl]": [
"@%:(ng)inaner–megki",
"@~+nginaner–megki"
],
"[V][CtmpII][Trns][A_1Du][P_3Sg]": [
"@%:(ng)inaner–megnegu",
"@~+nginaner–megnegu"
],
"[V][CtmpII][Trns][A_1Du][P_4Du]": [
"@%:(ng)inaner–megtek",
"@~+nginaner–megtek"
],
"[V][CtmpII][Trns][A_1Du][P_4Pl]": [
"@%:(ng)inaner–megteng",
"@~+nginaner–megteng"
],
"[V][CtmpII][Trns][A_1Du][P_4Sg]": [
"@%:(ng)inaner–megni",
"@~+nginaner–megni"
],
"[V][CtmpII][Trns][A_1Pl][P_2Du]": [
"@%:(ng)inaner–mcetek",
"@~+nginaner–mcetek"
],
"[V][CtmpII][Trns][A_1Pl][P_2Pl]": [
"@%:(ng)inaner–mceci",
"@~+nginaner–mceci"
],
"[V][CtmpII][Trns][A_1Pl][P_2Sg]": [
"@%:(ng)inaner–mteggen",
"@~+nginaner–mteggen"
],
"[V][CtmpII][Trns][A_1Pl][P_3Du]": [
"@%:(ng)inaner–mtekek",
"@~+nginaner–mtekek"
],
"[V][CtmpII][Trns][A_1Pl][P_3Pl]": [
"@%:(ng)inaner–mteki",
"@~+nginaner–mteki"
],
"[V][CtmpII][Trns][A_1Pl][P_3Sg]": [
"@%:(ng)inaner–mteggu",
"@~+nginaner–mteggu"
],
"[V][CtmpII][Trns][A_1Pl][P_4Du]": [
"@%:(ng)inaner–mcetek",
"@~+nginaner–mcetek"
],
"[V][CtmpII][Trns][A_1Pl][P_4Pl]": [
"@%:(ng)inaner–mceteng",
"@~+nginaner–mceteng"
],
"[V][CtmpII][Trns][A_1Pl][P_4Sg]": [
"@%:(ng)inaner–mten̄i",
"@~+nginaner–mten̄i"
],
"[V][CtmpII][Trns][A_1Sg][P_2Du]": [
"@%:(ng)inaner–mtek",
"@~+nginaner–mtek"
],
"[V][CtmpII][Trns][A_1Sg][P_2Pl]": [
"@%:(ng)inaner–mci",
"@~+nginaner–mci"
],
"[V][CtmpII][Trns][A_1Sg][P_2Sg]": [
"@%:(ng)inaner–mken",
"@~+nginaner–mken"
],
"[V][CtmpII][Trns][A_1Sg][P_3Du]": [
"@%:(ng)inaner–mkek",
"@~+nginaner–mkek"
],
"[V][CtmpII][Trns][A_1Sg][P_3Pl]": [
"@%:(ng)inaner–mki",
"@~+nginaner–mki"
],
"[V][CtmpII][Trns][A_1Sg][P_3Sg]": | |
import numpy as np
from .utils.math_utils import subsets
from .aps import aps
# Module-level flag: set to True once init_r() has started R and defined
# its helper functions, so repeated calls are no-ops.
r_is_initialized = False
class GlobalImport:
    """Context manager that promotes names bound inside its body to module
    globals.

    https://stackoverflow.com/a/53255802

    On ``__exit__`` the locals of the calling frame (i.e. everything bound
    inside the ``with`` block, such as the rpy2 imports in ``init_r``) are
    copied into this module's ``globals()``.

    This doesn't seem to like to be imported from elsewhere, e.g.,
    from utils. Maybe with some work it might be possible too.
    """

    def __enter__(self):
        return self

    def __exit__(self, *args):
        import inspect
        # Frame index 1 is the caller's frame; merge its local bindings
        # into this module's global namespace.
        self.collector = inspect.getargvalues(inspect.getouterframes(inspect.currentframe())[1].frame).locals
        globals().update(self.collector)
def init_r():
    """Initialize the embedded R session used by the candidate-parent
    algorithms.

    Imports rpy2 (promoting ``r``, ``numpy2ri`` and ``importr`` to module
    globals via :class:`GlobalImport`) and defines the R helper
    ``datapath_or_matrix_to_numeric_dataframe``. Safe to call repeatedly;
    subsequent calls return immediately.

    Raises:
        Exception: if rpy2 (and therefore R) is not available.
    """
    global r_is_initialized
    if r_is_initialized:
        return
    with GlobalImport() as gi:
        try:
            from rpy2.robjects import r
            from rpy2.robjects import numpy2ri
            from rpy2.robjects.packages import importr
        except ImportError as e:
            msg = ["To use the candidate parent algorithms pc, mb or ges you",
                   "need to have R installed. Pc and mb require the R-package",
                   "bnlearn; ges requires pcalg. Finally, you also need to",
                   "have the Python package rpy2 installed to interface with R."]
            raise Exception(' '.join(msg)) from e
        # R helper: load data from a file path or take a matrix, and coerce
        # it into a numeric data.frame (factor-encoding discrete columns,
        # optionally reading arities from the first row).
        load_funcs = """
        datapath_or_matrix_to_numeric_dataframe <- function(data_path_or_matrix,
                                                            discrete=TRUE,
                                                            arities=FALSE)
        {
          if (typeof(data_path_or_matrix) == "character") {
            data <- read.table(data_path_or_matrix, header = FALSE)
          }
          else {
            data <- data_path_or_matrix
            mode(data) = "numeric"
            data <- data.frame(data)
          }
          if (discrete) {
            if (arities) {
              arities <- data[1,]
              data <- data[2:nrow(data),]
            } else {
              arities <- lapply(data, function(x) length(unique(x)))
            }
            data[] <- lapply(data, as.factor)
            for(i in 1:length(arities)) {
              levels(data[, i]) <- as.character(0:(arities[[i]] - 1))
            }
          }
          colnames(data) <- 0:(ncol(data)-1)
          return(data)
        }
        """
        r(load_funcs)
    r_is_initialized = True
def convert_to_r_data(data):
    """Convert a sumu.Data object into an R data.frame.

    Builds an R matrix from the flattened data (N rows, n columns), then
    runs the R helper defined in :func:`init_r` to coerce it into a
    numeric data.frame. ``r`` and ``numpy2ri`` are module globals injected
    by init_r()'s GlobalImport block.
    """
    # Input is sumu.Data
    init_r()
    # Enable automatic numpy <-> R conversion only for this call.
    numpy2ri.activate()
    datar = r.matrix(data.all().flatten(),
                     nrow=data.N,
                     ncol=data.n,
                     byrow=True)
    numpy2ri.deactivate()
    discrete = data.discrete
    # data.arities uses False as the "absent" sentinel, so compare to False
    # rather than relying on truthiness.
    arities = True if data.arities is not False else False
    datar = r['datapath_or_matrix_to_numeric_dataframe'](datar,
                                                         discrete=discrete,
                                                         arities=arities)
    return datar
def candidates_to_str(C):
    """Serialize a candidate-parent dict into a string.

    Nodes are emitted in ascending key order, each node's candidates
    space-separated, nodes joined by ``|`` -- e.g. ``"1 2|0|0 1"``.
    """
    per_node = []
    for v in sorted(C):
        per_node.append(' '.join(str(node) for node in C[v]))
    return '|'.join(per_node)
def parse_candidates(C_str):
    """Parse the string format produced by :func:`candidates_to_str`.

    Returns a dict mapping node index -> tuple of int candidate parents.
    """
    parsed = {}
    for v, part in enumerate(C_str.split("|")):
        parsed[v] = tuple(int(tok) for tok in part.split())
    return parsed
def _adjust_number_candidates(K, C, method, scores=None):
assert method in ['random', 'top'], "fill method should be random or top"
if method == 'top':
assert scores is not None, "scorepath (-s) required for fill == top"
C = dict(C.items())
n = len(C)
for v in C:
add_n = max(0, K - len(C[v]))
add_from = [add_node for add_node in range(n) if add_node not in C[v] + (v,)]
if method == 'random':
if len(C[v]) < K:
C[v] = C[v] + tuple(np.random.choice(add_from, add_n, replace=False))
elif len(C[v]) > K:
C[v] = np.random.choice(C[v], K, replace=False)
if method == 'top':
if len(C[v]) < K:
C_v_top = sorted([(parent, scores.local(v, np.array([parent])))
for parent in add_from],
key=lambda item: item[1], reverse=True)[:add_n]
C_v_top = tuple([c[0] for c in C_v_top])
C[v] = C[v] + C_v_top
elif len(C[v]) > K:
C[v] = sorted([(parent, scores.local(v, np.array([parent])))
for parent in C[v]],
key=lambda item: item[1], reverse=True)[:K]
C[v] = [c[0] for c in C[v]]
for v in C:
C[v] = tuple(sorted(C[v]))
return C
def _most_freq_candidates(K, Cs):
C = {v: list() for v in range(len(Cs[0]))}
for C_i in Cs:
for v in C_i:
C[v] += C_i[v]
for v in C:
C[v] = [i[0] for i in sorted([(u, C[v].count(u)) for u in C if C[v].count(u) > 0], key=lambda i: i[1], reverse=True)][:K]
C[v] = tuple(sorted(C[v]))
return C
def hybrid(K, **kwargs):
    """Combine several candidate algorithms by voting.

    Each algorithm named in ``halgos`` is run for every k in 1..K; a parent
    suggested by more algorithms at a smaller k wins. Ties at the cutoff are
    broken by ``hfill`` ('random' or 'top').

    NOTE(review): ``algo`` below is presumably a module-level registry
    mapping algorithm names to the functions in this module -- it is not
    defined in this chunk; confirm it exists before this is called.
    """
    algos = kwargs.get("halgos")
    fill = kwargs.get("hfill")
    assert not [algos, fill].count(None), "list of algorithms to use (-ha) and tie breaking method (-hf) required for algo == hybrid"
    if fill == "top":
        scores = kwargs["scores"]

    def vote(Cs):
        # Cs[i][k][v]: candidates for node v from algorithm i at size k+1.
        C = {v: set() for v in Cs[0][0]}
        for v in C:
            k = 0
            while len(C[v]) < K:
                # Pool every algorithm's suggestions at the current size.
                to_add = tuple()
                for i in range(len(algos)):
                    to_add += tuple(Cs[i][k][v])
                k += 1
                if len(C[v].union(set(to_add))) <= K:
                    # Everything fits: accept the whole round of votes.
                    C[v] = C[v].union(set(to_add))
                else:
                    # Overflow: bucket the not-yet-selected nodes by how many
                    # algorithms voted for them (key = vote count).
                    to_add = {0: to_add}
                    for u in set(to_add[0]).difference(C[v]):
                        if to_add[0].count(u) in to_add:
                            to_add[to_add[0].count(u)] = to_add[to_add[0].count(u)].union({u})
                        else:
                            to_add[to_add[0].count(u)] = {u}
                    del to_add[0]
                    # Admit whole buckets from most- to least-voted; the first
                    # bucket that does not fit is trimmed by the fill method.
                    for count in sorted(to_add.keys(), reverse=True):
                        if len(C[v].union(to_add[count])) <= K:
                            C[v] = C[v].union(to_add[count])
                        else:
                            if fill == 'random':
                                C[v] = C[v].union(np.random.choice(list(to_add[count]), K - len(C[v]), replace=False))
                            elif fill == 'top':
                                # scores here is indexed as scores[v][(parent,)]
                                C_v_top = sorted([(parent, scores[v][(parent,)])
                                                  for parent in to_add[count]],
                                                 key=lambda item: item[1], reverse=True)[:K - len(C[v])]
                                C_v_top = set([c[0] for c in C_v_top])
                                C[v] = C[v].union(C_v_top)
                            break
        return C

    C = [tuple(algo[a](k, **kwargs) for k in range(1, K+1)) for a in algos]
    C = vote(C)
    for v in C:
        C[v] = tuple(sorted(C[v]))
    return C
def rnd(K, **kwargs):
    """Draw K candidate parents per node uniformly at random.

    Requires ``n`` (number of variables) in kwargs. A node is never its
    own candidate; each set is returned as a sorted tuple.
    """
    n = kwargs.get("n")
    assert n is not None, "nvars (-n) required for algo == rnd"
    C = dict()
    for v in range(n):
        others = [u for u in range(n) if u != v]
        picked = np.random.choice(others, K, replace=False)
        C[v] = tuple(sorted(picked))
    return C
def ges(K, **kwargs):
    """Greedy equivalence search :footcite:`chickering:2002`.

    GES is implemented in the R package pcalg :footcite:`hauser:2012,kalisch:2012`,
    for which the function :py:func:`pcalg` provides a Python wrapping.
    """
    init_r()
    data = kwargs["data"]
    data = convert_to_r_data(data)
    B = kwargs.get("B", 20)
    fill = kwargs.get("fill", "top")
    scores = kwargs.get("scores")
    # NOTE(review): unlike pc()/mb() this branches on key *presence*, so the
    # B=20 default above is never used for bootstrapping, and B itself goes
    # unused in this branch -- confirm whether `if B is not None` was meant.
    if "B" in kwargs:
        Cs = list()
        for i in range(kwargs["B"]):
            # Bootstrap: resample rows with replacement via R's sample().
            bsample = data.rx(r["sample"](data.nrow, data.nrow, replace=True), True)
            Cs.append(pcalg("ges", K, bsample))
        C = _most_freq_candidates(K, Cs)
    else:
        C = pcalg("ges", K, data)
    if fill:
        C = _adjust_number_candidates(K, C, fill, scores=scores)
    return C
def pcalg(method, K, data):
    """Run a pcalg structure-learning routine (currently only 'ges').

    Returns a dict mapping node index -> tuple of sorted 0-based parent
    candidates. ``K`` is unused here; kept for interface uniformity with
    the other candidate functions.
    """
    init_r()
    base = importr("base")
    importr('pcalg')
    # base$`$`: R's slot accessor, needed to read EssGraph fields below.
    dollar = base.__dict__["$"]
    n = data.ncol
    C = dict({node: list() for node in range(n)})
    data = r["data.matrix"](data)
    if method == 'ges':
        score = r["new"]("GaussL0penObsScore", data)
        cpdag = r["ges"](score).rx2("essgraph")
        for v in range(n):
            # NOTE: undirected edges are represented as bidirected!
            # See pcalg documentation at
            # https://cran.r-project.org/web/packages/pcalg/pcalg.pdf
            # for ges and EssGraph.
            # Also running the ges example confirms this.
            # R is 1-based: rx2(v+1) reads node v's in-edges; v-1 maps back
            # to 0-based. (The comprehension's v shadows the loop v only
            # inside the comprehension itself.)
            C[v] = [v-1 for v in sorted(dollar(cpdag, ".in.edges").rx2(v+1))]
    for v in C:
        C[v] = tuple(sorted(C[v]))
    return C
def pc(K, **kwargs):
    """Candidate parents via the PC algorithm (bnlearn's pc.stable).

    By default runs B=20 bootstrap resamples and keeps, per node, the K
    most frequently suggested parents; the sets are then padded/trimmed to
    exactly K according to ``fill``.
    """
    init_r()
    data = kwargs.get("data")
    data = convert_to_r_data(data)
    alpha = kwargs.get("alpha", 0.1)  # significance level for CI tests
    max_sx = kwargs.get("max_sx", 1)  # max conditioning-set size
    B = kwargs.get("B", 20)           # bootstrap sample count (None = off)
    fill = kwargs.get("fill", "top")
    scores = kwargs.get("scores")
    if B is not None:
        Cs = list()
        for i in range(B):
            # Resample rows with replacement for each bootstrap run.
            bsample = data.rx(r["sample"](data.nrow,
                                          data.nrow,
                                          replace=True), True)
            Cs.append(bnlearn("pc", K, bsample, alpha=alpha, max_sx=max_sx))
        C = _most_freq_candidates(K, Cs)
    else:
        C = bnlearn("pc", K, data, alpha=alpha, max_sx=max_sx)
    if fill is not None:
        C = _adjust_number_candidates(K, C, fill, scores=scores)
    return C
def mb(K, **kwargs):
    """Candidate parents from Markov blankets (bnlearn's IAMB).

    Same bootstrap/fill structure as :func:`pc`, but collects each node's
    Markov blanket instead of its PC neighbours.
    """
    init_r()
    data = kwargs.get("data")
    data = convert_to_r_data(data)
    alpha = kwargs.get("alpha", 0.1)  # significance level for CI tests
    max_sx = kwargs.get("max_sx", 1)  # max conditioning-set size
    B = kwargs.get("B", 20)           # bootstrap sample count (None = off)
    fill = kwargs.get("fill", "top")
    scores = kwargs.get("scores")
    if B is not None:
        Cs = list()
        for i in range(B):
            # Resample rows with replacement for each bootstrap run.
            bsample = data.rx(r["sample"](data.nrow,data.nrow,replace=True), True)
            Cs.append(bnlearn("mb", K, bsample, alpha=alpha, max_sx=max_sx))
        C = _most_freq_candidates(K, Cs)
    else:
        C = bnlearn("mb", K, data, alpha=alpha, max_sx=max_sx)
    if fill is not None:
        C = _adjust_number_candidates(K, C, fill, scores=scores)
    return C
def hc(K, **kwargs):
    """Candidate parents via bnlearn's score-based hill-climbing.

    NOTE(review): unlike :func:`pc`/:func:`mb`, this reads a ``datapath``
    and calls ``r['load_dat']``, which is not among the R helpers defined
    by :func:`init_r` -- this looks like a stale code path; confirm before
    relying on it.
    """
    init_r()
    datapath = kwargs.get("datapath")
    assert datapath is not None, "datapath (-d) required for algo == hc"
    B = kwargs.get("B")
    if B is None:
        B = 20
    fill = kwargs.get("fill")
    if fill is None:
        fill = "top"
    scores = kwargs.get("scores")
    data = r['load_dat'](datapath)
    # NOTE(review): B is an int (default 20) but is compared against the
    # string "none"; the non-bootstrap branch is only reachable if a caller
    # passes B="none" explicitly. Same pattern for fill below.
    if B != "none":
        Cs = list()
        for i in range(B):
            # Resample rows with replacement for each bootstrap run.
            bsample = data.rx(r["sample"](data.nrow, data.nrow, replace=True), True)
            Cs.append(bnlearn("hc", K, bsample))
        C = _most_freq_candidates(K, Cs)
    else:
        C = bnlearn("hc", K, data)
    if fill != "none":
        C = _adjust_number_candidates(K, C, fill, scores=scores)
    return C
def bnlearn(method, K, data, **kwargs):
    """Run a bnlearn structure-learning routine and collect per-node
    candidate sets.

    Args:
        method: 'mb' (IAMB Markov blankets), 'pc' (pc.stable) or
            'hc' (hill-climbing).
        K: unused in this function; kept for interface uniformity.
        data: an R data.frame, e.g. from :func:`convert_to_r_data`.
        **kwargs: 'alpha' and 'max_sx' are required for 'mb' and 'pc'.

    Returns:
        dict mapping node index -> sorted tuple of candidate parents.
    """
    init_r()
    R_bnlearn = importr('bnlearn')
    n = data.ncol
    C = dict({v: list() for v in range(n)})
    if method == 'mb':
        bn = R_bnlearn.iamb(data, alpha=kwargs["alpha"], max_sx=kwargs["max_sx"])
    if method == 'pc':
        bn = R_bnlearn.pc_stable(data, alpha=kwargs["alpha"], max_sx=kwargs["max_sx"])
    if method == 'hc':
        # Uses BIC by default
        bn = R_bnlearn.hc(data)
    for v in range(n):
        if method == 'mb':
            # Every Markov-blanket member of v becomes a candidate.
            mb = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('mb')]
            for u in mb:
                if u not in C[v]:
                    C[v].append(u)
        if method == 'pc':
            # Neighbours minus children: parents and undirected links.
            nbr = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('nbr')]
            children = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('children')]
            for u in nbr:
                if u in children:
                    continue
                if u not in C[v]:
                    C[v].append(u)
        if method == 'hc':
            # hc learns a DAG: take v's parent set directly.
            pset = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('parents')]
            for u in pset:
                if u not in C[v]:
                    C[v].append(u)
    for v in C:
        C[v] = tuple(sorted(C[v]))
    return C
def opt(K, **kwargs):
scores = kwargs.get("scores")
n = kwargs.get("n")
C = np.array([[v for v in range(n) if v != u] for u in range(n)], dtype=np.int32)
pset_posteriors = aps(scores.all_candidate_restricted_scores(C),
as_dict=True, normalize=True)
C = dict()
for v in pset_posteriors:
postsums = dict()
for candidate_set in subsets(set(pset_posteriors).difference({v}), K, K):
postsums[candidate_set] = np.logaddexp.reduce([pset_posteriors[v][pset]
| |
heightmap_lerp_hm(
hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray, coef: float
) -> None:
"""Perform linear interpolation between two heightmaps storing the result
in ``hm3``.
This is the same as doing ``hm3[:] = hm1[:] + (hm2[:] - hm1[:]) * coef``
Args:
hm1 (numpy.ndarray): The first heightmap.
hm2 (numpy.ndarray): The second heightmap to add to the first.
hm3 (numpy.ndarray): A destination heightmap to store the result.
coef (float): The linear interpolation coefficient.
"""
lib.TCOD_heightmap_lerp_hm(
_heightmap_cdata(hm1),
_heightmap_cdata(hm2),
_heightmap_cdata(hm3),
coef,
)
@deprecate("Add 2 arrays using `hm3 = hm1 + hm2`")
def heightmap_add_hm(
    hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray
) -> None:
    """Store the element-wise sum of ``hm1`` and ``hm2`` in ``hm3``.

    Args:
        hm1 (numpy.ndarray): The first heightmap.
        hm2 (numpy.ndarray): The second heightmap to add to the first.
        hm3 (numpy.ndarray): A destination heightmap to store the result.

    .. deprecated:: 2.0
        Do ``hm3[:] = hm1[:] + hm2[:]`` instead.
    """
    # Assign through a slice so hm3 is updated in place.
    hm3[:] = np.add(hm1, hm2)
@deprecate("Multiply 2 arrays using `hm3 = hm1 * hm2`")
def heightmap_multiply_hm(
    hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray
) -> None:
    """Store the element-wise product of ``hm1`` and ``hm2`` in ``hm3``.

    Args:
        hm1 (numpy.ndarray): The first heightmap.
        hm2 (numpy.ndarray): The second heightmap to multiply with the first.
        hm3 (numpy.ndarray): A destination heightmap to store the result.

    .. deprecated:: 2.0
        Do ``hm3[:] = hm1[:] * hm2[:]`` instead.
        Alternatively you can do ``HeightMap(hm1.array[:] * hm2.array[:])``.
    """
    # Assign through a slice so hm3 is updated in place.
    hm3[:] = np.multiply(hm1, hm2)
@pending_deprecate()
def heightmap_add_hill(
    hm: np.ndarray, x: float, y: float, radius: float, height: float
) -> None:
    """Add a hill (a half spheroid) at given position.

    If height == radius or -radius, the hill is a half-sphere.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        x (float): The x position at the center of the new hill.
        y (float): The y position at the center of the new hill.
        radius (float): The size of the new hill.
        height (float): The height or depth of the new hill.
    """
    # Delegates to the C implementation; hm is modified in place.
    lib.TCOD_heightmap_add_hill(_heightmap_cdata(hm), x, y, radius, height)
@pending_deprecate()
def heightmap_dig_hill(
    hm: np.ndarray, x: float, y: float, radius: float, height: float
) -> None:
    """Dig a hill into the heightmap, keeping the extremum per cell.

    This function takes the highest value (if height > 0) or the lowest
    (if height < 0) between the map and the hill.

    It's main goal is to carve things in maps (like rivers) by digging hills
    along a curve.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        x (float): The x position at the center of the new carving.
        y (float): The y position at the center of the new carving.
        radius (float): The size of the carving.
        height (float): The height or depth of the hill to dig out.
    """
    # Delegates to the C implementation; hm is modified in place.
    lib.TCOD_heightmap_dig_hill(_heightmap_cdata(hm), x, y, radius, height)
@pending_deprecate()
def heightmap_rain_erosion(
    hm: np.ndarray,
    nbDrops: int,
    erosionCoef: float,
    sedimentationCoef: float,
    rnd: Optional[tcod.random.Random] = None,
) -> None:
    """Simulate the effect of rain drops on the terrain, resulting in erosion.

    ``nbDrops`` should be at least hm.size.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        nbDrops (int): Number of rain drops to simulate.
        erosionCoef (float): Amount of ground eroded on the drop's path.
        sedimentationCoef (float): Amount of ground deposited when the drops
            stops to flow.
        rnd (Optional[Random]): A tcod.Random instance, or None.
    """
    # In-place C simulation; a NULL rng makes libtcod use its default one.
    lib.TCOD_heightmap_rain_erosion(
        _heightmap_cdata(hm),
        nbDrops,
        erosionCoef,
        sedimentationCoef,
        rnd.random_c if rnd else ffi.NULL,
    )
@pending_deprecate()
def heightmap_kernel_transform(
    hm: np.ndarray,
    kernelsize: int,
    dx: Sequence[int],
    dy: Sequence[int],
    weight: Sequence[float],
    minLevel: float,
    maxLevel: float,
) -> None:
    """Apply a generic transformation on the map, so that each resulting cell
    value is the weighted sum of several neighbour cells.

    This can be used to smooth/sharpen the map.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        kernelsize (int): Should be set to the length of the parameters::
            dx, dy, and weight.
        dx (Sequence[int]): A sequence of x coorinates.
        dy (Sequence[int]): A sequence of y coorinates.
        weight (Sequence[float]): A sequence of kernelSize cells weight.
            The value of each neighbour cell is scaled by
            its corresponding weight
        minLevel (float): No transformation will apply to cells
            below this value.
        maxLevel (float): No transformation will apply to cells
            above this value.

    See examples below for a simple horizontal smoothing kernel :
    replace value(x,y) with
    0.33*value(x-1,y) + 0.33*value(x,y) + 0.33*value(x+1,y).
    To do this, you need a kernel of size 3
    (the sum involves 3 surrounding cells).
    The dx,dy array will contain:

    * dx=-1, dy=0 for cell (x-1, y)
    * dx=1, dy=0 for cell (x+1, y)
    * dx=0, dy=0 for cell (x, y)
    * The weight array will contain 0.33 for each cell.

    Example:
        >>> import numpy as np
        >>> heightmap = np.zeros((3, 3), dtype=np.float32)
        >>> heightmap[:,1] = 1
        >>> dx = [-1, 1, 0]
        >>> dy = [0, 0, 0]
        >>> weight = [0.33, 0.33, 0.33]
        >>> tcod.heightmap_kernel_transform(heightmap, 3, dx, dy, weight,
        ...                                 0.0, 1.0)
    """
    # Marshal the Python sequences into C arrays for the ffi call.
    cdx = ffi.new("int[]", dx)
    cdy = ffi.new("int[]", dy)
    cweight = ffi.new("float[]", weight)
    lib.TCOD_heightmap_kernel_transform(
        _heightmap_cdata(hm), kernelsize, cdx, cdy, cweight, minLevel, maxLevel
    )
@pending_deprecate()
def heightmap_add_voronoi(
    hm: np.ndarray,
    nbPoints: Any,
    nbCoef: int,
    coef: Sequence[float],
    rnd: Optional[tcod.random.Random] = None,
) -> None:
    """Add values from a Voronoi diagram to the heightmap.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        nbPoints (Any): Number of Voronoi sites.
        nbCoef (int): The diagram value is calculated from the nbCoef
            closest sites.
        coef (Sequence[float]): The distance to each site is scaled by the
            corresponding coef.
            Closest site : coef[0],
            second closest site : coef[1], ...
        rnd (Optional[Random]): A Random instance, or None.
    """
    # NOTE(review): the nbPoints argument is ignored -- it is overwritten
    # with len(coef) before the C call. Confirm whether any caller relies
    # on passing a different nbPoints.
    nbPoints = len(coef)
    ccoef = ffi.new("float[]", coef)
    lib.TCOD_heightmap_add_voronoi(
        _heightmap_cdata(hm),
        nbPoints,
        nbCoef,
        ccoef,
        rnd.random_c if rnd else ffi.NULL,
    )
@deprecate("Arrays of noise should be sampled using the tcod.noise module.")
def heightmap_add_fbm(
    hm: np.ndarray,
    noise: tcod.noise.Noise,
    mulx: float,
    muly: float,
    addx: float,
    addy: float,
    octaves: float,
    delta: float,
    scale: float,
) -> None:
    """Add FBM noise to the heightmap.

    The noise coordinate for each map cell is
    `((x + addx) * mulx / width, (y + addy) * muly / height)`.

    The value added to the heightmap is `delta + noise * scale`.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        noise (Noise): A Noise instance.
        mulx (float): Scaling of each x coordinate.
        muly (float): Scaling of each y coordinate.
        addx (float): Translation of each x coordinate.
        addy (float): Translation of each y coordinate.
        octaves (float): Number of octaves in the FBM sum.
        delta (float): The value added to all heightmap cells.
        scale (float): The noise value is scaled with this parameter.

    .. deprecated:: 8.1
        An equivalent array of noise samples can be taken using a method such
        as :any:`Noise.sample_ogrid`.
    """
    # Pass the underlying C noise object, or NULL if noise is None.
    noise = noise.noise_c if noise is not None else ffi.NULL
    lib.TCOD_heightmap_add_fbm(
        _heightmap_cdata(hm),
        noise,
        mulx,
        muly,
        addx,
        addy,
        octaves,
        delta,
        scale,
    )
@deprecate("Arrays of noise should be sampled using the tcod.noise module.")
def heightmap_scale_fbm(
    hm: np.ndarray,
    noise: tcod.noise.Noise,
    mulx: float,
    muly: float,
    addx: float,
    addy: float,
    octaves: float,
    delta: float,
    scale: float,
) -> None:
    """Multiply the heighmap values with FBM noise.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        noise (Noise): A Noise instance.
        mulx (float): Scaling of each x coordinate.
        muly (float): Scaling of each y coordinate.
        addx (float): Translation of each x coordinate.
        addy (float): Translation of each y coordinate.
        octaves (float): Number of octaves in the FBM sum.
        delta (float): The value added to all heightmap cells.
        scale (float): The noise value is scaled with this parameter.

    .. deprecated:: 8.1
        An equivalent array of noise samples can be taken using a method such
        as :any:`Noise.sample_ogrid`.
    """
    # Pass the underlying C noise object, or NULL if noise is None.
    noise = noise.noise_c if noise is not None else ffi.NULL
    lib.TCOD_heightmap_scale_fbm(
        _heightmap_cdata(hm),
        noise,
        mulx,
        muly,
        addx,
        addy,
        octaves,
        delta,
        scale,
    )
@pending_deprecate()
def heightmap_dig_bezier(
hm: np.ndarray,
px: Tuple[int, int, int, int],
py: Tuple[int, int, int, int],
startRadius: float,
startDepth: float,
endRadius: float,
endDepth: float,
) -> None:
"""Carve a path along a cubic Bezier curve.
Both radius and depth can vary linearly along the path.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
px (Sequence[int]): The 4 `x` coordinates of the Bezier curve.
py (Sequence[int]): The 4 `y` coordinates of the Bezier curve.
startRadius (float): The starting radius size.
startDepth (float): The starting depth.
| |
= True
compound_operations = ['UNION', 'UNION ALL']
field_overrides = {
'bool': 'BOOL',
'binary': 'BINARY',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'float': 'FLOAT',
'primary_key': 'INTEGER AUTO_INCREMENT',
'text': 'LONGTEXT',
'uuid': 'VARCHAR(255)',
}
for_update = True
interpolation = '%s'
limit_max = 2 ** 64 - 1 # MySQL quirk
op_overrides = {
OP.LIKE: 'LIKE BINARY',
OP.ILIKE: 'LIKE',
OP.XOR: 'XOR',
}
quote_char = '`'
subquery_delete_same_table = False
upsert_sql = 'REPLACE INTO'
    def _connect(self, database, **kwargs):
        """Open a new connection to `database` via MySQLdb/PyMySQL.

        Raises:
            ImproperlyConfigured: if no MySQL driver module was importable.
        """
        # `mysql` is the driver module resolved at import time; falsy when
        # neither MySQLdb nor PyMySQL is installed.
        if not mysql:
            raise ImproperlyConfigured('MySQLdb or PyMySQL must be installed.')
        conn_kwargs = {
            'charset': 'utf8',
            'use_unicode': True,
        }
        conn_kwargs.update(kwargs)
        if 'password' in conn_kwargs:
            # The MySQL drivers expect `passwd`, not `password`.
            conn_kwargs['passwd'] = conn_kwargs.pop('password')
        return mysql.connect(db=database, **conn_kwargs)
def get_tables(self, schema=None):
return [row for row, in self.execute_sql('SHOW TABLES')]
def get_indexes(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
unique = set()
indexes = {}
for row in cursor.fetchall():
if not row[1]:
unique.add(row[2])
indexes.setdefault(row[2], [])
indexes[row[2]].append(row[4])
return [IndexMetadata(name, None, indexes[name], name in unique, table)
for name in indexes]
def get_columns(self, table, schema=None):
sql = """
SELECT column_name, is_nullable, data_type
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()"""
cursor = self.execute_sql(sql, (table,))
pks = set(self.get_primary_keys(table))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table)
for name, null, dt in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
return [row[4] for row in cursor.fetchall() if row[2] == 'PRIMARY']
def get_foreign_keys(self, table, schema=None):
query = """
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL"""
cursor = self.execute_sql(query, (table,))
return [
ForeignKeyMetadata(column, dest_table, dest_column, table)
for column, dest_table, dest_column in cursor.fetchall()]
    def extract_date(self, date_part, date_field):
        # Use MySQL's EXTRACT(<part> FROM <field>) to pull a date component.
        return fn.EXTRACT(Clause(R(date_part), R('FROM'), date_field))
    def truncate_date(self, date_part, date_field):
        # MySQL has no date_trunc(); emulate it with DATE_FORMAT and a
        # part-specific format string from MYSQL_DATE_TRUNC_MAPPING.
        return fn.DATE_FORMAT(date_field, MYSQL_DATE_TRUNC_MAPPING[date_part])
    def default_insert_clause(self, model_class):
        # MySQL needs an explicit (pk) VALUES (DEFAULT) clause when inserting
        # a row consisting entirely of default values.
        return Clause(
            EnclosedClause(model_class._meta.primary_key),
            SQL('VALUES (DEFAULT)'))
class _callable_context_manager(object):
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
return inner
class ExecutionContext(_callable_context_manager):
    """Run wrapped code on its own dedicated connection, optionally inside a
    transaction.  Usable as a context manager or (via the base class) as a
    decorator."""
    def __init__(self, database, with_transaction=True):
        self.database = database
        self.with_transaction = with_transaction
        self.connection = None
    def __enter__(self):
        with self.database._conn_lock:
            # Register this context before connecting so the database routes
            # subsequent queries through it.
            self.database.push_execution_context(self)
            self.connection = self.database._connect(
                self.database.database,
                **self.database.connect_kwargs)
            if self.with_transaction:
                self.txn = self.database.transaction()
                self.txn.__enter__()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        with self.database._conn_lock:
            if self.connection is None:
                # __enter__ never reached the connect call; just unregister.
                self.database.pop_execution_context()
            else:
                try:
                    if self.with_transaction:
                        if not exc_type:
                            # Commit without beginning a new transaction.
                            self.txn.commit(False)
                        self.txn.__exit__(exc_type, exc_val, exc_tb)
                finally:
                    # Always unregister the context and close the dedicated
                    # connection, even if the commit above raised.
                    self.database.pop_execution_context()
                    self.database._close(self.connection)
class Using(ExecutionContext):
    """Execution context that temporarily rebinds ``models`` to ``database``.

    Each model's ``_meta.database`` is swapped on entry and restored on exit.
    """
    def __init__(self, database, models, with_transaction=True):
        super(Using, self).__init__(database, with_transaction)
        self.models = models
    def __enter__(self):
        self._orig = [model._meta.database for model in self.models]
        for model in self.models:
            model._meta.database = self.database
        return super(Using, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        super(Using, self).__exit__(exc_type, exc_val, exc_tb)
        for model, original_db in zip(self.models, self._orig):
            model._meta.database = original_db
class _atomic(_callable_context_manager):
    """Run a block atomically: as a transaction at depth 0, otherwise as a
    savepoint nested inside the current transaction."""
    def __init__(self, db):
        self.db = db
    def __enter__(self):
        helper_factory = (self.db.transaction
                          if self.db.transaction_depth() == 0
                          else self.db.savepoint)
        self._helper = helper_factory()
        return self._helper.__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._helper.__exit__(exc_type, exc_val, exc_tb)
class transaction(_callable_context_manager):
    """Transaction context manager, also usable as a decorator.

    Autocommit is disabled on entry and restored on exit.  Only the
    outermost ``transaction`` (depth 0) issues BEGIN, and only the outermost
    level commits when exiting cleanly; nested uses are no-ops around the
    outer transaction.
    """
    def __init__(self, db):
        self.db = db
    def _begin(self):
        self.db.begin()
    def commit(self, begin=True):
        # Commit the current transaction and, by default, begin a new one.
        self.db.commit()
        if begin:
            self._begin()
    def rollback(self, begin=True):
        # Roll back the current transaction and, by default, begin a new one.
        self.db.rollback()
        if begin:
            self._begin()
    def __enter__(self):
        self._orig = self.db.get_autocommit()
        self.db.set_autocommit(False)
        if self.db.transaction_depth() == 0:
            # Only the outermost context actually begins a transaction.
            self._begin()
        self.db.push_transaction(self)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type:
                # An exception escaped the block: undo without re-beginning.
                self.rollback(False)
            elif self.db.transaction_depth() == 1:
                # Outermost level exiting cleanly: try to commit; if the
                # commit itself fails, roll back and re-raise.
                try:
                    self.commit(False)
                except:
                    self.rollback(False)
                    raise
        finally:
            # Always restore autocommit state and pop this transaction.
            self.db.set_autocommit(self._orig)
            self.db.pop_transaction()
class savepoint(_callable_context_manager):
    """Wrap a block in a named SAVEPOINT, releasing it on success and
    rolling back to it on failure.  A random identifier is generated when
    ``sid`` is not supplied."""
    def __init__(self, db, sid=None):
        self.db = db
        compiler = db.compiler()
        self.sid = sid or 's' + uuid.uuid4().hex
        self.quoted_sid = compiler.quote(self.sid)
    def _execute(self, query):
        self.db.execute_sql(query, require_commit=False)
    def commit(self):
        self._execute('RELEASE SAVEPOINT %s;' % self.quoted_sid)
    def rollback(self):
        self._execute('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid)
    def __enter__(self):
        self._orig_autocommit = self.db.get_autocommit()
        self.db.set_autocommit(False)
        self._execute('SAVEPOINT %s;' % self.quoted_sid)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type:
                self.rollback()
            else:
                # Release the savepoint; on failure roll back and re-raise.
                try:
                    self.commit()
                except:
                    self.rollback()
                    raise
        finally:
            self.db.set_autocommit(self._orig_autocommit)
class savepoint_sqlite(savepoint):
    """Savepoint variant for sqlite connections.

    sqlite savepoints only work when the connection's ``isolation_level`` is
    None; assigning to it, however, invalidates any open savepoints, so the
    attribute is only written when it actually needs to change.
    """
    def __enter__(self):
        conn = self.db.get_conn()
        if conn.isolation_level is None:
            # Already in the required mode; nothing to restore later.
            self._orig_isolation_level = None
        else:
            self._orig_isolation_level = conn.isolation_level
            conn.isolation_level = None
        return super(savepoint_sqlite, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            return super(savepoint_sqlite, self).__exit__(
                exc_type, exc_val, exc_tb)
        finally:
            if self._orig_isolation_level is not None:
                self.db.get_conn().isolation_level = self._orig_isolation_level
class FieldProxy(Field):
    """Stand-in for a field belonging to an aliased model.

    Delegates value conversion and attribute access to the wrapped field
    while reporting the ``ModelAlias`` as its model class.
    """
    def __init__(self, alias, field_instance):
        self._model_alias = alias
        self.model = self._model_alias.model_class
        self.field_instance = field_instance
    def clone_base(self):
        return FieldProxy(self._model_alias, self.field_instance)
    def coerce(self, value):
        return self.field_instance.coerce(value)
    def python_value(self, value):
        return self.field_instance.python_value(value)
    def db_value(self, value):
        return self.field_instance.db_value(value)
    def __getattr__(self, attr):
        # Report the alias as the model class; everything else comes from
        # the underlying field.
        if attr != 'model_class':
            return getattr(self.field_instance, attr)
        return self._model_alias
class ModelAlias(object):
    """Reference a model under an alias, e.g. for self-joins.

    Field accesses return :class:`FieldProxy` objects bound to the alias;
    all other attributes are forwarded to the underlying model class.
    Instances are read-only.
    """
    def __init__(self, model_class):
        # Write through __dict__ because __setattr__ is blocked below.
        self.__dict__['model_class'] = model_class
    def __getattr__(self, attr):
        target = getattr(self.model_class, attr)
        if isinstance(target, Field):
            return FieldProxy(self, target)
        return target
    def __setattr__(self, attr, value):
        raise AttributeError('Cannot set attributes on ModelAlias instances')
    def get_proxy_fields(self):
        return [FieldProxy(self, field)
                for field in self.model_class._meta.sorted_fields]
    def select(self, *selection):
        fields = selection or self.get_proxy_fields()
        query = SelectQuery(self, *fields)
        if self._meta.order_by:
            query = query.order_by(*self._meta.order_by)
        return query
    def __call__(self, **kwargs):
        return self.model_class(**kwargs)
if _SortedFieldList is None:
    class _SortedFieldList(object):
        """Container keeping fields ordered by ``_sort_key`` using parallel,
        always-synchronized key and item lists."""
        __slots__ = ('_keys', '_items')
        def __init__(self):
            self._keys = []
            self._items = []
        def __getitem__(self, i):
            return self._items[i]
        def __iter__(self):
            return iter(self._items)
        def __contains__(self, item):
            # Restrict the identity scan to the run of equal sort keys.
            key = item._sort_key
            lo = bisect_left(self._keys, key)
            hi = bisect_right(self._keys, key)
            return item in self._items[lo:hi]
        def index(self, field):
            return self._keys.index(field._sort_key)
        def insert(self, item):
            key = item._sort_key
            position = bisect_left(self._keys, key)
            self._keys.insert(position, key)
            self._items.insert(position, item)
        def remove(self, item):
            position = self.index(item)
            del self._items[position]
            del self._keys[position]
class DoesNotExist(Exception):
    """Raised when a query that expects a row finds none."""
# Fallback database bound to models that do not configure one explicitly;
# None when the sqlite3 driver is unavailable.
default_database = SqliteDatabase('peewee.db') if sqlite3 else None
class ModelOptions(object):
    """Per-model metadata container, exposed to models as ``Model._meta``.

    Tracks the model's fields (with several parallel lookup structures that
    must stay synchronized), default values, database binding, table naming
    and foreign-key relations.
    """
    def __init__(self, cls, database=None, db_table=None, db_table_func=None,
                 indexes=None, order_by=None, primary_key=None,
                 table_alias=None, constraints=None, schema=None,
                 validate_backrefs=True, only_save_dirty=False, **kwargs):
        self.model_class = cls
        self.name = cls.__name__.lower()
        self.fields = {}
        self.columns = {}
        self.defaults = {}
        # Default-value bookkeeping, split by callable vs. static defaults.
        self._default_by_name = {}
        self._default_dict = {}
        self._default_callables = {}
        self._default_callable_list = []
        self._sorted_field_list = _SortedFieldList()
        self.sorted_fields = []
        self.sorted_field_names = []
        self.database = database or default_database
        self.db_table = db_table
        self.db_table_func = db_table_func
        self.indexes = list(indexes or [])
        self.order_by = order_by
        self.primary_key = primary_key
        self.table_alias = table_alias
        self.constraints = constraints
        self.schema = schema
        self.validate_backrefs = validate_backrefs
        self.only_save_dirty = only_save_dirty
        self.auto_increment = None
        self.composite_key = False
        self.rel = {}
        self.reverse_rel = {}
        # Unrecognized Meta attributes are set verbatim and remembered so
        # they can be inherited/copied later.
        for key, value in kwargs.items():
            setattr(self, key, value)
        self._additional_keys = set(kwargs.keys())
        if self.db_table_func and not self.db_table:
            self.db_table = self.db_table_func(cls)
    def prepared(self):
        # Normalize order_by entries (strings like '-name' or Field objects)
        # into explicit ascending/descending field expressions.
        if self.order_by:
            norm_order_by = []
            for item in self.order_by:
                if isinstance(item, Field):
                    prefix = '-' if item._ordering == 'DESC' else ''
                    item = prefix + item.name
                field = self.fields[item.lstrip('-')]
                if item.startswith('-'):
                    norm_order_by.append(field.desc())
                else:
                    norm_order_by.append(field.asc())
            self.order_by = norm_order_by
    def add_field(self, field):
        # Replace any same-named field first, then update every lookup
        # structure in lockstep.
        self.remove_field(field.name)
        self.fields[field.name] = field
        self.columns[field.db_column] = field
        self._sorted_field_list.insert(field)
        self.sorted_fields = list(self._sorted_field_list)
        self.sorted_field_names = [f.name for f in self.sorted_fields]
        if field.default is not None:
            self.defaults[field] = field.default
            if callable(field.default):
                self._default_callables[field] = field.default
                self._default_callable_list.append((field.name, field.default))
            else:
                self._default_dict[field] = field.default
                self._default_by_name[field.name] = field.default
    def remove_field(self, field_name):
        # No-op when the field is unknown; otherwise undo everything
        # add_field() did for it.
        if field_name not in self.fields:
            return
        original = self.fields.pop(field_name)
        del self.columns[original.db_column]
        self._sorted_field_list.remove(original)
        self.sorted_fields = list(self._sorted_field_list)
        self.sorted_field_names = [f.name for f in self.sorted_fields]
        if original.default is not None:
            del self.defaults[original]
            if self._default_callables.pop(original, None):
                for i, (name, _) in enumerate(self._default_callable_list):
                    if name == field_name:
                        self._default_callable_list.pop(i)
                        break
            else:
                self._default_dict.pop(original, None)
                self._default_by_name.pop(original.name, None)
    def get_default_dict(self):
        # Static defaults plus callable defaults evaluated now.
        dd = self._default_by_name.copy()
        for field_name, default in self._default_callable_list:
            dd[field_name] = default()
        return dd
    def get_field_index(self, field):
        # Position of `field` in sorted order, or -1 if absent.
        try:
            return self._sorted_field_list.index(field)
        except ValueError:
            return -1
    def get_primary_key_fields(self):
        if self.composite_key:
            return [
                self.fields[field_name]
                for field_name in self.primary_key.field_names]
        return [self.primary_key]
    def rel_for_model(self, model, field_obj=None, multi=False):
        # Find the foreign key(s) on this model that point at `model`,
        # optionally restricted to a specific field (given as a Field
        # instance or as an aliased Node).
        is_field = isinstance(field_obj, Field)
        is_node = not is_field and isinstance(field_obj, Node)
        if multi:
            accum = []
        for field in self.sorted_fields:
            if isinstance(field, ForeignKeyField) and field.rel_model == model:
                is_match = (
                    (field_obj is None) or
                    (is_field and field_obj.name == field.name) or
                    (is_node and field_obj._alias == field.name))
                if is_match:
                    if not multi:
                        return field
                    accum.append(field)
        if multi:
            return accum
    def reverse_rel_for_model(self, model, field_obj=None, multi=False):
        # Foreign keys declared on `model` that point back at this model.
        return model._meta.rel_for_model(self.model_class, field_obj, multi)
    def rel_exists(self, model):
        return self.rel_for_model(model) or self.reverse_rel_for_model(model)
    def related_models(self, backrefs=False):
        # Iterative DFS over foreign-key relations, returning every model
        # reachable from this one (optionally following back-references).
        models = []
        stack = [self.model_class]
        while stack:
            model = stack.pop()
            if model in models:
                continue
            models.append(model)
            for fk in model._meta.rel.values():
                stack.append(fk.rel_model)
            if backrefs:
                for fk in model._meta.reverse_rel.values():
                    stack.append(fk.model_class)
        return models
class BaseModel(type):
inheritable = set([
'constraints', 'database', 'db_table_func', 'indexes', 'order_by',
'primary_key', 'schema', 'validate_backrefs', 'only_save_dirty'])
def __new__(cls, name, bases, attrs):
if not bases:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in | |
"""The module for training ENAS."""
import contextlib
import glob
import math
import os
import numpy as np
import scipy.signal
from tensorboard import TensorBoard
import torch
from torch import nn
import torch.nn.parallel
from torch.autograd import Variable
import models
import utils
logger = utils.get_logger()
def _apply_penalties(extra_out, args):
"""Based on `args`, optionally adds regularization penalty terms for
activation regularization, temporal activation regularization and/or hidden
state norm stabilization.
Args:
extra_out[*]:
dropped: Post-dropout activations.
hiddens: All hidden states for a batch of sequences.
raw: Pre-dropout activations.
Returns:
The penalty term associated with all of the enabled regularizations.
See:
Regularizing and Optimizing LSTM Language Models (Merity et al., 2017)
Regularizing RNNs by Stabilizing Activations (Krueger & Memsevic, 2016)
"""
penalty = 0
# Activation regularization.
if args.activation_regularization:
penalty += (args.activation_regularization_amount *
extra_out['dropped'].pow(2).mean())
# Temporal activation regularization (slowness)
if args.temporal_activation_regularization:
raw = extra_out['raw']
penalty += (args.temporal_activation_regularization_amount *
(raw[1:] - raw[:-1]).pow(2).mean())
# Norm stabilizer regularization
if args.norm_stabilizer_regularization:
penalty += (args.norm_stabilizer_regularization_amount *
(extra_out['hiddens'].norm(dim=-1) -
args.norm_stabilizer_fixed_point).pow(2).mean())
return penalty
def discount(x, amount):
    """Return discounted cumulative sums of ``x`` (discount factor ``amount``).

    Implemented as an IIR filter over the reversed sequence, so
    ``out[i] = x[i] + amount * out[i + 1]``.
    """
    reversed_x = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, -amount], reversed_x, axis=0)
    return filtered[::-1]
def _get_optimizer(name):
if name.lower() == 'sgd':
optim = torch.optim.SGD
elif name.lower() == 'adam':
optim = torch.optim.Adam
return optim
def _get_no_grad_ctx_mgr():
"""Returns a the `torch.no_grad` context manager for PyTorch version >=
0.4, or a no-op context manager otherwise.
"""
if float(torch.__version__[0:3]) >= 0.4:
return torch.no_grad()
return contextlib.suppress()
def _check_abs_max_grad(abs_max_grad, model):
"""Checks `model` for a new largest gradient for this epoch, in order to
track gradient explosions.
"""
finite_grads = [p.grad.data
for p in model.parameters()
if p.grad is not None]
new_max_grad = max([grad.max() for grad in finite_grads])
new_min_grad = min([grad.min() for grad in finite_grads])
new_abs_max_grad = max(new_max_grad, abs(new_min_grad))
if new_abs_max_grad > abs_max_grad:
logger.info(f'abs max grad {abs_max_grad}')
return new_abs_max_grad
return abs_max_grad
class Trainer(object):
"""A class to wrap training code."""
    def __init__(self, args, dataset):
        """Constructor for training algorithm.

        Args:
            args: From command line, picked up by `argparse`.
            dataset: Currently only `data.text.Corpus` is supported.

        Initializes:
            - Data: train, val and test.
            - Model: shared and controller.
            - Inference: optimizers for shared and controller parameters.
            - Criticism: cross-entropy loss for training the shared model.
        """
        self.args = args
        self.controller_step = 0
        self.cuda = args.cuda
        self.dataset = dataset
        self.epoch = 0
        self.shared_step = 0
        self.start_epoch = 0
        # Log which penalty terms (see `_apply_penalties`) are enabled.
        logger.info('regularizing:')
        for regularizer in [('activation regularization',
                             self.args.activation_regularization),
                            ('temporal activation regularization',
                             self.args.temporal_activation_regularization),
                            ('norm stabilizer regularization',
                             self.args.norm_stabilizer_regularization)]:
            if regularizer[1]:
                logger.info(f'{regularizer[0]}')
        self.train_data = utils.batchify(dataset.train,
                                         args.batch_size,
                                         self.cuda)
        # NOTE(brendan): The validation set data is batchified twice
        # separately: once for computing rewards during the Train Controller
        # phase (valid_data, batch size == 64), and once for evaluating ppl
        # over the entire validation set (eval_data, batch size == 1)
        self.valid_data = utils.batchify(dataset.valid,
                                         args.batch_size,
                                         self.cuda)
        self.eval_data = utils.batchify(dataset.valid,
                                        args.test_batch_size,
                                        self.cuda)
        self.test_data = utils.batchify(dataset.test,
                                        args.test_batch_size,
                                        self.cuda)
        self.max_length = self.args.shared_rnn_max_length
        if args.use_tensorboard:
            self.tb = TensorBoard(args.model_dir)
        else:
            self.tb = None
        self.build_model()
        # Optionally resume shared/controller weights from a checkpoint.
        if self.args.load_path:
            self.load_model()
        shared_optimizer = _get_optimizer(self.args.shared_optim)
        controller_optimizer = _get_optimizer(self.args.controller_optim)
        self.shared_optim = shared_optimizer(
            self.shared.parameters(),
            lr=self.shared_lr,
            weight_decay=self.args.shared_l2_reg)
        self.controller_optim = controller_optimizer(
            self.controller.parameters(),
            lr=self.args.controller_lr)
        # Cross-entropy over next-token predictions trains the shared model.
        self.ce = nn.CrossEntropyLoss()
def build_model(self):
"""Creates and initializes the shared and controller models."""
if self.args.network_type == 'rnn':
self.shared = models.RNN(self.args, self.dataset)
elif self.args.network_type == 'cnn':
self.shared = models.CNN(self.args, self.dataset)
else:
raise NotImplementedError(f'Network type '
f'`{self.args.network_type}` is not '
f'defined')
self.controller = models.Controller(self.args)
if self.args.num_gpu == 1:
self.shared.cuda()
self.controller.cuda()
elif self.args.num_gpu > 1:
raise NotImplementedError('`num_gpu > 1` is in progress')
    def train(self, single=False):
        """Cycles through alternately training the shared parameters and the
        controller, as described in Section 2.2, Training ENAS and Deriving
        Architectures, of the paper.

        From the paper (for Penn Treebank):
        - In the first phase, shared parameters omega are trained for 400
          steps, each on a minibatch of 64 examples.
        - In the second phase, the controller's parameters are trained for
          2000 steps.

        Args:
            single (bool): If True it won't train the controller and use the
                same dag instead of derive().
        """
        dag = utils.load_dag(self.args) if single else None
        # Optional warm-up: extra shared-model steps, then one controller pass.
        if self.args.shared_initial_step > 0:
            self.train_shared(self.args.shared_initial_step)
            self.train_controller()
        for self.epoch in range(self.start_epoch, self.args.max_epoch):
            # 1. Training the shared parameters omega of the child models
            self.train_shared(dag=dag)
            # 2. Training the controller parameters theta
            if not single:
                self.train_controller()
            if self.epoch % self.args.save_epoch == 0:
                # Evaluate the best (derived or fixed) architecture without
                # building gradient graphs, then checkpoint.
                with _get_no_grad_ctx_mgr():
                    best_dag = dag if dag else self.derive()
                    self.evaluate(self.eval_data,
                                  best_dag,
                                  'val_best',
                                  max_num=self.args.batch_size*100)
                self.save_model()
            if self.epoch >= self.args.shared_decay_after:
                # Decay the shared model's learning rate each epoch from here.
                utils.update_lr(self.shared_optim, self.shared_lr)
def get_loss(self, inputs, targets, hidden, dags):
"""Computes the loss for the same batch for M models.
This amounts to an estimate of the loss, which is turned into an
estimate for the gradients of the shared model.
"""
if not isinstance(dags, list):
dags = [dags]
loss = 0
for dag in dags:
output, hidden, extra_out = self.shared(inputs, dag, hidden=hidden)
output_flat = output.view(-1, self.dataset.num_tokens)
sample_loss = (self.ce(output_flat, targets) /
self.args.shared_num_sample)
loss += sample_loss
assert len(dags) == 1, 'there are multiple `hidden` for multple `dags`'
return loss, hidden, extra_out
    def train_shared(self, max_step=None, dag=None):
        """Train the language model for 400 steps of minibatches of 64
        examples.

        Args:
            max_step: Used to run extra training steps as a warm-up.
            dag: If not None, is used instead of calling sample().

        BPTT is truncated at 35 timesteps.

        For each weight update, gradients are estimated by sampling M models
        from the fixed controller policy, and averaging their gradients
        computed on a batch of training data.
        """
        model = self.shared
        model.train()
        self.controller.eval()
        hidden = self.shared.init_hidden(self.args.batch_size)
        if max_step is None:
            max_step = self.args.shared_max_step
        else:
            max_step = min(self.args.shared_max_step, max_step)
        # Running maxima used to surface exploding hidden norms/gradients.
        abs_max_grad = 0
        abs_max_hidden_norm = 0
        step = 0
        raw_total_loss = 0
        total_loss = 0
        train_idx = 0
        # TODO(brendan): Why - 1 - 1?
        while train_idx < self.train_data.size(0) - 1 - 1:
            if step > max_step:
                break
            # Sample architectures from the fixed controller policy, unless a
            # fixed dag was provided.
            dags = dag if dag else self.controller.sample(
                self.args.shared_num_sample)
            inputs, targets = self.get_batch(self.train_data,
                                             train_idx,
                                             self.max_length)
            loss, hidden, extra_out = self.get_loss(inputs,
                                                    targets,
                                                    hidden,
                                                    dags)
            # Truncated BPTT: sever the graph between consecutive batches.
            hidden.detach_()
            # Track loss before penalties separately for logging.
            raw_total_loss += loss.data
            loss += _apply_penalties(extra_out, self.args)
            # update
            self.shared_optim.zero_grad()
            loss.backward()
            h1tohT = extra_out['hiddens']
            new_abs_max_hidden_norm = utils.to_item(
                h1tohT.norm(dim=-1).data.max())
            if new_abs_max_hidden_norm > abs_max_hidden_norm:
                abs_max_hidden_norm = new_abs_max_hidden_norm
                logger.info(f'max hidden {abs_max_hidden_norm}')
            abs_max_grad = _check_abs_max_grad(abs_max_grad, model)
            # Clip gradients before stepping the optimizer.
            torch.nn.utils.clip_grad_norm(model.parameters(),
                                          self.args.shared_grad_clip)
            self.shared_optim.step()
            total_loss += loss.data
            if ((step % self.args.log_step) == 0) and (step > 0):
                self._summarize_shared_train(total_loss, raw_total_loss)
                raw_total_loss = 0
                total_loss = 0
            step += 1
            self.shared_step += 1
            train_idx += self.max_length
def get_reward(self, dag, entropies, hidden, valid_idx=0):
"""Computes the perplexity of a single sampled model on a minibatch of
validation data.
"""
if not isinstance(entropies, np.ndarray):
entropies = entropies.data.cpu().numpy()
inputs, targets = self.get_batch(self.valid_data,
valid_idx,
self.max_length,
volatile=True)
valid_loss, hidden, _ = self.get_loss(inputs, targets, hidden, dag)
valid_loss = utils.to_item(valid_loss.data)
valid_ppl = math.exp(valid_loss)
# TODO: we don't know reward_c
if self.args.ppl_square:
# TODO: but we do know reward_c=80 in the previous paper
R = self.args.reward_c / valid_ppl ** 2
else:
R = self.args.reward_c / valid_ppl
if self.args.entropy_mode == 'reward':
rewards = R + self.args.entropy_coeff * entropies
elif self.args.entropy_mode == 'regularizer':
rewards = R * np.ones_like(entropies)
else:
raise NotImplementedError(f'Unkown entropy mode: {self.args.entropy_mode}')
return rewards, hidden
def train_controller(self):
"""Fixes the shared parameters and updates the controller parameters.
The controller is updated with a score function gradient estimator
(i.e., REINFORCE), with the reward being c/valid_ppl, where valid_ppl
is computed on a minibatch of validation data.
A moving average baseline is used.
The controller is trained for 2000 steps per epoch (i.e.,
first (Train Shared) phase -> second (Train Controller) phase).
"""
model = self.controller
model.train()
# TODO(brendan): Why can't we call shared.eval() here? Leads to loss
# being uniformly zero for the controller.
# self.shared.eval()
avg_reward_base = None
baseline = None
adv_history = []
entropy_history = []
reward_history = []
hidden = self.shared.init_hidden(self.args.batch_size)
total_loss = 0
valid_idx = 0
for step in range(self.args.controller_max_step):
# sample models
dags, log_probs, entropies = self.controller.sample(
with_details=True)
# calculate reward
np_entropies = entropies.data.cpu().numpy()
# NOTE(brendan): No gradients should be backpropagated to the
# shared model during controller training, obviously.
with _get_no_grad_ctx_mgr():
rewards, hidden = self.get_reward(dags,
np_entropies,
hidden,
valid_idx)
# discount
if 1 > self.args.discount > 0:
rewards = discount(rewards, self.args.discount)
reward_history.extend(rewards)
entropy_history.extend(np_entropies)
# moving average baseline
if baseline is None:
baseline = rewards
else:
decay = self.args.ema_baseline_decay
baseline = decay * baseline + (1 - decay) * rewards
adv = rewards - baseline
adv_history.extend(adv)
# policy | |
# <gh_stars>1-10
import json
import logging
import os
import psutil
import time
from dcicutils.misc_utils import environ_bool, PRINT, ignored
from functools import lru_cache
from pkg_resources import resource_filename
from pyramid.events import BeforeRender, subscriber
from pyramid.httpexceptions import (
HTTPMovedPermanently,
HTTPPreconditionFailed,
HTTPUnauthorized,
HTTPUnsupportedMediaType,
HTTPNotAcceptable,
HTTPServerError
)
from pyramid.response import Response
from pyramid.settings import asbool
from pyramid.threadlocal import manager
from pyramid.traversal import split_path_info, _join_path_tuple
from subprocess_middleware.worker import TransformWorker
from urllib.parse import urlencode, urlparse
from webob.cookies import Cookie
from .util import content_type_allowed
log = logging.getLogger(__name__)
def includeme(config):
    """
    Register this module's tweens and subscribers with the Pyramid config.

    Can get tween ordering by executing the following on command-line from root dir:
    `bin/ptween development.ini`

    We could alternatively put these in the base.ini file explicitly.
    See: https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/hooks.html#registering-tweens

    --- IMPORTANT ---
    The `handler` arg of 'tween factory' functions refers to the subsequent tween to be called.
    This means that if handler(request) is called, then the downstream tweens are acted upon it,
    until response is returned. It's an ONION depending on where handler(request) called within a tween
    and NOT necessarily an explicit ordering (unless `return handler(request)` is last line of each tween).

    A request goes down the tween chain from INGRESS to MAIN and then back up to INGRESS.
    `handler(request)` calls the subsequent tween and returns complete tweened-from-there response.

    Tween Chain as of 05/23/2019:

        Position    Name
        --------    ----
        -           INGRESS
        0           snovault.stats.stats_tween_factory
        1           .renderers.validate_request_tween_factory
        2           .renderers.render_page_html_tween_factory
        3           .renderers.set_response_headers_tween_factory
        4           pyramid_tm.tm_tween_factory
        5           .renderers.security_tween_factory
        6           pyramid.tweens.excview_tween_factory
        -           MAIN
    """
    # Each add_tween call places its tween `under` the previous one, building
    # the chain documented above.
    config.add_tween('.renderers.validate_request_tween_factory', under='snovault.stats.stats_tween_factory')
    # DISABLED - .add_tween('.renderers.remove_expired_session_cookies_tween_factory',
    #            under='.renderers.validate_request_tween_factory')
    config.add_tween('.renderers.render_page_html_tween_factory', under='.renderers.validate_request_tween_factory')
    # The above tweens, when using response (= `handler(request)`) act on the _transformed_ response
    # (containing HTML body).
    # The below tweens run _before_ the JS rendering. Responses in these tweens have not been transformed to HTML yet.
    config.add_tween('.renderers.set_response_headers_tween_factory', under='.renderers.render_page_html_tween_factory')
    # If this isn't placed under 'pyramid_tm.tm_tween_factory' (e.g. under resp headers or something)
    # then the headers aren't preserved or available in server-side render or response.
    config.add_tween('.renderers.security_tween_factory', under='pyramid_tm.tm_tween_factory')
    # Register @subscriber-decorated functions defined in this module.
    config.scan(__name__)
def validate_request_tween_factory(handler, registry):
    """
    Updates request.environ's REQUEST_METHOD to be X_REQUEST_METHOD if present.
    Asserts that if a POST (or similar) request is in application/json format,
    with exception for /metadata/* endpoints.

    Apache config:
        SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD
    """
    ignored(registry)

    def validate_request_tween(request):
        # Fix Request method changed by mod_wsgi.
        # See: https://github.com/GrahamDumpleton/mod_wsgi/issues/2
        environ = request.environ
        if 'X_REQUEST_METHOD' in environ:
            environ['REQUEST_METHOD'] = environ['X_REQUEST_METHOD']
        # GET/HEAD (including page text/html requests) skip the content-type
        # check entirely; other methods must carry an allowed content type.
        if request.method in ('GET', 'HEAD') or content_type_allowed(request):
            return handler(request)
        detail = "Request content type %s is not 'application/json'" % request.content_type
        raise HTTPUnsupportedMediaType(detail)

    return validate_request_tween
def security_tween_factory(handler, registry):
    """Tween enforcing authentication state and decorating responses with
    JWT/user-info headers."""
    ignored(registry)

    def security_tween(request):
        """
        Executed inside/prior-to any page transforms and inside/prior-to
        `pyramid_tm.tm_tween_factory` (transaction management tween).
        This is because request.authenticated_userid as well as `request.user_info`
        property getters _may_ access Postgres DB to get user properties (if not yet
        indexed in ES) and all DB transactions must complete before transaction
        management tween is completed.
        """
        expected_user = request.headers.get('X-If-Match-User')
        if expected_user is not None:  # Not sure when this is the case
            if request.authenticated_userid != 'mailto.' + expected_user:
                detail = 'X-If-Match-User does not match'
                raise HTTPPreconditionFailed(detail)
        if request.authorization is not None or asbool(request.headers.get('X-Auth-Challenge', False)):
            # wget may only send credentials following a challenge response.
            if not request.authenticated_userid:
                if not hasattr(request, 'auth0_expired'):
                    # Not a "Bearer" JWT token in Auth header. Or other error.
                    # We send a 401 "Unauthorized" exception if authentication issue or expiration.
                    # We send a 403 "Forbidden" (TODO: assert) if authorized correctly but no view permission
                    raise HTTPUnauthorized(
                        title="No Access",
                        comment="Invalid Authorization header or Auth Challenge response.",
                        headers={
                            'WWW-Authenticate': ("Bearer realm=\"{}\"; Basic realm=\"{}\""
                                                 .format(request.domain, request.domain))
                        }
                    )
        if hasattr(request, 'auth0_expired'):
            # Add some security-related headers on the up-swing
            response = handler(request)
            if request.auth0_expired:
                # return response
                #
                # If have the attribute and it is true, then our session has expired.
                # This is true for both AJAX requests (which have request.authorization) & browser page
                # requests (which have cookie); both cases handled in authentication.py
                # Informs client or libs/react-middleware.js serverside render of expired token
                # to set logged-out state in front-end in either doc request or xhr request & set appropriate alerts
                response.headers['X-Request-JWT'] = "expired"
                # Especially for initial document requests by browser, but also desired for AJAX and other requests,
                # unset jwtToken cookie so initial client-side React render has App(instance).state.session = false
                # to be synced w/ server-side
                response.set_cookie(
                    name='jwtToken',
                    value=None,
                    domain=request.domain,
                    max_age=0,
                    path='/',
                    overwrite=True
                )
                # = Same as response.delete_cookie(..)
                response.status_code = 401
                response.headers['WWW-Authenticate'] = (
                    "Bearer realm=\"{}\", title=\"Session Expired\"; Basic realm=\"{}\""
                    .format(request.domain, request.domain)
                )
            else:
                # We have JWT and it's not expired. Add 'X-Request-JWT' & 'X-User-Info' header.
                # For performance, only do it if should transform to HTML as is not needed on every request.
                if should_transform(request, response):
                    login = request.authenticated_userid
                    if login:
                        authtype, email = login.split('.', 1)
                        if authtype == 'auth0':
                            # This header is parsed in renderer.js, or, more accurately,
                            # by libs/react-middleware.js which is imported by server.js and compiled into
                            # renderer.js. Is used to get access to User Info on initial web page render.
                            response.headers['X-Request-JWT'] = request.cookies.get('jwtToken', '')
                            user_info = request.user_info.copy()  # Re-ified property set in authentication.py
                            response.headers['X-User-Info'] = json.dumps(user_info)
                        else:
                            response.headers['X-Request-JWT'] = "null"
            return response
        # No auth0 state on the request: pass straight through.
        return handler(request)

        # This was commented out when we introduced JWT authentication
        # Theoretically we mitigate CSRF requests now by grabbing JWT for transactional
        # requests from Authorization header which acts like a CSRF token.
        # See authentication.py - get_jwt()
        # Alex notes that we do not use request.session so this is probably very old. -kmp 4-Mar-2021
        # token = request.headers.get('X-CSRF-Token')
        # if token is not None:
        #     # Avoid dirtying the session and adding a Set-Cookie header
        #     # XXX Should consider if this is a good idea or not and timeouts
        #     if token == dict.get(request.session, '_csrft_', None):
        #         return handler(request)
        #     raise CSRFTokenError('Incorrect CSRF token')
        # raise CSRFTokenError('Missing CSRF token')

    return security_tween
def remove_expired_session_cookies_tween_factory(handler, registry):
    """
    CURRENTLY DISABLED
    Original purpose of this was to remove expired (session?) cookies.
    See: https://github.com/ENCODE-DCC/encoded/commit/75854803c99e5044a6a33aedb3a79d750481b6cd#diff-bc19a9793a1b3b4870cff50e7c7c9bd1R135
    We disable it for now via removing from tween chain as are using JWT tokens and handling
    their removal in security_tween_factory & authentication.py as well as client-side
    (upon "Logout" action). If needed for some reason, can re-enable.
    """  # noQA - not going to break the long URL line above
    ignored(registry)
    skip_paths = {
        '/favicon.ico',
    }

    def remove_expired_session_cookies_tween(request):
        # Static assets and ignored paths never touch session cookies.
        if request.path in skip_paths or request.path.startswith('/static/'):
            return handler(request)
        session = request.session
        response = handler(request)
        # If the response itself already (re)sets the session cookie, leave it alone.
        # (Observed to usually be empty; request.cookies may still carry some.)
        set_cookie_headers = response.headers.getall('Set-Cookie')
        if set_cookie_headers:
            jar = Cookie()
            for raw_header in set_cookie_headers:
                jar.load(raw_header)
            if session._cookie_name in jar:
                return response
        # Otherwise explicitly expire the session cookie on the client.
        response.delete_cookie(
            session._cookie_name,
            path=session._cookie_path,
            domain=session._cookie_domain,
        )
        return response

    return remove_expired_session_cookies_tween
def set_response_headers_tween_factory(handler, registry):
    """Tween factory that stamps extra headers onto every response."""
    ignored(registry)

    def set_response_headers_tween(request):
        resp = handler(request)
        # Echo the effective URL back so clients can detect redirects/rewrites.
        resp.headers['X-Request-URL'] = request.url
        # Merge in Vary entries; the setter converts the set back to a tuple.
        # See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Vary
        merged_vary = set((resp.vary or ()) + ('Accept', 'Authorization'))
        resp.vary = merged_vary
        return resp

    return set_response_headers_tween
@subscriber(BeforeRender)
def canonical_redirect(event):
request = event['request']
# Ignore subrequests
if len(manager.stack) > 1:
return
if request.method not in ('GET', 'HEAD'):
return
if request.response.status_int != 200:
return
if not request.environ.get('encoded.canonical_redirect', True):
return
if request.path_info == '/':
return
if not isinstance(event.rendering_val, dict):
return
canonical = event.rendering_val.get('@id', None)
if canonical is None:
return
canonical_path, _, canonical_qs = canonical.partition('?')
request_path = _join_path_tuple(('',) + split_path_info(request.path_info))
if (request_path == canonical_path.rstrip('/') and
request.path_info.endswith('/') == canonical_path.endswith('/') and
(canonical_qs in ('', request.query_string))):
return
if '/@@' in request.path_info:
return
qs = canonical_qs or request.query_string
# add redirect information to the query string, but not for the routes specified below
if not any(route in canonical_path for route in ['/search/', '/browse/', '/metadata/']):
redir_qs = (qs + '&' if qs else '') + urlencode([('redirected_from', request.path_info)])
else:
redir_qs = qs
location = canonical_path + ('?' if redir_qs else '') + redir_qs
raise HTTPMovedPermanently(location=location, | |
perpendicular to edge
if ((state.constrainedDir == 'y') and (abs(base.direct.dr.mouseX) > 0.9)):
deltaX = 0
deltaY = base.direct.dr.mouseDeltaY
elif ((state.constrainedDir == 'x') and (abs(base.direct.dr.mouseY) > 0.9)):
deltaX = base.direct.dr.mouseDeltaX
deltaY = 0
else:
deltaX = base.direct.dr.mouseDeltaX
deltaY = base.direct.dr.mouseDeltaY
if base.direct.fShift:
base.direct.camera.setHpr(base.direct.camera,
(deltaX * base.direct.dr.fovH),
(-deltaY * base.direct.dr.fovV),
0.0)
if (self.lockRoll == True):
# flatten roll
base.direct.camera.setR(0)
self.camManipRef.setPos(self.coaMarkerPos)
self.camManipRef.setHpr(base.direct.camera, ZERO_POINT)
else:
if base.direct.camera.getPos().getZ() >=0 or not self.switchDirBelowZero:
dirX = -1
else:
dirX = 1
wrt = base.direct.camera.getTransform(self.camManipRef)
self.camManipRef.setHpr(self.camManipRef,
(dirX * deltaX * 180.0),
(deltaY * 180.0),
0.0)
if (self.lockRoll == True):
# flatten roll
self.camManipRef.setR(0)
base.direct.camera.setTransform(self.camManipRef, wrt)
return Task.cont
def spawnMouseRollTask(self):
    """Start the interactive camera-roll task.

    Removes any running 'manipulateCamera' task, parks camManipRef at the
    COA marker position (oriented like the camera), then schedules
    mouseRollTask with the state it needs: the marker's screen-space
    center, the initial crank angle, and the camera/camManipRef offset.
    """
    # Kill any existing tasks
    taskMgr.remove('manipulateCamera')
    # Set at markers position in render coordinates
    self.camManipRef.setPos(self.coaMarkerPos)
    self.camManipRef.setHpr(base.direct.camera, ZERO_POINT)
    t = Task.Task(self.mouseRollTask)
    # Screen-space pivot the roll "crank" rotates around
    t.coaCenter = getScreenXY(self.coaMarker)
    t.lastAngle = getCrankAngle(t.coaCenter)
    # Store the camera/manipRef offset transform
    t.wrt = base.direct.camera.getTransform(self.camManipRef)
    taskMgr.add(t, 'manipulateCamera')
def mouseRollTask(self, state):
    """Per-frame task: roll the camera by the change in mouse crank angle."""
    cachedOffset = state.wrt
    currentAngle = getCrankAngle(state.coaCenter)
    # Apply only the incremental rotation since the previous frame.
    self.camManipRef.setHpr(self.camManipRef, 0, 0, currentAngle - state.lastAngle)
    state.lastAngle = currentAngle
    if self.lockRoll == True:
        # flatten roll
        self.camManipRef.setR(0)
    base.direct.camera.setTransform(self.camManipRef, cachedOffset)
    return Task.cont
def lockCOA(self):
    """Lock the center of action so computeCOA keeps the current point."""
    self.fLockCOA = 1
    base.direct.message('COA Lock On')
def unlockCOA(self):
    """Release the center-of-action lock (computeCOA follows hits again)."""
    self.fLockCOA = 0
    base.direct.message('COA Lock Off')
def toggleCOALock(self):
    """Flip the COA lock flag and announce the new state."""
    self.fLockCOA = 1 - self.fLockCOA
    status = 'COA Lock On' if self.fLockCOA else 'COA Lock Off'
    base.direct.message(status)
def pickNextCOA(self):
    """Cycle through collision handler entries, skipping hits under the camera."""
    if self.cqEntries:
        # Take the head entry, then rotate it to the back of the queue
        entry = self.cqEntries[0]
        self.cqEntries = self.cqEntries[1:] + self.cqEntries[:1]
        # Skip entries whose node lives under the camera itself
        nodePath = entry.getIntoNodePath()
        if base.direct.camera not in nodePath.getAncestors():
            # Compute new hit point
            hitPt = entry.getSurfacePoint(entry.getFromNodePath())
            # Move coa marker to new point
            self.updateCoa(hitPt, ref = self.coaMarkerRef)
        else:
            # Drop the offending entry (it was just rotated to the end)
            # and recurse to try the next candidate
            self.cqEntries = self.cqEntries[:-1]
            self.pickNextCOA()
def computeCOA(self, entry):
    """Derive the center of action (COA) from a collision entry.

    Three cases:
      * COA locked: reuse the marker's current camera-space position.
      * Valid hit point: use it, unless its distance falls outside
        (1.1 * near, far) -- then fall back to the existing marker point.
      * No hit: place the COA straight ahead of the camera at a distance
        of 10 ** nullHitPointCount (the count cycles 0..6 on misses).
    Finally a too-near COA is clamped to 100 units, and the marker updated.
    """
    coa = Point3(0)
    dr = base.direct.drList.getCurrentDr()
    if self.fLockCOA:
        # COA is locked: use the marker's existing camera-space point
        coa.assign(self.coaMarker.getPos(base.direct.camera))
        # Reset hit point count
        self.nullHitPointCount = 0
    elif entry:
        # Got a hit point (hit point is in camera coordinates)
        # Set center of action
        hitPt = entry.getSurfacePoint(entry.getFromNodePath())
        hitPtDist = Vec3(hitPt).length()
        coa.assign(hitPt)
        # Handle case of bad coa point (too close or too far)
        if ((hitPtDist < (1.1 * dr.near)) or
            (hitPtDist > dr.far)):
            # Just use existing point
            coa.assign(self.coaMarker.getPos(base.direct.camera))
            # Reset hit point count
            self.nullHitPointCount = 0
    else:
        # Increment null hit point count; % 7 caps the distance at 10^6
        self.nullHitPointCount = (self.nullHitPointCount + 1) % 7
        # No COA lock and no intersection point:
        # use a point out in front of camera, moving further away on
        # each successive null hit point
        # MRM: Would be nice to be able to control this
        # At least display it
        dist = pow(10.0, self.nullHitPointCount)
        base.direct.message('COA Distance: ' + repr(dist))
        coa.set(0, dist, 0)
    # Compute COA Dist
    coaDist = Vec3(coa - ZERO_POINT).length()
    if coaDist < (1.1 * dr.near):
        # Degenerate/too-near COA: push it out to a safe default
        coa.set(0, 100, 0)
        coaDist = 100
    # Update coa and marker
    self.updateCoa(coa, coaDist = coaDist)
def updateCoa(self, ref2point, coaDist = None, ref = None):
    """Move the center of action (and its marker) to a new point.

    Parameters:
        ref2point: indexable 3-component point (in 'ref' space) for the new COA.
        coaDist: distance used to scale the marker; recomputed from the
            point when falsy (None or 0).
        ref: node whose space 'ref2point' is expressed in; defaults to the
            current display region's camera.
    """
    self.coa.set(ref2point[0], ref2point[1], ref2point[2])
    if not coaDist:
        coaDist = Vec3(self.coa - ZERO_POINT).length()
    # Place the marker in render space.
    # Fixed: use 'is None' rather than '== None' -- NodePath overloads
    # equality, so an identity test is the only reliable default check.
    if ref is None:
        # KEH: use the current display region
        # ref = base.cam
        ref = base.direct.drList.getCurrentDr().cam
    self.coaMarker.setPos(ref, self.coa)
    pos = self.coaMarker.getPos()
    # Re-set pos while zeroing hpr and normalizing scale
    self.coaMarker.setPosHprScale(pos, Vec3(0), Vec3(1))
    # Resize it
    self.updateCoaMarkerSize(coaDist)
    # Record marker pos in render space
    self.coaMarkerPos.assign(self.coaMarker.getPos())
def updateCoaMarkerSizeOnDeath(self, state):
    """Task uponDeath hook: refresh the marker size, ignoring task state."""
    # Needed because tasks pass in state as first arg
    self.updateCoaMarkerSize()
def updateCoaMarkerSize(self, coaDist = None):
    """Scale the COA marker to look constant-size on screen, then flash it.

    coaDist defaults (when falsy) to the marker's current distance from
    the camera. The marker is briefly unstashed and faded from opaque red
    to transparent over 1.5s, then stashed again.
    """
    if not coaDist:
        coaDist = Vec3(self.coaMarker.getPos(base.direct.camera)).length()
    # Nominal size based on default 30 degree vertical FOV
    # Need to adjust size based on distance and current FOV
    sf = COA_MARKER_SF * coaDist * (base.direct.drList.getCurrentDr().fovV/30.0)
    if sf == 0.0:
        # Avoid a degenerate zero scale
        sf = 0.1
    self.coaMarker.setScale(sf)
    # Lerp color to fade out; finish any flash already in progress first
    if self.coaMarkerColorIval:
        self.coaMarkerColorIval.finish()
    self.coaMarkerColorIval = Sequence(
        Func(self.coaMarker.unstash),
        self.coaMarker.colorInterval(1.5, Vec4(1, 0, 0, 0),
                                     startColor = Vec4(1, 0, 0, 1),
                                     blendType = 'easeInOut'),
        Func(self.coaMarker.stash)
    )
    self.coaMarkerColorIval.start()
def homeCam(self):
    """Reset the camera to render's origin with an identity transform."""
    # Record undo point
    base.direct.pushUndo([base.direct.camera])
    base.direct.camera.reparentTo(render)
    base.direct.camera.clearMat()
    # Resize coa marker
    self.updateCoaMarkerSize()
def uprightCam(self):
    """Animate pitch and roll to zero while keeping the current heading."""
    taskMgr.remove('manipulateCamera')
    # Record undo point
    base.direct.pushUndo([base.direct.camera])
    # Pitch camera till upright
    currH = base.direct.camera.getH()
    base.direct.camera.lerpHpr(currH, 0, 0,
                               CAM_MOVE_DURATION,
                               other = render,
                               blendType = 'easeInOut',
                               task = 'manipulateCamera')
def orbitUprightCam(self):
    """Orbit the camera about the COA marker until its up axis matches world Z."""
    taskMgr.remove('manipulateCamera')
    # Record undo point
    base.direct.pushUndo([base.direct.camera])
    # Transform camera z axis to render space
    mCam2Render = Mat4(Mat4.identMat())  # [gjeon] fixed to give required argument
    mCam2Render.assign(base.direct.camera.getMat(render))
    zAxis = Vec3(mCam2Render.xformVec(Z_AXIS))
    zAxis.normalize()
    # Compute rotation angle needed to upright cam
    # (CLAMP guards acos against floating-point overshoot)
    orbitAngle = rad2Deg(math.acos(CLAMP(zAxis.dot(Z_AXIS), -1, 1)))
    # Check angle
    if orbitAngle < 0.1:
        # Already upright
        return
    # Compute orthogonal axis of rotation
    rotAxis = Vec3(zAxis.cross(Z_AXIS))
    rotAxis.normalize()
    # Find angle between rot Axis and render X_AXIS
    rotAngle = rad2Deg(math.acos(CLAMP(rotAxis.dot(X_AXIS), -1, 1)))
    # Determine sign of rotation angle (acos discards it)
    if rotAxis[1] < 0:
        rotAngle *= -1
    # Position ref CS at coa marker with xaxis aligned with rot axis
    self.camManipRef.setPos(self.coaMarker, Vec3(0))
    self.camManipRef.setHpr(render, rotAngle, 0, 0)
    # Reparent Cam to ref Coordinate system
    parent = base.direct.camera.getParent()
    base.direct.camera.wrtReparentTo(self.camManipRef)
    # Rotate ref CS to final orientation
    t = self.camManipRef.lerpHpr(rotAngle, orbitAngle, 0,
                                 CAM_MOVE_DURATION,
                                 other = render,
                                 blendType = 'easeInOut',
                                 task = 'manipulateCamera')
    # Upon death, reparent Cam to parent
    t.parent = parent
    t.setUponDeath(self.reparentCam)
def centerCam(self):
    """Center the COA marker in the view (animated; delegates to centerCamIn)."""
    self.centerCamIn(1.0)
def centerCamNow(self):
    """Request an immediate centering (passes t=0 to centerCamIn)."""
    self.centerCamIn(0.)
def centerCamIn(self, t):
    """Slide the camera so the COA marker ends up centered in the view.

    NOTE(review): parameter 't' is currently unused -- the lerp below
    always runs for CAM_MOVE_DURATION, and 't' is immediately rebound to
    the lerp task handle. Confirm whether 't' was meant to be the
    animation duration (centerCamNow passes 0. expecting an instant move).
    """
    taskMgr.remove('manipulateCamera')
    # Record undo point
    base.direct.pushUndo([base.direct.camera])
    # Determine marker location
    markerToCam = self.coaMarker.getPos(base.direct.camera)
    dist = Vec3(markerToCam - ZERO_POINT).length()
    # Target: same distance, but straight down the camera's Y axis
    scaledCenterVec = Y_AXIS * dist
    delta = markerToCam - scaledCenterVec
    self.camManipRef.setPosHpr(base.direct.camera, Point3(0), Point3(0))
    t = base.direct.camera.lerpPos(Point3(delta),
                                   CAM_MOVE_DURATION,
                                   other = self.camManipRef,
                                   blendType = 'easeInOut',
                                   task = 'manipulateCamera')
    t.setUponDeath(self.updateCoaMarkerSizeOnDeath)
def zoomCam(self, zoomFactor, t):
    """Dolly the camera toward/away from the COA marker by zoomFactor.

    NOTE(review): parameter 't' is unused -- the move always takes
    CAM_MOVE_DURATION and 't' is rebound to the lerp task. Confirm intent.
    """
    taskMgr.remove('manipulateCamera')
    # Record undo point
    base.direct.pushUndo([base.direct.camera])
    # Find a point zoom factor times the current separation
    # of the widget and cam
    zoomPtToCam = self.coaMarker.getPos(base.direct.camera) * zoomFactor
    # Put a target nodePath there
    self.camManipRef.setPos(base.direct.camera, zoomPtToCam)
    # Move to that point
    t = base.direct.camera.lerpPos(ZERO_POINT,
                                   CAM_MOVE_DURATION,
                                   other = self.camManipRef,
                                   blendType = 'easeInOut',
                                   task = 'manipulateCamera')
    t.setUponDeath(self.updateCoaMarkerSizeOnDeath)
def spawnMoveToView(self, view):
    """Fly the camera to one of the stock views around the COA marker.

    Views 1-7 select fixed heading/pitch offsets; view 8 re-uses the last
    view while advancing an extra 90-degree roll.
    """
    # Kill any existing tasks
    taskMgr.remove('manipulateCamera')
    # Record undo point
    base.direct.pushUndo([base.direct.camera])
    # Calc hprOffset
    hprOffset = VBase3()
    if view == 8:
        # Try the next roll angle, but keep the previously chosen view
        self.orthoViewRoll = (self.orthoViewRoll + 90.0) % 360.0
        view = self.lastView
    else:
        self.orthoViewRoll = 0.0
    # Heading/pitch offsets for each stock view
    viewOffsets = {
        1: (180., 0., 0.),
        2: (0., 0., 0.),
        3: (90., 0., 0.),
        4: (-90., 0., 0.),
        5: (0., -90., 0.),
        6: (0., 90., 0.),
        7: (135., -35.264, 0.),
    }
    if view in viewOffsets:
        hprOffset.set(*viewOffsets[view])
    # Position target at the marker, rotated by the chosen offset
    self.camManipRef.setPosHpr(self.coaMarker, ZERO_VEC,
                               hprOffset)
    # Scale center vec by current distance to target
    refDistance = Vec3(base.direct.camera.getPos(self.camManipRef) -
                       ZERO_POINT).length()
    backVec = Y_AXIS * (-1.0 * refDistance)
    # Now put the camManipRef at that point
    self.camManipRef.setPosHpr(self.camManipRef,
                               backVec,
                               ZERO_VEC)
    # Record view for next time around
    self.lastView = view
    moveTask = base.direct.camera.lerpPosHpr(ZERO_POINT,
                                             VBase3(0, 0, self.orthoViewRoll),
                                             CAM_MOVE_DURATION,
                                             other = self.camManipRef,
                                             blendType = 'easeInOut',
                                             task = 'manipulateCamera')
    moveTask.setUponDeath(self.updateCoaMarkerSizeOnDeath)
def swingCamAboutWidget(self, degrees, t):
    """Swing the camera 'degrees' around a render-aligned axis at the COA marker.

    NOTE(review): parameter 't' is unused -- the swing always takes
    CAM_MOVE_DURATION. Confirm intent.
    """
    # Remove existing camera manipulation task
    taskMgr.remove('manipulateCamera')
    # Record undo point
    base.direct.pushUndo([base.direct.camera])
    # Coincident with widget
    self.camManipRef.setPos(self.coaMarker, ZERO_POINT)
    # But aligned with render space
    self.camManipRef.setHpr(ZERO_POINT)
    parent = base.direct.camera.getParent()
    base.direct.camera.wrtReparentTo(self.camManipRef)
    manipTask = self.camManipRef.lerpHpr(VBase3(degrees, 0, 0),
                                         CAM_MOVE_DURATION,
                                         blendType = 'easeInOut',
                                         task = 'manipulateCamera')
    # Upon death, reparent Cam to parent
    manipTask.parent = parent
    manipTask.setUponDeath(self.reparentCam)
def reparentCam(self, state):
    """Task death callback: re-attach the camera to its pre-manipulation parent."""
    base.direct.camera.wrtReparentTo(state.parent)
    self.updateCoaMarkerSize()
def fitOnWidget(self, nodePath = 'None Given'):
# Fit the node on the screen
# stop any ongoing tasks
taskMgr.remove('manipulateCamera')
# How big is the node?
nodeScale = base.direct.widget.scalingNode.getScale(render)
maxScale = max(nodeScale[0], nodeScale[1], nodeScale[2])
maxDim = min(base.direct.dr.nearWidth, base.direct.dr.nearHeight)
# At what distance does the object fill 30% of the screen?
# Assuming radius of 1 on widget
camY = base.direct.dr.near * (2.0 * maxScale)/(0.3 * maxDim)
# What is the vector through the center of the screen?
centerVec = Y_AXIS * camY
# Where is the node relative to the viewpoint
vWidget2Camera = base.direct.widget.getPos(base.direct.camera)
# How far do you move the camera to be this distance from the node?
deltaMove = vWidget2Camera - centerVec
# Move a target there
try:
self.camManipRef.setPos(base.direct.camera, deltaMove)
| |
<filename>officinam/999999999/0/1603_3_12.py<gh_stars>0
#!/usr/bin/env python3
# ==============================================================================
#
# FILE: 1603_3_12.py
#
# USAGE: ./999999999/0/1603_3_12.py
# ./999999999/0/1603_3_12.py --help
# NUMERORDINATIO_BASIM="/dir/ndata" ./999999999/0/1603_3_12.py
#
# DESCRIPTION: ---
#
# OPTIONS: ---
#
# REQUIREMENTS: - python3
# - requests[socks]
# BUGS: ---
# NOTES: ---
# AUTHORS: <NAME> <rocha[at]ieee.org>
# COLLABORATORS:
# <@TODO: put additional non-anonymous names here>
#
# COMPANY: EticaAI
# LICENSE: Public Domain dedication or Zero-Clause BSD
# SPDX-License-Identifier: Unlicense OR 0BSD
# VERSION: v0.5.0
# CREATED: 2022-01-21 17:07 UTC created. Based on 2600.py
# REVISION: ---
# ==============================================================================
# pytest
# python3 -m doctest ./999999999/0/1603_3_12.py
# ./999999999/0/1603_3_12.py
# NUMERORDINATIO_BASIM="/external/ndata" ./999999999/0/1603_3_12.py
# printf "Q1065\nQ82151\n" | ./999999999/0/1603_3_12.py --actionem-sparql --query
# printf "Q1065\nQ82151\n" | ./999999999/0/1603_3_12.py --actionem-sparql --query | ./999999999/0/1603_3_12.py --actionem-sparql --wikidata-link
# printf "Q1065\nQ82151\n" | ./999999999/0/1603_3_12.py --actionem-sparql --query | ./999999999/0/1603_3_12.py --actionem-sparql --tsv > 999999/0/test.tsv
# printf "Q1065\nQ82151\n" | ./999999999/0/1603_3_12.py --actionem-sparql --query | ./999999999/0/1603_3_12.py --actionem-sparql --csv > 999999/0/test.csv
# printf "Q1065\nQ82151\n" | ./999999999/0/1603_3_12.py --actionem-sparql --query | ./999999999/0/1603_3_12.py --actionem-sparql --csv --hxltm
# 1603_25_1 query
# printf "Q3409626\nQ41055\nQ3321315\nQ160695\nQ9645\nQ9597\nQ713102\nQ133279\n" | ./999999999/0/1603_3_12.py --actionem-sparql --query
# SELECT ?pic (STRAFTER(STR(?item), "entity/") AS ?item__conceptum__codicem) ?item__rem__i_lat__is_latn
# WHERE
# {
# VALUES ?item { wd:Q3409626 wd:Q41055 wd:Q3321315 wd:Q160695 wd:Q9645 wd:Q9597 wd:Q713102 wd:Q133279 }
# bind(xsd:integer(strafter(str(?item), 'Q')) as ?id_numeric) .
# OPTIONAL { ?item wdt:P18 ?pic }
# OPTIONAL { ?item rdfs:label ?item__rem__i_qcc__is_zxxx filter (lang(?item__rem__i_qcc__is_zxxx) = ""). }
# OPTIONAL { ?item rdfs:label ?item__rem__i_lat__is_latn filter (lang(?item__rem__i_lat__is_latn) = "la"). }
# }
# ORDER BY ASC (?id_numeric)
## Example with proxy
# export HTTP_PROXY="socks5://127.0.0.1:9050"
# export HTTPS_PROXY="socks5://127.0.0.1:9050"
# TODO: https://sinaahmadi.github.io/posts/10-essential-sparql-queries-for-lexicographical-data-on-wikidata.html
import os
import sys
import argparse
# from pathlib import Path
from typing import (
# Type,
Union
)
from time import sleep
import math
import urllib.parse
import requests
# from itertools import permutations
from itertools import product
# values = list(itertools.permutations([1, 2, 3]))
import csv
NUMERORDINATIO_BASIM = os.getenv('NUMERORDINATIO_BASIM', os.getcwd())
NUMERORDINATIO_DEFALLO = int(os.getenv('NUMERORDINATIO_DEFALLO', '60')) # �
NUMERORDINATIO_MISSING = "�"
DESCRIPTION = """
1603_3_12.py is (...)
"""
# In Python2, sys.stdin is a byte stream; in Python3, it's a text stream
STDIN = sys.stdin.buffer
# @see https://meta.wikimedia.org/wiki/User-Agent_policy
# @see https://www.mediawiki.org/wiki/API:Etiquette
USER_AGENT="EticaAI-multilingual-lexicography/2022.3.9 (https://meta.wikimedia.org/wiki/User:EmericusPetro; <EMAIL>) 1603_3_12.py/0.1"
# print('getcwd: ', os.getcwd())
# print('oi', NUMERORDINATIO_BASIM)
# def quod_1613_2_60_datum():
# datum = {}
# with open(NUMERORDINATIO_BASIM + "/1613/1603.2.60.no1.tm.hxl.tsv") as file:
# tsv_file = csv.DictReader(file, delimiter="\t")
# return list(tsv_file)
# a b aa bb
# printf "30160\n31161\n1830260\n1891267\n" | ./999999999/0/2600.py --actionem-decifram
# a aa aaa
# printf "30160\n1830260\n109830360\n" | ./999999999/0/2600.py --actionem-decifram
# ./999999999/0/1603_3_12.py --actionem-quod-sparql
# SELECT ?item ?itemLabel
# WHERE {
# # <NAME> or <NAME>
# VALUES ?item { wd:Q1065 wd:Q82151 wd:Q125761 wd:Q7809}
# # mother of
# OPTIONAL { ?item wdt:P25 ?pseudoquery. }
# SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
# }
def hxltm_hastag_de_csvhxlated(csv_caput: list) -> list:
    """Convert csvhxlated column names into HXL hashtags.

    Conversions performed:
        - 'item__conceptum__codicem'  => '#item+conceptum+codicem'
        - 'item__rem__i_ara__is_arab' => '#item+rem+i_ara+is_arab'
        - ''                          => ''

    Args:
        csv_caput (list): Array of input items

    Returns:
        [list]: hashtag-converted header row
    """
    return [
        '#' + caput.replace('__', '+').replace('?', '') if caput else ''
        for caput in csv_caput
    ]
# https://stackoverflow.com/questions/43258341/how-to-get-wikidata-labels-in-more-than-one-language
class CS1603z3z12:
    """Build Wikidata SPARQL label queries for the [1603:1:51] language table.

    Loads the 1603_1_51 HXL CSV of languages, then generates a VALUES-based
    SELECT that fetches one rdfs:label column per (Wikidata language code,
    csv suffix) pair for a caller-supplied list of Q items.

    - https://en.wikibooks.org/wiki/SPARQL
    """

    def __init__(self):
        # Language table keyed by integer concept code -> {hxl hashtag: value}
        self.D1613_1_51 = self._init_1613_1_51_datum()
        self.linguae_limitibus = 1000
        self.linguae_paginarum_limitibus = 1
        # langpair_full = self._query_linguam()
        # self.D1613_1_51_langpair = self._query_linguam_limit(langpair_full)
        # Populated lazily by query()
        self.D1613_1_51_langpair = []
        # self.scientia_de_scriptura = {}
        # self.scientia_de_scriptura = self.D1613_2_60
        # self.cifram_signaturae = 6  # TODO: make it flexible
        # self.codex_verbum_tabulae = []
        # self.verbum_limiti = 2
        self.resultatum_separato = "\t"
        # Wikidata Q identifiers (strings) accumulated via est_wikidata_q()
        self.qid = []

    def _init_1613_1_51_datum(self) -> dict:
        """Load the 1603_1_51 HXL CSV into {int concept code: row dict}."""
        # archivum = NUMERORDINATIO_BASIM + "/1613/1603_2_60.no1.tm.hxl.tsv"
        # archivum = NUMERORDINATIO_BASIM + "/1603/17/2/60/1613_17_2_60.no1.tm.hxl.tsv"
        archivum = NUMERORDINATIO_BASIM + "/1603/1/51/1603_1_51.no1.tm.hxl.csv"
        datum = {}
        with open(archivum) as file:
            # tsv_file = csv.DictReader(file, delimiter="\t")
            csv_file = csv.DictReader(file)
            # return list(tsv_file)
            for conceptum in csv_file:
                # print('conceptum', conceptum)
                # NOTE(review): int() accepts underscores between digits
                # (PEP 515), so a '0_...' code may parse before the skip
                # below and leave an empty placeholder entry -- confirm
                # the codes in 1603_1_51 make this safe.
                int_clavem = int(conceptum['#item+conceptum+codicem'])
                datum[int_clavem] = {}
                if conceptum['#item+conceptum+codicem'].startswith('0_'):
                    continue
                # Skip languages with no Wikidata language code
                if not conceptum['#item+rem+i_qcc+is_zxxx+ix_wikilngm']:
                    continue
                for clavem, rem in conceptum.items():
                    if not clavem.startswith('#item+conceptum+codicem'):
                        datum[int_clavem][clavem] = rem
        return datum

    def _query_linguam(self) -> list:
        """Return [wikidata language code, SPARQL variable suffix] pairs."""
        resultatum = []
        for clavem, rem in self.D1613_1_51.items():
            # for clavem, rem in enumerate(self.D1613_1_51):
            # print('clavem rem', clavem, rem)
            if '#item+rem+i_qcc+is_zxxx+ix_wikilngm' not in rem or \
                    '#item+rem+i_qcc+is_zxxx+ix_csvsffxm' not in rem:
                continue
            resultatum.append([
                rem['#item+rem+i_qcc+is_zxxx+ix_wikilngm'],
                'item__rem' + rem['#item+rem+i_qcc+is_zxxx+ix_csvsffxm'],
            ])
        # print(self.D1613_1_51)
        # print('resultatum', resultatum)
        return resultatum

    def _query_linguam_limit(self, langpair_full: list) -> list:
        """Return the lingua_paginae-th chunk of langpair_full.

        The list is split into lingua_divisioni roughly equal chunks;
        with fewer than 2 divisions the full list is returned unchanged.
        """
        # resultatum = []
        if self.lingua_divisioni < 2:
            return langpair_full
        # @see https://stackoverflow.com/questions/312443
        # /how-do-you-split-a-list-into-evenly-sized-chunks
        # def chunks(lst, n):
        #     """Yield successive n-sized chunks from lst."""
        #     for i in range(0, len(lst), n):
        #         yield lst[i:i + n]
        # if langpair_full
        # def chunks(lst, n):
        #     """Yield successive n-sized chunks from lst."""
        #     for i in range(0, len(lst), n):
        #         yield lst[i:i + n]
        # import math
        divisio_numero = math.ceil(len(langpair_full) / self.lingua_divisioni)

        def chunks(l, n):
            # Split l into n-sized slices (last one may be shorter)
            n = max(1, n)
            return (l[i:i+n] for i in range(0, len(l), n))
        # limited = list(chunks(langpair_full, self.lingua_divisioni))
        limited = list(chunks(langpair_full, divisio_numero))
        # lingua_paginae is 1-based
        limited_group = limited[self.lingua_paginae - 1]
        # limited = chunks(langpair_full, self.linguae_limitibus)
        # raise ValueError(limited_group)
        # raise ValueError([limited_group, limited])
        return limited_group
        # # print('resultatum', resultatum)
        # return resultatum

    def est_resultatum_separato(self, resultatum_separato: str):
        """Set the output field separator; returns self (fluent)."""
        self.resultatum_separato = resultatum_separato
        return self

    def est_lingua_divisioni(
            self, lingua_divisioni: Union[str, int]):
        """Set how many chunks the language list is split into; returns self."""
        self.lingua_divisioni = int(lingua_divisioni)
        return self

    def est_lingua_paginae(
            self, lingua_paginae: Union[str, int]):
        """Set which (1-based) language chunk to use; returns self."""
        self.lingua_paginae = int(lingua_paginae)
        return self

    def est_wikidata_q(self, wikidata_codicem: str):
        """Add a Wikidata Q identifier (deduplicated); returns self."""
        if wikidata_codicem not in self.qid:
            self.qid.append(wikidata_codicem)
        return self

    # def query(self):
    #     term = """# https://en.wikiversity.org/wiki/Research_in_programming_Wikidata/Countries#List_of_countries
    # # https://w.wiki/4ij4
    # SELECT ?item ?item__eng_latn ?item__rus_cyrl
    # WHERE
    # {
    #     ?item wdt:P31 wd:Q6256. # instance country
    #     OPTIONAL {
    #         ?item rdfs:label ?item__eng_latn filter (lang(?item__eng_latn) = "en").
    #         ?item rdfs:label ?item__rus_cyrl filter (lang(?item__rus_cyrl) = "ru").
    #     }
    # }
    # """
    #     return term

    # SELECT ?item ?item_rem__eng_latn ?item_rem__rus_cyrl
    # WHERE
    # {
    #   VALUES ?item { wd:Q1065 wd:Q82151 wd:Q125761 wd:Q7809 }
    #   OPTIONAL {
    #       ?item rdfs:label ?item_rem__eng_latn filter (lang(?item_rem__eng_latn) = "en").
    #       ?item rdfs:label ?item_rem__rus_cyrl filter (lang(?item_rem__rus_cyrl) = "ru").
    #   }
    # }
    def query(self) -> str:
        """Assemble the SPARQL SELECT for the accumulated Q items.

        One projected variable plus one OPTIONAL rdfs:label pattern is
        emitted per selected language pair; results are ordered by the
        numeric part of the Q identifier.
        """
        langpair_full = self._query_linguam()
        self.D1613_1_51_langpair = self._query_linguam_limit(langpair_full)
        qid = ['wd:' + x for x in self.qid if isinstance(x, str)]
        # select = '?item ' + " ".join(self._query_linguam())
        # select = ['(?item AS ?item__conceptum__codicem)']
        select = [
            '(STRAFTER(STR(?item), "entity/") AS ?item__conceptum__codicem)']
        # select = [
        #     '(STRAFTER(STR(?item), "entity/") AS ?item__conceptum__codicem)',
        #     '(STRAFTER(STR(?item), "entity/") AS ?item__rem__i_qcc__is_zxxx__ix_wikiq)'
        # ]
        filter_otional = []
        for pair in self.D1613_1_51_langpair:
            select.append('?' + pair[1])
            # filter_otional.append(
            #     '?item rdfs:label ?' +
            #     pair[1] + ' filter (lang(?' + pair[1] +
            #     ') = "' + pair[0] + '").'
            # )
            filter_otional.append(
                'OPTIONAL { ?item rdfs:label ?' +
                pair[1] + ' filter (lang(?' + pair[1] +
                ') = "' + pair[0] + '"). }'
            )
        filter_otional_done = [' ' + x for x in filter_otional]
        # print('select', self.D1613_1_51_langpair)
        # print('select', select)
        # print('filter_otional', filter_otional)
        term = """
SELECT {select}
WHERE
{{
VALUES ?item {{ {qitems} }}
bind(xsd:integer(strafter(str(?item), 'Q')) as ?id_numeric) .
{langfilter}
}}
ORDER BY ASC (?id_numeric)
""".format(
            qitems=" ".join(qid),
            select=" ".join(select),
            langfilter="\n".join(filter_otional_done),
        )
        # """.format(qitems = " ".join(self.qid))
        # [TRY IT ↗]()
        return term

    def exportatum_sparql(self) -> list:
        """Return the generated query wrapped in a one-element list."""
        resultatum = []
        # resultatum.append('#TODO')
        # resultatum.append(str(self.D1613_1_51))
        resultatum.append(self.query())
        return resultatum
class CLI_2600:
def __init__(self):
    """
    Constructs all the necessary attributes for the Cli object.
    """
    # Parsed argparse namespace, filled in later (see make_args)
    self.pyargs = None
    # self.args = self.make_args()
    # Posix exit codes
    self.EXIT_OK = 0
    self.EXIT_ERROR = 1
    self.EXIT_SYNTAX = 2
def make_args(self, hxl_output=True):
parser = argparse.ArgumentParser(description=DESCRIPTION)
# https://en.wikipedia.org/wiki/Code_word
# https://en.wikipedia.org/wiki/Coded_set
# cōdex verbum tabulae
# parser.add_argument(
# '--actionem',
# help='Action to execute. Defaults to codex.',
# # choices=['rock', 'paper', 'scissors'],
# choices=[
# 'codex',
# 'fontem-verbum-tabulae',
# 'neo-scripturam',
# ],
# dest='actionem',
# required=True,
# default='codex',
# const='codex',
# type=str,
# nargs='?'
# )
parser.add_argument(
'--punctum-separato-de-resultatum',
help='Character(s) used as separator for generate output.' +
'Defaults to tab "\t"',
dest='resultatum_separato',
default="\t",
nargs='?'
)
neo_codex = parser.add_argument_group(
"sparql",
"(DEFAULT USE) SPARQL query")
neo_codex.add_argument(
'--actionem-sparql',
help='Define mode to operate with generation of SPARQL ' +
'queries',
metavar='',
dest='actionem_sparql',
const=True,
nargs='?'
)
neo_codex.add_argument(
'--query',
help='Generate SPARQL query',
metavar='',
dest='query',
const=True,
nargs='?'
)
neo_codex.add_argument(
'--wikidata-link',
help='Generate query.wikidata.org link (from piped in query)',
metavar='',
dest='wikidata_link',
const=True,
nargs='?'
)
neo_codex.add_argument(
'--csv',
help='Generate TSV output (from piped in query)',
metavar='',
dest='csv',
const=True,
nargs='?'
)
neo_codex.add_argument(
'--tsv',
help='Generate TSV output (from piped in query)',
metavar='',
dest='tsv',
const=True,
nargs='?'
)
neo_codex.add_argument(
'--hxltm',
help='Generate HXL-tagged output (from piped in query). ' +
'Concepts use #item+conceptum+codicem instead ' +
'of #item+code+v_wiki_q',
metavar='',
dest='hxltm',
const=True,
nargs='?'
)
# linguae, f, pl, (Nominative) https://en.wiktionary.org/wiki/lingua
# pāginārum, f, pl, (Gengitive) https://en.wiktionary.org/wiki/pagina
# dīvīsiōnibus, f, pl, (Dative) https://en.wiktionary.org/wiki/divisio
# līmitibus, m, pl, (Dative) https://en.wiktionary.org/wiki/limes#Latin
# //linguae pāginārum līmitibus//
# lingua, f, s, (Nominative) https://en.wiktionary.org/wiki/lingua#Latin
# pāginae, f, s, (Dative) https://en.wiktionary.org/wiki/lingua#Latin
# dīvīsiōnī, f, s, (Dative) https://en.wiktionary.org/wiki/lingua#Latin
neo_codex.add_argument(
'--lingua-divisioni',
help='For the languages on [1603:1:51], how many divisions ' +
'(or number of chunks) should be done. 1 means no division.' +
'If using more than 1, use --lingua-paginae do paginate the ' +
'Options. Default: | |
= None
self.new_pic = None
self.dim = Dimensions()
self.rec = Dimensions()
self.last_dir = ''
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Methods : File Dialogs /About +++++++++++++++++++++++++++++++++++++++++ #
def set_advanced(self):
    """Toggle between the wide (advanced) and compact window layouts.

    Resizes the window, repositions every pane/button for the chosen
    width, flips isAdvanced, updates the toggle button's label and
    shows/hides the advanced options box.
    """
    if not self.isAdvanced:
        # Grow the window and shift panes right to make room for Box_advanced
        self.resize(1812, 810)
        self.Box_original.setGeometry(QtCore.QRect(510, 10, 641, 451))
        self.Box_wallpaper.setGeometry(QtCore.QRect(1160, 10, 641, 451))
        self.Box_info.setGeometry(QtCore.QRect(1160, 470, 641, 291))
        self.Box_options.setGeometry(QtCore.QRect(510, 470, 641, 291))
        self.Button_advanced.setGeometry(QtCore.QRect(10, 770, 141, 32))
        self.Button_open.setGeometry(QtCore.QRect(1160, 770, 141, 32))
        self.Button_saveWallpaper.setGeometry(QtCore.QRect(1310, 770, 141, 32))
        self.Button_about.setGeometry(QtCore.QRect(1485, 770, 141, 32))
        self.Button_quit.setGeometry(QtCore.QRect(1660, 770, 141, 32))
        self.isAdvanced = True
        self.Button_advanced.setText("Advanced >>")
        self.Box_advanced.setVisible(True)
    else:
        # Shrink back to the compact layout and hide the advanced box
        self.resize(1312, 810)
        self.Box_original.setGeometry(QtCore.QRect(10, 10, 641, 451))
        self.Box_wallpaper.setGeometry(QtCore.QRect(660, 10, 641, 451))
        self.Box_info.setGeometry(QtCore.QRect(660, 470, 641, 291))
        self.Box_options.setGeometry(QtCore.QRect(10, 470, 641, 291))
        self.Button_advanced.setGeometry(QtCore.QRect(10, 770, 141, 32))
        self.Button_open.setGeometry(QtCore.QRect(660, 770, 141, 32))
        self.Button_saveWallpaper.setGeometry(QtCore.QRect(810, 770, 141, 32))
        self.Button_about.setGeometry(QtCore.QRect(985, 770, 141, 32))
        self.Button_quit.setGeometry(QtCore.QRect(1160, 770, 141, 32))
        self.isAdvanced = False
        self.Button_advanced.setText("<< Advanced")
        self.Box_advanced.setVisible(False)
def on_treeviewClicked(self, index):
    """Populate the file list with the images in the clicked directory.

    Collects supported image files from the selected tree-view folder,
    sorts them case-insensitively, refreshes the list widget, and resets
    the navigation index (-10 when the folder has no images, which
    disables set_Next/set_Prev; -1 when files exist but none is selected).
    """
    path = self.dirModel.fileInfo(index).absoluteFilePath()
    # Supported extensions; compared case-insensitively so files like
    # 'IMG_001.JPG' (common from cameras) are also listed.
    suffixes = {'.jpg', '.png', '.bmp', '.jpeg', '.gif',
                '.pbm', '.pgm', '.ppm', '.xbm', '.xpm'}
    entries = Path(path).glob('*.*')
    files = [x for x in entries
             if x.is_file() and x.suffix.lower() in suffixes]
    self.files.clear()
    self.fileList = [str(f) for f in files]
    self.fileList.sort(key=str.casefold)
    self.refresh_listview()
    self.fileIndex = -10 if len(self.fileList) == 0 else -1
def on_listviewClicked(self, index):
    """Load and display the image selected in the file list."""
    self.image = str(self.fileList[index.row()])
    self.refresh_settings()
    self.set_image()
    self.set_original()
    self.show_original()
    self.refresh()
    # Remember the folder for the next file dialog
    self.last_dir = str(Path(self.image).parent)
    self.fileIndex = index.row()
def refresh_listview(self):
    """Rebuild the list widget from fileList, highlighting processed files."""
    self.files.clear()
    for path in self.fileList:
        entry = QtWidgets.QListWidgetItem(str(Path(path).name))
        if path in self.workedList:
            # Bold green italics mark files already saved this session.
            marker_font = QtGui.QFont()
            marker_font.setStyle(QtGui.QFont.StyleItalic)
            marker_font.setBold(True)
            entry.setFont(marker_font)
            entry.setForeground(QtGui.QBrush(QtGui.QColor(52, 203, 60)))
        self.files.addItem(entry)
def set_Next(self):
    """Advance to and display the next image in the list, if any."""
    # fileIndex == -10 means "no folder/images loaded" (guard fails);
    # the second clause stops at the last file.
    if self.fileIndex >= -1 and self.fileIndex < len(self.fileList)-1:
        self.fileIndex += 1
        self.files.setCurrentRow(self.fileIndex)
        self.image = str(self.fileList[self.fileIndex])
        self.refresh_settings()
        self.set_image()
        self.set_original()
        self.show_original()
        self.refresh()
        self.last_dir = str(Path(self.image).parent)
def set_Prev(self):
    """Step back to and display the previous image in the list, if any."""
    if self.fileIndex > 0:
        self.fileIndex -= 1
        self.files.setCurrentRow(self.fileIndex)
        self.image = str(self.fileList[self.fileIndex])
        self.refresh_settings()
        self.set_image()
        self.set_original()
        self.show_original()
        self.refresh()
        self.last_dir = str(Path(self.image).parent)
def showAbout(self):
    """Show the module-level 'about' dialog."""
    about.show()
def open_Folder(self):
    """Ask the user for the fast-save target folder and remember it.

    Opens a directory-only picker starting from the last used directory
    (falling back to the CWD); on confirmation stores the path in the
    saveDir field and in last_dir.
    """
    # Renamed from 'dir' to avoid shadowing the builtin.
    start_dir = self.last_dir if self.last_dir != '' else str(Path.cwd())
    foldername = QtWidgets.QFileDialog.getExistingDirectory(
        self, 'Open FastSave Folder', start_dir,
        options=QtWidgets.QFileDialog.ShowDirsOnly)
    fname = str(foldername)
    if fname != '':
        self.saveDir.setText(fname)
        self.last_dir = fname
def FaseSave(self):
    """Quick-save the cropped picture as "(cut) <name>" in the chosen folder.

    Uses the save folder from the UI when it exists, otherwise the source
    image's folder; on success the source file is marked as processed.
    NOTE: the method name keeps its historical typo because UI signal
    connections elsewhere reference it.
    Fix: bare `except:` narrowed to `except Exception:` so SystemExit /
    KeyboardInterrupt are no longer swallowed.
    """
    if self.new_pic:
        # Prefer the user-configured save directory when it is valid.
        foldername = str(Path(self.image).parent) if not Path(self.saveDir.text()).is_dir() else str(Path(self.saveDir.text()))
        foldername += '/'
        filename = '(cut) ' + str(Path(self.image).name)
        savename = foldername + filename
        # Derive the image-format string from the suffix (".jpg" -> "JPG").
        img_format = str(Path(self.image).suffix).upper()[1:]
        try:
            self.new_pic.save(savename, img_format)
            self.workedList.append(self.image)
            self.refresh_listview()
            self.files.setCurrentRow(self.fileIndex)
        except Exception:
            msg = QtWidgets.QMessageBox()
            msg.setIcon(QtWidgets.QMessageBox.Critical)
            msg.setWindowTitle("Error: Could not save file!")
            msg.setText(str(sys.exc_info()))
            msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
            dummie = msg.exec_()
def open_pic(self):
    """Pick an image file via a dialog and display it.

    On any failure a message box shows the exception info and the current
    image is reset.  Fixes: bare `except:` narrowed to `except Exception:`;
    locals `dir` and `filter` renamed (they shadowed builtins).
    """
    start_dir = self.last_dir if self.last_dir != '' else str(Path.cwd())
    name_filter = 'Windows Bitmap (*.bmp);;Graphic Interchange Format (*.gif);;Joint Photographic Experts Group (*.jpg);;Joint Photographic Experts Group (*.jpeg);;Portable Network Graphics (*.png);;Portable Bitmap (*.pbm);;Portable Graymap (*.pgm);;Portable Pixmap (*.ppm);;X11 Bitmap (*.xbm);;X11 Bitmap (*.xpm)'
    filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open Imagefile', start_dir, name_filter, 'Joint Photographic Experts Group (*.jpg)', options=QtWidgets.QFileDialog.DontUseNativeDialog)
    fname = filename[0]
    if fname != '':
        self.image = fname
        try:
            self.refresh_settings()
            self.set_image()
            self.set_original()
            self.show_original()
            self.refresh()
            self.last_dir = str(Path(self.image).parent)
        except Exception:
            msg = QtWidgets.QMessageBox()
            msg.setIcon(QtWidgets.QMessageBox.Critical)
            msg.setWindowTitle("Error: Could not open file!")
            msg.setText(str(sys.exc_info()))
            msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
            dummie = msg.exec_()
            # Reset so the UI does not point at a file that failed to load.
            self.image = ''
def save_pic(self):
    """Save the cropped picture via a save dialog, format taken from the chosen filter.

    Fixes: bare `except:` narrowed to `except Exception:`; locals `dir` and
    `filter` renamed (shadowed builtins); independent `if` chain converted
    to `elif` (the dialog filters are mutually exclusive, so at most one
    branch ever matched).
    """
    if self.new_pic:
        start_dir = self.last_dir if self.last_dir != '' else str(Path.cwd())
        name_filter = 'Windows Bitmap (*.bmp);;Joint Photographic Experts Group (*.jpg);;Joint Photographic Experts Group (*.jpeg);;Portable Network Graphics (*.png);;Portable Pixmap (*.ppm);;X11 Bitmap (*.xbm);;X11 Bitmap (*.xpm)'
        filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Einkaufszettel speichern...', start_dir, name_filter, 'Joint Photographic Experts Group (*.jpg)')
        fname = filename[0]
        if fname:
            # Map the selected name filter onto an image-format string.
            if 'bmp' in filename[1]:
                img_format = 'BMP'
            elif 'jpg' in filename[1]:
                img_format = 'JPG'
            elif 'jpeg' in filename[1]:
                img_format = 'JPEG'
            elif 'png' in filename[1]:
                img_format = 'PNG'
            elif 'ppm' in filename[1]:
                img_format = 'PPM'
            elif 'xbm' in filename[1]:
                img_format = 'XBM'
            else:
                img_format = 'XPM'
            # Append the matching suffix if the user did not type one.
            ending = '.' + img_format.lower()
            dummy = Path(fname)
            if dummy.suffix != ending:
                fname += ending
            try:
                self.new_pic.save(fname, img_format)
                self.last_dir = str(dummy.parent)
            except Exception:
                msg = QtWidgets.QMessageBox()
                msg.setIcon(QtWidgets.QMessageBox.Critical)
                msg.setWindowTitle("Error: Could not save file!")
                msg.setText(str(sys.exc_info()))
                msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
                dummie = msg.exec_()
                # NOTE(review): indentation reconstructed from flattened
                # source -- the reset is assumed to belong to the error path,
                # mirroring open_pic resetting self.image; confirm upstream.
                self.last_dir = ''
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Methods : Mouse Events ++++++++++++++++++++++++++++++++++++++++++++++++ #
def mouseMoveEvent(self, event):
    """While right-dragging inside the preview area, move the crop rectangle."""
    if event.buttons() == QtCore.Qt.RightButton and self.image != '':
        # Preview bounds in widget coordinates; the advanced layout shifts
        # the preview 500px further right than the basic layout.
        leftx = self.dim.startx+510 if self.isAdvanced else self.dim.startx+10
        rightx = self.dim.startx+510+self.dim.width if self.isAdvanced else self.dim.startx+10+self.dim.width
        topy = self.dim.starty+30
        bottomy = self.dim.starty+30+self.dim.height
        if (event.x() in range(leftx, rightx)) and (event.y() in range(topy, bottomy)):
            # Translate the mouse delta into image coordinates.
            # (difx/dify are presumably image-to-preview scale factors -- TODO confirm.)
            self.rec.startx = int(self.rec.startx + (event.x()-self.lastx-self.dim.startx*2)*self.difx)
            self.rec.starty = int(self.rec.starty + (event.y()-self.lasty-self.dim.starty*2)*self.dify)
            # Clamp the rectangle so it stays fully inside the image.
            if self.rec.startx < 0:
                self.rec.startx = 0
            if self.rec.starty < 0:
                self.rec.starty = 0
            if self.rec.startx > (self.pixmap.width()-self.rec.width):
                self.rec.startx = (self.pixmap.width()-self.rec.width)
            if self.rec.starty > (self.pixmap.height()-self.rec.height):
                self.rec.starty = (self.pixmap.height()-self.rec.height)
            self.show_rectangles()
        # Remember the cursor position for the next delta.
        # NOTE(review): indentation reconstructed from flattened source --
        # assumed to run on every right-drag move (matching mousePressEvent),
        # even when the cursor is outside the preview; confirm upstream.
        self.lastx = event.x()-self.dim.startx*2
        self.lasty = event.y()-self.dim.starty*2
def mousePressEvent(self, event):
    """Remember the (offset-corrected) click position when a right-drag starts."""
    if event.button() != QtCore.Qt.RightButton:
        return
    offset_x = self.dim.startx * 2
    offset_y = self.dim.starty * 2
    self.lastx = event.x() - offset_x
    self.lasty = event.y() - offset_y
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Methods : Set Resolution ++++++++++++++++++++++++++++++++++++++++++++++ #
def refresh(self):
    """Re-apply the currently selected resolution mode, then update the info display."""
    mode_handlers = (
        (self.radio_fullHD, self.set_fullHD),
        (self.radio_dual, self.set_dual),
        (self.radio_custom, self.set_custom),
    )
    for radio, handler in mode_handlers:
        if radio.isChecked():
            handler()
    self.show_information()
def set_fullHD(self):
    """Switch the UI to the fixed 1920x1080 preset: hide all custom/dual widgets."""
    self.new_pic = None
    hidden_widgets = (
        self.label_gap, self.label_mon1, self.size_monitors, self.label_ar,
        self.resolution, self.size_gap, self.custom, self.label_m,
        self.label_m2, self.label_pix, self.label_pix2, self.checkBox_dualMonitor,
    )
    for widget in hidden_widgets:
        widget.setVisible(False)
    self.checkBox_dualMonitor.setChecked(False)
    # Re-derive the slider range; fall back to the plain preview when the
    # slider could not be configured.
    if self.set_maxSliderValue():
        self.show_rectangles()
    elif self.image != '':
        self.show_original()
    self.show_information()
def set_dual(self):
    """Switch the UI to the dual-monitor preset: show monitor-size/gap inputs only."""
    self.new_pic = None
    shown_widgets = (self.label_gap, self.label_mon1, self.size_monitors,
                     self.size_gap, self.label_m, self.label_m2)
    hidden_widgets = (self.label_ar, self.resolution, self.custom,
                      self.label_pix, self.label_pix2, self.checkBox_dualMonitor)
    for widget in shown_widgets:
        widget.setVisible(True)
    for widget in hidden_widgets:
        widget.setVisible(False)
    self.checkBox_dualMonitor.setChecked(False)
    # Re-derive the slider range; fall back to the plain preview otherwise.
    if self.set_maxSliderValue():
        self.show_rectangles()
    elif self.image != '':
        self.show_original()
    self.show_information()
def set_custom(self):
    """Switch the UI to custom-resolution mode.

    With the dual-monitor checkbox set, the dual-specific inputs (monitor
    size, gap, per-monitor resolution) replace the plain custom-resolution
    field; otherwise only the custom field is shown.
    """
    self.new_pic = None
    if self.radio_custom.isChecked():
        self.custom.setVisible(True)
        self.label_pix.setVisible(True)
        self.checkBox_dualMonitor.setVisible(True)
        if self.checkBox_dualMonitor.isChecked():
            # Dual-monitor variant: hide the plain field, show dual inputs.
            self.custom.setVisible(False)
            self.label_pix.setVisible(False)
            self.label_mon1.setVisible(True)
            self.label_m.setVisible(True)
            self.label_m2.setVisible(True)
            self.size_monitors.setVisible(True)
            self.label_ar.setVisible(True)
            self.resolution.setVisible(True)
            self.label_gap.setVisible(True)
            self.size_gap.setVisible(True)
            self.label_pix2.setVisible(True)
        else:
            # Plain custom resolution: only the custom field is visible.
            self.custom.setVisible(True)
            self.label_pix.setVisible(True)
            self.label_gap.setVisible(False)
            self.label_m.setVisible(False)
            self.label_m2.setVisible(False)
            self.label_mon1.setVisible(False)
            self.size_monitors.setVisible(False)
            self.label_ar.setVisible(False)
            self.resolution.setVisible(False)
            self.size_gap.setVisible(False)
            self.label_pix2.setVisible(False)
    # Re-derive the slider range; fall back to the plain preview otherwise.
    if self.set_maxSliderValue():
        self.show_rectangles()
    elif self.image != '':
        self.show_original()
    self.show_information()
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Methods : Calculations ++++++++++++++++++++++++++++++++++++++++++++++++ #
def calc_dual(self):
    """Compute the combined dual-monitor canvas (self.dualspan x self.dualy).

    The monitor diagonal and the gap between monitors (same physical unit)
    are used to convert the physical gap into pixels.  Returns True when
    the inputs parsed and the dual geometry attributes were updated.
    Fix: removed the leftover debug ``print`` that spammed stdout on every
    recalculation.
    """
    success = False
    if self.radio_dual.isChecked():
        # Preset mode: two fixed 1080p monitors.
        self.dualx = 1920
        self.dualy = 1080
        check1, d = is_number(self.size_monitors.text())
        check2, gap = is_number(self.size_gap.text())
        if check1 and check2:
            # Physical width of one monitor from its diagonal d and aspect ratio.
            a = d*math.sqrt(1 / (1 + (self.dualy/self.dualx)**2))
            self.dualgpx = int((self.dualx / a)*gap)
            self.dualspan = self.dualx*2 + self.dualgpx
            success = True
    if self.radio_custom.isChecked() and self.checkBox_dualMonitor.isChecked():
        # Custom mode: per-monitor resolution comes from the UI.
        check1, self.dualx, self.dualy = is_resolution(self.resolution.text())
        check2, d = is_number(self.size_monitors.text())
        check3, gap = is_number(self.size_gap.text())
        if check1 and check2 and check3:
            a = d*math.sqrt(1 / (1 + (self.dualy/self.dualx)**2))
            self.dualgpx = int((self.dualx / a)*gap)
            self.dualspan = self.dualx*2 + self.dualgpx
            success = True
    return success
def set_max(self):
    """Set self.max_width / self.max_height for the active resolution mode.

    Returns True when a valid maximum could be determined.  Later branches
    override earlier ones, so custom+dual wins over the plain custom field.
    """
    success = False
    if self.radio_fullHD.isChecked():
        self.max_width = 1920
        self.max_height = 1080
        success = True
    if self.radio_dual.isChecked() and self.calc_dual():
        self.max_width = self.dualspan
        self.max_height = self.dualy
        success = True
    # NOTE: parsed unconditionally, even when the custom radio is inactive.
    check, w, h = is_resolution(self.custom.text())
    if self.radio_custom.isChecked() and check:
        self.max_width = w
        self.max_height = h
        success = True
    # Custom + dual-monitor checkbox overrides the single custom resolution.
    if self.radio_custom.isChecked() and self.calc_dual():
        self.max_width = self.dualspan
        self.max_height = self.dualy
        success = True
    return success
def set_maxSliderValue(self):
    """Configure the size slider from the image size vs. the target resolution.

    The slider works in percent of the target size: its maximum is
    ``min(image/target ratio per axis) * 100``.  When the image is at least
    as large as the target the slider starts at 100%, otherwise at its
    (sub-100) maximum.  Returns True when an image is loaded and a target
    resolution is available.

    Fix: the original had four overlapping condition blocks (one condition
    duplicated verbatim) using side-effecting conditional expressions; they
    all reduce to the limiting-axis minimum, which is computed directly here.
    """
    self.rec_size.blockSignals(True)
    self.rec_size.setMinimum(0)
    self.rec_size.setMaximum(0)
    self.rec_size.setEnabled(False)
    success = False
    if self.image != '' and self.set_max():
        self.rec_size.setMinimum(10)
        self.rec_size.setEnabled(True)
        # Per-axis ratio of image size to the maximum target size.
        ratio_w = self.pixmap.width() / self.max_width
        ratio_h = self.pixmap.height() / self.max_height
        # The limiting axis determines the slider maximum.
        limit = min(ratio_w, ratio_h)
        self.rec_size.setMaximum(int(limit * 100))
        if limit >= 1.0:
            self.rec_size.setValue(100)
        else:
            self.rec_size.setValue(self.rec_size.maximum())
        success = True
    self.rec_size.blockSignals(False)
    return success
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Methods : Show Images +++++++++++++++++++++++++++++++++++++++++++++++++ #
def set_original(self):
if self.image != '':
self.set_image()
startx = self.dim.startx
starty = self.dim.starty
lw = self.dim.width
lh = self.dim.height
ph = self.pixmap.height()
pw = self.pixmap.width()
if pw < lw and ph < lh:
startx = (lw//2 + 10) - (pw//2)
starty = (lh//2 + 30) - (ph//2)
lh = ph
lw = pw
elif pw > lw and ph < lh:
new_height = (lw*ph)//pw
starty = (lh//2 + 30) - (new_height//2)
lh = new_height
elif pw < lw and ph > lh:
new_width = (lh*pw)//ph
startx = (lw//2 + 10) - (new_width//2)
lw = new_width
elif pw > lw and ph > lh:
if not (pw/lw) == (ph/lh):
if (pw/lw) > (ph/lh):
v = pw/lw
new_height = int(ph/v)
starty = (lh//2 + 30) - (new_height//2)
lh = new_height
else:
v = ph/lh
new_width = int(pw/v)
startx = (lw//2 + 10) - (new_width//2)
lw = new_width
self.dim.startx = startx
self.dim.starty = starty
# --- extraction artifact removed; following fragment is from repository: sttollgrin/hydrus ---
import hashlib
import io
import numpy
import numpy.core.multiarray # important this comes before cv!
import struct
import warnings
try:
# more hidden imports for pyinstaller
import numpy.random.common # pylint: disable=E0401
import numpy.random.bounded_integers # pylint: disable=E0401
import numpy.random.entropy # pylint: disable=E0401
except:
pass # old version of numpy, screw it
from PIL import _imaging
from PIL import ImageFile as PILImageFile
from PIL import Image as PILImage
from PIL import ImageCms as PILImageCms
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusPaths
from hydrus.core import HydrusTemp
PIL_SRGB_PROFILE = PILImageCms.createProfile( 'sRGB' )
def EnableLoadTruncatedImages():
    """Turn on PIL's truncated-image loading; returns whether the flag exists.

    Debug only: with this flag set, PIL's truncated-load path can hang by
    feeding endless fake EOFs into the file stream.
    """
    if not hasattr( PILImageFile, 'LOAD_TRUNCATED_IMAGES' ):
        return False
    PILImageFile.LOAD_TRUNCATED_IMAGES = True
    return True
# Compatibility shims: very old PIL versions predate the decompression-bomb
# exceptions, so stub them out before the warning filters below reference them.
if not hasattr( PILImage, 'DecompressionBombError' ):
    class DBE_stub( Exception ):
        pass
    PILImage.DecompressionBombError = DBE_stub
if not hasattr( PILImage, 'DecompressionBombWarning' ):
    class DBW_stub( Exception ):
        pass
    PILImage.DecompressionBombWarning = DBW_stub
# Large images are handled deliberately here, so silence PIL's
# decompression-bomb warnings/errors and disable the pixel-count check.
warnings.simplefilter( 'ignore', PILImage.DecompressionBombWarning )
warnings.simplefilter( 'ignore', PILImage.DecompressionBombError )
OLD_PIL_MAX_IMAGE_PIXELS = PILImage.MAX_IMAGE_PIXELS
PILImage.MAX_IMAGE_PIXELS = None # this turns off the decompression check entirely
# Formats that must always go through PIL rather than OpenCV.
PIL_ONLY_MIMETYPES = { HC.IMAGE_GIF, HC.IMAGE_ICON }
# OpenCV is optional: when the import (or constant lookup) fails, the module
# falls back to PIL-only loading via the OPENCV_OK flag.
try:
    import cv2
    if cv2.__version__.startswith( '2' ):
        # OpenCV 2.x uses the old constant names and takes no encode params.
        CV_IMREAD_FLAGS_PNG = cv2.CV_LOAD_IMAGE_UNCHANGED
        CV_IMREAD_FLAGS_JPEG = CV_IMREAD_FLAGS_PNG
        CV_IMREAD_FLAGS_WEIRD = CV_IMREAD_FLAGS_PNG
        CV_JPEG_THUMBNAIL_ENCODE_PARAMS = []
        CV_PNG_THUMBNAIL_ENCODE_PARAMS = []
    else:
        # allows alpha channel
        CV_IMREAD_FLAGS_PNG = cv2.IMREAD_UNCHANGED
        # this preserves colour info but does EXIF reorientation and flipping
        CV_IMREAD_FLAGS_JPEG = cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR
        # this seems to allow weird tiffs to load as non-greyscale, although the LAB conversion whitepoint can be wrong
        CV_IMREAD_FLAGS_WEIRD = cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR
        CV_JPEG_THUMBNAIL_ENCODE_PARAMS = [ cv2.IMWRITE_JPEG_QUALITY, 92 ]
        CV_PNG_THUMBNAIL_ENCODE_PARAMS = [ cv2.IMWRITE_PNG_COMPRESSION, 9 ]
    OPENCV_OK = True
except:
    OPENCV_OK = False
def ConvertToPNGIfBMP( path ):
    """If the file at path is a BMP (magic bytes b'BM'), convert it in place to PNG."""
    with open( path, 'rb' ) as f:
        is_bmp = f.read( 2 ) == b'BM'
    if not is_bmp:
        return
    ( os_file_handle, temp_path ) = HydrusTemp.GetTempPath()
    try:
        # Work from a temp copy so the original path can be overwritten.
        with open( path, 'rb' ) as f_source, open( temp_path, 'wb' ) as f_dest:
            HydrusPaths.CopyFileLikeToFileLike( f_source, f_dest )
        pil_image = GeneratePILImage( temp_path )
        pil_image.save( path, 'PNG' )
    finally:
        HydrusTemp.CleanUpTempPath( os_file_handle, temp_path )
def DequantizeNumPyImage( numpy_image: "numpy.ndarray" ) -> "numpy.ndarray":
    """Normalise an OpenCV-loaded array (BGR/BGRA/greyscale) to RGB/RGBA uint8."""
    if numpy_image.dtype == 'uint16':
        # Scale 16-bit samples down to 8-bit.
        numpy_image = numpy.array( numpy_image // 256, dtype = 'uint8' )
    if numpy_image.ndim == 2:
        # monochrome image
        conversion = cv2.COLOR_GRAY2RGB
    elif numpy_image.shape[2] == 4:
        conversion = cv2.COLOR_BGRA2RGBA
    else:
        conversion = cv2.COLOR_BGR2RGB
    return cv2.cvtColor( numpy_image, conversion )
def DequantizePILImage( pil_image: "PILImage.Image" ) -> "PILImage.Image":
    """Apply any embedded ICC profile, then force the image into RGB/RGBA."""
    if HasICCProfile( pil_image ):
        try:
            pil_image = NormaliseICCProfilePILImageToSRGB( pil_image )
        except Exception as e:
            # Best effort: a broken profile should not stop the load.
            HydrusData.ShowException( e )
            HydrusData.ShowText( 'Failed to normalise image ICC profile.' )
    return NormalisePILImageToRGB( pil_image )
def GenerateNumPyImage( path, mime, force_pil = False ) -> numpy.ndarray:
    """Load the image at path into an RGB(A) numpy array.

    Prefers OpenCV for speed, but switches to PIL when OpenCV is missing,
    when the format is PIL-only (gif/icon), when the image is LAB or has an
    ICC profile, or when OpenCV fails to decode the file.
    """
    if HG.media_load_report_mode:
        HydrusData.ShowText( 'Loading media: ' + path )
    if not OPENCV_OK:
        force_pil = True
    if not force_pil:
        # Probe with PIL first to detect cases OpenCV handles badly.
        try:
            pil_image = RawOpenPILImage( path )
            try:
                pil_image.verify()
            except:
                raise HydrusExceptions.UnsupportedFileException()
            # LAB colour and ICC profiles need the PIL pipeline.
            if pil_image.mode == 'LAB':
                force_pil = True
            if HasICCProfile( pil_image ):
                if HG.media_load_report_mode:
                    HydrusData.ShowText( 'Image has ICC, so switching to PIL' )
                force_pil = True
        except HydrusExceptions.UnsupportedFileException:
            # pil had trouble, let's cross our fingers cv can do it
            pass
    if mime in PIL_ONLY_MIMETYPES or force_pil:
        if HG.media_load_report_mode:
            HydrusData.ShowText( 'Loading with PIL' )
        pil_image = GeneratePILImage( path )
        numpy_image = GenerateNumPyImageFromPILImage( pil_image )
    else:
        if HG.media_load_report_mode:
            HydrusData.ShowText( 'Loading with OpenCV' )
        if mime == HC.IMAGE_JPEG:
            flags = CV_IMREAD_FLAGS_JPEG
        elif mime == HC.IMAGE_PNG:
            flags = CV_IMREAD_FLAGS_PNG
        else:
            flags = CV_IMREAD_FLAGS_WEIRD
        numpy_image = cv2.imread( path, flags = flags )
        if numpy_image is None: # OpenCV doesn't support some random stuff
            if HG.media_load_report_mode:
                HydrusData.ShowText( 'OpenCV Failed, loading with PIL' )
            pil_image = GeneratePILImage( path )
            numpy_image = GenerateNumPyImageFromPILImage( pil_image )
        else:
            # OpenCV loads BGR; normalise to RGB(A) uint8.
            numpy_image = DequantizeNumPyImage( numpy_image )
    return numpy_image
def GenerateNumPyImageFromPILImage( pil_image: "PILImage.Image" ) -> numpy.ndarray:
    """Convert a PIL image to an (h, w, depth) uint8 numpy array.

    Raises HydrusExceptions.UnsupportedFileException when PIL cannot decode
    the underlying data stream.
    """
    ( w, h ) = pil_image.size
    try:
        s = pil_image.tobytes()
    except OSError as e: # e.g. OSError: unrecognized data stream contents when reading image file
        raise HydrusExceptions.UnsupportedFileException( str( e ) )
    # Infer channel count from the raw buffer length.
    depth = len( s ) // ( w * h )
    # Fix: numpy.fromstring is deprecated/removed for binary data; frombuffer
    # is the supported replacement.  frombuffer yields a read-only view, so
    # copy() keeps the result writable, as fromstring's was.
    return numpy.frombuffer( s, dtype = 'uint8' ).reshape( ( h, w, depth ) ).copy()
def GeneratePILImage( path, dequantize = True ) -> "PILImage.Image":
    """Open the file at path with PIL, fix EXIF rotation, optionally normalise to RGB."""
    pil_image = RawOpenPILImage( path )
    if pil_image is None:
        raise Exception( 'The file at {} could not be rendered!'.format( path ) )
    RotateEXIFPILImage( pil_image )
    if not dequantize:
        return pil_image
    # note this destroys animated gifs atm, it collapses down to one frame
    return DequantizePILImage( pil_image )
def GeneratePILImageFromNumPyImage( numpy_image: "numpy.ndarray" ) -> "PILImage.Image":
    """Convert an (h, w[, depth]) uint8 numpy array into a PIL image.

    Note: kept mostly as an artifact -- a PIL image built from a cv2 array
    lacks the .info dict PIL normally provides.

    Fixes: local ``format`` renamed (shadowed the builtin); an unexpected
    channel count now raises a clear ValueError instead of the accidental
    UnboundLocalError the old if/elif chain produced.
    """
    if len( numpy_image.shape ) == 2:
        ( h, w ) = numpy_image.shape
        pil_mode = 'L'
    else:
        ( h, w, depth ) = numpy_image.shape
        depth_to_mode = { 1 : 'L', 2 : 'LA', 3 : 'RGB', 4 : 'RGBA' }
        if depth not in depth_to_mode:
            raise ValueError( 'Cannot convert a numpy image with {} channels to PIL!'.format( depth ) )
        pil_mode = depth_to_mode[ depth ]
    pil_image = PILImage.frombytes( pil_mode, ( w, h ), numpy_image.data.tobytes() )
    return pil_image
def GenerateThumbnailBytesFromStaticImagePath( path, target_resolution, mime ) -> bytes:
    """Render the image at path, resize it to target_resolution, and encode thumbnail bytes.

    Tries the OpenCV pipeline first (when available) and falls back to PIL
    when OpenCV cannot encode the result.
    """
    if OPENCV_OK:
        numpy_image = GenerateNumPyImage( path, mime )
        thumbnail_numpy_image = ResizeNumPyImage( numpy_image, target_resolution )
        try:
            return GenerateThumbnailBytesNumPy( thumbnail_numpy_image, mime )
        except HydrusExceptions.CantRenderWithCVException:
            pass # fallback to PIL
    pil_image = GeneratePILImage( path )
    thumbnail_pil_image = pil_image.resize( target_resolution, PILImage.ANTIALIAS )
    # BUG FIX: the original encoded the full-size pil_image here instead of
    # the resized thumbnail, so PIL-path thumbnails were never shrunk.
    thumbnail_bytes = GenerateThumbnailBytesPIL( thumbnail_pil_image, mime )
    return thumbnail_bytes
def GenerateThumbnailBytesNumPy( numpy_image, mime ) -> bytes:
    """Encode an RGB(A) numpy image as PNG or JPEG thumbnail bytes via OpenCV.

    PNG is used for PNG mimes and anything with an alpha channel; everything
    else becomes quality-92 JPEG.  Raises CantRenderWithCVException when
    OpenCV fails to encode.
    """
    ( im_height, im_width, depth ) = numpy_image.shape
    # cv2.imencode expects BGR(A) channel order.
    if depth == 4:
        convert = cv2.COLOR_RGBA2BGRA
    else:
        convert = cv2.COLOR_RGB2BGR
    numpy_image = cv2.cvtColor( numpy_image, convert )
    if mime == HC.IMAGE_PNG or depth == 4:
        ext = '.png'
        params = CV_PNG_THUMBNAIL_ENCODE_PARAMS
    else:
        ext = '.jpg'
        params = CV_JPEG_THUMBNAIL_ENCODE_PARAMS
    ( result_success, result_byte_array ) = cv2.imencode( ext, numpy_image, params )
    if result_success:
        # Fix: ndarray.tostring() is deprecated (removed in numpy 2.0);
        # tobytes() is the identical, supported replacement.
        thumbnail_bytes = result_byte_array.tobytes()
        return thumbnail_bytes
    else:
        raise HydrusExceptions.CantRenderWithCVException( 'Thumb failed to encode!' )
def GenerateThumbnailBytesPIL( pil_image: "PILImage.Image", mime ) -> bytes:
    """Encode a PIL image as PNG (for PNG mime or RGBA images) or quality-92 JPEG bytes."""
    buffer = io.BytesIO()
    use_png = mime == HC.IMAGE_PNG or pil_image.mode == 'RGBA'
    if use_png:
        pil_image.save( buffer, 'PNG' )
    else:
        pil_image.save( buffer, 'JPEG', quality = 92 )
    buffer.seek( 0 )
    try:
        return buffer.read()
    finally:
        buffer.close()
def GetGIFFrameDurations( path ):
pil_image = RawOpenPILImage( path )
times_to_play_gif = GetTimesToPlayGIFFromPIL( pil_image )
frame_durations = []
i = 0
# --- extraction artifact removed; following fragment metadata: gh_stars = 0 ---
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.122614,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.298995,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.78419,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.608634,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.05393,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.604461,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.26703,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.481382,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.16826,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.14815,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0220635,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.200489,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.163173,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.34864,
'Execution Unit/Register Files/Runtime Dynamic': 0.185236,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.518056,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.24113,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.39777,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00301262,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00301262,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00260934,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.0010021,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00234399,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0109786,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0294082,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.156862,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.498823,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.532774,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.22885,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.105048,
'L2/Runtime Dynamic': 0.041173,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.73164,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.78899,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.113056,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.113056,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 5.26769,
'Load Store Unit/Runtime Dynamic': 2.4596,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.278777,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.557553,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0989388,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.100513,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0817832,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.729539,
'Memory Management Unit/Runtime Dynamic': 0.182297,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.801,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.516862,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0373417,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.308411,
'Renaming Unit/Int Front End RAT/Subthreshold | |
# --- following fragment is from file: gen_plots_tables/plot_figure1-6.py ---
import numpy as np
import empca
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
from apogee.tools.path import change_dr
from apogee.tools import pix2wv
from apogee.tools import bitmask as bm
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from delfiSpec import util, specproc, specsim
# Read APOGEE DR14 catalogue
change_dr('14')
apCat = util.ApogeeCat()
# Read M67 cluster APOGEE spectra (only the giant-member table is used below)
_, M67_GM_apogee = apCat.read_OCCAM_cluster()
# Perform APOGEE cuts: Giant members with Iron abundance within M67 limits
apogee_cat_cut = apCat.apogee_cat[(apCat.apogee_cat['FE_H'] > -0.15) &
                                  (apCat.apogee_cat['FE_H'] < 0.15) &
                                  (apCat.apogee_cat['LOGG'] < 4) & (apCat.apogee_cat['LOGG'] > -1)]
# High Signal-to-Noise Ratio
indx = apogee_cat_cut['SNR'] > 200
apogee_cat_cut = apogee_cat_cut[indx]
# Make sure all abundances have "physical values", i.e. within (-1, 1) dex.
# Index 0 (FE_H) is skipped on purpose: it was already bounded to the M67
# range above.
abundances = ['FE_H', 'C_FE', 'N_FE', 'O_FE', 'NA_FE', 'MG_FE', 'AL_FE', 'SI_FE', 'S_FE',
              'K_FE', 'CA_FE', 'TI_FE', 'V_FE', 'MN_FE', 'NI_FE']
for i in range(1, len(abundances)):
    apogee_cat_cut = apogee_cat_cut[(apogee_cat_cut[abundances[i]] > -1) &
                                    (apogee_cat_cut[abundances[i]] < 1)]
# Read APOGEE spectra for the above APOGEE cut
data_set = apCat.read_allStar_spectra(apogee_cat_cut)
# Mask bad pixels using APOGEE bitmask
badcombpixmask = bm.badpixmask()
# Extra bits to flag: significant sky/telluric residuals and persistence regions
pix_err = np.array([bm.apogee_pixmask_int("SIG_SKYLINE"), bm.apogee_pixmask_int("SIG_TELLURIC"),
                    bm.apogee_pixmask_int("PERSIST_HIGH"), bm.apogee_pixmask_int("PERSIST_MED"),
                    bm.apogee_pixmask_int("PERSIST_LOW")])
badcombpixmask += np.sum(2**pix_err)
data_set_specproc, _, data_set_weight = specproc.process_spectra(spectra_info=data_set,
                                                                 badcombpixmask=badcombpixmask)
# Mask the spectra based on APOGEE bitmask (weight 0 == masked pixel)
data_set_specmasked = np.ma.masked_array(data_set_specproc, mask=(data_set_weight==0))
# Count masked pixels per spectrum (sum over the boolean mask rows)
data_set_maskpixels = np.sum(data_set_specmasked.mask, axis=1)
# Final APOGEE cut: remove spectra with more than 50% of the 7214 pixels masked
apogee_cat_cut = apogee_cat_cut[data_set_maskpixels < 50/100*7214]
# ------------------- Case Study Sample -------------------
'''
Teff and log g cut leads to a total of 20 spectra in ``case_cut``, the first ten of these
are the case study "original" sample, S_org, whereas the rest are used either as basis
functions or for validation purposes and denoted as B. In most of the case study, we use 5
basis functions which are generated using ASPCAP estimates of spectra in B. At the end of
the case study, we illustrate how to choose K and J by using the third last spectrum in B
as a "test" or "validation" set, given that S_org is our "training" set, i.e., functional
PCs are computed for S_org; we need more basis functions in this case, which come from
ASPCAP estimates of ``case_cut_sep``.
'''
# Narrow Teff / log g window -> the 20-spectrum case-study sample
case_cut = apogee_cat_cut[np.where((4270 < apogee_cat_cut['TEFF']) & (apogee_cat_cut['TEFF'] < 4300))]
case_cut = case_cut[np.where((1.4 < case_cut['LOGG']) & (case_cut['LOGG'] < 1.6))]
# A separate cut is used for generating extra basis functions
case_cut_sep = apogee_cat_cut[np.where((4240 < apogee_cat_cut['TEFF']) & (apogee_cat_cut['TEFF'] < 4280))]
case_cut_sep = case_cut_sep[np.where((1.4 < case_cut_sep['LOGG']) & (case_cut_sep['LOGG'] < 1.6))]
# Read the two samples
case_cut_spec = apCat.read_allStar_spectra(case_cut)
case_cut_sep_spec = apCat.read_allStar_spectra(case_cut_sep)
case_cut_specproc, case_cut_specerr, case_cut_specweight = specproc.process_spectra(
    spectra_info=case_cut_spec, badcombpixmask=badcombpixmask)
case_cut_sep_specproc, case_cut_sep_specerr, case_cut_sep_specweight = specproc.process_spectra(
    spectra_info=case_cut_sep_spec, badcombpixmask=badcombpixmask)
# Mask the spectra based on APOGEE bitmask.
# BUG FIX: these masks previously referenced undefined names ``test_cut_weight``
# and ``test_cut_sep_weight``; the weights returned by process_spectra above are
# the intended arrays.
case_cut_spec_masked = np.ma.masked_array(case_cut_specproc, mask=(case_cut_specweight==0))
case_cut_sep_spec_masked = np.ma.masked_array(case_cut_sep_specproc, mask=(case_cut_sep_specweight==0))
# APOGEE wavelengths
# NOTE(review): pix2wv is called with dr='12' while change_dr('14') was set
# above -- presumably the wavelength grid is shared across DRs; confirm.
pix = np.arange(7214)
wave = pix2wv(pix, dr='12')
wave_mask = (((wave > 15799) & (wave < 15865)) | ((wave > 16425) & (wave < 16485)))
masked_wave = np.ma.masked_array(wave, wave_mask)
# Create the "original+systematic" sample: tilt the first 10 spectra with a
# wavelength-linear systematic (negative slope for the first five spectra,
# positive slope for the last five).
# BUG FIX: the loop bodies previously indexed with the stale variable ``i``
# (left over from an earlier loop) instead of the loop variable ``ind``.
case_cut_sys_spec_masked = np.ma.zeros((10, 200))
for ind in range(5):
    case_cut_sys_spec_masked[ind] = case_cut_spec_masked[ind, :200] - (ind+1)*0.00025*(
        masked_wave[:200] - masked_wave[0])
for ind in range(5, 10):
    case_cut_sys_spec_masked[ind] = case_cut_spec_masked[ind, :200] + (ind-4)*0.00025*(
        masked_wave[:200] - masked_wave[0])
# ------------------- Plot Case Study Sample -------------------
mpl.rcParams["axes.labelsize"] = 28
mpl.rcParams['xtick.labelsize']= 20
mpl.rcParams['ytick.labelsize']= 20
fig, ax = plt.subplots(1, 1, figsize=(14, 12))
# Plot spectra for the two samples, offset vertically by 0.3 per spectrum
for i in range(9):
    ax.scatter(masked_wave[:200], case_cut_spec_masked[i, 0:200] - i*0.3,
               color='orange', marker='o', s=12)
    ax.scatter(masked_wave[:200], case_cut_sys_spec_masked[i] - i*0.3,
               color='black', marker='o', s=12)
# Label the samples (only the 10th spectrum carries the legend labels)
ax.scatter(masked_wave[:200], case_cut_spec_masked[9, 0:200] - 9*0.3, color='orange',
           label='original', marker='o', s=12)
ax.scatter(masked_wave[:200], case_cut_sys_spec_masked[9] - 9*0.3, color='black',
           label='original + systematic', marker='o', s=12)
ax.legend(loc='upper right', fontsize=21)
ax.xaxis.set_major_locator(MultipleLocator(10))
ax.xaxis.set_minor_locator(MultipleLocator(2))
# Relabel the offset y-positions with the spectrum number (1..10)
ax.set_yticks(np.linspace(1.0, -1.7, 10))
ax.set_yticklabels(np.arange(1, 11))
ax.set_xlabel(r'Wavelength $\lambda(\AA)$')
ax.set_ylabel(r'Spectrum number $n$ $[\mathbf{y}_n]$')
ax.set_ylim(-2.1, 1.5)
plt.savefig('data_case_study.png')
# ------------------- FPCA of Samples -------------------
# NOTE(review): ``fpca`` is used below but never imported in this file --
# add the appropriate import (e.g. ``import fpca``) or this raises NameError.
# Choose 5 random basis functions from ``case_cut[10:]``
basis = specsim.sim_spectra(case_cut[10:])
# FPCA of masked case study spectra S_org (K=5 basis functions, first 200 pixels)
fpca_org = fpca.FPCA(case_cut_spec_masked[:10, :200], 5, phi=basis[:, :200],
                     xerr=case_cut_specerr[:10, :200])
fpca_org.alpha_regression()
fpca_org.solve_eigenproblem()
# Here we use same basis functions and same error, but different data.
# Essentially, the data has an added systematic.
fpca_org_sys = fpca.FPCA(case_cut_sys_spec_masked[:10], 5, phi=basis[:, :200],
                         xerr=case_cut_specerr[:10, :200])
fpca_org_sys.alpha_regression()
fpca_org_sys.solve_eigenproblem()
# ------------------- Plot Basis Functions -------------------
mpl.rcParams["axes.labelsize"] = 23
mpl.rcParams['xtick.labelsize']= 18
mpl.rcParams['ytick.labelsize']= 18
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
color = ['indigo', 'green', 'orange', 'maroon', 'red']
axins = inset_axes(ax, width="60%", height="25%", loc=1)
# Plot basis functions, offset vertically by 0.4 each
for i in range(5):
    ax.plot(masked_wave[:200], basis[i, :200] - i*0.4, color=color[i])
# Connect the inset to the relevant wavelength (pixel index 22)
ax.vlines(masked_wave[22], -1, 1.5, linestyle='--', linewidth=2, color='silver')
ax.plot([masked_wave[22], masked_wave[78]], [1.5, 1.97], linestyle='--', linewidth=2, color='silver')
ax.plot([masked_wave[22], masked_wave[78]], [1.5, 1.20], linestyle='--', linewidth=2, color='silver')
# Relabel the offset y-positions with the basis number (1..5)
ax.set_yticks(np.linspace(1, -0.6, 5))
ax.set_yticklabels(np.arange(1, 6))
ax.xaxis.set_major_locator(MultipleLocator(10))
ax.xaxis.set_minor_locator(MultipleLocator(2))
ax.set_xlabel(r'Wavelength $\lambda(\AA)$')
ax.set_ylabel(r'Basis number $k$ [$\phi_k(\lambda)$]')
ax.set_ylim(-1, 2)
# Plot the inset: value of all five basis functions at pixel 22
axins.scatter(np.arange(5), basis[:5, 22], color=color[:5], marker='o', s=85)
axins.set_ylim(0.785, 0.835)
axins.set_ylabel(r'$f/f_c(\lambda_x)$', fontsize=16)
axins.set_yticks([0.80, 0.82])
axins.set_yticklabels([0.80, 0.82], fontsize=12)
axins.set_xticks(np.arange(0, 5))
axins.set_xticklabels(np.arange(1, 6), fontsize=12)
# Indicate the wavelength for which we create an inset
plt.text(-2.3, 0.64, r'$\lambda_x$', fontsize=18, color='black')
plt.savefig('basis_case_study.png')
# ------------------- Plot Functional Approximation -------------------
mpl.rcParams["axes.labelsize"] = 23
mpl.rcParams['xtick.labelsize']= 18
mpl.rcParams['ytick.labelsize']= 18
# Wavelengths of the masked pixels of spectrum 0; first/last bound the
# shaded "mask" span drawn in both panels
masked_data_ind = masked_wave[0:200][(case_cut_spec_masked[0, 0:200].mask == True)]
mask_lower_ind = masked_data_ind[0]
mask_higher_ind = masked_data_ind[-1]
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(14, 6),
                       gridspec_kw={'height_ratios': [3, 1.5]})
# Plot spectrum and the functional approx (sample mean + alpha . phi, row 0)
ax[0].errorbar(masked_wave[0:200], case_cut_spec_masked[0, 0:200],
               yerr=case_cut_specerr[0, :200], color='black',
               fmt='.', label=r'spectral data $\mathbf{y}_{o,1}$')
ax[0].plot(masked_wave[0:200], fpca_org.sample_mu + np.dot(fpca_org.regressed_alpha,
                                                           fpca_org.phi_t)[0],
           color='steelblue', linestyle='--', linewidth=2,
           label=r'smooth function $\hat f_{o,1}(\lambda)$')
# Highlight the masked region
ax[0].axvspan(mask_lower_ind, mask_higher_ind, alpha=0.8,
              color='lavender')
ax[0].legend(loc='upper right', fontsize=15)
ax[0].set_ylim([0.775, 1.175])
ax[0].yaxis.set_major_locator(MultipleLocator(0.1))
ax[0].yaxis.set_minor_locator(MultipleLocator(0.025))
ax[0].get_yaxis().labelpad = 26
ax[0].set_ylabel(r'$f/f_c (\lambda)$')
# Plot residuals (data minus the smooth reconstruction)
ax[1].axhspan(-0.005, 0.005, alpha=0.35,
              color='silver', label='APOGEE base uncertainty')
ax[1].scatter(masked_wave[0:200], (case_cut_spec_masked[0, 0:200] - \
              (fpca_org.sample_mu + np.dot(fpca_org.regressed_alpha,
                                           fpca_org.phi_t)[0])),
              color='black', s=8)
# Highlight the masked region
ax[1].axvspan(mask_lower_ind, mask_higher_ind, alpha=0.8,
              color='lavender', label='mask')
ax[1].legend(ncol=2, loc='lower left', fontsize=15)
ax[1].set_ylim([-0.075, 0.075])
ax[1].xaxis.set_major_locator(MultipleLocator(10))
ax[1].xaxis.set_minor_locator(MultipleLocator(2))
ax[1].yaxis.set_major_locator(MultipleLocator(0.05))
ax[1].yaxis.set_minor_locator(MultipleLocator(0.025))
ax[1].set_xlabel(r'Wavelength $\lambda(\AA)$')
ax[1].set_ylabel('Residuals')
plt.savefig('func_approx_case_study.png')
# ------------------- PCA of Samples -------------------
# Ordinary (non-functional) PCA baseline: fill masked pixels with the column
# mean, centre each sample matrix, then fit a 5-component PCA to each.
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
X_org = imp_mean.fit_transform(case_cut_spec_masked[:10, :200].filled(np.nan))
X_org = X_org - np.mean(X_org, axis=0)
X_org_sys = imp_mean.fit_transform(case_cut_sys_spec_masked[:10].filled(np.nan))
X_org_sys = X_org_sys - np.mean(X_org_sys, axis=0)
# PCA.fit returns the estimator itself, so the fitted objects can be bound directly
pca_org = PCA(n_components=5).fit(X_org)
pca_org_sys = PCA(n_components=5).fit(X_org_sys)
# ------------------- Plot eigenanalysis results -------------------
mpl.rcParams["axes.labelsize"] = 34
mpl.rcParams['xtick.labelsize']= 28
mpl.rcParams['ytick.labelsize']= 28
fig, ax = plt.subplots(2, 2, figsize=(32, 20), gridspec_kw={'width_ratios': [3, 2.5]})
fig.tight_layout(pad=14, h_pad=4)
# Plot the FPCs (column -1 of psi_cap_t is treated as the first FPC),
# offset vertically by 0.5 per component
for eigen in np.arange(2):
    ax[0][0].plot(masked_wave[:200], fpca_org.psi_cap_t.real[:, -(eigen+1)] - eigen*0.5,
                  color='orange', linewidth=2.5)
    ax[0][0].plot(masked_wave[:200], fpca_org_sys.psi_cap_t.real[:, -(eigen+1)] - eigen*0.5,
                  color='black', linestyle='-.', linewidth=2.5)
# Third FPC is drawn separately so it can carry the legend labels
ax[0][0].plot(masked_wave[:200], fpca_org.psi_cap_t.real[:, -3] - 2*0.5,
              color='orange', linewidth=2.5, label='original')
ax[0][0].plot(masked_wave[:200], fpca_org_sys.psi_cap_t.real[:, -3] - 2*0.5,
              color='black', linestyle='-.', linewidth=2.5, label='original + systematic')
ax[0][0].set_xlim(masked_wave[0], masked_wave[200])
ax[0][0].set_ylim(-1.5, 0.3)
ax[0][0].xaxis.set_major_locator(MultipleLocator(10))
ax[0][0].xaxis.set_minor_locator(MultipleLocator(2))
ax[0][0].set_yticks([0, -0.5, -1])
ax[0][0].set_yticklabels(['$\Psi_{}(\lambda)$'.format(i) for i in range(1, 4)])
ax[0][0].set_ylabel(r'Principal Component')
ax[0][0].legend(loc='lower right', fontsize=25)
# Plot the PCs (sklearn components are already ordered largest-first)
for eigen in np.arange(2):
    ax[1][0].plot(masked_wave[:200], pca_org.components_[eigen] - eigen*0.5,
                  color='orange', linewidth=2.5)
    ax[1][0].plot(masked_wave[:200], pca_org_sys.components_[eigen] - eigen*0.5,
                  color='black', linestyle='-.', linewidth=2.5)
# Third PC drawn separately for the legend labels
ax[1][0].plot(masked_wave[:200], pca_org.components_[2] - 2*0.5,
              color='orange', linewidth=2.5, label='original')
ax[1][0].plot(masked_wave[:200], pca_org_sys.components_[2] - 2*0.5,
              color='black', linestyle='-.', linewidth=2.5, label='original + systematic')
ax[1][0].set_xlim(masked_wave[0], masked_wave[200])
ax[1][0].set_ylim(-1.5, 0.3)
ax[1][0].xaxis.set_major_locator(MultipleLocator(10))
ax[1][0].xaxis.set_minor_locator(MultipleLocator(2))
ax[1][0].set_yticks([0, -0.5, -1])
ax[1][0].set_yticklabels([r'$\overrightarrow{PC}_1$',
                          r'$\overrightarrow{PC}_2$',
                          r'$\overrightarrow{PC}_3$'])
ax[1][0].set_xlabel(r'Wavelength $\lambda(\AA)$')
ax[1][0].set_ylabel(r'Principal Component')
ax[1][0].get_yaxis().labelpad = 23
ax[1][0].legend(loc='lower right', fontsize=25)
# Scree plots for FPCA (percentage variance explained, reversed to match
# the largest-first component ordering used above)
ax[0][1].plot(fpca_org.perc_var[::-1], marker='o', color='orange',
              linewidth=2.5, label='original')
ax[0][1].plot(fpca_org_sys.perc_var[::-1], marker='o', linestyle='-.',
              linewidth=2.5, color='black', label='original + systematic')
ax[0][1].set_ylim([-5, 101])
ax[0][1].set_xticks(np.arange(5))
ax[0][1].set_xticklabels(['$\Psi_{}(\lambda)$'.format(i) for i in range(1, 6)])
ax[0][1].yaxis.set_major_locator(MultipleLocator(20))
ax[0][1].yaxis.set_minor_locator(MultipleLocator(5))
ax[0][1].set_ylabel('% Variance Explained \n' + r'(100*$\zeta_j)$')
ax[0][1].get_yaxis().labelpad = 0
ax[0][1].legend(loc='upper right', fontsize=25)
# Scree plots for PCA
ax[1][1].plot(pca_org.explained_variance_ratio_*100,
              marker='o', color='orange', linewidth=2.5, label='original')
ax[1][1].plot(pca_org_sys.explained_variance_ratio_*100,
              marker='o', linestyle='-.', linewidth=2.5,
              color='black', label='original + systematic')
ax[1][1].set_ylim([-5, 101])
ax[1][1].set_xticks(np.arange(5))
ax[1][1].set_xticklabels([r'$\overrightarrow{PC}_1$', r'$\overrightarrow{PC}_2$', r'$\overrightarrow{PC}_3$',
                          r'$\overrightarrow{PC}_4$', r'$\overrightarrow{PC}_5$'])
ax[1][1].yaxis.set_major_locator(MultipleLocator(20))
ax[1][1].yaxis.set_minor_locator(MultipleLocator(5))
ax[1][1].set_xlabel(r'Principal Component')
ax[1][1].set_ylabel('% Variance Explained \n' + r'(100*$\zeta_j)$')
ax[1][1].get_yaxis().labelpad = 0
ax[1][1].legend(loc='upper right', fontsize=25)
# Row titles, placed in data coordinates of the last-active axes
plt.text(-8.3, 170, 'FPCA', fontsize=50, weight='bold')
plt.text(-8.3, 45, 'PCA', fontsize=50, weight='bold')
plt.savefig('pc_case_study.png')
# Report the mean slope of PC1 induced by the systematic.
# BUG FIX: the second string fragment was missing its f-prefix, so the
# expression in braces printed literally; the implicit concatenation also
# lacked a separating space ("sample" + "is").
slope = (pca_org_sys.components_[0][-1] - pca_org_sys.components_[0][0]) / (wave[200] - wave[0])
print(f'Mean Slope of PC1 for the original + systematic sample is {slope}')
# ------------------- Covariance Structures -------------------
# Covariance functions estimated from the basis expansion:
# v = (1/n) * (alpha phi)^T (alpha phi), i.e. the sample covariance of the
# smooth reconstructions
v_org = 1/fpca_org.n * np.dot(np.dot(fpca_org.phi_t.T, fpca_org.regressed_alpha.T),
                              np.dot(fpca_org.regressed_alpha, fpca_org.phi_t))
v_org_sys = 1/fpca_org_sys.n * np.dot(np.dot(fpca_org_sys.phi_t.T, fpca_org_sys.regressed_alpha.T),
                                      np.dot(fpca_org_sys.regressed_alpha, fpca_org_sys.phi_t))
# Covariance matrix of the imputed, mean-centred data (the PCA analogue)
C_org = 1/fpca_org.n * np.dot(X_org.T, X_org)
C_org_sys = 1/fpca_org_sys.n * np.dot(X_org_sys.T, X_org_sys)
# ------------------- Plot Covariance Structures -------------------
mpl.rcParams["axes.labelsize"] = 32
mpl.rcParams['xtick.labelsize']= 26
mpl.rcParams['ytick.labelsize']= 26
fig, ax = plt.subplots(2, 2, figsize=(22, 15))
# Shared colour limits for the "original" column (left)
minmin = np.min([np.min(v_org), np.min(C_org)])
maxmax = np.max([np.max(v_org), np.max(C_org)])
# Covariance function for original
im1 = ax[0][0].imshow(v_org, vmin=minmin, vmax=maxmax,
                      cmap='afmhot', aspect='auto')
# Tick at pixel indices 39, 86, 133, 180, labelled with their wavelengths
ax[0][0].set_xticks(np.arange(39, 200, 47))
ax[0][0].set_yticks(np.arange(39, 200, 47))
ax[0][0].set_xticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[0][0].set_yticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[0][0].set_ylabel(r'Wavelength $\lambda(\AA)$')
ax[0][0].set_title('ORIGINAL', fontsize=30, pad=20)
# Covariance matrix for original
im2 = ax[1][0].imshow(C_org, cmap='afmhot', vmin=minmin,
                      vmax=maxmax, aspect='auto')
ax[1][0].set_xticks(np.arange(39, 200, 47))
ax[1][0].set_yticks(np.arange(39, 200, 47))
ax[1][0].set_xticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[1][0].set_yticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[1][0].set_xlabel(r'Wavelength $\lambda(\AA)$')
ax[1][0].set_ylabel(r'Wavelength $\lambda(\AA)$')
# Shared colour limits for the "original + systematic" column.
# BUG FIX: these previously referenced undefined names ``v_sys`` and
# ``C_pca_sys``; the systematic-sample covariances computed above are
# ``v_org_sys`` and ``C_org_sys``.
minmin = np.min([np.min(v_org_sys), np.min(C_org_sys)])
maxmax = np.max([np.max(v_org_sys), np.max(C_org_sys)])
# Covariance function for original + systematic
im3 = ax[0][1].imshow(v_org_sys, vmin=minmin, vmax=maxmax,
                      cmap='afmhot', aspect='auto')
ax[0][1].set_xticks(np.arange(39, 200, 47))
ax[0][1].set_yticks(np.arange(39, 200, 47))
ax[0][1].set_xticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[0][1].set_yticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[0][1].set_ylabel(r'Wavelength $\lambda(\AA)$')
ax[0][1].set_title('ORIGINAL + SYSTEMATIC', fontsize=30, pad=20)
# Covariance matrix for original + systematic
im4 = ax[1][1].imshow(C_org_sys, cmap='afmhot', vmin=minmin,
                      vmax=maxmax, aspect='auto')
ax[1][1].set_xticks(np.arange(39, 200, 47))
ax[1][1].set_yticks(np.arange(39, 200, 47))
ax[1][1].set_xticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[1][1].set_yticklabels(['{:.0f}'.format(masked_wave[i]) for i in np.arange(39, 200, 47)])
ax[1][1].set_xlabel(r'Wavelength $\lambda(\AA)$')
ax[1][1].set_ylabel(r'Wavelength $\lambda(\AA)$')
fig.tight_layout(h_pad=2, pad=16)
# Plot the colorbars (one per column, in manually-placed figure axes)
fig.subplots_adjust(right=0.9)
cbar_ax_1 = fig.add_axes([0.475, 0.26, 0.015, 0.5])
cbar_1 = fig.colorbar(im1, cax=cbar_ax_1, ticks=[-0.0002, 0, 0.0002, 0.0004], orientation='vertical')
cbar_1.ax.ticklabel_format(axis='y', scilimits=[-4, 4])
cbar_1.ax.set_ylabel('Covariance')
cbar_ax_2 = fig.add_axes([0.93, 0.26, 0.015, 0.5])
cbar_2 = fig.colorbar(im3, cax=cbar_ax_2, ticks=[0, 0.0004, 0.0008, 0.0012], orientation='vertical')
cbar_2.ax.ticklabel_format(axis='y', scilimits=[-3, 3])
cbar_2.ax.set_ylabel('Covariance')
# Row labels, positioned in data coordinates of the last-active axes
plt.text(-0.098, 0.0011, 'FPCA', fontsize=36, weight='bold')
plt.text(-0.098, 0.00008, 'PCA', fontsize=36, weight='bold')
plt.savefig('cov_case_study.png')
# ------------------- How to choose K and J? -------------------
# Choose 50 random basis functions from ``case_cut_sep``
# (NOTE: this rebinds ``basis``, replacing the 5-function set used above)
basis = specsim.sim_spectra(case_cut_sep)
# FPCA of masked case study spectra S_org with a much larger basis (K=50)
fpca_org_train = fpca.FPCA(case_cut_spec_masked[:10, :200], 50, phi=basis[:, :200],
                           xerr=case_cut_specerr[:10, :200])
fpca_org_train.alpha_regression()
fpca_org_train.solve_eigenproblem()
# Residual error statistics, one slot per candidate number of components
mae = np.zeros(50)
mad = np.zeros(50)
# FPCs -- reversed so the leading component comes first
# (presumably matching the eigenvalue ordering used earlier; TODO confirm)
psi_t = fpca_org_train.psi_cap_t[::-1]
# Mean subtracted validation spectrum (third last of B) and its weights
test_mean_sub = case_cut_spec_masked[-3, :200] - fpca_org_train.sample_mu
test_weight = case_cut_specweight[-3, :200]
for i in range(0, 51):
# Fit an FPC model using the EM algorithm
empca_fit = empca.Model(psi_t[:, | |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from utils import to_gpu
import json
import os
import numpy as np
class MLP_Latent(nn.Module):
    """Feed-forward MLP mapping an input vector into a latent space.

    ``layers`` is a dash-separated size string (e.g. "300-300"); each hidden
    step is Linear -> (BatchNorm1d, except on the first layer) -> activation,
    followed by a final Linear to ``noutput`` with no activation.

    Notes:
      * the default ``activation`` module instance is shared by every layer
        (harmless for stateless activations such as ReLU);
      * ``gpu`` is accepted for interface compatibility but unused here;
      * submodule names ("layerN"/"bnN"/"activationN") are kept exactly as in
        the original so existing checkpoints still load.
    """

    def __init__(self, ninput, noutput, layers,
                 activation=nn.ReLU(), gpu=False):
        super(MLP_Latent, self).__init__()
        self.ninput = ninput
        self.noutput = noutput

        layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
        self.layers = []

        for i in range(len(layer_sizes)-1):
            layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
            self.layers.append(layer)
            self.add_module("layer"+str(i+1), layer)

            # No batch normalization in first layer
            if i != 0:
                bn = nn.BatchNorm1d(layer_sizes[i+1])
                self.layers.append(bn)
                self.add_module("bn"+str(i+1), bn)

            self.layers.append(activation)
            self.add_module("activation"+str(i+1), activation)

        layer = nn.Linear(layer_sizes[-1], noutput)
        self.layers.append(layer)
        self.add_module("layer"+str(len(self.layers)), layer)

        self.init_weights()

    def forward(self, x):
        """Apply every registered layer in sequence."""
        for layer in self.layers:  # index from enumerate was unused; iterate directly
            x = layer(x)
        return x

    def init_weights(self):
        """N(0, 0.02) weights and zero biases for Linear and BatchNorm layers."""
        init_std = 0.02
        for layer in self.layers:
            try:
                layer.weight.data.normal_(0, init_std)
                layer.bias.data.fill_(0)
            except AttributeError:
                # Activation modules have no weight/bias -- skip them.
                # (Narrowed from a bare ``except`` so unrelated errors surface.)
                pass
class MLP_Classify(nn.Module):
    """MLP classifier head: same trunk as MLP_Latent, sigmoid on the output.

    ``layers`` is a dash-separated size string; each hidden step is
    Linear -> (BatchNorm1d, except on the first layer) -> activation, then a
    final Linear to ``noutput`` squashed through a sigmoid (values in (0, 1)).
    ``gpu`` is accepted for interface compatibility but unused here; submodule
    names are kept exactly as in the original so checkpoints still load.
    """

    def __init__(self, ninput, noutput, layers,
                 activation=nn.ReLU(), gpu=False):
        super(MLP_Classify, self).__init__()
        self.ninput = ninput
        self.noutput = noutput

        layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
        self.layers = []

        for i in range(len(layer_sizes)-1):
            layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
            self.layers.append(layer)
            self.add_module("layer"+str(i+1), layer)

            # No batch normalization in first layer
            if i != 0:
                bn = nn.BatchNorm1d(layer_sizes[i+1])
                self.layers.append(bn)
                self.add_module("bn"+str(i+1), bn)

            self.layers.append(activation)
            self.add_module("activation"+str(i+1), activation)

        layer = nn.Linear(layer_sizes[-1], noutput)
        self.layers.append(layer)
        self.add_module("layer"+str(len(self.layers)), layer)

        self.init_weights()

    def forward(self, x):
        """Sequential layers followed by an elementwise sigmoid."""
        for layer in self.layers:
            x = layer(x)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical output).
        x = torch.sigmoid(x)
        return x

    def init_weights(self):
        """N(0, 0.02) weights and zero biases for Linear and BatchNorm layers."""
        init_std = 0.02
        for layer in self.layers:
            try:
                layer.weight.data.normal_(0, init_std)
                layer.bias.data.fill_(0)
            except AttributeError:
                # Activation modules have no parameters; narrowed from bare except.
                pass
class Seq2Seq2Decoder(nn.Module):
    """LSTM autoencoder with one shared encoder and two separate decoders.

    The encoder's final hidden state is L2-normalized onto the unit sphere,
    mapped through ``latent_encoder`` (an MLP_Latent), and decoded by decoder
    1 or decoder 2 as selected by the ``whichdecoder`` argument (1 or 2) of
    forward/decode/generate. Uses the legacy ``Variable`` API throughout.
    """
    def __init__(self, emsize, nhidden, ntokens, nlayers, arch_latent, noise_radius=0.2,
                 share_decoder_emb=False, hidden_init=False, dropout=0, gpu=False):
        """``arch_latent`` is the MLP_Latent layer-spec string; ``noise_radius``
        is the std of the Gaussian noise optionally added in encode()."""
        super(Seq2Seq2Decoder, self).__init__()
        self.nhidden = nhidden
        self.emsize = emsize
        self.ntokens = ntokens
        self.nlayers = nlayers
        self.noise_radius = noise_radius
        self.hidden_init = hidden_init
        self.dropout = dropout
        self.gpu = gpu

        # <sos> token-id buffer; resized and refilled per batch in generate()
        self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))

        # Vocabulary embedding (one for the encoder, one per decoder)
        self.embedding = nn.Embedding(ntokens, emsize)
        self.embedding_decoder1 = nn.Embedding(ntokens, emsize)
        self.embedding_decoder2 = nn.Embedding(ntokens, emsize)

        # RNN Encoder and Decoder
        self.encoder = nn.LSTM(input_size=emsize,
                               hidden_size=nhidden,
                               num_layers=nlayers,
                               dropout=dropout,
                               batch_first=True)
        self.latent_encoder = MLP_Latent(ninput=nhidden, noutput=nhidden, layers=arch_latent) # already weight init'ed
        # Decoders read each token embedding concatenated with the latent code
        decoder_input_size = emsize+nhidden
        self.decoder1 = nn.LSTM(input_size=decoder_input_size,
                                hidden_size=nhidden,
                                num_layers=1,
                                dropout=dropout,
                                batch_first=True)
        self.decoder2 = nn.LSTM(input_size=decoder_input_size,
                                hidden_size=nhidden,
                                num_layers=1,
                                dropout=dropout,
                                batch_first=True)

        # Initialize Linear Transformation (shared vocab projection)
        self.linear = nn.Linear(nhidden, ntokens)

        self.init_weights()

        if share_decoder_emb:
            # Tie the two decoder embedding matrices to a single weight tensor
            self.embedding_decoder2.weight = self.embedding_decoder1.weight

    def init_weights(self):
        """Uniform(-0.1, 0.1) init for embeddings/RNNs; zero bias on the projection.

        ``latent_encoder`` is excluded: it initializes itself in its constructor.
        """
        initrange = 0.1

        # Initialize Vocabulary Matrix Weight
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.embedding_decoder1.weight.data.uniform_(-initrange, initrange)
        self.embedding_decoder2.weight.data.uniform_(-initrange, initrange)

        # Initialize Encoder and Decoder Weights
        for p in self.encoder.parameters():
            p.data.uniform_(-initrange, initrange)
        for p in self.decoder1.parameters():
            p.data.uniform_(-initrange, initrange)
        for p in self.decoder2.parameters():
            p.data.uniform_(-initrange, initrange)

        # Initialize Linear Weight
        self.linear.weight.data.uniform_(-initrange, initrange)
        self.linear.bias.data.fill_(0)

    def init_hidden(self, bsz):
        """All-zero (h0, c0) pair for a batch of size ``bsz``."""
        zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
        zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
        return (to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2))

    def init_state(self, bsz):
        """All-zero state tensor for a batch of size ``bsz``."""
        zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
        return to_gpu(self.gpu, zeros)

    def store_grad_norm(self, grad):
        """Backward hook: record mean row-wise L2 norm of ``grad``; pass it through."""
        norm = torch.norm(grad, 2, 1)
        self.grad_norm = norm.detach().data.mean()
        return grad

    def forward(self, whichdecoder, indices, lengths, noise=False, encode_only=False, base_only=False):
        """Autoencode ``indices``.

        With ``encode_only`` returns the raw normalized hidden state
        (``base_only``) or the latent code; otherwise returns per-token
        vocabulary logits of shape (batch, maxlen, ntokens).
        """
        batch_size, maxlen = indices.size()
        hidden = self.encode(indices, lengths, noise)
        if hidden.requires_grad:
            hidden.register_hook(self.store_grad_norm)
        latent = self.latent_encoder(hidden)
        if encode_only:
            return hidden if base_only else latent
        decoded = self.decode(whichdecoder, latent, batch_size, maxlen,
                              indices=indices, lengths=lengths)
        return decoded

    def encode(self, indices, lengths, noise):
        """Run the encoder LSTM and L2-normalize its last-layer hidden state.

        When ``noise`` is truthy (and noise_radius > 0) adds Gaussian noise.
        """
        embeddings = self.embedding(indices)
        packed_embeddings = pack_padded_sequence(input=embeddings,
                                                 lengths=lengths,
                                                 batch_first=True)

        # Encode
        packed_output, state = self.encoder(packed_embeddings)
        hidden, cell = state

        # batch_size x nhidden
        hidden = hidden[-1]  # get hidden state of last layer of encoder

        # normalize to unit ball (l2 norm of 1) - p=2, dim=1
        norms = torch.norm(hidden, 2, 1)

        # For older versions of PyTorch use:
        # hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))
        # For newest version of PyTorch (as of 8/25) use this:
        hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))

        if noise and self.noise_radius > 0:
            # NOTE(review): ``means=`` is the pre-0.4 torch.normal keyword
            # (modern PyTorch spells it ``mean=``) -- confirm target version.
            gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
                                       std=self.noise_radius)
            hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))

        return hidden

    def decode(self, whichdecoder, hidden, batch_size, maxlen, indices=None, lengths=None):
        """Teacher-forced decoding: the code is concatenated to every token embedding."""
        # batch x maxlen x hidden
        all_hidden = hidden.unsqueeze(1).repeat(1, maxlen, 1)

        if self.hidden_init:
            # initialize decoder hidden state to encoder output
            state = (hidden.unsqueeze(0), self.init_state(batch_size))
        else:
            state = self.init_hidden(batch_size)

        if whichdecoder == 1:
            embeddings = self.embedding_decoder1(indices)
        else:
            embeddings = self.embedding_decoder2(indices)

        augmented_embeddings = torch.cat([embeddings, all_hidden], 2)
        packed_embeddings = pack_padded_sequence(input=augmented_embeddings,
                                                 lengths=lengths,
                                                 batch_first=True)

        if whichdecoder == 1:
            packed_output, state = self.decoder1(packed_embeddings, state)
        else:
            packed_output, state = self.decoder2(packed_embeddings, state)

        output, lengths = pad_packed_sequence(packed_output, batch_first=True)

        # reshape to batch_size*maxlen x nhidden before linear over vocab
        decoded = self.linear(output.contiguous().view(-1, self.nhidden))
        decoded = decoded.view(batch_size, maxlen, self.ntokens)

        return decoded

    def generate(self, whichdecoder, hidden, maxlen, sample=False, temp=1.0):
        """Generate through decoder; no backprop"""
        batch_size = hidden.size(0)

        if self.hidden_init:
            # initialize decoder hidden state to encoder output
            state = (hidden.unsqueeze(0), self.init_state(batch_size))
        else:
            state = self.init_hidden(batch_size)

        # <sos>
        self.start_symbols.data.resize_(batch_size, 1)
        self.start_symbols.data.fill_(1)
        self.start_symbols = to_gpu(self.gpu, self.start_symbols)

        if whichdecoder == 1:
            embedding = self.embedding_decoder1(self.start_symbols)
        else:
            embedding = self.embedding_decoder2(self.start_symbols)

        inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)

        # unroll one token at a time, feeding each prediction back as input
        all_indices = []
        for i in range(maxlen):
            if whichdecoder == 1:
                output, state = self.decoder1(inputs, state)
            else:
                output, state = self.decoder2(inputs, state)
            overvocab = self.linear(output.squeeze(1))

            if not sample:
                vals, indices = torch.max(overvocab, 1)
                indices = indices.unsqueeze(1)
            else:
                # NOTE(review): dead code -- the assert below always fails, so
                # sample=True is effectively disabled; confirm before enabling.
                assert 1 == 0
                # sampling
                probs = F.softmax(overvocab/temp)
                indices = torch.multinomial(probs, 1)
            all_indices.append(indices)

            if whichdecoder == 1:
                embedding = self.embedding_decoder1(indices)
            else:
                embedding = self.embedding_decoder2(indices)
            inputs = torch.cat([embedding, hidden.unsqueeze(1)], 2)

        max_indices = torch.cat(all_indices, 1)

        return max_indices
class MLP_D(nn.Module):
    """MLP discriminator/critic returning a single scalar mean score per batch.

    Hidden steps: Linear -> (BatchNorm1d, except on the first layer) ->
    activation (LeakyReLU(0.2) by default), then a final Linear to ``noutput``
    whose outputs are averaged into one scalar.
    ``gpu`` is accepted for interface compatibility but unused here; submodule
    names are kept exactly as in the original so checkpoints still load.
    """

    def __init__(self, ninput, noutput, layers,
                 activation=nn.LeakyReLU(0.2), gpu=False):
        super(MLP_D, self).__init__()
        self.ninput = ninput
        self.noutput = noutput

        layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
        self.layers = []

        for i in range(len(layer_sizes)-1):
            layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
            self.layers.append(layer)
            self.add_module("layer"+str(i+1), layer)

            # No batch normalization on the first layer
            if i != 0:
                bn = nn.BatchNorm1d(layer_sizes[i+1], eps=1e-05, momentum=0.1)
                self.layers.append(bn)
                self.add_module("bn"+str(i+1), bn)

            self.layers.append(activation)
            self.add_module("activation"+str(i+1), activation)

        layer = nn.Linear(layer_sizes[-1], noutput)
        self.layers.append(layer)
        self.add_module("layer"+str(len(self.layers)), layer)

        self.init_weights()

    def forward(self, x):
        """Score the batch: sequential layers, then collapse to a scalar mean."""
        for layer in self.layers:
            x = layer(x)
        x = torch.mean(x)
        return x

    def init_weights(self):
        """N(0, 0.02) weights and zero biases for Linear and BatchNorm layers."""
        init_std = 0.02
        for layer in self.layers:
            try:
                layer.weight.data.normal_(0, init_std)
                layer.bias.data.fill_(0)
            except AttributeError:
                # Activation modules have no parameters; narrowed from bare except.
                pass
class MLP_G(nn.Module):
    """MLP generator mapping a noise vector to the latent space.

    Unlike MLP_Latent/MLP_D, BatchNorm1d follows EVERY hidden Linear here,
    including the first one; the final Linear to ``noutput`` has no activation.
    ``gpu`` is accepted for interface compatibility but unused here; submodule
    names are kept exactly as in the original so checkpoints still load.
    """

    def __init__(self, ninput, noutput, layers,
                 activation=nn.ReLU(), gpu=False):
        super(MLP_G, self).__init__()
        self.ninput = ninput
        self.noutput = noutput

        layer_sizes = [ninput] + [int(x) for x in layers.split('-')]
        self.layers = []

        for i in range(len(layer_sizes)-1):
            layer = nn.Linear(layer_sizes[i], layer_sizes[i+1])
            self.layers.append(layer)
            self.add_module("layer"+str(i+1), layer)

            # BatchNorm on every hidden layer (first layer included)
            bn = nn.BatchNorm1d(layer_sizes[i+1], eps=1e-05, momentum=0.1)
            self.layers.append(bn)
            self.add_module("bn"+str(i+1), bn)

            self.layers.append(activation)
            self.add_module("activation"+str(i+1), activation)

        layer = nn.Linear(layer_sizes[-1], noutput)
        self.layers.append(layer)
        self.add_module("layer"+str(len(self.layers)), layer)

        self.init_weights()

    def forward(self, x):
        """Apply every registered layer in sequence."""
        for layer in self.layers:
            x = layer(x)
        return x

    def init_weights(self):
        """N(0, 0.02) weights and zero biases for Linear and BatchNorm layers."""
        init_std = 0.02
        for layer in self.layers:
            try:
                layer.weight.data.normal_(0, init_std)
                layer.bias.data.fill_(0)
            except AttributeError:
                # Activation modules have no parameters; narrowed from bare except.
                pass
class Seq2Seq(nn.Module):
    def __init__(self, emsize, nhidden, ntokens, nlayers, noise_radius=0.2,
                 hidden_init=False, dropout=0, gpu=False):
        """Single-decoder LSTM autoencoder (cf. Seq2Seq2Decoder's two-decoder twin).

        emsize: token embedding size; nhidden: LSTM hidden size;
        ntokens: vocabulary size; nlayers: encoder depth;
        noise_radius: std of the Gaussian noise optionally added in encode();
        hidden_init: seed the decoder state with the encoder output instead
        of zeros.
        """
        super(Seq2Seq, self).__init__()
        self.nhidden = nhidden
        self.emsize = emsize
        self.ntokens = ntokens
        self.nlayers = nlayers
        self.noise_radius = noise_radius
        self.hidden_init = hidden_init
        self.dropout = dropout
        self.gpu = gpu

        # <sos> token-id buffer; resized and refilled per batch before generation
        self.start_symbols = to_gpu(gpu, Variable(torch.ones(10, 1).long()))

        # Vocabulary embedding (separate matrices for encoder and decoder)
        self.embedding = nn.Embedding(ntokens, emsize)
        self.embedding_decoder = nn.Embedding(ntokens, emsize)

        # RNN Encoder and Decoder
        self.encoder = nn.LSTM(input_size=emsize,
                               hidden_size=nhidden,
                               num_layers=nlayers,
                               dropout=dropout,
                               batch_first=True)
        # Decoder consumes each token embedding concatenated with the code
        decoder_input_size = emsize+nhidden
        self.decoder = nn.LSTM(input_size=decoder_input_size,
                               hidden_size=nhidden,
                               num_layers=1,
                               dropout=dropout,
                               batch_first=True)

        # Initialize Linear Transformation (vocab projection)
        self.linear = nn.Linear(nhidden, ntokens)

        self.init_weights()
def init_weights(self):
initrange = 0.1
# Initialize Vocabulary Matrix Weight
self.embedding.weight.data.uniform_(-initrange, initrange)
self.embedding_decoder.weight.data.uniform_(-initrange, initrange)
# Initialize Encoder and Decoder Weights
for p in self.encoder.parameters():
p.data.uniform_(-initrange, initrange)
for p in self.decoder.parameters():
p.data.uniform_(-initrange, initrange)
# Initialize Linear Weight
self.linear.weight.data.uniform_(-initrange, initrange)
self.linear.bias.data.fill_(0)
def init_hidden(self, bsz):
zeros1 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
zeros2 = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return (to_gpu(self.gpu, zeros1), to_gpu(self.gpu, zeros2))
def init_state(self, bsz):
zeros = Variable(torch.zeros(self.nlayers, bsz, self.nhidden))
return to_gpu(self.gpu, zeros)
def store_grad_norm(self, grad):
norm = torch.norm(grad, 2, 1)
self.grad_norm = norm.detach().data.mean()
return grad
def forward(self, indices, lengths, noise, encode_only=False):
batch_size, maxlen = indices.size()
hidden = self.encode(indices, lengths, noise)
if encode_only:
return hidden
if hidden.requires_grad:
hidden.register_hook(self.store_grad_norm)
decoded = self.decode(hidden, batch_size, maxlen,
indices=indices, lengths=lengths)
return decoded
def encode(self, indices, lengths, noise):
    """Encode token indices into one unit-norm latent vector per sequence.

    indices : LongTensor (batch, maxlen) of vocabulary ids.
    lengths : true sequence lengths, used for packing.
    noise : when truthy and ``self.noise_radius > 0``, Gaussian noise is
        added to the code (latent-space regularization during training).
    """
    embeddings = self.embedding(indices)
    # Pack so the LSTM skips padded positions entirely.
    packed_embeddings = pack_padded_sequence(input=embeddings,
                                             lengths=lengths,
                                             batch_first=True)
    # Encode
    packed_output, state = self.encoder(packed_embeddings)
    hidden, cell = state
    # batch_size x nhidden
    hidden = hidden[-1]  # get hidden state of last layer of encoder
    # normalize to unit ball (l2 norm of 1) - p=2, dim=1
    norms = torch.norm(hidden, 2, 1)
    # For older versions of PyTorch use:
    hidden = torch.div(hidden, norms.expand_as(hidden))
    # For newest version of PyTorch (as of 8/25) use this:
    # hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))
    # NOTE(review): on modern PyTorch `norms` is 1-D so the `expand_as`
    # above fails, and `torch.normal(means=...)` below is spelled `mean=`.
    # This code targets an old PyTorch release — confirm the pinned version.
    if noise and self.noise_radius > 0:
        gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
                                   std=self.noise_radius)
        hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))
    return hidden
def decode(self, hidden, batch_size, maxlen, indices=None, lengths=None):
| |
qu'un 501 Internal Server Error)
# return (400, "".join(traceback.format_exc())) # Affiche le "traceback" (infos d'erreur Python) en cas d'erreur (plutôt qu'un 501 Internal Server Error)
else:
return r
### LISTES MORTS ET VIVANTS
def liste_joueurs(d):
    """List alive or dead players as a Chatfuel response.

    ``d``: pseudo-dict of GET arguments (``pwd``, ``type``, optionally ``v``).
    Returns a ``chatfuel.Response`` on success, a ``chatfuel.ErrorReport``
    on failure.
    """
    R = []  # Blocks sent back in the response
    # Fix: `verbose` was referenced in the except branch below but never
    # defined in this function (NameError on any DB error).
    verbose = ("v" in d)
    try:
        if ("pwd" in d) and (d["pwd"] == GLOBAL_PASSWORD):  # Password check
            # Players currently in cache (alive, "MV", dead)
            tous = cache_TDB.query.filter(cache_TDB.statut.in_(["vivant","MV","mort"])).all()
            NT = len(tous)
            if "type" in d and d["type"] == "vivants":
                rep = cache_TDB.query.filter(cache_TDB.statut.in_(["vivant","MV"])).order_by(cache_TDB.nom).all()
                descr = "en vie"
                bouton_text = "Joueurs morts ☠"
                bouton_bloc = "Joueurs morts"
            elif "type" in d and d["type"] == "morts":
                rep = cache_TDB.query.filter(cache_TDB.statut == "mort").order_by(cache_TDB.nom).all()
                descr = "morts"
                bouton_text = "Joueurs en vie 🕺"
                bouton_bloc = "Joueurs en vie"
            else:
                raise ValueError('GET["type"] must be "vivants" or "morts"')
            NR = len(rep)
            if NR > 0:
                R.append(chatfuel.Text(f"Liste des {NR}/{NT} joueurs {descr} :"))
                LJ = [u.nom for u in rep]
            else:
                LJ = ["Minute, papillon !"]
            R.append(chatfuel.Text('\n'.join(LJ)).addQuickReplies([chatfuel.Button("show_block", bouton_text, bouton_bloc),
                                                                   chatfuel.Button("show_block", "Retour menu 🏠", "Menu")]))
        else:
            raise ValueError("WRONG OR MISSING PASSWORD!")
    except Exception as exc:
        db.session.rollback()
        if type(exc).__name__ == "OperationalError":
            return chatfuel.ErrorReport(Exception("Impossible d'accéder à la BDD, réessaie ! (souvent temporaire)"), verbose=verbose, message="Une erreur technique est survenue 😪\n Erreur :")
        else:
            return chatfuel.ErrorReport(exc, message="Une erreur technique est survenue 😪\nMerci d'en informer les MJs ! Erreur :")
    else:
        return chatfuel.Response(R)
### APPEL D'UNE TÂCHE PLANIFIÉE
def cron_call(d):
    """Entry point for scheduled (cron) tasks.

    ``d``: pseudo-dict of GET arguments — ``pwd`` (password), ``job``
    (must be in the module-level ``jobs`` list), optional ``v`` (verbose
    HTML output), ``test`` (test mode: target only the test user),
    ``heure`` (force the hour), ``return_tb`` (plain traceback on error).

    Finds cached players matching the job's criteria and pushes the
    "Tâche planifiée" Chatfuel block to each one (up to MAX_TRIES
    attempts per user). Everything is appended to a per-day log file
    in the finally clause, even on error.
    """
    r = ""    # HTML response body (filled only in verbose mode)
    log = ""  # Text appended to today's log file in the finally clause
    try:
        verbose = ("v" in d)
        testmode = ("test" in d)
        if ("pwd" in d) and (d["pwd"] == GLOBAL_PASSWORD):  # Password check
            ### GENERAL HELPERS
            def get_criteres(job):
                # Map a job name to the cache_TDB filter criteria it targets.
                if job.endswith("cond") or job.endswith("maire"):
                    return {"inscrit": True, "votantVillage": True}
                elif job.endswith("loups"):
                    return {"inscrit": True, "votantLoups": True}
                elif job.endswith("action"):
                    # Hour can be forced via GET["heure"]; otherwise use the
                    # current hour (next hour for "remind*" jobs).
                    if ("heure" in d) and RepresentsInt(d["heure"]):
                        heure = int(d["heure"]) % 24
                    else:
                        if job.startswith("remind"):
                            heure = (int(time.strftime("%H")) + 1) % 24
                        else:
                            heure = int(time.strftime("%H"))
                    if job.startswith("open"):
                        return {"inscrit": True, "roleActif": True, "debutRole": heure}
                    else:
                        return {"inscrit": True, "roleActif": True, "finRole": heure}
                else:
                    raise ValueError(f"""Cannot associate criteria to job {job}""")
            ### DETECT THE JOB TO RUN AND ITS CRITERIA
            log += f"> {time.ctime()} (verbose={verbose}, testmode={testmode}) – "
            if ("job" in d) and (d["job"] in jobs):  # jobs: defined at the top of the file (also used in admin)
                job = d["job"]
                if verbose:
                    r += f"""Job : <code>{job}</code><br/>"""
                log += f"job : {job} -> "
                criteres = get_criteres(job)
                if verbose:
                    r += f"""Critères : <code>{html_escape(criteres)}</code><br/>"""
                if testmode:
                    criteres_test = {"messenger_user_id": 2033317286706583}  # Loïc, for tests
                    if verbose:
                        r += f"""Critères MODE TEST, réellement appliqués : <code>{html_escape(criteres_test)}</code><br/>"""
            else:
                raise ValueError("""Bad usage: required argument "job" not in GET or incorrect""")
            ### FETCH MATCHING USERS FROM THE CACHE
            users = cache_TDB.query.filter_by(**criteres).all()  # Players matching the criteria
            if verbose:
                str_users = str(users).replace(', ', ',\n ')
                r += f"<br/>Utilisateur(s) répondant aux critères ({len(users)}) : <pre>{html_escape(str_users)}</pre>"
            if testmode:
                users = cache_TDB.query.filter_by(**criteres_test).all()  # Overridden by the TEST MODE users
                if verbose:
                    str_users = str(users).replace(', ',',\n ')
                    r += f"<br/>Utilisateur(s) répondant aux critères MODE TEST ({len(users)}) : <pre>{html_escape(str_users)}</pre>"
            log += f"{len(users)} utilisateurs trouvés\n"
            ### DIRECT CHATFUEL PUSH
            if users:
                params = {"chatfuel_token": CHATFUEL_TOKEN,
                          "chatfuel_message_tag": CHATFUEL_TAG,
                          "chatfuel_block_name": "Tâche planifiée",
                          "job": job
                          }
                for user in users:
                    rep = False
                    tries = 0
                    # Retry the broadcast call, waiting 5 s between attempts.
                    while (not rep) and (tries < MAX_TRIES):
                        rep = requests.post(f"https://api.chatfuel.com/bots/{BOT_ID}/users/{user.messenger_user_id}/send", params=params)
                        tries += 1
                        if not rep:
                            time.sleep(5)
                    # NOTE(review): a send that succeeds on the final attempt
                    # still has tries == MAX_TRIES and is logged as a failure
                    # here — this should probably test `if not rep:` instead.
                    if tries == MAX_TRIES:
                        log += f" - !!! Impossible d'envoyer à l'utilisateur {user} ({MAX_TRIES} tentatives)"
                        if verbose:
                            r += f"<br/>!!! Impossible d'envoyer le job <code>{job}</code> à l'utilisateur <code>{html_escape(user)}</code> ({MAX_TRIES} tentatives)"
                        continue
                    rep = rep.json()
                    if verbose:
                        r += f"<br/>Envoi job <code>{job}</code> à l'utilisateur <code>{html_escape(user)}</code> – {tries} tentative(s)"
                    log += f" - Envoi à {user} : OK, {tries} tentative(s)\n"
                    if "code" in rep:
                        raise Exception("Erreur d'envoi Chatfuel Broadcast API. Réessaie.")
                    else:
                        if not rep["success"]:
                            raise Exception(f"""Chatfuel Broadcast API a renvoyé une erreur : {rep["result"]}""")
            ### END OF PROCEDURE
            log += "\n"
        else:
            raise ValueError("WRONG OR MISSING PASSWORD!")
    except Exception as e:
        log += f"\n> {time.ctime()} - Error, exiting:\n{traceback.format_exc()}\n\n"
        if verbose:
            if "return_tb" in d:
                return traceback.format_exc()
            else:
                return (400, "".join(traceback.format_exc()))  # Show the Python traceback (rather than a 501 Internal Server Error)
        else:
            return (400, f"{type(e).__name__}({str(e)})")  # Show a short error description (rather than a 501 Internal Server Error)
    else:
        return r
    finally:
        # Always append the run's log to today's file, success or failure.
        with open(f"logs/cron_call/{time.strftime('%Y-%m-%d')}.log", 'a+') as f:
            f.write(log)
### LISTE MORTS ET VIVANTS
def liste_joueurs(d):
    """List alive or dead players as a Chatfuel response.

    NOTE(review): this is a duplicate definition — an identical
    ``liste_joueurs`` appears earlier in the file; this later one wins.

    ``d``: pseudo-dict of GET arguments (``pwd``, ``type``, optionally ``v``).
    """
    R = []  # Blocks sent back in the response
    # Fix: `verbose` was referenced in the except branch below but never
    # defined in this function (NameError on any DB error).
    verbose = ("v" in d)
    try:
        if ("pwd" in d) and (d["pwd"] == GLOBAL_PASSWORD):  # Password check
            # Players currently in cache (alive, "MV", dead)
            tous = cache_TDB.query.filter(cache_TDB.statut.in_(["vivant","MV","mort"])).all()
            NT = len(tous)
            if "type" in d and d["type"] == "vivants":
                rep = cache_TDB.query.filter(cache_TDB.statut.in_(["vivant","MV"])).order_by(cache_TDB.nom).all()
                descr = "en vie"
                bouton_text = "Joueurs morts ☠"
                bouton_bloc = "Joueurs morts"
            elif "type" in d and d["type"] == "morts":
                rep = cache_TDB.query.filter(cache_TDB.statut == "mort").order_by(cache_TDB.nom).all()
                descr = "morts"
                bouton_text = "Joueurs en vie 🕺"
                bouton_bloc = "Joueurs en vie"
            else:
                raise ValueError('GET["type"] must be "vivants" or "morts"')
            NR = len(rep)
            if NR > 0:
                R.append(chatfuel.Text(f"Liste des {NR}/{NT} joueurs {descr} :"))
                LJ = [u.nom for u in rep]
            else:
                LJ = ["Minute, papillon !"]
            R.append(chatfuel.Text('\n'.join(LJ)).addQuickReplies([chatfuel.Button("show_block", bouton_text, bouton_bloc),
                                                                   chatfuel.Button("show_block", "Retour menu 🏠", "Menu")]))
        else:
            raise ValueError("WRONG OR MISSING PASSWORD!")
    except Exception as exc:
        db.session.rollback()
        if type(exc).__name__ == "OperationalError":
            return chatfuel.ErrorReport(Exception("Impossible d'accéder à la BDD, réessaie ! (souvent temporaire)"), verbose=verbose, message="Une erreur technique est survenue 😪\n Erreur :")
        else:
            return chatfuel.ErrorReport(exc, message="Une erreur technique est survenue 😪\nMerci d'en informer les MJs ! Erreur :")
    else:
        return chatfuel.Response(R)
### ENVOI MESSAGE À UN JOUEUR (beta)
def choix_cible(d, p, url_root):
    """Resolve a fuzzy target-player name into a messenger user id.

    Compares ``p["cible"]`` (lower-cased, accents stripped) against every
    living player's name. An exact match sets the ``cible`` attribute
    directly; otherwise the five best matches are offered as quick-reply
    buttons.
    """
    R = []        # Blocks sent back in the response
    attrs = None  # Chatfuel attributes to set on the user
    try:
        if ("pwd" in d) and (d["pwd"] == GLOBAL_PASSWORD):  # Password check
            SM = difflib.SequenceMatcher()  # String comparator
            slug1 = unidecode.unidecode(p["cible"]).lower()  # Target, lower-case and accent-free
            SM.set_seq1(slug1)  # First string to compare: requested target
            vivants = cache_TDB.query.filter(cache_TDB.statut.in_(["vivant","MV"])).all()
            scores = []
            # Fix: initialise score so the checks below don't raise a
            # NameError when there are no living players.
            score = 0
            for joueur in vivants:
                slug2 = unidecode.unidecode(joueur.nom).lower()
                SM.set_seq2(slug2)   # Compare the target to each player's (unaccented) name
                score = SM.ratio()   # Similarity ratio in [0, 1]
                if score == 1:       # Exact match with a player in the DB: stop here
                    break
                scores.append((joueur.nom, joueur.messenger_user_id, score))
            if score == 1:  # Exact match: set the target directly, send no block
                attrs = {"cible": joueur.messenger_user_id}
            else:
                # No perfect match: best names first (uid renamed from `id`,
                # which shadowed the builtin).
                bests = [(nom, uid) for (nom, uid, score) in sorted(scores, key=lambda x: x[2], reverse=True)]
                boutons = [chatfuel.Button("", nom, "", set_attributes={"cible": uid}) for (nom, uid) in bests[:5]]
                R.append(chatfuel.Text("Joueurs trouvés :").addQuickReplies(boutons))
        else:
            raise ValueError("WRONG OR MISSING PASSWORD!")
    except Exception as exc:
        return chatfuel.ErrorReport(exc, message="Une erreur technique est survenue 😪\nMerci d'en informer les MJs ! Erreur :")
    else:
        return chatfuel.Response(R, set_attributes=attrs)
def envoi_mp(d, p):
    """Send a private message (or image) to a player via the Chatfuel Broadcast API.

    ``p`` must carry ``cible_id`` (recipient), ``message``, ``sender`` and
    ``sender_id``. Returns the JSON success string, or a (400, message)
    tuple on error.
    """
    try:
        if ("pwd" in d) and (d["pwd"] == GLOBAL_PASSWORD):  # Password check
            uid = p["cible_id"]  # renamed from `id`, which shadowed the builtin
            message = p["message"]
            # Treat the message as an image if the URL path (query string
            # stripped) ends with a known image extension.
            is_image = message.split("?")[0].lower().endswith(("gif","png","jpg"))
            params = {"chatfuel_token" : CHATFUEL_TOKEN,
                      "chatfuel_message_tag" : CHATFUEL_TAG,
                      "chatfuel_block_name" : "RéceptionMessage",
                      "message": message,
                      "is_image": is_image,
                      "sender": p["sender"],
                      "sender_id": p["sender_id"],
                      }
            rep = requests.post(f"https://api.chatfuel.com/bots/{BOT_ID}/users/{uid}/send", params=params)
            rep = rep.json()
            if "code" in rep:
                raise Exception("Erreur d'envoi Chatfuel Broadcast API. Réessaie.")
        else:
            raise ValueError("WRONG OR MISSING PASSWORD!")
    except Exception as exc:
        # Fix: the handler bound `exc` but formatted `e` (NameError on any error).
        return (400, f"{type(exc).__name__}({str(exc)})")
    else:
        return """{"success":"ok"}"""
def media_renderer(d, p):
    """Render a received media URL as a Chatfuel image with reply buttons."""
    R = []  # Blocks sent back in the response
    try:
        # Fix: the password constant was garbled (`<PASSWORD>_PASSWORD`);
        # every other handler in this file checks GLOBAL_PASSWORD.
        if ("pwd" in d) and (d["pwd"] == GLOBAL_PASSWORD):  # Password check
            R.append(chatfuel.Image(p["media"]).addQuickReplies([chatfuel.Button("show_block", "Retour menu 🏠", "Menu"),
                                                                 chatfuel.Button("show_block", "Répondre 📤", "Envoi MP")]))
        else:
            raise ValueError("WRONG OR MISSING PASSWORD!")
    except Exception as exc:
        # Fix: the handler bound `e` but reported `exc` (NameError on any error).
        return chatfuel.ErrorReport(exc, message="Une erreur technique est survenue 😪\nMerci d'en informer les MJs ! Erreur :")
    else:
        return chatfuel.Response(R)
### OPTIONS DU PANNEAU
Compute areas of cell faces & volumes
V = self.aveCC2F * self.cell_volumes
L = self.reshape(self.face_areas / V, "F", "Fy", "V")
self._cell_gradient_y = sdiag(L) * G2
return self._cell_gradient_y
@property
def cell_gradient_z(self):
    """Z-derivative operator (cell centers to z-faces)

    This property constructs an z-derivative operator that acts on
    cell centered quantities; i.e. the z-component of the cell gradient operator.
    When applied, the z-derivative is mapped to z-faces. The operator is a
    sparse matrix :math:`\\mathbf{G_z}` that can be applied as a matrix-vector
    product to a cell centered quantity :math:`\\boldsymbol{\\phi}`, i.e.::

        grad_phi_z = Gz @ phi

    By default, the operator assumes zero-Neumann boundary conditions
    on the scalar quantity. Before calling **cell_gradient_z** however,
    the user can set a mix of zero Dirichlet and zero Neumann boundary
    conditions using :py:attr:`~discretize.operators.DiffOperators.set_cell_gradient_BC`.
    When **cell_gradient_z** is called, the boundary conditions are
    enforced for the differencing operator.

    Returns
    -------
    (n_faces_z, n_cells) scipy.sparse.csr_matrix
        Z-derivative operator (z-component of the cell gradient)

    Examples
    --------
    Below, we demonstrate how to set boundary conditions for the
    z-component of the cell gradient, construct the operator and apply it
    to a discrete scalar quantity. The mapping of the operator and
    its sparsity is also illustrated.

    We start by importing the necessary packages and modules.

    >>> from discretize import TensorMesh
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> import matplotlib as mpl

    We then construct a mesh and define a scalar function at cell
    centers.

    >>> h = np.ones(40)
    >>> mesh = TensorMesh([h, h, h], "CCC")
    >>> centers = mesh.cell_centers
    >>> phi = np.exp(-(centers[:, 2] ** 2) / 8** 2)

    Before constructing the operator, we must define
    the boundary conditions; zero Neumann for our example. Even though
    we are only computing the derivative along z, we define boundary
    conditions for all boundary faces. Once the
    operator is created, it is applied as a matrix-vector product.

    >>> mesh.set_cell_gradient_BC(['neumann', 'neumann', 'neumann'])
    >>> Gz = mesh.cell_gradient_z
    >>> grad_phi_z = Gz @ phi

    Now we plot the original scalar and the z-derivative for a slice at y = 0.

    .. collapse:: Expand to show scripting for plot

        >>> fig = plt.figure(figsize=(13, 5))
        >>> ax1 = fig.add_subplot(121)
        >>> mesh.plot_slice(phi, ax=ax1, normal='Y', slice_loc=0)
        >>> ax1.set_title("Scalar at cell centers", fontsize=14)
        >>> ax2 = fig.add_subplot(122)
        >>> v = np.r_[np.zeros(mesh.nFx+mesh.nFy), grad_phi_z]  # Define vector for plotting fun
        >>> mesh.plot_slice(v, ax=ax2, v_type='Fz', normal='Y', slice_loc=0)
        >>> ax2.set_title("Z-derivative (z-faces)", fontsize=14)
        >>> plt.show()

    The z-component cell gradient is a sparse derivative matrix
    that maps from cell centers to z-faces. To demonstrate this, we provide
    a spy plot

    .. collapse:: Expand to show scripting for plot

        >>> fig = plt.figure(figsize=(9, 9))
        >>> ax1 = fig.add_subplot(111)
        >>> ax1.spy(mesh.cell_gradient_z, ms=1)
        >>> ax1.set_title("Spy Plot", fontsize=16, pad=5)
        >>> ax1.set_xlabel("Cell Index", fontsize=12)
        >>> ax1.set_ylabel("Z-Face Index", fontsize=12)
        >>> plt.show()
    """
    # Only defined for 3D meshes; lower-dimensional meshes have no z-faces.
    if self.dim < 3:
        return None
    # Lazily build and cache the operator on first access.
    if getattr(self, "_cell_gradient_z", None) is None:
        G3 = self.stencil_cell_gradient_z
        # Compute areas of cell faces & volumes
        V = self.aveCC2F * self.cell_volumes
        # Scale the difference stencil by face area over averaged cell volume.
        L = self.reshape(self.face_areas / V, "F", "Fz", "V")
        self._cell_gradient_z = sdiag(L) * G3
    return self._cell_gradient_z
###########################################################################
# #
# Edge Curl #
# #
###########################################################################
@property
def _edge_x_curl_stencil(self):
    """Stencil mapping edge values to the x-component of the curl."""
    nx, ny, nz = self.vnC  # number of cells in each direction
    # Differences of the z- and y-edge components along y and z.
    d32 = kron3(ddx(nz), speye(ny), speye(nx + 1))
    d23 = kron3(speye(nz), ddx(ny), speye(nx + 1))
    # x-edge values do not contribute to the x-component of the curl.
    zero_block = spzeros((nx + 1) * ny * nz, nx * (ny + 1) * (nz + 1))
    return sp.hstack((zero_block, -d32, d23))
@property
def _edge_y_curl_stencil(self):
    """Stencil mapping edge values to the y-component of the curl."""
    nx, ny, nz = self.vnC  # number of cells in each direction
    # Differences of the x- and z-edge components along z and x.
    d31 = kron3(ddx(nz), speye(ny + 1), speye(nx))
    d13 = kron3(speye(nz), speye(ny + 1), ddx(nx))
    # y-edge values do not contribute to the y-component of the curl.
    zero_block = spzeros(nx * (ny + 1) * nz, (nx + 1) * ny * (nz + 1))
    return sp.hstack((d31, zero_block, -d13))
@property
def _edge_z_curl_stencil(self):
    """Stencil mapping edge values to the z-component of the curl."""
    nx, ny, nz = self.vnC  # number of cells in each direction
    # Differences of the x- and y-edge components along y and x.
    d21 = kron3(speye(nz + 1), ddx(ny), speye(nx))
    d12 = kron3(speye(nz + 1), speye(ny), ddx(nx))
    # z-edge values do not contribute to the z-component of the curl.
    zero_block = spzeros(nx * ny * (nz + 1), (nx + 1) * (ny + 1) * nz)
    return sp.hstack((-d21, d12, zero_block))
@property
def _edge_curl_stencil(self):
    """Assemble the full edge-curl difference stencil for a 2D or 3D mesh.

    In 2D the "curl" is the scalar rotation built directly here; in 3D the
    three per-component stencils are stacked vertically.
    """
    if self.dim <= 1:
        raise NotImplementedError("Edge Curl only programed for 2 or 3D.")
    if self.dim == 2:
        nx, ny = self.vnC[0], self.vnC[1]
        d_dy = sp.kron(ddx(ny), speye(nx))
        d_dx = sp.kron(speye(ny), ddx(nx))
        return sp.hstack((-d_dy, d_dx), format="csr")
    # 3D: stack the x-, y- and z-component stencils.
    return sp.vstack(
        (
            self._edge_x_curl_stencil,
            self._edge_y_curl_stencil,
            self._edge_z_curl_stencil,
        ),
        format="csr",
    )
@property
def edge_curl(self):
r"""Edge curl operator (edges to faces)
This property constructs the 2nd order numerical curl operator
that maps from edges to faces. The operator is a sparse matrix
:math:`\mathbf{C_e}` that can be applied as a matrix-vector product
to a discrete vector quantity **u** that lives
on the edges; i.e.::
curl_u = Ce @ u
Once constructed, the operator is stored permanently as a property of the mesh.
Returns
-------
(n_faces, n_edges) scipy.sparse.csr_matrix
The numerical curl operator from edges to faces
Notes
-----
In continuous space, the curl operator is defined as:
.. math::
\vec{w} = \nabla \times \vec{u} =
\begin{vmatrix}
\hat{x} & \hat{y} & \hat{z} \\
\partial_x & \partial_y & \partial_z \\
u_x & u_y & u_z
\end{vmatrix}
Where :math:`\mathbf{u}` is the discrete representation of the continuous variable
:math:`\vec{u}` on cell edges and :math:`\mathbf{w}` is the discrete
representation of the curl on the faces, **edge_curl** constructs a
discrete linear operator :math:`\\mathbf{C_e}` such that:
.. math::
\mathbf{w} = \mathbf{C_e \, u}
The computation of the curl on mesh faces can be expressed
according to the integral form below. For face :math:`i` bordered by
a set of edges indexed by subset :math:`K`:
.. math::
w_i = \frac{1}{A_i} \sum_{k \in K} \vec{u}_k \cdot \vec{\ell}_k
where :math:`A_i` is the surface area of face *i*,
:math:`u_k` is the value of :math:`\vec{u}` on face *k*,
and \vec{\ell}_k is the path along edge *k*.
Examples
--------
Below, we demonstrate the mapping and sparsity of the edge curl
for a 3D tensor mesh. We choose a the index for a single face,
and illustrate which edges are used to compute the curl on that
face.
>>> from discretize import TensorMesh
>>> from discretize.utils import mkvc
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import matplotlib as mpl
>>> import mpl_toolkits.mplot3d as mp3d
>>> mpl.rcParams.update({'font.size': 14})
Create a simple tensor mesh, and grab the **edge_curl** operator:
>>> mesh = TensorMesh([[(1, 2)], [(1, 2)], [(1, 2)]])
>>> Ce = mesh.edge_curl
Then we choose a *face* for illustration purposes:
>>> face_ind = 2 # Index of a face in the mesh (could be x, y or z)
>>> edge_ind = np.where(
... np.sum((mesh.edges-mesh.faces[face_ind, :])**2, axis=1) <= 0.5 + 1e-6
... )[0]
>>> face = mesh.faces[face_ind, :]
>>> face_norm = mesh.face_normals[face_ind, :]
>>> edges = mesh.edges[edge_ind, :]
>>> edge_tan = mesh.edge_tangents[edge_ind, :]
>>> node = np.min(edges, axis=0)
>>> min_edges = np.min(edges, axis=0)
>>> max_edges = np.max(edges, axis=0)
>>> if face_norm[0] == 1:
... k = (edges[:, 1] == min_edges[1]) | (edges[:, 2] == max_edges[2])
... poly = node + np.c_[np.r_[0, 0, 0, 0], np.r_[0, 1, 1, 0], np.r_[0, 0, 1, 1]]
... ds = [0.07, | |
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from tkinter import *
from bot import *
import tkinter as tk
from threading import Thread
class BotGUI():
def __init__(self):
    """Build the backend, then the GUI, start the updater thread, and block in mainloop."""
    self.setup_backend()   # must run first: setup_frontend wires widgets to self._bot
    self.setup_frontend()
    # Background thread that periodically refreshes the charts.
    self._updater_thread = Thread(target=self.automatic_update)
    self._updater_thread.start()
    self._root.mainloop()  # blocks until the window is closed
###################################################################################################
# function: setup_backend
# purpose: initialize the bot architecture.
#
# description: This method should only be called in the constructor for this class unless
# The backend is purposefully destroyed. It will completely re-create the backend
###################################################################################################
def setup_backend(self):
    """Create the trading bot and its market-data feed.

    Call only from the constructor (or after deliberately tearing the
    backend down); it re-creates the backend from scratch.
    """
    # `feed` rather than `socket` avoids shadowing the stdlib module name.
    feed = BotSocket(product=["BTC-USD", "LTC-USD", "ETH-USD", "BCH-USD"], channels=["matches"])
    self._bot = Bot("Betty", "LTC-USD", feed)
###################################################################################################
# function: setup_frontend
# purpose: Creates the GUI for the user.
#
# description: This method should only be called in the constructor for this class with no
# exceptions. The GUI consists of:
# start/stop buttons
# portfolio pie chart
# price line chart + checkboxes and radio buttons to show the moving averages
# refresh button for pie chart and line chart
# radio buttons to choose which currency to trade.
###################################################################################################
def setup_frontend(self):
    """Build the whole GUI. Call only from the constructor.

    Layout: start/stop buttons, currency and trade-duration radio buttons,
    sell-cushion slider and position-history listbox on the upper dashboard;
    moving-average checkboxes on the lower dashboard; price and portfolio
    line charts plus the portfolio pie chart embedded on the right.
    """
    ####################
    # MAIN-WINDOW SETUP
    ####################
    self._root = Tk()
    self._root.title("Betty the trade bot")
    # Create a top and bottom frame to divide the window into 2 parts. You
    # won't see this division in the window, but it helps lay things out.
    self._topframe = Frame(self._root)
    self._bottomframe = Frame(self._root)
    self._topframe.pack(side=TOP)
    self._bottomframe.pack(side=BOTTOM)
    self._pie_chart_frame = Frame(self._topframe)
    self._line_chart_frame = Frame(self._bottomframe)
    self._upper_dash_board = Frame(self._topframe)
    self._lower_dash_board = Frame(self._bottomframe)
    self._pie_chart_frame.pack(side=RIGHT)
    self._line_chart_frame.pack(side=RIGHT)
    self._upper_dash_board.pack(side=LEFT)
    self._lower_dash_board.pack(side=LEFT)
    #######################
    # WIDGET SETUP
    #######################
    # Start/stop buttons drive the bot directly.
    self._startButton = Button(self._upper_dash_board, text="Start Bot", bg="green", fg="black", command=self._bot.start)
    self._stopButton = Button(self._upper_dash_board, text="Stop Bot" , bg="red" , fg="white", command=self._bot.stop )
    self._startButton.grid(row=0, column=0)
    self._stopButton.grid( row=0, column=1)
    ##########################################
    # Choose currency to trade (radio buttons)
    ##########################################
    v = tk.StringVar()
    v.set("LTC-USD")
    myList = [("BTC-USD"), ("BCH-USD"), ("LTC-USD"), ("ETH-USD")]
    tk.Radiobutton(self._upper_dash_board, text=myList[0], padx=20, variable=v, value=myList[0], command=lambda: self._bot.set_currency(myList[0])).grid(row=1, column=0)
    tk.Radiobutton(self._upper_dash_board, text=myList[1], padx=20, variable=v, value=myList[1], command=lambda: self._bot.set_currency(myList[1])).grid(row=2, column=0)
    tk.Radiobutton(self._upper_dash_board, text=myList[2], padx=20, variable=v, value=myList[2], command=lambda: self._bot.set_currency(myList[2])).grid(row=3, column=0)
    tk.Radiobutton(self._upper_dash_board, text=myList[3], padx=20, variable=v, value=myList[3], command=lambda: self._bot.set_currency(myList[3])).grid(row=4, column=0)
    ###############################################################################################################
    # Allows user to decide the duration of their investments. This is done by comparing different moving averages.
    ###############################################################################################################
    duration = tk.StringVar()
    duration.set("long")
    tk.Label(self._upper_dash_board, text="Trade Duration").grid(row=1, column=2)
    tk.Radiobutton(self._upper_dash_board, text="Short", variable=duration, value="short", command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=2, column=2)
    tk.Radiobutton(self._upper_dash_board, text="Medium",variable=duration, value="medium",command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=3, column=2)
    tk.Radiobutton(self._upper_dash_board, text="Long", variable=duration, value="long", command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=4, column=2)
    ################################################################
    # Allows the user to decide how sensitive they want sells to be.
    ################################################################
    self._sell_cushion_slider = Scale(self._upper_dash_board, from_=0, to=1, length=300, tickinterval=0.5, resolution=0.01, orient=HORIZONTAL, command=self._bot._trade_hands.set_sell_cushion)
    self._sell_cushion_slider.grid(row=5, column=0, columnspan=3)
    self._sell_cushion_slider.set(.3)
    #####################################
    # show position history in a list box
    #####################################
    scrollbar = Scrollbar(self._upper_dash_board, orient=VERTICAL)
    scrollbar.grid(row=0, column=6, rowspan=5)
    self._position_history_box = tk.Listbox(self._upper_dash_board, yscrollcommand=scrollbar.set)
    self._position_history_box.grid(row=0, column=3, columnspan=3, rowspan=5)
    ######################################################
    # Choose which averages to show on graph (check boxes)
    ######################################################
    self._average_type = StringVar()
    self._average_type.set("simple")
    # This should be handled more gracefully eventually.
    self._CheckVars = [IntVar(), IntVar(), IntVar(), IntVar()]
    self._averages = [(" SMA 30", 30), (" SMA 10", 10), (" SMA 5", 5), (" SMA 1", 1)]
    i=0;
    # These widgets are check boxes for showing the individual average sizes.
    for string, size in self._averages:
        x = tk.Checkbutton(self._lower_dash_board, text = string, variable = self._CheckVars[i], onvalue = 1, offvalue = 0, height=1, width = 6, command= lambda:self.update_line_charts(self._CheckVars, self._averages, self._average_type))
        x.pack(side=BOTTOM)
        i+=1
    ########################################################
    # Set up the price chart and portfolio/trading chart
    ########################################################
    # NOTE(review): crypto_history is assigned here but never used.
    crypto_history = self._bot._data_center._crypto_history
    self._line_chart_figure = Figure(figsize=(20, 3))
    self._price_plot = self._line_chart_figure.add_subplot(111)
    self._price_plot.set_xlabel("Time")
    self._price_plot.set_ylabel("Dollars")
    self._price_plot.set_title("Price vs. Time")
    self._portfolio_chart_figure = Figure(figsize=(20,3))
    self._portfolio_plot = self._portfolio_chart_figure.add_subplot(111)
    self._portfolio_plot.set_xlabel("Time")
    self._portfolio_plot.set_ylabel("Dollars")
    self._portfolio_plot.set_title("Portfolio Value vs. Time")
    # Embed the portfolio figure in the Tk window.
    # NOTE(review): FigureCanvasTkAgg.show() and NavigationToolbar2TkAgg were
    # removed in newer Matplotlib (use draw() / NavigationToolbar2Tk) —
    # confirm the pinned Matplotlib version.
    canvas3 = FigureCanvasTkAgg(self._portfolio_chart_figure, master=self._line_chart_frame)
    canvas3.show()
    canvas3.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    toolbar3 = NavigationToolbar2TkAgg(canvas3, self._line_chart_frame)
    toolbar3.update()
    canvas3._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=1)
    # Embed the price figure in the Tk window.
    canvas = FigureCanvasTkAgg(self._line_chart_figure, master=self._line_chart_frame)
    canvas.show()
    canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    toolbar = NavigationToolbar2TkAgg(canvas, self._line_chart_frame)
    toolbar.update()
    canvas._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=1)
    ########################################################
    # Set up the pie chart
    ########################################################
    portfolio = self._bot._data_center.get_portfolio()
    portfolio_keys = portfolio.keys()
    labels = [key for key in portfolio_keys if "USD" in key]
    amounts = [portfolio[key]["value"] for key in portfolio_keys if "USD" in key]
    colors = ["gold", "green", "blue", "red", "purple"]
    explode = [0,0,0,0,0]
    self._pie_chart_figure = Figure(figsize=(5, 3.5), dpi=100)  # we keep the pie chart figure
    self._pie_plot = self._pie_chart_figure.add_subplot(111)  # we also keep the sub plot
    self._pie_plot.pie(amounts, explode=explode, labels=labels, colors=colors, autopct='%5.2f%%', shadow=True, startangle=140)[0]  # plot the pie chart
    self._pie_chart_figure.gca().add_artist(matplotlib.patches.Circle((0,0),0.75,color='black', fc='white',linewidth=1.25))  # plot a circle over it to make a donut
    self._pie_plot.axis('equal')
    # Embed the pie figure in the Tk window.
    canvas2 = FigureCanvasTkAgg(self._pie_chart_figure, master=self._pie_chart_frame)
    canvas2.show()
    canvas2.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    toolbar2 = NavigationToolbar2TkAgg(canvas2, self._pie_chart_frame)
    toolbar2.update()
    canvas2._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
    # Refresh button: redraws the line charts and pie chart on demand.
    self._refresh_button = Button(self._upper_dash_board, text="refresh graphics", bg="blue", fg="white", command= lambda: self.refresh_graphics(self._CheckVars, self._averages, self._average_type))
    self._refresh_button.grid(row=0, column=2)
###################################################################################################
# function: automatic_update
# purpose: refresh graphics automatically
#
# description: This method will constantly call the refresh_graphics method while the bot is
# running. It will update the graphs a coule of times each second.
###################################################################################################
def automatic_update(self):
    """Periodically refresh the charts while the bot is running.

    Runs forever on the background thread started in ``__init__``.

    Fix: the original only slept inside the ``if self._bot._running``
    branch, so whenever the bot was stopped the loop spun without any
    sleep, pegging a CPU core. We now sleep on every iteration, so the
    idle path polls the running flag once every 5 seconds instead.
    """
    while True:
        # Sleep first so the idle (stopped) path does not busy-wait.
        time.sleep(5)
        if self._bot._running:
            self.refresh_graphics(self._CheckVars, self._averages, self._average_type)
###################################################################################################
# function: refresh_graphics
# purpose: refresh both the line graph and the pie chart
#
# description: This method is called when the refresh button is clicked, and also should be
# called automatically by another thread causing the plots to update periodically
###################################################################################################
def refresh_graphics(self, CheckVars, Average_list, average_type):
    """Redraw all charts: price/portfolio lines, pie chart, position history.

    Called by the refresh button and by the automatic updater thread.
    Only the line charts need the user's moving-average selections.
    """
    self.update_line_charts(CheckVars, Average_list, average_type)
    self.update_pie_chart()
    self.update_positions_history()
###################################################################################################
# function: update_positions_history
# purpose: show all past and current holdings
#
# description: This method will check for any trades that have been posted in the trade
# history, but not posted in the listbox
###################################################################################################
def update_positions_history(self):
trade_history = self._bot._data_center._trade_history
current_position = self._bot._trade_hands._long_position
self._position_history_box.delete(0, END)
for past_position in trade_history:
entry = past_position["entry_price"]
exit = past_position["exit_price"]
gain = ((exit-entry)/entry) * 100
msg = "{} {} {}%".format(str(entry), str(exit), str(gain))
self._position_history_box.insert(END, msg)
if current_position != None:
msg = str(current_position["entry_price"])
self._position_history_box.insert(END, msg)
###################################################################################################
    # function: update_line_charts
# purpose: shows new data that was not shown the last time the chart was updated, and
# reacts to the average checkboxes being selected/deselected.
#
# description: This will replot the entire graph, taking into account user preferences of
# averages they wish to see.
###################################################################################################
def update_line_charts(self, CheckVars, Average_list, average_type):
try:
###stuff dealing with the price plot
self._price_plot.clear()
self._portfolio_plot.clear()
ma_collection = self._bot._data_center._ma_collection
crypto_history = self._bot._data_center._crypto_history
portfolio_history = self._bot._data_center._portfolio_history
trade_history = self._bot._data_center._trade_history
for i in range(len(CheckVars)):
if CheckVars[i].get() == 1:
times = [j["time"] for j in ma_collection[Average_list[i][1]]]
#times = matplotlib.dates.date2num(times)
values = [j[average_type.get()] for j in ma_collection[Average_list[i][1]]]
if len(times) != len(values):
print("Could not update graph because x and y dimensions were not the same for the ", Average_list[i][0], ".")
return
self._price_plot.plot_date(times, values)[0]
else:
self._price_plot.plot_date([],[])
times = [i["time"] for i in crypto_history[self._bot.currency()]]
prices = [i["price"] for i in crypto_history[self._bot.currency()]]
if len(times) != len(prices):
print("Could not update graph because x and y dimensions were not the same for the price line")
return
self._prices_line = self._price_plot.plot_date(times, prices)[0]
#plot horizontal sell line
current_position = self._bot._trade_hands._long_position
if current_position != None:
self._price_plot.axhline(y=current_position["high_price"] * (1-self._bot._trade_hands._sell_cushion/100))
self._line_chart_figure.autofmt_xdate()
###stuff dealing with the portfolio plot
portfolio_history = self._bot._data_center._portfolio_history
portfolio_values = [element["total"] for element in portfolio_history if element["total"]!=0]
times = [element["time" ] for element in portfolio_history if element["total"]!=0]
if len(portfolio_values) != len(times):
return
self._portfolio_plot.clear()
self._portfolio_line = self._portfolio_plot.plot_date(times, portfolio_values)
self._portfolio_chart_figure.autofmt_xdate()
trade_history = self._bot._data_center._trade_history
for trade in trade_history:
self._portfolio_plot.axvline(x=trade["entry_time"], color="g")
self._portfolio_plot.axvline(x=trade["exit_time"], color="r")
if current_position != None:
self._portfolio_plot.axvline(x=current_position["entry_time"], color="g")
except:
x_max = crypto_history[self._bot.currency()][-1]
x_min = crypto_history[self._bot.currency()][0]
self._portfolio_plot.set_xlim([x_min, x_max])
| |
== 'width' and child_.text:
sval_ = child_.text
fval_ = self.gds_parse_double(sval_, node, 'width')
fval_ = self.gds_validate_double(fval_, node, 'width')
self.width = fval_
self.width_nsprefix_ = child_.prefix
# validate type doubleMaxExclusive100MinInclusive0.01
self.validate_doubleMaxExclusive100MinInclusive0_01(self.width)
elif nodeName_ == 'height' and child_.text:
sval_ = child_.text
fval_ = self.gds_parse_double(sval_, node, 'height')
fval_ = self.gds_validate_double(fval_, node, 'height')
self.height = fval_
self.height_nsprefix_ = child_.prefix
# validate type doubleMaxExclusive100MinInclusive0.01
self.validate_doubleMaxExclusive100MinInclusive0_01(self.height)
elif nodeName_ == 'weight' and child_.text:
sval_ = child_.text
fval_ = self.gds_parse_double(sval_, node, 'weight')
fval_ = self.gds_validate_double(fval_, node, 'weight')
self.weight = fval_
self.weight_nsprefix_ = child_.prefix
# validate type doubleMaxExclusive100000MinInclusive0.01
self.validate_doubleMaxExclusive100000MinInclusive0_01(self.weight)
# end class measurementsType
class pieceLineType(GeneratedsSuper):
    """A piece line describes a kind of piece sharing the same physical
    attributes.
    (A piece is a package, box, envelope or shippable unit. All pieces which
    are
    identical are defined for convenience as a piece line with a number of
    units.)
    For example if there are 5 boxes of 0.1m x 0.2m x 0.3m of weight 0.1kg and
    1 box of 0.4m x 0.4m x 0.4 of weight 0.5kg this equates to two piece lines
    as
    follows:
    PieceLine1: 0.1m x 0.2m x 0.3m, weight 0.1kg, number of units=5
    PieceLine2: 0.4m x 0.4m x 0.4m, weight 0.5kg, number of units=1"""
    # NOTE(review): generateDS-style generated XML binding. The XML output is
    # byte-sensitive to statement order, so the code below is left untouched;
    # only comments were added. Regenerate from the schema rather than editing
    # by hand.
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, identifier=None, goodsDescription=None, barcodeForCustomer=None, pieceMeasurements=None, pieces=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.identifier = identifier
        self.identifier_nsprefix_ = None
        self.goodsDescription = goodsDescription
        # Constructor-time validation only records messages via the collector;
        # it never raises.
        self.validate_stringMaxLength30(self.goodsDescription)
        self.goodsDescription_nsprefix_ = None
        self.barcodeForCustomer = barcodeForCustomer
        self.validate_booleanEnum(self.barcodeForCustomer)
        self.barcodeForCustomer_nsprefix_ = None
        self.pieceMeasurements = pieceMeasurements
        self.pieceMeasurements_nsprefix_ = None
        if pieces is None:
            self.pieces = []
        else:
            self.pieces = pieces
        self.pieces_nsprefix_ = None
    # Factory honoring an externally registered subclass module, if any.
    def factory(*args_, **kwargs_):
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, pieceLineType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if pieceLineType.subclass:
            return pieceLineType.subclass(*args_, **kwargs_)
        else:
            return pieceLineType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors -------------------------------------------------
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_identifier(self):
        return self.identifier
    def set_identifier(self, identifier):
        self.identifier = identifier
    def get_goodsDescription(self):
        return self.goodsDescription
    def set_goodsDescription(self, goodsDescription):
        self.goodsDescription = goodsDescription
    def get_barcodeForCustomer(self):
        return self.barcodeForCustomer
    def set_barcodeForCustomer(self, barcodeForCustomer):
        self.barcodeForCustomer = barcodeForCustomer
    def get_pieceMeasurements(self):
        return self.pieceMeasurements
    def set_pieceMeasurements(self, pieceMeasurements):
        self.pieceMeasurements = pieceMeasurements
    def get_pieces(self):
        return self.pieces
    def set_pieces(self, pieces):
        self.pieces = pieces
    def add_pieces(self, value):
        self.pieces.append(value)
    def insert_pieces_at(self, index, value):
        self.pieces.insert(index, value)
    def replace_pieces_at(self, index, value):
        self.pieces[index] = value
    # --- simple-type validators (record messages, return bool) --------------
    def validate_stringMaxLength30(self, value):
        result = True
        # Validate type stringMaxLength30, a restriction on xsd:string.
        if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
            if not isinstance(value, str):
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
                return False
            if len(value) > 30:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength30' % {"value" : encode_str_2_3(value), "lineno": lineno} )
                result = False
        return result
    def validate_booleanEnum(self, value):
        result = True
        # Validate type booleanEnum, a restriction on xsd:string.
        if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
            if not isinstance(value, str):
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
                return False
            # No-op self-assignment retained from the generator's enum template.
            value = value
            enumerations = ['N', 'Y']
            if value not in enumerations:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on booleanEnum' % {"value" : encode_str_2_3(value), "lineno": lineno} )
                result = False
        return result
    # True when any child element is set; controls "<tag/>" vs "<tag>...</tag>".
    def hasContent_(self):
        if (
            self.identifier is not None or
            self.goodsDescription is not None or
            self.barcodeForCustomer is not None or
            self.pieceMeasurements is not None or
            self.pieces
        ):
            return True
        else:
            return False
    # --- XML serialization ---------------------------------------------------
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='pieceLineType', pretty_print=True):
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('pieceLineType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'pieceLineType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='pieceLineType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='pieceLineType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='pieceLineType'):
        # This type declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='pieceLineType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.identifier is not None:
            namespaceprefix_ = self.identifier_nsprefix_ + ':' if (UseCapturedNS_ and self.identifier_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sidentifier>%s</%sidentifier>%s' % (namespaceprefix_ , self.gds_format_integer(self.identifier, input_name='identifier'), namespaceprefix_ , eol_))
        if self.goodsDescription is not None:
            namespaceprefix_ = self.goodsDescription_nsprefix_ + ':' if (UseCapturedNS_ and self.goodsDescription_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sgoodsDescription>%s</%sgoodsDescription>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.goodsDescription), input_name='goodsDescription')), namespaceprefix_ , eol_))
        if self.barcodeForCustomer is not None:
            namespaceprefix_ = self.barcodeForCustomer_nsprefix_ + ':' if (UseCapturedNS_ and self.barcodeForCustomer_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sbarcodeForCustomer>%s</%sbarcodeForCustomer>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.barcodeForCustomer), input_name='barcodeForCustomer')), namespaceprefix_ , eol_))
        if self.pieceMeasurements is not None:
            namespaceprefix_ = self.pieceMeasurements_nsprefix_ + ':' if (UseCapturedNS_ and self.pieceMeasurements_nsprefix_) else ''
            self.pieceMeasurements.export(outfile, level, namespaceprefix_, namespacedef_='', name_='pieceMeasurements', pretty_print=pretty_print)
        for pieces_ in self.pieces:
            namespaceprefix_ = self.pieces_nsprefix_ + ':' if (UseCapturedNS_ and self.pieces_nsprefix_) else ''
            pieces_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='pieces', pretty_print=pretty_print)
    # --- XML deserialization -------------------------------------------------
    def build(self, node, gds_collector_=None):
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This type declares no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == 'identifier' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'identifier')
            ival_ = self.gds_validate_integer(ival_, node, 'identifier')
            self.identifier = ival_
            self.identifier_nsprefix_ = child_.prefix
        elif nodeName_ == 'goodsDescription':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'goodsDescription')
            value_ = self.gds_validate_string(value_, node, 'goodsDescription')
            self.goodsDescription = value_
            self.goodsDescription_nsprefix_ = child_.prefix
            # validate type stringMaxLength30
            self.validate_stringMaxLength30(self.goodsDescription)
        elif nodeName_ == 'barcodeForCustomer':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'barcodeForCustomer')
            value_ = self.gds_validate_string(value_, node, 'barcodeForCustomer')
            self.barcodeForCustomer = value_
            self.barcodeForCustomer_nsprefix_ = child_.prefix
            # validate type booleanEnum
            self.validate_booleanEnum(self.barcodeForCustomer)
        elif nodeName_ == 'pieceMeasurements':
            obj_ = measurementsType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.pieceMeasurements = obj_
            obj_.original_tagname_ = 'pieceMeasurements'
        elif nodeName_ == 'pieces':
            obj_ = pieceType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.pieces.append(obj_)
            obj_.original_tagname_ = 'pieces'
# end class pieceLineType
class pieceType(GeneratedsSuper):
"""This element is used to identify all the pieces that should be grouped
together by the given reference. The list of sequence numbers is included
(one sequenceNumber element per piece) with a single pieceReference
element."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, sequenceNumbers=None, pieceReference=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.sequenceNumbers = sequenceNumbers
self.sequenceNumbers_nsprefix_ = None
self.pieceReference = pieceReference
self.pieceReference_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, pieceType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if pieceType.subclass:
return pieceType.subclass(*args_, **kwargs_)
else:
return pieceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_sequenceNumbers(self):
return self.sequenceNumbers
def set_sequenceNumbers(self, sequenceNumbers):
self.sequenceNumbers = sequenceNumbers
def get_pieceReference(self):
return self.pieceReference
def set_pieceReference(self, pieceReference):
self.pieceReference = pieceReference
def hasContent_(self):
if (
self.sequenceNumbers is not None or
self.pieceReference is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='pieceType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('pieceType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'pieceType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='pieceType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='pieceType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='pieceType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='pieceType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.sequenceNumbers is not None:
namespaceprefix_ = | |
quota for "
"project {0}".format(proj.id)))
    @_utils.valid_kwargs(
        'action', 'description', 'destination_firewall_group_id',
        'destination_ip_address', 'destination_port', 'enabled', 'ip_version',
        'name', 'project_id', 'protocol', 'shared', 'source_firewall_group_id',
        'source_ip_address', 'source_port')
    def create_firewall_rule(self, **kwargs):
        """
        Creates firewall rule.

        :param action: Action performed on traffic.
                       Valid values: allow, deny
                       Defaults to deny.
        :param description: Human-readable description.
        :param destination_firewall_group_id: ID of destination firewall group.
        :param destination_ip_address: IPv4-, IPv6 address or CIDR.
        :param destination_port: Port or port range (e.g. 80:90)
        :param bool enabled: Status of firewall rule. You can disable rules
                             without disassociating them from firewall
                             policies. Defaults to True.
        :param int ip_version: IP Version.
                               Valid values: 4, 6
                               Defaults to 4.
        :param name: Human-readable name.
        :param project_id: Project id.
        :param protocol: IP protocol.
                         Valid values: icmp, tcp, udp, null
        :param bool shared: Visibility to other projects.
                            Defaults to False.
        :param source_firewall_group_id: ID of source firewall group.
        :param source_ip_address: IPv4-, IPv6 address or CIDR.
        :param source_port: Port or port range (e.g. 80:90)
        :raises: BadRequestException if parameters are malformed
        :return: created firewall rule
        :rtype: FirewallRule
        """
        # Thin wrapper: kwargs are pre-filtered by the decorator, then passed
        # straight to the SDK network proxy; value validation happens
        # server-side.
        return self.network.create_firewall_rule(**kwargs)
def delete_firewall_rule(self, name_or_id, filters=None):
"""
Deletes firewall rule.
Prints debug message in case to-be-deleted resource was not found.
:param name_or_id: firewall rule name or id
:param dict filters: optional filters
:raises: DuplicateResource on multiple matches
:return: True if resource is successfully deleted, False otherwise.
:rtype: bool
"""
if not filters:
filters = {}
try:
firewall_rule = self.network.find_firewall_rule(
name_or_id, ignore_missing=False, **filters)
self.network.delete_firewall_rule(firewall_rule,
ignore_missing=False)
except exceptions.ResourceNotFound:
self.log.debug('Firewall rule %s not found for deleting',
name_or_id)
return False
return True
def get_firewall_rule(self, name_or_id, filters=None):
"""
Retrieves a single firewall rule.
:param name_or_id: firewall rule name or id
:param dict filters: optional filters
:raises: DuplicateResource on multiple matches
:return: firewall rule dict or None if not found
:rtype: FirewallRule
"""
if not filters:
filters = {}
return self.network.find_firewall_rule(name_or_id, **filters)
def list_firewall_rules(self, filters=None):
"""
Lists firewall rules.
:param dict filters: optional filters
:return: list of firewall rules
:rtype: list[FirewallRule]
"""
if not filters:
filters = {}
return list(self.network.firewall_rules(**filters))
@_utils.valid_kwargs(
'action', 'description', 'destination_firewall_group_id',
'destination_ip_address', 'destination_port', 'enabled', 'ip_version',
'name', 'project_id', 'protocol', 'shared', 'source_firewall_group_id',
'source_ip_address', 'source_port')
def update_firewall_rule(self, name_or_id, filters=None, **kwargs):
"""
Updates firewall rule.
:param name_or_id: firewall rule name or id
:param dict filters: optional filters
:param kwargs: firewall rule update parameters.
See create_firewall_rule docstring for valid parameters.
:raises: BadRequestException if parameters are malformed
:raises: NotFoundException if resource is not found
:return: updated firewall rule
:rtype: FirewallRule
"""
if not filters:
filters = {}
firewall_rule = self.network.find_firewall_rule(
name_or_id, ignore_missing=False, **filters)
return self.network.update_firewall_rule(firewall_rule, **kwargs)
def _get_firewall_rule_ids(self, name_or_id_list, filters=None):
"""
Takes a list of firewall rule name or ids, looks them up and returns
a list of firewall rule ids.
Used by `create_firewall_policy` and `update_firewall_policy`.
:param list[str] name_or_id_list: firewall rule name or id list
:param dict filters: optional filters
:raises: DuplicateResource on multiple matches
:raises: NotFoundException if resource is not found
:return: list of firewall rule ids
:rtype: list[str]
"""
if not filters:
filters = {}
ids_list = []
for name_or_id in name_or_id_list:
ids_list.append(self.network.find_firewall_rule(
name_or_id, ignore_missing=False, **filters)['id'])
return ids_list
@_utils.valid_kwargs('audited', 'description', 'firewall_rules', 'name',
'project_id', 'shared')
def create_firewall_policy(self, **kwargs):
"""
Create firewall policy.
:param bool audited: Status of audition of firewall policy.
Set to False each time the firewall policy or the
associated firewall rules are changed.
Has to be explicitly set to True.
:param description: Human-readable description.
:param list[str] firewall_rules: List of associated firewall rules.
:param name: Human-readable name.
:param project_id: Project id.
:param bool shared: Visibility to other projects.
Defaults to False.
:raises: BadRequestException if parameters are malformed
:raises: ResourceNotFound if a resource from firewall_list not found
:return: created firewall policy
:rtype: FirewallPolicy
"""
if 'firewall_rules' in kwargs:
kwargs['firewall_rules'] = self._get_firewall_rule_ids(
kwargs['firewall_rules'])
return self.network.create_firewall_policy(**kwargs)
def delete_firewall_policy(self, name_or_id, filters=None):
"""
Deletes firewall policy.
Prints debug message in case to-be-deleted resource was not found.
:param name_or_id: firewall policy name or id
:param dict filters: optional filters
:raises: DuplicateResource on multiple matches
:return: True if resource is successfully deleted, False otherwise.
:rtype: bool
"""
if not filters:
filters = {}
try:
firewall_policy = self.network.find_firewall_policy(
name_or_id, ignore_missing=False, **filters)
self.network.delete_firewall_policy(firewall_policy,
ignore_missing=False)
except exceptions.ResourceNotFound:
self.log.debug('Firewall policy %s not found for deleting',
name_or_id)
return False
return True
def get_firewall_policy(self, name_or_id, filters=None):
"""
Retrieves a single firewall policy.
:param name_or_id: firewall policy name or id
:param dict filters: optional filters
:raises: DuplicateResource on multiple matches
:return: firewall policy or None if not found
:rtype: FirewallPolicy
"""
if not filters:
filters = {}
return self.network.find_firewall_policy(name_or_id, **filters)
def list_firewall_policies(self, filters=None):
"""
Lists firewall policies.
:param dict filters: optional filters
:return: list of firewall policies
:rtype: list[FirewallPolicy]
"""
if not filters:
filters = {}
return list(self.network.firewall_policies(**filters))
@_utils.valid_kwargs('audited', 'description', 'firewall_rules', 'name',
'project_id', 'shared')
def update_firewall_policy(self, name_or_id, filters=None, **kwargs):
"""
Updates firewall policy.
:param name_or_id: firewall policy name or id
:param dict filters: optional filters
:param kwargs: firewall policy update parameters
See create_firewall_policy docstring for valid parameters.
:raises: BadRequestException if parameters are malformed
:raises: DuplicateResource on multiple matches
:raises: ResourceNotFound if resource is not found
:return: updated firewall policy
:rtype: FirewallPolicy
"""
if not filters:
filters = {}
firewall_policy = self.network.find_firewall_policy(
name_or_id, ignore_missing=False, **filters)
if 'firewall_rules' in kwargs:
kwargs['firewall_rules'] = self._get_firewall_rule_ids(
kwargs['firewall_rules'])
return self.network.update_firewall_policy(firewall_policy, **kwargs)
    def insert_rule_into_policy(self, name_or_id, rule_name_or_id,
                                insert_after=None, insert_before=None,
                                filters=None):
        """
        Adds firewall rule to the firewall_rules list of a firewall policy.

        Short-circuits and returns the firewall policy early if the firewall
        rule id is already present in the firewall_rules list.

        This method doesn't do re-ordering. If you want to move a firewall
        rule up or down the list, you have to remove and re-add it.

        :param name_or_id: firewall policy name or id
        :param rule_name_or_id: firewall rule name or id
        :param insert_after: rule name or id that should precede added rule
        :param insert_before: rule name or id that should succeed added rule
        :param dict filters: optional filters
        :raises: DuplicateResource on multiple matches
        :raises: ResourceNotFound if firewall policy or any of the firewall
                 rules (inserted, after, before) is not found.
        :return: updated firewall policy
        :rtype: FirewallPolicy
        """
        if not filters:
            filters = {}
        firewall_policy = self.network.find_firewall_policy(
            name_or_id, ignore_missing=False, **filters)
        firewall_rule = self.network.find_firewall_rule(
            rule_name_or_id, ignore_missing=False)
        # short-circuit if rule already in firewall_rules list
        # the API can't do any re-ordering of existing rules
        if firewall_rule['id'] in firewall_policy['firewall_rules']:
            self.log.debug(
                'Firewall rule %s already associated with firewall policy %s',
                rule_name_or_id, name_or_id)
            return firewall_policy
        # Resolve optional anchors to ids; both may be passed together.
        pos_params = {}
        if insert_after is not None:
            pos_params['insert_after'] = self.network.find_firewall_rule(
                insert_after, ignore_missing=False)['id']
        if insert_before is not None:
            pos_params['insert_before'] = self.network.find_firewall_rule(
                insert_before, ignore_missing=False)['id']
        return self.network.insert_rule_into_policy(firewall_policy['id'],
                                                    firewall_rule['id'],
                                                    **pos_params)
def remove_rule_from_policy(self, name_or_id, rule_name_or_id,
filters=None):
"""
Remove firewall rule from firewall policy's firewall_rules list.
Short-circuits and returns firewall policy early if firewall rule
is already absent from the firewall_rules list.
:param name_or_id: firewall policy name or id
:param rule_name_or_id: firewall rule name or id
:param dict filters: optional filters
:raises: DuplicateResource on multiple matches
:raises: ResourceNotFound if firewall policy is not found
:return: updated firewall policy
:rtype: FirewallPolicy
"""
if not filters:
filters = {}
firewall_policy = self.network.find_firewall_policy(
name_or_id, ignore_missing=False, **filters)
firewall_rule = self.network.find_firewall_rule(rule_name_or_id)
if not firewall_rule:
# short-circuit: if firewall rule is not found,
# return current firewall policy
self.log.debug('Firewall rule %s not found for removing',
rule_name_or_id)
return firewall_policy
if firewall_rule['id'] not in firewall_policy['firewall_rules']:
# short-circuit: if firewall rule id is not associated,
# log it to debug and return current firewall policy
self.log.debug(
'Firewall rule %s not associated with firewall policy %s',
rule_name_or_id, name_or_id)
return firewall_policy
return self.network.remove_rule_from_policy(firewall_policy['id'],
firewall_rule['id'])
@_utils.valid_kwargs(
'admin_state_up', 'description', 'egress_firewall_policy',
'ingress_firewall_policy', 'name', 'ports', 'project_id', 'shared')
def create_firewall_group(self, **kwargs):
"""
Creates firewall group. The keys egress_firewall_policy and
ingress_firewall_policy are looked up and mapped as
egress_firewall_policy_id and ingress_firewall_policy_id respectively.
Port name or ids list is transformed to port ids list before the POST
request.
:param bool admin_state_up: State of firewall group.
Will block all traffic if set to False.
Defaults to True.
:param description: Human-readable description.
:param egress_firewall_policy: Name or id of egress firewall policy.
:param ingress_firewall_policy: Name or id of ingress firewall policy.
:param name: Human-readable name.
:param | |
<filename>processing.py
#AUTHOR : <NAME>
#MATRICULATION NUMBER : 65074
#Personal Programming Project
#---------------------------------------------------------------------------------------#
#A python file where Iso-geometric analysis is performed along with Topology optimization
# --------------------------------------------------------------------------------------#
from inputs import *
from geometry import knot_connectivity, controlpointassembly
from element_routine import assemble, element_routine, apply_BC,Compliance_matrix ,stress_strain_element_routine,gauss_quadrature
from boundary_conditions import BC_Switcher
from optimization import Knearestneighbours, optimality_criteria, Moving_asymptoes
import matplotlib.pyplot as plt
from visulaization import element_density_vis,element_density_slided,element_density_slided1,deformation_plot,mesh_vis
from gridtoVTK import VTK
import time
#ANSI colour escape codes used to highlight status text on the terminal
TYELLOW = '\033[33;1m'
TGREEN = '\033[32;1m'
TRED='\033[31;1m'
TBLUE = '\033[34;1m'
# ENDC resets the terminal colour back to the default
ENDC = '\033[m'
#maximum values of the penalty and optimality power (used by the optimization step)
pmax = 3.5
gmax = 1
#polynomial degree of the knot/basis along the xi, eta and neta directions
XI_DEGREE = 1
ETA_DEGREE = 1
NETA_DEGREE = 1
# number of control points per direction; nx, ny, nz come from the inputs module
N = nx
P = ny
Q = nz
# Input parameters required for IGA (knot vectors, control points, knot spans)
# are generated from the geometry dimensions and chosen degrees.
C = Inputs(length, height, width, N, P, Q, XI_DEGREE, ETA_DEGREE, NETA_DEGREE)
CONTROL_POINTS = C.crtpts_coordinates()
# last column of the control-point array holds the NURBS weights
WEIGHTS = CONTROL_POINTS[:, -1]
XI_KNOTVECTOR = C.xi_knotvector()
ETA_KNOTVECTOR = C.eta_knotvector()
NETA_KNOTVECTOR = C.neta_knotvector()
# per-direction knot spans, connectivity and unique-knot counts; nU/nV/nW are
# presumably the number of non-zero spans (elements) per direction -- TODO confirm
XI_SPAN, XI_KNOTCONNECTIVITY, XI_UNIKNOTS, nU = C.xi_knotspan()
ETA_SPAN, ETA_KNOTCONNECTIVITY, ETA_UNIKNOTS, nV = C.eta_knotspan()
NETA_SPAN, NETA_KNOTCONNECTIVITY, NETA_UNIKNOTS, nW = C.neta_knotspan()
print('\n')
ncp = N * P * Q  # total number of control points
dof = 3  # degrees of freedom per control point (x, y, z displacement)
dofcp = ncp * dof  # total number of degrees of freedom of the structure
nel = nU * nV * nW  # total number of elements
width1 = 120
print('#' * width1)
fmt = '{:^' + str(width1) + '}'
print(TBLUE+fmt.format('Dimensions of the structure \n')+ENDC)
print(' Length :', length, ' Height :', height,
      ' Width :', width, '\n')
print(' Xi degree :', XI_DEGREE, ' Eta degree :', ETA_DEGREE,
      ' Neta degree :', NETA_DEGREE, '\n')
print(' NX :', N - XI_DEGREE, ' NY :', P - ETA_DEGREE,
      ' NZ :', Q - NETA_DEGREE, '\n')
print(TGREEN+'Number of degrees of freedom :', dofcp, '\n')
print('Number of Elements:', nel, '\n')
print('No of control points in each element:', (XI_DEGREE + 1) * (ETA_DEGREE + 1) * (NETA_DEGREE + 1), '\n'+ENDC)
print('>' * width1)
print('Length of the knot vector in respective direction \n')
print('XI Vector :', list(XI_KNOTVECTOR), '\nETA vector :', list(ETA_KNOTVECTOR), '\nNETA vector :',
      list(NETA_KNOTVECTOR), '\n')
print('<' * width1)
#initialization of the global stiffness matrix, external force vector and displacement vector
K_G = np.zeros((dofcp, dofcp))
F_E = np.zeros(dofcp)
U = np.zeros(dofcp)
print('#' * width1)
fmt = '{:^' + str(width1) + '}'
print(TBLUE+fmt.format('Program has started \n')+ENDC)
# K_disp toggles one-time diagnostic output inside assemble(); reset to False
# after the first element below
K_disp = True
#Generation of the control point assembly and knot index of each element
element_indicies = controlpointassembly(N, P, Q, nU, nV, nW, XI_DEGREE, ETA_DEGREE, NETA_DEGREE, XI_KNOTCONNECTIVITY,
                                        ETA_KNOTCONNECTIVITY, NETA_KNOTCONNECTIVITY)
span_index = knot_connectivity(N, P, Q, XI_KNOTCONNECTIVITY, ETA_KNOTCONNECTIVITY, NETA_KNOTCONNECTIVITY)
print('$' * width1)
fmt = '{:^' + str(width1) + '}'
IGA_start = time.time()
# accumulated wall-clock time spent assembling the global stiffness matrix
KG_ex_time=0
print(TYELLOW+fmt.format('Finite Element Analysis based on ISO-Geometric analysis(NURBS)\n')+ENDC)
#looped over number of element to build global stiffness matrix
for i in range(0, nel):
KG_start=time.time() # start of loop
el_in = element_indicies[i, :] # element indices obtained from control point assembly contianing the nodes present in the element
sp_in = span_index[i, :] # No of knots present with element
#Co-ordinates and weights of the control points
X = CONTROL_POINTS[el_in, 0]
Y = CONTROL_POINTS[el_in, 1]
Z = CONTROL_POINTS[el_in, 2]
weights = CONTROL_POINTS[el_in, 3]
#length of the knot in respective direction(xi,eta,neta).
Uspan = XI_SPAN[sp_in[0], :]
Vspan = ETA_SPAN[sp_in[1], :]
Wspan = NETA_SPAN[sp_in[2], :]
#obtaining element stiffness matrix from element routine.
K_E, NURBS, R,B = element_routine(X, Y, Z, weights, Youngs_modulus, poission_ratio, Uspan, Vspan, Wspan, XI_DEGREE,
XI_KNOTVECTOR, ETA_DEGREE, ETA_KNOTVECTOR, NETA_DEGREE, NETA_KNOTVECTOR)
#Assembly of global stiffness matrix from element stiffness matrix
K_G = assemble(K_G, K_E, el_in, ncp, K_disp)
K_disp = False
KG_stop=time.time() # end of loop
KG_ex_time+=(KG_stop-KG_start) # time taken to build the global stiffness matrix is calculated
print(' Execution time :',KG_ex_time,'\n')
bc_disp = False
abc_disp = True
BC_start=time.time()
#Boundary conditon are obtained from python switch class based on the BC_option
BC = BC_Switcher(CONTROL_POINTS, length, height, width, bc_disp)
fixed_dof, load_dof, fixed_pts, load_pts = BC.indirect(option) #fixed and load indices are obtained
# Boundary conditions are applied by deleting the fixed rows and columns of global stiffness matrix and external force vector.
reduced_k=np.delete(np.delete(K_G, fixed_dof, 0),fixed_dof , 1)
reduced_F = apply_BC(F_E, fixed_dof, load_dof, load,option,abc_disp)
BC_stop=time.time()
BC_ex_time=(BC_stop-BC_start)
print(' Execution time :',BC_ex_time,'\n')
DIS_start=time.time()
# The displacement are calculated from reduced global stiffness matrix and reduced external force vector.
U = np.linalg.solve(reduced_k, reduced_F) # based on equ. 4.15
#Mapping of the displacement along with fixed nodes.
print('Calculating Displacements \n')
for j in fixed_dof:
U = np.insert(U, j, 0)
if option==4:
F_E[load_dof[0]] = load
F_E[load_dof[1]] = -load
else:
F_E[load_dof] = load
DIS_stop=time.time()
DIS_ex_time=(DIS_stop-DIS_start)
print(' Execution time :',DIS_ex_time,'\n')
U_new = np.array((U.reshape(len(CONTROL_POINTS), 3)), dtype='float64')
print('Mapping Displacements \n')
New_control_points = CONTROL_POINTS[:,:-2]+ U.reshape(len(CONTROL_POINTS), 3)
UX = U_new[:, 0]
UY = U_new[:, 1]
UZ = U_new[:, 2]
IGA_stop = time.time()
IGA_ex_time=(IGA_stop-IGA_start) # time taken to run the IGFEM is calculated
print('Execution time for IGA analysis at 100% volume :',IGA_ex_time,'\n')
energy_stored = np.dot(0.5, U @ F_E) #strain energy is calculated
print(TRED+'\nThe structure is not optimised and has 100% volume \n'+ENDC)
print('The strain energy of the structure :', energy_stored)
print('$' * width1)
CP = CONTROL_POINTS[:, :-2]
if mesh_disp:
#plotting the deformed and undeformed 3D structure.
mesh_vis(CP, New_control_points, nx, ny, nz,optimizer)
# To implement modified SIMP method equ. 4.21, we require Young's modulus of void and solid
Emin = 1e-09 # Young's modulus of void (intializing a value almost equal to 0 to avoid null point errors)
E0 = 1 # Young's mosulus of solid
#intializing the dimensions of global stiffness matrix and external force vector for toplogy optimization
K_G = np.zeros((dofcp, dofcp))
F_E = np.zeros(dofcp)
U = np.zeros(dofcp)
CC = []
ii = []
VV = []
oc_disp = True
bc_disp = True
fil_disp = True
max_iterations = 250
print('+' * width1)
fmt = '{:^' + str(width1) + '}'
print(TYELLOW+fmt.format('Structural Topology optimization using IGA \n')+ENDC)
print(TBLUE+'\n Optimization has started \n'+ENDC)
print(' Density of the material :', density)
print(' Youngs Modulus :', Youngs_modulus)
print(' Poission ratio :', poission_ratio, '\n')
fmt = '{:^' + str(width1) + '}'
print(TRED+fmt.format('The percentage of volume which has to remain after optimization \n'))
fmt = '{:^' + str(width1) + '}'
print(fmt.format( volume_frac)+ENDC)
print('+' * width1)
#initialzing dimension and initial values of variables used in toplogy optimization
density_basis = np.zeros(nel)
ele_den_filter = np.zeros(nel)
element_density = np.ones(nel) * volume_frac # density of each element (initially taken as one)
density_basis_dx = np.zeros(nel)
dcompliance = np.zeros(nel) # change in compliance w.r.t element density (initially taken as zero)
compliance = 0 #intial compliance of the structure
# performing sensitivity analysis i.e giving weights to the respective elements.
nfilter = int(nel * ((2 * (np.ceil(rmin) - 1) + 1) ** 2))
filter_N = np.zeros(((XI_DEGREE + 1) * (ETA_DEGREE + 1) * (NETA_DEGREE + 1), nel))
#Weight factor are obtained from the below function
H, DH = Knearestneighbours(rmin, nU, nV, nW)
loop = 0
change = 1
g = 1
if penal==0:
penal=max(15*((1-poission_ratio)/(7-5*poission_ratio)),(3/2)*((1-poission_ratio)/(1-2*poission_ratio)))
#intializing dimension and initial value of the variables.
Xmin = np.zeros(nel) # Minimum of the element density i.e 0
Xmax = np.ones(nel) # Maximum of the element density i.e 1
#Lower and Upper values are used in MMA method
Lower = Xmin
Upper = Xmin
E1 = element_density # values of element density in previous iteration k-1
E2 = element_density # value of element density in previous iteration k-2
Total_time=0
OC_residual=0
# loop run until the termination condition is satisified i.e change in compliance from previous iteration
while change > 0.01:
#intializing the dimensions of global stiffness matrix, external force vector, compliance,change in compliance for toplogy optimization in each iteration.
K_G = np.zeros((dofcp, dofcp))
F_E = np.zeros(dofcp)
U = np.zeros(dofcp)
dcompliance = np.zeros(nel)
compliance = 0
# nodal density are calculated based on equ. 4.20
node_density = np.ones((nel, (XI_DEGREE + 1) * (ETA_DEGREE + 1) * (NETA_DEGREE + 1)))
for h in range(nel):
node_density[h, :] = node_density[h, :] * element_density[h] #Based on equ. 4.20
#looped over number of elements
for i in range(0, nel):
el_in = element_indicies[i, :] # control point assembly
sp_in = span_index[i, :] #span each knot along xi,eta,neta
#co-ordinates of the control points
X = CONTROL_POINTS[el_in, 0]
Y = CONTROL_POINTS[el_in, 1]
Z = CONTROL_POINTS[el_in, 2]
weights = CONTROL_POINTS[el_in, 3]
Uspan = XI_SPAN[sp_in[0], :]
Vspan = ETA_SPAN[sp_in[1], :]
Wspan = NETA_SPAN[sp_in[2], :]
#Obtaining element stiffness matrix
K_E, NURBS, R,B = element_routine(X, Y, Z, weights, Youngs_modulus, poission_ratio, Uspan, Vspan, Wspan,
XI_DEGREE, XI_KNOTVECTOR, ETA_DEGREE, ETA_KNOTVECTOR, NETA_DEGREE,
NETA_KNOTVECTOR)
#calculating element density based on equ. 4.20
element_density[i] = np.dot(node_density[i, :], NURBS)
filter_N[:, i] = NURBS
#Implementation of Modified SIMP method
density_basis[i] = Emin + (element_density[i] ** penal) * (E0 - Emin) #Based on equ. 4.21
# Global stiffness matrix is calculated
K_E = density_basis[i] * K_E
K_G = assemble(K_G, K_E, el_in, ncp) #based on equ. 4.17
#Boundary condition are obtained
BC | |
#%% ----------------------------------------------------------------------------
# <NAME>, March 2021
# KWR BO 402045-247
# ZZS verwijdering bodempassage
# AquaPriori - Transport Model
# With <NAME>, <NAME>, <NAME>, <NAME>
#
# Based on Stuyfzand, <NAME>. (2020). Predicting organic micropollutant behavior
# for 4 public supply well field types, with TRANSATOMIC Lite+
# (Vol. 2). Nieuwegein, Netherlands.
# ------------------------------------------------------------------------------
#### Notes ####
# things which must be checked indicated in comments with AH
# specific questions flagged for;
# @MartinvdS // @steven //@martinK
####
#%% ----------------------------------------------------------------------------
# INITIALISATION OF PYTHON e.g. packages, etc.
# ------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from pandas import read_csv
from pandas import read_excel
import math
from scipy.special import kn as besselk
import datetime
from datetime import timedelta
path = os.getcwd() # path of working directory
class Substance:
    '''
    Placeholder class which will later be replaced by the QSAR functionality of AquaPriori.

    Attributes
    ---------
    substance_name: String,
        substance_name of the substance (for now limited dictionary to 'benzene', 'AMPA', 'benzo(a)pyrene')
    substance_dict: dictionary
        Nested dictionary holding, per substance:
        substance_name (String), log_Koc (float, distribution coefficient of
        organic carbon and water, [-]), molar_mass (float, [g/mol]), pKa
        (float, dissociation constant for acid H-OMP, [-]) and omp_half_life
        (dict keyed by redox zone 'suboxic' / 'anoxic' / 'deeply_anoxic', [days]).
    '''

    def __init__(self, substance_name, ):
        """
        Look up the parameter set for ``substance_name``.

        Parameters
        ----------
        substance_name: String,
            substance_name of the substance (for now limited dictionary to
            'benzene', 'AMPA', 'benzo(a)pyrene', 'OMP-X')

        Raises
        ------
        KeyError
            If the substance is not in the placeholder database.
        """
        self.substance_name = substance_name

        # Placeholder for the actual substance database; keys and values must
        # stay exactly as published in the TRANSATOMIC report.
        _database = {
            'benzene': {
                'substance_name': 'benzene',
                'log_Koc': 1.92,
                'molar_mass': 78.1,
                'pKa': 99,
                'omp_half_life': {'suboxic': 10.5, 'anoxic': 420, 'deeply_anoxic': 1e99},
            },
            'AMPA': {
                'substance_name': 'AMPA',
                'log_Koc': -0.36,
                'molar_mass': 111.04,
                'pKa': 0.4,
                'omp_half_life': {'suboxic': 46, 'anoxic': 46, 'deeply_anoxic': 1e99},
            },
            'benzo(a)pyrene': {
                'substance_name': 'benzo(a)pyrene',
                'log_Koc': 6.43,
                'molar_mass': 252.3,
                'pKa': 99,
                'omp_half_life': {'suboxic': 530, 'anoxic': 2120, 'deeply_anoxic': 2120},
            },
            'OMP-X': {
                'substance_name': 'OMP-X',
                'log_Koc': 0,
                'molar_mass': 100,
                'pKa': 99,
                'omp_half_life': {'suboxic': 1e99, 'anoxic': 1e99, 'deeply_anoxic': 1e99},
            },
        }
        self.substance_dict = _database[substance_name]

#ah_todo @MartinK, MartinvdS -> let the user specify the chemical in the Substance transport file instead of schematisation?
# also let them feed it a dictionary with their own substance?
#ah_todo @MartinK, MartinvdS -> let the user specify the chemical in the Substance transport file instead of schematisation?
# also let them feed it a dictionary with their own substance?
class SubstanceTransport():
"""
Returns concentration in a groundwater well for a given Organic Micro Pollutant or microbial species.
Attributes
----------
analytical_well: object
The AnalyticalWell object for the schematisation of the aquifer type.
omp_inialized: bool
Boolian indicating whether the Substance object has been initialized
df_flowline: pandas.DataFrame
Column 'flowline_id': Integer
Column 'flowline_type': string
Column 'flowline_discharge': Float
Column 'particle_release_day': Float
Column 'input_concentration': float
Column 'endpoint_id': Integer
Column 'well_discharge': float
Column 'substance': string
Column 'removal_function': string
Column 'total_breakthrough_travel_time': float
Column 'breakthrough_concentration': float
df_particle: pandas.DataFrame
Column 'flowline_id': int
Column 'zone': string
Column 'travel_time': float
Column 'xcoord': float
Column 'ycoord': float
Column 'zcoord': float
Column 'redox': float
Column 'temperature': float
Column 'travel_distance': float
Column 'porosity': float
Column 'dissolved_organic_carbon': float
Column 'pH': float
Column 'fraction_organic_carbon': float
Column 'solid_density': float
Column 'input_concentration': float
Column 'steady_state_concentration': float
Column 'omp_half_life': float
Column 'log_Koc': float
Column 'pKa': float
Column 'Koc_temperature_correction': float
Column 'omp_half_life_temperature_corrected': float
Column 'retardation': float
Column 'breakthrough_travel_time': float
substance: object
The Substance object with the OMP of interest.
substance_dict: dictionary
Nested dictionary with the following per substance.
substance_name: String,
substance_name of the substance (for now limited dictionary to 'benzene', 'AMPA', 'benzo(a)pyrene'
log Koc: float
distribution coefficient of organic carbon and water [-]
molar_mass: float
molar mass of substance [g/mol]
pKa: float
            dissociation constant for acid H-OMP [-]
omp_half_life: float
per redox zone ('suboxic', 'anoxic', deeply_anoxic'), [days]
"""
def __init__(self,
analytical_well,
substance: Substance):
'''
Initialization of the Substanes class, checks for user-defined OMP substance paramters and overrides the database values.
Parameters
----------
analytical_well: object
The AnalyticalWell object for the schematisation of the aquifer type.
substance: object
The Substance object with the OMP of interest.
'''
self.analytical_well = analytical_well
self.omp_inialized = False
self.df_particle = analytical_well.df_particle
self.df_flowline = analytical_well.df_flowline
self.substance = Substance(substance)
# AH need to make sure here that the substance passed is the same, e.g. comapre the dictionaries BUT ALSO
# make sure that user doesn't call one substance in the hydrochemicalschematisation class and another in the concentration class
# probably only a problem for ourselves, this should be written into a larger "run" class for the model which could avoid this
if self.substance.substance_name == self.analytical_well.schematisation.substance:
# Compare the dictionaries and override the default values if the user inputs a value
# assumes that default dict contains the substance input by the user (we only have three right now though!)
default_substance_dict = self.substance.substance_dict
user_substance_dict = self.analytical_well.schematisation.substance_parameters #user input dictionary of values
# iterate through the dicitonary keys
for key, value in user_substance_dict .items():
if type(value) is dict:
for tkey, cvalue in value.items():
if cvalue is None: #reassign the value from the default dict if not input by the user
user_substance_dict[key][tkey] = default_substance_dict[key][tkey]
else:
if value is None:
user_substance_dict [key] = default_substance_dict[key]
self.substance_dict = user_substance_dict #assign updated dict as attribute of the class to be able to access later
else:
self.substance_dict = self.substance.substance_dict
# self.df_flowline['substance'] = self.substance_dict['substance_name']
def _init_omp(self):
'''
Initialisation if the Substance is an OMP
'''
if self.omp_inialized:
pass
else:
self.df_particle['omp_half_life'] = self.df_particle['redox'].map(self.substance_dict['omp_half_life'])
self.df_particle['log_Koc'] = self.substance_dict['log_Koc']
self.df_particle['pKa'] = self.substance_dict['pKa']
self.omp_inialized = True
def _init_pathogen():
''' Initialisation if the Substance is a pathogen'''
pass
    def _calculate_retardation(self):
        ''' Calculates the retardation of the OMP due to sorption and biodegradation.
        Adds a column to the 'df_particle' with the retardation value.

        Equation 4.8-4.10 in TRANSATOMIC report
        Retardation equation based on Karickhoff (1981) and Schwarzenbach et al. (1993)
        (section 10.3 in Appelo & Postma 2005), however with addition of
        the effects of (i) DOC-binding according to Kan & Tomson (1990),
        and (ii) OMP ionization (dissociation) according to Schellenberg et al. (1984)

        Requires 'Koc_temperature_correction' on df_particle, i.e.
        _calculate_Koc_temperature_correction() must have run first.

        Returns
        -------
        df_particle: pandas.dataframe
            Column 'retardation': float
        '''
        #0.2 -> fraction of binding sites supplied by DOC which bind the OMP
        #and prevent sortion to aquifer
        # The 1/(1 + 10**(pH - pKa)) factor is the neutral (sorbing) fraction of
        # the acid OMP (Schellenberg et al. 1984); 0.000001 converts DOC from
        # mg/L to kg/L in the DOC-binding term.
        if self.analytical_well.schematisation.biodegradation_sorbed_phase:
            self.df_particle['retardation'] = (1 + (1 / (1 + 10 ** (self.df_particle.pH - self.df_particle.pKa)) * self.df_particle.solid_density
                            * (1 - self.df_particle.porosity)
                            * self.df_particle.fraction_organic_carbon * self.df_particle.Koc_temperature_correction)
                        / (self.df_particle.porosity * (1 + (self.df_particle.Koc_temperature_correction * 1 / (1 + 10 ** (self.df_particle.pH - self.df_particle.pKa))
                        * 0.2 * self.df_particle.dissolved_organic_carbon * 0.000001))))
        else:
            # No sorbed-phase effects considered: transport at pore-water velocity.
            self.df_particle['retardation'] = 1
    def _calculate_omp_half_life_temperature_correction(self):
        '''
        Corrects the OMP half-life for temperature if 'temp_correction_halflife' is 'True' in the HydroChemicalSchematisation.
        Adds column to 'df_particle' with corrected value.

        Equation 3.2 in TRANSATOMIC report (Arrhenius-type correction
        relative to the 20 degC reference half-life):
        R = 8.314 J/K/mol
        Ea = activation energy = 63*10^3 J/mol

        Returns
        -------
        df_particle: pandas.dataframe
            Column 'omp_half_life_temperature_corrected': float'''

        if self.analytical_well.schematisation.temp_correction_halflife:
            # -63000 / (2.303 * 8.314) = -Ea / (ln(10) * R); temperatures in Kelvin.
            self.df_particle['omp_half_life_temperature_corrected'] = self.df_particle['omp_half_life'] * 10 ** (-63000 / (2.303 * 8.314) * (1 / (20 + 273.15) - 1 / (self.df_particle.temperature + 273.15)))
        else:
            self.df_particle['omp_half_life_temperature_corrected'] = self.df_particle['omp_half_life']

        # 1e99 is the 'no degradation' sentinel (see Substance database); keep it
        # unscaled regardless of the temperature correction above.
        self.df_particle.loc[ self.df_particle.omp_half_life == 1e99, 'omp_half_life_temperature_corrected'] = 1e99
def _calculate_Koc_temperature_correction(self):
''' Corrects the OMP Koc for temperature if 'temp_correction_Koc' is 'True' in the HydroChemicalSchematisation.
Adds column to 'df_particle' with corrected value.
Equation 3.1 in TRANSATOMIC report,
from Luers and <NAME> (1996): Assuming the relation to be similar
to the Van ‘t Hoff equation and equally performing for other OMPs yields
Returns
-------
df_particle: pandas.dataframe
Column 'Koc_temperature_correction': float
'''
# if log_Koc is zero, assign value of zero
if self.df_particle.log_Koc[0] == 0:
self.df_particle['Koc_temperature_correction'] = 0
elif self.analytical_well.schematisation.temp_correction_Koc:
self.df_particle['Koc_temperature_correction'] = 10 ** self.df_particle.log_Koc * 10 ** (1913 * (1 / (self.df_particle.temperature + 273.15) - 1 / (20 + 273.15)))
else:
self.df_particle['Koc_temperature_correction'] = self.df_particle.log_Koc
def _calculate_state_concentration_in_zone(self):
'''
Calculates the steady state concentration in the well for each flowline.
Add column to 'df_particle' with the steady state concentration
Equation 4.11 in TRANSATOMIC report
Returns
-------
df_particle: pandas.dataframe
Column 'steady_state_concentration': float
'''
#check if there is degradation prior to infiltration
DOC_inf = self.analytical_well.schematisation.dissolved_organic_carbon_infiltration_water
TOC_inf = self.analytical_well.schematisation.total_organic_carbon_infiltration_water
if DOC_inf and TOC_inf > 0:
DOC_TOC_ratio | |
<filename>academic_observatory_workflows/workflows/web_of_science_telescope.py
# Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import logging
import os
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor, as_completed
from math import floor
from typing import Any, Dict, List, Tuple, Type, Union
import backoff
import jsonlines
import pendulum
import xmltodict
from academic_observatory_workflows.config import schema_folder as default_schema_folder
from airflow.exceptions import AirflowException
from google.cloud.bigquery import WriteDisposition
from observatory.platform.utils.airflow_utils import (
AirflowConns,
AirflowVars,
get_airflow_connection_login,
get_airflow_connection_password,
)
from observatory.platform.utils.file_utils import load_file, write_to_file
from observatory.platform.utils.workflow_utils import (
blob_name,
bq_load_shard,
build_schedule,
get_as_list,
get_as_list_or_none,
get_chunks,
get_entry_or_none,
)
from observatory.platform.workflows.snapshot_telescope import (
SnapshotRelease,
SnapshotTelescope,
)
from ratelimit import limits, sleep_and_retry
from suds import WebFault
from wos import WosClient
class WosUtilConst:
    """Class containing some WosUtility constants. Makes these values accessible by decorators."""

    # WoS advertised limits are used only as a guide; the throttle values
    # below are deliberately more conservative.
    RESULT_LIMIT = 100  # maximum records returned per query
    CALL_LIMIT = 1  # WoS allows 2 API calls/s; we allow 1 call per CALL_PERIOD
    CALL_PERIOD = 2  # seconds between throttled calls
    SESSION_CALL_LIMIT = 5  # maximum concurrent sessions
    SESSION_CALL_PERIOD = 360  # 6 minutes (actual WoS limit is 5 minutes)
    RETRIES = 3  # retry attempts on a WebFault
class WosUtility:
    """Handles the interaction with Web of Science."""

    @staticmethod
    def build_query(*, institution_ids: List[str], period: Type[pendulum.Period]) -> OrderedDict:
        """Build a WoS API query.

        :param institution_ids: List of Institutional ID to query, e.g, "Curtin University"
        :param period: A pendulum period containing start and end dates.
        :return: Constructed web query.
        """
        start_date = period.start.isoformat()
        end_date = period.end.isoformat()

        organisations = " OR ".join(institution_ids)
        query_str = f"OG=({organisations})"

        query = OrderedDict(
            [
                ("query", query_str),
                ("count", WosUtilConst.RESULT_LIMIT),
                ("offset", 1),
                ("timeSpan", {"begin": start_date, "end": end_date}),
            ]
        )
        return query

    @staticmethod
    def parse_query(records: Any) -> Tuple[dict, str]:
        """Parse an XML record string returned by the web query.

        :param records: XML tree returned by the web query.
        :return: The list of parsed records and a schema version string.
        """
        records_dict = xmltodict.parse(records)["records"]
        schema_string = records_dict["@xmlns"]
        return get_as_list(records_dict, "REC"), schema_string

    @staticmethod
    @sleep_and_retry
    @limits(calls=WosUtilConst.CALL_LIMIT, period=WosUtilConst.CALL_PERIOD)
    def search(*, client: WosClient, query: OrderedDict) -> Any:
        """Throttling wrapper for the API call. This is a global limit for this API when called from a program on the
        same machine. If you are throttled, it will throw a WebFault and the exception message will contain the phrase
        'Request denied by Throttle server'

        Limiting to 1 call per second even though theoretical limit is 2 per second just in case.
        Throttle limits may or may not be enforced. Probably depends on how executors spin up tasks.

        :param client: WosClient object.
        :param query: Query object.
        :returns: Query results.
        """
        return client.search(**query)

    @staticmethod
    def make_query(*, client: WosClient, query: OrderedDict) -> List[Any]:
        """Make the API calls to retrieve information from Web of Science.

        Pages through the result set RESULT_LIMIT records at a time.

        :param client: WosClient object.
        :param query: Constructed search query from use build_query.
        :return: List of XML responses.
        """
        results = WosUtility.search(client=client, query=query)
        num_results = int(results.recordsFound)
        record_list = [results.records]

        if num_results > WosUtilConst.RESULT_LIMIT:
            for offset in range(WosUtilConst.RESULT_LIMIT + 1, num_results, WosUtilConst.RESULT_LIMIT):
                query["offset"] = offset
                record_list.append(WosUtility.search(client=client, query=query).records)

        return record_list

    @staticmethod
    def download_wos_period(
        *, client: WosClient, conn: str, period: pendulum.Period, institution_ids: List[str], download_dir: str
    ) -> List[str]:
        """Download records for a stated date range.

        Bug fix: previously declared -> List[str] but returned None; it now
        returns the list of files it saved, as documented.

        :param client: WosClient object.
        :param conn: connection_id string, used in log messages and file naming.
        :param period: pendulum period containing (start date, end date).
        :param institution_ids: List of Institutional ID to query, e.g, "Curtin University"
        :param download_dir: Directory to download files to.
        :return: List of saved files.
        """
        harvest_ts = pendulum.now("UTC")
        logging.info(f"{conn} with session id {client._SID}: retrieving period {period.start} - {period.end}")
        query = WosUtility.build_query(institution_ids=institution_ids, period=period)
        result = WosUtility.make_query(client=client, query=query)

        saved_files = []
        file_prefix = os.path.join(download_dir, f"{period.start}_{period.end}")
        for i, entry in enumerate(result):
            save_file = f"{file_prefix}_{i}_{harvest_ts}.xml"
            logging.info(f"Saving to file {save_file}")
            write_to_file(entry, save_file)
            saved_files.append(save_file)
        return saved_files

    @staticmethod
    @backoff.on_exception(
        backoff.constant, WebFault, max_tries=WosUtilConst.RETRIES, interval=WosUtilConst.SESSION_CALL_PERIOD
    )
    def download_wos_batch(
        *,
        login: str,
        password: str,
        batch: List[pendulum.Period],
        conn: str,
        institution_ids: List[str],
        download_dir: str,
    ) -> List[str]:
        """Download one batch of WoS snapshots. Throttling limits are more conservative than WoS limits.
        Throttle limits may or may not be enforced. Probably depends on how executors spin up tasks.

        Bug fix: previously declared -> List[str] but returned None; it now
        collects and returns the saved files, as documented.

        :param login: login.
        :param password: password.
        :param batch: List of pendulum periods to fetch.
        :param conn: connection_id string from Airflow variable.
        :param institution_ids: List of Institutional ID to query, e.g, "Curtin University"
        :param download_dir: Download directory to save response to.
        :return: List of saved files from this batch.
        """
        saved_files = []
        with WosClient(login, password) as client:
            for period in batch:
                saved_files.extend(
                    WosUtility.download_wos_period(
                        client=client, conn=conn, period=period, institution_ids=institution_ids, download_dir=download_dir
                    )
                )
        return saved_files

    @staticmethod
    def get_parallel_batches(schedule: List[pendulum.Period]) -> Tuple[int, List[List[pendulum.Period]]]:
        """Split the schedule into batches for parallel download sessions.

        If the number of periods is less than the session limit, a single session is used.
        Otherwise the schedule is split into at most SESSION_CALL_LIMIT near-equal
        batches using ceiling division, so every period is assigned to exactly one batch.

        Bug fix: the previous floor-division + remainder redistribution could
        produce MORE batches than sessions (e.g. 9 periods / 5 sessions gave 8
        batches); since download_wos_parallel only submits `sessions` batches,
        the surplus periods were silently never downloaded. The returned session
        count now always equals len(batches).

        :param schedule: Schedule to split.
        :return: Number of sessions, and the split schedule.
        """
        n_schedule = len(schedule)

        if n_schedule < WosUtilConst.SESSION_CALL_LIMIT:
            batches = [schedule]
        else:
            # Ceiling division guarantees at most SESSION_CALL_LIMIT chunks.
            batch_size = -(-n_schedule // WosUtilConst.SESSION_CALL_LIMIT)
            batches = list(get_chunks(input_list=schedule, chunk_size=batch_size))

        sessions = len(batches)
        return sessions, batches

    @staticmethod
    def download_wos_parallel(
        *,
        login: str,
        password: str,
        schedule: List[pendulum.Period],
        conn: str,
        institution_ids: List[str],
        download_dir: str,
    ) -> List[str]:
        """Download WoS snapshot with parallel sessions. Using threads.

        :param login: WoS login
        :param password: WoS password
        :param schedule: List of pendulum periods to download.
        :param conn: Airflow connection_id string.
        :param institution_ids: List of Institutional ID to query, e.g, "Curtin University"
        :param download_dir: Path to download to.
        :return: List of files downloaded.
        """
        sessions, batches = WosUtility.get_parallel_batches(schedule)

        saved_files = []
        with ThreadPoolExecutor(max_workers=sessions) as executor:
            futures = [
                executor.submit(
                    WosUtility.download_wos_batch,
                    login=login,
                    password=password,
                    batch=batches[i],
                    conn=conn,
                    institution_ids=institution_ids,
                    download_dir=download_dir,
                )
                for i in range(sessions)
            ]
            for future in as_completed(futures):
                # Propagates worker exceptions and collects the saved files.
                saved_files.extend(future.result())
        return saved_files

    @staticmethod
    def download_wos_sequential(
        *, login: str, password: str, schedule: list, conn: str, institution_ids: List[str], download_dir: str
    ) -> List[str]:
        """Download WoS snapshot sequentially.

        :param login: WoS login
        :param password: WoS password
        :param schedule: List of pendulum periods to download.
        :param conn: Airflow connection_id string.
        :param institution_ids: List of Institutional ID to query, e.g, "Curtin University"
        :param download_dir: Path to download to.
        :return: List of files downloaded.
        """
        return WosUtility.download_wos_batch(
            login=login,
            password=password,
            batch=schedule,
            conn=conn,
            institution_ids=institution_ids,
            download_dir=download_dir,
        )
class WosNameAttributes:
    """Helper class for parsing name attributes (r_id / ORCID) out of a WoS record."""

    def __init__(self, data: dict):
        """
        :param data: dictionary of a parsed WoS record.
        """
        self._contribs = WosNameAttributes._get_contribs(data)

    @staticmethod
    def _get_contribs(data: dict) -> dict:
        """Helper function to parse the contributors structure to aid extraction of fields.

        :param data: dictionary to query.
        :return: Dictionary of attributes keyed by full_name string
            ("first_name last_name"); empty if the record has no contributors.
        """
        contrib_dict = dict()

        # Contributor metadata is optional and loosely structured in WoS
        # records; treat any missing/malformed section as "no contributors".
        # (Narrowed from a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.)
        try:
            contributors = get_as_list(data["static_data"]["contributors"], "contributor")
            for contributor in contributors:
                name_field = contributor["name"]
                first_name = name_field["first_name"]
                last_name = name_field["last_name"]
                attrib = dict()
                full_name = f"{first_name} {last_name}"
                if "@r_id" in name_field:
                    attrib["r_id"] = name_field["@r_id"]
                if "@orcid_id" in name_field:
                    attrib["orcid"] = name_field["@orcid_id"]
                contrib_dict[full_name] = attrib
        except Exception:
            pass

        return contrib_dict

    def get_orcid(self, full_name: str) -> str:
        """Get the orcid id of a person. Note that full name must be the combination of first and last name.
        This is not necessarily the full_name field.

        :param full_name: The 'first_name last_name' string.
        :return: orcid id, or None if unknown.
        """
        # dict.get replaces the previous try/except-based lookup.
        return self._contribs.get(full_name, {}).get("orcid")

    def get_r_id(self, full_name: str) -> str:
        """Get the r_id of a person. Note that full name must be the combination of first and last name.
        This is not necessarily the full_name field.

        :param full_name: The 'first_name last_name' string.
        :return: r_id, or None if unknown.
        """
        return self._contribs.get(full_name, {}).get("r_id")
class WosJsonParser:
"""Helper methods to process the the converted json from Web of | |
<reponame>Evavanrooijen/AfricanGDP
# -*- coding: utf-8 -*-
"""Africa
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1d5nRE-PHRXsNvsdt77szeIjeU0ig8hjj
"""
import numpy as np
import pandas as pd
from math import sqrt
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.stats as stats
from scipy.stats import pearsonr
from lmfit import Parameters, minimize # package to apply NLS
from statsmodels.tsa.arima_model import ARIMA
"""# Forecasting real GDP growth for Africa
Potential modules
* run_simulation.py
* for 100 times per DGP specification:
* estimate both models
* compare RMSPE
* create table
* export table
* model.py
* gather input: JH, CRDW, pos, neg
* estimate model
* rolling forecast
* forecast benchmark
* data.py
* simulate(N, T, alp, var)
* load and preprocess: test if right format for model estimation
* Y_growth
## Data Loading
We simulate N different time series of length T as follows
<formulas here>
However, to check and compare results we also load the real data of African countries.
### Loading African Data
"""
# import file from local machine
from google.colab import files
uploaded = files.upload()
africa = pd.read_csv('/content/africa_gdp_index_eva.csv', delimiter=';', header=0, thousands=None)
africa = africa.apply(lambda x: x.str.replace(',','.'))
africa
africa = africa.astype(float)
africa = africa.transpose()
africa = africa.to_numpy()
africa['chad'].plot();
africa['ghana'].plot();
(africa['ghana']-africa['chad']).plot();
africa['madagascar'].plot();
"""Below I am just trying a PCA on the data for the three countries to see what it does and get some intuition. I use a package but also code it manually and (luckily) the results are the same."""
from sklearn.preprocessing import StandardScaler
# Standardize each column (zero mean, unit variance) before PCA.
africa_st = StandardScaler().fit_transform(africa)
africa_st.shape
from sklearn.decomposition import PCA
# First principal component via sklearn.
pca = PCA(n_components=1)
principalComponents = pca.fit_transform(africa_st)
pca.get_params()
plt.plot(principalComponents)
# Manual PCA: eigen-decomposition of the covariance matrix of the
# standardized data; pcar = (eigenvalues, eigenvector matrix).
pcar = np.linalg.eig(np.cov(africa_st.transpose()))
# NOTE(review): pcar[1][0] is the first *row* of the eigenvector matrix,
# while np.linalg.eig returns eigenvectors as *columns* (and unsorted) --
# confirm this really selects the leading component.
pcar[1][0]
red = np.matmul(africa_st, pcar[1][0])
# Sanity check: if the two components differ only in sign, this sum is ~0.
np.sum(principalComponents+red)
plt.plot(red)
"""### Simulate Data
Create N different time series of length T
"""
# set seed for reproducibility
# (legacy global-state numpy API: every np.random draw below depends on it)
np.random.seed(1)
def growth_rate(x, steps=1):
    """Return x[t] - x[t - steps] for every t >= steps.

    Despite the name this is an absolute level change (first difference
    for steps=1), not a relative growth rate.
    """
    current, lagged = x[steps:], x[:-steps]
    return current - lagged
def create_DGP(N, T, alpha, var_eps):
    """Simulate N random-walk-with-drift GDP level series of length T.

    Each series i starts at 0 and follows
        Y[i, t] = theta_i + Y[i, t-1] + eps_t,  eps_t ~ N(0, var_eps),
    with a per-series drift theta_i ~ U(1, alpha).

    Args:
        N (int): number of series (countries).
        T (int): number of periods.
        alpha (float): upper bound of the uniform drift distribution.
        var_eps (float): variance of the innovation term.

    Returns:
        (Y, Y_growth): Y is the (N, T) level panel; Y_growth the (N, T-1)
        panel of first differences.
    """
    # np.zeros instead of np.random.rand: every entry below row start is
    # overwritten anyway, so the random fill only wasted RNG draws.
    Y = np.zeros((N, T))
    for i in range(N):
        # Scalar draws (no size argument) consume the same RNG state as
        # size-1 array draws but avoid assigning arrays to scalar slots.
        theta = np.random.uniform(1, alpha)
        for t in range(1, T):
            epsilon = np.random.normal(0, sqrt(var_eps))
            Y[i, t] = theta + Y[i, t-1] + epsilon
    # First differences, vectorized (equivalent to growth_rate per row).
    Y_growth = np.diff(Y, axis=1)
    return Y, Y_growth
# Simulation parameters: N countries, T periods, drift ~ U(1, alpha),
# innovation variance var_eps.
N = 50
T = 100
alpha = 1
var_eps = 0.5
Y, Y_growth = create_DGP(N, T, alpha, var_eps)
# Level and first-difference series for a single simulated country.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 3))
fig.suptitle('Country 0: level and growth')
axes[0].plot(Y[0])
axes[0].set_title('GDP Level')
axes[0].set_xlabel('t')
axes[1].plot(Y_growth[0])
axes[1].set_title('GDP Growth')
axes[1].set_xlabel('t')
fig.tight_layout()
# Cross-sectional averages of the same two series.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 3))
fig.suptitle('Average level and growth')
axes[0].plot(np.mean(Y, axis=0))
axes[0].set_title('GDP Level')
axes[0].set_xlabel('t')
axes[1].plot(np.mean(Y_growth, axis=0))
axes[1].set_title('GDP Growth')
axes[1].set_xlabel('t')
fig.tight_layout()
# unit tests for simulating DGP
assert np.mean(Y, axis = 0)[0] == 0 # start time series 0 at t=0
# Mean growth should equal the mean drift E[theta] = (1+alpha)/2 after
# rounding (with alpha=1 the drift is exactly 1).
assert round(np.mean(Y_growth)) == (alpha+1)/2
"""### Split Sample
Split the sample T into T1 and T2 with T1=aT2, with a=1, 2, 5 or 10
"""
a = 1
# Train share of the sample: T1 = a/(1+a) * T (int() truncates).
T1_size = int((T*a)/(1+a))
T1 = Y[:, 0:(T1_size)] # Rounding when casting float to int
T2 = Y[:, (T1_size):T]
np.mean(Y, axis=0)[T1_size:].shape
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 5))
fig.suptitle('Test and train timeline')
axes[0, 0].plot(np.mean(Y, axis=0)[:T1_size])
axes[0, 0].set_title('Average GDP Level: train')
axes[0, 0].set_xlabel('train')
axes[1, 0].plot(np.mean(Y_growth, axis=0)[:T1_size])
# Fixed title: this panel shows the *training* slice of the growth series.
axes[1, 0].set_title('Average GDP Growth: train')
axes[1, 0].set_xlabel('train')
axes[0, 1].plot(np.mean(Y, axis=0)[T1_size:])
axes[0, 1].set_title('Average GDP Level: test')
axes[0, 1].set_xlabel('test')
# Fixed copy-paste bug: the test panel previously re-plotted the training
# slice [:T1_size]; it must show the held-out tail [T1_size:].
axes[1, 1].plot(np.mean(Y_growth, axis=0)[T1_size:])
axes[1, 1].set_title('Average GDP Growth: test')
axes[1, 1].set_xlabel('test')
fig.tight_layout()
# unit tests
mean_train_growth = round(np.mean(np.mean(Y_growth, axis=0)[:T1_size]))
mean_test_growth = round(np.mean(np.mean(Y_growth, axis=0)[T1_size:]))
assert mean_test_growth == mean_train_growth
"""## Model Building
### v Step 1: CRDW Test
y(i, t)= theta(i) + y(j, t) + w(j, t)
for j=1,..., I and j unequal to i.
Compute residuals w(j, t) and create the Cointegration Regression Durbin Watson (CRDW) test statistic as CRDW(j)=2(1-rho(j)), where rho(j) is the estimated first-order autocorrelation of the estimated residuals w(j).
Save regressor y(j, t) where CRDW(j)> tao for next round
"""
def CRDW(i, tao=0.4):
    """This function tests for cointegration.

    Regresses Y[i] on every other country's level series Y[j] (OLS with
    constant) and uses the Durbin-Watson statistic of the residuals as
    the Cointegration Regression Durbin-Watson (CRDW) test statistic;
    series with a statistic above ``tao`` are stacked onto the output.
    Reads the module-level globals ``Y`` (N x T levels) and ``N``.

    Args:
        i (int): index of the country we are modeling
        tao (float): critical value (default = 0.4)
    Returns:
        JH (array): An array with the regressors (incl. self).
            NOTE(review): if no j passes the screen this is the 1-D
            series Y[i] (shape (T,)); otherwise a 2-D stack whose first
            row is Y[i].  Callers branching on JH.shape[0] therefore see
            T in the first case -- confirm that is intended.
    """
    JH=Y[i]
    for j in range(N):
        if j!=i:
            y = Y[i]
            x = Y[j]
            x = sm.add_constant(x)  # intercept theta(i) in y = theta + b*x + w
            model = sm.OLS(y, x)
            results = model.fit()
            # Durbin-Watson of the cointegrating-regression residuals,
            # i.e. CRDW(j) = 2(1 - rho(j)).
            CRDW_j = stats.stattools.durbin_watson(results.resid)
            if CRDW_j > tao:
                JH = np.vstack((JH, Y[j]))
    assert JH.shape[0]>0 # test if JH contains at least self
    return JH
"""### x Step 2: Estimate Cointegration
create matrices with growth rate (groeivoet) and level
calculate S00 S11 S01 S10
calculate eigenvalues
first eigenvalue-> eigenvector is beta for cointegration relation
"""
def cointegration(JH_i):
    """Johansen estimation for cointegration between two time series.

    Builds moment matrices from levels (S11), first differences (S00)
    and their cross terms (S01, S10), then takes an eigenvector of
    S11 - S10 S00^{-1} S01 as the cointegrating vector.

    NOTE(review): the S01/S10 construction indexes JH_i[0] and JH_i[1]
    directly, so this assumes exactly two rows; CRDW() can return more
    rows (or a 1-D array) -- confirm before applying to its output.
    NOTE(review): np.linalg.eigh assumes a symmetric matrix and returns
    eigenvalues in ascending order, so [:, 0] is the eigenvector of the
    *smallest* eigenvalue -- confirm both points are intended.

    Args:
        JH_i (array): output of CRDW test
    Returns:
        beta (array): eigenvector
    """
    S11 = np.cov(JH_i)
    S01 = np.cov(np.vstack([growth_rate(JH_i[0]), JH_i[1][1:]]))
    S10 = np.cov(np.vstack([JH_i[0][1:], growth_rate(JH_i[1])]))
    S00 = np.cov(np.vstack([ growth_rate(row) for row in JH_i ]))
    beta = np.linalg.eigh(S11-S10.dot(np.linalg.inv(S00)).dot(S01))[1][:, 0]
    return beta
# Smoke-run on country 0 (requires CRDW(0) to return exactly two rows).
beta = cointegration(CRDW(0))
"""### v Step 3: Rank Correlations"""
def correlation(i, kn=4, kp=4):
    """Feature selection based on pairwise correlation.

    Computes the Pearson correlation of country ``i``'s growth series
    with every country (module-level globals ``Y_growth``, ``N``, ``T``)
    and returns the most positively / negatively correlated neighbours,
    excluding ``i`` itself.

    Args:
        i (int): index of the country we are modeling
        kn (int): number of negatively correlated series to return
        kp (int): number of positively correlated series to return
    Returns:
        (pos, neg): arrays of shape (kp, T-1) and (kn, T-1)
    """
    corr_i = np.zeros(N)
    for j in range(N):
        corr_i[j] = pearsonr(Y_growth[i], Y_growth[j])[0]
    # Sort ascending: the last index is i itself (corr == 1), so the kp
    # entries just below it are the top positive neighbours.  The previous
    # np.argpartition version relied on the partitioned tail being ordered,
    # which argpartition does NOT guarantee, so it could keep `i` in the
    # selection and drop a genuine neighbour.
    order = corr_i.argsort()
    pos = Y_growth[order[-(kp + 1):-1]]
    assert pos.shape == (kp, T-1)
    neg = Y_growth[order[:kn]]
    assert neg.shape == (kn, T-1)
    return pos, neg
# Full N x N Pearson correlation matrix of the simulated growth series.
N = 50
corr = np.ones([N, N])
for i in range(N):
    for j in range(N):
        corr[i, j] = pearsonr(Y_growth[i], Y_growth[j])[0]
import seaborn as sns
sns.heatmap(corr)
plt.plot(corr[25])
# Lower triangle only -- the matrix is symmetric.
sns.heatmap(np.tril(corr))
# Average correlation of each country with all others (diagonal included).
plt.plot(np.mean(corr, axis=0))
np.mean(np.mean(corr, axis=0))
alpha = 5
var_eps = 1
N = 50
def test_corr(N, alpha, var_eps):
    """Mean pairwise Pearson correlation of growth rates for one DGP draw.

    Simulates an N x 100 panel with create_DGP and returns the mean of
    the full N x N correlation matrix (diagonal entries, all 1, included).
    """
    X, X_growth = create_DGP(N, 100, alpha, var_eps)
    corr_X = np.array(
        [[pearsonr(row_i, row_j)[0] for row_j in X_growth] for row_i in X_growth]
    )
    return np.mean(corr_X)
# Sweep alpha and var_eps to see how the average pairwise correlation of
# growth rates reacts to the DGP parameters.
for alpha in [1, 2, 3, 4, 5]:
    print(str(alpha) + ' alpha returns correlation : '+str(test_corr(50, alpha, 0.5)))
    print(str(alpha) + ' alpha returns correlation : '+str(test_corr(50, alpha, 1)))
for var_eps in [0.5, 0.7, 0.9, 1]:
    print(str(var_eps) + ' var_eps returns correlation : '+str(test_corr(50, 1, var_eps)))
    print(str(var_eps) + ' var_eps returns correlation : '+str(test_corr(50, 2, var_eps)))
    print(str(var_eps) + ' var_eps returns correlation : '+str(test_corr(50, 5, var_eps)))
"""## Where does this correlation come from?
expected variance of growth rate is (alpha-1)**2/12 from theta and 1 from epsilon
Somehow for different var_eps and alpha always around 0.02
"""
# ??
# Fixed NameError: `corr_X` only exists inside test_corr(), so the original
# `np.mean(corr_X)` crashed here.  test_corr() returns exactly that mean
# for a fresh draw with the current (loop-leftover) parameters.
test_corr(N, alpha, var_eps)
"""### v Step 4: Define Model
For every country (row), we retrieve an array of correlated countries (rows) and an array of countries in the (potential) cointegration relation.
Steps involved:
* Define function(independent, parameters)
* parameter: mu, gamma, beta, alphas
* independent vars: pos, neg, JH
* rank = JH.shape[0]-1
* returns fitted value: f(pos, neg, JH) = growth[y]
* Estimate parameters
* desired output = growth[y]
* input = pos[y], neg[y], CRDW[y]
* params: mu, gamma, beta, ...
* fit by NLS: minimize des_out - fit(params, indep)
* Forecast
* Train to retrieve params per i, training set
* Predict one step ahead
* store RMSPE
"""
# Dry run assembling the model inputs for a single country.
# NOTE(review): executed top-to-bottom this raises NameError -- both
# model() and the global `params` it reads are defined only further down
# the file (notebook cells were presumably run out of order).
for i in range(1, 2):
    JH = CRDW(i, tao=0.7)
    country = Y_growth[i]
    pos, neg = correlation(i)
    # beta = cointegration(JH)
    if 0<JH.shape[0]<100:
        # 2-D stack: first row is country i itself, the rest are the
        # selected regressors, hence rank = rows - 1.
        rank = JH.shape[0]-1
        JH_growth = np.vstack([growth_rate(row) for row in JH])
    else:
        # JH stayed 1-D (shape (T,) with T=100): no regressor passed the
        # CRDW screen, so fall back to rank 0.
        rank = 0
        beta = np.array(1)
        JH_growth = growth_rate(JH)
    print('here rank '+str(rank))
    model(pos, neg, country, JH, rank)
# Stray check of the rank-0 cointegration term on the last JH_growth.
(np.array([-1])).dot(JH_growth)
# TO-DO: check growth rates! country should be label only CHECK LATER
def model(pos, neg, country, JH, rank):
    """Fitted growth series for one country: correlation + cointegration terms.

    Reads the module-level globals ``params`` (lmfit Parameters holding
    mu, alpha_self, alpha_pos, alpha_neg, theta_pos, theta_neg, gamma and
    beta0..beta{rank-1}) and ``JH_growth``.

    NOTE(review): the ``JH`` argument itself is never used -- the body
    uses the global ``JH_growth`` instead; confirm that is intended.
    """
    mu = params['mu']
    alpha_self = params['alpha_self']
    alpha_pos = params['alpha_pos']
    alpha_neg = params['alpha_neg']
    theta_pos = params['theta_pos']
    theta_neg = params['theta_neg']
    gamma = params['gamma']
    #beta = params['beta']
    if rank > 0:
        # Collect beta0..beta{rank-1} into the cointegrating vector.
        beta = np.array([params['beta0']])
        for br in range(1, rank):
            beta = np.append(beta, params['beta'+str(br)])
    if rank == 0:
        # NOTE(review): residual() uses +1 for this fallback instead of
        # -1 -- confirm which sign is intended.
        beta=np.array([-1])
    # Geometric decay theta^k over the k-th ranked pos/neg neighbour.
    alpha_i_j_p = np.array([alpha_pos, alpha_pos*theta_pos, alpha_pos*(theta_pos**2), alpha_pos*(theta_pos**3)])
    alpha_i_j_n = np.array([alpha_neg, alpha_neg*theta_neg, alpha_neg*(theta_neg**2), alpha_neg*(theta_neg**3)])
    correlation = alpha_self * (country) + alpha_i_j_n.dot(neg) + alpha_i_j_p.dot(pos)
    cointegration = gamma*(- beta.dot(JH_growth))
    model = cointegration + correlation + mu
    return model
from lmfit import Model
# Wrap model() so lmfit can introspect its signature; the bare expression
# below just displays the detected parameter names in the notebook.
mod = Model(model)
mod.param_names
# TO-DO: check growth rates! country should be label only CHECK LATER
def residual(params, pos, neg, country, JH, rank):
    """Residual of the combined correlation + cointegration model (for lmfit).

    ``params`` is an lmfit Parameters object (any mapping works) holding
    mu, alpha_self, alpha_pos, alpha_neg, theta_pos, theta_neg, gamma and,
    when ``rank`` > 0, beta0..beta{rank-1}.  Returns the observed growth
    series ``country`` minus the model's fitted values.

    NOTE(review): like model(), this reads the module-level global
    ``JH_growth`` rather than the ``JH`` argument, and the rank == 0
    fallback uses beta = [1] where model() uses [-1] -- confirm which is
    intended.
    """
    mu = params['mu']
    alpha_self = params['alpha_self']
    alpha_pos = params['alpha_pos']
    alpha_neg = params['alpha_neg']
    theta_pos = params['theta_pos']
    theta_neg = params['theta_neg']
    gamma = params['gamma']
    if rank > 0:
        # Collect beta0..beta{rank-1} into the cointegrating vector.
        beta = np.array([params['beta0']])
        for br in range(1, rank):
            beta = np.append(beta, params['beta'+str(br)])
    if rank == 0:
        beta=np.array([1])
    # Geometric decay theta^k over the k-th ranked pos/neg neighbour.
    alpha_i_j_p = np.array([alpha_pos, alpha_pos*theta_pos, alpha_pos*(theta_pos**2), alpha_pos*(theta_pos**3)])
    alpha_i_j_n = np.array([alpha_neg, alpha_neg*theta_neg, alpha_neg*(theta_neg**2), alpha_neg*(theta_neg**3)])
    correlation = alpha_self * (country) + alpha_i_j_n.dot(neg) + alpha_i_j_p.dot(pos)
    # Fixed syntax error: the original read `beta.dot(JH_growth[])`, which
    # is invalid Python and broke the whole file; dropping the empty
    # subscript matches the usage in model().
    cointegration = gamma*(country - beta.dot(JH_growth))
    model = cointegration + correlation + mu
    return country-model
JH_growth.shape
# Initial parameter values for the NLS fit; the per-country beta0..beta{r}
# entries are added inside the estimation loop below.
params = Parameters()
params.add('mu', value = 1)
params.add('alpha_self', value = 2)
params.add('alpha_pos', value = 2)
params.add('alpha_neg', value = 2)
params.add('theta_pos', value = 2)
params.add('theta_neg', value = 2)
params.add('gamma', value = 5)
# params.pretty_print()
for i in range(1, 6):
JH = CRDW(i, tao=0.7)
country = Y_growth[i]
pos, neg = correlation(i)
# beta = cointegration(JH)
if 0<JH.shape[0]<100:
rank = JH.shape[0]-1
for br in range(rank):
params.add('beta'+str(br), value = 2)
print('here rank '+str(rank))
JH_growth = np.vstack([growth_rate(row) for row in JH])
else:
rank = 0
| |
will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the step.Constant filled by server. Possible values
include: "Docker", "FileTask", "EncodedTask".
:type type: str or ~azure.mgmt.containerregistry.v2019_06_01_preview.models.StepType
:ivar base_image_dependencies: List of base image dependencies for a step.
:vartype base_image_dependencies:
list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.BaseImageDependency]
:param context_path: The URL(absolute or relative) of the source context for the task step.
:type context_path: str
:param context_access_token: The token (git PAT or SAS token of storage account blob)
associated with the context for a step.
:type context_access_token: str
:param encoded_task_content: Required. Base64 encoded value of the template/definition file
content.
:type encoded_task_content: str
:param encoded_values_content: Base64 encoded value of the parameters/values file content.
:type encoded_values_content: str
:param values: The collection of overridable values that can be passed when running a task.
:type values: list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.SetValue]
"""
_validation = {
'type': {'required': True},
'base_image_dependencies': {'readonly': True},
'encoded_task_content': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'base_image_dependencies': {'key': 'baseImageDependencies', 'type': '[BaseImageDependency]'},
'context_path': {'key': 'contextPath', 'type': 'str'},
'context_access_token': {'key': 'contextAccessToken', 'type': 'str'},
'encoded_task_content': {'key': 'encodedTaskContent', 'type': 'str'},
'encoded_values_content': {'key': 'encodedValuesContent', 'type': 'str'},
'values': {'key': 'values', 'type': '[SetValue]'},
}
    def __init__(
        self,
        **kwargs
    ):
        """Initialize an EncodedTaskStep from keyword arguments.

        ``encoded_task_content`` is required: direct indexing raises
        KeyError when it is missing; the optional fields default to None.
        """
        super(EncodedTaskStep, self).__init__(**kwargs)
        # Polymorphic discriminator constant for this step type.
        self.type = 'EncodedTask'  # type: str
        self.encoded_task_content = kwargs['encoded_task_content']
        self.encoded_values_content = kwargs.get('encoded_values_content', None)
        self.values = kwargs.get('values', None)
class EncodedTaskStepUpdateParameters(TaskStepUpdateParameters):
    """The properties for updating encoded task step.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the step.Constant filled by server. Possible values
     include: "Docker", "FileTask", "EncodedTask".
    :type type: str or ~azure.mgmt.containerregistry.v2019_06_01_preview.models.StepType
    :param context_path: The URL(absolute or relative) of the source context for the task step.
    :type context_path: str
    :param context_access_token: The token (git PAT or SAS token of storage account blob)
     associated with the context for a step.
    :type context_access_token: str
    :param encoded_task_content: Base64 encoded value of the template/definition file content.
    :type encoded_task_content: str
    :param encoded_values_content: Base64 encoded value of the parameters/values file content.
    :type encoded_values_content: str
    :param values: The collection of overridable values that can be passed when running a task.
    :type values: list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.SetValue]
    """

    # Serializer-side validation rules (presumably consumed by the msrest
    # base model -- generated code, keep structure as-is).
    _validation = {
        'type': {'required': True},
    }

    # Maps python attribute names to their serialized key names and types.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'context_path': {'key': 'contextPath', 'type': 'str'},
        'context_access_token': {'key': 'contextAccessToken', 'type': 'str'},
        'encoded_task_content': {'key': 'encodedTaskContent', 'type': 'str'},
        'encoded_values_content': {'key': 'encodedValuesContent', 'type': 'str'},
        'values': {'key': 'values', 'type': '[SetValue]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EncodedTaskStepUpdateParameters, self).__init__(**kwargs)
        # Polymorphic discriminator constant for this step type.
        self.type = 'EncodedTask'  # type: str
        self.encoded_task_content = kwargs.get('encoded_task_content', None)
        self.encoded_values_content = kwargs.get('encoded_values_content', None)
        self.values = kwargs.get('values', None)
class ErrorResponse(msrest.serialization.Model):
    """An error response from the Azure Container Registry service.

    :param error: Azure container registry build API error body.
    :type error: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.ErrorResponseBody
    """

    # Maps python attribute names to serialized key names/types for the
    # msrest serializer (base class msrest.serialization.Model).
    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorResponseBody'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = kwargs.get('error', None)
class ErrorResponseBody(msrest.serialization.Model):
    """An error response from the Azure Container Registry service.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. error code.
    :type code: str
    :param message: Required. error message.
    :type message: str
    :param target: target of the particular error.
    :type target: str
    :param details: an array of additional nested error response info objects, as described by this
     contract.
    :type details: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.InnerErrorDescription
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # NOTE(review): the docstring calls `details` "an array", but the mapped
    # type is a single 'InnerErrorDescription' (not '[InnerErrorDescription]')
    # -- confirm against the service contract.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': 'InnerErrorDescription'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ErrorResponseBody, self).__init__(**kwargs)
        # Required fields: direct indexing raises KeyError when missing.
        self.code = kwargs['code']
        self.message = kwargs['message']
        self.target = kwargs.get('target', None)
        self.details = kwargs.get('details', None)
class FileTaskRunRequest(RunRequest):
    """The request parameters for a scheduling run against a task file.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the run request.Constant filled by server.
    :type type: str
    :param is_archive_enabled: The value that indicates whether archiving is enabled for the run or
     not.
    :type is_archive_enabled: bool
    :param agent_pool_name: The dedicated agent pool for the run.
    :type agent_pool_name: str
    :param log_template: The template that describes the repository and tag information for run log
     artifact.
    :type log_template: str
    :param task_file_path: Required. The template/definition file path relative to the source.
    :type task_file_path: str
    :param values_file_path: The values/parameters file path relative to the source.
    :type values_file_path: str
    :param values: The collection of overridable values that can be passed when running a task.
    :type values: list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.SetValue]
    :param timeout: Run timeout in seconds.
    :type timeout: int
    :param platform: Required. The platform properties against which the run has to happen.
    :type platform: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.PlatformProperties
    :param agent_configuration: The machine configuration of the run agent.
    :type agent_configuration:
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentProperties
    :param source_location: The URL(absolute or relative) of the source context. It can be an URL
     to a tar or git repository.
     If it is relative URL, the relative path should be obtained from calling
     listBuildSourceUploadUrl API.
    :type source_location: str
    :param credentials: The properties that describes a set of credentials that will be used when
     this run is invoked.
    :type credentials: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.Credentials
    """

    # Validation rules: timeout is bounded to [300, 28800] seconds.
    _validation = {
        'type': {'required': True},
        'task_file_path': {'required': True},
        'timeout': {'maximum': 28800, 'minimum': 300},
        'platform': {'required': True},
    }

    # Maps python attribute names to their serialized key names and types.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'is_archive_enabled': {'key': 'isArchiveEnabled', 'type': 'bool'},
        'agent_pool_name': {'key': 'agentPoolName', 'type': 'str'},
        'log_template': {'key': 'logTemplate', 'type': 'str'},
        'task_file_path': {'key': 'taskFilePath', 'type': 'str'},
        'values_file_path': {'key': 'valuesFilePath', 'type': 'str'},
        'values': {'key': 'values', 'type': '[SetValue]'},
        'timeout': {'key': 'timeout', 'type': 'int'},
        'platform': {'key': 'platform', 'type': 'PlatformProperties'},
        'agent_configuration': {'key': 'agentConfiguration', 'type': 'AgentProperties'},
        'source_location': {'key': 'sourceLocation', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'Credentials'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(FileTaskRunRequest, self).__init__(**kwargs)
        # Polymorphic discriminator constant for this run-request type.
        self.type = 'FileTaskRunRequest'  # type: str
        # Required fields: direct indexing raises KeyError when missing.
        self.task_file_path = kwargs['task_file_path']
        self.values_file_path = kwargs.get('values_file_path', None)
        self.values = kwargs.get('values', None)
        # Default timeout is 3600 s (within the validated [300, 28800] range).
        self.timeout = kwargs.get('timeout', 3600)
        self.platform = kwargs['platform']
        self.agent_configuration = kwargs.get('agent_configuration', None)
        self.source_location = kwargs.get('source_location', None)
        self.credentials = kwargs.get('credentials', None)
class FileTaskStep(TaskStepProperties):
    """The properties of a task step.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of the step.Constant filled by server. Possible values
     include: "Docker", "FileTask", "EncodedTask".
    :type type: str or ~azure.mgmt.containerregistry.v2019_06_01_preview.models.StepType
    :ivar base_image_dependencies: List of base image dependencies for a step.
    :vartype base_image_dependencies:
     list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.BaseImageDependency]
    :param context_path: The URL(absolute or relative) of the source context for the task step.
    :type context_path: str
    :param context_access_token: The token (git PAT or SAS token of storage account blob)
     associated with the context for a step.
    :type context_access_token: str
    :param task_file_path: Required. The task template/definition file path relative to the source
     context.
    :type task_file_path: str
    :param values_file_path: The task values/parameters file path relative to the source context.
    :type values_file_path: str
    :param values: The collection of overridable values that can be passed when running a task.
    :type values: list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.SetValue]
    """

    # base_image_dependencies is read-only: populated by the server only.
    _validation = {
        'type': {'required': True},
        'base_image_dependencies': {'readonly': True},
        'task_file_path': {'required': True},
    }

    # Maps python attribute names to their serialized key names and types.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'base_image_dependencies': {'key': 'baseImageDependencies', 'type': '[BaseImageDependency]'},
        'context_path': {'key': 'contextPath', 'type': 'str'},
        'context_access_token': {'key': 'contextAccessToken', 'type': 'str'},
        'task_file_path': {'key': 'taskFilePath', 'type': 'str'},
        'values_file_path': {'key': 'valuesFilePath', 'type': 'str'},
        'values': {'key': 'values', 'type': '[SetValue]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(FileTaskStep, self).__init__(**kwargs)
        # Polymorphic discriminator constant for this step type.
        self.type = 'FileTask'  # type: str
        # Required: direct indexing raises KeyError when missing.
        self.task_file_path = kwargs['task_file_path']
        self.values_file_path = kwargs.get('values_file_path', None)
        self.values = kwargs.get('values', None)
class FileTaskStepUpdateParameters(TaskStepUpdateParameters):
"""The properties of updating a task step.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the step.Constant filled by server. Possible values
include: "Docker", "FileTask", "EncodedTask".
:type type: str or ~azure.mgmt.containerregistry.v2019_06_01_preview.models.StepType
:param context_path: The URL(absolute or relative) of the source context for the task step.
:type context_path: str
:param context_access_token: The token (git PAT or SAS token of storage account blob)
associated with the context for a step.
:type context_access_token: str
:param task_file_path: The task template/definition file path relative to the source context.
:type task_file_path: str
:param values_file_path: The values/parameters file path relative to the source context.
:type values_file_path: str
:param values: The | |
PublicationId):
self.id = PublicationId(self.id)
if self._is_empty(self.type):
self.MissingRequiredField("type")
if not isinstance(self.type, str):
self.type = str(self.type)
if not isinstance(self.authors, list):
self.authors = [self.authors] if self.authors is not None else []
self.authors = [v if isinstance(v, str) else str(v) for v in self.authors]
if not isinstance(self.pages, list):
self.pages = [self.pages] if self.pages is not None else []
self.pages = [v if isinstance(v, str) else str(v) for v in self.pages]
if self.summary is not None and not isinstance(self.summary, str):
self.summary = str(self.summary)
if not isinstance(self.keywords, list):
self.keywords = [self.keywords] if self.keywords is not None else []
self.keywords = [v if isinstance(v, str) else str(v) for v in self.keywords]
if not isinstance(self.mesh_terms, list):
self.mesh_terms = [self.mesh_terms] if self.mesh_terms is not None else []
self.mesh_terms = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.mesh_terms]
if not isinstance(self.xref, list):
self.xref = [self.xref] if self.xref is not None else []
self.xref = [v if isinstance(v, IriType) else IriType(v) for v in self.xref]
if self.name is not None and not isinstance(self.name, LabelType):
self.name = LabelType(self.name)
super().__post_init__(**kwargs)
@dataclass
class Book(Publication):
    """
    This class may rarely be instantiated except if use cases of a given knowledge graph support its utility.
    """
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.Book
    class_class_curie: ClassVar[str] = "biolink:Book"
    class_name: ClassVar[str] = "book"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Book

    # Required slots; None defaults satisfy dataclass ordering, actual
    # presence is enforced in __post_init__.
    id: Union[str, BookId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    type: str = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate required slots and coerce raw values to typed wrappers.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, BookId):
            self.id = BookId(self.id)
        if self._is_empty(self.type):
            self.MissingRequiredField("type")
        if not isinstance(self.type, str):
            self.type = str(self.type)
        super().__post_init__(**kwargs)
@dataclass
class BookChapter(Publication):
    """A chapter of a book; part-of relation is carried by ``published_in``."""
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.BookChapter
    class_class_curie: ClassVar[str] = "biolink:BookChapter"
    class_name: ClassVar[str] = "book chapter"
    class_model_uri: ClassVar[URIRef] = BIOLINK.BookChapter

    id: Union[str, BookChapterId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    type: str = None
    published_in: Union[str, URIorCURIE] = None
    volume: Optional[str] = None
    chapter: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate required slots and coerce raw values to typed wrappers.
        # NOTE(review): unlike Book/Serial, `type` is not validated here --
        # generated output; confirm against the schema if it matters.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, BookChapterId):
            self.id = BookChapterId(self.id)
        if self._is_empty(self.published_in):
            self.MissingRequiredField("published_in")
        if not isinstance(self.published_in, URIorCURIE):
            self.published_in = URIorCURIE(self.published_in)
        if self.volume is not None and not isinstance(self.volume, str):
            self.volume = str(self.volume)
        if self.chapter is not None and not isinstance(self.chapter, str):
            self.chapter = str(self.chapter)
        super().__post_init__(**kwargs)
@dataclass
class Serial(Publication):
    """
    This class may rarely be instantiated except if use cases of a given knowledge graph support its utility.
    """
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.Serial
    class_class_curie: ClassVar[str] = "biolink:Serial"
    class_name: ClassVar[str] = "serial"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Serial

    id: Union[str, SerialId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    type: str = None
    iso_abbreviation: Optional[str] = None
    volume: Optional[str] = None
    issue: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate required slots and coerce raw values to typed wrappers.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, SerialId):
            self.id = SerialId(self.id)
        if self._is_empty(self.type):
            self.MissingRequiredField("type")
        if not isinstance(self.type, str):
            self.type = str(self.type)
        if self.iso_abbreviation is not None and not isinstance(self.iso_abbreviation, str):
            self.iso_abbreviation = str(self.iso_abbreviation)
        if self.volume is not None and not isinstance(self.volume, str):
            self.volume = str(self.volume)
        if self.issue is not None and not isinstance(self.issue, str):
            self.issue = str(self.issue)
        super().__post_init__(**kwargs)
@dataclass
class Article(Publication):
    """An article, e.g. a journal article; its container is ``published_in``."""
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.Article
    class_class_curie: ClassVar[str] = "biolink:Article"
    class_name: ClassVar[str] = "article"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Article

    id: Union[str, ArticleId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    type: str = None
    published_in: Union[str, URIorCURIE] = None
    iso_abbreviation: Optional[str] = None
    volume: Optional[str] = None
    issue: Optional[str] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate required slots and coerce raw values to typed wrappers.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, ArticleId):
            self.id = ArticleId(self.id)
        if self._is_empty(self.published_in):
            self.MissingRequiredField("published_in")
        if not isinstance(self.published_in, URIorCURIE):
            self.published_in = URIorCURIE(self.published_in)
        if self.iso_abbreviation is not None and not isinstance(self.iso_abbreviation, str):
            self.iso_abbreviation = str(self.iso_abbreviation)
        if self.volume is not None and not isinstance(self.volume, str):
            self.volume = str(self.volume)
        if self.issue is not None and not isinstance(self.issue, str):
            self.issue = str(self.issue)
        super().__post_init__(**kwargs)
class PhysicalEssenceOrOccurrent(YAMLRoot):
    """
    Either a physical or processual entity.
    """
    # Mixin marker class: carries schema metadata only, no slots of its own.
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = BIOLINK.PhysicalEssenceOrOccurrent
    class_class_curie: ClassVar[str] = "biolink:PhysicalEssenceOrOccurrent"
    class_name: ClassVar[str] = "physical essence or occurrent"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PhysicalEssenceOrOccurrent
class PhysicalEssence(PhysicalEssenceOrOccurrent):
    """
    Semantic mixin concept. Pertains to entities that have physical properties such as mass, volume, or charge.
    """
    # Mixin marker class: carries schema metadata only, no slots of its own.
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = BIOLINK.PhysicalEssence
    class_class_curie: ClassVar[str] = "biolink:PhysicalEssence"
    class_name: ClassVar[str] = "physical essence"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PhysicalEssence
@dataclass
class PhysicalEntity(NamedThing):
    """
    An entity that has material reality (a.k.a. physical essence).
    """
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.PhysicalEntity
    class_class_curie: ClassVar[str] = "biolink:PhysicalEntity"
    class_name: ClassVar[str] = "physical entity"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PhysicalEntity

    id: Union[str, PhysicalEntityId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate the required id and coerce it to its typed wrapper.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, PhysicalEntityId):
            self.id = PhysicalEntityId(self.id)
        super().__post_init__(**kwargs)
class Occurrent(PhysicalEssenceOrOccurrent):
    """
    A processual entity.
    """
    # Mixin marker class: carries schema metadata only, no slots of its own.
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = BIOLINK.Occurrent
    class_class_curie: ClassVar[str] = "biolink:Occurrent"
    class_name: ClassVar[str] = "occurrent"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Occurrent
class ActivityAndBehavior(Occurrent):
    """
    Activity or behavior of any independent integral living, organization or mechanical actor in the world
    """
    # Mixin marker class: carries schema metadata only, no slots of its own.
    _inherited_slots: ClassVar[List[str]] = []

    class_class_uri: ClassVar[URIRef] = BIOLINK.ActivityAndBehavior
    class_class_curie: ClassVar[str] = "biolink:ActivityAndBehavior"
    class_name: ClassVar[str] = "activity and behavior"
    class_model_uri: ClassVar[URIRef] = BIOLINK.ActivityAndBehavior
@dataclass
class Activity(NamedThing):
    """
    An activity is something that occurs over a period of time and acts upon or with entities; it may include
    consuming, processing, transforming, modifying, relocating, using, or generating entities.
    """
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.Activity
    class_class_curie: ClassVar[str] = "biolink:Activity"
    class_name: ClassVar[str] = "activity"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Activity

    id: Union[str, ActivityId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate the required id and coerce it to its typed wrapper.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, ActivityId):
            self.id = ActivityId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class Procedure(NamedThing):
    """
    A series of actions conducted in a certain order or manner
    """
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.Procedure
    class_class_curie: ClassVar[str] = "biolink:Procedure"
    class_name: ClassVar[str] = "procedure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Procedure

    id: Union[str, ProcedureId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate the required id and coerce it to its typed wrapper.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, ProcedureId):
            self.id = ProcedureId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class Phenomenon(NamedThing):
    """
    a fact or situation that is observed to exist or happen, especially one whose cause or explanation is in question
    """
    _inherited_slots: ClassVar[List[str]] = []

    # Schema bindings: URI/CURIE identity of this class in the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.Phenomenon
    class_class_curie: ClassVar[str] = "biolink:Phenomenon"
    class_name: ClassVar[str] = "phenomenon"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Phenomenon

    id: Union[str, PhenomenonId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None

    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate the required id and coerce it to its typed wrapper.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, PhenomenonId):
            self.id = PhenomenonId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class Device(NamedThing):
    """
    A thing made or adapted for a particular purpose, especially a piece of mechanical or electronic equipment
    """
    _inherited_slots: ClassVar[List[str]] = []
    # linkml-generated model metadata: URI/CURIE identity of this class
    # within the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.Device
    class_class_curie: ClassVar[str] = "biolink:Device"
    class_name: ClassVar[str] = "device"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Device
    # Required identifier; coerced to DeviceId in __post_init__.
    id: Union[str, DeviceId] = None
    # NamedThing slot: one or more category identifiers.
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate the required `id` slot and normalize its type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, DeviceId):
            self.id = DeviceId(self.id)
        super().__post_init__(**kwargs)
class SubjectOfInvestigation(YAMLRoot):
    """
    An entity that has the role of being studied in an investigation, study, or experiment
    """
    # Carries only linkml model metadata; declares no slots of its own and
    # is not a @dataclass, unlike its NamedThing-derived siblings.
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.SubjectOfInvestigation
    class_class_curie: ClassVar[str] = "biolink:SubjectOfInvestigation"
    class_name: ClassVar[str] = "subject of investigation"
    class_model_uri: ClassVar[URIRef] = BIOLINK.SubjectOfInvestigation
@dataclass
class MaterialSample(PhysicalEntity):
    """
    A sample is a limited quantity of something (e.g. an individual or set of individuals from a population, or a
    portion of a substance) to be used for testing, analysis, inspection, investigation, demonstration, or trial use.
    [SIO]
    """
    _inherited_slots: ClassVar[List[str]] = []
    # linkml-generated model metadata: URI/CURIE identity of this class
    # within the Biolink model.
    class_class_uri: ClassVar[URIRef] = BIOLINK.MaterialSample
    class_class_curie: ClassVar[str] = "biolink:MaterialSample"
    class_name: ClassVar[str] = "material sample"
    class_model_uri: ClassVar[URIRef] = BIOLINK.MaterialSample
    # Required identifier; coerced to MaterialSampleId in __post_init__.
    id: Union[str, MaterialSampleId] = None
    # NamedThing slot: one or more category identifiers.
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Validate the required `id` slot and normalize its type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, MaterialSampleId):
            self.id = MaterialSampleId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class PlanetaryEntity(NamedThing):
"""
Any entity or process that exists at the level of the whole planet
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.PlanetaryEntity
class_class_curie: ClassVar[str] = "biolink:PlanetaryEntity"
class_name: ClassVar[str] = "planetary entity"
class_model_uri: ClassVar[URIRef] = BIOLINK.PlanetaryEntity
id: Union[str, PlanetaryEntityId] = None
category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
def __post_init__(self, | |
return handle.query_dn(mo.dn)
def bios_profile_delete(handle, name, server_id=1):
    """
    Deletes the bios profile specified by the name on the Cisco IMC server

    Args:
        handle (ImcHandle)
        name (str): Name of the bios profile.
                    Corresponds to the name field in the json file.
        server_id (int): Id of the server to perform
                         this operation on C3260 platforms.

    Returns:
        None

    Raises:
        ImcOperationError if the bios profile is not found

    Examples:
        bios_profile_delete(handle, name='simple', server_id=2)
    """
    from imcsdk.mometa.bios.BiosProfile import BiosProfileConsts
    # Look up the profile (raises ImcOperationError when absent), then ask
    # the IMC to delete it by setting the admin action on the Mo.
    profile = _get_bios_profile(handle, name=name, server_id=server_id)
    profile.admin_action = BiosProfileConsts.ADMIN_ACTION_DELETE
    handle.set_mo(profile)
def bios_tokens_exist(handle, tokens=None, server_id=1):
    """
    Checks whether the given BIOS tokens are already configured with the
    same values on the server.

    Args:
        handle (ImcHandle)
        tokens (dictionary) : (key, value) pair of bios tokens with key being the name of the token
        server_id (int): Id of the server to perform
                         this operation on C3260 platforms.

    Returns:
        True/False based on the match with the server side tokens

    Examples:
        bios_tokens_exist(handle,
                          tokens = {
                            "BaudRate": "19200",
                            "IntelVTDATSSupport": "enabled",
                            "ConsoleRedirection": "com-1",
                            "FlowControl": "rts-cts"},
                          server_id=2)
    """
    # Fix: the original signature used a mutable default (`tokens={}`),
    # which is shared across calls; use a None sentinel instead.
    tokens = {} if tokens is None else tokens
    parent_dn = _get_bios_dn(handle, server_id) + "/bios-settings"
    mo_table = _get_bios_mo_table(handle, tokens, server_id)
    for mo_name, props in mo_table.items():
        # Find the Mo of this class that lives under our bios-settings dn.
        cimc_mos = handle.query_classid(class_id=mo_name)
        cimc_mo = None
        for mo in cimc_mos:
            if mo.dn.startswith(parent_dn):
                cimc_mo = mo
                break
        if cimc_mo is None:
            return False
        # Skip comparison when the value to be checked with is "platform-default"
        modified_props = {x: props[x] for x in props if props[x] != "platform-default"}
        if not cimc_mo.check_prop_match(**modified_props):
            return False
    return True
def is_bios_profile_enabled(handle, name, server_id=1):
    """
    Checks if the named bios profile is currently enabled.

    Args:
        handle (ImcHandle)
        name (str): Name of the bios profile.
                    Corresponds to the name field in the json file.
        server_id (int): Id of the server to perform
                         this operation on C3260 platforms.

    Returns:
        bool

    Raises:
        ImcOperationError if the bios profile is not found

    Examples:
        is_bios_profile_enabled(handle,
                                name='simple',
                                server_id=1)
    """
    profile = _get_bios_profile(handle, name=name, server_id=server_id)
    # The IMC reports the flag as a string such as "yes"/"true".
    return profile.enabled.lower() in ('yes', 'true')
def _get_vmedia_mo_dn(handle, server_id=1):
    # Dn of the virtual-media service nested under the communications
    # service Mo for the given server.
    return "%s/vmedia-svc" % _get_comm_mo_dn(handle, server_id)
def vmedia_enable(handle, encryption_state=None, low_power_usb=None,
                  server_id=1):
    """
    This method will enable vmedia and setup the properties

    Args:
        handle (ImcHandle)
        encryption_state: Encrypt virtual media communications
            (the original docstring misnamed this parameter ``encrypt``)
        low_power_usb: Enable low power usb
        server_id (int): Server Id to be specified for C3260 platforms

    Returns:
        CommVMedia object

    Examples:
        vmedia_enable(handle, True, True)
    """
    mo = CommVMedia(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id))
    params = {
        "admin_state": "enabled",
        "encryption_state": encryption_state,
        # Both spellings of the low-power-usb property are populated;
        # presumably different IMC firmware versions expose different
        # property names — TODO confirm.
        "low_power_usb_state": low_power_usb,
        "low_power_usb": low_power_usb,
    }
    mo.set_prop_multiple(**params)
    handle.set_mo(mo)
    return mo
def vmedia_get_existing_uri(handle, server_id=1):
    """
    This method will return list of URIs of existing mounted media

    Args:
        handle (ImcHandle)
        server_id (int): Server Id to be specified for C3260 platforms

    Returns:
        List of URIs of currently mounted virtual media

    Examples:
        vmedia_get_existing_uri(handle)
    """
    # Each child of the vmedia service is one mapping; its URI is the
    # share location concatenated with the file name.
    mappings = handle.query_children(in_dn=_get_vmedia_mo_dn(handle, server_id))
    uris = []
    for mapping in mappings:
        uris.append(mapping.remote_share + mapping.remote_file)
    return uris
def vmedia_get_existing_status(handle, server_id=1):
    """
    This method will return list of status of existing mounted media

    Args:
        handle (ImcHandle)
        server_id (int): Server Id to be specified for C3260 platforms

    Returns:
        List of Status of currently mounted virtual media

    Examples:
        vmedia_get_existing_status(handle)
    """
    # One mapping_status per child mapping of the vmedia service.
    svc_dn = _get_vmedia_mo_dn(handle, server_id)
    return [m.mapping_status for m in handle.query_children(in_dn=svc_dn)]
def vmedia_mount_get(handle, volume_name, server_id=1):
    """
    Fetches the virtual-media mapping Mo for the given volume name.

    Args:
        handle (ImcHandle)
        volume_name (str): Name of the volume or identity of the image
        server_id (int): Server Id to be specified for C3260 platforms

    Returns:
        CommVMediaMap object

    Raises:
        ImcOperationError if no mapping with that name exists
    """
    mount_dn = _get_vmedia_mo_dn(handle, server_id) + "/vmmap-" + volume_name
    mount_mo = handle.query_dn(mount_dn)
    if mount_mo is None:
        raise ImcOperationError("vmedia_mount_get",
                                "vmedia mount '%s' does not exist" % mount_dn)
    return mount_mo
def vmedia_mount_create(handle, volume_name, remote_share, remote_file,
                        map="www", mount_options="noauto", username="",
                        password="", server_id=1, timeout=60):
    """
    This method will setup the vmedia mapping

    Args:
        handle (ImcHandle)
        volume_name (string): Name of the volume or identity of the image
        map (string): "cifs", "nfs", "www"
        mount_options (string): Options to be passed while mounting the image
        remote_share (string): URI of the image
        remote_file (string): name of the image
        username (string): username
        password (string): password
        server_id (int): Server Id to be specified for C3260 platforms
        timeout (int): seconds to wait for the mapping to reach "OK"

    Returns:
        CommVMediaMap object

    Raises:
        ImcOperationError if the mapping reports an ERROR status or does
        not reach "OK" within `timeout` seconds

    Examples:
        vmedia_mount_create(
            handle,
            volume_name="c",
            map="www",
            mount_options="noauto",
            remote_share="http://1.1.1.1/files",
            remote_file="ubuntu-14.04.2-server-amd64.iso",
            username="abcd",
            password="secret")
    """
    # Only one image of a given type can be mapped at a time; unmap any
    # existing mapping for this image type first.
    image_type = remote_file.split('.')[-1]
    vmedia_mount_remove_image(handle, image_type)
    mo = CommVMediaMap(parent_mo_or_dn=_get_vmedia_mo_dn(handle, server_id),
                       volume_name=volume_name)
    mo.map = map
    if mount_options:
        mo.mount_options = mount_options
    mo.remote_share = remote_share
    mo.remote_file = remote_file
    mo.username = username
    mo.password = password
    handle.add_mo(mo, modify_present="True")
    # Poll the mapping status until it is "OK", reports an error, or the
    # timeout expires. Initialize the status so the final error message is
    # well-defined even when timeout <= interval (the original could raise
    # NameError on an unbound variable in that case).
    existing_mapping_status = "unknown"
    wait_time = 0
    interval = 10
    while wait_time < timeout:
        time.sleep(interval)
        mo = handle.query_dn(mo.dn)
        existing_mapping_status = mo.mapping_status
        if existing_mapping_status == "OK":
            return mo
        # str.startswith is equivalent to the original anchored
        # re.match(r"ERROR", ...) and does not depend on a module-level
        # `re` import (sibling functions import re locally).
        elif existing_mapping_status.startswith("ERROR"):
            raise ImcOperationError("vmedia_mount_create",
                                    mo.mapping_status)
        wait_time += interval
    raise ImcOperationError("vmedia_mount_create",
                            "ERROR - Mapped image status stuck at %s" %
                            existing_mapping_status)
def vmedia_mount_exists(handle, volume_name, server_id=1, **kwargs):
    """
    Checks whether a vmedia mapping with the given volume name exists and
    matches the supplied properties.

    Args:
        handle (ImcHandle)
        volume_name (str): Name of the volume or identity of the image
        server_id (int): Server Id to be specified for C3260 platforms
        kwargs: mapping properties to compare against the server

    Returns:
        (bool, CommVMediaMap-or-None) tuple
    """
    import re
    try:
        # Fix: forward server_id so the lookup targets the right server on
        # C3260 platforms (the original dropped it and always used 1).
        mo = vmedia_mount_get(handle, volume_name, server_id)
    except ImcOperationError:
        return False, None
    # These properties cannot be compared directly (password is write-only)
    # or need special handling below.
    kwargs.pop('timeout', None)
    kwargs.pop('password', None)
    username = kwargs.pop('username', None)
    mount_options = kwargs.pop('mount_options', None)
    if not mo.check_prop_match(**kwargs):
        return False, None
    mo_mount_options = [x.strip() for x in mo.mount_options.split(',')]
    if mount_options:
        # NOTE(review): only the first requested mount option is verified
        # against the server; confirm whether all options should be checked.
        mount_options = [x.strip() for x in mount_options.split(',')][0]
        if mount_options not in mo_mount_options:
            return False, None
    if username and mo.map in ['cifs', 'www']:
        # For cifs/www the server folds the username into mount_options.
        mo_username = re.search(r'username=(\S*?),',
                                mo.mount_options).groups()[0]
        if username != mo_username:
            return False, None
    if mo.mapping_status != 'OK':
        return False, None
    return True, mo
def vmedia_mount_uri(handle, uri, volume_name=None, user_id=None,
                     password=None, timeout=60, interval=5, server_id=1):
    """
    This method will setup the vmedia mapping

    Args:
        handle (ImcHandle)
        uri (string): URI of the image
        volume_name (string): optional name of volume
        user_id (string): optional username
        password (string): optional password
        timeout (int): optional timeout to wait for image map status to be 'OK'
        interval (int): optional interval to query image status
        server_id (int): Server Id to be specified for S3260 platforms

    Raises:
        Exception if invalid protocol in URI
        Exception when the mapping doesn't reach 'OK' status

    Returns:
        True if mapping succeeded

    Examples:
        vmedia_mount_uri(
            handle,
            uri="http://1.1.1.1/files/ubuntu-14.04.2-server-amd64.iso"
        )
    """
    # Fix: the `password` default was an invalid placeholder token
    # (`<PASSWORD>`, a syntax error); None matches the documented
    # "optional password" and is normalized to '' below.
    # Verify interval not set to zero
    if interval < 1 or type(interval) is not int:
        raise ValueError("ERROR: interval must be positive integer")
    # Parse file/path from URI
    remote_file = os.path.basename(uri)
    remote_share = os.path.dirname(uri) + "/"
    mount_options = "noauto"
    # Set the Map based on the protocol
    if urlsplit(uri).scheme == 'http':
        mount_protocol = "www"
    elif urlsplit(uri).scheme == 'https':
        mount_protocol = "www"
    elif CIFS_URI_PATTERN.match(uri):
        mount_protocol = "cifs"
    elif NFS_URI_PATTERN.match(uri):
        mount_protocol = "nfs"
    else:
        # Raise ValueError and bail
        raise ValueError("Unsupported protocol: " +
                         urlsplit(uri).scheme)
    # Use remote filename if no volume_name given
    if not volume_name:
        volume_name = remote_file
    # Convert no user/pass to blank strings
    if not user_id:
        user_id = ''
    if not password:
        password = ''
    # Map the file (volume names are truncated to 45 characters)
    vmedia_mount_create(handle,
                        volume_name=volume_name[:45],
                        map=mount_protocol,
                        mount_options=mount_options,
                        remote_share=remote_share,
                        remote_file=remote_file,
                        username=user_id,
                        password=password,
                        server_id=server_id)
    # Verify correct URL was mapped
    if uri in vmedia_get_existing_uri(handle, server_id):
        # Loop until mapping moves out of 'In Progress' state
        wait_time = 0
        status_list = vmedia_get_existing_status(handle, server_id)
        while 'In Progress' in status_list:
            # Raise error if we've reached timeout
            if wait_time > timeout:
                raise ImcOperationError(
                    'Mount Virtual Media',
                    '{0}: ERROR - Mapped image status stuck at [In Progress]'.format(handle.ip)
                )
            # Wait interval sec between checks
            time.sleep(interval)
            status_list = vmedia_get_existing_status(handle, server_id)
            wait_time += interval
        else:
            # while/else: runs once no mapping is 'In Progress' anymore.
            # Verify mapping transitioned to 'OK' state
            if 'OK' in status_list:
                return True
            else:
                raise ImcOperationError(
                    'Mount Virtual Media',
                    '{0}: ERROR - Mapped image status is {1}'.format(handle.ip, status_list)
                )
    else:
        raise ImcOperationError(
            'Mount Virtual Media',
            '{0}: ERROR - Image {1} did not get mapped.'.format(handle.ip, uri)
        )
def MountVmedia(handle, ip, fileuri, volume_name):
    """
    Enables the vMedia service on the CIMC at `ip` and mounts the image at
    `fileuri` as a virtual-media volume named `volume_name`.
    """
    # NOTE(review): `log` is assumed to be a module-level logger — confirm.
    log.info("Preparing to mount virtual media on <%s>.", ip)
    # Parenthesized single-argument print is identical under Python 2 and
    # valid under Python 3 (the original py2-only statement form breaks py3).
    print("Preparing to mount virtual media on <" + ip + ">.\n")
    # First make sure we enable vMedia
    mo = CommVMedia(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id=1))
    params = {
        "admin_state": "enabled",
        "encryption_state": "Disabled",
        "low_power_usb_state": "Disabled",
        "low_power_usb": "Disabled",
    }
    mo.set_prop_multiple(**params)
    handle.set_mo(mo)
    # Now mount the vMedia
    vmedia_mount_uri(
        handle,
        volume_name=volume_name,
        uri=fileuri
    )
def CheckBios(handle, ip):
log = logging.getLogger()
global bios_changed
if bios_tokens_exist(handle,
tokens = {
"BaudRate": "115200",
"ConsoleRedirection": "com0",
"FlowControl": "None",
"TerminalType": "VT100"},
server_id=1):
log.info("Great! The BIOS on <%s> already has all of the settings we need.", ip)
print "Great! The BIOS on <"+ip+"> already has all of the settings we need.\n"
else:
log.warning("The BIOS settings will need to be changed on <%s> to support this script.", ip)
print "The BIOS settings will need to be changed on <"+ip+"> to support this script.\n"
bios_changed = True
log.info("Backing up the current BIOS settings on <%s>.", ip)
print "Backing up the current BIOS settings on <"+ip+">.\n"
bios_profile_backup_running(handle)
log.info("Setting new BIOS parameters on <%s> to support SOL.", ip)
print "Setting new BIOS parameters on <"+ip+"> to support SOL.\n"
| |
# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other
# CallFlow Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import os
import json
import math
import pandas as pd
import numpy as np
# CallFlow imports
import callflow
from callflow.modules.gradients import Gradients
from callflow.modules.boxplot import BoxPlot
from callflow.timer import Timer
LOGGER = callflow.get_logger(__name__)
class FastEnsembleAuxiliary:
def __init__(
self,
states,
MPIBinCount="20",
RunBinCount="20",
datasets=[],
config={},
process=True,
write=False,
topCallsite=10,
):
self.timer = Timer()
self.df = self.select_rows(states["ensemble_entire"].new_gf.df, datasets)
self.MPIBinCount = MPIBinCount
self.RunBinCount = RunBinCount
self.config = config
self.states = states
self.process = process
self.write = write
self.datasets = datasets
self.props = ["rank", "name", "dataset", "all_ranks"]
self.filter = True
"""
self.runPath = (
self.config.callflow_path
+ "/"
+ self.config.save_path
+ "/"
+ self.config.runName
)
self.h5IndexFilename = self.runPath + "/h5_index.json"
self.moduleh5File = self.runPath + "/module_data.h5"
self.callsiteh5File = self.runPath + "/callsite_data.h5"
"""
self.h5IndexFilename = os.path.join(self.config.save_path, "h5_index.json")
self.moduleh5File = os.path.join(self.config.save_path, "module_data.h5")
self.callsiteh5File = os.path.join(self.config.save_path, "callsite_data.h5")
self.topCallsite = topCallsite
with self.timer.phase("Group data with indexes"):
self.group_frames()
self.callsiteMap = {}
self.moduleMap = {}
def filter_dict(self, result):
ret = {}
# Modules will be the same as original.
ret["module"] = result["module"]
ret["moduleCallsiteMap"] = result["moduleCallsiteMap"]
ret["callsite"] = {}
group_df = self.df.groupby(["name"]).mean()
if self.config.filter_by == "time":
f_group_df = group_df.loc[
group_df[self.config.filter_by] > self.config.filter_below
]
elif self.config.filter_by == "time (inc)":
f_group_df = group_df.loc[
group_df[self.config.filter_by]
> 0.01 * self.config.filter_perc * group_df["time (inc)"].max()
]
callsites = f_group_df.index.values.tolist()
count = 0
for dataset in result["callsite"]:
ret["callsite"][dataset] = {}
for callsite in callsites:
if callsite in result["callsite"][dataset]:
ret["callsite"][dataset][callsite] = result["callsite"][dataset][
callsite
]
count += 1
return ret
def filter_frames(self, nCallsites, attr):
xgroup_df = self.df.groupby(["name"]).mean()
sort_xgroup_df = xgroup_df.sort_values(by=[attr], ascending=False)
nCallsites_df = sort_xgroup_df.nlargest(nCallsites, attr)
return nCallsites_df
def group_frames(self):
self.module_name_group_df = self.df.groupby(["module", "name"])
self.module_group_df = self.df.groupby(["module"])
self.name_group_df = self.df.groupby(["name"])
self.target_df = {}
self.target_module_group_df = {}
self.target_module_name_group_df = {}
self.target_name_group_df = {}
for dataset in self.datasets:
self.target_df[dataset] = self.df.loc[self.df["dataset"] == dataset]
self.target_module_group_df[dataset] = self.target_df[dataset].groupby(
["module"]
)
self.target_module_name_group_df[dataset] = self.target_df[dataset].groupby(
["module", "name"]
)
self.target_name_group_df[dataset] = self.target_df[dataset].groupby(
["name"]
)
def select_rows(self, df, search_strings):
unq, IDs = np.unique(df["dataset"], return_inverse=True)
unqIDs = np.searchsorted(unq, search_strings)
mask = np.isin(IDs, unqIDs)
return df[mask]
def histogram(self, data, data_min=np.nan, data_max=np.nan):
if np.isnan(data_min) or np.isnan(data_max):
data_min = data.min()
data_max = data.max()
h, b = np.histogram(
data, range=[data_min, data_max], bins=int(self.MPIBinCount)
)
return 0.5 * (b[1:] + b[:-1]), h
def convert_pandas_array_to_list(self, series):
return series.apply(lambda d: d.tolist())
def get_module_callsite_map(self):
ret = {}
np_data = self.module_group_df["name"].unique()
ret["ensemble"] = self.convert_pandas_array_to_list(np_data).to_dict()
for dataset in self.datasets:
np_data = self.target_module_group_df[dataset]["name"].unique()
ret[dataset] = self.convert_pandas_array_to_list(np_data).to_dict()
return ret
def get_callsite_module_map(self):
ret = {}
callsites = self.df["name"].unique().tolist()
for callsite in callsites:
module = (
self.df.loc[self.df["name"] == callsite]["module"].unique().tolist()
)
ret[callsite] = module
for dataset in self.datasets:
ret[dataset] = {}
for callsite in callsites:
module = (
self.target_df[dataset]
.loc[self.target_df[dataset]["name"] == callsite]["name"]
.unique()
.tolist()
)
ret[dataset][callsite] = module
return ret
def pack_json(
self,
df=pd.DataFrame(),
name="",
gradients={"Inclusive": {}, "Exclusive": {}},
prop_hists={"Inclusive": {}, "Exclusive": {}},
q={"Inclusive": {}, "Exclusive": {}},
outliers={"Inclusive": {}, "Exclusive": {}},
):
inclusive_variance = df["time (inc)"].var()
exclusive_variance = df["time"].var()
inclusive_std_deviation = math.sqrt(df["time (inc)"].var())
exclusive_std_deviation = math.sqrt(df["time"].var())
if math.isnan(inclusive_variance):
inclusive_variance = 0
inclusive_std_deviation = 0
if math.isnan(exclusive_variance):
exclusive_variance = 0
exclusive_std_deviation = 0
result = {
"name": name,
"id": "node-" + str(df["nid"].tolist()[0]),
"dataset": df["dataset"].unique().tolist(),
"module": df["module"].tolist()[0],
"callers": df["callers"].unique().tolist(),
"callees": df["callees"].unique().tolist(),
"component_path": df["component_path"].unique().tolist(),
"component_level": df["component_level"].unique().tolist(),
"Inclusive": {
"mean_time": df["time (inc)"].mean(),
"max_time": df["time (inc)"].max(),
"min_time": df["time (inc)"].min(),
"variance": inclusive_variance,
"q": q["Inclusive"],
"outliers": outliers["Inclusive"],
# "imbalance_perc": df['imbalance_perc_inclusive'].tolist()[0],
"std_deviation": inclusive_std_deviation,
# "kurtosis": df['kurtosis_inclusive'].tolist()[0],
# "skewness": df['skewness_inclusive'].tolist()[0],
"gradients": gradients["Inclusive"],
"prop_histograms": prop_hists["Inclusive"],
},
"Exclusive": {
"mean_time": df["time"].mean(),
"max_time": df["time"].max(),
"min_time": df["time"].min(),
"variance": exclusive_variance,
"q": q["Exclusive"],
"outliers": outliers["Exclusive"],
# "imbalance_perc": df['imbalance_perc_exclusive'].tolist()[0],
"std_deviation": exclusive_std_deviation,
# "skewness": df['skewness_exclusive'].tolist()[0],
# "kurtosis": df['kurtosis_exclusive'].tolist()[0],
"gradients": gradients["Exclusive"],
"prop_histograms": prop_hists["Exclusive"],
},
}
return result
# Return the histogram in the required form.
def histogram_format(self, histogram_grid):
return {
"x": histogram_grid[0].tolist(),
"y": histogram_grid[1].tolist(),
"x_min": histogram_grid[0][0],
"x_max": histogram_grid[0][-1],
"y_min": np.min(histogram_grid[1]).astype(np.float64),
"y_max": np.max(histogram_grid[1]).astype(np.float64),
}
# Prop can be dataset, rank, name
def histogram_by_property_ensemble(self, ensemble_df, prop):
ret = {}
if prop == "all_ranks":
time_ensemble_inclusive_arr = np.array(ensemble_df["time (inc)"].tolist())
time_ensemble_exclusive_arr = np.array(ensemble_df["time"].tolist())
elif prop == "rank":
ensemble_prop = ensemble_df.groupby(["dataset", prop])[
["time", "time (inc)"]
].mean()
time_ensemble_inclusive_arr = np.array(ensemble_prop["time (inc)"])
time_ensemble_exclusive_arr = np.array(ensemble_prop["time"])
else:
ensemble_prop = ensemble_df.groupby([prop])[["time", "time (inc)"]].mean()
time_ensemble_inclusive_arr = np.array(ensemble_prop["time (inc)"])
time_ensemble_exclusive_arr = np.array(ensemble_prop["time"])
inclusive_max = time_ensemble_inclusive_arr.max()
inclusive_min = time_ensemble_inclusive_arr.min()
histogram_ensemble_inclusive_grid = self.histogram(
time_ensemble_inclusive_arr, inclusive_min, inclusive_max
)
exclusive_max = time_ensemble_exclusive_arr.max()
exclusive_min = time_ensemble_exclusive_arr.min()
histogram_ensemble_exclusive_grid = self.histogram(
time_ensemble_exclusive_arr, exclusive_min, exclusive_max
)
ret["Inclusive"] = {
"ensemble": self.histogram_format(histogram_ensemble_inclusive_grid),
}
ret["Exclusive"] = {
"ensemble": self.histogram_format(histogram_ensemble_exclusive_grid),
}
return ret
    # Prop can be dataset, rank, name
    def histogram_by_property(self, ensemble_df, target_df, prop):
        """
        Histogram inclusive and exclusive runtimes of both the ensemble
        and one target dataset over a SHARED bin range (the union of both
        ranges), aggregated by `prop`:

        - "all_ranks": raw rows;
        - "rank": mean per (dataset, rank);
        - otherwise: mean per value of `prop`.

        Returns {"Inclusive"/"Exclusive": {"ensemble": ..., "target": ...}}
        in histogram_format() shape.
        """
        ret = {}
        if prop == "all_ranks":
            time_ensemble_inclusive_arr = np.array(ensemble_df["time (inc)"].tolist())
            time_ensemble_exclusive_arr = np.array(ensemble_df["time"].tolist())
            time_target_inclusive_arr = np.array(target_df["time (inc)"].tolist())
            time_target_exclusive_arr = np.array(target_df["time"].tolist())
        elif prop == "rank":
            ensemble_prop = ensemble_df.groupby(["dataset", prop])[
                ["time", "time (inc)"]
            ].mean()
            target_prop = target_df.groupby(["dataset", prop])[
                ["time", "time (inc)"]
            ].mean()
            time_ensemble_inclusive_arr = np.array(ensemble_prop["time (inc)"])
            time_ensemble_exclusive_arr = np.array(ensemble_prop["time"])
            time_target_inclusive_arr = np.array(target_prop["time (inc)"])
            time_target_exclusive_arr = np.array(target_prop["time"])
        else:
            ensemble_prop = ensemble_df.groupby([prop])[["time", "time (inc)"]].mean()
            target_prop = target_df.groupby([prop])[["time", "time (inc)"]].mean()
            time_ensemble_inclusive_arr = np.array(ensemble_prop["time (inc)"])
            time_ensemble_exclusive_arr = np.array(ensemble_prop["time"])
            time_target_inclusive_arr = np.array(target_prop["time (inc)"])
            time_target_exclusive_arr = np.array(target_prop["time"])
        # Shared range so the ensemble and target histograms are comparable
        # bin-for-bin.
        inclusive_max = max(
            time_ensemble_inclusive_arr.max(), time_target_inclusive_arr.max()
        )
        inclusive_min = min(
            time_ensemble_inclusive_arr.min(), time_target_inclusive_arr.min()
        )
        histogram_ensemble_inclusive_grid = self.histogram(
            time_ensemble_inclusive_arr, inclusive_min, inclusive_max
        )
        histogram_target_inclusive_grid = self.histogram(
            time_target_inclusive_arr, inclusive_min, inclusive_max
        )
        exclusive_max = max(
            time_ensemble_exclusive_arr.max(), time_target_exclusive_arr.max()
        )
        exclusive_min = min(
            time_ensemble_exclusive_arr.min(), time_target_exclusive_arr.min()
        )
        histogram_ensemble_exclusive_grid = self.histogram(
            time_ensemble_exclusive_arr, exclusive_min, exclusive_max
        )
        histogram_target_exclusive_grid = self.histogram(
            time_target_exclusive_arr, exclusive_min, exclusive_max
        )
        ret["Inclusive"] = {
            "ensemble": self.histogram_format(histogram_ensemble_inclusive_grid),
            "target": self.histogram_format(histogram_target_inclusive_grid),
        }
        ret["Exclusive"] = {
            "ensemble": self.histogram_format(histogram_ensemble_exclusive_grid),
            "target": self.histogram_format(histogram_target_exclusive_grid),
        }
        return ret
# Callsite grouped information
def callsite_data_old(self):
ret = {}
# Create the data dict.
ensemble = {}
for callsite, callsite_df in self.name_group_df:
callsite_ensemble_df = self.name_group_df.get_group(callsite)
hists = {}
hists["Inclusive"] = {}
hists["Exclusive"] = {}
for prop in self.props:
prop_histograms = self.histogram_by_property_ensemble(
callsite_ensemble_df, prop
)
hists["Inclusive"][prop] = prop_histograms["Inclusive"]
hists["Exclusive"][prop] = prop_histograms["Exclusive"]
gradients = Gradients(self.target_df, binCount=self.RunBinCount).run(
columnName="name", callsiteOrModule=callsite
)
boxplot = BoxPlot(callsite_df)
ensemble[callsite] = self.pack_json(
callsite_df,
callsite,
gradients=gradients,
q=boxplot.q,
outliers=boxplot.outliers,
prop_hists=hists,
)
ret["ensemble"] = ensemble
## Target data.
# Loop through datasets and group the callsite by name.
for dataset in self.datasets:
name_grouped = self.target_name_group_df[dataset]
target = {}
for callsite, callsite_df in name_grouped:
callsite_ensemble_df = self.name_group_df.get_group(callsite)
callsite_target_df = callsite_df
if not callsite_df.empty:
hists = {}
hists["Inclusive"] = {}
hists["Exclusive"] = {}
for prop in self.props:
prop_histograms = self.histogram_by_property(
callsite_ensemble_df, callsite_target_df, prop
)
hists["Inclusive"][prop] = prop_histograms["Inclusive"]
hists["Exclusive"][prop] = prop_histograms["Exclusive"]
boxplot = BoxPlot(callsite_df)
target[callsite] = self.pack_json(
df=callsite_target_df,
name=callsite,
prop_hists=hists,
q=boxplot.q,
outliers=boxplot.outliers,
)
ret[dataset] = target
def get_data_from_hd5(self, nodes, col):
ret = {}
if col == "module":
filename = self.moduleh5File
mapping = self.moduleMap
elif col == "name":
filename = self.callsiteh5File
mapping = self.callsiteMap
ensemble = {}
for node in nodes:
module_ensemble_df = pd.read_hdf(filename, key=mapping[node])
hists = {"Inclusive": {}, "Exclusive": {}}
for prop in self.props:
prop_histograms = self.histogram_by_property_ensemble(
module_ensemble_df, prop
)
hists["Inclusive"][prop] = prop_histograms["Inclusive"]
hists["Exclusive"][prop] = prop_histograms["Exclusive"]
# Calculate gradients
gradients = {"Inclusive": {}, "Exclusive": {}}
gradients = Gradients(self.target_df, binCount=self.RunBinCount).run(
columnName=col, callsiteOrModule=node
)
quartiles = {"Inclusive": {}, "Exclusive": {}}
outliers = {"Inclusive": {}, "Exclusive": {}}
if col == "name":
boxplot = BoxPlot(module_ensemble_df)
quartiles = boxplot.q
outliers = boxplot.outliers
ensemble[node] = self.pack_json(
df=module_ensemble_df,
name=node,
gradients=gradients,
prop_hists=hists,
q=quartiles,
outliers=outliers,
)
ret["ensemble"] = ensemble
for dataset in self.datasets:
target = {}
module_target_df = module_ensemble_df.loc[
module_ensemble_df["dataset"] == dataset
]
for node in nodes:
gradients = {"Inclusive": {}, "Exclusive": {}}
hists = {"Inclusive": {}, "Exclusive": {}}
quartiles = {"Inclusive": {}, "Exclusive": {}}
outliers = {"Inclusive": {}, "Exclusive": {}}
if module_target_df.shape[0] != 0:
for prop in self.props:
prop_histograms = self.histogram_by_property(
module_ensemble_df, module_target_df, prop
)
hists["Inclusive"][prop] = prop_histograms["Inclusive"]
hists["Exclusive"][prop] = prop_histograms["Exclusive"]
if col == "name":
boxplot = BoxPlot(module_target_df)
quartiles = boxplot.q
outliers = boxplot.outliers
target[node] = self.pack_json(
df=module_target_df,
name=node,
gradients=gradients,
prop_hists=hists,
q=quartiles,
outliers=outliers,
)
ret[dataset] = target
return ret
def module_data(self):
module_group_df = self.df.groupby(["module"])
self.moduleMap = {}
count = 0
for module, module_df in module_group_df:
module_ensemble_df = module_group_df.get_group(module)
key = "module_" + str(count)
self.moduleMap[module] = key
module_ensemble_df.to_hdf(self.moduleh5File, key=key)
count += 1
def callsite_data(self):
name_group_df = self.df.groupby(["name"])
count = 0
for name, name_df in name_group_df:
callsite_ensemble_df = name_group_df.get_group(name)
key = "callsite_" + str(callsite_ensemble_df["nid"].unique()[0])
self.callsiteMap[name] = key
callsite_ensemble_df.to_hdf(self.callsiteh5File, key=key)
count += 1
def write_maps(self):
# as requested in comment
exDict | |
if mode == "POMODORO_W" or mode == "POMODORO_B":
if pomoWorkTime < 7200:
pomoWorkTime += 300
queuePom.put(300)
timeTillNextLed = pomoWorkTime // getAvailable() # Calculate new LED time
if mode == "TASK":
if taskNum < 32: # Max 32 tasks because 32 LEDs
taskNum += 1
quantityON = getAvailable() // taskNum
if mode == "BUDGET":
if budgetTime < 18000:
budgetTime += 600
queueBudget.put(600)
writeSettings()
if state == "MODE_SETTINGS_2":
if mode == "POMODORO_W" or mode == "POMODORO_B":
if pomoBreakTime < 3600:
pomoBreakTime += 300
queuePom.put(300)
writeSettings()
upBEvent.clear()
if downBEvent.is_set():
# Switching modes
if state == "MODE_SELECT":
if mode == "POMODORO_W" or mode == "POMODORO_B":
mode = "TASK"
taskDone = 0
elif mode == "TASK":
mode = "BUDGET"
# Changing settings
if state == "MODE_SETTINGS":
if mode == "POMODORO_W" or mode == "POMODORO_B":
if pomoWorkTime >= 600:
pomoWorkTime -= 300
queuePom.put(-300)
timeTillNextLed = pomoWorkTime // getAvailable()
if mode == "TASK":
if taskNum > 1:
taskNum -= 1
quantityON = getAvailable() // taskNum
if mode == "BUDGET":
if budgetTime > 600:
budgetTime -= 600
queueBudget.put(-600)
writeSettings()
if state == "MODE_SETTINGS_2":
if mode == "POMODORO_W" or mode == "POMODORO_B":
if pomoBreakTime > 300:
pomoBreakTime -= 300
queuePom.put(-300)
writeSettings()
downBEvent.clear()
time.sleep(0.01)
# ======================================== UPDATE DISPLAY BASED ON CURRENT STATE AND MODE ===================================
def updateDisplay():
    """Redraw the OLED for the current (state, mode) pair, forever.

    Runs on its own thread; every pass paints a complete frame through
    luma's ``canvas`` context manager. Only *reads* the module-level
    ``state``/``mode`` and display strings -- it never mutates them.

    NOTE(review): this loop has no sleep, so it redraws as fast as the
    display bus allows and keeps a core busy -- consider a short
    time.sleep() per frame.
    """
    while True:
        if state == "WELCOME":
            with canvas(device) as draw:
                # Every screen draws a separator at y=45 with a caption below it.
                draw.line((0, 45, 127 ,45), fill="white")
                draw.text((40, 43), "Welcome", font=fontSmall, fill="white")
                draw.bitmap((20,0), tree, fill="white")
                draw.bitmap((50,0), tree, fill="white")
                draw.bitmap((80,0), tree, fill="white")
        elif state == "OVERVIEW":
            with canvas(device) as draw:
                draw.line((0, 45, 127 ,45), fill="white")
                draw.text((38, 43), "OVERVIEW", font=fontSmall, fill="white")
                if mode == "POMODORO_W" or mode == "POMODORO_B":
                    # gmtime turns a plain second count into a struct_time
                    # so strftime can render it as HH:MM:SS.
                    x = time.gmtime(pomoWorkTime)
                    displayWorkTime = time.strftime("%H:%M:%S", x)
                    y = time.gmtime(pomoBreakTime)
                    displayBreakTime = time.strftime("%H:%M:%S", y)
                    draw.text((13,0), "Mode: Pomodoro", font=fontSmall, fill="white")
                    draw.text((5,14), "Work Time: "+displayWorkTime, font=fontSmall, fill="white")
                    draw.text((5,28), "Break Time: "+displayBreakTime, font=fontSmall, fill="white")
                if mode == "TASK":
                    draw.text((31,0), "Mode: Task", font=fontSmall, fill="white")
                    draw.text((25,20), "Total Tasks: " + str(taskNum), font=fontSmall, fill="white")
                if mode == "BUDGET":
                    x = time.gmtime(budgetTime)
                    displayBudTime = time.strftime("%H:%M:%S", x)
                    draw.text((21,0), "Mode: Budget", font=fontSmall, fill="white")
                    draw.text((0,20), "Budget Time: "+displayBudTime, font=fontSmall, fill="white")
        elif state == "RUN":
            with canvas(device) as draw:
                draw.line((0, 45, 127 ,45), fill="white")
                if mode == "POMODORO_W":
                    draw.text((38,45), "P | Work", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
                elif mode == "POMODORO_B":
                    draw.text((31,45), "P | Break", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
                if mode == "TASK":
                    # Progress rendered as done/total, e.g. "3/8".
                    taskString = str(taskDone) + "/" + str(taskNum)
                    draw.text((17, 10), taskString, font=fontBig, fill="white")
                    draw.text((31,45), "T | Task", font=fontSmall, fill="white")
                if mode == "BUDGET":
                    draw.text((18,0), "Productive time:", font=fontSmall, fill="white")
                    draw.text((17,10), prodTime, font=fontBig, fill="white")
                    draw.text((12,45), "B | Budget | Work", font=fontSmall, fill="white")
        elif state == "PAUSE":
            with canvas(device) as draw:
                draw.line((0, 45, 127 ,45), fill="white")
                if mode == "POMODORO_W":
                    draw.text((15,45), "P | Work | Paused", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
                elif mode == "POMODORO_B":
                    draw.text((11,45), "P | Break | Paused", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
                if mode == "TASK":
                    taskString = str(taskDone) + "/" + str(taskNum)
                    draw.text((17, 10), taskString, font=fontBig, fill="white")
                    draw.text((31,45), "T | Task", font=fontSmall, fill="white")
                if mode == "BUDGET":
                    # In BUDGET mode, a pause is presented as break time.
                    draw.text((0,0), "Break time remaining:", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
                    draw.text((12,45), "B | Budget | Break", font=fontSmall, fill="white")
        elif state == "MODE_SELECT":
            with canvas(device) as draw:
                draw.line((0, 45, 127 ,45), fill="white")
                draw.text((30,45), "Select Mode", font=fontSmall, fill="white")
                draw.text((32,0), "Pomodoro", font=fontSmall, fill="white")
                draw.text((32,12), "Task", font=fontSmall, fill="white")
                draw.text((32,24), "Budget", font=fontSmall, fill="white")
                # ">" marks the currently highlighted mode.
                if mode == "POMODORO_W" or mode == "POMODORO_B":
                    draw.text((20,0), ">", font=fontSmall, fill="white")
                if mode == "TASK":
                    draw.text((20,12), ">", font=fontSmall, fill="white")
                if mode == "BUDGET":
                    draw.text((20,24), ">", font=fontSmall, fill="white")
        elif state == "MODE_SETTINGS":
            with canvas(device) as draw:
                draw.line((0, 45, 127 ,45), fill="white")
                if mode == "POMODORO_W" or mode == "POMODORO_B":
                    draw.text((0,45), "P | Settings | "+ displayTime, font=fontSmall, fill="white")
                    draw.text((23, 0), "Set Work Time:", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
                if mode == "TASK":
                    draw.text((31,45), "T | Settings", font=fontSmall, fill="white")
                    draw.text((40, 0), "Set Tasks:", font=fontSmall, fill="white")
                    # Shift two-digit counts left so the number stays centred.
                    if taskNum > 9:
                        draw.text((50, 10), str(taskNum), font=fontBig, fill="white")
                    else:
                        draw.text((60, 10), str(taskNum), font=fontBig, fill="white")
                if mode == "BUDGET":
                    # NOTE(review): label reads "Set Break Time" on the BUDGET
                    # settings screen -- confirm the wording is intended.
                    draw.text((0,45), "B | Settings | "+ displayTime, font=fontSmall, fill="white")
                    draw.text((23, 0), "Set Break Time:", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
        elif state == "MODE_SETTINGS_2":
            with canvas(device) as draw:
                draw.line((0, 45, 127 ,45), fill="white")
                if mode == "POMODORO_W" or mode == "POMODORO_B":
                    draw.text((0,45), "P | Settings | "+displayTime, font=fontSmall, fill="white")
                    draw.text((23, 0), "Set Break Time:", font=fontSmall, fill="white")
                    draw.text((17, 10), displayTime, font=fontBig, fill="white")
# ============================================================== MAIN =======================================================
# Each physical button is polled in its own Process so GPIO polling cannot
# be starved by the UI threads; buttons signal back via the shared Events.
p1 = Process(target=checkResetB)
p1.start()
p2 = Process(target=checkPlayPauseCompleteB)
p2.start()
p3 = Process(target=checkSettingsB)
p3.start()
p4 = Process(target=checkUpB)
p4.start()
p5 = Process(target=checkDownB)
p5.start()
# The event loop, display refresher and LED-tree driver run as Threads (not
# Processes) so they share this interpreter's globals (state, mode, ...).
t1 = Thread(target=watchEvents)
t1.start()
t2 = Thread(target=updateDisplay)
t2.start()
t3 = Thread(target = runTree)
t3.start()
# ======================================================== FLASK ============================================================
def convertTime(value):  # given a number of seconds, returns string in HH:MM:SS format
    """Format a duration given in seconds as a zero-padded ``HH:MM:SS`` string.

    Parameters
    ----------
    value : int
        Non-negative duration in seconds. Hours are not wrapped, so
        durations over 99 hours simply widen the hour field.

    Returns
    -------
    str
        ``HH:MM:SS`` representation of *value*.
    """
    # divmod does both the division and the remainder in one step, and the
    # f-string zero-pads each field. (The previous version also shadowed the
    # imported ``time`` module with a local variable named ``time``.)
    hours, remainder = divmod(value, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
# Flask app serving the web UI; static assets are served from ./assets.
app = Flask(__name__, static_folder='assets')
# NOTE(review): ``global`` at module level is a no-op -- these names are
# already module globals. The statement only documents intent.
global taskDescr, empty
empty = "No Description"  # placeholder shown for tasks without a description
taskDescr = [empty] * taskNum  # one description slot per configured task
@app.route("/")
def home():
return redirect("/templates/index")
@app.route("/templates/index")
def home_template():
global mode
global state
val = 0
val2 = 0
readSettings()
if state == "WELCOME":
state = "OVERVIEW"
else:
state = "WELCOME"
if mode == "POMODORO_W" or mode == "POMODORO_B":
displayMode = "Pomodoro"
val = convertTime(pomoWorkTime)
val2 = convertTime(pomoBreakTime)
elif mode == "TASK":
displayMode = "Task"
val = taskNum
elif mode == "BUDGET":
displayMode = "Budget"
val = convertTime(budgetTime)
return render_template("index.html", displayCurrentMode=displayMode,displayVal=val, displayVal2=val2)
@app.route("/templates/pomodoro", methods=['POST', 'GET'])
def pomodoro_template():
global mode
global state
global displayTime
global timeTillNextLed
global pomoWorkTime
global pomoBreakTime
if (not mode == "POMODORO_W" and not mode == "POMODORO_B") or (state == "WELCOME"):
print("Changing mode to POMODORO_W and state RUN")
mode = "POMODORO_W"
writeSettings()
state = "PAUSE"
clearAll()
timeTillNextLed = pomoWorkTime // getAvailable()
displayTime = convertTime(pomoWorkTime)
# if request.method == "POST":
# pomoWorkTime = request.form['pBreakTime']
# pomoBreakTime = request.form['pBreakTime']
return render_template("pomodoro.html", displayCurrentMode=mode, displayVal=displayTime, displayPWorkTime=convertTime(pomoWorkTime), displayPBreakTime=convertTime(pomoBreakTime))
@app.route("/pomodoro/<int:action>")
def pomodoro_action(action):
global state
global pomoWorkTime
global pomoBreakTime
global queuePom
global timeTillNextLed
if action == 1:
state = "RUN"
print("state set to RUN")
elif action == 0:
state = "PAUSE"
print("state set to PAUSE")
elif action == 2:
if pomoWorkTime < 7200:
pomoWorkTime += 300
queuePom.put(300)
timeTillNextLed = pomoWorkTime // getAvailable()
elif action == 3:
if pomoWorkTime >= 600:
pomoWorkTime -= 300
queuePom.put(-300)
timeTillNextLed = pomoWorkTime // getAvailable()
elif action == 4:
if pomoBreakTime < 3600:
pomoBreakTime += 300
queuePom.put(300)
elif action == 5:
if pomoBreakTime > 300:
pomoBreakTime -= 300
queuePom.put(-300)
writeSettings()
return redirect("/templates/pomodoro")
@app.route("/templates/task", methods=['POST', 'GET'])
def task_template():
global mode
global state
global taskNum
global taskDone
global taskDescr
global quantityON
if not mode == "TASK" or not state == "RUN":
print("Changing mode to TASK and state RUN")
mode = "TASK"
state = "RUN"
writeSettings()
taskDone = 0
clearAll()
quantityON = getAvailable() // taskNum
if (taskDone == taskNum):
taskDone = 0
taskDescr = [empty] * taskNum
state = "WELCOME"
return redirect("/templates/index")
if request.method == "POST":
if empty in taskDescr:
result = taskDescr.index(empty)
newDescr = request.form['taskDescr']
whitespace = [not char or char.isspace() for char in newDescr] # Checking if nothing input then append empty string message
if False in whitespace:
taskDescr[result] = newDescr
else:
taskDescr[result] = empty
else:
newDescr = request.form['taskDescr']
whitespace = [not char or char.isspace() for char in newDescr] # Checking if nothing input then append empty string message
if False in whitespace:
taskDescr.append(newDescr)
else:
taskDescr.append(empty)
taskNum = taskNum + 1
return render_template("task.html", taskList=taskDescr, taskDone=taskDone, taskNum=taskNum)
@app.route("/task/pop")
def task_pop():
global taskDone
global state
global taskDescr
if (taskDone >= taskNum):
print("Changing state to welcome")
state = "OVERVIEW"
buzzUp2()
else:
taskDone = taskDone + 1
if taskDone >=1:
playFreqTime(A5, .35)
quantityON = getAvailable() // taskNum
taskDescr.pop(0)
remainingTasks = taskNum - taskDone
if remainingTasks == 0:
allOn()
else:
if taskDone >=1:
print("Turning on", quantityON, "LEDS")
toggleNextLed(True, quantityON)
return redirect("/templates/task")
@app.route("/task/remove")
def task_remove():
global taskNum
global taskDone
global taskDescr
if taskDone < taskNum and not ((taskNum | |
A[1])
det_DHC1 = (B[0] - A[0])
det_DHD0 = (B[1] - A[1])
det_DHD1 = - (B[0] - A[0])
DHDH = DH * DH
Jacbian_gboxes[iter, i * 2, n_of_inter[iter] * 2] += (det_DxA0 * DH - Dx * det_DHA0) / DHDH
Jacbian_gboxes[iter, i * 2, n_of_inter[iter] * 2 + 1] += (det_DyA0 * DH - Dy * det_DHA0) / DHDH
Jacbian_gboxes[iter, i * 2 + 1, n_of_inter[iter] * 2] += (det_DxA1 * DH - Dx * det_DHA1) / DHDH
Jacbian_gboxes[iter, i * 2 + 1, n_of_inter[iter] * 2 + 1] += (det_DyA1 * DH - Dy * det_DHA1) / DHDH
Jacbian_gboxes[iter, 2 * ((i + 1) % 4), n_of_inter[iter] * 2] += (det_DxB0 * DH - Dx * det_DHB0) / DHDH
Jacbian_gboxes[iter, 2 * ((i + 1) % 4), n_of_inter[iter] * 2 + 1] += (det_DyB0 * DH - Dy * det_DHB0) / DHDH
Jacbian_gboxes[iter, 2 * ((i + 1) % 4) + 1, n_of_inter[iter] * 2] += (det_DxB1 * DH - Dx * det_DHB1) / DHDH
Jacbian_gboxes[iter, 2 * ((i + 1) % 4) + 1, n_of_inter[iter] * 2 + 1] += (det_DyB1 * DH - Dy * det_DHB1) / DHDH
Jacbian_qboxes[iter, j * 2, n_of_inter[iter] * 2] += (det_DxC0 * DH - Dx * det_DHC0) / DHDH
Jacbian_qboxes[iter, j * 2, n_of_inter[iter] * 2 + 1] += (det_DyC0 * DH - Dy * det_DHC0) / DHDH
Jacbian_qboxes[iter, j * 2 + 1, n_of_inter[iter] * 2] += (det_DxC1 * DH - Dx * det_DHC1) / DHDH
Jacbian_qboxes[iter, j * 2 + 1, n_of_inter[iter] * 2 + 1] += (det_DyC1 * DH - Dy * det_DHC1) / DHDH
Jacbian_qboxes[iter, 2 * ((j + 1) % 4), n_of_inter[iter] * 2] += (det_DxD0 * DH - Dx * det_DHD0) / DHDH
Jacbian_qboxes[iter, 2 * ((j + 1) % 4), n_of_inter[iter] * 2 + 1] += (det_DyD0 * DH - Dy * det_DHD0) / DHDH
Jacbian_qboxes[iter, 2 * ((j + 1) % 4) + 1, n_of_inter[iter] * 2] += (det_DxD1 * DH - Dx * det_DHD1) / DHDH
Jacbian_qboxes[iter, 2 * ((j + 1) % 4) + 1, n_of_inter[iter] * 2 + 1] += (det_DyD1 * DH - Dy * det_DHD1) / DHDH
n_of_inter[iter] += 1
tensor_Jacbian_gboxes = torch.from_numpy(Jacbian_gboxes).to(torch.device(corners_qboxes.device))
tensor_Jacbian_qboxes = torch.from_numpy(Jacbian_qboxes).to(torch.device(corners_qboxes.device))
grad_output_cuda = grad_output.to(torch.device(corners_qboxes.device))
# print("grad_output_cuda =", grad_output_cuda.shape)
tensor_grad_corners_gboxes = tensor_Jacbian_gboxes.matmul(grad_output_cuda.unsqueeze(2)).squeeze(2)
tensor_grad_corners_qboxes = tensor_Jacbian_qboxes.matmul(grad_output_cuda.unsqueeze(2)).squeeze(2)
return tensor_grad_corners_gboxes, tensor_grad_corners_qboxes
class sort_vertex(torch.autograd.Function):
    """Differentiably order polygon intersection points around their centroid.

    ``forward`` sorts up to 8 flattened (x, y) points per row by descending
    angle about the points' centroid so that a well-formed polygon can be
    built from them; ``backward`` routes gradients back through the stored
    permutation (the gradient of a permutation is its inverse permutation).
    """
    @staticmethod
    def forward(ctx, int_pts, num_of_inter):
        # int_pts: (N, 16) flattened (x, y) pairs; num_of_inter: per-row
        # count of valid points. NOTE(review): the ``.float()`` calls below
        # imply num_of_inter indexes to 0-d tensors -- confirm with callers.
        np_int_pts = int_pts.detach().numpy()
        #np_num_of_inter = num_of_inter.detach().numpy()
        np_num_of_inter = num_of_inter
        N = int_pts.shape[0]
        np_sorted_indexs = np.zeros((N, 8), dtype=np.int32)
        sorted_int_pts = np.zeros((N, 16), dtype=np.float32)
        for iter in range(N):
            if np_num_of_inter[iter] > 0:
                # Centroid of the valid points; the sort key is each point's
                # angle about this centroid.
                center = np.zeros((2,), dtype=np.float32)
                for i in range(np_num_of_inter[iter]):
                    center[0] += np_int_pts[iter, 2 * i]
                    center[1] += np_int_pts[iter, 2 * i + 1]
                center[0] /= np_num_of_inter[iter].float()
                center[1] /= np_num_of_inter[iter].float()
                angle = np.zeros((8,), dtype=np.float32)
                v = np.zeros((2,), dtype=np.float32)
                for i in range(np_num_of_inter[iter]):
                    # Normalize the centroid->point vector before atan2.
                    v[0] = np_int_pts[iter, 2 * i] - center[0]
                    v[1] = np_int_pts[iter, 2 * i + 1] - center[1]
                    d = math.sqrt(v[0] * v[0] + v[1] * v[1])
                    v[0] = v[0] / d
                    v[1] = v[1] / d
                    anglei = math.atan2(v[1], v[0])
                    if anglei < 0:
                        # Shift atan2's (-pi, pi] range into [0, 2*pi).
                        angle[i] = anglei + 2 * 3.1415926
                    else:
                        angle[i] = anglei
                # sort angles with descending; unused slots keep angle 0 and
                # therefore sort to the end.
                np_sorted_indexs[iter, :] = np.argsort(-angle)
                for i in range(np_num_of_inter[iter]):
                    sorted_int_pts[iter, 2 * i] = np_int_pts[iter, 2 * np_sorted_indexs[iter, i]]
                    sorted_int_pts[iter, 2 * i + 1] = np_int_pts[iter, 2 * np_sorted_indexs[iter, i] + 1]
        # conver numpy to tensor; the permutation itself is stashed on ctx
        # (it is integer data, not differentiable state).
        ctx.save_for_backward(int_pts, num_of_inter)
        ctx.np_sorted_indexs = np_sorted_indexs
        tensor_sorted_int_pts = torch.from_numpy(sorted_int_pts)
        return tensor_sorted_int_pts
    @staticmethod
    def backward(ctx, grad_output):
        # Build a per-row 16x16 permutation Jacobian from the saved index
        # order and apply it to grad_output.
        int_pts, num_of_inter = ctx.saved_tensors
        np_sorted_indexs = ctx.np_sorted_indexs
        N = int_pts.shape[0]
        Jacbian_int_pts = np.zeros((N, 16, 16), dtype=np.float32)
        for iter in range(N):
            for i in range(num_of_inter[iter]):
                Jacbian_int_pts[iter, 2 * np_sorted_indexs[iter, i], 2 * i] = 1
                Jacbian_int_pts[iter, 2 * np_sorted_indexs[iter, i] + 1, 2 * i + 1] = 1
        tensor_Jacbian_int_pts = torch.from_numpy(Jacbian_int_pts).to(torch.device(int_pts.device))
        grad_output_cuda = grad_output.to(torch.device(int_pts.device))
        tensor_grad_int_pts = tensor_Jacbian_int_pts.matmul(grad_output_cuda.unsqueeze(2)).squeeze(2)
        # todo: my second addtion
        # my_add_1 = torch.zeros(tensor_grad_int_pts.shape[0], dtype=torch.float32)
        # num_of_inter receives no gradient (integer count input).
        return tensor_grad_int_pts, None
class area_polygon(torch.autograd.Function):
@staticmethod
def forward(ctx, int_pts, num_of_inter):
    # Polygon area by triangle fan: the polygon (vertices sorted upstream)
    # is split into triangles anchored at vertex 0, and the absolute areas
    # of the triangles are summed per row.
    ctx.save_for_backward(int_pts, num_of_inter)
    np_int_pts = int_pts.detach().numpy()
    #np_num_of_inter = num_of_inter.detach().numpy()
    np_num_of_inter = num_of_inter
    N = int_pts.shape[0]
    areas = np.zeros((N,), dtype=np.float32)
    for iter in range(N):
        # num_of_inter[iter] vertices yield num_of_inter[iter] - 2 triangles.
        for i in range(np_num_of_inter[iter] - 2):
            p1 = np_int_pts[iter, 0:2]
            p2 = np_int_pts[iter, 2 * i + 2:2 * i + 4]
            p3 = np_int_pts[iter, 2 * i + 4:2 * i + 6]
            # Cross-product form: |(p1-p3) x (p2-p3)| / 2 is the triangle area.
            areas[iter] += abs(((p1[0] - p3[0]) * (p2[1] - p3[1]) - (p1[1] - p3[1]) * (p2[0] - p3[0])) / 2.0)
    tensor_areas = torch.from_numpy(areas)
    return tensor_areas
@staticmethod
def backward(ctx, *grad_outputs):
int_pts, num_of_inter = ctx.saved_tensors
np_int_pts = int_pts.detach().numpy()
np_num_of_inter = num_of_inter.detach().numpy()
grad_output0 = grad_outputs[0]
N = int_pts.shape[0]
grad_int_pts = np.zeros((N, 16), dtype=np.float32)
for iter in range(N):
if (np_num_of_inter[iter] > 2):
for i in range(np_num_of_inter[iter]):
if i == 0:
for j in range(np_num_of_inter[iter] - 2):
p1 = np_int_pts[iter, 0:2]
p2 = np_int_pts[iter, 2 * j + 2:2 * j + 4]
p3 = np_int_pts[iter, 2 * j + 4:2 * j + 6]
if ((p1[0] - p3[0]) * (p2[1] - p3[1]) - (p1[1] - p3[1]) * (p2[0] - p3[0])) > 0:
grad_int_pts[iter, 0] += (p2[1] - p3[1]) * grad_output0[iter] * 0.5
grad_int_pts[iter, 1] += -(p2[0] - p3[0]) * grad_output0[iter] * 0.5
else:
grad_int_pts[iter, 0] += -(p2[1] - p3[1]) * grad_output0[iter] * 0.5
grad_int_pts[iter, 1] += (p2[0] - p3[0]) * grad_output0[iter] * 0.5
elif i == 1:
p1 = np_int_pts[iter, 0:2]
p2 = np_int_pts[iter, 2:4]
p3 = np_int_pts[iter, 4:6]
if ((p1[0] - p3[0]) * (p2[1] - p3[1]) - (p1[1] - p3[1]) * (p2[0] - p3[0])) > 0:
grad_int_pts[iter, 2] = -(p1[1] - p3[1]) * grad_output0[iter] * 0.5
grad_int_pts[iter, 3] = (p1[0] - p3[0]) * grad_output0[iter] * 0.5
else:
grad_int_pts[iter, 2] = (p1[1] - p3[1]) * grad_output0[iter] * 0.5
grad_int_pts[iter, 3] = -(p1[0] - p3[0]) * grad_output0[iter] * 0.5
elif i == np_num_of_inter[iter] - 1:
p1 = np_int_pts[iter, 2 * (np_num_of_inter[iter] - 2):2 * (np_num_of_inter[iter] - 1)]
p2 = np_int_pts[iter, 2 * (np_num_of_inter[iter] - 1):2 * (np_num_of_inter[iter])]
p3 = np_int_pts[iter, 0:2]
if ((p1[0] - p3[0]) * (p2[1] - p3[1]) - (p1[1] - p3[1]) * (p2[0] - p3[0])) > 0:
grad_int_pts[iter, 2 * (np_num_of_inter[iter] - 1)] = - (p1[1] - p3[1]) * grad_output0[
iter] * 0.5
grad_int_pts[iter, 2 * np_num_of_inter[iter] - 1] = (p1[0] - p3[0]) * grad_output0[
iter] * 0.5
else:
grad_int_pts[iter, 2 * (np_num_of_inter[iter] - 1)] = (p1[1] - p3[1]) * grad_output0[
iter] * 0.5
grad_int_pts[iter, 2 * np_num_of_inter[iter] - 1] = - (p1[0] - p3[0]) * grad_output0[
iter] * 0.5
else:
p1 = np_int_pts[iter, 0:2]
p2 = np_int_pts[iter, 2 * i - 2: 2 * i]
p3 = np_int_pts[iter, 2 * i: 2 * i + 2]
if ((p1[0] - p3[0]) * (p2[1] - p3[1]) - (p1[1] - p3[1]) * (p2[0] - p3[0])) > 0:
grad_int_pts[iter, i * 2] += (- (p2[1] - p3[1]) + (p1[1] - p3[1])) * grad_output0[
iter] * 0.5
grad_int_pts[iter, i * 2 + 1] += (- (p1[0] - p3[0]) + (p2[0] - p3[0])) * grad_output0[
iter] * 0.5
else:
grad_int_pts[iter, i * 2] += ((p2[1] - p3[1]) - (p1[1] - p3[1])) * grad_output0[iter] * 0.5
grad_int_pts[iter, i * 2 + 1] += ((p1[0] - p3[0]) - (p2[0] - p3[0])) * grad_output0[
iter] * 0.5
p1 = np_int_pts[iter, 0:2]
p2 = np_int_pts[iter, 2 * i: 2 * i + 2]
p3 = np_int_pts[iter, 2 * i + 2: 2 * i + 4]
if ((p1[0] - p3[0]) * (p2[1] - p3[1]) - (p1[1] - p3[1]) * (p2[0] - p3[0])) > 0:
grad_int_pts[iter, i * 2] += - (p1[1] - p3[1]) * grad_output0[iter] * 0.5
grad_int_pts[iter, i * 2 + 1] += (p1[0] - p3[0]) * grad_output0[iter] * 0.5
else:
grad_int_pts[iter, i * 2] += (p1[1] - p3[1]) * grad_output0[iter] * 0.5
grad_int_pts[iter, i * 2 + 1] += -(p1[0] - p3[0]) * grad_output0[iter] * 0.5
tensor_grad_int_pts = torch.from_numpy(grad_int_pts)
# todo: my first addition.
# my_add_0 = torch.zeros(tensor_grad_int_pts.shape[0], | |
<reponame>DBerke/DRAGONS
#
# DRAGONS
#
# http_proxy.py
# ------------------------------------------------------------------------------
import os
import json
import time
import select
import datetime
import urllib.error
import urllib.parse
import urllib.request
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
from recipe_system.cal_service import calurl_dict
# ------------------------------------------------------------------------------
# HTTP messaging, Global bits for logging
# Canned log messages used by the request handlers below.
RECMSG = "Received {} events."    # logged after fetching metrics events
REQMSG = "Requesting current OP day events "
FAILMSG = "Failed to access Fitsstore. No metrics available."
ILLREQ = "Illegal request: No 'timestamp' parameter."
msg_form = '"%s" %s %s'           # (request line, status code, size)
info_code = 203                   # status tag used for informational log lines
fail_code = 416
no_access_code = 503              # logged when fitsstore is unreachable
size = "-"                        # placeholder for unknown response size
# ------------------------------------------------------------------------------
def parsepath(path):
    """Parse a request path into its components plus query parameters.

    Parameters
    ----------
    path : str
        Raw request path, possibly carrying a query string.

    Returns
    -------
    dict
        ``{"path": ..., "query": ...}`` merged with the parsed query
        parameters; each parameter value is a list, as produced by
        ``urllib.parse.parse_qs``.
    """
    parsed_url = urllib.parse.urlparse(path)
    # Build the result in a single literal instead of three .update() calls;
    # query parameters are merged last, exactly as before.
    rparms = {"path": parsed_url.path, "query": parsed_url.query}
    rparms.update(urllib.parse.parse_qs(parsed_url.query))
    return rparms
# ------------------------------------------------------------------------------
# Timing functions
def server_time():
    """
    Return a dictionary of server timing quantities related to current time.
    This dict will be returned to a call on the server, /rqsite.json (See
    do_GET() method of ADCCHandler class.

    parameters: <void>
    return:     <dict>, dictionary of time now values.
    """
    lt_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    unxtime = time.time()
    utc_now = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")
    # Difference of two *naive* datetimes; its sign indicates which side of
    # UTC the local clock is on.
    utc_offset = datetime.datetime.utcnow() - datetime.datetime.now()
    if utc_offset.days != 0:
        # Negative timedelta (local clock ahead of UTC): flip it so .seconds
        # is the real magnitude, round to hours, then negate the result back.
        utc_offset = -utc_offset
        utc_offset = -int(round(utc_offset.seconds / 3600.))
    else:
        utc_offset = int(round(utc_offset.seconds / 3600.))
    # Crude site detection from the POSIX timezone (hours west of UTC).
    timezone = time.timezone // 3600
    if timezone == 10:
        local_site = 'gemini-north'
    elif timezone in [3, 4]:  # TZ -4 but +1hr DST applied inconsistently
        local_site = 'gemini-south'
    else:
        local_site = 'remote'
    time_dict = {"local_site": local_site,
                 "tzname" : time.tzname[0],
                 "lt_now" : lt_now,
                 "unxtime" : unxtime,
                 "utc_now" : utc_now,
                 "utc_offset": utc_offset}
    return time_dict
def stamp_to_ymd(timestamp):
"""
Caller sends a timestamp in seconds of epoch. Return string for
year month day of that time as YYYYMMDD' as used by url requests, as in
http://<fitsstore_server>/qaforgui/20130616
parameters: <float>, seconds of epochs.
return: <string>, YYYYMMDD of passed time.
"""
return time.strftime("%Y%m%d", time.localtime(timestamp))
def stamp_to_opday(timestamp):
    """Map an epoch timestamp (sec) onto its operational day, 'YYYYMMDD'.

    Local times at or after 14:00 belong to the *next* operational day.

    parameters: <float>, time in epoch seconds
    return:     <string>, YYYYMMDD
    """
    ONE_DAY = 86400
    local_hour = datetime.datetime.fromtimestamp(timestamp).hour
    effective = timestamp + ONE_DAY if local_hour >= 14 else timestamp
    return stamp_to_ymd(effective)
def ymd_to_stamp(yy, mm, dd, hh=0):
    """Convert year/month/day[/hour] values to local epoch seconds.

    Year is four digits, e.g. 2013.

    parameters: <int>, <int>, <int> [, <int>] Year, Month, Day [, Hour]
    return:     <float>, epoch time in seconds.
    """
    # Round-trip through strptime so string inputs work exactly as before.
    stamp_text = f"{yy} {mm} {dd} {hh}"
    parsed = time.strptime(stamp_text, "%Y %m %d %H")
    return time.mktime(parsed)
def current_op_timestamp():
    """
    Return the epoch time (sec) of the start of the current operational day,
    where turnover occurs @ 14.00h localtime. I.e. if the hour >= 14.00,
    then the current operational day is tomorrow.

    Eg., 2013-08-02 17.00h is 20130803

    parameters: <void>
    return:     <float>
    """
    # The turnover hour previously appeared three times (a variable, a float
    # literal in the comparison, and implicitly in the docstring); define it
    # once and use it everywhere.
    TURNOVER_HOUR = 14
    tnow = datetime.datetime.now()
    t_epoch = time.time()
    if tnow.hour >= TURNOVER_HOUR:
        # Past turnover: the op day's calendar date is today.
        op_day = stamp_to_ymd(t_epoch)
    else:
        # Before turnover: still inside the op day that began yesterday 14:00.
        op_day = stamp_to_ymd(t_epoch - 86400)
    yy, mm, dd = op_day[:4], op_day[4:6], op_day[6:]
    timestamp = ymd_to_stamp(yy, mm, dd, TURNOVER_HOUR)
    return timestamp
# END Timing functions
# ------------------------------------------------------------------------------
# FITS Store query.
def fstore_get(timestamp):
    """
    Open a url on fitsstore/qaforgui/ with the passed timestamp.
    timestamp is in epoch seconds, which is converted here to a
    YMD string for the URL. Return a list of dicts of qa metrics data.

    Exceptions on urlopen()
    -----------------------
    Any number of exceptions may be thrown on URL access: URLError, HTTPError,
    TimeoutError, ... . We don't really care which specific failure occurred,
    only that QA metrics are not acessible. Here, we catch all failures and
    simply pass, returning a empty list.

    N.B. -- A timestamp that evaluates to False will request everything
    from fitsstore. This could be huge. Be careful passing no timestamp!

    Parameters
    ----------
    timestamp : <float>, time in epoch seconds

    Return
    ------
    qa_data : <list>, list of dicts (json) of qametrics
    """
    qa_data = list()
    # Get the fitsstore query url from calurl_dict
    fitsstore_qa = calurl_dict.calurl_dict['QAQUERYURL']
    # URL date segment is the *operational* day for the given timestamp.
    date_query = stamp_to_opday(timestamp)
    furl = os.path.join(fitsstore_qa, date_query)
    try:
        store_handle = urllib.request.urlopen(furl)
        qa_data = json.loads(store_handle.read())
    except Exception:
        # Deliberate catch-all (see docstring): any failure means "no
        # metrics"; log it and fall through to return the empty list.
        print(msg_form %(FAILMSG, no_access_code, size))
    return qa_data
def specview_get(jfile):
    """Load spec-viewer data from a local JSON file (test-data shim)."""
    # Stand-in for a real service query: just parse the file on disk.
    with open(jfile) as fh:
        payload = json.load(fh)
    return payload
# ------------------------------------------------------------------------------
class ADCCHandler(BaseHTTPRequestHandler):
"""
ADCC services request handler.
"""
events = None
informers = None
spec_events = None
def address_string(self):
    """Return just the client host portion of the peer address."""
    # client_address is a (host, port, ...) tuple; only the host is wanted.
    return self.client_address[0]
def log_request(self, code='-', size='-'):
    """Log an accepted request.

    This is called by send_response().
    This overrides BaseHTTPRequestHandler.log_request.
    See that class for what the method does normally.

    In non-verbose mode the high-frequency cmdqueue.json polling requests
    are suppressed; every other request is always logged.

    The previous implementation drove this logic with ``assert`` and a
    caught AssertionError; under ``python -O`` asserts are stripped, which
    silently re-enabled logging of everything. Plain conditionals behave
    the same under all interpreter flags.
    """
    msg_form = '"%s" %s %s'
    if self.informers["verbose"] or "cmdqueue.json" not in self.requestline:
        self.log_message(msg_form, repr(self.requestline), code, size)
def do_GET(self):
    """
    Defined services on HTTP GET requests.

    Routes handled:
      /qap/...            -- static QAP metrics UI files (light/dark theme)
      /cmdqueue.json      -- QA metrics event queue (_handle_cmdqueue_json)
      /qapspec/...        -- static spec-viewer UI files
      /specqueue.json     -- spec-viewer event queue (_handle_specqueue_json)
      /rqsite.json        -- server time/site info as JSON
      /rqlog.json?file=.. -- contents of a log file wrapped in JSON
    """
    events = self.informers["events"]
    spec_events = self.informers["spec_events"]
    self.informers["verbose"] = True
    dark_theme = self.informers['dark']
    parms = parsepath(self.path)
    try:
        # First test for an html request on the QAP nighttime_metrics page.
        # I.e. <localhost>:<port>/qap/nighttime_metrics.html
        if self.path.startswith("/qap/"):
            dirname = os.path.dirname(__file__)
            # Theme selects which client directory the assets come from.
            if dark_theme:
                joinlist = [dirname, "../client/adcc_faceplate_dark/"]
            else:
                joinlist = [dirname, "../client/adcc_faceplate/"]
            # Split out any parameters in the URL
            self.path = self.path.split("?")[0]
            # append any further directory info.
            joinlist.append(self.path.split('qap/')[-1])
            fname = os.path.join(*joinlist)
            self.log_message('{} {} {}'.format("Loading "+joinlist[1]+
                                               os.path.basename(fname), 203, '-'))
            try:
                with open(fname, 'rb') as f:
                    data = f.read()
            except OSError:
                # Missing asset: serve a placeholder body (still HTTP 200).
                data = bytes(b"<b>NO SUCH RESOURCE AVAILABLE</b>")
            self.send_response(200)
            # Content type chosen by file extension; default text/html.
            if self.path.endswith(".js"):
                self.send_header('Content-type', 'text/javascript')
            elif self.path.endswith(".css"):
                self.send_header("Content-type", "text/css")
            elif fname.endswith(".png"):
                self.send_header('Content-type', "image/png")
            else:
                self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(data)
            return
        # ------------------------------------------------------------------
        # The vast majority of HTTP client GET requests will be on the
        # cmdqueue service. Handle first.
        if parms["path"].startswith("/cmdqueue.json"):
            self._handle_cmdqueue_json(events, parms)
        # ------------------------------------------------------------------
        # HTTP client GET requests on QAP Spectra (/qapspec/) Service.
        if parms["path"].startswith("/qapspec/"):
            dirname = os.path.dirname(__file__)
            joinlist = [dirname, "../client/qap_specviewer/"]
            # Split out any parameters in the URL
            self.path = self.path.split("?")[0]
            # Append any further directory info.
            joinlist.append(self.path.split('qapspec/')[-1])
            fname = os.path.join(*joinlist)
            self.log_message('{} {} {}'.format("Loading "+joinlist[1]+
                                               os.path.basename(fname), 203, '-'))
            # NOTE(review): unlike the /qap/ branch above, a missing file
            # here propagates to the outer except OSError -> 404.
            #try:
            with open(fname, 'rb') as f:
                data = f.read()
            #except IOError:
            #data = bytes("<b>NO SUCH RESOURCE AVAILABLE</b>".encode('utf-8'))
            self.send_response(200)
            if self.path.endswith(".js"):
                self.send_header('Content-type', 'text/javascript')
            elif self.path.endswith(".css"):
                self.send_header("Content-type", "text/css")
            elif fname.endswith(".png"):
                self.send_header('Content-type', "image/png")
            elif fname.endswith(".gif"):
                self.send_header('Content-type', "image/gif")
            else:
                self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(data)
            return
        # ------------------------------------------------------------------
        # GET requests on the specviewer service.
        if parms["path"].startswith("/specqueue.json"):
            self._handle_specqueue_json(spec_events, parms)
            return
        # ------------------------------------------------------------------
        # Server time
        # Queried by metrics client
        elif parms["path"].startswith("/rqsite.json"):
            self.send_response(200)
            self.send_header('Content-type', "application/json")
            self.end_headers()
            tdic = server_time()
            self.wfile.write(
                bytes(json.dumps(tdic, sort_keys=True, indent=4).encode('utf-8'))
            )
        # ------------------------------------------------------------------
        # Queried by metrics client
        elif parms["path"].startswith("/rqlog.json"):
            self.send_response(200)
            self.send_header('Content-type', "application/json")
            self.end_headers()
            if "file" in parms:
                logfile = parms["file"][0]
                if not os.path.exists(logfile):
                    msg = "Log file not available"
                else:
                    f = open(logfile)
                    msg = f.read()
                    f.close()
            else:
                msg = "No log file available"
            tdic = {"log": msg}
            self.wfile.write(
                bytes(json.dumps(tdic, sort_keys=True, indent=4).encode('utf-8'))
            )
    except OSError:
        self.send_error(404, 'File Not Found: {}'.format(self.path))
        raise
    return
def do_POST(self):
    """
    Here, HTTP POST requests are farmed out to either metrics events on the
    event_report/ service, or to spec_events on the spec_report/ service.

    The request body is JSON; its length comes from the Content-Length
    header. Both branches reply 200 before appending the decoded event.
    """
    mform = '"%s" %s %s'
    info_code = 203
    size = "-"
    events = self.informers["events"]
    spec_events = self.informers["spec_events"]
    parms = parsepath(self.path)
    # Content-Length tells us how many body bytes to read off the socket.
    vlen = int(self.headers["Content-Length"])
    pdict = self.rfile.read(vlen)
    # for QAP metrics ...
    if parms["path"].startswith("/event_report"):
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        aevent = json.loads(pdict)
        events.append_event(aevent)
        self.log_message(mform, "Appended event", info_code, size)
        self.log_message(mform, repr(aevent), info_code, size)
    elif parms["path"].startswith("/spec_report"):
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        # ("recieved" typo below is in the emitted log text; left as-is.)
        self.log_message(mform, "ADCC recieved new event", info_code, size)
        # Spec viewer keeps only the latest event: clear, then append.
        spec_events.clear_list()
        aevent = json.loads(pdict)
        spec_events.append_event(aevent)
        self.log_message('"%s" %s %s', "Appended event", info_code, size)
        self.log_message('"%s" %s %s', repr(aevent), info_code, size)
    return
# -------------------------------------------------------------------------
# privitized handling cmdqueue.json requests
def _handle_cmdqueue_json(self, events, parms):
"""
Handle HTTP client GET requests on service: cmdqueue.json
"""
verbosity = self.informers["verbose"]
self.send_response(200)
self.send_header('Content-type', "application/json")
self.end_headers()
# N.B. A timestamp of zero will request *everything* from fitsstore
# This could be huge. Be careful passing GET request on cmdqueue.json
# with no timestamp.
try:
fromtime = float(parms["timestamp"][0])
except KeyError:
self.log_message(msg_form, ILLREQ, info_code, size)
# event_list = [] implies new adcc. Request current OP day from fits.
if not events.event_list:
self.log_message(msg_form, "No extant events.", info_code, size)
self.log_message(msg_form, REQMSG+"@fitsstore", info_code, size)
events.event_list = fstore_get(current_op_timestamp())
tdic = events.get_list()
self.log_message(msg_form, RECMSG.format(len(tdic)), info_code, size)
tdic.insert(0, {"msgtype": "cmdqueue.request", "timestamp": time.time()})
tdic.append({"msgtype": "cmdqueue.request", "timestamp": time.time()})
self.wfile.write(
bytes(json.dumps(tdic, sort_keys=True, indent=4).encode('utf-8'))
)
# Handle current nighttime requests ...
elif stamp_to_opday(fromtime) == stamp_to_opday(current_op_timestamp()):
if verbosity:
self.log_message(msg_form, | |
# Copyright 2017 Max Planck Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""This class implements POT training.
"""
import collections
import logging
import os
import time
import tensorflow as tf
import utils
from utils import ProgressBar
from utils import TQDM
import numpy as np
import ops
from metrics import Metrics
slim = tf.contrib.slim
def vgg_16(inputs,
           is_training=False,
           dropout_keep_prob=0.5,
           scope='vgg_16',
           fc_conv_padding='VALID', reuse=None):
    """Convolutional trunk of VGG-16, returning per-stage feature maps.

    Inputs (assumed scaled to [0, 1] — TODO confirm against callers) are
    rescaled to [0, 255] and the VGG mean RGB is subtracted.  Only the five
    conv/pool stages are applied; the fully-connected head is omitted.
    `end_points['pool0']`..`end_points['pool5']` hold the preprocessed input
    and the activation after each pooling stage.
    """
    inputs = inputs * 255.0
    inputs -= tf.constant([123.68, 116.779, 103.939], dtype=tf.float32)
    with tf.variable_scope(scope, 'vgg_16', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.name + '_end_points'
        end_points = {}
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                            outputs_collections=end_points_collection):
            end_points['pool0'] = inputs
            net = inputs
            # (repetitions, filters) for each VGG-16 stage; scope names must
            # stay conv1..conv5 / pool1..pool5 to match pretrained weights.
            for stage, (reps, filters) in enumerate(
                    [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)], 1):
                net = slim.repeat(net, reps, slim.conv2d, filters, [3, 3],
                                  scope='conv%d' % stage)
                net = slim.max_pool2d(net, [2, 2], scope='pool%d' % stage)
                end_points['pool%d' % stage] = net
    return net, end_points
def compute_moments(_inputs, moments=(2, 3)):
    """From an image input, compute local (patch-wise) moments.

    Parameters
    ----------
    _inputs : 4-D tensor, shape (batch, height, width, channels)
    moments : iterable of int (optional)
        Which moments to emit: 2 -> variance, 3 -> unnormalized skewness.
        The default is an immutable tuple; the original mutable-list default
        was the classic shared-default-argument pitfall.

    Returns
    -------
    2-D tensor: the requested moments for patch sizes 3, 4 and 5,
    concatenated along axis 1.
    """
    _inputs_sq = tf.square(_inputs)
    _inputs_cube = tf.pow(_inputs, 3)
    height = int(_inputs.get_shape()[1])
    width = int(_inputs.get_shape()[2])
    channels = int(_inputs.get_shape()[3])

    def ConvFlatten(x, kernel_size):
        # Per-channel box filter: average each channel over a
        # kernel_size x kernel_size window, then flatten to (batch, -1).
        w_sum = tf.eye(num_rows=channels, num_columns=channels, batch_shape=[kernel_size * kernel_size])
        w_sum = tf.reshape(w_sum, [kernel_size, kernel_size, channels, channels])
        w_sum = w_sum / (kernel_size * kernel_size)
        sum_ = tf.nn.conv2d(x, w_sum, strides=[1, 1, 1, 1], padding='VALID')
        size = prod_dim(sum_)
        assert size == (height - kernel_size + 1) * (width - kernel_size + 1) * channels, size
        return tf.reshape(sum_, [-1, size])

    outputs = []
    for size in [3, 4, 5]:
        mean = ConvFlatten(_inputs, size)
        square = ConvFlatten(_inputs_sq, size)
        var = square - tf.square(mean)
        if 2 in moments:
            outputs.append(var)
        if 3 in moments:
            cube = ConvFlatten(_inputs_cube, size)
            skewness = cube - 3.0 * mean * var - tf.pow(mean, 3)  # Unnormalized
            outputs.append(skewness)
    return tf.concat(outputs, 1)
def prod_dim(tensor):
    """Return the product of all non-batch dimensions of *tensor*'s static shape."""
    total = 1
    for dim in tensor.get_shape()[1:]:
        total *= int(dim)
    return total
def flatten(tensor):
    """Collapse every non-batch dimension: output has shape (batch, -1)."""
    flat_size = prod_dim(tensor)
    return tf.reshape(tensor, [-1, flat_size])
class Pot(object):
    """A base class for running individual POTs.

    Owns a dedicated tf.Session, runs variable initialization at
    construction time, and tears down the default graph and session when
    used as a context manager.  Subclasses supply the actual model by
    overriding the ``*_internal`` hooks; the stubs here raise
    NotImplementedError (instead of ``assert False``, which would be
    silently stripped when Python runs with -O).
    """

    def __init__(self, opts, data, weights):
        # Create a new session with session.graph = default graph
        self._session = tf.Session()
        self._trained = False
        self._data = data
        self._data_weights = np.copy(weights)
        # Latent noise sampled once; reused to apply decoder while training
        self._noise_for_plots = opts['pot_pz_std'] * utils.generate_noise(opts, 1000)
        # Placeholders
        self._real_points_ph = None
        self._noise_ph = None
        # Init ops
        self._additional_init_ops = []
        self._init_feed_dict = {}
        # Main operations
        # Optimizers
        with self._session.as_default(), self._session.graph.as_default():
            logging.error('Building the graph...')
            self._build_model_internal(opts)
        # Make sure AdamOptimizer, if used in the Graph, is defined before
        # calling global_variables_initializer().
        init = tf.global_variables_initializer()
        self._session.run(init)
        self._session.run(self._additional_init_ops, self._init_feed_dict)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Cleaning the whole default Graph
        logging.error('Cleaning the graph...')
        tf.reset_default_graph()
        logging.error('Closing the session...')
        # Finishing the session
        self._session.close()

    def train(self, opts):
        """Train a POT model."""
        with self._session.as_default(), self._session.graph.as_default():
            self._train_internal(opts)
            self._trained = True

    def sample(self, opts, num=100):
        """Sample points from the trained POT model."""
        assert self._trained, 'Can not sample from the un-trained POT'
        with self._session.as_default(), self._session.graph.as_default():
            return self._sample_internal(opts, num)

    def train_mixture_discriminator(self, opts, fake_images):
        """Train classifier separating true data from points in fake_images.

        Return:
            prob_real: probabilities of the points from training data being the
                real points according to the trained mixture classifier.
                Numpy vector of shape (self._data.num_points,)
            prob_fake: probabilities of the points from fake_images being the
                real points according to the trained mixture classifier.
                Numpy vector of shape (len(fake_images),)
        """
        with self._session.as_default(), self._session.graph.as_default():
            return self._train_mixture_discriminator_internal(opts, fake_images)

    def _run_batch(self, opts, operation, placeholder, feed,
                   placeholder2=None, feed2=None):
        """Wrapper around session.run to process huge data.

        It is assumed that (a) first dimension of placeholder enumerates
        separate points, and (b) that operation is independently applied
        to every point, i.e. we can split it point-wisely and then merge
        the results. The second placeholder is meant either for is_train
        flag for batch-norm or probabilities of dropout.

        TODO: write util function which will be called both from this method
        and MNIST classification evaluation as well.
        """
        assert len(feed.shape) > 0, 'Empty feed.'
        num_points = feed.shape[0]
        batch_size = opts['tf_run_batch_size']
        batches_num = int(np.ceil((num_points + 0.) / batch_size))
        result = []
        for idx in xrange(batches_num):
            if idx == batches_num - 1:
                # Last batch: take everything that remains.
                if feed2 is None:
                    res = self._session.run(
                        operation,
                        feed_dict={placeholder: feed[idx * batch_size:]})
                else:
                    res = self._session.run(
                        operation,
                        feed_dict={placeholder: feed[idx * batch_size:],
                                   placeholder2: feed2})
            else:
                if feed2 is None:
                    res = self._session.run(
                        operation,
                        feed_dict={placeholder: feed[idx * batch_size:
                                                     (idx + 1) * batch_size]})
                else:
                    res = self._session.run(
                        operation,
                        feed_dict={placeholder: feed[idx * batch_size:
                                                     (idx + 1) * batch_size],
                                   placeholder2: feed2})
            if len(res.shape) == 1:
                # convert (n,) vector to (n,1) array
                res = np.reshape(res, [-1, 1])
            result.append(res)
        result = np.vstack(result)
        assert len(result) == num_points
        return result

    def _build_model_internal(self, opts):
        """Build a TensorFlow graph with all the necessary ops.

        Must be overridden by subclasses.
        """
        raise NotImplementedError('POT base class has no build_model method defined.')

    def _train_internal(self, opts):
        """Run the training loop.  Must be overridden by subclasses."""
        raise NotImplementedError('POT base class has no train method defined.')

    def _sample_internal(self, opts, num):
        """Sample `num` points.  Must be overridden by subclasses."""
        raise NotImplementedError('POT base class has no sample method defined.')

    def _train_mixture_discriminator_internal(self, opts, fake_images):
        """Train the real/fake classifier.  Must be overridden by subclasses."""
        raise NotImplementedError('POT base class has no mixture discriminator method defined.')
class ImagePot(Pot):
"""A simple POT implementation, suitable for pictures.
"""
def __init__(self, opts, data, weights):
# One more placeholder for batch norm
self._is_training_ph = None
Pot.__init__(self, opts, data, weights)
def dcgan_like_arch(self, opts, noise, is_training, reuse, keep_prob):
output_shape = self._data.data_shape
num_units = opts['g_num_filters']
batch_size = tf.shape(noise)[0]
num_layers = opts['g_num_layers']
if opts['g_arch'] == 'dcgan':
height = output_shape[0] / 2**num_layers
width = output_shape[1] / 2**num_layers
elif opts['g_arch'] == 'dcgan_mod':
height = output_shape[0] / 2**(num_layers-1)
width = output_shape[1] / 2**(num_layers-1)
else:
assert False
h0 = ops.linear(
opts, noise, num_units * height * width, scope='h0_lin')
h0 = tf.reshape(h0, [-1, height, width, num_units])
h0 = tf.nn.relu(h0)
layer_x = h0
for i in xrange(num_layers-1):
scale = 2**(i+1)
if opts['g_stride1_deconv']:
# Sylvain, I'm worried about this part!
_out_shape = [batch_size, height * scale / 2,
width * scale / 2, num_units / scale * 2]
layer_x = ops.deconv2d(
opts, layer_x, _out_shape, d_h=1, d_w=1,
scope='h%d_deconv_1x1' % i)
layer_x = tf.nn.relu(layer_x)
_out_shape = [batch_size, height * scale, width * scale, num_units / scale]
layer_x = ops.deconv2d(opts, layer_x, _out_shape, scope='h%d_deconv' % i)
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
layer_x = tf.nn.relu(layer_x)
if opts['dropout']:
_keep_prob = tf.minimum(
1., 0.9 - (0.9 - keep_prob) * float(i + 1) / (num_layers - 1))
layer_x = tf.nn.dropout(layer_x, _keep_prob)
_out_shape = [batch_size] + list(output_shape)
if opts['g_arch'] == 'dcgan':
last_h = ops.deconv2d(
opts, layer_x, _out_shape, scope='hlast_deconv')
elif opts['g_arch'] == 'dcgan_mod':
last_h = ops.deconv2d(
opts, layer_x, _out_shape, d_h=1, d_w=1, scope='hlast_deconv')
else:
assert False
if opts['input_normalize_sym']:
return tf.nn.tanh(last_h)
else:
return tf.nn.sigmoid(last_h)
def began_dec(self, opts, noise, is_training, reuse, keep_prob):
""" Architecture reported here: https://arxiv.org/pdf/1703.10717.pdf
"""
output_shape = self._data.data_shape
num_units = opts['g_num_filters']
num_layers = opts['g_num_layers']
batch_size = tf.shape(noise)[0]
h0 = ops.linear(
opts, noise, num_units * 8 * 8, scope='h0_lin')
h0 = tf.reshape(h0, [-1, 8, 8, num_units])
layer_x = h0
for i in xrange(num_layers):
if i % 3 < 2:
# Don't change resolution
layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1, scope='h%d_conv' % i)
layer_x = tf.nn.elu(layer_x)
else:
if i != num_layers - 1:
# Upsampling by factor of 2 with NN
scale = 2 ** (i / 3 + 1)
layer_x = ops.upsample_nn(layer_x, [scale * 8, scale * 8],
scope='h%d_upsample' % i, reuse=reuse)
# Skip connection
append = ops.upsample_nn(h0, [scale * 8, scale * 8],
scope='h%d_skipup' | |
request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: int
"""
kwargs['_return_http_data_only'] = True
return self.remove_entity_sets_from_linking_entity_sets_with_http_info(linking_entity_set_id, request_body, **kwargs) # noqa: E501
    def remove_entity_sets_from_linking_entity_sets_with_http_info(self, linking_entity_set_id, request_body, **kwargs):  # noqa: E501
        """Removes/unlinks the linked entity sets from the linking entity set  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.remove_entity_sets_from_linking_entity_sets_with_http_info(linking_entity_set_id, request_body, async_req=True)
        >>> result = thread.get()
        :param linking_entity_set_id: (required)
        :type linking_entity_set_id: str
        :param request_body: (required)
        :type request_body: dict(str, list[str])
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(int, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must be captured first so it holds exactly the call
        # arguments; do not introduce locals before this line.
        local_var_params = locals()
        all_params = [
            'linking_entity_set_id',
            'request_body'
        ]
        # Transport-level options accepted in addition to the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject unknown keyword arguments early, then fold the known ones in.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method remove_entity_sets_from_linking_entity_sets" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'linking_entity_set_id' is set
        if self.api_client.client_side_validation and ('linking_entity_set_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['linking_entity_set_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `linking_entity_set_id` when calling `remove_entity_sets_from_linking_entity_sets`")  # noqa: E501
        # verify the required parameter 'request_body' is set
        if self.api_client.client_side_validation and ('request_body' not in local_var_params or  # noqa: E501
                                                       local_var_params['request_body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `request_body` when calling `repartition_entity_set`")  # noqa: E501
        collection_formats = {}
        # Path template substitution: {linkingEntitySetId}.
        path_params = {}
        if 'linking_entity_set_id' in local_var_params:
            path_params['linkingEntitySetId'] = local_var_params['linking_entity_set_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The request body is the dict of entity-set ids to unlink.
        body_params = None
        if 'request_body' in local_var_params:
            body_params = local_var_params['request_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['http_auth', 'openlattice_auth']  # noqa: E501
        return self.api_client.call_api(
            '/datastore/entity-sets/linking/', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='int',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
    def repartition_entity_set(self, entity_set_id, request_body, **kwargs):  # noqa: E501
        """Used to repartition an entity set. This will shuffle corresponding ids, edges, and data table rows for the entity set.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.repartition_entity_set(entity_set_id, request_body, async_req=True)
        >>> result = thread.get()
        :param entity_set_id: (required)
        :type entity_set_id: str
        :param request_body: (required)
        :type request_body: list[int]
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # Convenience overload: return only the deserialized body (no status
        # code/headers) by delegating to the _with_http_info variant.
        kwargs['_return_http_data_only'] = True
        return self.repartition_entity_set_with_http_info(entity_set_id, request_body, **kwargs)  # noqa: E501
    def repartition_entity_set_with_http_info(self, entity_set_id, request_body, **kwargs):  # noqa: E501
        """Used to repartition an entity set. This will shuffle corresponding ids, edges, and data table rows for the entity set.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.repartition_entity_set_with_http_info(entity_set_id, request_body, async_req=True)
        >>> result = thread.get()
        :param entity_set_id: (required)
        :type entity_set_id: str
        :param request_body: (required)
        :type request_body: list[int]
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # NOTE: locals() must be captured first so it holds exactly the call
        # arguments; do not introduce locals before this line.
        local_var_params = locals()
        all_params = [
            'entity_set_id',
            'request_body'
        ]
        # Transport-level options accepted in addition to the API parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject unknown keyword arguments early, then fold the known ones in.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method repartition_entity_set" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'entity_set_id' is set
        if self.api_client.client_side_validation and ('entity_set_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['entity_set_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `entity_set_id` when calling `repartition_entity_set`")  # noqa: E501
        # verify the required parameter 'request_body' is set
        if self.api_client.client_side_validation and ('request_body' not in local_var_params or  # noqa: E501
                                                       local_var_params['request_body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `request_body` when calling `repartition_entity_set`")  # noqa: E501
        collection_formats = {}
        # Path template substitution: {entitySetId}.
        path_params = {}
        if 'entity_set_id' in local_var_params:
            path_params['entitySetId'] = local_var_params['entity_set_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The request body is the list of partitions.
        body_params = None
        if 'request_body' in local_var_params:
            body_params = local_var_params['request_body']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['http_auth', 'openlattice_auth']  # noqa: E501
        return self.api_client.call_api(
            '/datastore/entity-sets/{entitySetId}/partitions', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
    def update_entity_set_meta_data(self, entity_set_id, metadata_update, **kwargs):  # noqa: E501
        """Updates the EntitySet definition for the given EntitySet UUID with the given metadata.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_entity_set_meta_data(entity_set_id, metadata_update, async_req=True)
        >>> result = thread.get()
        :param entity_set_id: (required)
        :type entity_set_id: str
        :param metadata_update: (required)
        :type metadata_update: MetadataUpdate
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: int
        """
        # Convenience overload: return only the deserialized body (no status
        # code/headers) by delegating to the _with_http_info variant.
        kwargs['_return_http_data_only'] = True
        return self.update_entity_set_meta_data_with_http_info(entity_set_id, metadata_update, **kwargs)  # noqa: E501
def update_entity_set_meta_data_with_http_info(self, entity_set_id, metadata_update, **kwargs): # noqa: E501
"""Updates the EntitySet definition for the given EntitySet UUID with the given metadata. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_entity_set_meta_data_with_http_info(entity_set_id, metadata_update, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param metadata_update: (required)
:type metadata_update: MetadataUpdate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: | |
<reponame>timcera/hspf_water_balance<filename>hspf_water_balance/hspf_water_balance.py<gh_stars>0
#!/bin/env python
import os
import re
import sys
import warnings

import numpy as np
import pandas as pd
from mando import command
from mando import main
from tabulate import simple_separated_format
from tabulate import tabulate
from tstoolbox import tsutils
@command()
def about():
    """Display version number and system information."""
    # Delegates to tstoolbox's shared `about` helper, reporting this package.
    tsutils.about(__name__)
def process(uci, hbn, pwbe, year, ofilename, modulus, tablefmt):
    """Assemble and print a water-balance table from an HSPF binary file.

    Parameters
    ----------
    uci
        Path to the UCI file or None.  When given, land-cover names and
        areas are read from it and the balance is area-weighted.
    hbn
        Path to the HSPF binary output file with PERLND/IMPLND data.
    pwbe
        Sequence of (label, [(water-balance-term, operation), ...]) rows
        describing the table; an empty label produces a blank row.
    year
        Calendar year to report, or None for the whole simulation period.
    ofilename
        Output file name; '' prints to stdout.
    modulus
        PERLND/IMPLND numbering modulus used to recover the land-cover id.
    tablefmt
        Any `tabulate` format plus the 'csv_nos'/'tsv_nos' variants.
    """
    from hspfbintoolbox.hspfbintoolbox import extract
    if ofilename:
        # Redirect all subsequent prints to the requested file.
        sys.stdout = open(ofilename, 'w')
    try:
        year = int(year)
    except TypeError:
        # year is None -> keep None and report the whole simulation.
        pass
    # Fallbacks when no UCI file is given: no names, no areas.  (The
    # original dict(zip(range(modulus+1, 1), ...)) was always empty.)
    lcnames = {}
    inverse_lcnames = {}
    inverse_lc = {}
    lnds = {}
    if uci is not None:
        with open(uci) as fp:
            content = fp.readlines()
        if not os.path.exists(hbn):
            raise ValueError("""
*
* File {0} does not exist.
*
""".format(hbn))
        # UCI is fixed-format: truncate to 80 columns, drop trailing space.
        content = [i[:80] for i in content]
        content = [i.rstrip() for i in content]
        schematic_start = content.index('SCHEMATIC')
        schematic_end = content.index('END SCHEMATIC')
        schematic = content[schematic_start: schematic_end + 1]
        perlnd_start = content.index('PERLND')
        perlnd_end = content.index('END PERLND')
        perlnd = content[perlnd_start: perlnd_end + 1]
        geninfo_start = perlnd.index(' GEN-INFO')
        geninfo_end = perlnd.index(' END GEN-INFO')
        geninfo = perlnd[geninfo_start: geninfo_end + 1]
        masslink_start = content.index('MASS-LINK')
        masslink_end = content.index('END MASS-LINK')
        masslink = content[masslink_start: masslink_end + 1]
        # Land-cover names from the PERLND GEN-INFO table.
        for line in geninfo[1: -1]:
            if '***' in line:
                continue
            if '' == line.strip():
                continue
            try:
                _ = int(line[5: 10])
                continue
            except ValueError:
                pass
            lcnames.setdefault(line[10: 30].strip(), []).append(int(line[:5]))
            inverse_lcnames[int(line[:5])] = line[10: 30].strip()
            inverse_lc[int(line[:5]) % modulus] = line[10: 30].strip()
        # MASS-LINK groups routing PWATER/IWATER; used to pick schematic rows.
        masslink = [i for i in masslink if '***' not in i]
        masslink = [i for i in masslink if len(i.strip()) > 0]
        masslink = ' '.join(masslink)
        mlgroups = re.findall(
            r' MASS-LINK +?([0-9]+).*?LND [PI]WATER.*? END MASS-LINK +?\1 ',
            masslink)
        # Sum the contributing areas per (operation, opno) from SCHEMATIC.
        for line in schematic[3: -1]:
            if '***' in line:
                continue
            if '' == line:
                continue
            words = line.split()
            if words[0] in ['PERLND', 'IMPLND'] and words[5] in mlgroups:
                lnds[(words[0], int(words[1]))] = lnds.setdefault(
                    (words[0], int(words[1])), 0.0) + float(words[2])
    try:
        pdf = extract(hbn, 'yearly', ',,,')
    except ValueError:
        raise ValueError("""
*
* The binary file does not have consistent ending months between PERLND and
* IMPLND. This could be caused by the BYREND (Binary YeaR END) being set
* differently in the PERLND:BINARY-INFO and IMPLND:BINARY-INFO, or you could
* have the PRINT-INFO bug. To work around the PRINT-INFO bug, add a PERLND
* PRINT-INFO block, setting the PYREND here will actually work in the
* BINARY-INFO block.
*
""")
    if year is not None:
        # .loc replaces the long-removed DataFrame.ix indexer.
        pdf = pd.DataFrame(pdf.loc['{0}-01-01'.format(year), :]).T
    pdf = pdf[[i for i in pdf.columns if 'PERLND' in i or 'IMPLND' in i]]
    # Columns are 'OP_NUM_WBT'; expand into a (op, number, wbt, lc) index.
    mindex = [i.split('_') for i in pdf.columns]
    mindex = [(i[0], int(i[1]), i[2], int(i[1]) % modulus) for i in mindex]
    mindex = pd.MultiIndex.from_tuples(mindex, names=['op',
                                                      'number',
                                                      'wbt',
                                                      'lc'])
    pdf.columns = mindex
    pdf = pdf.sort_index(axis='columns')
    mindex = pdf.columns
    aindex = [(i[0], i[1]) for i in pdf.columns]
    # Extend the index with the UCI-derived area and land-cover name.
    mindex = [(i[0],
               int(i[1]),
               i[2],
               int(i[1]) % modulus,
               float(lnds.setdefault(j, 0.0)),
               str(inverse_lcnames.setdefault(int(i[1]), '')))
              for i, j in zip(mindex, aindex)]
    mindex = pd.MultiIndex.from_tuples(mindex, names=['op',
                                                      'number',
                                                      'wbt',
                                                      'lc',
                                                      'area',
                                                      'lcname'])
    pdf.columns = mindex
    nsum = {}
    areas = {}
    namelist = {}
    # Flatten the (term, operation) pairs requested by the caller.
    setl = [i[1] for i in pwbe]
    setl = [item for sublist in setl for item in sublist]
    for lue in ['PERLND', 'IMPLND']:
        for wbterm in [i[0] for i in setl if i[0]]:
            for lc in list(range(1, 21)):
                try:
                    subset = pdf.loc[:, (lue,
                                         slice(None),
                                         wbterm,
                                         lc,
                                         slice(None),
                                         slice(None))]
                except KeyError:
                    continue
                if np.any(subset < 0):
                    warnings.warn(
                        f'There is a negative value for {lue}, {wbterm}, '
                        f'with land cover {lc}.')
                if uci is None:
                    # No areas available: plain mean across operations/years.
                    if subset.empty is True:
                        nsum[(lue, lc, wbterm)] = 0.0
                        if (lue, lc) not in namelist:
                            namelist[(lue, lc)] = ''
                    else:
                        nsum[(lue, lc, wbterm)] = subset.mean(axis='columns').mean()
                        namelist[(lue, lc)] = inverse_lc.setdefault(lc, lc)
                else:
                    # Area-weighted mean using the SCHEMATIC areas.
                    sareas = subset.columns.get_level_values('area')
                    ssareas = sum(sareas)
                    if (lue, lc) not in areas:
                        areas[(lue, lc)] = ssareas
                    if subset.empty is True or ssareas == 0:
                        nsum[(lue, lc, wbterm)] = 0.0
                        if (lue, lc) not in namelist:
                            namelist[(lue, lc)] = ''
                    else:
                        fa = sareas / areas[(lue, lc)]
                        nsum[(lue, lc, wbterm)] = (subset * fa).sum(axis='columns').mean()
                        namelist[(lue, lc)] = inverse_lc.setdefault(lc, lc)
    # Column headings: land-cover number, optionally '-name'.
    newnamelist = []
    for key, value in sorted(namelist.items()):
        if key[0] != 'PERLND':
            continue
        if key[1] == value:
            newnamelist.append('{0}'.format(key[1]))
        else:
            newnamelist.append('{0}-{1}'.format(key[1], value))
    printlist = []
    printlist.append([' '] + newnamelist + ['ALL'])
    mapipratio = {}
    mapipratio['PERLND'] = 1.0
    mapipratio['IMPLND'] = 1.0
    if uci is not None:
        pareas = [areas[i] for i in sorted(areas) if i[0] == 'PERLND']
        iareas = [areas[i] for i in sorted(areas) if i[0] == 'IMPLND']
        # Impervious fraction per land cover; weights combined PER/IMP rows.
        ipratio = np.array(iareas) / (np.array(pareas) + np.array(iareas))
        sumareas = sum(pareas) + sum(iareas)
        percent_areas = {}
        percent_areas['PERLND'] = np.array(pareas) / sumareas * 100
        percent_areas['IMPLND'] = np.array(iareas) / sumareas * 100
        percent_areas['COMBINED'] = percent_areas['PERLND'] + percent_areas['IMPLND']
        printlist.append(['PERVIOUS'])
        printlist.append(['AREA(acres)'] +
                         [str(i) if i > 0 else '' for i in pareas] +
                         [str(sum(pareas))])
        printlist.append(['AREA(%)'] +
                         [str(i) if i > 0 else '' for i in percent_areas['PERLND']] +
                         [str(sum(percent_areas['PERLND']))])
        printlist.append([])
        printlist.append(['IMPERVIOUS'])
        printlist.append(['AREA(acres)'] +
                         [str(i) if i > 0 else '' for i in iareas] +
                         [str(sum(iareas))])
        printlist.append(['AREA(%)'] +
                         [str(i) if i > 0 else '' for i in percent_areas['IMPLND']] +
                         [str(sum(percent_areas['IMPLND']))])
        printlist.append([])
        mapipratio['PERLND'] = 1.0 - ipratio
        mapipratio['IMPLND'] = ipratio
    mapr = {}
    mapr['PERLND'] = 1.0
    mapr['IMPLND'] = 1.0
    for term, op in pwbe:
        if not term:
            printlist.append([])
            continue
        test = [i[1] for i in op]
        if 'IMPLND' in test and 'PERLND' in test:
            maprat = mapipratio
            sumop = 'COMBINED'
        else:
            maprat = mapr
            sumop = test[0]
        te = 0.0
        for sterm, operation in op:
            te = te + np.array([nsum[(*i, sterm)] for i in sorted(namelist) if i[0] == operation]) * maprat[operation]
        if uci is None:
            te = [term] + [str(i) if i > 0 else '' for i in te] + [str(sum(te) / len(te))]
        else:
            te = [term] + [str(i) if i > 0 else '' for i in te] + [str(sum(te * percent_areas[sumop]) / 100)]
        printlist.append(te)
    if tablefmt in ['csv', 'tsv', 'csv_nos', 'tsv_nos']:
        sep = {'csv': ',',
               'tsv': '\\t',
               'csv_nos': ',',
               'tsv_nos': '\\t'}[tablefmt]
        fmt = simple_separated_format(sep)
    else:
        fmt = tablefmt
    if tablefmt in ['csv_nos', 'tsv_nos']:
        # '_nos' variants strip the spaces tabulate pads around separators.
        print(re.sub(' *, *', ',', tabulate(printlist, tablefmt=fmt)))
    else:
        print(tabulate(printlist, tablefmt=fmt))
@command(doctype="numpy")
def detailed(hbn,
uci=None,
year=None,
ofilename='',
modulus=20,
tablefmt='csv_nos'):
"""Develops a detailed water balance.
Parameters
----------
hbn : str
This is the binary output file containing PERLND and IMPLND
information. This should be the binary output file created by the
`uci` file.
uci
[optional, defaults to None]
This uci file will be read to determine all of the areas and other
aspects of the model. If available it will read the land cover names
from the PERLND GEN-INFO table. Required if you want the water balance
area-weighted between land covers.
year
[optional, defaults to None]
If None the water balance would cover the period of simulation.
Otherwise the year for the water balance.
ofilename
[optional, defaults to '']
If empty string '', then prints to stdout, else prints to `ofilename`.
modulus : int
[optional, defaults to 20]
Usual setup of a HSPF model has PERLND 1, 21, 41, ...etc. represent
land cover 1 in different sub-watersheds and 2, 22, 42, ...etc
represent land cover 2 in different sub-watersheds, ...etc.
The remainder of the PERLND label divided by the modulus is the land
cover number.
tablefmt : str
[optional, default is 'cvs_nos']
The table format. Can be one of 'csv', 'tsv', 'csv_nos', 'tsv_nos',
'plain', 'simple', 'github', 'grid', 'fancy_grid', 'pipe', 'orgtbl',
'jira', 'presto', 'psql', 'rst', 'mediawiki', 'moinmoin', 'youtrack',
'html', 'latex', 'latex_raw', 'latex_booktabs' and 'textile'.
"""
if uci is None:
pwbe = (['SUPY' ,[('SUPY' , 'PERLND'), ]],
['SURLI' ,[('SURLI', 'PERLND'), ]],
['UZLI' ,[('UZLI' , 'PERLND'), ]],
['LZLI' ,[('LZLI' , 'PERLND'), ]],
['' ,[('' , '' ), ]],
['SURO: PERVIOUS' ,[('SURO' , 'PERLND'), ]],
['SURO: IMPERVIOUS' ,[('SURO' , 'IMPLND'), ]],
['IFWO' ,[('IFWO' , 'PERLND'), ]],
['AGWO' ,[('AGWO' , 'PERLND'), ]],
['' ,[('' , '' ), ]],
['AGWI' ,[('AGWI' , 'PERLND'), ]],
['IGWI' ,[('IGWI' , 'PERLND'), ]],
['' ,[('' , '' ), ]],
['CEPE' ,[('CEPE' , 'PERLND'), ]],
['UZET' ,[('UZET' , 'PERLND'), ]],
['LZET' ,[('LZET' , 'PERLND'), ]],
['AGWET' ,[('AGWET', 'PERLND'), ]],
['BASET' ,[('BASET', 'PERLND'), ]],
['SURET' ,[('SURET', 'PERLND'), ]],
['' ,[('' , '' ), ]],
['PERO' ,[('PERO' , 'PERLND'), ]],
['IGWI' ,[('IGWI' , 'PERLND'), ]],
['TAET: PERVIOUS' ,[('TAET' , 'PERLND'), ]],
['IMPEV: IMPERVIOUS',[('IMPEV', 'IMPLND'), ]],
['' ,[('' , '' | |
{
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
}
_subtype_map = {
'instance_type': {'InMageRcm': 'InMageRcmUpdateApplianceForReplicationProtectedItemInput'}
}
def __init__(
self,
**kwargs
):
super(UpdateApplianceForReplicationProtectedItemProviderSpecificInput, self).__init__(**kwargs)
self.instance_type = None # type: Optional[str]
class InMageRcmUpdateApplianceForReplicationProtectedItemInput(UpdateApplianceForReplicationProtectedItemProviderSpecificInput):
    """InMageRcm provider specific input to update appliance for replication protected item.

    All required parameters must be populated in order to send to Azure.

    :param instance_type: Required. The class type.Constant filled by server.
    :type instance_type: str
    :param run_as_account_id: The run as account Id.
    :type run_as_account_id: str
    """

    # msrest metadata: fields the service requires.
    _validation = {'instance_type': {'required': True}}

    # msrest metadata: python attribute -> (wire key, serialized type).
    _attribute_map = {
        'instance_type': {'key': 'instanceType', 'type': 'str'},
        'run_as_account_id': {'key': 'runAsAccountId', 'type': 'str'},
    }

    def __init__(self, *, run_as_account_id: Optional[str] = None, **kwargs):
        super(InMageRcmUpdateApplianceForReplicationProtectedItemInput, self).__init__(**kwargs)
        # Discriminator constant identifying this provider-specific subtype.
        self.instance_type = 'InMageRcm'  # type: str
        self.run_as_account_id = run_as_account_id
class InMageRcmUpdateContainerMappingInput(ReplicationProviderSpecificUpdateContainerMappingInput):
    """InMageRcm update protection container mapping.

    All required parameters must be populated in order to send to Azure.

    :param instance_type: Required. The class type.Constant filled by server.
    :type instance_type: str
    :param enable_agent_auto_upgrade: Required. A value indicating whether agent auto upgrade has
     to be enabled.
    :type enable_agent_auto_upgrade: str
    """

    # msrest metadata: fields the service requires.
    _validation = {
        'instance_type': {'required': True},
        'enable_agent_auto_upgrade': {'required': True},
    }

    # msrest metadata: python attribute -> (wire key, serialized type).
    _attribute_map = {
        'instance_type': {'key': 'instanceType', 'type': 'str'},
        'enable_agent_auto_upgrade': {'key': 'enableAgentAutoUpgrade', 'type': 'str'},
    }

    def __init__(self, *, enable_agent_auto_upgrade: str, **kwargs):
        super(InMageRcmUpdateContainerMappingInput, self).__init__(**kwargs)
        # Discriminator constant identifying this provider-specific subtype.
        self.instance_type = 'InMageRcm'  # type: str
        self.enable_agent_auto_upgrade = enable_agent_auto_upgrade
class InMageRcmUpdateReplicationProtectedItemInput(UpdateReplicationProtectedItemProviderInput):
    """InMageRcm provider specific input to update replication protected item.
    All required parameters must be populated in order to send to Azure.
    :param instance_type: Required. The class type.Constant filled by server.
    :type instance_type: str
    :param target_vm_name: The target VM name.
    :type target_vm_name: str
    :param target_vm_size: The target VM size.
    :type target_vm_size: str
    :param target_resource_group_id: The target resource group ARM Id.
    :type target_resource_group_id: str
    :param target_availability_set_id: The target availability set ARM Id.
    :type target_availability_set_id: str
    :param target_availability_zone: The target availability zone.
    :type target_availability_zone: str
    :param target_proximity_placement_group_id: The target proximity placement group Id.
    :type target_proximity_placement_group_id: str
    :param target_boot_diagnostics_storage_account_id: The target boot diagnostics storage account
     ARM Id.
    :type target_boot_diagnostics_storage_account_id: str
    :param target_network_id: The target network ARM Id.
    :type target_network_id: str
    :param test_network_id: The test network ARM Id.
    :type test_network_id: str
    :param vm_nics: The list of NIC details.
    :type vm_nics: list[~azure.mgmt.recoveryservicessiterecovery.models.InMageRcmNicInput]
    :param license_type: The license type. Possible values include: "NotSpecified",
     "NoLicenseType", "WindowsServer".
    :type license_type: str or ~azure.mgmt.recoveryservicessiterecovery.models.LicenseType
    """
    # msrest validation metadata: only the discriminator is mandatory.
    _validation = {
        'instance_type': {'required': True},
    }
    # msrest serialization map: python attribute -> (JSON wire key, msrest type).
    _attribute_map = {
        'instance_type': {'key': 'instanceType', 'type': 'str'},
        'target_vm_name': {'key': 'targetVmName', 'type': 'str'},
        'target_vm_size': {'key': 'targetVmSize', 'type': 'str'},
        'target_resource_group_id': {'key': 'targetResourceGroupId', 'type': 'str'},
        'target_availability_set_id': {'key': 'targetAvailabilitySetId', 'type': 'str'},
        'target_availability_zone': {'key': 'targetAvailabilityZone', 'type': 'str'},
        'target_proximity_placement_group_id': {'key': 'targetProximityPlacementGroupId', 'type': 'str'},
        'target_boot_diagnostics_storage_account_id': {'key': 'targetBootDiagnosticsStorageAccountId', 'type': 'str'},
        'target_network_id': {'key': 'targetNetworkId', 'type': 'str'},
        'test_network_id': {'key': 'testNetworkId', 'type': 'str'},
        'vm_nics': {'key': 'vmNics', 'type': '[InMageRcmNicInput]'},
        'license_type': {'key': 'licenseType', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        target_vm_name: Optional[str] = None,
        target_vm_size: Optional[str] = None,
        target_resource_group_id: Optional[str] = None,
        target_availability_set_id: Optional[str] = None,
        target_availability_zone: Optional[str] = None,
        target_proximity_placement_group_id: Optional[str] = None,
        target_boot_diagnostics_storage_account_id: Optional[str] = None,
        target_network_id: Optional[str] = None,
        test_network_id: Optional[str] = None,
        vm_nics: Optional[List["InMageRcmNicInput"]] = None,
        license_type: Optional[Union[str, "LicenseType"]] = None,
        **kwargs
    ):
        """Keyword-only constructor; every field except the discriminator is optional."""
        super(InMageRcmUpdateReplicationProtectedItemInput, self).__init__(**kwargs)
        # Discriminator constant identifying this provider-specific subtype.
        self.instance_type = 'InMageRcm'  # type: str
        self.target_vm_name = target_vm_name
        self.target_vm_size = target_vm_size
        self.target_resource_group_id = target_resource_group_id
        self.target_availability_set_id = target_availability_set_id
        self.target_availability_zone = target_availability_zone
        self.target_proximity_placement_group_id = target_proximity_placement_group_id
        self.target_boot_diagnostics_storage_account_id = target_boot_diagnostics_storage_account_id
        self.target_network_id = target_network_id
        self.test_network_id = test_network_id
        self.vm_nics = vm_nics
        self.license_type = license_type
class InMageReplicationDetails(ReplicationProviderSpecificSettings):
"""InMage provider specific settings.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. Gets the Instance type.Constant filled by server.
:type instance_type: str
:param active_site_type: The active location of the VM. If the VM is being protected from
Azure, this field will take values from { Azure, OnPrem }. If the VM is being protected between
two data-centers, this field will be OnPrem always.
:type active_site_type: str
:param source_vm_cpu_count: The CPU count of the VM on the primary side.
:type source_vm_cpu_count: int
:param source_vm_ram_size_in_mb: The RAM size of the VM on the primary side.
:type source_vm_ram_size_in_mb: int
:param os_details: The OS details.
:type os_details: ~azure.mgmt.recoveryservicessiterecovery.models.OSDiskDetails
:param protection_stage: The protection stage.
:type protection_stage: str
:param vm_id: The virtual machine Id.
:type vm_id: str
:param vm_protection_state: The protection state for the vm.
:type vm_protection_state: str
:param vm_protection_state_description: The protection state description for the vm.
:type vm_protection_state_description: str
:param resync_details: The resync details of the machine.
:type resync_details: ~azure.mgmt.recoveryservicessiterecovery.models.InitialReplicationDetails
:param retention_window_start: The retention window start time.
:type retention_window_start: ~datetime.datetime
:param retention_window_end: The retention window end time.
:type retention_window_end: ~datetime.datetime
:param compressed_data_rate_in_mb: The compressed data change rate in MB.
:type compressed_data_rate_in_mb: float
:param uncompressed_data_rate_in_mb: The uncompressed data change rate in MB.
:type uncompressed_data_rate_in_mb: float
:param rpo_in_seconds: The RPO in seconds.
:type rpo_in_seconds: long
:param protected_disks: The list of protected disks.
:type protected_disks:
list[~azure.mgmt.recoveryservicessiterecovery.models.InMageProtectedDiskDetails]
:param ip_address: The source IP address.
:type ip_address: str
:param last_heartbeat: The last heartbeat received from the source server.
:type last_heartbeat: ~datetime.datetime
:param process_server_id: The process server Id.
:type process_server_id: str
:param master_target_id: The master target Id.
:type master_target_id: str
:param consistency_points: The collection of Consistency points.
:type consistency_points: dict[str, ~datetime.datetime]
:param disk_resized: A value indicating whether any disk is resized for this VM.
:type disk_resized: str
:param reboot_after_update_status: A value indicating whether the source server requires a
restart after update.
:type reboot_after_update_status: str
:param multi_vm_group_id: The multi vm group Id, if any.
:type multi_vm_group_id: str
:param multi_vm_group_name: The multi vm group name, if any.
:type multi_vm_group_name: str
:param multi_vm_sync_status: A value indicating whether the multi vm sync is enabled or
disabled.
:type multi_vm_sync_status: str
:param agent_details: The agent details.
:type agent_details: ~azure.mgmt.recoveryservicessiterecovery.models.InMageAgentDetails
:param v_center_infrastructure_id: The vCenter infrastructure Id.
:type v_center_infrastructure_id: str
:param infrastructure_vm_id: The infrastructure VM Id.
:type infrastructure_vm_id: str
:param vm_nics: The PE Network details.
:type vm_nics: list[~azure.mgmt.recoveryservicessiterecovery.models.VMNicDetails]
:param discovery_type: A value indicating the discovery type of the machine.
:type discovery_type: str
:param azure_storage_account_id: A value indicating the underlying Azure storage account. If
the VM is not running in Azure, this value shall be set to null.
:type azure_storage_account_id: str
:param datastores: The datastores of the on-premise machine Value can be list of strings that
contain datastore names.
:type datastores: list[str]
:param validation_errors: The validation errors of the on-premise machine Value can be list of
validation errors.
:type validation_errors: list[~azure.mgmt.recoveryservicessiterecovery.models.HealthError]
:param last_rpo_calculated_time: The last RPO calculated time.
:type last_rpo_calculated_time: ~datetime.datetime
:param last_update_received_time: The last update time received from on-prem components.
:type last_update_received_time: ~datetime.datetime
:param replica_id: The replica id of the protected item.
:type replica_id: str
:param os_version: The OS Version of the protected item.
:type os_version: str
:param is_additional_stats_available: A value indicating whether additional IR stats are
available or not.
:type is_additional_stats_available: bool
:param total_data_transferred: The total transferred data in bytes.
:type total_data_transferred: long
:param total_progress_health: The progress health.
:type total_progress_health: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'active_site_type': {'key': 'activeSiteType', 'type': 'str'},
'source_vm_cpu_count': {'key': 'sourceVmCpuCount', 'type': 'int'},
'source_vm_ram_size_in_mb': {'key': 'sourceVmRamSizeInMB', 'type': 'int'},
'os_details': {'key': 'osDetails', 'type': 'OSDiskDetails'},
'protection_stage': {'key': 'protectionStage', 'type': 'str'},
'vm_id': {'key': 'vmId', 'type': 'str'},
'vm_protection_state': {'key': 'vmProtectionState', 'type': 'str'},
'vm_protection_state_description': {'key': 'vmProtectionStateDescription', 'type': 'str'},
'resync_details': {'key': 'resyncDetails', 'type': 'InitialReplicationDetails'},
'retention_window_start': {'key': 'retentionWindowStart', 'type': 'iso-8601'},
'retention_window_end': {'key': 'retentionWindowEnd', 'type': 'iso-8601'},
'compressed_data_rate_in_mb': {'key': 'compressedDataRateInMB', 'type': 'float'},
'uncompressed_data_rate_in_mb': {'key': 'uncompressedDataRateInMB', 'type': 'float'},
'rpo_in_seconds': {'key': 'rpoInSeconds', 'type': 'long'},
'protected_disks': {'key': 'protectedDisks', 'type': '[InMageProtectedDiskDetails]'},
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'last_heartbeat': {'key': 'lastHeartbeat', 'type': 'iso-8601'},
'process_server_id': {'key': 'processServerId', 'type': 'str'},
'master_target_id': {'key': 'masterTargetId', 'type': 'str'},
'consistency_points': {'key': 'consistencyPoints', 'type': '{iso-8601}'},
'disk_resized': {'key': 'diskResized', 'type': 'str'},
'reboot_after_update_status': {'key': 'rebootAfterUpdateStatus', 'type': 'str'},
'multi_vm_group_id': {'key': 'multiVmGroupId', 'type': 'str'},
'multi_vm_group_name': {'key': 'multiVmGroupName', 'type': 'str'},
'multi_vm_sync_status': {'key': 'multiVmSyncStatus', 'type': 'str'},
'agent_details': {'key': 'agentDetails', 'type': 'InMageAgentDetails'},
'v_center_infrastructure_id': {'key': 'vCenterInfrastructureId', 'type': 'str'},
'infrastructure_vm_id': {'key': 'infrastructureVmId', 'type': 'str'},
'vm_nics': {'key': 'vmNics', 'type': '[VMNicDetails]'},
'discovery_type': {'key': 'discoveryType', 'type': 'str'},
'azure_storage_account_id': {'key': 'azureStorageAccountId', 'type': 'str'},
'datastores': {'key': 'datastores', 'type': '[str]'},
'validation_errors': {'key': 'validationErrors', 'type': '[HealthError]'},
'last_rpo_calculated_time': {'key': 'lastRpoCalculatedTime', 'type': 'iso-8601'},
'last_update_received_time': {'key': 'lastUpdateReceivedTime', 'type': 'iso-8601'},
'replica_id': {'key': 'replicaId', 'type': 'str'},
'os_version': {'key': 'osVersion', | |
"""
Trains an image segmentation model with SGD.
python joint_train.py --seperate_background_channel --data_dir joint_fewshot_shards_uint8_background_channel --augment --epochs 10 --steps_per_epoch 2 --batch_size 3 --val_batches 2 --sgd --l2 --final_layer_dropout_rate 0.2 --rsd 2 --restore_efficient_net_weights_from models/efficientnet/efficientnet-b0
python joint_train.py --fp_k_test_set --seperate_background_channel --augment --epochs 10 --steps_per_epoch 2 --batch_size 3 --val_batches 2 --sgd --l2 --final_layer_dropout_rate 0.2 --rsd 2 --data_dir joint_fewshot_shards_uint8_background_channel_fp-k-test-set --restore_efficient_net_weights_from models/efficientnet/efficientnet-b0
python joint_train.py --test_on_val_set --seperate_background_channel --data_dir joint_fewshot_shards_uint8_background_channel_val-set/ --augment --epochs 10 --steps_per_epoch 2 --batch_size 3 --val_batches 2 --sgd --l2 --final_layer_dropout_rate 0.2 --rsd 2 --restore_efficient_net_weights_from models/efficientnet/efficientnet-b0
"""
import argparse
import os
import time
from functools import partial
from typing import List, Tuple, Optional, Callable
import numpy as np
import tensorflow as tf
from augmenters.np_augmenters import Augmenter, translate, fliplr, additive_gaussian_noise, exposure
from data.fss_1000_utils import TEST_TASK_IDS, TRAIN_TASK_IDS, FP_K_TEST_TASK_IDS
from joint_train.data.input_fn import TFRecordSegmentationDataset
from models.constants import SUPPORTED_MODELS
from models.efficientlab import EfficientLab
from meta_learners.supervised_reptile.supervised_reptile.reptile import Gecko
from utils.util import log_estimated_time_remaining
TRAIN_ID = "train"
VAL_ID = "val"
TEST_ID = "test"
def parse_args():
    """
    Returns an argument parser namespace for the image segmentation training script.

    Flags and defaults are unchanged; only user-facing help-text typos were
    fixed ("endpionts" -> "endpoints", "speced" -> "specified").
    """
    parser = argparse.ArgumentParser(description="Train segmentation model via SGD.")
    # Data
    parser.add_argument("--data_dir", help="Path to folder containing tfrecords", required=True)
    parser.add_argument("--fp_k_test_set", help="Hold out the test task for the fp-k classes.", action="store_true")
    parser.add_argument("--test_on_val_set", help="If specified, will train on train shards and test on val shards, else will train on both train and val and test on test.", action="store_true")
    # Model
    parser.add_argument('--model_name',
                        help="Name of the model architecture to meta-train. Must be in the set: {}.".format(SUPPORTED_MODELS), required=False,
                        default='EfficientLab')
    parser.add_argument("--rsd", help="List of integers specifying the 1-indexed reduction endpoints from EfficientNet to input into the lightweight skip decoding layers of EfficientLab.", type=int, nargs="+")
    parser.add_argument("--feature_extractor_name", help="efficientnet-b0 or efficientnet-b3", type=str, default="efficientnet-b0")
    parser.add_argument("--image_size", help="size of image in pixels. images assumed to be square", type=int, default=224)
    parser.add_argument("--seperate_background_channel", help="Whether or not to make a mutually exclusive background channel.", action='store_true', default=False)
    # Training
    parser.add_argument("--restore_efficient_net_weights_from", help="path to dir to restore efficientnet weights from", type=str, default=None)
    parser.add_argument('--sgd', help='use vanilla SGD instead of Adam', action='store_true')
    parser.add_argument('--loss_name', help='Name of the loss function to use. Should be cross_entropy, cross_entropy_dice, or ce_dice', default='ce_dice')
    parser.add_argument("--l2", help="Applies l2 weight decay to all weights in network", action="store_true")
    parser.add_argument("--augment", help="Apply augmentations to training data",
                        action="store_true")
    parser.add_argument("--final_layer_dropout_rate", help="Probability to dropout inputs at final layer.", type=float, default=0.0)
    parser.add_argument('--batch_size', help='Training batch size', default=64, type=int)
    parser.add_argument('--epochs', help='Number of training epochs', default=200, type=int)
    parser.add_argument("--steps_per_epoch", help="Number of gradient steps to take per epoch. If unspecified will be determined from batch size and number of examples.", type=int, default=None)
    parser.add_argument("--learning_rate", default=0.005, type=float)
    parser.add_argument("--final_learning_rate", default=5e-7, type=float)
    parser.add_argument("--label_smoothing", default=0.0, type=float)
    # Evaluation
    parser.add_argument("--val_batches", default=20, type=int)
    parser.add_argument('--pretrained', help='Evaluate a pre-trained model.',
                        action='store_true', default=False)
    parser.add_argument('--eval_interval', help='Training steps per evaluation', default=2, type=int)
    # Misc. config
    parser.add_argument('--seed', help='random seed', default=0, type=int)
    parser.add_argument('--checkpoint', help='Checkpoint directory to write to (or restore from).', default='/tmp/model_checkpoint', type=str)
    return parser.parse_args()
def get_model_kwargs(parsed_args):
    """
    Build the kwargs for model constructors from the
    parsed command-line arguments.

    Bug fix: the CLI registers ``--rsd`` (see parse_args), so the value lives
    at ``parsed_args.rsd``; the previous ``parsed_args.lsd`` lookup raised
    AttributeError whenever this function ran.
    """
    parsed_args.model_name = parsed_args.model_name.lower()
    if parsed_args.model_name not in SUPPORTED_MODELS:
        raise ValueError("Model name must be in the set: {}".format(SUPPORTED_MODELS))
    res = {'learning_rate': parsed_args.learning_rate}
    restore_ckpt_dir = parsed_args.restore_efficient_net_weights_from
    res["restore_ckpt_dir"] = restore_ckpt_dir
    # Reduction endpoints fed into EfficientLab's lightweight skip decoder.
    if parsed_args.rsd:
        res["rsd"] = parsed_args.rsd
    res["feature_extractor_name"] = parsed_args.feature_extractor_name
    res["l2"] = parsed_args.l2
    res["final_layer_dropout_rate"] = parsed_args.final_layer_dropout_rate
    res["label_smoothing"] = parsed_args.label_smoothing
    # Disable the dice loss term unless the loss name requests it.
    if "dice" not in parsed_args.loss_name:
        res["dice"] = False
    if parsed_args.sgd:
        res['optimizer'] = tf.train.GradientDescentOptimizer
    else:
        # Adam with beta1=0, as used by Reptile-style meta-training.
        res['optimizer'] = partial(tf.train.AdamOptimizer, beta1=0)
    res['loss_name'] = parsed_args.loss_name
    res["n_rows"] = parsed_args.image_size
    res["n_cols"] = parsed_args.image_size
    return res
def after_step():
    """Hook to be called after a step of gradient descent.

    Placeholder: not yet wired into train(); calling it raises.
    """
    raise NotImplementedError
def after_epoch():
    """Hook to be called after an epoch.

    Placeholder: not yet wired into train(); calling it raises.
    """
    raise NotImplementedError
def get_train_test_shards_from_dir(data_dir, ext: str = ".tfrecord.gzip", test_on_val_set: bool = False):
    """Split shard files in data_dir into (train, test) lists of full paths.

    Membership is decided by substring match of the TRAIN/VAL/TEST identifiers
    in the filename. With test_on_val_set, val shards become the test split
    and are excluded from training.
    """
    shard_names = [name for name in os.listdir(data_dir) if ext in name]
    train_shards = [name for name in shard_names if TEST_ID not in name]
    test_shards = [name for name in shard_names if TRAIN_ID not in name]
    if test_on_val_set:
        train_shards = [name for name in train_shards if VAL_ID not in name]
        test_shards = [name for name in shard_names if VAL_ID in name]
        # Every non-"test" shard must land in exactly one split.
        num_test_named = len([name for name in shard_names if TEST_ID in name])
        assert len(set(train_shards + test_shards)) == len(shard_names) - num_test_named
    else:
        assert len(set(train_shards + test_shards)) == len(shard_names)
    assert len(set(test_shards).intersection(set(train_shards))) == 0
    return ([os.path.join(data_dir, name) for name in train_shards],
            [os.path.join(data_dir, name) for name in test_shards])
def get_training_data(data_dir: str, num_classes: int, batch_size: int, image_size: int, ext: str = ".tfrecord.gzip", augment:bool = False, seperate_background_channel: bool = True, test_on_val_set: bool = False) -> Tuple[Tuple[tf.Tensor, tf.Tensor], tf.Operation]:
    """Build the training input pipeline from tfrecord shards in data_dir.

    Returns a 2-tuple (not 3 as previously annotated): the value produced by
    TFRecordSegmentationDataset.make_dataset — indexed by the caller as
    images at [0] and masks at [1] — and the dataset initializer op.
    """
    # test_shards is computed but unused here; only the train split feeds the pipeline.
    train_shards, test_shards = get_train_test_shards_from_dir(data_dir, ext, test_on_val_set=test_on_val_set)
    if augment:
        # translate() needs a per-channel fill value for pixels exposed by the
        # shift; with a separate background channel the background plane is 1.
        if seperate_background_channel:
            mask_filled_translate = partial(translate, mask_fill=[1] + [0] * num_classes)
        else:
            mask_filled_translate = partial(translate, mask_fill=[0] * num_classes)
        augmenter = Augmenter(aug_funcs=[mask_filled_translate, fliplr, additive_gaussian_noise, exposure])
    else:
        augmenter = None
    dataset = TFRecordSegmentationDataset(tfrecord_paths=train_shards, image_width=image_size, mask_channels=num_classes, augmenter=augmenter, seperate_background_channel=seperate_background_channel)
    dataset, ds_init_op = dataset.make_dataset(batch_size)
    return dataset, ds_init_op
def train(sess: tf.Session, model: EfficientLab, dataset_init_op: tf.Operation, epochs: int, steps_per_epoch: int, images, masks, save_dir: str, lr_fn: Callable, restore_ckpt_dir: Optional[str] = None, val_batches: int = 20, save_checkpoint_every_n_epochs: int = 2, time_deadline=None, max_checkpoints_to_keep: int = 2, eval_interval: int = 2, report_allocated_tensors_on_oom: bool = True):
    """
    Run the SGD training loop: optional checkpoint restore, variable
    initialization, per-epoch gradient steps, periodic validation and
    checkpointing.

    Args:
        sess: Open tf.Session the graph lives in.
        model: Model exposing minimize_op, lr_ph, loss and restore_model.
        dataset_init_op: Initializer op for the input dataset iterator.
        epochs: Number of epochs to run.
        steps_per_epoch: Gradient steps per epoch.
        images: Input image tensor (unused in this body; kept for interface compatibility).
        masks: Label mask tensor (unused in this body; kept for interface compatibility).
        save_dir: Directory checkpoints and the graph summary are written to.
        lr_fn: A function that takes in the epoch number and returns the learning rate. For constant, learning rate, define a lambda: lr_fn = lambda i: lr
        restore_ckpt_dir: If given, restore feature-extractor weights before training.
        val_batches: Number of batches to evaluate at the end of each epoch
        save_checkpoint_every_n_epochs: Checkpoint frequency in epochs.
        time_deadline: Optional time.time() value after which training stops early.
        max_checkpoints_to_keep: Passed to tf.train.Saver.
        eval_interval: Number of epochs between validation passes.
        report_allocated_tensors_on_oom: Attach RunOptions that dump allocations on OOM.
    Returns:
        None. IoU history is printed at the end.
    """
    assert isinstance(epochs, int)
    assert isinstance(steps_per_epoch, int)
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    print("Logging to {}".format(save_dir))
    saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
    if restore_ckpt_dir is not None:
        print("Restoring from checkpoint {}".format(restore_ckpt_dir))
        model.restore_model(sess, restore_ckpt_dir, filter_to_scopes=[model.feature_extractor_name])
    # NOTE(review): variables are initialized *after* the restore above. If
    # restore_model assigns variable values directly, the global initializer
    # below could clobber the restored weights — confirm restore_model's
    # mechanism (e.g. init-from-checkpoint vs. assign ops).
    try:
        if not model.variables_initialized:
            print("Initializing variables.")
            # NOTE(review): the next two lines both run the global initializer
            # (the first requires a registered default session); one of the
            # two appears redundant.
            tf.global_variables_initializer().run()
            sess.run(tf.global_variables_initializer())
    except AttributeError:
        # Model does not expose .variables_initialized: fall back to always initializing.
        print("Model does not explicitly track whether variable initialization has already been run on the graph at attribute .variables_initialized.")
        print("Initializing variables.")
        tf.global_variables_initializer().run()
        sess.run(tf.global_variables_initializer())
    print("Training...")
    sess.run(dataset_init_op)
    print("Saving graph definition to {}.".format(save_dir))
    saver.save(sess, os.path.join(save_dir, 'model.ckpt'), global_step=0)
    tf.summary.FileWriter(os.path.join(save_dir, 'train'), sess.graph)
    if report_allocated_tensors_on_oom:
        run_opts = tf.RunOptions(report_tensor_allocations_upon_oom=True)
    else:
        run_opts = None
    ious = []
    for i in range(epochs):
        start_time = time.time()
        print('Epoch: ', i)
        # Learning rate is scheduled per epoch by the caller-provided lr_fn.
        lr = lr_fn(i)
        print("lr: ", lr)
        for _ in range(steps_per_epoch):
            try:
                _ = sess.run(model.minimize_op, feed_dict={model.lr_ph: lr}, options=run_opts)
            except tf.errors.OutOfRangeError:
                # Dataset exhausted mid-epoch: re-initialize the iterator.
                # NOTE(review): the step that raised is skipped, not retried.
                sess.run(dataset_init_op, options=run_opts)
        print("Finished epoch {} with {} steps.".format(i, steps_per_epoch))
        # log_estimated_time_remaining returns the epoch duration in minutes.
        epoch_minutes = log_estimated_time_remaining(start_time, i, epochs, unit_name="epoch")
        iters_per_sec = steps_per_epoch / (epoch_minutes * 60)
        print("Iterations per second: {}".format(iters_per_sec))
        if i % eval_interval == 0:
            # TODO implement val set accuracy callback
            print("Validating")
            iou, loss = iou_callback(sess, model, val_batches, run_opts)
            print("Loss: {}".format(loss))
            print("IoU on epoch {} estimated on {} batches:".format(i, val_batches))
            print(iou)
            ious.append(iou)
        if i % save_checkpoint_every_n_epochs == 0 or i == epochs - 1:
            print("Saving checkpoint to {}.".format(save_dir))
            saver.save(sess, os.path.join(save_dir, 'model.ckpt'), global_step=i)
        if time_deadline is not None and time.time() > time_deadline:
            break
    print("Training complete. History:")
    print("Train set Intersection over Union (IoU):")
    print(ious)
def iou_callback(sess, model: EfficientLab, val_batches, run_opts):
    """Estimate mean IoU and mean loss over val_batches batches drawn from the
    current dataset iterator (inference mode: is_training_ph=False)."""
    iou_scores = []
    loss_values = []
    fetches = [model.input_ph, model.predictions, model.label_ph, model.loss]
    for _ in range(val_batches):
        _images, preds, labels, batch_loss = sess.run(
            fetches, options=run_opts, feed_dict={model.is_training_ph: False})
        iou_scores.append(compute_iou_metric(preds, labels))
        loss_values.append(batch_loss)
    # nanmean: batches whose IoU is undefined (NaN) are ignored.
    return np.nanmean(iou_scores), np.nanmean(loss_values)
def compute_iou_metric(predictions: np.ndarray, labels: np.ndarray):
    """Mean per-example IoU between batched (rank-4) predictions and labels."""
    assert len(predictions) == len(labels)
    assert len(predictions.shape) == 4
    # Score each example in the batch, then average ignoring NaNs.
    per_example = [
        Gecko._iou(pred, label, class_of_interest_channel=None)
        for pred, label in zip(predictions, labels)
    ]
    return np.nanmean(per_example)
def viz(images, preds, labels):
    """Debug helper: display images alongside their label and predicted masks."""
    from utils.debug_tf_dataset import plot_mask
    import matplotlib.pyplot as plt
    images = images / 255.
    if len(images.shape) == 4:
        # Batched input: walk every example.
        for idx in range(images.shape[0]):
            print("image")
            plt.figure(idx)
            plt.imshow(images[idx])
            plt.show()
            print("label mask")
            channel = plot_mask(labels[idx], idx + 1)
            print("predicted mask")
            plot_mask(preds[idx], idx + 2, channel_index=channel)
    else:
        # Single image: only the label mask is plotted here (preds skipped).
        plt.figure(0)
        plt.imshow(images)
        plt.show()
        plot_mask(labels, 1)
def main():
# Reference: https://github.com/SMHendryx/tf-segmentation-trainer/blob/master/train.py
start = time.time()
# Args:
args = parse_args()
data_dir = args.data_dir
learning_rate = args.learning_rate
final_learning_rate = args.final_learning_rate
epochs = args.epochs
#all_classes, train_classes = get_classes_from_dir(data_dir, ext=".tfrecord.gzip")
train_classes, test_classes = TRAIN_TASK_IDS, TEST_TASK_IDS
all_classes = sorted(list(train_classes + test_classes))
if args.fp_k_test_set:
test_classes = FP_K_TEST_TASK_IDS
train_classes = [x for x in all_classes if x not in test_classes]
assert len(set(test_classes).intersection(set(train_classes))) == 0, "train-test class names overlap"
assert len(train_classes + test_classes) == len(set(all_classes))
num_classes = len(all_classes)
next_element, dataset_init_op = get_training_data(data_dir, num_classes=num_classes, batch_size=args.batch_size, image_size=args.image_size, augment=args.augment, seperate_background_channel=args.seperate_background_channel, test_on_val_set=args.test_on_val_set)
images = next_element[0]
masks = next_element[1]
model_kwargs = get_model_kwargs(args)
restore_ckpt_dir = model_kwargs["restore_ckpt_dir"]
model = EfficientLab(images=images, labels=masks, n_classes=num_classes, seperate_background_channel=args.seperate_background_channel, binary_iou_loss=False, **model_kwargs)
if args.steps_per_epoch is None:
steps_per_epoch = int(760 * 10 // args.batch_size)
else:
steps_per_epoch = args.steps_per_epoch
def lr_fn(i, epochs=epochs, initial_lr=learning_rate, final_lr=final_learning_rate):
frac_done = i / epochs
cur_lr = frac_done | |
<filename>buildscripts/test_failures.py
#!/usr/bin/env python
"""Test Failures
Compute Test failures rates from Evergreen API for specified tests, tasks, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import datetime
import itertools
import operator
from optparse import OptionParser
import os
import urlparse
import requests
import yaml
_API_SERVER_DEFAULT = "http://evergreen-api.mongodb.com:8080"
_REST_PREFIX = "/rest/v1"
_PROJECT = "mongodb-mongo-master"
_MIN_DATE = "0001-01-01"
_MAX_DATE = "3000-12-31"
_HistoryReportTuple = collections.namedtuple(
"Report", "test task variant distro start_dt test_status")
def read_evg_config():
    """Locate and parse the user's Evergreen CLI configuration.

    Checks the known config locations in priority order and returns the
    parsed YAML for the first file found, or None when no config exists.
    """
    # Expand out evergreen config file possibilities
    file_list = [
        "./.evergreen.yml",
        os.path.expanduser("~/.evergreen.yml"),
        os.path.expanduser("~/cli_bin/.evergreen.yml")]
    for filename in file_list:
        if os.path.isfile(filename):
            with open(filename, "r") as fstream:
                # safe_load: the config is plain YAML; bare yaml.load without a
                # Loader is deprecated and can construct arbitrary objects.
                return yaml.safe_load(fstream)
    return None
def datestr_to_date(date_str):
    """Return the date parsed from a YYYY-MM-DD string.

    Any time component after a 'T' separator is discarded first."""
    day_part, _, _ = date_str.partition("T")
    return datetime.datetime.strptime(day_part, "%Y-%m-%d").date()
def date_to_datestr(date_time):
    """Return the YYYY-MM-DD string for a date/datetime value."""
    return "{:%Y-%m-%d}".format(date_time)
def list_or_none(lst):
    """Return the items joined by commas, or 'None' for an empty/missing list."""
    if not lst:
        return "None"
    return ",".join(str(item) for item in lst)
def normalize_test_file(test_file):
    """Normalize a test_file name and return it.

    - Backslashes become forward slashes.
    - Any '.exe' substring is removed (Windows binaries)."""
    with_forward_slashes = test_file.replace("\\", "/")
    return with_forward_slashes.replace(".exe", "")
def fail_rate(num_fail, num_pass):
    """Return the failure rate as a 3-decimal string, or 'N/A' when there were no runs."""
    total = num_fail + num_pass
    if not total:
        return "N/A"
    # True division (the module imports __future__ division for Python 2).
    return "{:.3f}".format(round(num_fail / total, 3))
class Missing(object):
    """Placeholder for a field absent from the history report."""

    def __init__(self, kind):
        # Human-readable label of what is missing (e.g. "distro").
        self.kind = kind

    def __str__(self):
        return self.kind
class ViewReport(object):
    """Class to support views (detail and summary) into the HistoryReport."""
    # Row type returned by the view_* methods.
    Summary = collections.namedtuple(
        "Summary",
        "test task variant distro start_date end_date fail_rate num_fail num_pass")
    # Key used to group individual records by attribute values and date range.
    DetailGroup = collections.namedtuple(
        "DetailGroup",
        "test task variant distro start_date end_date")
    # Attributes a summary view may group on.
    group_by = ["test", "task", "variant", "distro"]
    # Supported grouping periods.
    group_period_values = ["daily", "weekly"]
    # Supported anchors for the start of a weekly group.
    start_days = ["first_day", "sunday", "monday"]
    def __init__(self, history_report, group_period="weekly", start_day_of_week="first_day"):
        """Initialize the view over history_report.

        Args:
            history_report: Iterable of records, each with start_dt, test, task,
                variant, distro and test_status attributes.
            group_period: "daily" or "weekly".
            start_day_of_week: "first_day", "sunday" or "monday".

        Raises:
            ValueError: If group_period or start_day_of_week is unsupported.
        """
        self._report = history_report
        self.group_period = group_period.lower()
        if self.group_period not in self.group_period_values:
            raise ValueError(
                "Invalid group_period specified '{}'".format(self.group_period))
        self.group_days = self._num_days_for_group()
        self.start_day_of_week = start_day_of_week.lower()
        # Using 'first_day' means a weekly group report will start on the day of the
        # week from the earliest date in the test history.
        if self.start_day_of_week not in self.start_days:
            raise ValueError(
                "Invalid start_day_of_week specified '{}'".format(self.start_day_of_week))
        if history_report:
            start_dts = [r.start_dt for r in history_report]
            self.start_dt = min(start_dts)
            self.end_dt = max(start_dts)
        else:
            # Empty report: invert the range so no date can fall inside it.
            self.start_dt = datestr_to_date(_MAX_DATE)
            self.end_dt = datestr_to_date(_MIN_DATE)
    def _num_days_for_group(self):
        """Returns the number of days defined in the self.group_period."""
        if self.group_period == "daily":
            return 1
        return 7
    def _group_dates(self, test_dt):
        """Returns start_date and end_date for the group_period which include
        test_dt, clamped to the report's overall date range."""
        # Computing the start and end dates for a weekly period may have special cases for the
        # first and last periods. Since the first period may not start on the weekday for
        # self.start_day_of_week (if it's 'sunday' or 'monday'), that period may be less than 7
        # days. Similarly the last period will always end on self.end_dt.
        # Example, if the start_date falls on a Wednesday, then all group starting
        # dates are offset from that, if self.start_day_of_week is 'first_day'.
        # The start date for a 'weekly' group_period is one of the following:
        # - self.start_dt (the earliest date in the report)
        # - The day specified in self.start_day_of_week
        # - A weekly offset from self.start_dt, if self.start_day_of_week is 'first_day'
        # The ending date for a 'weekly' group_period is one of the following:
        # - self.end_dt (the latest date in the report)
        # - The mod of difference of weekday of test_dt and the start_weekday
        if test_dt < self.start_dt or test_dt > self.end_dt:
            raise ValueError("The test_dt {} must be >= {} and <= {}".format(
                test_dt, self.start_dt, self.end_dt))
        if self.group_period == "daily":
            return (test_dt, test_dt)
        if self.start_day_of_week == "sunday":
            start_weekday = 6
        elif self.start_day_of_week == "monday":
            start_weekday = 0
        elif self.start_day_of_week == "first_day":
            start_weekday = self.start_dt.weekday()
        # 'start_day_offset' is the number of days 'test_dt' is from the start of the week.
        start_day_offset = (test_dt.weekday() - start_weekday) % 7
        group_start_dt = test_dt - datetime.timedelta(days=start_day_offset)
        group_end_dt = group_start_dt + datetime.timedelta(days=6)
        return (max(group_start_dt, self.start_dt), min(group_end_dt, self.end_dt))
    def _select_attribute(self, value, attributes):
        """Returns true if the attribute value list is falsy (no filter) or value
        matches one of the listed attribute values."""
        return not attributes or value in attributes
    def _filter_reports(self,
                        start_date=_MIN_DATE,
                        end_date=_MAX_DATE,
                        tests=None,
                        tasks=None,
                        variants=None,
                        distros=None):
        """Returns filter of self._report."""
        # Records with no distro always pass the distro filter.
        return [r for r in self._report
                if r.start_dt >= datestr_to_date(start_date) and
                r.start_dt <= datestr_to_date(end_date) and
                self._select_attribute(r.test, tests) and
                self._select_attribute(r.task, tasks) and
                self._select_attribute(r.variant, variants) and
                (r.distro is None or self._select_attribute(r.distro, distros))]
    def _detail_report(self, report):
        """Returns the detailed report, which is a dictionary in the form of key tuples,
        '(test, task, variant, distro, start_date, end_date)', with a value of
        {num_pass, num_fail}."""
        detail_report = {}
        for record in report:
            # Bucket the record into its group period's date range.
            group_start_dt, group_end_dt = self._group_dates(record.start_dt)
            detail_group = self.DetailGroup(
                test=record.test,
                task=record.task,
                variant=record.variant,
                distro=record.distro,
                start_date=group_start_dt,
                end_date=group_end_dt)
            detail_report.setdefault(detail_group, {"num_pass": 0, "num_fail": 0})
            if record.test_status == "pass":
                status_key = "num_pass"
            else:
                status_key = "num_fail"
            detail_report[detail_group][status_key] += 1
        return detail_report
    def _summary_report(self, report, tests=None, tasks=None, variants=None, distros=None):
        """Returns the summary report for the specified combinations of parameters. The format
        is a namedtuple key with a {num_pass, num_fail} value, based on the _detail_report."""
        summary_report = {}
        if not report:
            return summary_report
        start_dt = min([r.start_dt for r in report])
        end_dt = max([r.start_dt for r in report])
        num_pass = sum([r.test_status == "pass" for r in report])
        num_fail = sum([r.test_status != "pass" for r in report])
        detail_group = self.DetailGroup(
            test=list_or_none(tests),
            task=list_or_none(tasks),
            variant=list_or_none(variants),
            distro=list_or_none(distros),
            start_date=start_dt,
            end_date=end_dt)
        summary_report[detail_group] = {"num_pass": num_pass, "num_fail": num_fail}
        return summary_report
    def view_detail(self, tests=None, tasks=None, variants=None, distros=None):
        """Provides a detailed view of specified parameters.
        The parameters are used as a filter, so an unspecified parameter provides
        more results.
        Returns the view as a list of namedtuples:
        (test, task, variant, distro, start_date, end_date, fail_rate, num_fail, num_pass)
        """
        filter_results = self._filter_reports(
            tests=tests, tasks=tasks, variants=variants, distros=distros)
        view_report = []
        detail_report = self._detail_report(filter_results)
        for detail_group in detail_report:
            view_report.append(self.Summary(test=detail_group.test,
                                            task=detail_group.task,
                                            variant=detail_group.variant,
                                            distro=detail_group.distro,
                                            start_date=detail_group.start_date,
                                            end_date=detail_group.end_date,
                                            fail_rate=fail_rate(
                                                detail_report[detail_group]["num_fail"],
                                                detail_report[detail_group]["num_pass"]),
                                            num_fail=detail_report[detail_group]["num_fail"],
                                            num_pass=detail_report[detail_group]["num_pass"]))
        return sorted(view_report)
    def view_summary_groups(self, group_on=None):
        """Provides a summary view report, based on the group_on list, for each self.group_period.
        If group_on is empty, then a total summary report is provided.
        Returns the view as a sorted list of namedtuples:
        (test, task, variant, distro, start_date, end_date, fail_rate, num_fail, num_pass)
        """
        group_on = group_on if group_on is not None else []
        # Discover all group_period date ranges
        group_periods = set()
        dt = self.start_dt
        while dt <= self.end_dt:
            group_periods.add(self._group_dates(dt))
            dt += datetime.timedelta(days=1)
        view_report = []
        for (start_dt, end_dt) in group_periods:
            view_report.extend(self.view_summary(group_on,
                                                 start_date=date_to_datestr(start_dt),
                                                 end_date=date_to_datestr(end_dt)))
        return sorted(view_report)
    def view_summary(self, group_on=None, start_date=_MIN_DATE, end_date=_MAX_DATE):
        """Provides a summary view report, based on the group_on list. If group_on is empty, then
        a total summary report is provided.
        Returns the view as a sorted list of namedtuples:
        (test, task, variant, distro, start_date, end_date, fail_rate, num_fail, num_pass)
        """
        group_on = group_on if group_on is not None else []
        for group_name in group_on:
            if group_name not in self.group_by:
                raise ValueError("Invalid group '{}' specified, the supported groups are {}"
                                 .format(group_name, self.group_by))
        # For each attribute either enumerate its distinct values (when grouped
        # on) or collapse it to a single Missing placeholder.
        tests = list(set([r.test for r in self._report])) \
            if "test" in group_on else [Missing("__all_tests")]
        tasks = list(set([r.task for r in self._report])) \
            if "task" in group_on else [Missing("__all_tasks")]
        variants = list(set([r.variant for r in self._report])) \
            if "variant" in group_on else [Missing("__all_variants")]
        distros = list(set([str(r.distro) for r in self._report])) \
            if "distro" in group_on else [Missing("__all_distros")]
        group_lists = [tests, tasks, variants, distros]
        group_combos = list(itertools.product(*group_lists))
        view_report = []
        for group in group_combos:
            # A Missing placeholder means "do not filter on this attribute".
            test_filter = [group[0]] if group[0] and not isinstance(group[0], Missing) else None
            task_filter = [group[1]] if group[1] and not isinstance(group[1], Missing) else None
            variant_filter = [group[2]] if group[2] and not isinstance(group[2], Missing) else None
            distro_filter = [group[3]] if group[3] and not isinstance(group[3], Missing) else None
            filter_results = self._filter_reports(start_date=start_date,
                                                  end_date=end_date,
                                                  tests=test_filter,
                                                  tasks=task_filter,
                                                  variants=variant_filter,
                                                  distros=distro_filter)
            summary_report = self._summary_report(filter_results,
                                                  tests=test_filter,
                                                  tasks=task_filter,
                                                  variants=variant_filter,
                                                  distros=distro_filter)
            for summary in summary_report:
                view_report.append(self.Summary(test=summary.test,
                                                task=summary.task,
                                                variant=summary.variant,
                                                distro=summary.distro,
                                                start_date=summary.start_date,
                                                end_date=summary.end_date,
                                                fail_rate=fail_rate(
                                                    summary_report[summary]["num_fail"],
                                                    summary_report[summary]["num_pass"]),
                                                num_fail=summary_report[summary]["num_fail"],
                                                num_pass=summary_report[summary]["num_pass"]))
        return sorted(view_report)
class HistoryReport(object):
"""The HistoryReport class interacts with the Evergreen REST API to generate a history_report.
The history_report is meant to be viewed from the ViewReport class methods."""
group_period_values = ["daily", "weekly"]
# TODO EVG-1653: Uncomment this line once the --sinceDate and --untilDate options are exposed.
# period_types | |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Dict, List, Optional, Union
import cx_Oracle
import numpy
from airflow.hooks.dbapi import DbApiHook
PARAM_TYPES = {bool, float, int, str}
def _map_param(value):
if value in PARAM_TYPES:
# In this branch, value is a Python type; calling it produces
# an instance of the type which is understood by the Oracle driver
# in the out parameter mapping mechanism.
value = value()
return value
class OracleHook(DbApiHook):
"""
Interact with Oracle SQL.
:param oracle_conn_id: The :ref:`Oracle connection id <howto/connection:oracle>`
used for Oracle credentials.
:type oracle_conn_id: str
"""
conn_name_attr = 'oracle_conn_id'
default_conn_name = 'oracle_default'
conn_type = 'oracle'
hook_name = 'Oracle'
supports_autocommit = True
def get_conn(self) -> 'OracleHook':
"""
Returns a oracle connection object
Optional parameters for using a custom DSN connection
(instead of using a server alias from tnsnames.ora)
The dsn (data source name) is the TNS entry
(from the Oracle names server or tnsnames.ora file)
or is a string like the one returned from makedsn().
:param dsn: the data source name for the Oracle server
:param service_name: the db_unique_name of the database
that you are connecting to (CONNECT_DATA part of TNS)
:param sid: Oracle System ID that identifies a particular
database on a system
You can set these parameters in the extra fields of your connection
as in
.. code-block:: python
{
"dsn": (
"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)"
"(HOST=host)(PORT=1521))(CONNECT_DATA=(SID=sid)))"
)
}
see more param detail in
`cx_Oracle.connect <https://cx-oracle.readthedocs.io/en/latest/module.html#cx_Oracle.connect>`_
"""
conn = self.get_connection(self.oracle_conn_id) # type: ignore[attr-defined]
conn_config = {'user': conn.login, 'password': <PASSWORD>}
sid = conn.extra_dejson.get('sid')
mod = conn.extra_dejson.get('module')
service_name = conn.extra_dejson.get('service_name')
port = conn.port if conn.port else 1521
if conn.host and sid and not service_name:
conn_config['dsn'] = cx_Oracle.makedsn(conn.host, port, sid)
elif conn.host and service_name and not sid:
conn_config['dsn'] = cx_Oracle.makedsn(conn.host, port, service_name=service_name)
else:
dsn = conn.extra_dejson.get('dsn')
if dsn is None:
dsn = conn.host
if conn.port is not None:
dsn += ":" + str(conn.port)
if service_name or conn.schema:
dsn += "/" + (service_name or conn.schema)
conn_config['dsn'] = dsn
if 'encoding' in conn.extra_dejson:
conn_config['encoding'] = conn.extra_dejson.get('encoding')
# if `encoding` is specific but `nencoding` is not
# `nencoding` should use same values as `encoding` to set encoding, inspired by
# https://github.com/oracle/python-cx_Oracle/issues/157#issuecomment-371877993
if 'nencoding' not in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('encoding')
if 'nencoding' in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('nencoding')
if 'threaded' in conn.extra_dejson:
conn_config['threaded'] = conn.extra_dejson.get('threaded')
if 'events' in conn.extra_dejson:
conn_config['events'] = conn.extra_dejson.get('events')
mode = conn.extra_dejson.get('mode', '').lower()
if mode == 'sysdba':
conn_config['mode'] = cx_Oracle.SYSDBA
elif mode == 'sysasm':
conn_config['mode'] = cx_Oracle.SYSASM
elif mode == 'sysoper':
conn_config['mode'] = cx_Oracle.SYSOPER
elif mode == 'sysbkp':
conn_config['mode'] = cx_Oracle.SYSBKP
elif mode == 'sysdgd':
conn_config['mode'] = cx_Oracle.SYSDGD
elif mode == 'syskmt':
conn_config['mode'] = cx_Oracle.SYSKMT
elif mode == 'sysrac':
conn_config['mode'] = cx_Oracle.SYSRAC
purity = conn.extra_dejson.get('purity', '').lower()
if purity == 'new':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_NEW
elif purity == 'self':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_SELF
elif purity == 'default':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_DEFAULT
conn = cx_Oracle.connect(**conn_config)
if mod is not None:
conn.module = mod
return conn
    def insert_rows(
        self,
        table: str,
        rows: List[tuple],
        target_fields=None,
        commit_every: int = 1000,
        replace: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """
        A generic way to insert a set of tuples into a table;
        the whole set of inserts is treated as one transaction.
        Changes from standard DbApiHook implementation:
        - Oracle SQL queries in cx_Oracle can not be terminated with a semicolon (`;`)
        - Replace NaN values with NULL (not using
          `is_nan()` because of input types error for strings)
        - Coerce datetime cells to Oracle DATETIME format during insert
        :param table: target Oracle table, use dot notation to target a
            specific database
        :type table: str
        :param rows: the rows to insert into the table
        :type rows: iterable of tuples
        :param target_fields: the names of the columns to fill in the table
        :type target_fields: iterable of str
        :param commit_every: the maximum number of rows to insert in one transaction
            Default 1000, Set greater than 0.
            Set 1 to insert each row in each single transaction
        :type commit_every: int
        :param replace: Whether to replace instead of insert
        :type replace: bool
        """
        if target_fields:
            target_fields = ', '.join(target_fields)
            target_fields = f'({target_fields})'
        else:
            target_fields = ''
        conn = self.get_conn()
        # Manage the transaction manually so commit_every controls commits.
        if self.supports_autocommit:
            self.set_autocommit(conn, False)
        cur = conn.cursor() # type: ignore[attr-defined]
        i = 0
        for row in rows:
            i += 1
            lst = []
            # Serialize each cell into an Oracle SQL literal.
            for cell in row:
                if isinstance(cell, str):
                    # Escape embedded single quotes by doubling them.
                    lst.append("'" + str(cell).replace("'", "''") + "'")
                elif cell is None:
                    lst.append('NULL')
                elif isinstance(cell, float) and numpy.isnan(cell): # coerce numpy NaN to NULL
                    lst.append('NULL')
                elif isinstance(cell, numpy.datetime64):
                    lst.append("'" + str(cell) + "'")
                elif isinstance(cell, datetime):
                    # Explicit format mask so the session NLS_DATE_FORMAT
                    # setting does not affect parsing.
                    lst.append(
                        "to_date('" + cell.strftime('%Y-%m-%d %H:%M:%S') + "','YYYY-MM-DD HH24:MI:SS')"
                    )
                else:
                    lst.append(str(cell))
            values = tuple(lst)
            sql = f"INSERT /*+ APPEND */ INTO {table} {target_fields} VALUES ({','.join(values)})"
            cur.execute(sql)
            if i % commit_every == 0:
                conn.commit() # type: ignore[attr-defined]
                self.log.info('Loaded %s into %s rows so far', i, table)
        # Final commit covers any partial chunk.
        conn.commit() # type: ignore[attr-defined]
        cur.close()
        conn.close() # type: ignore[attr-defined]
        self.log.info('Done loading. Loaded a total of %s rows', i)
def bulk_insert_rows(
self,
table: str,
rows: List[tuple],
target_fields: Optional[List[str]] = None,
commit_every: int = 5000,
):
"""
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each rows should have some order as table columns name
:type target_fields: iterable of str Or None
:param commit_every: the maximum number of rows to insert in one transaction
Default 5000. Set greater than 0. Set 1 to insert each row in each transaction
:type commit_every: int
"""
if not rows:
raise ValueError("parameter rows could not be None or empty iterable")
conn = self.get_conn()
if self.supports_autocommit:
self.set_autocommit(conn, False)
cursor = conn.cursor() # type: ignore[attr-defined]
values_base = target_fields if target_fields else rows[0]
prepared_stm = 'insert into {tablename} {columns} values ({values})'.format(
tablename=table,
columns='({})'.format(', '.join(target_fields)) if target_fields else '',
values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)),
)
row_count = 0
# Chunk the rows
row_chunk = []
for row in rows:
row_chunk.append(row)
row_count += 1
if row_count % commit_every == 0:
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit() # type: ignore[attr-defined]
self.log.info('[%s] inserted %s rows', table, row_count)
# Empty chunk
row_chunk = []
# Commit the leftover chunk
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit() # type: ignore[attr-defined]
self.log.info('[%s] inserted %s rows', table, row_count)
cursor.close()
conn.close() # type: ignore[attr-defined]
def callproc(
self,
identifier: str,
autocommit: bool = False,
parameters: Optional[Union[List, Dict]] = None,
) -> Optional[Union[List, Dict]]:
"""
Call the stored procedure identified by the provided string.
Any 'OUT parameters' must be provided with a value of either the
expected Python type (e.g., `int`) or an instance of that type.
The return value is a list or mapping that includes parameters in
both directions; the actual return type depends on the type of the
provided `parameters` argument.
See
https://cx-oracle.readthedocs.io/en/latest/api_manual/cursor.html#Cursor.var
for further reference.
"""
if parameters is None:
parameters = []
args = ",".join(
f":{name}"
for name in (parameters if isinstance(parameters, dict) else range(1, len(parameters) + 1))
)
sql = f"BEGIN {identifier}({args}); END;"
def handler(cursor):
if cursor.bindvars is None:
return
if isinstance(cursor.bindvars, list):
return [v.getvalue() for v | |
industry
common aliases for these indicators.
calculation_method (bool): If true, the measurement for each sample will be calculated first. If not, the
confusion matrix for each image (the output of function '_get_confusion_matrix')
will be returned. In this way, users should achieve the confusion matrixes for all
                                   images during an epoch and then use '_compute_confusion_matrix_metric' to calculate
the metric. Default: False.
decrease (Union[DecreaseMetric, str]): ["none", "mean", "sum", "mean_batch", "sum_batch", "mean_channel",
"sum_channel"]
Define the mode to reduce the calculation result of one batch of data.
Decrease is used only if calculation_method is True. Default: "mean".
"""
def __init__(self, skip_channel=True, metric_name="hit_rate", calculation_method=False,
decrease="mean"):
super().__init__()
self.skip_channel = skip_channel
self.metric_name = metric_name
self.calculation_method = calculation_method
self.decrease = decrease
    def __call__(self, y_pred, y):
        """
        Compute the configured confusion-matrix metric(s) for one batch.

        'y_pred' is expected to have binarized predictions and 'y' should be in one-hot format.

        Args:
            - **y_pred** (ndarray) - Input data to compute. It must be one-hot format and first dim is batch.
            - **y** (ndarray) - Ground truth to compute the metric. It must be one-hot format and first dim is batch.

        Returns:
            Reduced metric(s) plus not-NaN counts when self.calculation_method is True,
            otherwise the raw per-sample confusion-matrix counts.

        Raises:
            ValueError: If `metric_name` is empty.
            ValueError: when `y_pred` has less than two dimensions.
        """
        if not np.all(y.astype(np.uint8) == y):
            raise ValueError("The y should be a binarized ndarray.")
        dims = y_pred.ndim
        if dims < 2:
            raise ValueError("The y_pred should have at least two dimensions.")
        if dims == 2 or (dims == 3 and y_pred.shape[-1] == 1):
            if self.calculation_method:
                # NOTE(review): this permanently flips calculation_method off
                # on the instance once a 2-D (or trailing-1) input is seen,
                # changing the behavior of all later calls — looks unintended;
                # confirm before relying on it.
                self.calculation_method = False
        confusion_matrix = _get_confusion_matrix(y_pred=y_pred, y=y, skip_channel=self.skip_channel)
        if self.calculation_method:
            # Single metric: return (reduced chart, not-NaN count) directly.
            if isinstance(self.metric_name, str):
                confusion_matrix = _compute_confusion_matrix_metric(self.metric_name, confusion_matrix)
                chart, not_nans = _decrease_metric(confusion_matrix, self.decrease)
                return chart, not_nans
            if not self.metric_name:
                raise ValueError("There should be at least one metric name.")
            # Multiple metrics: results alternate [chart, not_nans, chart, ...].
            results = []
            for metric_name in self.metric_name:
                sub_confusion_matrix = _compute_confusion_matrix_metric(metric_name, confusion_matrix)
                chart, not_nans = _decrease_metric(sub_confusion_matrix, self.decrease)
                results.append(chart)
                results.append(not_nans)
            return results
        return confusion_matrix
def _get_confusion_matrix(y_pred, y, skip_channel=True):
    """
    Compute per-sample, per-class confusion-matrix counts.

    Returns an ndarray of shape [B, C, 4] holding (tp, fp, tn, fn) for every
    sample/class pair, where B is the batch size and C the number of classes.

    Args:
        y_pred (ndarray): binarized one-hot predictions; first dim is batch.
        y (ndarray): binarized one-hot ground truth; first dim is batch.
        skip_channel (bool): channel-handling flag. NOTE(review): the first
            channel is dropped when this is False, which looks inverted
            relative to the name — confirm intended semantics before changing.
            Default: True.

    Raises:
        ValueError: when `y_pred` and `y` have different shapes.
    """
    if not skip_channel:
        # Drop the first channel when more than one channel is present.
        y = y[:, 1:] if y.shape[1] > 1 else y
        y_pred = y_pred[:, 1:] if y_pred.shape[1] > 1 else y_pred
    y = y.astype(float)
    y_pred = y_pred.astype(float)
    validator.check('y_shape', y.shape, 'y_pred_shape', y_pred.shape)
    batch_size, n_class = y_pred.shape[:2]
    # Flatten all spatial dims so counts are per (sample, class).
    flat_pred = y_pred.reshape(batch_size, n_class, -1)
    flat_true = y.reshape(batch_size, n_class, -1)
    combined = flat_pred + flat_true
    true_pos = (combined == 2).astype(float).sum(axis=2)
    true_neg = (combined == 0).astype(float).sum(axis=2)
    positives = flat_true.sum(axis=2)
    negatives = flat_true.shape[-1] - positives
    false_neg = positives - true_pos
    false_pos = negatives - true_neg
    return np.stack([true_pos, false_pos, true_neg, false_neg], axis=-1)
def _decrease_mean(not_nans, chart):
not_nans = not_nans.sum(axis=1)
chart = np.where(not_nans > 0, chart.sum(axis=1) / not_nans, np.zeros(1, dtype=float))
not_nans = (not_nans > 0).astype(float).sum(axis=0)
chart = np.where(not_nans > 0, chart.sum(axis=0) / not_nans, np.zeros(1, dtype=float))
return not_nans, chart
def _decrease_sum(not_nans, chart):
not_nans = not_nans.sum(axis=(0, 1))
chart = np.sum(chart, axis=(0, 1))
return not_nans, chart
def _decrease_mean_batch(not_nans, chart):
not_nans = not_nans.sum(axis=0)
chart = np.where(not_nans > 0, chart.sum(axis=0) / not_nans, np.zeros(1, dtype=float))
return not_nans, chart
def _decrease_sum_batch(not_nans, chart):
not_nans = not_nans.sum(axis=0)
chart = chart.sum(axis=0)
return not_nans, chart
def _decrease_mean_channel(not_nans, chart):
not_nans = not_nans.sum(axis=1)
chart = np.where(not_nans > 0, chart.sum(axis=1) / not_nans, np.zeros(1, dtype=float))
return not_nans, chart
def _decrease_sum_channel(not_nans, chart):
not_nans = not_nans.sum(axis=1)
chart = chart.sum(axis=1)
return not_nans, chart
def _decrease_none(not_nans, chart):
return not_nans, chart
def _decrease_metric(chart, decrease="mean"):
    """
    Reduce the per-sample/per-class metric scores in *chart*.

    NaN entries are treated as missing: they are zeroed in place (note: this
    mutates the caller's array) and excluded from the valid-entry counts.

    Args:
        chart (ndarray): A data table containing the calculated measurement scores for each batch and class.
            The first two dims should be batch and class.
        decrease (str): One of "mean", "sum", "mean_batch", "sum_batch",
            "mean_channel", "sum_channel", "none". Default: "mean".

    Returns:
        tuple: (reduced chart, count of valid entries) per the selected mode.

    Raises:
        ValueError: when `decrease` is not a supported reduction mode.
    """
    nans = np.isnan(chart)
    not_nans = (~nans).astype(float)
    chart[nans] = 0
    # Map each mode to its reduction function and invoke only the selected one.
    # Previously every reduction ran eagerly while building the dict, and the
    # "mean_batch" entry held the uncalled function object, so selecting it
    # crashed on tuple unpacking.
    decrease_dict = {"mean": _decrease_mean,
                     "sum": _decrease_sum,
                     "mean_batch": _decrease_mean_batch,
                     "sum_batch": _decrease_sum_batch,
                     "mean_channel": _decrease_mean_channel,
                     "sum_channel": _decrease_sum_channel,
                     "none": _decrease_none}
    decrease_fn = decrease_dict.get(decrease)
    if decrease_fn is None:
        raise ValueError("Unsupported decrease mode '{}', expected one of {}.".format(
            decrease, sorted(decrease_dict)))
    not_nans, chart = decrease_fn(not_nans, chart)
    return chart, not_nans
def _calculate_tpr(tp, p):
"""Calculate tpr."""
return tp, p
def _calculate_tnr(tn, n):
"""Calculate tnr."""
return tn, n
def _calculate_ppv(tp, fp):
"""Calculate ppv."""
return tp, (tp + fp)
def _calculate_npv(tn, fn):
"""Calculate npv."""
return tn, (tn + fn)
def _calculate_fnr(fn, p):
"""Calculate fnr."""
return fn, p
def _calculate_fpr(fp, n):
"""Calculate fpr."""
return fp, n
def _calculate_fdr(tp, fp):
"""Calculate fdr."""
return fp, (fp + tp)
def _calculate_for(tn, fn):
"""Calculate for."""
return fn, (fn + tn)
def _calculate_pt(tp, tn, p, n):
"""Calculate pt."""
tpr = np.where(p > 0, tp / p, np.array(float("nan")))
tnr = np.where(n > 0, tn / n, np.array(float("nan")))
numerator = np.sqrt(tpr * (1.0 - tnr)) + tnr - 1.0
denominator = tpr + tnr - 1.0
return numerator, denominator
def _calculate_ts(tp, fp, fn):
"""Calculate ts."""
return tp, (tp + fn + fp)
def _calculate_acc(tp, tn, p, n):
"""Calculate acc."""
return (tp + tn), (p + n)
def _calculate_ba(tp, tn, p, n):
"""Calculate ba."""
tpr = np.where(p > 0, tp / p, np.array(float("nan")))
tnr = np.where(n > 0, tn / n, np.array(float("nan")))
numerator, denominator = (tpr + tnr), 2.0
return numerator, denominator
def _calculate_f1(tp, fp, fn):
"""Calculate f1."""
return tp * 2.0, (tp * 2.0 + fn + fp)
def _calculate_mcc(tp, fp, tn, fn):
"""Calculate mcc."""
numerator = tp * tn - fp * fn
denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator, denominator
def _calculate_fm(tp, fp, p):
"""Calculate fm."""
tpr = np.where(p > 0, tp / p, np.array(float("nan")))
ppv = np.where((tp + fp) > 0, tp / (tp + fp), np.array(float("nan")))
numerator = np.sqrt(ppv * tpr)
denominator = 1.0
return numerator, denominator
def _calculate_bm(tp, tn, p, n):
"""Calculate bm."""
tpr = np.where(p > 0, tp / p, np.array(float("nan")))
tnr = np.where(n > 0, tn / n, np.array(float("nan")))
numerator = tpr + tnr - 1.0
denominator = 1.0
return numerator, denominator
def _calculate_mk(tp, fp, tn, fn):
"""Calculate mk."""
ppv = np.where((tp + fp) > 0, tp / (tp + fp), np.array(float("nan")))
npv = np.where((tn + fn) > 0, tn / (tn + fn), np.array(float("nan")))
npv = tn / (tn + fn)
numerator = ppv + npv - 1.0
denominator = 1.0
return numerator, denominator
def _compute_confusion_matrix_metric(metric_name, confusion_matrix):
    """
    Compute a confusion-matrix-derived metric.

    Args:
        metric_name (str): Refer to ConfusionMatrixMetric 'metric_name'. Some of the metrics have multiple
            aliases (as shown in the wikipedia page aforementioned), and you can also input those names instead.
        confusion_matrix (ndarray): Refer to '_get_confusion_matrix'; the last axis holds (tp, fp, tn, fn).

    Returns:
        ndarray: the metric values; entries with a zero denominator are NaN.

    Raises:
        ValueError: when the size of the last dimension of confusion_matrix is not 4.
        NotImplementedError: when specifying a metric_name that is not implemented.
    """
    metric = _check_metric_name(metric_name)
    input_dim = confusion_matrix.ndim
    if input_dim == 1:
        confusion_matrix = np.expand_dims(confusion_matrix, 0)
    if confusion_matrix.shape[-1] != 4:
        raise ValueError("The size of the last dimension of confusion_matrix should be 4.")
    tp = confusion_matrix[..., 0]
    fp = confusion_matrix[..., 1]
    tn = confusion_matrix[..., 2]
    fn = confusion_matrix[..., 3]
    p = tp + fn
    n = fp + tn
    # Dispatch lazily so only the requested metric is evaluated; previously the
    # table computed every metric eagerly on each call.
    metric_name_dict = {"tpr": lambda: _calculate_tpr(tp, p),
                        "tnr": lambda: _calculate_tnr(tn, n),
                        "ppv": lambda: _calculate_ppv(tp, fp),
                        "npv": lambda: _calculate_npv(tn, fn),
                        "fnr": lambda: _calculate_fnr(fn, p),
                        "fpr": lambda: _calculate_fpr(fp, n),
                        "fdr": lambda: _calculate_fdr(tp, fp),
                        "for": lambda: _calculate_for(tn, fn),
                        "pt": lambda: _calculate_pt(tp, tn, p, n),
                        "ts": lambda: _calculate_ts(tp, fp, fn),
                        "acc": lambda: _calculate_acc(tp, tn, p, n),
                        "ba": lambda: _calculate_ba(tp, tn, p, n),
                        "f1": lambda: _calculate_f1(tp, fp, fn),
                        "mcc": lambda: _calculate_mcc(tp, fp, tn, fn),
                        "fm": lambda: _calculate_fm(tp, fp, p),
                        "bm": lambda: _calculate_bm(tp, tn, p, n),
                        "mk": lambda: _calculate_mk(tp, fp, tn, fn)}
    calculator = metric_name_dict.get(metric)
    if calculator is None:
        raise NotImplementedError("The metric '{}' is not implemented.".format(metric))
    numerator, denominator = calculator()
    if isinstance(denominator, np.ndarray):
        result = np.where(denominator != 0, numerator / denominator, np.array(float("nan")))
    else:
        result = numerator / denominator
    return result
def _check_metric_name(metric_name):
"""
There are many metrics related | |
generate the ortho-mosaic from.
The image_collection can be a portal Item or an image service URL or a URI
The image_collection must exist.
----------------------------------- --------------------------------------------------------------------
out_ortho Required. This is the ortho-mosaicked image converted from the image
collection after the block adjustment.
It can be a url, uri, portal item, or string representing the name of output dem
(either existing or to be created.)
Like Raster Analysis services, the service can be an existing multi-tenant service URL.
----------------------------------- --------------------------------------------------------------------
regen_seamlines Optional, boolean.
Choose whether to apply seamlines before the orthomosaic image generation or not.
The seamlines will always be regenerated if this parameter is set to True.
The user can set the seamline options through the context parameter.
If the seamline generation options are not set, the default will be used.
Default value is True
----------------------------------- --------------------------------------------------------------------
recompute_color_correction Optional, boolean.
Choose whether to apply color correction settings to the output ortho-image or not.
Color correction will always be recomputed if this option is set to True.
The user can configure the compute color correction settings through the context parameter.
If there is no color collection setting, the default will be used.
Default value is True
----------------------------------- --------------------------------------------------------------------
context Optional dictionary. Context contains additional environment settings that affect output
image. The supported environment settings for this tool are:
1. Output Spatial Reference (outSR)-the output features will
be projected into the output spatial reference.
2. Extent (extent) - extent that would clip or expand the output image
----------------------------------- --------------------------------------------------------------------
gis Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
=================================== ====================================================================
:return:
The Orthomosaicked Imagery layer item
'''
gis = arcgis.env.active_gis if gis is None else gis
task = 'GenerateOrthomosaic'
params = {}
folder = None
folderId = None
_set_image_collection_param(gis, params, image_collection)
if isinstance(out_ortho, Item):
params["outputOrthoImage"] = json.dumps({"itemId": out_ortho.itemid})
elif isinstance(out_ortho, str):
if ("/") in out_ortho or ("\\") in out_ortho:
if 'http:' in out_ortho or 'https:' in out_ortho:
params['outputOrthoImage'] = json.dumps({ 'url' : out_ortho })
else:
params['outputOrthoImage'] = json.dumps({ 'uri' : out_ortho })
else:
result = gis.content.search("title:"+str(out_ortho), item_type = "Imagery Layer")
out_ortho_result = None
for element in result:
if str(out_ortho) == element.title:
out_ortho_result = element
if out_ortho_result is not None:
params["outputOrthoImage"]= json.dumps({"itemId": out_ortho_result.itemid})
else:
doesnotexist = gis.content.is_service_name_available(out_ortho, "Image Service")
if doesnotexist:
if kwargs is not None:
if "folder" in kwargs:
folder = kwargs["folder"]
if folder is not None:
if isinstance(folder, dict):
if "id" in folder:
folderId = folder["id"]
folder=folder["title"]
else:
owner = gis.properties.user.username
folderId = gis._portal.get_folder_id(owner, folder)
if folderId is None:
folder_dict = gis.content.create_folder(folder, owner)
folder = folder_dict["title"]
folderId = folder_dict["id"]
params["outputOrthoImage"] = json.dumps({"serviceProperties": {"name" : out_ortho}, "itemProperties": {"folderId" : folderId}})
else:
params["outputOrthoImage"] = json.dumps({"serviceProperties": {"name" : out_ortho}})
if regen_seamlines is not None:
if not isinstance(regen_seamlines, bool):
raise TypeError("The 'regen_seamlines' parameter must be a boolean")
params['regenSeamlines'] = regen_seamlines
if recompute_color_correction is not None:
if not isinstance(recompute_color_correction, bool):
raise TypeError("The 'recompute_color_correction' parameter must be a boolean")
params['recomputeColorCorrection'] = recompute_color_correction
_set_context(params, context)
job_values = _execute_task(gis, task, params)
output_service= gis.content.get(job_values["result"]["itemId"])
return output_service
###################################################################################################
## Generate report
###################################################################################################
def generate_report(image_collection, report_format="PDF", *, gis=None, **kwargs):
    """
    Generate an orthomapping report for an image collection that has been
    block adjusted. The report contains information about the quality of
    the adjusted images, the distribution of the control points, etc.
    The output of this service tool is a downloadable report in the
    requested format.

    =================== ====================================================================
    **Argument**        **Description**
    ------------------- --------------------------------------------------------------------
    image_collection    Required. the input image collection that should be
                        used to generate a report from.

                        The image_collection can be a portal Item or an image service URL or a URI

                        The image_collection must exist.
    ------------------- --------------------------------------------------------------------
    report_format       Type of the format to be generated. Possible PDF, HTML. Default - PDF
    ------------------- --------------------------------------------------------------------
    gis                 Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
    =================== ====================================================================

    :return:
        The URL of the generated orthomapping report
    """
    gis = arcgis.env.active_gis if gis is None else gis

    params = {}
    _set_image_collection_param(gis, params, image_collection)

    report_format_allowed_values = ['PDF', 'HTML']
    # Accept any capitalization from the caller, but send the canonical
    # spelling expected by the service.
    canonical = next((value for value in report_format_allowed_values
                      if value.lower() == report_format.lower()), None)
    if canonical is None:
        raise RuntimeError('report_format can only be one of the following: '
                           + str(report_format_allowed_values))
    params["reportFormat"] = canonical

    task = 'GenerateReport'
    job_values = _execute_task(gis, task, params)
    return job_values["outReport"]["url"]
###################################################################################################
## query camera info
###################################################################################################
def query_camera_info(camera_query=None,
                      *,
                      gis=None,
                      **kwargs):
    '''
    Query the digital camera database, either in full or filtered by a SQL
    expression. The database holds the sensor specs of digital cameras used
    to capture drone imagery.

    ================== ====================================================================
    **Argument**       **Description**
    ------------------ --------------------------------------------------------------------
    camera_query       Required String. This is a SQL query statement that can
                       be used to filter a portion of the digital camera
                       database. Queryable fields: Make, Model, Focallength,
                       Columns, Rows, PixelSize.

                       Eg. "Make='Rollei' and Model='RCP-8325'"
    ------------------ --------------------------------------------------------------------
    gis                Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
    ================== ====================================================================

    :return:
        Data Frame representing the camera database
    '''
    import numpy as np
    import pandas as pd

    gis = arcgis.env.active_gis if gis is None else gis

    params = {}
    if camera_query is not None:
        if not isinstance(camera_query, str):
            raise TypeError("The 'camera_query' parameter must be a string")
        params['query'] = camera_query

    job_values = _execute_task(gis, 'QueryCameraInfo', params)

    # Show every row of the (small) camera table instead of pandas' default
    # truncated display.
    pd.set_option('display.max_rows', None)
    camera_info = job_values["outputCameraInfo"]
    return pd.DataFrame(np.array(camera_info["content"]),
                        columns=camera_info["schema"])
###################################################################################################
## query control points
###################################################################################################
def query_control_points(image_collection,
                         query,
                         *,
                         gis=None,
                         **kwargs):
    '''
    Query for control points in an image collection. It allows users to query
    among certain control point sets that has ground control points inside.

    ================== ====================================================================
    **Argument**       **Description**
    ------------------ --------------------------------------------------------------------
    image_collection   Required, the input image collection on which to query
                       the control points.

                       The image_collection can be a portal Item or an image service URL or a URI.

                       The image_collection must exist.
    ------------------ --------------------------------------------------------------------
    query              Required string. a SQL statement used for querying the point;
                       e.g. "pointID > 100"
    ------------------ --------------------------------------------------------------------
    gis                Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
    ================== ====================================================================

    :return:
        A dictionary object
    '''
    gis = arcgis.env.active_gis if gis is None else gis

    params = {}
    _set_image_collection_param(gis, params, image_collection)

    if not isinstance(query, str):
        raise TypeError("The 'query' parameter must be a string")
    params['where'] = query

    job_values = _execute_task(gis, 'QueryControlPoints', params)

    out_control_points = job_values["outControlPoints"]
    if out_control_points is None:
        return out_control_points

    # Fetch the actual point payload from the result URL through the
    # orthomapping helper service.
    gptool_url = gis.properties.helperServices.orthoMapping.url
    gptool = arcgis.gis._GISResource(gptool_url, gis)
    return gptool._con.post(out_control_points["url"], {}, token=gptool._token)
###################################################################################################
## Reset image collection
###################################################################################################
def reset_image_collection(image_collection,
                           *,
                           gis=None,
                           **kwargs):
    '''
    Reset the image collection to its original, un-adjusted state. The
    collection may have been block adjusted during the orthomapping workflow;
    if the user is not satisfied with the result, this clears any existing
    adjustment settings and reverts the images.

    ================== ====================================================================
    **Argument**       **Description**
    ------------------ --------------------------------------------------------------------
    image_collection   Required, the input image collection to reset

                       The image_collection can be a portal Item or an image service URL or a URI.

                       The image_collection must exist.
    ------------------ --------------------------------------------------------------------
    gis                Optional GIS. The GIS on which this tool runs. If not specified, the active GIS is used.
    ================== ====================================================================

    :return:
        A boolean indicating whether the reset was successful or not
    '''
    gis = arcgis.env.active_gis if gis is None else gis

    params = {}
    _set_image_collection_param(gis, params, image_collection)

    job_values = _execute_task(gis, 'ResetImageCollection', params)
    return job_values["result"]
def compute_spatial_reference_factory_code(latitude, longitude):
"""
Computes spatial reference factory code. This value may be used as out_sr value in create image collection function
Parameters
----------
latitude : latitude value in decimal degress that will be used to compute UTM zone
| |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import pickle
import pipes
import platform
import re
import shutil
import tempfile
import libcxx.test.format
import lit
import lit.LitConfig
import lit.Test
import lit.TestRunner
import lit.util
class ConfigurationError(Exception):
    """Base class for errors raised while probing the test configuration."""
    pass
class ConfigurationCompilationError(ConfigurationError):
    """Raised when a configuration probe program fails to compile or link."""
    pass
class ConfigurationRuntimeError(ConfigurationError):
    """Raised when a configuration probe program fails at run time."""
    pass
def _memoizeExpensiveOperation(extractCacheKey):
    """
    Allows memoizing a very expensive operation.

    We pickle the cache key to make sure we store an immutable representation
    of it. If we stored an object and the object was referenced elsewhere, it
    could be changed from under our feet, which would break the cache.

    We also store the cache for a given function persistently across invocations
    of Lit. This dramatically speeds up the configuration of the test suite when
    invoking Lit repeatedly, which is important for developer workflow. However,
    with the current implementation that does not synchronize updates to the
    persistent cache, this also means that one should not call a memoized
    operation from multiple threads. This should normally not be a problem
    since Lit configuration is single-threaded.
    """
    import functools
    def decorator(function):
        # functools.wraps preserves the wrapped function's __name__ (which is
        # also used below as the persistent cache filename) and docstring.
        @functools.wraps(function)
        def f(config, *args, **kwargs):
            cacheRoot = os.path.join(config.test_exec_root, '__config_cache__')
            persistentCache = os.path.join(cacheRoot, function.__name__)
            # exist_ok avoids a race between the existence check and creation.
            os.makedirs(cacheRoot, exist_ok=True)

            cache = {}
            # Load a cache from a previous Lit invocation if there is one.
            if os.path.exists(persistentCache):
                with open(persistentCache, 'rb') as cacheFile:
                    cache = pickle.load(cacheFile)

            # Pickle the key so the stored representation is immutable.
            cacheKey = pickle.dumps(extractCacheKey(config, *args, **kwargs))
            if cacheKey not in cache:
                cache[cacheKey] = function(config, *args, **kwargs)
                # Update the persistent cache so it knows about the new key
                with open(persistentCache, 'wb') as cacheFile:
                    pickle.dump(cache, cacheFile)
            return cache[cacheKey]
        return f
    return decorator
def _executeScriptInternal(test, commands):
    """
    Run `commands` (a list of RUN:-style command lines) in the context of
    `test` using Lit's internal shell.

    Returns (stdout, stderr, exitCode, timeoutInfo)

    TODO: This really should be easier to access from Lit itself
    """
    # Expand the test's substitutions (%{cxx}, %t, ...) into concrete commands.
    parsedCommands = libcxx.test.format.parseScript(test, preamble=commands)
    # Build a throwaway LitConfig purely to drive the internal script runner;
    # only isWindows affects how commands are executed here.
    litConfig = lit.LitConfig.LitConfig(
        progname='lit',
        path=[],
        quiet=False,
        useValgrind=False,
        valgrindLeakCheck=False,
        valgrindArgs=[],
        noExecute=False,
        debug=False,
        isWindows=platform.system() == 'Windows',
        params={})
    _, tmpBase = libcxx.test.format._getTempPaths(test)
    execDir = os.path.dirname(test.getExecPath())
    res = lit.TestRunner.executeScriptInternal(test, litConfig, tmpBase, parsedCommands, execDir)
    if isinstance(res, lit.Test.Result): # Handle failure to parse the Lit test
        # 127 conventionally means "command not found"; parse failure is
        # reported the same way with the parser's message as stderr.
        res = ('', res.output, 127, None)
    (out, err, exitCode, timeoutInfo) = res
    # TODO: As a temporary workaround until https://reviews.llvm.org/D81892 lands, manually
    # split any stderr output that is included in stdout. It shouldn't be there, but
    # the Lit internal shell conflates stderr and stdout.
    # The matched group is always non-empty, so len() below is never zero.
    conflatedErrorOutput = re.search("(# command stderr:.+$)", out, flags=re.DOTALL)
    if conflatedErrorOutput:
        conflatedErrorOutput = conflatedErrorOutput.group(0)
        out = out[:-len(conflatedErrorOutput)]
        err += conflatedErrorOutput
    return (out, err, exitCode, timeoutInfo)
def _makeConfigTest(config):
    """
    Create a dummy Lit test used solely to probe the configuration.

    Returns a context-manager test object: entering it creates the test's
    scratch directory (%T) and exiting removes both the scratch directory
    and the temporary source file.
    """
    # Make sure the support directories exist, which is needed to create
    # the temporary file %t below.
    sourceRoot = os.path.join(config.test_exec_root, '__config_src__')
    execRoot = os.path.join(config.test_exec_root, '__config_exec__')
    for supportDir in (sourceRoot, execRoot):
        if not os.path.exists(supportDir):
            os.makedirs(supportDir)
    # Create a dummy test suite and single dummy test inside it. As part of
    # the Lit configuration, automatically do the equivalent of 'mkdir %T'
    # and 'rm -r %T' to avoid cluttering the build directory.
    suite = lit.Test.TestSuite('__config__', sourceRoot, execRoot, config)
    # delete=False because the file must outlive this NamedTemporaryFile
    # object; it is removed explicitly in TestWrapper.__exit__ below.
    tmp = tempfile.NamedTemporaryFile(dir=sourceRoot, delete=False, suffix='.cpp')
    tmp.close()
    pathInSuite = [os.path.relpath(tmp.name, sourceRoot)]
    class TestWrapper(lit.Test.Test):
        # Entering the context creates the per-test scratch directory.
        def __enter__(self):
            testDir, _ = libcxx.test.format._getTempPaths(self)
            os.makedirs(testDir)
            return self
        # Exiting removes the scratch directory and the temporary source file.
        def __exit__(self, *args):
            testDir, _ = libcxx.test.format._getTempPaths(self)
            shutil.rmtree(testDir)
            os.remove(tmp.name)
    return TestWrapper(suite, pathInSuite, config)
@_memoizeExpensiveOperation(lambda c, s, f=[]: (c.substitutions, c.environment, s, f))
def sourceBuilds(config, source, additionalFlags=None):
    """
    Return whether the program in the given string builds successfully.

    This is done by compiling and linking a program that consists of the given
    source with the %{cxx} substitution, and seeing whether that succeeds. If
    any additional flags are passed, they are appended to the compiler invocation.
    """
    # Avoid a mutable default argument; None stands in for "no extra flags".
    if additionalFlags is None:
        additionalFlags = []
    with _makeConfigTest(config) as test:
        with open(test.getSourcePath(), 'w') as sourceFile:
            sourceFile.write(source)
        _, _, exitCode, _ = _executeScriptInternal(test, ['%{{build}} {}'.format(' '.join(additionalFlags))])
        return exitCode == 0
@_memoizeExpensiveOperation(lambda c, p, args=None: (c.substitutions, c.environment, p, args))
def programOutput(config, program, args=None):
    """
    Compile the given program for the test target, run it there and return
    its standard output.

    Execution goes through the %{exec} substitution, so the program may be
    run on a remote host depending on what %{exec} does.
    """
    runArgs = [] if args is None else args
    with _makeConfigTest(config) as test:
        with open(test.getSourcePath(), 'w') as sourceFile:
            sourceFile.write(program)
        _, buildErr, buildExit, _ = _executeScriptInternal(test, ['%{build}'])
        if buildExit != 0:
            raise ConfigurationCompilationError("Failed to build program, stderr is:\n{}".format(buildErr))
        runOut, runErr, runExit, _ = _executeScriptInternal(test, ["%{{run}} {}".format(' '.join(runArgs))])
        if runExit != 0:
            raise ConfigurationRuntimeError("Failed to run program, stderr is:\n{}".format(runErr))
        # Lit frames the program's output; extract just the payload.
        match = re.search("# command output:\n(.+)\n$", runOut, flags=re.DOTALL)
        return match.group(1) if match else ""
@_memoizeExpensiveOperation(lambda c, p, args=None: (c.substitutions, c.environment, p, args))
def programSucceeds(config, program, args=None):
    """
    Compile the given program for the test target, run it there and report
    whether it completed successfully.

    Execution goes through the %{exec} substitution, so the program may be
    run on a remote host depending on what %{exec} does. Compilation
    failures are not caught here and propagate to the caller.
    """
    try:
        programOutput(config, program, args)
        return True
    except ConfigurationRuntimeError:
        return False
@_memoizeExpensiveOperation(lambda c, f: (c.substitutions, c.environment, f))
def hasCompileFlag(config, flag):
    """
    Return whether the compiler in the configuration supports a given compiler flag.

    This is done by executing the %{cxx} substitution with the given flag and
    checking whether that succeeds.
    """
    with _makeConfigTest(config) as test:
        # Only the exit code matters; compiling os.devnull with -fsyntax-only
        # is enough to learn whether the driver accepts the flag.
        _, _, exitCode, _ = _executeScriptInternal(test, [
            "%{{cxx}} -xc++ {} -Werror -fsyntax-only %{{flags}} %{{compile_flags}} {}".format(os.devnull, flag)
        ])
        return exitCode == 0
@_memoizeExpensiveOperation(lambda c, s: (c.substitutions, c.environment, s))
def runScriptExitCode(config, script):
    """
    Run the given script as a Lit test and return the exit code of the execution.

    The script must be a list of commands, each of which being something that
    could appear on the right-hand-side of a `RUN:` keyword.
    """
    with _makeConfigTest(config) as test:
        # _executeScriptInternal returns (out, err, exitCode, timeoutInfo);
        # only the exit code is of interest here.
        exitCode = _executeScriptInternal(test, script)[2]
        return exitCode
@_memoizeExpensiveOperation(lambda c, s: (c.substitutions, c.environment, s))
def commandOutput(config, command):
    """
    Runs the given script as a Lit test, and returns the output.
    If the exit code isn't 0 an exception is raised.

    The script must be a list of commands, each of which being something that
    could appear on the right-hand-side of a `RUN:` keyword.
    """
    with _makeConfigTest(config) as test:
        out, err, exitCode, _ = _executeScriptInternal(test, command)
        if exitCode != 0:
            # Include stderr in the exception instead of raising an empty
            # error, for consistency with programOutput above.
            raise ConfigurationRuntimeError("Failed to run command, stderr is:\n{}".format(err))
        return out
@_memoizeExpensiveOperation(lambda c, l: (c.substitutions, c.environment, l))
def hasAnyLocale(config, locales):
    """
    Return whether the runtime execution environment supports a given locale.
    Different systems may use different names for a locale, so this function checks
    whether any of the passed locale names is supported by setlocale() and returns
    true if one of them works.

    This is done by executing a program that tries to set the given locale using
    %{exec} -- this means that the command may be executed on a remote host
    depending on the %{exec} substitution.
    """
    # The probe exits 0 as soon as one of the locales passed on its command
    # line can be set; it exits 1 otherwise, or unconditionally when
    # localization is disabled in the library.
    program = """
    #include <stddef.h>
    #if defined(_LIBCPP_HAS_NO_LOCALIZATION)
    int main(int, char**) { return 1; }
    #else
    #include <locale.h>
    int main(int argc, char** argv) {
      for (int i = 1; i < argc; i++) {
        if (::setlocale(LC_ALL, argv[i]) != NULL) {
          return 0;
        }
      }
      return 1;
    }
    #endif
    """
    # pipes.quote protects locale names containing spaces or shell metacharacters.
    return programSucceeds(config, program, args=[pipes.quote(l) for l in locales])
@_memoizeExpensiveOperation(lambda c, flags='': (c.substitutions, c.environment, flags))
def compilerMacros(config, flags=''):
    """
    Return a dictionary of predefined compiler macros.

    The keys are strings representing macros, and the values are strings
    representing what each macro is defined to.

    If the optional `flags` argument (a string) is provided, these flags will
    be added to the compiler invocation when generating the macros.
    """
    with _makeConfigTest(config) as test:
        with open(test.getSourcePath(), 'w') as sourceFile:
            # Make sure files like <__config> are included, since they can define
            # additional macros.
            sourceFile.write("#include <stddef.h>")
        unparsedOutput, err, exitCode, _ = _executeScriptInternal(test, [
            "%{{cxx}} %s -dM -E %{{flags}} %{{compile_flags}} {}".format(flags)
        ])
        if exitCode != 0:
            raise ConfigurationCompilationError("Failed to retrieve compiler macros, stderr is:\n{}".format(err))
        # Preprocessor output lines of interest look like '#define NAME VALUE'
        # (VALUE may be empty).
        macros = {}
        for rawLine in unparsedOutput.split('\n'):
            if not rawLine.startswith('#define '):
                continue
            name, _sep, definition = rawLine.strip()[len('#define '):].partition(' ')
            macros[name] = definition
        return macros
def featureTestMacros(config, flags=''):
"""
Return a dictionary of feature test macros.
The keys are strings representing feature test macros, and the values are
integers representing the value | |
<filename>NavControl3.py
# -*- coding: utf-8 -*-
"""
Created on Sat May 28 08:53:18 2016
@author: mtkessel
"""
import sys
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
import PyQt4.QtOpenGL as QtOpenGL
import ctypes
import numpy
import math
from math import pi, sin, cos # convenience
import time
import llap
import serial
import serialPorts
class NavigationThread(QtCore.QThread):
    """Background thread that owns the serial LLAP device: it waits for the
    port to appear, performs the start-up handshake, then streams incoming
    IMU messages into the bound Navigator until switched off."""
    def __init__(self, navigator, form = None):
        QtCore.QThread.__init__(self)
        self.navigator = navigator   # Navigator that consumes IMU samples
        self.form = form             # optional NavForm used for status display
        self.devicePort = None       # serial port path, set via setPort()
        self.device = None           # llap.LLAP instance once the port opens
        self.on = False              # run/stop flag polled by run()
    def __del__(self):
        # Block until the thread finishes before the object is destroyed.
        self.wait()
    def setPort(self, port):
        # Select the serial port to open when the thread runs.
        self.devicePort = port
    def setOn(self):
        # Allow run() to enter / keep running its loops.
        self.on = True
    def setOff(self):
        # Ask run() to exit its loops at the next check.
        self.on = False
    def displayMessage(self,message,deviceId=None):
        """
        Forward a status message to the form, if one is attached.
        """
        # Only display messages if there is a form to which we can send the messages
        if (self.form != None):
            self.form.displayMessage(message,deviceId)
    def run(self):
        """Thread body: wait for the port, handshake with the device, then
        pump incoming messages until setOff() is called."""
        # Wait until we see that the port path is available
        waiting = True
        connected = False
        count = 1
        while (waiting and (self.on == True)):
            try:
                # We don't really care about the actual communication
                # rate because we just want to know if the serial
                # path is available. However, we will set the baud
                # rate to 115200 based on the expected spec for our device function.
                # The timeout value applies to any commands we might
                # choose to send in the future.
                self.device = llap.LLAP(deviceId='--',port=self.devicePort,baudrate=115200,timeout=1.0,xonxoff=True,rtscts=True,dsrdtr=True)
                self.device.flush()
                self.device.start()
                waiting = False
            except (OSError, serial.SerialException):
                # Don't pass the exception to the caller
                # Display a simple message
                if (self.form != None):
                    self.form.displayMessage(str(count) + ": Waiting for " + self.devicePort)
                # Wait a short time then keep looping until the exception stops
                time.sleep(1.0)
                count = count + 1
        print("Port Wait Complete")
        if (self.on == True):
            try:
                print("Waiting for STARTED");
                # Handshake: expect STARTED (retrying once via ACK), switch
                # to device id 'AA', exchange HELLO, then start cyclic reports.
                message, deviceId = self.device.waitFor("STARTED", timeout = 2.0, displayAll = True)
                if (message == None):
                    self.device.send("ACK")
                    message, deviceId = self.device.waitFor("ACK", timeout = 2.0, displayAll = True)
                    if (message == None):
                        raise TimeoutError('Timeout Waiting for STARTED')
                self.displayMessage(message, deviceId)
                self.device.changeDeviceId('AA')
                self.device.send("HELLO")
                message, deviceId = self.device.waitFor("HELLO")
                if (message == None):
                    raise TimeoutError('Timeout Waiting for HELLO')
                self.displayMessage(message, deviceId)
                # NOTE(review): 'INTVL010T' presumably sets the reporting
                # interval and 'CYCLE' starts cyclic reporting -- confirm
                # against the LLAP device documentation.
                self.device.send("INTVL010T")
                self.device.send("CYCLE")
                connected = True
                self.displayMessage("RUNNING")
            except (TimeoutError, KeyboardInterrupt) as error:
                print(str(error))
                connected = False
                if (self.form != None):
                    self.form.displayMessage(str(error))
        # Throttle UI updates to ~12 per second even if messages arrive faster.
        updateDelta = 1.0/12.0
        nextUpdateTime = time.time() + updateDelta
        while (connected and (self.on == True)):
            try:
                message, deviceId = self.device.get(timeout_sec = 0.100)
                if (deviceId == 'PP'):
                    # 'PP' messages carry comma-separated IMU fields after a
                    # 4-character prefix; Navigator.updateImu parses them.
                    c = message[4:].split(',',13);
                    self.navigator.updateImu(c)
                if (time.time() >= nextUpdateTime):
                    self.displayMessage(message, deviceId)
                    heading = "{:.1f}".format(self.navigator.heading)
                    roll = "{:.1f}".format(self.navigator.roll * 180/pi)
                    pitch = "{:.1f}".format(self.navigator.pitch * 180/pi)
                    yaw = "{:.1f}".format(self.navigator.yaw * 180/pi)
                    self.displayMessage("HEADING = " + heading + " (" + roll + ", " + pitch + ", " + yaw + ")")
                    nextUpdateTime += updateDelta
            except (BaseException) as error:
                # Broad catch keeps the pump loop alive on transient errors;
                # the error text is surfaced to the form instead.
                print(str(error))
                self.displayMessage(str(error))
        print("Connection Loop Exited")
        # Shut the device down cleanly once the loop exits.
        self.device.send("STOP")
        self.device.stop()
        self.device.close()
        if (self.form != None):
            self.form.clearStatus()
class Navigator():
    """Accumulates raw IMU samples and derives roll, pitch, yaw and a
    magnetic heading using a tilt-compensated e-compass calculation."""
    def __init__(self, form = None):
        self.form = form
        self.thread = NavigationThread(self, self.form)
        # If a form exists, then let the form know
        # which navigator object is bound to it
        if (self.form != None):
            self.form.bindNavigator(self)
#        self.time.gmt = 0
#
#        self.position.latitude = 0
#        self.position.longitude = 0
#
#        self.velocity.magnitude = 0
#
        # Raw accelerometer components and magnitude
        self.ax = 0.0
        self.ay = 0.0
        self.az = 0.0
        self.am = 0.0
        # Raw gyroscope components and magnitude
        self.gx = 0.0
        self.gy = 0.0
        self.gz = 0.0
        self.gm = 0.0
        # Raw magnetometer components and magnitude
        self.mx = 0.0
        self.my = 0.0
        self.mz = 0.0
        self.mm = 0.0
        self.LPPsi = 0.0 # for lowpass filter
        self.roll = 0.0 #radians
        self.pitch = 0.0 #radians
        self.yaw = 0.0 #radians
        self.heading = 0.0 # degrees
    def start(self, port):
        """Begin reading IMU data from the given serial port on the worker thread."""
        self.thread.setPort(port)
        self.thread.setOn()
        self.thread.start()
    def stop(self):
        """Ask the worker thread to shut down."""
        self.thread.setOff()
    def updateImu(self, c):
        """Ingest one comma-split IMU record and refresh roll/pitch/yaw/heading.

        `c` is the list of fields produced by NavigationThread: indices 3-5
        are accelerometer, 6-8 gyroscope, 9-11 magnetometer readings.
        """
        self.ax = float(c[3])
        self.ay = float(c[4])
        self.az = float(c[5])
        self.am = math.sqrt(self.ax*self.ax + self.ay*self.ay + self.az*self.az)
        self.gx = float(c[6])
        self.gy = float(c[7])
        self.gz = float(c[8])
        self.gm = math.sqrt(self.gx*self.gx + self.gy*self.gy + self.gz*self.gz)
        self.mx = float(c[9])
        self.my = float(c[10])
        self.mz = -float(c[11]) # hardware spec indicates mz is opposite of others
        self.mm = math.sqrt(self.mx*self.mx + self.my*self.my + self.mz*self.mz)
        # Normalize acceleration and magnetic vectors to unit length.
        ax = self.ax / self.am
        ay = self.ay / self.am
        az = self.az / self.am
        mx = self.mx / self.mm
        my = self.my / self.mm
        mz = self.mz / self.mm
        phi = math.atan2(ay, az) # Roll angle
        hyp = math.sqrt(ay*ay + az*az) # hypotenuse
        sphi = ay / hyp # sin of roll angle with correct sign
        cphi = az / hyp # cos of roll angle with correct sign
        # rotate magnetic and acceleration components by roll angle (phi)
        bfy = my * cphi - mz * sphi
        bpz = my * sphi + mz * cphi
        gpz = ay * sphi + az * cphi
        # compute pitch angle (theta), restricted to +/- 90 degrees
        theta = math.atan2(ax, gpz)
        pi2 = math.pi/2.0
        if (theta > pi2):
            theta = math.pi - theta
        if (theta < -pi2):
            theta = -math.pi - theta
        # sin and cos of pitch angle
        hyp = math.sqrt(ax*ax + gpz*gpz)
        # NOTE(review): theta = atan2(ax, gpz) implies sin(theta) = ax/hyp,
        # but 'az' is used here -- possibly a typo for 'ax', or a deliberate
        # axis/sign convention of this sensor. Confirm against hardware.
        stheta = - az / hyp
        ctheta = gpz / hyp
        # keept pitch in +/- 90 degree range
        if (ctheta < 0):
            ctheta = -ctheta
        # rotate by pitch angle (theta)
        bfx = mx * ctheta + bpz * stheta
        bfz = -mx * stheta + bpz * ctheta
        # current yaw = e-compass angle
        psi = math.atan2(-bfy, bfx)
# NOTE: This low pass filter is not really working... need to think about it
#        # low pass filter, set up for modulo on 360 degrees
#        temp = psi - self.LPPsi
#        if (temp > math.pi):
#            temp -= 2*math.pi
#
#        if (temp < -math.pi):
#            temp += 2*math.pi
#
#        temp = self.LPPsi + temp / 10
#
#        if (temp > math.pi):
#            temp -= 2*math.pi
#
#        if (temp < -math.pi):
#            temp += 2*math.pi
#
#        self.LPPsi = temp
        dtr = math.pi / 180.0 # degrees-to-radians factor
        # update pointing information
        self.roll = phi
        self.pitch = theta
        self.yaw = psi
        self.heading = -psi / dtr
class NavForm(QtGui.QWidget):
    """Control panel: a latching POWER button, a serial-port selector, a
    power-on counter and a stack of status rows fed by the navigator."""

    def __init__(self, parent=None):
        super(NavForm, self).__init__(parent)
        self.navigator = None
        # Window geometry and title.
        self.resize(500, 750)
        self.move(1400, 300)
        self.setWindowTitle('Navigation Control')
        # Latching POWER button: gray when off, green when on.
        self.onOffButton = QtGui.QPushButton(self)
        self.onOffButton.setText("POWER")
        self.onOffButton.setStyleSheet("color: gray")
        self.onOffButton.setCheckable(True)
        self.onOffButton.setDefault(False)
        self.onOffButton.setAutoDefault(False)
        self.onOffButton.setGeometry(400, 0, 100, 50)
        self.onOffButton.clicked.connect(self.btnstate)
        # Drop-down listing the serial ports currently present on the host.
        self.portSelectionBox = QtGui.QComboBox(self)
        self.portSelectionBox.addItems(serialPorts.listPorts())
        # Counts how many times power has been switched on.
        self.counter = 0
        self.counterText = QtGui.QLabel(self)
        self.counterText.setGeometry(300, 0, 100, 30)
        self.counterText.setText(str(self.counter))
        # Seven status rows: row 0 is general status, rows 1-5 are per-device.
        self.statusText = [QtGui.QLabel(self) for _ in range(7)]
        for row, label in enumerate(self.statusText):
            label.setGeometry(0, 60 + 30 * row, 500, 30)
            label.setText("")
        self.statusText[0].setText("OFF")

    def clearStatus(self):
        """Blank every status row and show OFF on the general row."""
        for label in self.statusText:
            label.setText("")
        self.statusText[0].setText("OFF")

    def btnstate(self):
        """Handle POWER toggles: start the navigator on the selected port
        when switched on, stop it when switched off."""
        if self.onOffButton.isChecked():
            self.onOffButton.setDefault(True)
            self.onOffButton.setStyleSheet("color: green")
            if self.navigator is not None:
                self.navigator.start(self.portSelectionBox.currentText())
            self.counter += 1
            self.counterText.setText(str(self.counter))
        else:
            self.onOffButton.setDefault(False)
            self.onOffButton.setStyleSheet("color: gray")
            if self.navigator is not None:
                self.navigator.stop()

    def bindNavigator(self, navigator):
        """Attach the navigator whose data this form displays."""
        self.navigator = navigator

    def displayMessage(self, message, deviceId=None):
        """Route a message to the status row for its device id; messages
        without a device id go to the general row 0. Unknown device ids are
        silently ignored, matching the device set wired in the thread."""
        if message is None:
            return
        if deviceId is None:
            self.statusText[0].setText(message)
            return
        rowByDevice = {"AA": 1, "PP": 2, "GP": 3, "HH": 4, "AX": 5}
        row = rowByDevice.get(deviceId)
        if row is not None:
            self.statusText[row].setText(deviceId[:2] + ": " + message)
def main():
# We need a navigator to attach to the sensor and accumulate data
# and we also need form in which to display the data
app = QtGui.QApplication(sys.argv) # All Qt applications need this
app.setStyle('motif') # A pleasing non-branded style sheet
# We instantiate a form in which to display data
# so we can pass it to the navigator; we do it in
# this order so we can (later) have a navigator that
# can run without the form (remember the display form
# is just to the developer/operator visibility into
# performance of the navigator, but in final applications
# it won't really be needed or we will want to display
# things | |
<filename>conda_build/convert.py
# (c) 2012-2017 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
"""
Tools for converting conda packages
"""
import glob
import json
import hashlib
import os
import re
import shutil
import sys
import tarfile
import tempfile
from conda_build.utils import filter_info_files, walk
def retrieve_c_extensions(file_path, show_imports=False):
    """Check tarfile for compiled C files with '.pyd' or '.so' suffixes.

    If a file ends in either .pyd or .so, it is a compiled C file.
    Because compiled C code varies between platforms, it is not possible
    to convert packages containing C extensions to other platforms.

    Positional arguments:
    file_path (str) -- the file path to the source package tar file

    Keyword arguments:
    show_imports (bool) -- output the C extensions included in the package
    """
    # \d+\.\d+ (rather than \d\.\d) so multi-digit versions such as
    # python3.10 are recognized and not folded into the bare 'lib/' branch.
    c_extension_pattern = re.compile(
        r'(Lib\/|lib\/python\d+\.\d+\/|lib\/)(site-packages\/|lib-dynload)?(.*)')

    imports = []
    with tarfile.open(file_path) as tar:
        for filename in tar.getnames():
            if filename.endswith(('.pyd', '.so')):
                filename_match = c_extension_pattern.match(filename)
                import_name = 'import {}' .format(filename_match.group(3).replace('/', '.'))
                imports.append(import_name)
    return imports
def retrieve_package_platform(file_path):
    """Retrieve the platform and architecture of the source package.

    Positional arguments:
    file_path (str) -- the file path to the source package tar file
    """
    with tarfile.open(file_path) as tar:
        index = json.loads(tar.extractfile('info/index.json').read().decode('utf-8'))

    platform = index['platform']
    arch = index.get('arch')

    # Normalize the architecture names recorded in index.json to bit widths.
    if arch == 'x86_64':
        architecture = '64'
    elif arch == 'x86':
        architecture = '32'
    else:
        architecture = arch

    if platform.startswith(('linux', 'osx')):
        return ('unix', platform, architecture)
    if platform.startswith('win'):
        return ('win', platform, architecture)
    raise RuntimeError('Package platform not recognized.')
def retrieve_python_version(file_path):
    """Retrieve the python version from a path.

    This function is overloaded to handle three separate cases:
    when a path is a tar archive member path such as 'lib/python3.6/site-packages',
    when a path is the file path to the source package tar file, and when a path
    is the path to the temporary directory that contains the extracted contents
    of the source package tar file. This allows one function to handle the three
    most common cases of retrieving the python version from the source package.

    Positional arguments:
    file_path (str) -- the file path to a tar archive member, the file path
        to the source tar file itself, or the file path to the
        temporary directory containing the extracted source package contents
    """
    if 'python' in file_path:
        # \d+\.\d+ so multi-digit versions such as python3.10 are matched
        # completely rather than being truncated to 'python3.1'.
        matched = re.search(r'python\d+\.\d+', file_path)
        if matched:
            return matched.group(0)
    else:
        # Read info/index.json either straight from the tarball or from an
        # already-extracted directory.
        if file_path.endswith(('.tar.bz2', '.tar')):
            with tarfile.open(file_path) as tar:
                index = json.loads(tar.extractfile('info/index.json').read().decode('utf-8'))
        else:
            path_file = os.path.join(file_path, 'info/index.json')
            with open(path_file) as index_file:
                index = json.load(index_file)

        # Build strings look like 'py36_0' or 'py310_1': one major-version
        # digit followed by one or more minor-version digits.
        version_match = re.search(r'py(\d)(\d+)', index['build'])
        return 'python{}.{}' .format(version_match.group(1), version_match.group(2))
def extract_temporary_directory(file_path):
    """Extract the source tar archive contents to a temporary directory.

    Positional arguments:
    file_path (str) -- the file path to the source package tar file

    Returns the path of the temporary directory; the caller is responsible
    for removing it when done.
    """
    temporary_directory = tempfile.mkdtemp()
    # Context manager guarantees the tar file handle is closed even if
    # extraction fails part-way through.
    with tarfile.open(file_path) as source:
        source.extractall(temporary_directory)
    return temporary_directory
def update_dependencies(new_dependencies, existing_dependencies):
    """Update the source package's existing dependencies.

    When a user passes additional dependencies from the command line,
    these dependencies will be added to the source package's existing dependencies.
    If the dependencies passed from the command line are existing dependencies,
    these existing dependencies are overwritten.

    Positional arguments:
    new_dependencies (List[str]) -- the dependencies passed from the command line
    existing_dependencies (List[str]) -- the dependencies found in the source
        package's index.json file

    Returns the merged dependency list (the existing list object, updated
    in place so any other reference to it also sees the merge).
    """
    # Specs look like 'name x.y' -- duplication is decided on the name only,
    # compared exactly (the old startswith() test also matched e.g.
    # 'numpy-base' against a new 'numpy').
    new_names = {dependency.split()[0] for dependency in new_dependencies}
    # Rebuild instead of remove-while-iterating: mutating a list inside a
    # 'for' over the same list skips the element after each removal, so
    # back-to-back duplicates survived in the original.
    merged = [existing for existing in existing_dependencies
              if existing.split()[0] not in new_names]
    merged.extend(new_dependencies)
    existing_dependencies[:] = merged
    return existing_dependencies
def update_index_file(temp_dir, target_platform, dependencies, verbose):
    """Update the source package's index file with the target platform's information.

    Positional arguments:
    temp_dir (str) -- the file path to the temporary directory that contains
        the source package's extracted contents
    target_platform (str) -- the target platform and architecture in
        the form of platform-architecture such as linux-64
    dependencies (List[str]) -- the dependencies passed from the command line
    verbose (bool) -- show output of items that are updated

    Returns the path to the rewritten info/index.json file.
    """
    index_file = os.path.join(temp_dir, 'info/index.json')
    with open(index_file) as metadata_file:
        metadata = json.load(metadata_file)
    platform, architecture = target_platform.split('-')
    other_platforms = ['linux-ppc64', 'linux-ppc64le', 'linux-s390x',
                       'linux-armv6l', 'linux-armv7l', 'linux-aarch64']
    # Work out the *source* package's architecture, used only for reporting.
    if target_platform in other_platforms:
        source_architecture = architecture
    elif metadata.get('arch') == 'x86_64':
        source_architecture = '64'
    else:
        source_architecture = '32'
    if verbose:
        print('Updating platform from {} to {}' .format(metadata['platform'], platform))
        print('Updating subdir from {} to {}' .format(metadata['subdir'], target_platform))
        print('Updating architecture from {} to {}' .format(source_architecture, architecture))
    metadata['platform'] = platform
    metadata['subdir'] = target_platform
    # '64'/'32' map to the canonical x86 names; anything else (arm, ppc...)
    # is recorded verbatim.
    arch_names = {'64': 'x86_64', '32': 'x86'}
    metadata['arch'] = arch_names.get(architecture, architecture)
    if dependencies:
        metadata['depends'] = update_dependencies(dependencies, metadata['depends'])
    with open(index_file, 'w') as metadata_file:
        json.dump(metadata, metadata_file, indent=2)
    return index_file
def update_lib_path(path, target_platform, temp_dir=None):
    """Update the lib path found in the source package's paths.json file.

    For conversions from unix to windows, the 'lib/pythonx.y/' paths are
    renamed to 'Lib/' and vice versa for conversions from windows to unix.

    Positional arguments:
    path (str) -- path to rename in the paths.json file
    target_platform (str) -- the platform to target: 'unix' or 'win'

    Keyword arguments:
    temp_dir (str) -- the file path to the temporary directory that
        contains the source package's extracted contents (required for
        'unix' targets, where the version is read from package metadata)
    """
    if target_platform == 'win':
        # The unix path itself embeds 'pythonx.y', so the version can be
        # read straight from it; drop it and switch 'lib' -> 'Lib'.
        python_version = retrieve_python_version(path)
        renamed_lib_path = re.sub(r'\Alib', 'Lib', path).replace(python_version, '')
    elif target_platform == 'unix':
        # Windows paths carry no version component; recover it from the
        # extracted package metadata in temp_dir instead.
        python_version = retrieve_python_version(temp_dir)
        # Double the backslashes so the replacement string survives
        # re.sub's escape processing on Windows-style separators.
        lib_python_version = os.path.join('lib', python_version).replace('\\', '\\\\')
        renamed_lib_path = re.sub(r'\ALib', lib_python_version, path.replace('\\', '\\\\'))
    # NOTE(review): any other target_platform value would raise NameError
    # here -- callers appear to only ever pass 'win' or 'unix'.
    return os.path.normpath(renamed_lib_path)
def update_lib_contents(lib_directory, temp_dir, target_platform, file_path):
    """Update the source package's 'lib' directory.

    When converting from unix to windows, the 'lib' directory is renamed to
    'Lib' and the contents inside the 'pythonx.y' directory are renamed to
    exclude the 'pythonx.y' prefix. When converting from windows to unix,
    the 'Lib' is renamed to 'lib' and the 'pythonx.y' prefix is added.

    Positional arguments:
    lib_directory (str) -- the file path to the 'lib' directory located in the
        temporary directory that stores the package contents
    temp_dir (str) -- the file path to the temporary directory that contains
        the source package's extracted contents
    target_platform (str) -- the platform to target: 'unix' or 'win'
    file_path (str) -- the file path to the source package tar file
    """
    if target_platform == 'win':
        try:
            for lib_file in glob.iglob('{}/python*/**' .format(lib_directory)):
                if 'site-packages' in lib_file:
                    new_site_packages_path = os.path.join(
                        temp_dir, os.path.join('lib', 'site-packages'))
                    os.renames(lib_file, new_site_packages_path)
                else:
                    # Strip the 'pythonx.y/' component from every other entry.
                    if retrieve_python_version(lib_file) is not None:
                        python_version = retrieve_python_version(lib_file)
                        os.renames(lib_file, lib_file.replace(python_version, ''))
        except OSError:
            # Best effort: a rename can fail once os.renames has already
            # pruned the now-empty source directory on an earlier iteration.
            pass
        try:
            # Remove whatever is left of the (now mostly empty) pythonx.y dir.
            shutil.rmtree(glob.glob('{}/python*' .format(lib_directory))[0])
        except IndexError:
            # Nothing matched: the directory was fully consumed above.
            pass
        shutil.move(os.path.join(temp_dir, 'lib'), os.path.join(temp_dir, 'Lib'))
    elif target_platform == 'unix':
        dest_dir = os.path.join(temp_dir, 'lib')
        shutil.move(os.path.join(temp_dir, 'Lib'), dest_dir)
        # Snapshot the directory listing BEFORE creating the pythonx.y
        # folder, so the new folder is not moved into itself.
        lib_files = glob.glob('{}/**' .format(dest_dir))
        # The version is constant for the whole package: look it up once.
        python_version = retrieve_python_version(file_path)
        py_folder = os.path.join(dest_dir, python_version)
        # exist_ok replaces the original bare 'except: pass' around
        # makedirs, which could also have hidden real failures
        # (e.g. permission errors) and even KeyboardInterrupt.
        os.makedirs(py_folder, exist_ok=True)
        for lib_file in lib_files:
            shutil.move(lib_file, os.path.join(py_folder, os.path.basename(lib_file)))
def update_executable_path(temp_dir, file_path, target_platform):
    """Update the name of the executable files found in the paths.json file.

    When converting from unix to windows, executables are renamed with a '-script.py'
    suffix. When converting from windows to unix, this suffix is removed. The
    paths in paths.json need to be updated accordingly.

    Positional arguments:
    temp_dir (str) -- the file path to the temporary directory containing
        the source package's extracted contents (used to probe file content)
    file_path (str) -- the file path to the executable to rename in paths.json
    target_platform (str) -- the platform to target: 'unix' or 'win'
    """
    if target_platform == 'win':
        if os.path.basename(file_path).startswith('.') or is_binary_file(temp_dir, file_path):
            # Hidden files and true binaries keep their name; only the
            # 'bin' prefix moves to 'Scripts'.
            renamed_executable_path = re.sub(r'\Abin', 'Scripts', file_path)
        else:
            # Text entry points become 'Scripts/<name>-script.py'.
            # (raw string added: '\A' in a plain literal relied on Python
            # passing unknown escapes through, which is deprecated)
            renamed_path = os.path.splitext(re.sub(r'\Abin', 'Scripts', file_path))[0]
            renamed_executable_path = '{}-script.py' .format(renamed_path)
    elif target_platform == 'unix':
        renamed_path = re.sub(r'\AScripts', 'bin', file_path)
        renamed_executable_path = renamed_path.replace('-script.py', '')
    return renamed_executable_path
def update_executable_sha(package_directory, executable_path):
    """Return the recomputed sha256 hex digest of an executable script.

    When moving between windows and linux a shebang line is removed or
    added in script files, so the recorded hash must be refreshed.
    """
    script = os.path.join(package_directory, executable_path)
    with open(script, 'rb') as script_file:
        return hashlib.sha256(script_file.read()).hexdigest()
def update_executable_size(temp_dir, executable):
    """Update the size of the converted executable files.

    Positional arguments:
    temp_dir (str) -- the file path to the temporary directory containing the source
        package's extracted contents
    executable (str) -- the executable whose size to update including its file extension

    Returns:
    byte size (int) of the executable file
    """
    executable_path = os.path.join(temp_dir, executable)
    return os.path.getsize(executable_path)
def add_new_windows_path(executable_directory, executable):
"""Add a new path to the paths.json file.
When an executable is renamed during a unix to windows conversion, a
an exe is also created. The paths.json file is updated with the
exe | |
import yaml
import numpy as np
from xml.etree.ElementTree import ElementTree, Element, SubElement
from .transform import Transform
from .utils import lift_material, visual, collision, box_link, joint
class LiftDoor:
    """One lift door: a pair of sliding panels mounted on the lift cabin,
    plus a matching world-fixed shaft door at each serviced level.

    All x/y coordinates in the YAML are relative to the cabin centre.
    """

    def __init__(self, yaml_node, name, lift_size, gap, plugin=True):
        """Parse a door entry and precompute its cabin/shaft poses.

        Parameters:
        yaml_node -- YAML mapping with 'door_type', 'x', 'y',
            'motion_axis_orientation' and 'width' keys
        name (str) -- door name, used in generated element names
        lift_size -- (width, depth, height) of the lift cabin
        gap (float) -- clearance between cabin door and shaft door
        plugin (bool) -- whether to emit the door-control plugin
        """
        self.name = name
        self.door_type = yaml_node['door_type']
        # x & y coordinates are with respect to the centre of the cabin
        self.x = float(yaml_node['x'])
        self.y = float(yaml_node['y'])
        self.motion_axis_orientation = float(
            yaml_node['motion_axis_orientation'])
        self.width = float(yaml_node['width'])
        self.height = lift_size[2]
        self.thickness = 0.03
        self.gap = gap  # gap between cabin_door and shaft_door
        self.plugin = plugin
        # Door plugin motion parameters (max speed/accel, nominal accel,
        # position tolerance, max force).
        self.params = {'v_max_door': 0.3,
                       'a_max_door': 0.2,
                       'a_nom_door': 0.1,
                       'dx_min_door': 1e-4,
                       'f_max_door': 35.0}
        # Decide which cabin wall this door sits on: the wall whose edge
        # the door centre is closest to.
        x_diff = abs(abs(self.x) - lift_size[0] / 2)
        y_diff = abs(abs(self.y) - lift_size[1] / 2)
        offset = 0.025  # cabin door position offset
        if x_diff <= y_diff:
            if self.x > 0:
                # side = (wall name, 1-D extent of the door along that wall)
                self.side = ('right', self.y-self.width/2, self.y+self.width/2)
                self.shaft_door_pose = (lift_size[0]/2 + self.gap, self.y)
                self.cabin_door_pose = (lift_size[0]/2 - offset, self.y)
            else:
                self.side = ('left', self.y-self.width/2, self.y+self.width/2)
                self.shaft_door_pose = (-lift_size[0]/2 - self.gap, self.y)
                self.cabin_door_pose = (-lift_size[0]/2 + offset, self.y)
        else:
            if self.y > 0:
                self.side = ('front', self.x-self.width/2, self.x+self.width/2)
                self.shaft_door_pose = (self.x, lift_size[1]/2 + self.gap)
                self.cabin_door_pose = (self.x, lift_size[1]/2 - offset)
            else:
                self.side = ('back', self.x-self.width/2, self.x+self.width/2)
                self.shaft_door_pose = (self.x, -lift_size[1]/2 - self.gap)
                self.cabin_door_pose = (self.x, -lift_size[1]/2 + offset)

    def generate_cabin_door(self, lift_model_ele, name):
        """Emit the cabin door as a nested model attached to the platform."""
        door_model_ele = SubElement(lift_model_ele, 'model')
        door_model_ele.set('name', name)
        door_pose = SubElement(door_model_ele, 'pose')
        (x, y) = self.cabin_door_pose
        door_pose.text = \
            f'{x} {y} 0 0 0 {self.motion_axis_orientation}'
        self.generate_door_link_and_joint(door_model_ele, parent='platform')
        if self.plugin:
            self.generate_door_plugin(door_model_ele, name)

    def generate_shaft_door(self, world_ele, x, y, z, yaw, name):
        """Emit a world-fixed shaft door (panels + entry ramp) at one level.

        (x, y, z, yaw) is the lift pose in the world frame; the precomputed
        cabin-relative shaft door offset is rotated by yaw before applying.
        """
        model_ele = SubElement(world_ele, 'model')
        model_ele.set('name', name)
        door_pose = SubElement(model_ele, 'pose')
        # tranformation
        (door_x, door_y) = self.shaft_door_pose
        x_new = x + door_x * np.cos(yaw) - door_y * np.sin(yaw)
        y_new = y + door_x * np.sin(yaw) + door_y * np.cos(yaw)
        yaw_new = yaw + self.motion_axis_orientation
        door_pose.text = f'{x_new} {y_new} {z} 0 0 {yaw_new}'
        self.generate_door_link_and_joint(model_ele)
        # A thin fixed ramp bridges the gap between shaft and cabin floor.
        floor_thickness = 0.05
        ramp_depth = self.gap * 2
        ramp_size = [self.width, ramp_depth, floor_thickness]
        ramp_pose = Element('pose')
        ramp_pose.text = f'0 0 {-floor_thickness / 2} 0 0 0'
        model_ele.append(box_link('ramp',
                                  ramp_size,
                                  ramp_pose,
                                  material=lift_material(),
                                  bitmask='0x02'))
        model_ele.append(joint('ramp_joint', 'fixed', 'world', 'ramp'))
        if self.plugin:
            self.generate_door_plugin(model_ele, name)

    def generate_door_link_and_joint(self, model_ele, parent='world'):
        """Emit the left/right sliding panels and their prismatic joints.

        Each panel is half the door width; panels slide apart along the
        local x axis, each by up to half the door width.
        """
        door_size = [self.width / 2, self.thickness, self.height]
        right_door_pose = Element('pose')
        right_door_pose.text = f'{self.width / 4} 0 {self.height / 2} 0 0 0'
        model_ele.append(box_link('right_door',
                                  door_size,
                                  right_door_pose,
                                  material=lift_material(),
                                  bitmask='0x02'))
        model_ele.append(joint('right_joint',
                               'prismatic',
                               parent,
                               'right_door',
                               joint_axis='x',
                               lower_limit=0,
                               upper_limit=self.width / 2))
        left_door_pose = Element('pose')
        left_door_pose.text = f'{-self.width / 4} 0 {self.height / 2} 0 0 0'
        model_ele.append(box_link('left_door',
                                  door_size,
                                  left_door_pose,
                                  material=lift_material(),
                                  bitmask='0x02'))
        model_ele.append(joint('left_joint',
                               'prismatic',
                               parent,
                               'left_door',
                               joint_axis='x',
                               lower_limit=-self.width / 2,
                               upper_limit=0))

    def generate_door_plugin(self, model_ele, name):
        """Attach the libdoor.so plugin configured as a double sliding door."""
        plugin_ele = SubElement(model_ele, 'plugin')
        plugin_ele.set('name', 'door')
        plugin_ele.set('filename', 'libdoor.so')
        for param_name, param_value in self.params.items():
            ele = SubElement(plugin_ele, param_name)
            ele.text = f'{param_value}'
        door_ele = SubElement(plugin_ele, 'door')
        door_ele.set('left_joint_name', 'left_joint')
        door_ele.set('name', f'{name}')
        door_ele.set('right_joint_name', 'right_joint')
        door_ele.set('type', 'DoubleSlidingDoor')

    # TODO: remove this function once nesting model is supported in ignition.
    def generate_cabin_door_ign(self, lift_model_ele, name):
        """Emit the cabin door panels directly on the lift model (flattened).

        This is for cabin door generation for ignition gazebo as it doesn't
        support nested models yet. Once ignition gazebo supports nested
        models, this should be removed.
        """
        (x, y) = self.cabin_door_pose
        yaw = self.motion_axis_orientation
        # Panel centres sit a quarter door-width either side of the door
        # centre, along the (rotated) motion axis.
        right_x = x + np.cos(yaw) * self.width/4
        left_x = x - np.cos(yaw) * self.width/4
        right_y = y + np.sin(yaw) * self.width/4
        left_y = y - np.sin(yaw) * self.width/4
        door_size = [self.width / 2, self.thickness, self.height]
        right_door_pose = Element('pose')
        right_door_pose.text = \
            f'{right_x} {right_y} {self.height / 2} 0 0 {yaw}'
        lift_model_ele.append(box_link(f'{name}_right_door',
                                       door_size,
                                       right_door_pose,
                                       material=lift_material(),
                                       bitmask='0x02'))
        lift_model_ele.append(joint(f'{name}_right_joint',
                                    'prismatic',
                                    'platform',
                                    f'{name}_right_door',
                                    joint_axis='x',
                                    lower_limit=0,
                                    upper_limit=self.width / 2))
        left_door_pose = Element('pose')
        left_door_pose.text = f'{left_x} {left_y} {self.height / 2} 0 0 {yaw}'
        lift_model_ele.append(box_link(f'{name}_left_door',
                                       door_size,
                                       left_door_pose,
                                       material=lift_material(),
                                       bitmask='0x02'))
        lift_model_ele.append(joint(f'{name}_left_joint',
                                    'prismatic',
                                    'platform',
                                    f'{name}_left_door',
                                    joint_axis='x',
                                    lower_limit=-self.width / 2,
                                    upper_limit=0))
        if self.plugin:
            plugin_ele = SubElement(lift_model_ele, 'plugin')
            plugin_ele.set('name', 'door')
            plugin_ele.set('filename', 'libdoor.so')
            for param_name, param_value in self.params.items():
                ele = SubElement(plugin_ele, param_name)
                ele.text = f'{param_value}'
            door_ele = SubElement(plugin_ele, 'door')
            door_ele.set('left_joint_name', f'{name}_left_joint')
            door_ele.set('name', f'{name}')
            door_ele.set('right_joint_name', f'{name}_right_joint')
            door_ele.set('type', 'DoubleSlidingDoor')
class Lift:
    def __init__(self, yaml_node, name, transform, levels):
        """Parse a lift description from the building YAML.

        Parameters:
        yaml_node -- YAML mapping describing the lift
        name (str) -- lift name, used in generated model names
        transform -- building-level transform applied to the raw position
        levels -- mapping of level name -> level object (with .elevation)
        """
        self.name = name
        print(f'parsing lift {name}')
        self.depth = float(yaml_node['depth'])
        self.width = float(yaml_node['width'])
        self.yaw = float(yaml_node['yaw'])
        if 'initial_floor_name' in yaml_node:
            self.initial_floor_name = str(yaml_node['initial_floor_name'])
        else:
            self.initial_floor_name = ''
        # Highest/lowest serviced floors bound the cabin's travel range;
        # missing or empty entries default to +/- infinity (no bound).
        if 'highest_floor' in yaml_node:
            self.highest_floor = str(yaml_node['highest_floor'])
            if self.highest_floor:
                self.highest_elevation = levels[self.highest_floor].elevation
            else:
                self.highest_elevation = float('inf')
        else:
            self.highest_elevation = float('inf')
        if 'lowest_floor' in yaml_node:
            self.lowest_floor = str(yaml_node['lowest_floor'])
            if self.lowest_floor:
                self.lowest_elevation = levels[self.lowest_floor].elevation
            else:
                self.lowest_elevation = -float('inf')
        else:
            self.lowest_elevation = -float('inf')
        self.plugins = True
        if 'plugins' in yaml_node:
            self.plugins = bool(yaml_node['plugins'])
        # The y axis is flipped between the editor frame and the
        # simulation frame, hence the negation before transforming.
        raw_pos = (float(yaml_node['x']), -float(yaml_node['y']))
        self.x, self.y = transform.transform_point(raw_pos)
        self.cabin_height = 2.5
        self.wall_thickness = 0.05
        self.floor_thickness = 0.05
        self.gap = 0.05  # gap between lift shaft and lift cabin on each side
        self.shaft_depth = self.depth + 2 * self.gap
        self.shaft_width = self.width + 2 * self.gap
        # default params
        self.cabin_mass = 1200
        # Cabin-motion plugin parameters (max speed/accel, nominal accel,
        # position tolerance, max force).
        self.params = {
            'v_max_cabin': 2.0,
            'a_max_cabin': 1.2,
            'a_nom_cabin': 1.0,
            'dx_min_cabin': 0.001,
            'f_max_cabin': 25323.0}
        self.level_elevation = {}
        self.level_doors = {}
        self.level_names = []
        if 'level_doors' in yaml_node:
            for level_name, door_names in yaml_node['level_doors'].items():
                self.level_doors[level_name] = door_names
                self.level_elevation[level_name] = levels[level_name].elevation
                self.level_names.append(level_name)
        # Default the initial floor to the first serviced level.
        if (not self.initial_floor_name) and self.level_names:
            self.initial_floor_name = self.level_names[0]
        self.doors = []
        if 'doors' in yaml_node:
            self.doors = self.parse_lift_doors(yaml_node['doors'])
        # for wall generation
        # self.end_points stores 1-dimensional positions of endpoints of walls
        # on each side of the cabin in sorted arrays
        self.end_points = {'front': [-self.width/2, self.width/2],
                           'back': [-self.width/2, self.width/2],
                           'left': [-self.depth/2 + self.wall_thickness,
                                    self.depth/2 - self.wall_thickness],
                           'right': [-self.depth/2 + self.wall_thickness,
                                     self.depth/2 - self.wall_thickness]}
        # Each door splits its wall: insert the door's edge coordinates so
        # wall segments are later generated between consecutive pairs only.
        for door in self.doors:
            side, left, right = door.side
            self.end_points[side] += [left, right]
            self.end_points[side].sort()
def parse_lift_doors(self, yaml_node):
doors = []
for lift_door_name, lift_door_yaml in yaml_node.items():
doors.append(LiftDoor(lift_door_yaml,
lift_door_name,
(self.width, self.depth, self.cabin_height),
self.gap,
self.plugins))
return doors
def get_lift_vertices(self):
# parse lift shaft cavity corner vertices
vertices = []
d = self.depth / 2 + self.gap
w = self.width / 2 + self.gap
vertices.append((self.x - d * np.sin(self.yaw) - w * np.cos(self.yaw),
self.y + d * np.cos(self.yaw) - w * np.sin(self.yaw)))
vertices.append((self.x + d * np.sin(self.yaw) - w * np.cos(self.yaw),
self.y - d * np.cos(self.yaw) - w * np.sin(self.yaw)))
vertices.append((self.x + d * np.sin(self.yaw) + w * np.cos(self.yaw),
self.y - d * np.cos(self.yaw) + w * np.sin(self.yaw)))
vertices.append((self.x - d * np.sin(self.yaw) + w * np.cos(self.yaw),
self.y + d * np.cos(self.yaw) + w * np.sin(self.yaw)))
return vertices
def generate_shaft_doors(self, world_ele):
for level_name, door_names in self.level_doors.items():
for door in self.doors:
if door.name in door_names:
name = f'ShaftDoor_{self.name}_{level_name}_{door.name}'
elevation = self.level_elevation[level_name]
door.generate_shaft_door(
world_ele, self.x, self.y, elevation, self.yaw, name)
def generate_wall(self, side, pair, name, platform):
dims = [pair[1]-pair[0], self.wall_thickness, self.cabin_height]
mid = (pair[0] + pair[1]) / 2
if side == 'front':
x, y, yaw = mid, self.depth/2 - self.wall_thickness / 2, 0
elif side == 'back':
x, y, yaw = mid, -self.depth/2 + self.wall_thickness / 2, 0
elif side == 'left':
x, y, yaw = -self.width/2 + self.wall_thickness / 2, mid, np.pi/2
elif side == 'right':
x, y, yaw = self.width/2 - self.wall_thickness / 2, mid, np.pi/2
else:
return
pose = Element('pose')
pose.text = f'{x} {y} {self.cabin_height / 2} 0 0 {yaw}'
platform.append(visual(name, pose, dims, lift_material()))
platform.append(collision(name, pose, dims, '0x01'))
def generate_cabin(self, world_ele, options):
# materials missing for now
lift_model_name = f'{self.name}'
lift_model_ele = SubElement(world_ele, 'model')
lift_model_ele.set('name', lift_model_name)
# main cabin link for actuation
platform_name = 'platform'
platform = SubElement(lift_model_ele, 'link')
platform.set('name', platform_name)
inertial = SubElement(platform, 'inertial')
mass = SubElement(inertial, 'mass')
mass.text = f'{self.cabin_mass}'
inertial = SubElement(inertial, 'inertia')
SubElement(inertial, 'ixx').text = \
str(self.cabin_mass/12.0*(self.width**2 + self.floor_thickness**2))
SubElement(inertial, 'iyy').text = \
str(self.cabin_mass/12.0*(self.depth**2 + self.floor_thickness**2))
SubElement(inertial, 'izz').text = \
str(self.cabin_mass/12.0*(self.width**2 + self.depth**2))
# visuals and collisions for floor and walls of cabin
floor_dims = [self.width, self.depth, self.floor_thickness]
floor_name = 'floor'
floor_pose = Element('pose')
floor_pose.text = f'0 0 {-self.floor_thickness / 2} 0 0 0'
platform.append(visual(floor_name,
floor_pose,
floor_dims,
lift_material()))
platform.append(collision(floor_name, floor_pose, floor_dims, '0x01'))
# Wall generation
# get each pair of end_points on each side, generate a section of wall
# between the pair of points
for side, end_points in self.end_points.items():
assert len(end_points) % 2 == 0
for i in range(0, len(end_points), 2):
pair = end_points[i: i+2]
name = f'{side}wall{i//2+1}'
self.generate_wall(side, pair, name, platform)
# lift cabin actuation joint
lift_model_ele.append(joint('cabin_joint',
'prismatic',
'world',
'platform',
joint_axis='z'))
# cabin doors
| |
{1!s}'.format(type(pe).__name__, pe))
cpx_status_string = 'Internal error: {0!s}'.format(pe)
cpx_status = -2
finally:
solve_time = cpx.get_time() - solve_time_start
solve_dettime = cpx.get_dettime() - solve_dettime_start
details = SolveDetails(solve_time, solve_dettime,
cpx_status, cpx_status_string,
cpx_probtype,
nb_columns, linear_nonzeros,
cpx_miprelgap, cpx_bestbound,
nb_iterations,
nb_nodes_processed)
if self._model.quality_metrics:
details._quality_metrics = self._compute_quality_metrics()
self._last_solve_details = details
# clear bound change requests
self._var_lb_changed = {}
self._var_ub_changed = {}
self._last_solve_status = solve_ok
new_solution = None
if solve_ok:
new_solution = self._make_solution(mdl, self.get_solve_status())
else:
mdl.notify_solve_failed()
if cpx_status_string:
mdl.logger.trace("CPLEX solve returns with status: {0}", (cpx_status_string,))
return new_solution
    def _make_solution(self, mdl, job_solve_status):
        """Build a SolveSolution from the CPLEX solution currently in memory.

        Collects the objective value(s) -- one per objective for
        multi-objective models, plus a per-priority-level breakdown --
        then a sparse {variable: value} map, and wraps them together with
        the solve details stored by the solve that just completed.
        """
        cpx_adapter = self.cpx_adapter
        cpx = self._cplex
        full_obj = cpx.solution.get_objective_value()
        if self._has_multi_objective(cpx):
            # Multi-objective: one value per objective index.
            full_obj = [cpx.solution.multiobj.get_objective_value(objidx) for objidx in range(cpx.multiobj.get_num())]
        # Build list of objectives value by priority level (ie: each priority level corresponds to blended objectives
        # with same priority)
        full_obj_by_prio = [full_obj]
        if self._has_multi_objective(cpx):
            decreasing_ordered_prio = self._get_priorities_list_in_decreasing_order()
            full_obj_by_prio = [cpx.solution.multiobj.get_objval_by_priority(prio) for prio in decreasing_ordered_prio]
        nb_vars = mdl.number_of_variables
        if nb_vars > 0:
            if self.procedural:
                # Fast path: fetch all values through the low-level adapter.
                all_var_values = cpx_adapter.fast_get_solution(cpx, nb_vars)
            else:
                all_var_values = cpx.solution.get_values()
            # all_var_values = fast_get_solution(cpx, mdl.number_of_variables)
            # Keep only nonzero values so the map stays sparse.
            vmap = {}
            for dv in mdl.iter_variables():
                dvv = all_var_values[dv._index]
                if dvv:
                    vmap[dv] = dvv
            var_value_map = vmap
            # var_value_map = dict(zip(mdl.iter_variables(), all_var_values))
        else:
            var_value_map = {}
        solve_details = self._last_solve_details
        assert solve_details is not None
        solution = SolveSolution.make_engine_solution(model=mdl,
                                                      var_value_map=var_value_map,
                                                      obj=full_obj,
                                                      blended_obj_by_priority=full_obj_by_prio,
                                                      solved_by=self.name,
                                                      solve_details=solve_details,
                                                      job_solve_status=job_solve_status)
        return solution
@classmethod
def handle_cplex_solver_error(cls, logger, mdl, cpxse, initial_status, initial_status_string):
status, status_string = initial_status, initial_status_string
cpx_code = cpxse.args[2]
if 5002 == cpx_code:
# we are in the notorious "non convex" case.
# provide a meaningful status string for the solve details
status = 5002 # famous error code...
if mdl.has_quadratic_constraint():
status_string = "Non-convex QCP"
logger.error('Model is non-convex')
else:
status_string = "QP with non-convex objective"
logger.error('Model has non-convex objective: {0!s}', mdl.objective_expr.to_readable_string())
elif 1016 == cpx_code:
# this is the: CPXERR_RESTRICTED_VERSION - " Promotional version. Problem size limits exceeded." case
status = 1016
status_string = "Promotional version. Problem size limits exceeded., CPLEX code=1016."
logger.fatal(status_string)
else:
logger.error("CPLEX Solver Error: {0!s}", cpxse)
return status, status_string
    def _run_cpx_solve_fn(self, cpx_fn, ok_statuses, *args):
        """Run a CPLEX solve-like function and wrap its outcome.

        Parameters:
        cpx_fn -- the bound CPLEX callable to invoke (e.g. feasopt)
        ok_statuses -- collection of CPLEX status codes treated as success
        *args -- forwarded to cpx_fn

        Returns a solution object when the final status is in ok_statuses,
        else None. SolveDetails are always stored in
        self._last_solve_details, even on error (the 'finally' block runs
        unconditionally).
        """
        cpx = self._cplex
        cpx_time_start = cpx.get_time()
        cpx_dettime_start = cpx.get_dettime()
        # Pessimistic defaults, overwritten on a successful call.
        cpx_status = -1
        cpx_status_string = "*unknown*"
        cpx_miprelgap = None
        cpx_bestbound = None
        linear_nonzeros = -1
        nb_columns = 0
        cpx_probtype = None
        solve_ok = False
        logger = self.error_handler
        # noinspection PyPep8
        try:
            linear_nonzeros = cpx.linear_constraints.get_num_nonzeros()
            nb_columns = cpx.variables.get_num()
            cpx_fn(*args)
            cpx_status = cpx.solution.get_status()
            cpx_probtype = self._problem_type_string
            cpx_status_string = self._cplex.solution.get_status_string(cpx_status)
            solve_ok = cpx_status in ok_statuses
            if solve_ok:
                if cpx._is_MIP():
                    # MIP-only quality data: relative gap and best bound.
                    cpx_miprelgap = cpx.solution.MIP.get_mip_relative_gap()
                    cpx_bestbound = cpx.solution.MIP.get_best_objective()
        except self.cpx_adapter.CplexSolverError as cpx_s:  # pragma: no cover
            # Delegate error-code translation (non-convexity, promotional
            # size limits, ...) to the shared classmethod.
            new_status, new_s_status = self.handle_cplex_solver_error(logger, self._model, cpx_s, cpx_status,
                                                                      cpx_status_string)
            cpx_status, cpx_status_string = new_status, new_s_status
        except self.cpx_adapter.exceptions.CplexError as cpx_e:  # pragma: no cover
            logger.error("CPLEX Error: {0!s}", cpx_e)
        finally:
            cpx_time = cpx.get_time() - cpx_time_start
            cpx_dettime = cpx.get_dettime() - cpx_dettime_start
            details = SolveDetails(cpx_time, cpx_dettime,
                                   cpx_status, cpx_status_string,
                                   cpx_probtype,
                                   nb_columns, linear_nonzeros,
                                   cpx_miprelgap,
                                   cpx_bestbound)
            self._last_solve_details = details
        if solve_ok:
            sol = self._make_solution(self._model, self.get_solve_status())
        else:
            sol = None
        return sol
    def get_solve_details(self):
        """Return the SolveDetails of the last solve, or None if never solved."""
        # must be solved but not necessarily ok
        return self._last_solve_details
def _make_groups(self, relaxable_groups):
cpx_feasopt = self._cplex.feasopt
all_groups = []
for (pref, group_cts) in relaxable_groups:
if pref > 0 and group_cts:
linears = []
quads = []
inds = []
for ct in group_cts:
ctindex = ct.index
cpx_scope = ct.cplex_scope
if cpx_scope is CplexScope.LINEAR_CT_SCOPE:
linears.append(ctindex)
elif cpx_scope is CplexScope.IND_CT_SCOPE:
inds.append(ctindex)
elif cpx_scope is CplexScope.QUAD_CT_SCOPE:
quads.append(ctindex)
else:
self.logger.error('cannot relax this: {0!s}'.format(ct))
if linears:
all_groups.append(cpx_feasopt.linear_constraints(pref, linears))
if quads:
all_groups.append(cpx_feasopt.quadratic_constraints(pref, quads))
if inds:
all_groups.append(cpx_feasopt.indicator_constraints(pref, inds))
return all_groups
    def _decode_infeasibilities(self, cpx, model, cpx_relax_groups, model_scope_resolver=None):
        """Map relaxed model constraints to their infeasibility amounts.

        Parameters:
        cpx -- the native CPLEX object holding the relaxed solution
        model -- the docplex model whose constraints are resolved by index
        cpx_relax_groups -- the feasopt groups previously passed to CPLEX
        model_scope_resolver -- optional {ct_type: fn(model) -> scope}
            override for resolving constraint indices back to objects

        Returns a {constraint: infeasibility} dict.
        """
        cpx_adapter = self.cpx_adapter
        if model_scope_resolver is None:
            # set default value for resolver
            model_scope_resolver = {cpx_adapter.ct_linear: lambda m_: m_._linct_scope,
                                    cpx_adapter.ct_quadratic: lambda m_: m_._quadct_scope,
                                    cpx_adapter.ct_indicator: lambda m_: m_._logical_scope
                                    }
        # Per-type CPLEX infeasibility query functions.
        resolver_map = {cpx_adapter.ct_linear: cpx.solution.infeasibility.linear_constraints,
                        cpx_adapter.ct_quadratic: cpx.solution.infeasibility.quadratic_constraints,
                        cpx_adapter.ct_indicator: cpx.solution.infeasibility.indicator_constraints
                        }
        cpx_sol_values = cpx.solution.get_values()
        cts_by_type = defaultdict(list)
        # split and group indices by sense
        for g in cpx_relax_groups:
            # gp is a list of tuples (pref, ctsense, index)
            for t in g._gp:
                ct_sense, ct_index = t[1][0]
                cts_by_type[ct_sense].append(ct_index)
        infeas_map = {}
        for ct_sense, indices in cts_by_type.items():
            if indices:
                resolver_fn = resolver_map[ct_sense]
                # One batched query per constraint type.
                ctype_infeas = resolver_fn(cpx_sol_values, indices)
                mscope = model_scope_resolver[ct_sense](model)
                assert mscope is not None
                # noinspection PyArgumentList
                for ct_index, ct_infeas in zip(indices, ctype_infeas):
                    # Skip indices that do not resolve to a model object.
                    ct = mscope.get_object_by_index(ct_index)
                    if ct is not None:
                        infeas_map[ct] = ct_infeas
        return infeas_map
    def solve_relaxed(self, mdl, prio_name, relaxable_groups, relax_mode, parameters=None):
        """Run CPLEX feasopt on the given relaxable groups.

        Temporarily overrides the feasopt mode parameter, solves, and on
        success attaches the per-constraint infeasibilities to the
        solution. Returns the relaxed solution, or None when relaxation
        failed.
        """
        # INTERNAL
        self._resync_if_needed()
        self.sync_cplex(mdl)
        if mdl.clean_before_solve:
            self.clean_before_solve()
        self_cplex = self._cplex
        cpx_relax_groups = self._make_groups(relaxable_groups)
        feasopt_parameters = parameters or mdl.parameters
        feasopt_override_params = {feasopt_parameters.feasopt.mode: relax_mode.value}
        # The context manager restores the original parameter values on exit.
        with _CplexOverwriteParametersCtx(self_cplex, feasopt_override_params) as cpx:
            # at this stage, we have a list of groups
            # each group is itself a list
            # the first item is a number, the preference
            # the second item is a list of constraint indices.
            relaxed_sol = self._run_cpx_solve_fn(cpx.feasopt,
                                                 self._CPLEX_RELAX_OK_STATUSES,
                                                 *cpx_relax_groups)
        if relaxed_sol is not None:
            infeas_map = self._decode_infeasibilities(self_cplex, mdl, cpx_relax_groups)
            relaxed_sol.store_infeasibilities(infeas_map)
        return relaxed_sol
def _sync_parameter_defaults_from_cplex(self, parameters):
# used when a more recent CPLEX DLL is present
resets = []
for param in parameters:
cpx_value = self.get_parameter(param)
if cpx_value != param.default_value:
resets.append((param, param.default_value, cpx_value))
param.reset_default_value(cpx_value)
return resets
def _make_cplex_default_groups(self, mdl):
cpx_cst = self.cpx_cst
def make_atom_group(pref, obj, con):
return pref, ((con, obj.index),)
grs = []
# add all linear constraints with pref=2.0
lc_pref = 2.0
for lc in mdl.iter_linear_constraints():
grs.append(make_atom_group(lc_pref, lc, cpx_cst.CPX_CON_LINEAR))
# add quadratic w 2
quad_pref = 2.0
for qc in mdl.iter_quadratic_constraints():
grs.append((make_atom_group(quad_pref, qc, cpx_cst.CPX_CON_QUADRATIC)))
ind_pref = 1.0
for lc in mdl.iter_logical_constraints():
grs.append((make_atom_group(ind_pref, lc, cpx_cst.CPX_CON_INDICATOR)))
var_bounds_pref = 4.0
inf = mdl.infinity
for dv in mdl.iter_variables():
if not dv.is_binary():
if dv.lb != 0:
grs.append(make_atom_group(var_bounds_pref, dv, cpx_cst.CPX_CON_LOWER_BOUND))
if dv.ub < inf:
grs.append(make_atom_group(var_bounds_pref, dv, cpx_cst.CPX_CON_UPPER_BOUND))
else:
# do not include free binary variables.
if dv.lb >= 0.5:
grs.append(make_atom_group(var_bounds_pref, dv, cpx_cst.CPX_CON_LOWER_BOUND))
if dv.ub <= 0.5:
grs.append(make_atom_group(var_bounds_pref, dv, cpx_cst.CPX_CON_UPPER_BOUND))
# add pwl with 1
# add sos with 1
sos_pref = 1.0
for sos in mdl.iter_sos():
grs.append(make_atom_group(sos_pref, sos, cpx_cst.CPX_CON_SOS))
return grs
    def refine_conflict(self, mdl, preferences=None, groups=None, parameters=None):
        """Invoke the CPLEX conflict refiner on the model.

        Parameters:
        mdl -- the docplex model to refine
        preferences -- optional {constraint: preference} weighting
        groups -- optional explicit constraint groups; when absent,
            default groups mimicking the CPLEX interactive are built
        parameters -- optional parameter overrides applied before refining

        Returns a ConflictRefinerResult. On DOcplexException the model's
        solution is cleared and the exception is re-raised.
        """
        try:
            # sync parameters
            mdl._apply_parameters_to_engine(parameters)
            self.sync_cplex(mdl)
            cpx = self._cplex
            # NOTE(review): use_all is always False here, so the 'else'
            # branch below is currently unreachable scaffolding.
            use_all = False
            if not groups:
                # no groups are specified.
                # emulate cplex interactive here:
                # linear constraints
                if not use_all:
                    cplex_def_grs = self._make_cplex_default_groups(mdl)
                    # print("--- initial #groups = {0}".format(len(cplex_def_grs)))
                    cpx.conflict.refine(*cplex_def_grs)
                else:
                    all_constraints = cpx.conflict.all_constraints()
                    if preferences:
                        grps = self._build_weighted_constraints(mdl, all_constraints._gp, preferences)
                        cpx.conflict.refine(grps)
                    else:
                        cpx.conflict.refine(all_constraints)
            else:
                groups_def = [self._build_group_definition_with_index(grp) for grp in groups]
                cpx.conflict.refine(*groups_def)
            return self._get_conflicts_local(mdl, cpx)
        except DOcplexException as docpx_e:  # pragma: no cover
            mdl._set_solution(None)
            raise docpx_e
def _build_group_definition_with_index(self, cts_group):
return cts_group.preference, tuple([(self._get_refiner_type(ct), ct.index)
for ct in cts_group.iter_constraints()])
def _get_refiner_type(self, conflict_arg):
cpx_cst = self.cpx_cst
if isinstance(conflict_arg, VarLbConstraintWrapper):
return cpx_cst.CPX_CON_LOWER_BOUND
elif isinstance(conflict_arg, VarUbConstraintWrapper):
return cpx_cst.CPX_CON_UPPER_BOUND
elif conflict_arg.is_linear():
return cpx_cst.CPX_CON_LINEAR
elif conflict_arg.is_logical():
return cpx_cst.CPX_CON_INDICATOR
elif conflict_arg.is_quadratic:
return cpx_cst.CPX_CON_QUADRATIC
else:
conflict_arg.model.fatal("Type unknown (or not supported yet) for constraint: " + repr(conflict_arg))
def _build_weighted_constraints(self, mdl, groups, preferences=None):
    """Explode `groups` into singleton conflict groups, one per (type, id)
    member, optionally overriding preferences.

    Variable-bound members keep the incoming group preference; other members
    may have theirs overridden by the `preferences` {constraint: weight}
    mapping (numeric weights only).
    """
    cpx_cst = self.cpx_cst
    weighted_groups = []
    for (pref, seq) in groups:
        for (_type, _id) in seq:
            # BUGFIX: the original assigned the override to the loop
            # variable `pref`, leaking it to all subsequent members of the
            # same group; use a per-member preference instead.
            member_pref = pref
            if _type != cpx_cst.CPX_CON_LOWER_BOUND and _type != cpx_cst.CPX_CON_UPPER_BOUND:
                if preferences is not None:
                    ct = mdl.get_constraint_by_index(_id)
                    new_pref = preferences.get(ct, None)
                    if new_pref is not None and isinstance(new_pref, numbers.Number):
                        member_pref = new_pref
            weighted_groups.append((member_pref, ((_type, _id),)))
    return weighted_groups
def _get_conflicts_local(self, mdl, cpx):
def var_by_index(idx):
return mdl.get_var_by_index(idx)
try:
cpx_conflicts = cpx.conflict.get()
groups = cpx.conflict.get_groups()
except self.cpx_adapter.CplexSolverError:
# Return an empty list if no conflict is available
return ConflictRefinerResult(conflicts=[], refined_by="cplex")
cpx_cst = self.cpx_cst
conflicts = []
for (pref, seq), status in zip(groups, cpx_conflicts):
if status == | |
com_google_fonts_check_description_min_length(description):
"""DESCRIPTION.en_us.html must have more than 200 bytes."""
if len(description) <= 200:
yield FAIL,\
Message("too-short",
"DESCRIPTION.en_us.html must"
" have size larger than 200 bytes.")
else:
yield PASS, "DESCRIPTION.en_us.html is larger than 200 bytes."
@check(
    id = 'com.google.fonts/check/description/max_length',
    conditions = ['description'],
    rationale = """
        The fonts.google.com catalog specimen pages 2016 to 2020 were placed in a narrow area of the page.
        That meant a maximum limit of 1,000 characters was good, so that the narrow column did not extent that section of the page to be too long.
        But with the "v4" redesign of 2020, the specimen pages allow for longer texts without upsetting the balance of the page.
        So currently the limit before warning is 2,000 characters.
    """,
    proposal = 'legacy:check/006'
)
def com_google_fonts_check_description_max_length(description):
    """DESCRIPTION.en_us.html must have less than 2000 bytes."""
    size = len(description)
    if size < 2000:
        yield PASS, "DESCRIPTION.en_us.html is smaller than 2,000 bytes."
    else:
        yield FAIL,\
              Message("too-long",
                      "DESCRIPTION.en_us.html must"
                      " have size smaller than 2000 bytes.")
@check(
    id = 'com.google.fonts/check/description/eof_linebreak',
    conditions = ['description'],
    rationale = """
        Some older text-handling tools sometimes misbehave if the last line of data in a text file is not terminated with a newline character (also known as '\\n').
        We know that this is a very small detail, but for the sake of keeping all DESCRIPTION.en_us.html files uniformly formatted throughout the GFonts collection, we chose to adopt the practice of placing this final linebreak char on them.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2879'
)
def com_google_fonts_check_description_eof_linebreak(description):
    """DESCRIPTION.en_us.html should end in a linebreak."""
    # BUGFIX: use endswith() instead of description[-1] so an empty
    # description no longer raises IndexError; also fixed the
    # "characther" typo in the user-facing message.
    if not description.endswith('\n'):
        yield WARN,\
              Message("missing-eof-linebreak",
                      "The last character on DESCRIPTION.en_us.html"
                      " is not a line-break. Please add it.")
    else:
        yield PASS, ":-)"
@check(
    id = 'com.google.fonts/check/metadata/parses',
    conditions = ['family_directory'],
    rationale = """
        The purpose of this check is to ensure that the METADATA.pb file is not malformed.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2248'
)
def com_google_fonts_check_metadata_parses(family_directory):
    """Check METADATA.pb parse correctly."""
    from google.protobuf import text_format
    from fontbakery.utils import get_FamilyProto_Message
    try:
        pb_file = os.path.join(family_directory, "METADATA.pb")
        # Parsing for side effect only: raises on malformed metadata.
        get_FamilyProto_Message(pb_file)
        # BUGFIX: "successfuly" -> "successfully" in the PASS message.
        yield PASS, "METADATA.pb parsed successfully."
    except text_format.ParseError as e:
        yield FAIL,\
              Message("parsing-error",
                      f"Family metadata at {family_directory} failed to parse.\n"
                      f"TRACEBACK:\n{e}")
    except FileNotFoundError:
        yield SKIP,\
              Message("file-not-found",
                      f"Font family at '{family_directory}' lacks a METADATA.pb file.")
@check(
    id = 'com.google.fonts/check/metadata/unknown_designer',
    conditions = ['family_metadata'],
    proposal = ['legacy:check/007',
                'https://github.com/googlefonts/fontbakery/issues/800']
)
def com_google_fonts_check_metadata_unknown_designer(family_metadata):
    """Font designer field in METADATA.pb must not be 'unknown'."""
    designer = family_metadata.designer
    if designer.lower() != 'unknown':
        yield PASS, "Font designer field is not 'unknown'."
    else:
        yield FAIL,\
              Message("unknown-designer",
                      f"Font designer field is '{designer}'.")
@check(
    id = 'com.google.fonts/check/metadata/multiple_designers',
    conditions = ['family_metadata'],
    rationale = """
        For a while the string "Multiple designers" was used as a placeholder on METADATA.pb files. We should replace all those instances with actual designer names so that proper credits are displayed on the Google Fonts family specimen pages.
        If there's more than a single designer, the designer names must be separated by commas.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2766'
)
def com_google_fonts_check_metadata_multiple_designers(family_metadata):
    """Font designer field in METADATA.pb must not contain 'Multiple designers'."""
    designer = family_metadata.designer
    if 'multiple designer' not in designer.lower():
        yield PASS, "Looks good."
    else:
        yield FAIL,\
              Message("multiple-designers",
                      f"Font designer field is '{designer}'."
                      f" Please add an explicit comma-separated list of designer names.")
@check(
    id = 'com.google.fonts/check/metadata/designer_values',
    conditions = ['family_metadata'],
    rationale = """
        We must use commas instead of forward slashes because the server-side code at the fonts.google.com directory will segment the string on the commas into a list of names and display the first item in the list as the "principal designer" while the remaining names are identified as "contributors".
        See eg https://fonts.google.com/specimen/Rubik
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2479'
)
def com_google_fonts_check_metadata_designer_values(family_metadata):
    """Multiple values in font designer field in
    METADATA.pb must be separated by commas."""
    designer = family_metadata.designer
    if '/' not in designer:
        yield PASS, "Looks good."
    else:
        yield FAIL,\
              Message("slash",
                      f"Font designer field contains a forward slash"
                      f" '{designer}'."
                      f" Please use commas to separate multiple names instead.")
@check(
    id = 'com.google.fonts/check/metadata/broken_links',
    conditions = ['family_metadata']
)
def com_google_fonts_check_metadata_broken_links(family_metadata):
    """Does METADATA.pb copyright field contain broken links?"""
    import requests
    broken_links = []
    # URLs/addresses already handled, so each is reported at most once.
    unique_links = []
    for font_metadata in family_metadata.fonts:
        copyright = font_metadata.copyright
        if "mailto:" in copyright:
            # avoid reporting more than once
            if copyright in unique_links:
                continue
            unique_links.append(copyright)
            yield INFO,\
                  Message("email",
                          f"Found an email address: {copyright}")
            continue
        if "http" in copyright:
            # Extract everything from the first "http" onward ...
            link = "http" + copyright.split("http")[1]
            # ... then trim the URL at the first space or closing paren.
            for endchar in [' ', ')']:
                if endchar in link:
                    link = link.split(endchar)[0]
            # avoid requesting the same URL more than once
            if link in unique_links:
                continue
            unique_links.append(link)
            try:
                # HEAD keeps the probe cheap; follow redirects so a
                # permanently-moved page still counts as reachable.
                response = requests.head(link, allow_redirects=True, timeout=10)
                code = response.status_code
                # Status 429: "Too Many Requests" is acceptable
                # because it means the website is probably ok and
                # we're just perhaps being too aggressive in probing the server!
                if code not in [requests.codes.ok,
                                requests.codes.too_many_requests]:
                    broken_links.append(("{} (status code: {})").format(link, code))
            except requests.exceptions.Timeout:
                # A timeout is only a WARN: the link may be fine but slow.
                yield WARN,\
                      Message("timeout",
                              f"Timed out while attempting to access: '{link}'."
                              f" Please verify if that's a broken link.")
            except requests.exceptions.RequestException:
                # Connection-level failure: treat as broken.
                broken_links.append(link)
    if len(broken_links) > 0:
        broken_links_list = '\n\t'.join(broken_links)
        yield FAIL,\
              Message("broken-links",
                      f"The following links are broken"
                      f" in the METADATA.pb file:\n\t"
                      f"{broken_links_list}")
    else:
        yield PASS, "All links in the METADATA.pb file look good!"
@check(
    id = 'com.google.fonts/check/metadata/undeclared_fonts',
    conditions = ['family_metadata'],
    rationale = """
        The set of font binaries available, except the ones on a "static" subdir, must match exactly those declared on the METADATA.pb file.
        Also, to avoid confusion, we expect that font files (other than statics) are not placed on subdirectories.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2575'
)
def com_google_fonts_check_metadata_undeclared_fonts(family_metadata, family_directory):
    """Ensure METADATA.pb lists all font binaries."""
    # Filenames declared in METADATA.pb.
    pb_binaries = []
    for font_metadata in family_metadata.fonts:
        pb_binaries.append(font_metadata.filename)
    passed = True
    # Font binaries actually present on the family root directory.
    binaries = []
    for entry in os.listdir(family_directory):
        if entry != "static" and os.path.isdir(os.path.join(family_directory, entry)):
            for filename in os.listdir(os.path.join(family_directory, entry)):
                if filename[-4:] in [".ttf", ".otf"]:
                    path = os.path.join(family_directory, entry, filename)
                    passed = False
                    yield WARN,\
                          Message("font-on-subdir",
                                  f'The file "{path}" is a font binary'
                                  f' in a subdirectory.\n'
                                  f'Please keep all font files (except VF statics) directly'
                                  f' on the root directory side-by-side'
                                  f' with its corresponding METADATA.pb file.')
        else:
            # Note: This does not include any font binaries placed in a "static" subdir!
            if entry[-4:] in [".ttf", ".otf"]:
                binaries.append(entry)
    # BUGFIX: both messages below used to contain the literal text
    # "(unknown)" instead of interpolating the offending file name,
    # leaving the `filename` loop variable unused.
    for filename in sorted(set(pb_binaries) - set(binaries)):
        passed = False
        yield FAIL,\
              Message("file-missing",
                      f'The file "{filename}" declared on METADATA.pb'
                      f' is not available in this directory.')
    for filename in sorted(set(binaries) - set(pb_binaries)):
        passed = False
        yield FAIL,\
              Message("file-not-declared",
                      f'The file "{filename}" is not declared on METADATA.pb')
    if passed:
        yield PASS, "OK"
@check(
    id = 'com.google.fonts/check/metadata/category',
    conditions = ['family_metadata'],
    rationale = """
        There are only five acceptable values for the category field in a METADATA.pb file:
        - MONOSPACE
        - SANS_SERIF
        - SERIF
        - DISPLAY
        - HANDWRITING
        This check is meant to avoid typos in this field.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2972'
)
def com_google_fonts_check_metadata_category(family_metadata):
    """Ensure METADATA.pb category field is valid."""
    valid_categories = ("MONOSPACE",
                        "SANS_SERIF",
                        "SERIF",
                        "DISPLAY",
                        "HANDWRITING")
    category = family_metadata.category
    if category in valid_categories:
        yield PASS, "OK!"
    else:
        yield FAIL,\
              Message('bad-value',
                      f'The field category has "{category}"'
                      f' which is not valid.')
@disable  # TODO: re-enable after addressing issue #1998
@check(
    id = 'com.google.fonts/check/family/equal_numbers_of_glyphs',
    conditions = ['are_ttf',
                  'stylenames_are_canonical'],
    proposal = 'legacy:check/011'
)
def com_google_fonts_check_family_equal_numbers_of_glyphs(ttFonts):
    """Fonts have equal numbers of glyphs?"""
    from .googlefonts_conditions import canonical_stylename
    # ttFonts is an iterator, so here we make a list from it
    # because we'll have to iterate twice in this check implementation:
    the_ttFonts = list(ttFonts)
    failed = False
    # First pass: find the font with the largest glyph set; it becomes the
    # reference the other styles are compared against.
    max_stylename = None
    max_count = 0
    max_glyphs = None
    for ttFont in the_ttFonts:
        fontname = ttFont.reader.file.name
        stylename = canonical_stylename(fontname)
        this_count = len(ttFont['glyf'].glyphs)
        if this_count > max_count:
            max_count = this_count
            max_stylename = stylename
            max_glyphs = set(ttFont['glyf'].glyphs)
    # Second pass: report every font whose glyph count differs.
    for ttFont in the_ttFonts:
        fontname = ttFont.reader.file.name
        stylename = canonical_stylename(fontname)
        these_glyphs = set(ttFont['glyf'].glyphs)
        this_count = len(these_glyphs)
        if this_count != max_count:
            failed = True
            # Glyphs present in exactly one of the two fonts.
            diff = sorted(max_glyphs.symmetric_difference(these_glyphs))
            diff_count = len(diff)
            # BUGFIX: previously the names were joined into one string and
            # then `sorted()` was applied to that string inside the message,
            # producing a sorted list of single characters. Sort the glyph
            # names first, then join/truncate for display.
            if diff_count < 10:
                shown = ", ".join(diff)
            else:
                shown = ", ".join(diff[:10]) + " (and more)"
            yield FAIL,\
                  Message("glyph-count-diverges",
                          f"{stylename} has {this_count} glyphs while"
                          f" {max_stylename} has {max_count} glyphs."
                          f" There are {diff_count} different glyphs"
                          f" among them: {shown}")
    if not failed:
        # BUGFIX: "ammount" -> "amount".
        yield PASS, ("All font files in this family have"
                     " an equal total amount of glyphs.")
@disable # TODO: re-enable after addressing issue #1998
@check(
id = 'com.google.fonts/check/family/equal_glyph_names',
| |
def __getattr__
# end class Pkg_NS
### Scope creation methods
@classmethod
def load (cls, app_type, db_url, user = None) :
    """Load a scope for `app_type` from `db_url`.
    Depending on `app_type.EMS`, `load` might load all instances from
    `db_url` into the application or it might just connect to the
    database and load instances on demand in answer to queries.
    """
    # Normalize the raw url through the app-type's Url factory.
    db_url = app_type.Url (db_url)
    with cls._init_context (app_type, db_url, user) as self :
        # Re-read `app_type` from the freshly built scope instead of
        # trusting the argument (``_init_context`` assigns it).
        app_type = self.app_type
        # Connect the entity-manager-system to the existing database.
        self.ems = ems = app_type.EMS.connect (self, db_url)
        with self._init_root_context () :
            # The root object is restored from the database, not created.
            self._register_root (ems.load_root ())
        return self
# end def load
@classmethod
def new (cls, app_type, db_url, user = None, root_spec = None) :
    """Create a scope for `app_type` for a new database `db_url`.
    If `app_type` has a :attr:`~_MOM.App_Type.App_Type.Root_Type`,
    `new` requires a `root_spec` passed in.
    `root_spec` must be one of:
    * a proper epk-tuple for :attr:`~_MOM.App_Type.App_Type.Root_Type`
    * a callable that takes the scope as its single parameter and
      returns the root object.
    """
    db_url = app_type.Url (db_url)
    with cls._init_context (app_type, db_url, user, root_spec) as self :
        app_type = self.app_type
        # A brand-new scope gets a fresh guid before the EMS is created.
        self.guid = self._new_guid ()
        # `EMS.new` creates (rather than connects to) the database.
        self.ems = ems = app_type.EMS.new (self, db_url)
        with self._init_root_context (root_spec) :
            # Create/register the root object according to `root_spec`.
            self._setup_root (app_type, root_spec)
        # Persist this scope's identity in the new database.
        ems.register_scope ()
        return self
# end def new
@classmethod
@TFL.Contextmanager
def _init_context (cls, app_type, db_url, user, root_spec = None) :
    # Shared construction context for `load` and `new`: builds a bare
    # scope instance, temporarily installs it as `Scope.active`, and
    # yields it to the caller for EMS/root setup. On success the scope
    # is registered in `Scope.Table`; on failure the previously active
    # scope is restored and the exception propagates.
    #
    # `cls.__new__` is used deliberately: `__init__` is blocked (it
    # raises TypeError) to force construction through load/new.
    self = cls.__new__ (cls)
    self.app_type = app_type
    self.db_url = db_url
    self.user = user
    self.bname = ""
    self.id = self._new_id ()
    self.root = None
    # Tracks changes made in this scope (undo/history).
    self.historian = MOM.SCM.Tracker (self)
    self.db_errors = []
    self._attr_errors = []
    self._etm = {}
    self._roots = {}
    self._setup_pkg_ns (app_type)
    ### copy `*_callback` from cls to self
    # Instance-level copies so per-scope registrations don't mutate the
    # class-level callback lists.
    self.after_commit_callback = self.after_commit_callback.copy ()
    self.init_callback = self.init_callback.copy ()
    self.kill_callback = self.kill_callback.copy ()
    old_active = Scope.active
    try :
        Scope.active = self
        yield self
        # Only a fully initialized scope runs callbacks, starts change
        # recording, and becomes reachable through `Scope.Table`.
        self._run_init_callbacks ()
        self.start_change_recorder ()
        Scope.Table [self.id] = self
    except :
        # Restore the previously active scope before re-raising.
        Scope.active = old_active
        raise
# end def _init_context
@TFL.Contextmanager
def _init_root_context (self, root_spec = None) :
    # Hook wrapped around root-object setup in `load`/`new`. The default
    # implementation provides no extra context; derived scope classes may
    # override it to establish one.
    yield
# end def _init_root_context
def __init__ (self) :
    """Direct instantiation is forbidden; scopes are built via the
    `new` and `load` classmethods (which use ``cls.__new__``).
    """
    msg = "Use {name}.new or {name}.load to create new scopes".format \
        (name = self.__class__.__name__)
    raise TypeError (msg)
# end def __init__
### Scope methods
def add (self, entity, pid = None) :
    """Adds `entity` to scope `self`."""
    # Adopt the entity unless it already belongs to a scope.
    if entity._home_scope is None :
        entity.home_scope = self
    # The save point rolls the EMS back if any step below fails.
    with self.ems.save_point () :
        self.ems.add (entity, pid = pid)
        # Finish two-phase initialization once the entity is stored.
        if not entity.init_finished :
            entity._finish__init__ ()
        # Record a Create change for undo/history.
        self.record_change (MOM.SCM.Change.Create, entity)
# end def add
def add_from_pickle_cargo (self, type_name, cargo, pid) :
    """Add an entity defined by (`type_name`, `pid`, `cargo`)."""
    # Resolve the scope-specific entity type; unknown names yield None
    # and are silently skipped (the caller sees a None result).
    Type = self.entity_type (type_name)
    if Type :
        try :
            result = Type.from_attr_pickle_cargo (self, cargo)
        except Exception as exc :
            # Restoration failures are collected (not raised) so a single
            # corrupt entity doesn't abort loading a whole database.
            self.db_errors.append ((type_name, pid, cargo))
            if __debug__ :
                traceback.print_exc ()
                print (repr (exc), file = sys.stderr)
                print \
                    ( "  add_from_pickle_cargo: couldn't restore"
                      " %s %s %s (app-type %s)"
                    % (type_name, pid, cargo, self.app_type)
                    , file = sys.stderr
                    )
        else :
            # Store under the *original* pid to keep references stable.
            self.ems.add (result, pid = pid)
            if pid == self.root_pid :
                self._register_root (result)
            if not result.init_finished :
                result._finish__init__ ()
            return result
# end def add_from_pickle_cargo
@TFL.Meta.Class_and_Instance_Method
def add_after_commit_callback (soc, * callbacks) :
    """Register `callbacks` to run after each `.commit` of a scope.

    Each callback receives the scope and the MOM.SCM.Summary instance of
    the just committed changes. Works on the class (affecting all future
    scopes) or on a single instance.
    """
    for cb in callbacks :
        soc.after_commit_callback.append (cb)
# end def add_after_commit_callback
@TFL.Meta.Class_and_Instance_Method
def add_init_callback (soc, * callbacks) :
    """Register `callbacks` to run whenever a scope is created.

    Each callback receives the new scope as its single argument. Works on
    the class (affecting all future scopes) or on a single instance.
    """
    for cb in callbacks :
        soc.init_callback.append (cb)
# end def add_init_callback
@TFL.Meta.Class_and_Instance_Method
def add_kill_callback (soc, * callbacks) :
    """Register `callbacks` to run whenever a scope is destroyed.

    Each callback receives the scope being destroyed as its single
    argument. Works on the class or on a single instance.
    """
    for cb in callbacks :
        soc.kill_callback.append (cb)
# end def add_kill_callback
@TFL.Contextmanager
def as_active (self) :
    """Provide context with `self` as active scope."""
    # `Scope.LET` temporarily rebinds the class attribute `active` and
    # restores the previous value when the context exits.
    with Scope.LET (active = self) :
        yield
# end def as_active
def canonical_type_name (self, type_name) :
    """Map a possibly deprecated `type_name` to its canonical spelling;
    unknown names are returned unchanged.
    """
    mapping = self._deprecated_type_names
    if type_name in mapping :
        return mapping [type_name]
    return type_name
# end def canonical_type_name
def commit (self) :
    """Commit all outstanding changes to the database."""
    ems = self.ems
    # Snapshot the pending-change summary before committing; it is also
    # passed to the after-commit callbacks below.
    ucc = ems.uncommitted_changes
    with ems.commit_context () :
        if ucc :
            # Validate every entity touched (transitively) by the pending
            # changes; refuse to commit on any invariant violation.
            errs = self.r_incorrect (eiter = ucc.entities_transitive (ems))
            if errs :
                exc = MOM.Error.Invariants (errs.errors)
                raise exc
        ems.commit ()
    # Observers get the scope plus the just-committed change summary.
    for cb in self.after_commit_callback :
        cb (self, ucc)
# end def commit
def copy (self, app_type, db_url) :
    """Copy all entities and change-history of `self` into a new
    scope for `app_type` and `db_url`.
    """
    # Both app-types must share the same parent (same meta model).
    assert self.app_type.parent is app_type.parent
    db_url = app_type.Url (db_url)
    # Guard against copying a scope onto its own database.
    assert ( db_url is None
        or not db_url.path
        or self.db_url.path != db_url.path
        )
    with self.as_active () :
        result = self.__class__.new (app_type, db_url, user = self.user)
        result.root_pid = self.root_pid
        # Replay entities in pid order so inter-entity references can be
        # resolved as they are restored.
        for e in sorted (self, key = TFL.Getter.pid) :
            result.add_from_pickle_cargo (* e.as_pickle_cargo ())
        # Replay the change history in cid order.
        for c in self.query_changes ().order_by (TFL.Sorted_By ("cid")) :
            result.ems.register_change (c)
    result.ems.compact ()
    return result
# end def copy
def count_change (self) :
    # Delegate change counting to the scope's change historian.
    self.historian.count_change ()
# end def count_change
def close_connections (self) :
    # Close the EMS's database connections without destroying the scope.
    self.ems.close_connections ()
# end def close_connections
def destroy (self) :
    """Close connection to database and destroy all cached instances."""
    self.ems.close ()
    # De-register from the global scope table, if registered.
    if self.id in Scope.Table :
        del Scope.Table [self.id]
    self.stop_change_recorder ()
    self.app_type.run_kill_callbacks (self)
    # Run (and then drop) the per-scope kill callbacks.
    for c in self.kill_callback :
        c (self)
    del self.kill_callback
    self.root = None
    for d in (self._roots, self._pkg_ns) :
        d.clear ()
    ### XXX break circular links (references to this scope from
    ### importers... )
    # NOTE(review): identity comparison (`is`) would be more idiomatic
    # here than `==` -- confirm no custom `__eq__` is relied upon.
    if Scope.active == self :
        Scope.active = None
    # Drop every remaining attribute; the scope is unusable afterwards.
    self.__dict__.clear ()
# end def destroy
@classmethod
def destroy_all (cls) :
    """Destroy every registered scope, newest id first; individual
    failures are ignored so the remaining scopes still get destroyed.
    """
    scopes = sorted (pyk.iteritems (Scope.Table), reverse = True)
    for _id, scope in scopes :
        try :
            scope.destroy ()
        except Exception :
            pass
# end def destroy_all
def entity_iter (self) :
    """Yield every object and link currently alive in this scope, in
    unspecified order.
    """
    ems = self.ems
    return iter (ems)
# end def entity_iter
def entity_iter_gauge (self, gauge = None, sort_key = None, label = None) :
    """Yields all entities alive in `self` in the
    order specified by `sort_key`, advancing `gauge` once per 100
    entities yielded.
    """
    ### BUGFIX: the default used to be ``gauge = Gauge_Logger ()`` -- a
    ### mutable default evaluated once at definition time and therefore
    ### shared (and `reset`) across all calls. Create one per call.
    if gauge is None :
        gauge = Gauge_Logger ()
    gauge.reset \
        ( g_delta = 100
        , g_range = self.ems.count (self.MOM.Id_Entity, strict = False)
        , label   = label
        )
    entities = iter (self.ems)
    if sort_key :
        entities = sorted (entities, key = sort_key)
    for i, e in enumerate (entities, 1) :
        yield e
        # Same cadence as the original hand-rolled counter: tick the
        # gauge after the 100th, 200th, ... entity.
        if i % 100 == 0 :
            gauge.inc (100)
# end def entity_iter_gauge
def entity_type (self, entity) :
    """Return scope specific entity type for `entity` (-name)."""
    if isinstance (entity, pyk.string_types) :
        type_name = entity
    else :
        type_name = entity.type_name
    canonical = self.canonical_type_name (type_name)
    return self.app_type.entity_type (canonical)
# end def entity_type
@TFL.Contextmanager
def example_etm (self, etm) :
    """Return a E_Type_Manager for the E_Type of `etm` in an example scope."""
    # `_Example.context` provides a throwaway scope mirroring `self`;
    # looking up by type_name yields the corresponding manager there.
    with self._Example.context (self) as x_scope :
        x_etm = x_scope [etm.type_name]
        yield x_etm
# end def example_etm
@TFL.Meta.Lazy_Method_RLV
def g_incorrect (self, gauge = None) :
    """Returns all objects which are globally incorrect (i.e., violating
    the object's `system` predicates).
    """
    ### BUGFIX: avoid the shared mutable default ``Gauge_Logger ()``
    ### (evaluated once at definition time); create one per call.
    if gauge is None :
        gauge = Gauge_Logger ()
    with self.as_active () :
        return self._check_inv (gauge, "system")
# end def g_incorrect
def has_changed (self) :
    """True iff something saveworthy has changed, i.e., there are
    outstanding changes waiting to be committed.
    """
    pending = self.ems.uncommitted_changes
    return bool (pending)
# end def has_changed
@TFL.Meta.Lazy_Method_RLV
def i_incorrect (self, gauge = None) :
    """Returns all objects which are object-wise incorrect (i.e., violating
    the object's `object` predicates).
    """
    ### BUGFIX: avoid the shared mutable default ``Gauge_Logger ()``
    ### (evaluated once at definition time); create one per call.
    if gauge is None :
        gauge = Gauge_Logger ()
    with self.as_active () :
        return self._check_inv (gauge, "object")
# end def i_incorrect
@TFL.Contextmanager
def nested_change_recorder (self, Change, * args, ** kw) :
    """Return context with `Change (* args, ** kw)` acting as nested
    change recorder.
    """
    with self.historian.nested_recorder (Change, * args, ** kw) as c :
        yield c
        # Only a non-empty recorded change is attributed, registered
        # with the EMS, and has its callbacks fired.
        if c :
            c.user = self.user
            self.ems.register_change (c)
            c.do_callbacks (self)
# end def nested_change_recorder
def pid_query (self, pid, allow_zombie = False) :
    """Return the entity with permanent id `pid`.

    Raises LookupError when the entity was destroyed, unless
    `allow_zombie` is set.
    """
    result = self.ems.pid_query (pid)
    is_zombie = isinstance (result, MOM._Id_Entity_Destroyed_Mixin_)
    if is_zombie and not allow_zombie :
        raise LookupError (pid)
    return result
# end | |
#!/usr/bin/python
import subprocess
import traceback
import time
import os
import sys
import socket
import random
import string
import shutil
import requests
import json
import getpass
import urllib3
import platform
import pwd
import glob
try:
    import distro
except ImportError:
    # BUGFIX: was a bare `except:` (would also swallow SystemExit /
    # KeyboardInterrupt). `distro` is optional; `platform` exposes a
    # compatible-enough API for the calls used in this script.
    print('Unable to find `distro` package hence using `platform`')
    distro = platform
class Distribution:
    """Lower-cased distribution-name prefixes used to branch per-OS logic.

    Values are compared against the string reported by distro/platform.
    """
    Ubuntu = "ubuntu"
    Debian = "debian"
    CENTOS = "centos"
    # NOTE(review): "red" is presumably the first word of the reported
    # "Red Hat ..." name, lower-cased -- confirm against os_type detection.
    RHEL = "red"
class KongSetup(object):
def __init__(self):
    """Initialize all setup state: paths, command locations, Kong/Konga
    configuration defaults, and optional non-interactive parameters taken
    from a JSON document on argv[1].
    """
    # --- values gathered from the user / JSON argument -----------------
    self.host_name = ''
    self.ip = ''
    # --- local working folders (relative to the setup script) ----------
    self.cert_folder = './certs'
    self.template_folder = './templates'
    self.output_folder = './output'
    self.system_folder = './system'
    self.tmp_folder = '/tmp'
    self.log_error = 'gluu-gateway-setup_error.log'
    self.log = 'gluu-gateway-setup.log'
    # --- absolute paths of external commands ---------------------------
    self.cmd_mkdir = '/bin/mkdir'
    self.openssl_command = '/usr/bin/openssl'
    self.cmd_chown = '/bin/chown'
    self.cmd_chmod = '/bin/chmod'
    self.cmd_ln = '/bin/ln'
    # NOTE(review): this clobbers the `host_name` value initialized above
    # with a command path -- it almost certainly should be a separate
    # attribute (e.g. `cmd_hostname`). Left unchanged because code outside
    # this view may rely on either meaning; verify before fixing.
    self.host_name = '/bin/hostname'
    self.cmd_touch = '/bin/touch'
    self.cmd_mv = '/bin/mv'
    self.cmd_cp = '/bin/cp'
    self.cmd_rm = '/bin/rm'
    self.cmd_node = '/usr/bin/node'
    self.cmd_update_rs_d = '/usr/sbin/update-rc.d'
    self.cmd_sh = '/bin/sh'
    self.cmd_bash = '/bin/bash'
    self.cmd_update_alternatives = 'update-alternatives'
    self.cmd_chkconfig = 'chkconfig'
    self.cmd_alternatives = 'alternatives'
    self.cmd_echo = '/bin/echo'
    self.cmd_service = 'service'
    # Resolved at runtime; empty string when systemctl is unavailable.
    self.cmd_systemctl = os.popen('which systemctl').read().strip()
    self.cmd_rpm = '/bin/rpm'
    # NOTE(review): duplicate assignment -- `cmd_echo` was already set above.
    self.cmd_echo = '/bin/echo'
    self.cmd_dpkg = '/usr/bin/dpkg'
    self.cmd_kong = '/usr/local/bin/kong'
    # --- certificate subject fields ------------------------------------
    self.country_code = ''
    self.state = ''
    self.city = ''
    self.org_name = ''
    self.admin_email = ''
    # --- Kong configuration --------------------------------------------
    self.kong_custom_plugins = 'gluu-oauth-auth,gluu-uma-auth,gluu-uma-pep,gluu-oauth-pep,gluu-metrics,gluu-openid-connect,gluu-opa-pep'
    self.kong_ssl_cert = ''
    self.kong_ssl_key = ''
    # NOTE(review): '<PASSWORD>' looks like a redacted placeholder, not a
    # real default -- confirm the intended default postgres password.
    self.pg_pwd = '<PASSWORD>'
    self.kong_admin_listen_ssl_port = '8445'
    self.kong_admin_listen_port = '8001'
    self.kong_lua_ssl_trusted_certificate = ''
    self.kong_lua_ssl_verify_depth = 3
    # --- Gluu metrics / licensing endpoints ----------------------------
    self.gluu_prometheus_server_ip = '172.16.58.3'
    self.gluu_prometheus_server_host = 'license.gluu.org'
    self.gluu_customer_registration_endpoint = 'https://%s:%s' % (self.gluu_prometheus_server_host, '4040/metrics/registration')
    # --- installed file locations --------------------------------------
    self.dist_kong_config_folder = '/etc/kong'
    self.dist_kong_config_file = '%s/kong.conf' % self.dist_kong_config_folder
    self.dist_lua_folder = '/usr/local/share/lua/5.1'
    self.dist_gluu_lua_folder = '%s/gluu' % self.dist_lua_folder
    self.dist_kong_plugins_folder = '%s/kong/plugins' % self.dist_lua_folder
    self.opt_folder = '/opt'
    self.dist_gluu_gateway_folder = '%s/gluu-gateway' % self.opt_folder
    self.dist_gluu_gateway_ui_folder = '%s/gluu-gateway-ui' % self.opt_folder
    self.dist_gluu_gateway_setup_folder = '%s/gluu-gateway-setup' % self.opt_folder
    self.dist_gluu_gateway_ui_assest_folder = '%s/assets' % self.dist_gluu_gateway_ui_folder
    self.dist_gluu_gateway_ui_config_folder = '%s/config' % self.dist_gluu_gateway_ui_folder
    self.dist_gluu_gateway_ui_config_file = '%s/config/local.js' % self.dist_gluu_gateway_ui_folder
    self.gg_plugins_folder = '%s/lib/kong/plugins' % self.dist_gluu_gateway_folder
    # Stock Kong auth plugins disabled in favour of the Gluu ones.
    self.disable_plugin_list = ['ldap-auth', 'key-auth', 'basic-auth', 'jwt', 'oauth2', 'hmac-auth']
    self.gg_comman_folder = '%s/lib/gluu' % self.dist_gluu_gateway_folder
    self.dist_oxd_server_folder = '%s/oxd-server' % self.opt_folder
    self.dist_oxd_server_config_folder = '%s/conf' % self.dist_oxd_server_folder
    self.dist_oxd_server_config_file = '%s/oxd-server.yml' % self.dist_oxd_server_config_folder
    self.gg_service = 'gluu-gateway'
    self.oxd_server_service = 'oxd-server'  # change this when oxd-server-4.0 is released
    # oxd kong Property values
    self.gluu_gateway_ui_port = '1338'
    self.gluu_gateway_ui_policy_type = 'uma_rpt_policy'
    self.gluu_gateway_ui_oxd_id = ''
    self.gluu_gateway_ui_op_host = ''
    self.gluu_gateway_ui_client_id = ''
    self.gluu_gateway_ui_client_secret = ''
    self.gluu_gateway_ui_oxd_web = ''
    self.gluu_gateway_ui_kong_admin_web_url = 'http://localhost:%s' % self.kong_admin_listen_port
    self.gluu_gateway_ui_oxd_version = '4.2.2'
    self.gg_version = '4.2.2'
    self.postgres_version = '10.x'
    # oxd licence configuration
    self.generate_client = True
    self.gluu_gateway_ui_redirect_uri = 'localhost'
    # JRE setup properties
    self.jre_version = '162'
    self.jre_destination_path = '/opt/jdk1.8.0_%s' % self.jre_version
    self.gg_dist_folder = '%s/dist' % self.dist_gluu_gateway_folder
    self.gg_dist_app_folder = '%s/app' % self.gg_dist_folder
    self.jre_home = '/opt/jre'
    self.jre_sh_file_name = 'jre-gluu.sh'
    # Interactive by default; switched off when a JSON argument is given.
    self.is_prompt = True
    self.license = False
    self.init_parameters_from_json_argument()
    # OS types properties
    self.os_types = ['centos', 'red', 'ubuntu']
    self.os_type = None
    self.os_version = None
    self.os_initdaemon = None
    # log-rotate kong config file
    self.dist_kong_log_rotate_config_path = '/etc/logrotate.d'
    self.kong_log_rotate_config_file = 'kong_logrotate'
    # PostgreSQL config file path
    self.dist_pg_hba_config_path = '/var/lib/pgsql/10/data'
    self.dist_pg_hba_config_file = '%s/pg_hba.conf' % self.dist_pg_hba_config_path
    # dependency zips
    self.gg_node_modules_folder = "%s/node_modules" % self.dist_gluu_gateway_ui_folder
    self.gg_bower_modules_folder = "%s/bower_components" % self.dist_gluu_gateway_ui_assest_folder
    self.gg_node_modules_archive = 'gg_node_modules.tar.gz'
    self.gg_bower_modules_archive = 'gg_bower_components.tar.gz'
    # third party lua library
    self.oxd_web_lua_file_path = '%s/third-party/oxd-web-lua/oxdweb.lua' % self.dist_gluu_gateway_folder
    self.json_logic_file_path = '%s/third-party/json-logic-lua/logic.lua' % self.dist_gluu_gateway_folder
    self.lrucache_files_path = '%s/third-party/lua-resty-lrucache/lib/resty' % self.dist_gluu_gateway_folder
    self.lsession_files_path = '%s/third-party/lua-resty-session/lib/resty' % self.dist_gluu_gateway_folder
    self.jwt_files_path = '%s/third-party/lua-resty-jwt/lib/resty/.' % self.dist_gluu_gateway_folder
    self.hmac_files_path = '%s/third-party/lua-resty-hmac/lib/resty/.' % self.dist_gluu_gateway_folder
    self.prometheus_file_path = '%s/third-party/nginx-lua-prometheus/prometheus.lua' % self.dist_gluu_gateway_folder
    # kong package file names
    self.ubuntu18_kong_file = "kong-2.2.1.bionic.amd64.deb"
    self.centos7_kong_file = "kong-2.2.1.el7.amd64.rpm"
    self.rhel7_kong_file = "kong-2.2.1.rhel7.amd64.rpm"
    # db names
    self.dist_konga_db_file = "%s/templates/konga.sql" % self.dist_gluu_gateway_setup_folder
    self.dist_kong_db_file = "%s/templates/kong.sql" % self.dist_gluu_gateway_setup_folder
def init_parameters_from_json_argument(self):
    """Populate setup properties from a JSON document passed as argv[1].

    When a JSON argument is present the installer runs non-interactively
    (`is_prompt` is switched off). Missing keys raise KeyError; malformed
    JSON raises ValueError from json.loads.
    """
    if len(sys.argv) > 1:
        self.is_prompt = False
        data = json.loads(sys.argv[1])
        self.license = data['license']
        self.ip = data['ip']
        self.host_name = data['host_name']
        self.country_code = data['country_code']
        self.state = data['state']
        self.city = data['city']
        self.org_name = data['org_name']
        self.admin_email = data['admin_email']
        # NOTE(review): '<PASSWORD>' looks like a redacted placeholder key
        # (presumably the postgres password key) -- confirm against the
        # documented JSON schema for this installer.
        self.pg_pwd = data['<PASSWORD>']
        self.gluu_gateway_ui_redirect_uri = data['gluu_gateway_ui_redirect_uri']
        # The OP host is always forced to https.
        self.gluu_gateway_ui_op_host = 'https://' + data['gluu_gateway_ui_op_host']
        self.gluu_gateway_ui_oxd_web = data['gluu_gateway_ui_oxd_web']
        self.generate_client = data['generate_client']
        # Existing-client credentials are only required when we are not
        # registering a new client ourselves.
        if not self.generate_client:
            self.gluu_gateway_ui_oxd_id = data['gluu_gateway_ui_oxd_id']
            self.gluu_gateway_ui_client_id = data['gluu_gateway_ui_client_id']
            self.gluu_gateway_ui_client_secret = data['gluu_gateway_ui_client_secret']
def configure_postgres(self):
    """Start PostgreSQL, set the `postgres` password and create/load the
    `kong` and `konga` databases for the detected distribution.
    """
    # BUGFIX: the status line used to be printed twice (print, log_it,
    # print); log and print it once.
    self.log_it('Configuring postgres...')
    print('Configuring postgres...')
    # NOTE(review): self.pg_pwd is interpolated into a shell command below;
    # a password containing quotes would break (or inject into) the shell.
    # Consider passing it via PGPASSWORD or psql variables instead.
    if self.os_type == Distribution.Ubuntu:
        self.run(['/etc/init.d/postgresql', 'start'])
        os.system('sudo -iu postgres /bin/bash -c "psql -c \\\"ALTER USER postgres WITH PASSWORD \'%s\';\\\""' % self.pg_pwd)
        # Create each database only if it does not exist yet, then load its schema dump.
        os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'kong\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE kong;\\\""')
        os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'konga\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE konga;\\\""')
        os.system('sudo -iu postgres /bin/bash -c "psql konga < %s"' % self.dist_konga_db_file)
        os.system('sudo -iu postgres /bin/bash -c "psql kong < %s"' % self.dist_kong_db_file)
    if self.os_type in [Distribution.CENTOS, Distribution.RHEL] and self.os_version == '7':
        # Initialize PostgreSQL first time
        self.run([self.cmd_ln, '/usr/lib/systemd/system/postgresql-10.service', '/usr/lib/systemd/system/postgresql.service'])
        self.run(['/usr/pgsql-10/bin/postgresql-10-setup', 'initdb'])
        self.render_template_in_out(self.dist_pg_hba_config_file, self.template_folder, self.dist_pg_hba_config_path)
        self.run([self.cmd_systemctl, 'enable', 'postgresql'])
        self.run([self.cmd_systemctl, 'start', 'postgresql'])
        os.system('sudo -iu postgres /bin/bash -c "psql -c \\\"ALTER USER postgres WITH PASSWORD \'%s\';\\\""' % self.pg_pwd)
        os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'kong\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE kong;\\\""')
        os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'konga\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE konga;\\\""')
        os.system('sudo -iu postgres /bin/bash -c "psql konga < %s"' % self.dist_konga_db_file)
        os.system('sudo -iu postgres /bin/bash -c "psql kong < %s"' % self.dist_kong_db_file)
def enable_service_at_start(self, serviceName, startSequence=None, stopSequence=None, action='enable'):
    """Register `serviceName` with the platform's init system so it is
    (de)activated at boot; `action` is 'enable' or 'disable'.
    """
    rhel_like = self.os_type in [Distribution.CENTOS, Distribution.RHEL]
    if rhel_like and self.os_initdaemon == 'systemd':
        self.run([self.cmd_systemctl, action, serviceName])
    elif rhel_like:
        # SysV-style RHEL/CentOS: chkconfig toggles the service.
        state = 'on' if action == 'enable' else 'off'
        self.run(["/sbin/chkconfig", serviceName, state])
    elif self.os_type + self.os_version in ('ubuntu18', 'debian9'):
        self.run([self.cmd_systemctl, action, serviceName])
    elif self.os_type in [Distribution.Ubuntu, Distribution.Debian]:
        cmd_list = ["/usr/sbin/update-rc.d", serviceName, 'defaults']
        # Optional explicit start/stop sequence numbers.
        if startSequence and stopSequence:
            cmd_list.extend([str(startSequence), str(stopSequence)])
        self.run(cmd_list)
def detect_host_name(self):
detected_host_name = None
try:
detected_host_name = socket.gethostbyaddr(socket.gethostname())[0]
except:
try:
detected_host_name = os.popen("/bin/hostname").read().strip()
except:
self.log_it("No detected hostname", True)
self.log_it(traceback.format_exc(), True)
return detected_host_name
    def gen_cert(self, service_name, password, user='root', cn=None):
        """Generate a self-signed certificate/key pair for *service_name*.

        Creates, inside self.cert_folder:
          <name>.key.orig - DES3-encrypted 2048-bit RSA key (passphrase = *password*)
          <name>.key      - the same key with the passphrase stripped
          <name>.csr      - signing request using the configured org subject
          <name>.crt      - self-signed certificate, valid 365 days
        Both key files end up owned by *user* with mode 700.

        *cn* defaults to self.host_name when not given.
        """
        self.log_it('Generating Certificate for %s' % service_name)
        key_with_password = '%s/%s.key.orig' % (self.cert_folder, service_name)
        key = '%s/%s.key' % (self.cert_folder, service_name)
        csr = '%s/%s.csr' % (self.cert_folder, service_name)
        public_certificate = '%s/%s.crt' % (self.cert_folder, service_name)
        # 1) generate the passphrase-protected private key
        self.run([self.openssl_command,
                  'genrsa',
                  '-des3',
                  '-out',
                  key_with_password,
                  '-passout',
                  'pass:%s' % password,
                  '2048'
                  ])
        # 2) strip the passphrase so services can load the key unattended
        self.run([self.openssl_command,
                  'rsa',
                  '-in',
                  key_with_password,
                  '-passin',
                  'pass:%s' % password,
                  '-out',
                  key
                  ])
        cert_cn = cn
        if cert_cn == None:
            cert_cn = self.host_name
        # 3) build the certificate signing request with the org/host subject
        self.run([self.openssl_command,
                  'req',
                  '-new',
                  '-key',
                  key,
                  '-out',
                  csr,
                  '-subj',
                  '/C=%s/ST=%s/L=%s/O=%s/CN=%s/emailAddress=%s' % (
                      self.country_code, self.state, self.city, self.org_name, cert_cn, self.admin_email)
                  ])
        # 4) self-sign the request for one year
        self.run([self.openssl_command,
                  'x509',
                  '-req',
                  '-days',
                  '365',
                  '-in',
                  csr,
                  '-signkey',
                  key,
                  '-out',
                  public_certificate
                  ])
        # lock down the key material to *user* only
        self.run([self.cmd_chown, '%s:%s' % (user, user), key_with_password])
        self.run([self.cmd_chmod, '700', key_with_password])
        self.run([self.cmd_chown, '%s:%s' % (user, user), key])
        self.run([self.cmd_chmod, '700', key])
def get_pw(self, size=12, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
def gen_kong_ssl_certificate(self):
self.gen_cert('gluu-gateway', self.get_pw())
self.kong_ssl_cert = self.dist_gluu_gateway_setup_folder + '/certs/gluu-gateway.crt'
self.kong_ssl_key = self.dist_gluu_gateway_setup_folder + '/certs/gluu-gateway.key'
def get_ip(self):
test_ip = None
detected_ip = None
try:
test_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
detected_ip = [(test_socket.connect(('8.8.8.8', 80)),
test_socket.getsockname()[0],
test_socket.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
except:
self.log_it("No detected IP address", True)
self.log_it(traceback.format_exc(), True)
if detected_ip:
test_ip = self.get_prompt("Enter IP Address", detected_ip)
else:
test_ip = self.get_prompt("Enter IP Address")
if not self.is_ip(test_ip):
test_ip = None
print ('ERROR: The IP Address is invalid. Try again\n')
return test_ip
def get_prompt(self, prompt, default_value=None):
try:
if default_value:
user_input = input("%s [%s] : " % (prompt, default_value)).strip()
if user_input == '':
return default_value
else:
return user_input
else:
_input = False
while not _input:
user_input = input("%s : " % prompt).strip()
if user_input != '':
_input = True
return user_input
except KeyboardInterrupt:
sys.exit()
except:
return None
def install_plugins(self):
print ("Installing kong plugins...")
self.log_it('Installing kong plugins...')
# json-logic-lua
self.run([self.cmd_mkdir, '-p', '%s/rucciva' % self.dist_lua_folder])
self.run([self.cmd_cp, self.json_logic_file_path, '%s/rucciva/json_logic.lua' % self.dist_lua_folder])
# lua-resty-lrucache
self.run([self.cmd_cp, '-R', '%s/lrucache' % self.lrucache_files_path, '%s/resty' % self.dist_lua_folder])
self.run([self.cmd_cp, '%s/lrucache.lua' % self.lrucache_files_path, '%s/resty' % self.dist_lua_folder])
# lua-resty-session
self.run([self.cmd_cp, '-R', '%s/session' % self.lsession_files_path, '%s/resty' % self.dist_lua_folder])
self.run([self.cmd_cp, '%s/session.lua' % self.lsession_files_path, '%s/resty' % self.dist_lua_folder])
# lua-resty-jwt
self.run([self.cmd_cp, '-a', self.jwt_files_path, '%s/resty' % self.dist_lua_folder])
# lua-resty-hmac
self.run([self.cmd_cp, '-a', self.hmac_files_path, '%s/resty' % self.dist_lua_folder])
# Prometheus
self.run([self.cmd_cp, self.prometheus_file_path, self.dist_lua_folder])
# gluu plugins
for plugin in self.kong_custom_plugins.split(","):
self.run([self.cmd_cp, '-R', "%s/%s" % (self.gg_plugins_folder, plugin), self.dist_kong_plugins_folder])
# gluu plugins common file
self.run([self.cmd_cp, '-R', '%s' % self.gg_comman_folder, self.dist_lua_folder])
# oxd-web-lua
self.run([self.cmd_cp, self.oxd_web_lua_file_path, self.dist_gluu_lua_folder])
# Disable kong stock auth plugins
for plugin in self.disable_plugin_list:
self.run([self.cmd_cp, '-R', '%s/disable-plugin-handler.lua' % self.gg_comman_folder, "%s/%s/handler.lua" % (self.dist_kong_plugins_folder, plugin)])
if plugin == "ldap-auth":
| |
<filename>csb/test/cases/statistics/samplers/__init__.py
import numpy as np
import csb.test as test
import csb.numeric
from csb.statistics.pdf import Normal, BaseDensity
from csb.numeric.integrators import AbstractGradient, VelocityVerlet, LeapFrog, FastLeapFrog
from csb.numeric import InvertibleMatrix
from csb.statistics.samplers import State
from csb.statistics.samplers.mc import Trajectory
from csb.statistics.samplers.mc.multichain import MDRENSSwapParameterInfo, MDRENS
from csb.statistics.samplers.mc.multichain import ThermostattedMDRENSSwapParameterInfo
from csb.statistics.samplers.mc.multichain import RESwapParameterInfo, AlternatingAdjacentSwapScheme
from csb.statistics.samplers.mc.multichain import ReplicaExchangeMC, ThermostattedMDRENS
from csb.statistics.samplers.mc.multichain import HMCStepRENS, HMCStepRENSSwapParameterInfo
from csb.statistics.samplers.mc.multichain import AbstractSwapCommunicator, AbstractExchangeMC
from csb.statistics.samplers.mc.multichain import AbstractSwapParameterInfo, ReplicaHistory
from csb.statistics.samplers.mc.singlechain import HMCSampler, RWMCSampler, AbstractNCMCSampler
from csb.statistics.samplers.mc.singlechain import AbstractSingleChainMC, SimpleProposalCommunicator
from csb.statistics.samplers.mc.propagators import RWMCPropagator, HMCPropagator, MDPropagator
from csb.statistics.samplers.mc.propagators import AbstractNCMCPropagator, AbstractPropagator
from csb.statistics.samplers.mc.neqsteppropagator import ReducedHamiltonian, HamiltonianSysInfo
from csb.statistics.samplers.mc.neqsteppropagator import PlainMDPropagation, PlainMDPropagationParam
from csb.statistics.samplers.mc.neqsteppropagator import AbstractMDPropagation, HMCPropagation
from csb.statistics.samplers.mc.neqsteppropagator import Protocol, Step, AbstractPerturbation
from csb.statistics.samplers.mc.neqsteppropagator import ReducedHamiltonianPerturbation
from csb.statistics.samplers.mc.neqsteppropagator import AbstractPropagation
from csb.statistics.samplers.mc.neqsteppropagator import NonequilibriumStepPropagator
from csb.statistics.samplers.mc.neqsteppropagator import NonequilibriumTrajectory
from csb.statistics.samplers.mc.neqsteppropagator import HMCPropagationParam
class SamplePDF(Normal):
    """Standard normal whose log-density sums elementwise over a state vector."""

    def log_prob(self, x):
        parent_log_prob = super(SamplePDF, self).log_prob
        return sum(parent_log_prob(xi) for xi in x)

    def grad(self, x, t):
        # Gradient of the energy -log_prob; t is unused but required by the
        # MD gradient interface.
        return x / (self.sigma ** 2)
class MultimodalPDF(BaseDensity):
    """1D multimodal test density: cosine wells inside a wide quadratic well."""

    def log_prob(self, x):
        # -E(x) with E(x) = 2.5*cos(2.5x) + 0.04*x^2, summed over components
        energies = 2.5 * np.cos(2.5 * x) + 0.04 * x ** 2
        return sum(-energies)

    def grad(self, x, t):
        # dE/dx; t is unused but required by the MD gradient interface
        return 0.08 * x - 6.25 * np.sin(2.5 * x)
class Multimodal2DPDF(BaseDensity):
    """2D test density: multimodal along x[0], harmonic along x[1]."""

    # spring constant of the harmonic coordinate
    k = 0.5

    def _E1(self, x):
        # Energy of the multimodal coordinate.
        return 2.5 * np.cos(2.5 * x[0]) + 0.04 * x[0] ** 2

    def _E2(self, x):
        # Energy of the harmonic coordinate.
        return self.k * x[1] ** 2

    def log_prob(self, x):
        return -self._E1(x) - self._E2(x)

    def grad(self, x, t):
        # NOTE(review): the cross-multiplication by _E2/_E1 is unusual for a
        # gradient of log_prob -- reproduced exactly as originally written.
        d0 = (-6.25 * np.sin(2.5 * x[0]) + 0.08 * x[0]) * self._E2(x)
        d1 = self._E1(x) * self.k * x[1]
        return np.array([d0, d1])
@test.functional
class TestMCPropagators(test.Case):
    """Functional tests for single-chain MC propagators.

    Each propagator draws ``nits`` samples from a standard normal target;
    the per-dimension sample mean and variance must match 0 and 1 within
    a tolerance of 0.15.
    """
    def setUp(self):
        super(TestMCPropagators, self).setUp()
        self.pdf = SamplePDF()
        self.gradient = self._createGradient(1.)
        self.timestep = 1.2
        self.stepsize = 1.2
        self.nsteps = 15
        self.nits = 10000
        self.state = State(np.random.normal(size=1))
    def _createGradient(self, sigma):
        """Wrap the normal energy gradient q / sigma**2 as an AbstractGradient."""
        class Grad(AbstractGradient):
            def evaluate(self, q, t):
                return q / (sigma ** 2)
        return Grad()
    def checkResult(self, trajectory):
        """Assert per-dimension sample mean ~ 0 and variance ~ 1."""
        dim = len(trajectory[0].position)
        for i in range(dim):
            states = [state.position[i] for state in trajectory]
            self.assertAlmostEqual(np.array(states).mean(), 0., delta=0.15)
            self.assertAlmostEqual(np.array(states).var(), 1., delta=0.15)
    def testRWMCPropagator(self):
        gen = RWMCPropagator(self.pdf, self.stepsize)
        self.checkResult(gen.generate(self.state, self.nits))
    def testHMCPropagator(self):
        gen = HMCPropagator(self.pdf, self.gradient, self.timestep, self.nsteps)
        self.checkResult(gen.generate(self.state, self.nits))
    def testHMCPropagatorMM(self):
        # HMC with a non-trivial diagonal mass matrix in two dimensions.
        mm = InvertibleMatrix(np.array([[1., 0.], [0., 2.]]))
        init_state = State(np.random.normal(size=2))
        gen = HMCPropagator(self.pdf, self.gradient, self.timestep * 1.5, self.nsteps, mass_matrix=mm)
        self.checkResult(gen.generate(init_state, self.nits))
    @test.skip("Takes quite a long time to run.")
    def testNCMCPropagator(self):
        """NCMC with an annealing protocol that tightens, then relaxes, the target."""
        Nhalf = 5
        dt = 0.1
        md_tl = 5
        # spring constants annealed 1.0 -> 0.2 and mirrored back (sigmas = 1/sqrt(k))
        ks = np.linspace(1.0, 0.2, Nhalf).tolist()
        sigmas = [1/np.sqrt(k) for k in ks]
        sigmas += sigmas[::-1][1:]
        N = len(sigmas)
        pdfs = [SamplePDF(sigma=s) for s in sigmas]
        hamiltonians = [ReducedHamiltonian(pdfs[i].log_prob, pdfs[i].grad) for i in range(N)]
        sys_infos = [HamiltonianSysInfo(hamiltonians[i]) for i in range(N)]
        # forward protocol: perturb to system i+1, then propagate under i+1
        steps = [Step(ReducedHamiltonianPerturbation(sys_infos[i], sys_infos[i+1],
                                                     evaluate_work=False),
                      PlainMDPropagation(sys_infos[i+1],
                                         PlainMDPropagationParam(dt, md_tl, pdfs[i+1].grad),
                                         evaluate_heat=False))
                 for i in range(N - 1)]
        # reverse protocol: propagate under system i first, then perturb
        rv_steps = [Step(ReducedHamiltonianPerturbation(sys_infos[i], sys_infos[i+1],
                                                        evaluate_work=False),
                         PlainMDPropagation(sys_infos[i],
                                            PlainMDPropagationParam(dt, md_tl, pdfs[i].grad),
                                            evaluate_heat=False))
                    for i in range(N - 1)]
        for s in rv_steps:
            s.set_propagation_first()
        protocol = Protocol(steps)
        rv_protocol = Protocol(rv_steps)
        class MDProbStepNCMCSampler(AbstractNCMCSampler):
            def _calc_pacc(self, proposal_communicator):
                # acceptance from the protocol energy difference
                return np.exp(-proposal_communicator.traj.deltaH)
        class MDPropStepNCMCPropagator(AbstractNCMCPropagator):
            def _init_sampler(self, init_state):
                self._sampler = MDProbStepNCMCSampler(init_state, self.protocol,
                                                      self.reverse_protocol)
        gen = MDPropStepNCMCPropagator(protocol, rv_protocol)
        init_state = State(np.array([1.0]))
        traj = gen.generate(init_state, self.nits, return_trajectory=True)
        self.checkResult(traj)
@test.functional
class TestMultichain(test.Case):
    """Functional tests for replica-exchange-style multichain algorithms.

    Each test runs a short simulation on a multimodal target and checks
    that the coldest chain occupies the two main modes with the expected
    probability.
    """

    def setUp(self):
        super(TestMultichain, self).setUp()
        self.samplers = None

    def set1pParams(self):
        """Two RWMC chains on the 1D multimodal target."""
        init_state = State(np.random.uniform(low=-3.0, high=3.0, size=1))
        self.temperatures = [0.4, 2.0]
        self.samplers = [RWMCSampler(MultimodalPDF(), init_state, 0.5,
                                     temperature=self.temperatures[0]),
                         RWMCSampler(MultimodalPDF(), init_state, 5.5,
                                     temperature=self.temperatures[1])]
        self.grad = self.samplers[0]._pdf.grad
        self.nits = 10000
        # Bug fix: bind i at definition time via a default argument. The
        # original lambdas closed over the loop variable, so after the
        # comprehension finished every lambda saw the final value of i.
        self.Ts = [lambda l, i=i: l * self.temperatures[i + 1] + (1. - l) * self.temperatures[i]
                   for i in range(len(self.samplers) - 1)]

    def set2pParams(self):
        """Three RWMC chains on the 2D multimodal target."""
        init_state = State(np.random.uniform(low=-3.0, high=3.0, size=2))
        pdf = Multimodal2DPDF()
        self.temperatures = [0.4, 1.0, 2.0]
        self.samplers = [RWMCSampler(pdf, init_state, 0.2,
                                     temperature=self.temperatures[0]),
                         RWMCSampler(pdf, init_state, .8,
                                     temperature=self.temperatures[1]),
                         RWMCSampler(pdf, init_state, 2.,
                                     temperature=self.temperatures[2])]
        self.grad = self.samplers[0]._pdf.grad
        self.nits = 20000
        # Same late-binding fix as in set1pParams; without it BOTH lambdas
        # interpolated between temperatures[1] and temperatures[2].
        self.Ts = [lambda l, i=i: l * self.temperatures[i + 1] + (1. - l) * self.temperatures[i]
                   for i in range(len(self.samplers) - 1)]

    def _run(self, algorithm):
        """Drive *algorithm*, swapping every 5th iteration, and check the
        occupation probabilities of the two modes in the coldest chain."""
        xmin1 = -2.5
        xmax1 = 0.0
        xmin2 = 0.0
        xmax2 = 2.5
        p_occ = 0.382
        swapper = AlternatingAdjacentSwapScheme(algorithm)
        n_occ1 = 0
        n_occ2 = 0
        for i in range(self.nits):
            if i % 5 == 0:
                swapper.swap_all()
            else:
                algorithm.sample()
            x = self.samplers[0].state.position[0]
            if x > xmin1 and x < xmax1:
                n_occ1 += 1
            if x > xmin2 and x < xmax2:
                n_occ2 += 1
        p_occ_sampled1 = float(n_occ1) / float(self.nits)
        p_occ_sampled2 = float(n_occ2) / float(self.nits)
        # Assert by comparison with real occupation probabilities and a tolerance of
        # four standard deviations of a run with n=15000 samples and 100 iterations
        self.assertAlmostEqual(p_occ_sampled1, p_occ, delta=4.0 * 0.035)
        self.assertAlmostEqual(p_occ_sampled2, p_occ, delta=4.0 * 0.035)

    @test.skip("Takes some time, rendered optional by a unit test.")
    def testReplicaExchangeMC(self):
        self.set1pParams()
        params = [RESwapParameterInfo(self.samplers[0], self.samplers[1])]
        algorithm = ReplicaExchangeMC(self.samplers, params)
        self._run(algorithm)

    def testMDRENS(self):
        self.set1pParams()
        params = [MDRENSSwapParameterInfo(self.samplers[0], self.samplers[1],
                                          0.025, 15, self.grad)]
        algorithm = MDRENS(self.samplers, params, integrator=VelocityVerlet)
        self._run(algorithm)

    def testThermostattedMDRens(self):
        self.set1pParams()
        params = [ThermostattedMDRENSSwapParameterInfo(self.samplers[0], self.samplers[1],
                                                       0.05, 15, self.grad,
                                                       temperature=self.Ts[0])]
        algorithm = ThermostattedMDRENS(self.samplers, params)
        self._run(algorithm)

    def testThermostattedMDRensMM(self):
        self.set2pParams()
        mm1 = InvertibleMatrix(np.array([[1.0, 0.0], [0.0, 5.0]]))
        mm2 = InvertibleMatrix(np.array([[.5, 0.0], [0.0, 10.0]]))
        pdf = Multimodal2DPDF()
        params = [ThermostattedMDRENSSwapParameterInfo(self.samplers[0], self.samplers[1],
                                                       0.01, 15, pdf.grad,
                                                       temperature=self.Ts[0],
                                                       mass_matrix=mm1),
                  ThermostattedMDRENSSwapParameterInfo(self.samplers[1], self.samplers[2],
                                                       0.1, 15, pdf.grad,
                                                       temperature=self.Ts[1],
                                                       mass_matrix=mm2)]
        algorithm = ThermostattedMDRENS(self.samplers, params)
        self._run(algorithm)

    def testHMCStepRENS(self):
        self.set1pParams()
        params = [HMCStepRENSSwapParameterInfo(self.samplers[0], self.samplers[1], 0.05, 3, 1,
                                               self.grad, 5)]
        algorithm = HMCStepRENS(self.samplers, params)
        self._run(algorithm)
class MockSwapCommunicator(AbstractSwapCommunicator):
    """Concrete stand-in; inherits all behavior from AbstractSwapCommunicator."""
    pass
class MockSwapParameterInfo(AbstractSwapParameterInfo):
    """Concrete stand-in; inherits all behavior from AbstractSwapParameterInfo."""
    pass
class MockSampler(AbstractSingleChainMC):
    """Deterministic single-chain sampler: always proposes doubling the state."""

    def __init__(self, pdf, state, temperature=1.0):
        super(MockSampler, self).__init__(pdf, state, temperature)

    def _propose(self):
        doubled = State(self._state.position * 2.0)
        return SimpleProposalCommunicator(self._state, doubled)

    def _calc_pacc(self, proposal_communicator):
        # Fixed acceptance probability so tests can seed np.random reliably.
        return 0.42
class MockedAbstractExchangeMC(AbstractExchangeMC):
    """AbstractExchangeMC with canned swap proposals/acceptance for unit tests."""

    def _propose_swap(self, param_info):
        forward = Trajectory([State(np.array([1.0])), State(np.array([2.0]))])
        backward = Trajectory([State(np.array([2.0])), State(np.array([1.0]))])
        return MockSwapCommunicator(param_info, forward, backward)

    def _calc_pacc_swap(self, swapcom):
        # Fixed acceptance probability; tests seed np.random around this.
        swapcom.acceptance_probability = 0.75
        return swapcom
@test.unit
class TestAbstractExchangeMC(test.Case):
    """Unit tests for AbstractExchangeMC's swap acceptance machinery."""
    def setUp(self):
        self.samplers = [MockSampler(None, State(np.array([3.0]))),
                         MockSampler(None, State(np.array([5.0])))]
        self.param_info = MockSwapParameterInfo(self.samplers[0], self.samplers[1])
        self.algo = MockedAbstractExchangeMC(self.samplers, [self.param_info])
    def testAcceptSwap(self):
        """_accept_swap follows the communicator's acceptance probability."""
        swapcom = MockSwapCommunicator(self.param_info,
                                       Trajectory([State(np.array([1.0])),
                                                   State(np.array([2.0]))]),
                                       Trajectory([State(np.array([2.0])),
                                                   State(np.array([1.0]))]))
        # seed so the uniform draws compared against 0.75 / 0.15 are fixed
        np.random.seed(5)
        swapcom.acceptance_probability = 0.75
        res = self.algo._accept_swap(swapcom)
        assert(res)
        swapcom.acceptance_probability = 0.15
        res = self.algo._accept_swap(swapcom)
        assert(not res)
    def testSwap(self):
        """swap() exchanges states on acceptance and updates the statistics."""
        np.random.seed(5)
        res = self.algo.swap(0)
        assert(res)
        # accepted: samplers took the final states of the mocked trajectories
        self.assertEqual(self.samplers[0].state.position[0], 1.0)
        self.assertEqual(self.samplers[1].state.position[0], 2.0)
        self.assertEqual(self.algo.statistics.stats[0].total_swaps, 1)
        self.assertEqual(self.algo.statistics.stats[0].accepted_swaps, 1)
        np.random.seed(4)
        res = self.algo.swap(0)
        assert(not res)
        # rejected: states unchanged, only the attempt counter grows
        self.assertEqual(self.samplers[0].state.position[0], 1.0)
        self.assertEqual(self.samplers[1].state.position[0], 2.0)
        self.assertEqual(self.algo.statistics.stats[0].total_swaps, 2)
        self.assertEqual(self.algo.statistics.stats[0].accepted_swaps, 1)
@test.unit
class TestReplicaExchangeMC(test.Case):
    """Unit tests for ReplicaExchangeMC's swap proposal and acceptance."""
    def setUp(self):
        pdf1 = HO()
        pdf2 = HO(k1=2.0, k2=2.0)
        self.samplers = [MockSampler(pdf1, State(np.array([3.0]))),
                         MockSampler(pdf2, State(np.array([5.0])))]
        self.param_info = RESwapParameterInfo(self.samplers[0], self.samplers[1])
        self.algo = ReplicaExchangeMC(self.samplers, [self.param_info])
    def testProposeSwap(self):
        """RE swaps exchange states directly: proposal trajectories are stationary."""
        res = self.algo._propose_swap(self.param_info)
        self.assertEqual(res.traj12.initial.position[0], 3.0)
        self.assertEqual(res.traj12.final.position[0], 3.0)
        self.assertEqual(res.traj21.initial.position[0], 5.0)
        self.assertEqual(res.traj21.final.position[0], 5.0)
    def testCalcPaccSwap(self):
        """Acceptance equals exp of the energy balance of the exchanged states."""
        swapcom = self.algo._propose_swap(self.param_info)
        res = self.algo._calc_pacc_swap(swapcom)
        # terms are +-0.5*k*x^2 for x in {3, 5} under k=1 and k=2 oscillators
        self.assertEqual(res.acceptance_probability, csb.numeric.exp(-12.5 + 4.5 - 9.0 + 25.0))
class HO(object):
    """Time-dependent harmonic oscillator interpolating between two wells.

    At t=0 the spring constant / center are (k1, x1); at t=tau they are
    (k2, x2); in between both are linearly interpolated.
    """

    def __init__(self, k1=1.0, k2=1.0, x1=0.0, x2=0.0, tau=1.0):
        self.k1 = k1
        self.k2 = k2
        self.x1 = x1
        self.x2 = x2
        self.tau = tau
        # Linear interpolation of spring constant and center in time.
        self.kt = lambda t: self.k2 * t / self.tau + (1 - t / self.tau) * self.k1
        self.xt = lambda t: self.x2 * t / self.tau + (1 - t / self.tau) * self.x1

    def log_prob(self, x, t=0.0):
        spring = self.kt(t)
        center = self.xt(t)
        return -0.5 * spring * sum((x - center) ** 2)

    def gradient(self, x, t):
        # Gradient of the energy -log_prob at time t.
        return self.kt(t) * (x - self.xt(t))
class MockPropagator(AbstractPropagator):
    """Propagator stub: doubles position and momentum in a single step."""

    def __init__(self):
        # No configuration needed; deliberately skip the base initializer.
        pass

    def generate(self, init_state, length, return_trajectory=False):
        doubled = State(init_state.position * 2, init_state.momentum * 2)
        return Trajectory([init_state, doubled])
class PlainMDPropagationMocked(PlainMDPropagation):
    # PlainMDPropagation with the real MD propagator swapped for the mock.
    def _propagator_factory(self):
        """Return a deterministic MockPropagator instead of a real MD run."""
        return MockPropagator()
class HMCPropagationMocked(HMCPropagation):
    # HMCPropagation with the real HMC propagator swapped for the mock.
    def _propagator_factory(self):
        """Return a deterministic MockPropagator instead of a real HMC run."""
        return MockPropagator()
class MockPerturbation(AbstractPerturbation):
    """Perturbation stub: doubling trajectory, fixed work (42) and Jacobian (1.1)."""

    def __init__(self):
        # No configuration needed; deliberately skip the base initializer.
        pass

    @property
    def sys_before(self):
        pdf = HO()
        return HamiltonianSysInfo(ReducedHamiltonian(pdf.log_prob, pdf.gradient))

    @property
    def sys_after(self):
        pdf = HO()
        return HamiltonianSysInfo(ReducedHamiltonian(pdf.log_prob, pdf.gradient))

    def _run_perturbator(self, state):
        doubled = State(state.position * 2, state.momentum * 2)
        return Trajectory([state, doubled])

    def _calculate_work(self, traj):
        return 42.0

    def _calculate_jacobian(self, traj):
        return 1.1
class MockPropagation(AbstractPropagation):
    # Propagation stub: doubling trajectory with a fixed heat of -42.
    def __init__(self):
        # No configuration needed; deliberately skip the base initializer.
        pass
    @property
    def sys(self):
        # NOTE(review): other code in this module builds HamiltonianSysInfo
        # from a ReducedHamiltonian instance; here it receives
        # (log_prob, gradient) directly -- confirm against the constructor.
        pdf = HO()
        return HamiltonianSysInfo(pdf.log_prob, pdf.gradient)
    def _run_propagator(self, state):
        # Double position and momentum in a single step.
        final = State(state.position * 2, state.momentum * 2)
        return Trajectory([state, final])
    def _calculate_heat(self, traj):
        return -42.0
    def _propagator_factory(self):
        # Presumably unused because _run_propagator is overridden -- confirm.
        return None
class MockStep(Step):
    # Step stub whose perform() yields a fixed NonequilibriumTrajectory.
    def __init__(self, return_momentum=True):
        # Intentionally bypass Step.__init__ and wire perform() directly to
        # the perturbation-first variant (mirrors Step's own dispatching).
        self._return_momentum = return_momentum
        self._perform = None
        self.perform = self._perform_pert_prop
    @property
    def perturbation(self):
        """A fresh MockPerturbation on every access."""
        return MockPerturbation()
    def _perform_pert_prop(self, state, extra_info=None):
        # Perturbation-then-propagation ordering: heat=-42, work=+42.
        if self._return_momentum == True:
            final = State(state.position * 2, state.momentum * 2)
        else:
            final = State(state.position * 2)
        res = NonequilibriumTrajectory([state, final], heat=-42.0, work=42.0, jacobian=1.1)
        return res, None, None
    def _perform_prop_pert(self, state, extra_info=None):
        # Propagation-then-perturbation ordering: heat/work signs flipped.
        if self._return_momentum == True:
            final = State(state.position * 2, state.momentum * 2)
        else:
            final = State(state.position * 2)
        res = NonequilibriumTrajectory([state, final], heat=42.0, work=-42.0, jacobian=1.1)
        return res, None, None
class MockProtocol(Protocol):
    """Protocol stub made of two MockSteps; bypasses Protocol.__init__."""

    def __init__(self, momentum=True):
        self._momentum = momentum
        self.steps = [MockStep(momentum) for _ in range(2)]
@test.unit
class TestNeqsteppropagator(test.Case):
def testReducedHamiltonian(self):
pdf = HO(k1=2.0, k2=2.0)
init = State(np.array([2.0]), np.array([-2.0]))
ham = ReducedHamiltonian(lambda x: pdf.log_prob(x, | |
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2011 <NAME> http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the GSConnection
"""
import unittest
import time
import os
import re
import xml
from boto.gs.connection import GSConnection
from boto.gs.cors import Cors
from boto import handler
from boto import storage_uri
class GSConnectionTest (unittest.TestCase):
    def test_1_basic(self):
        """Basic regression test for Google Cloud Storage.

        Round-trips object content, exercises get_all_keys/list, metadata
        (including a unicode value), canned ACLs and the raw logging
        subresource, then removes every key and deletes the bucket.
        Requires live GCS credentials.
        """
        print '--- running GSConnection tests ---'
        c = GSConnection()
        # create a new, empty bucket
        bucket_name = 'test-%d' % int(time.time())
        bucket = c.create_bucket(bucket_name)
        # now try a get_bucket call and see if it's really there
        bucket = c.get_bucket(bucket_name)
        k = bucket.new_key()
        k.name = 'foobar'
        s1 = 'This is a test of file upload and download'
        s2 = 'This is a second string to test file upload and download'
        k.set_contents_from_string(s1)
        fp = open('foobar', 'wb')
        # now get the contents from s3 to a local file
        k.get_contents_to_file(fp)
        fp.close()
        fp = open('foobar')
        # check to make sure content read from s3 is identical to original
        assert s1 == fp.read(), 'corrupted file'
        fp.close()
        bucket.delete_key(k)
        # test a few variations on get_all_keys - first load some data
        # for the first one, let's override the content type
        phony_mimetype = 'application/x-boto-test'
        headers = {'Content-Type': phony_mimetype}
        k.name = 'foo/bar'
        k.set_contents_from_string(s1, headers)
        k.name = 'foo/bas'
        k.set_contents_from_filename('foobar')
        k.name = 'foo/bat'
        k.set_contents_from_string(s1)
        k.name = 'fie/bar'
        k.set_contents_from_string(s1)
        k.name = 'fie/bas'
        k.set_contents_from_string(s1)
        k.name = 'fie/bat'
        k.set_contents_from_string(s1)
        # try resetting the contents to another value
        md5 = k.md5
        k.set_contents_from_string(s2)
        assert k.md5 != md5
        # Test for stream API
        fp2 = open('foobar', 'rb')
        # clear cached digests so the streamed upload recomputes them
        k.md5 = None
        k.base64md5 = None
        k.set_contents_from_stream(fp2, headers=headers)
        fp = open('foobar1', 'wb')
        k.get_contents_to_file(fp)
        fp.close()
        fp2.seek(0,0)
        fp = open('foobar1', 'rb')
        assert (fp2.read() == fp.read()), 'Chunked Transfer corrupted the Data'
        fp.close()
        fp2.close()
        os.unlink('foobar1')
        os.unlink('foobar')
        all = bucket.get_all_keys()
        # six keys were uploaded under the foo/ and fie/ prefixes above
        assert len(all) == 6
        rs = bucket.get_all_keys(prefix='foo')
        assert len(rs) == 3
        rs = bucket.get_all_keys(prefix='', delimiter='/')
        # delimiter='/' collapses the keys into the two "directory" prefixes
        assert len(rs) == 2
        rs = bucket.get_all_keys(maxkeys=5)
        assert len(rs) == 5
        # test the lookup method
        k = bucket.lookup('foo/bar')
        assert isinstance(k, bucket.key_class)
        assert k.content_type == phony_mimetype
        k = bucket.lookup('notthere')
        assert k == None
        # try some metadata stuff
        k = bucket.new_key()
        k.name = 'has_metadata'
        mdkey1 = 'meta1'
        mdval1 = 'This is the first metadata value'
        k.set_metadata(mdkey1, mdval1)
        mdkey2 = 'meta2'
        mdval2 = 'This is the second metadata value'
        k.set_metadata(mdkey2, mdval2)
        # try a unicode metadata value
        mdval3 = u'föö'
        mdkey3 = 'meta3'
        k.set_metadata(mdkey3, mdval3)
        k.set_contents_from_string(s1)
        k = bucket.lookup('has_metadata')
        assert k.get_metadata(mdkey1) == mdval1
        assert k.get_metadata(mdkey2) == mdval2
        assert k.get_metadata(mdkey3) == mdval3
        k = bucket.new_key()
        k.name = 'has_metadata'
        # metadata must also come back on a plain GET of the object
        k.get_contents_as_string()
        assert k.get_metadata(mdkey1) == mdval1
        assert k.get_metadata(mdkey2) == mdval2
        assert k.get_metadata(mdkey3) == mdval3
        bucket.delete_key(k)
        # test list and iterator
        rs1 = bucket.list()
        num_iter = 0
        for r in rs1:
            num_iter = num_iter + 1
        rs = bucket.get_all_keys()
        num_keys = len(rs)
        # the iterator should yield exactly as many keys as get_all_keys
        assert num_iter == num_keys
        # try some acl stuff
        bucket.set_acl('public-read')
        acl = bucket.get_acl()
        assert len(acl.entries.entry_list) == 2
        bucket.set_acl('private')
        acl = bucket.get_acl()
        assert len(acl.entries.entry_list) == 1
        k = bucket.lookup('foo/bar')
        k.set_acl('public-read')
        acl = k.get_acl()
        assert len(acl.entries.entry_list) == 2
        k.set_acl('private')
        acl = k.get_acl()
        assert len(acl.entries.entry_list) == 1
        # try set/get raw logging subresource
        empty_logging_str="<?xml version='1.0' encoding='UTF-8'?><Logging/>"
        logging_str = (
            "<?xml version='1.0' encoding='UTF-8'?><Logging>"
            "<LogBucket>log-bucket</LogBucket>" +
            "<LogObjectPrefix>example</LogObjectPrefix>" +
            "</Logging>")
        bucket.set_subresource('logging', logging_str);
        assert bucket.get_subresource('logging') == logging_str;
        # try disable/enable logging
        bucket.disable_logging()
        assert bucket.get_subresource('logging') == empty_logging_str
        bucket.enable_logging('log-bucket', 'example')
        assert bucket.get_subresource('logging') == logging_str;
        # now delete all keys in bucket
        for k in bucket:
            bucket.delete_key(k)
        # now delete bucket
        # brief pause so the service registers the deletions first
        time.sleep(5)
        c.delete_bucket(bucket)
    def test_2_copy_key(self):
        """Copy a key between buckets and verify the copied content.

        Creates two buckets, uploads to the first, copies the key to the
        second, downloads the copy and compares, then cleans everything up.
        Requires live GCS credentials.
        """
        c = GSConnection()
        # create two new, empty buckets
        bucket_name_1 = 'test1-%d' % int(time.time())
        bucket_name_2 = 'test2-%d' % int(time.time())
        bucket1 = c.create_bucket(bucket_name_1)
        bucket2 = c.create_bucket(bucket_name_2)
        # verify buckets got created
        bucket1 = c.get_bucket(bucket_name_1)
        bucket2 = c.get_bucket(bucket_name_2)
        # create a key in bucket1 and give it some content
        k1 = bucket1.new_key()
        assert isinstance(k1, bucket1.key_class)
        key_name = 'foobar'
        k1.name = key_name
        s = 'This is a test.'
        k1.set_contents_from_string(s)
        # copy the new key from bucket1 to bucket2
        k1.copy(bucket_name_2, key_name)
        # now copy the contents from bucket2 to a local file
        k2 = bucket2.lookup(key_name)
        assert isinstance(k2, bucket2.key_class)
        fp = open('foobar', 'wb')
        k2.get_contents_to_file(fp)
        fp.close()
        fp = open('foobar')
        # check to make sure content read is identical to original
        assert s == fp.read(), 'move test failed!'
        fp.close()
        # delete keys
        bucket1.delete_key(k1)
        bucket2.delete_key(k2)
        # delete test buckets
        c.delete_bucket(bucket1)
        c.delete_bucket(bucket2)
    def test_3_default_object_acls(self):
        """Exercise default object ACLs via both GSConnection and storage_uri.

        Checks the factory default (project-private), then round-trips a
        canned 'public-read' ACL, 'private', and an explicit XML ACL.
        Requires live GCS credentials.
        """
        # regexp for matching project-private default object ACL
        project_private_re = '\s*<AccessControlList>\s*<Entries>\s*<Entry>' \
            '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>' \
            '\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>' \
            '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>' \
            '\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>' \
            '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>' \
            '\s*<Permission>READ</Permission></Entry>\s*</Entries>' \
            '\s*</AccessControlList>\s*'
        c = GSConnection()
        # create a new bucket
        bucket_name = 'test-%d' % int(time.time())
        bucket = c.create_bucket(bucket_name)
        # now call get_bucket to see if it's really there
        bucket = c.get_bucket(bucket_name)
        # get default acl and make sure it's project-private
        acl = bucket.get_def_acl()
        assert re.search(project_private_re, acl.to_xml())
        # set default acl to a canned acl and verify it gets set
        bucket.set_def_acl('public-read')
        acl = bucket.get_def_acl()
        # save public-read acl for later test
        public_read_acl = acl
        assert acl.to_xml() == ('<AccessControlList><Entries><Entry>' +
            '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
            '</Entry></Entries></AccessControlList>')
        # back to private acl
        bucket.set_def_acl('private')
        acl = bucket.get_def_acl()
        assert acl.to_xml() == '<AccessControlList></AccessControlList>'
        # set default acl to an xml acl and verify it gets set
        bucket.set_def_acl(public_read_acl)
        acl = bucket.get_def_acl()
        assert acl.to_xml() == ('<AccessControlList><Entries><Entry>' +
            '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
            '</Entry></Entries></AccessControlList>')
        # back to private acl
        bucket.set_def_acl('private')
        acl = bucket.get_def_acl()
        assert acl.to_xml() == '<AccessControlList></AccessControlList>'
        # delete bucket
        c.delete_bucket(bucket)
        # repeat default acl tests using boto's storage_uri interface
        # create a new bucket
        bucket_name = 'test-%d' % int(time.time())
        uri = storage_uri('gs://' + bucket_name)
        uri.create_bucket()
        # get default acl and make sure it's project-private
        acl = uri.get_def_acl()
        assert re.search(project_private_re, acl.to_xml())
        # set default acl to a canned acl and verify it gets set
        uri.set_def_acl('public-read')
        acl = uri.get_def_acl()
        # save public-read acl for later test
        public_read_acl = acl
        assert acl.to_xml() == ('<AccessControlList><Entries><Entry>' +
            '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
            '</Entry></Entries></AccessControlList>')
        # back to private acl
        uri.set_def_acl('private')
        acl = uri.get_def_acl()
        assert acl.to_xml() == '<AccessControlList></AccessControlList>'
        # set default acl to an xml acl and verify it gets set
        uri.set_def_acl(public_read_acl)
        acl = uri.get_def_acl()
        assert acl.to_xml() == ('<AccessControlList><Entries><Entry>' +
            '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
            '</Entry></Entries></AccessControlList>')
        # back to private acl
        uri.set_def_acl('private')
        acl = uri.get_def_acl()
        assert acl.to_xml() == '<AccessControlList></AccessControlList>'
        # delete bucket
        uri.delete_bucket()
def test_4_cors_xml(self):
"""test setting and getting of CORS XML documents"""
# regexp for matching project-private default object ACL
cors_empty = '<CorsConfig></CorsConfig>'
cors_doc = ('<CorsConfig><Cors><Origins><Origin>origin1.example.com'
'</Origin><Origin>origin2.example.com</Origin></Origins>'
'<Methods><Method>GET</Method><Method>PUT</Method>'
'<Method>POST</Method></Methods><ResponseHeaders>'
'<ResponseHeader>foo</ResponseHeader>'
'<ResponseHeader>bar</ResponseHeader></ResponseHeaders>'
'</Cors></CorsConfig>')
c = GSConnection()
# create a new bucket
bucket_name = 'test-%d' % int(time.time())
bucket = c.create_bucket(bucket_name)
# now call get_bucket to see if it's really there
bucket = c.get_bucket(bucket_name)
# get new bucket cors and make sure it's empty
cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
assert cors == cors_empty
# set cors document on new bucket
bucket.set_cors(cors_doc)
cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
assert cors == cors_doc
# delete bucket
c.delete_bucket(bucket)
# repeat cors tests using boto's storage_uri interface
# create a new bucket
bucket_name = 'test-%d' % int(time.time())
uri = storage_uri('gs://' + bucket_name)
uri.create_bucket()
# | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" NNabla: Neural Network Libraries Frontend for Relay """
import numpy as np
import collections
import zipfile
import shutil
import tempfile
import logging
import os
import attr
import sys
logging.basicConfig(level=logging.CRITICAL)
import nnabla as nn
from nnabla.utils import nnabla_pb2
from nnabla.utils.converter.nnabla import NnpImporter
import google.protobuf.text_format as text_format
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ..expr_functor import ExprFunctor
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_shape, infer_channels
from .common import infer_type, get_name
from .common import infer_value as _infer_value
from .common import infer_value_simulated as _infer_value_simulated
from . import qnn_nnabla
__all__ = ['from_nnabla']
# #############################################################################
# Helper functions
# ----------------
def load_nnp(nnp_path):
    """ Load nnp file and create a NnpImporter object with protobuf parameters which
    will be later used to convert into the Relay IR.
    This function only is usable to parse the NNabla graph into the Relay IR.
    To execute the Nnp file with NNabla use the NnpLoader tool instead.
    Parameters
    ----------
    nnp_path : str
        Path to the .nnp file from NNabla.
    Returns
    -------
    net : nnabla.utils.converter.nnabla.importer.NnpImporter
        Imported nnp file
    """
    # Removed a stray duplicate docstring statement that followed the real
    # docstring (it was a no-op string expression).
    # NOTE(review): the keyword 'expad_network' looks misspelled
    # ('expand_network'?) -- confirm against the installed nnabla version.
    net = NnpImporter(nnp_path, expad_network=False, executor_index=True)
    return net.execute()
def default_layout(dims, op_name):
    """Return the default data layout for an operator with *dims* spatial dims.

    Parameters
    ----------
    dims : int
        Number of spatial dimensions (1 or 2 are supported).
    op_name : str
        Operator name, used in the error message.

    Returns
    -------
    str
        'NCW' for 1-D operators, 'NCHW' for 2-D operators.

    Raises
    ------
    tvm.error.OpAttributeInvalid
        For any other dimensionality (including 3-D, which previously fell
        through a dead ``pass`` branch to the same error).
    """
    if dims == 1:
        return 'NCW'
    if dims == 2:
        return 'NCHW'
    # The old message had no '{}' placeholder although .format(op_name) was
    # called on it; include the operator name in the error text.
    msg = "Only 1d and 2d layouts are currently supported for operator {}"
    raise tvm.error.OpAttributeInvalid(msg.format(op_name))
def dimension_picker(prefix, suffix=''):
    """Return a callable that tags *prefix* with '1d'/'2d'/'3d' based on the
    rank of the ``pool_size`` attribute.

    Parameters
    ----------
    prefix : str
        Operator name stem (e.g. 'max_pool').
    suffix : str (optional)
        Text appended after the dimension tag.

    Returns
    -------
    callable
        Maps an attribute dict to the dimension-qualified operator name,
        raising ``tvm.error.OpAttributeInvalid`` for unsupported ranks.
    """
    def _impl(attrs):
        kernel = attrs['pool_size']
        if len(kernel) == 1:
            return prefix + '1d' + suffix
        if len(kernel) == 2:
            return prefix + '2d' + suffix
        if len(kernel) == 3:
            return prefix + '3d' + suffix
        # The old message claimed only 1D/2D support even though 3D kernels
        # are handled above; make the error text match reality.
        msg = 'Only 1D, 2D and 3D kernels are supported for operator {}.'
        op_name = prefix + '1d/2d/3d'
        raise tvm.error.OpAttributeInvalid(msg.format(op_name))
    return _impl
def dimension_constraint():
    """Build a (predicate, message) pair restricting kernel dimensionality.

    The predicate accepts an attribute dict and is true when the
    ``pool_size`` attribute has one, two or three entries.
    """
    def _dim_check(attrs):
        return len(attrs['pool_size']) in (1, 2, 3)
    return _dim_check, "Only 1d, 2d, 3d kernel supported."
def replace_negative_size_with_batch_size(shape, batch_size):
    """Replace all dimensions with negative values to batch size.

    Parameters
    ----------
    shape : nnabla_pb2.Shape
        Shape message whose negative extents stand for the batch axis.
    batch_size : int
        Concrete batch size substituted for every negative extent.

    Returns
    -------
    list of int
        The resolved shape.
    """
    # The original built an intermediate nnabla_pb2.Shape message only to
    # convert it straight back to a list; the round-trip is unnecessary.
    return [batch_size if d < 0 else d for d in shape.dim]
def infer_nnabla_shape(shape):
    """Collapse a rank-4 shape to 1-D when all but one extent equals one.

    A 4-element shape whose largest extent equals the product of all
    extents (i.e. every other extent is 1) is flattened to that single
    extent; anything else is returned unchanged as a list.
    """
    dims = list(shape)
    total = 1
    for extent in dims:
        total *= extent
    if len(dims) == 4 and max(dims) == total:
        return [total]
    return dims
class nnabla_input():
    """ Dual purpose list or dictionary access object.

    Values are appended under string keys and can later be fetched by
    key, by insertion index, or by slice.  Missing string keys return
    ``None`` so converters can probe for optional inputs.
    """
    def __init__(self):
        self.input_keys = []  # insertion-ordered keys
        self.input_dict = {}  # key -> value storage
    def __getitem__(self, item):
        # Integer -> positional access; str -> keyed access (None when
        # absent); slice -> list of values in insertion order.
        if isinstance(item, int):
            return self.input_dict[self.input_keys[item]]
        if isinstance(item, str):
            if item not in self.input_keys:
                return None
            return self.input_dict[item]
        if isinstance(item, slice):
            keys = self.input_keys[item]
            return [self.input_dict[key] for key in keys]
        raise ValueError("Only integer, string, and slice accesses allowed")
    def __setitem__(self, item, value):
        # Integer assigns through an existing slot; string appends a new one.
        if isinstance(item, int):
            self.input_dict[self.input_keys[item]] = value
        elif isinstance(item, str):
            self.input_keys.append(item)
            self.input_dict[item] = value
        else:
            raise ValueError("Only integer, string, and slice accesses allowed")
    def keys(self):
        return self.input_keys
    def replace(self, old_item, item, value):
        """ Method defined to replace full-precision weights with quantized values """
        if isinstance(item, int):
            # BUG FIX: the original called input_keys.remove(old_item),
            # which removes by *value* rather than index and raises
            # ValueError for any integer index.  Pop the key at that
            # index instead, then assign through the post-removal index.
            old_key = self.input_keys.pop(old_item)
            self.input_dict.pop(old_key)
            self.input_dict[self.input_keys[item]] = value
        elif isinstance(item, str):
            # Remove old value
            self.input_dict.pop(old_item)
            self.input_keys.remove(old_item)
            # Insert new value
            self.input_keys.append(item)
            self.input_dict[item] = value
    def __len__(self):
        return len(self.input_keys)
    def __iter__(self):
        # Simple stateful iterator over values in insertion order.
        self.n = 0
        return self
    def __next__(self):
        if self.n < len(self.input_keys):
            output = self.input_dict[self.input_keys[self.n]]
            self.n += 1
            return output
        raise StopIteration
def get_tensor_type(name, type_dict):
    """Placeholder: will map a tensor *name* to its numpy dtype.

    TODO: return the numpy type (e.g. np.float32) used to turn an NNabla
    tensor into a numpy array; intended to be called from _parse_array
    inside the Exporter object.  Until implemented, every lookup yields
    None.
    """
    return None
def check_data_format():
    """Placeholder: will report a layer's data layout.

    TODO: inspect each layer and return "NCWH" (channel first) or "NWHC"
    (channel last) so Relay's data-layout transformations can be used
    for optimization.  Until implemented, always returns None.
    """
    return None
# #############################################################################
# Operator definition
# -------------------
#
# Nnabla operators are grouped in different converters
# (e.g.: Activations in _convert_activations), specific functions have their
# own converter (e.g.: 2D Convolution as _convert_convolution).
# Quantizer converters are defined in qnn_nnabla.py
def _none():
def _impl(inputs, func, shapes):
print(func.name)
return None
return _impl
def _convert_reshape():
    """Convert NNabla Reshape to relay reshape (static shapes only)."""
    def _impl(inputs, func, shapes):
        # NOTE(review): on protobuf messages hasattr() is always True for a
        # declared field, so the else branch looks unreachable as written --
        # dynamic-shape detection probably needs a different predicate.
        if hasattr(func.reshape_param, 'shape'):
            shape = tuple(func.reshape_param.shape.dim)
            return _op.reshape(inputs[0], shape)
        else:
            # Fixed typo in the error message ("Dinamic" -> "Dynamic").
            raise NotImplementedError("Dynamic input case not yet supported")
            # return _op.reshape(inputs[0], inputs[1])
    return _impl
def _convert_concat():
    """Convert NNabla Concatenate (exactly two inputs) to relay concatenate."""
    def _impl(inputs, func, shapes):
        # TODO: check data layout
        assert len(inputs) == 2
        concat_axis = func.concatenate_param.axis
        return _op.concatenate(inputs[:], axis=concat_axis)
    return _impl
def _convert_activation():
    """Convert NNabla activations (Softmax/ReLU/ReLU6/Tanh/Sigmoid) to relay."""
    def _impl(inputs, func, shapes):
        act_type = func.type
        data = inputs[0]
        if act_type == 'Softmax':
            # BUG FIX: relay's nn.softmax takes `axis`, not `axes`; the old
            # keyword raised a TypeError for every Softmax conversion.
            return _op.nn.softmax(data, axis=1)
        if act_type == 'ReLU':
            return _op.nn.relu(data)
        if act_type == 'ReLU6':
            # ReLU6 is expressed as a clip to [0, 6].
            return _op.clip(data, a_min=0., a_max=6.)
        if act_type == 'Tanh':
            return _op.tanh(data)
        if act_type == 'Sigmoid':
            return _op.sigmoid(data)
        raise tvm.error.OpNotImplemented(
            'Activation Operator {} is not yet supported \
            with frontend NNabla'.format(act_type))
    return _impl
def _convert_convolution():
    """Convert an NNabla Convolution node into a relay 2-D convolution,
    appending a bias_add when a third input is present."""
    def _impl(inputs, func, shapes):
        # TODO: Map layouts. For that, include the shape dict from Exporter in order to get
        # channel size and kernel size. data layout can be inferred with the shape
        # TODO: Check for all possible input combinations
        # for stride, pads, dilation, groups, channels, kernel_size
        # TODO: data and kernel layouts should be infered with check_data_format method
        data_layout = "NCHW"
        kernel_layout = "OIHW"
        # Extract information from nnabla node found in convolution_param
        _stride = tuple(func.convolution_param.stride.dim)
        # NOTE(review): pad.dim[0] is labelled width and pad.dim[1] height;
        # confirm the (w, h) naming against NNabla's pad ordering.  The
        # resulting 4-tuple is symmetric either way.
        _pad_w = func.convolution_param.pad.dim[0]
        _pad_h = func.convolution_param.pad.dim[1]
        _pad = (_pad_w, _pad_h, _pad_w, _pad_h)
        _dilation = tuple(func.convolution_param.dilation.dim)
        _group = func.convolution_param.group
        # Weight tensor is func.input[1]: shape (O, I, kH, kW) under OIHW.
        _output_channels = shapes[func.input[1]][0]
        _kernel_shape = tuple(shapes[func.input[1]][2:])
        conv_out = _op.nn.conv2d(inputs[0],
                                 inputs[1],
                                 strides=_stride,
                                 padding=_pad,
                                 dilation=_dilation,
                                 groups=_group,
                                 channels= _output_channels,
                                 kernel_size= _kernel_shape,
                                 data_layout=data_layout,
                                 kernel_layout=kernel_layout,
                                 out_layout="",
                                 out_dtype="")
        use_bias = len(inputs) == 3
        # NOTE(review): shapes[-1] being a float presumably carries a scale
        # factor appended by the quantized pipeline -- verify against the
        # qnn_nnabla pass before relying on it.
        if isinstance(shapes[-1], float):
            conv_out = _op.multiply(conv_out, _expr.const(shapes[-1], dtype="float32"))
        if use_bias:
            return _op.nn.bias_add(conv_out, inputs[2])
        else:
            return conv_out
    return _impl
def _convert_gemm():
    """Convert a GEMM-like NNabla op to relay dense (+ optional bias).

    Computes Y = alpha * A * B + beta * C (C only when a third input is
    present), mirroring the ONNX Gemm converter.
    """
    def _impl(inputs, func, shapes):
        # TODO: Infer alpha/beta/trans flags from NNabla parameters instead
        # of hard-coding the defaults.
        alpha = float(1.0)
        beta = float(1.0)
        transA = 0
        transB = 0
        # get number of channels
        channels = infer_channels(inputs[1], not transB)
        if transA:
            inputs[0] = _op.transpose(inputs[0], axes=(1, 0))
        if not transB:
            inputs[1] = _op.transpose(inputs[1], axes=(1, 0))
        inputs[0] = _op.nn.batch_flatten(inputs[0])
        if alpha != 1.0:
            inputs[0] *= _expr.const(alpha)
        out = _op.nn.dense(inputs[0], inputs[1], units=channels)
        use_bias = len(inputs) == 3
        # BUG FIX: the old test `use_bias or (beta != 0.0)` was always true
        # (beta is hard-coded to 1.0), so a bias-less Gemm indexed the
        # missing inputs[2] and crashed.  Add the bias only when C exists.
        if use_bias and beta != 0.0:
            return _op.nn.bias_add(out, _expr.const(beta) * inputs[2])
        return out
    return _impl
def _convert_advanced_activation():
def _impl(inputs, func, shapes):
# TODO: Create activation operators with clip values
return None
return _impl
def _convert_pooling():
def _impl(inputs, func, shapes):
# Get data_layout with check_data_format
pool_type = func.type
data_layout = default_layout(len(shapes[0]) - 2, pool_type)
if pool_type in ['GlobalMaxPooling','GlobalAveragePooling']:
raise tvm.error.OpNotImplemented(
'Function {} has experimental support in Nnabla frontend, \
please do not use it'.format(pool_type))
if pool_type == 'AveragePooling':
# ignore_border is not considered
attrs = {'pool_size': tuple(func.average_pooling_param.kernel.dim),
'strides': tuple(func.average_pooling_param.stride.dim),
'padding': tuple(func.average_pooling_param.pad.dim),
| |
SKIN_PATH + '/OpenConfig.xml'
f = open(skin, 'r')
self.skin = f.read()
f.close()
Screen.__init__(self, session)
info = '***'
self['fittitle'] = Label(_('..:: TivuStream Config ::..'))
self['infoc'] = Label(Credit)
self['Maintainer'] = Label(_('Maintainer'))
self['Maintainer2'] = Label('%s' % Maintainer2)
self['version'] = Label(_('Versione'))
self['version2'] = Label('%s' % Version)
self['infoc'] = Label(_('Credit'))
self['infoc2'] = Label('%s' % Credit)
self['fitred'] = Label(_('Esci'))
self['fityellow'] = Label(_('Aggiorna Plugin'))
self['fitgreen'] = Label(_('Salva'))
# self['fitblue'] = Label(_(''))
self['text'] = Label(info)
self["description"] = Label(_(''))
self.setup_title = _("TivuStream Config")
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self.createSetup()
self.cbUpdate = False
self["setupActions"] = ActionMap(['OkCancelActions', 'DirectionActions', 'ColorActions', 'VirtualKeyboardActions', 'ActiveCodeActions'],
{
"red": self.extnok,
"cancel": self.extnok,
'yellow': self.msgupdt1,
"green": self.cfgok,
'showVirtualKeyboard': self.KeyText,
"ok": self.Ok_edit
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
    def layoutFinished(self):
        # Refresh service references, set the window title, then query the
        # update server and arm a one-shot timer for the update prompt.
        server_ref()
        self.setTitle(self.setup_title)
        try:
            # upd_fr_txt serves three lines: version, download link,
            # changelog (in that order, per the reads below).
            fp = urllib.urlopen(upd_fr_txt)
            # NOTE(review): `count` and `labeltext` are set but never used
            # in this method.
            count = 0
            self.labeltext = ''
            s1 = fp.readline()
            s2 = fp.readline()
            s3 = fp.readline()
            s1 = s1.strip()
            s2 = s2.strip()
            s3 = s3.strip()
            # Remembered for runupdate()/msgupdt2(): download URL, latest
            # published version, changelog line.
            self.link = s2
            self.version = s1
            self.info = s3
            fp.close()
            if s1 == currversion:
                self['text'].setText(_('TivuStream versione: ') + currversion + _('\nNessun aggiornamento online!') + _('\nse ti piace puoi fare una libera donazione\nwww.paypal.me/TivuStream'))# + _('\nPLEASE DONATE'))
                self.cbUpdate = False
            elif float(s1) < float(currversion):
                self['text'].setText(_('TivuStream versione: ') + currversion + _('\nNessun aggiornamento online!') + _('\nse ti piace puoi fare una libera donazione\nwww.paypal.me/TivuStream'))# + _('\nPLEASE DONATE'))
                self.cbUpdate = False
            else:
                updatestr = (_('TivuStream versione: ') + currversion + _('\nUltimo aggiornamento ') + s1 + _(' disponibile! \nChangeLog:') + self.info)
                self.cbUpdate = True
                self['text'].setText(updatestr)
        except:
            # Network failure or malformed response: no update available.
            # NOTE(review): bare except also hides programming errors.
            self.cbUpdate = False
            self['text'].setText(_('Nessun aggiornamento disponibile') + _('\nNessuna connessione Internet o server OFF') + _('\nPrego riprova piu tardi o cambia SERVER in menu config.'))
        # One-shot 100 ms timer that pops the auto-update question; old
        # images expose .append on the signal, newer ones .connect.
        self.timerx = eTimer()
        try:
            self.timerx.callback.append(self.msgupdt2)
        except:
            self.timerx_conn = self.timerx.timeout.connect(self.msgupdt2)
        self.timerx.start(100, 1)
    def createSetup(self):
        # (Re)build the ConfigList rows; called again after every value
        # change so dependent rows stay current.
        self.editListEntry = None
        self.list = []
        self.list.append(getConfigListEntry(_('Server:'), config.plugins.TivuStream.server, _("Scelta del Server")))
        self.list.append(getConfigListEntry(_('Auto Update Plugin:'), config.plugins.TivuStream.autoupd, _("Aggiornarmento plugin automatico")))
        self.list.append(getConfigListEntry(_('Password Personale:'), config.plugins.TivuStream.code, _("Inserisci la password per scaricare Liste XXX Adulti")))
        self.list.append(getConfigListEntry(_('Posizione IPTV bouquets '), config.plugins.TivuStream.bouquettop, _("Configura posizione dei Bouquet delle liste convertite")))
        self.list.append(getConfigListEntry(_('Liste Player <.m3u>:'), config.plugins.TivuStream.pthm3uf, _("Percorso cartella contenente i file .m3u")))
        self.list.append(getConfigListEntry(_('Tipo Services Reference'), config.plugins.TivuStream.services, _("Configura Service Reference Gstreamer/Exteplayer3/StreamLink")))
        self.list.append(getConfigListEntry(_('Link in Extensions Menu:'), config.plugins.TivuStream.strtext, _("Mostra Plugin in Extensions Menu")))
        self.list.append(getConfigListEntry(_('Link in Menu Principale:'), config.plugins.TivuStream.strtmain, _("Mostra Plugin in Main Menu")))
        # Both assignments are needed: .list for ConfigListScreen state and
        # setList() to push the rows into the widget.
        self['config'].list = self.list
        self["config"].setList(self.list)
def changedEntry(self):
for x in self.onChangedEntry:
x()
#
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
#
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
#
    def createSummary(self):
        # LCD summary screen shown while this setup screen is active.
        from Screens.Setup import SetupSummary
        return SetupSummary
#
    def keyLeft(self):
        # Cycle the highlighted entry to its previous value, then rebuild
        # the list so dependent rows refresh.
        ConfigListScreen.keyLeft(self)
        print "current selection:", self["config"].l.getCurrentSelection()
        self.createSetup()
#
    def keyRight(self):
        # Cycle the highlighted entry to its next value, then rebuild the
        # list so dependent rows refresh.
        ConfigListScreen.keyRight(self)
        print "current selection:", self["config"].l.getCurrentSelection()
        self.createSetup()
#
    def Ok_edit(self):
        # OK behaves like "right": advance the highlighted value.
        ConfigListScreen.keyRight(self)
        print "current selection:", self["config"].l.getCurrentSelection()
        self.createSetup()
    def cfgok(self):
        # Green button handler: delegate to save().
        self.save()
    def save(self):
        # Validate the m3u folder, persist every changed entry, refresh the
        # service references and reload the plugin list so menu hooks take
        # effect; closes the screen either way.
        if not os.path.exists(config.plugins.TivuStream.pthm3uf.value):
            self.mbox = self.session.open(openMessageBox, _('Cartella Liste m3u non rilevato!'), openMessageBox.TYPE_INFO, timeout=4)
            return
        if self['config'].isChanged():
            for x in self['config'].list:
                x[1].save()
            server_ref()
            config.plugins.TivuStream.server.save()
            configfile.save()
            # Re-read the plugin list so Extensions/Main-menu entries
            # reflect the new settings immediately.
            plugins.clearPluginList()
            plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
            self.mbox = self.session.open(openMessageBox, _('Impostazioni salvate correttamente !'), openMessageBox.TYPE_INFO, timeout=5)
            self.close()
        else:
            self.close()
def KeyText(self):
sel = self['config'].getCurrent()
if sel:
self.session.openWithCallback(self.VirtualKeyBoardCallback, VirtualKeyBoard, title=self['config'].getCurrent()[0], text=self['config'].getCurrent()[1].value)
def VirtualKeyBoardCallback(self, callback = None):
if callback is not None and len(callback):
self['config'].getCurrent()[1].value = callback
self['config'].invalidate(self['config'].getCurrent())
return
#
def cancelConfirm(self, result):
if not result:
return
for x in self['config'].list:
x[1].cancel()
self.close()
def extnok(self):
if self['config'].isChanged():
self.session.openWithCallback(self.cancelConfirm, openMessageBox, _('Veramente chiudere senza salvare le impostazioni?'))
else:
self.close()
def msgupdt2(self):
if self.cbUpdate == False:
return
if config.plugins.TivuStream.autoupd.value == False:
return
self.session.openWithCallback(self.runupdate, openMessageBox, _('Nuova Versione Online!!!\n\nAggiornare Plugin alla Versione %s ?' % self.version), openMessageBox.TYPE_YESNO)
def msgupdt1(self):
if self.cbUpdate == False:
return
self.session.openWithCallback(self.runupdate, openMessageBox, _('Aggiornare Plugin ?'), openMessageBox.TYPE_YESNO)
def runupdate(self, result):
if self.cbUpdate == False:
return
if result:
com = self.link
dom = 'Nuova versione ' + self.version
#wget http://patbuweb.com/iptv/plugin/tivustream.tar -O /tmp/tivustream.tar > /dev/null
os.system('wget %s -O /tmp/tivustream.tar > /dev/null' % com)
os.system('sleep 3')
# self.session.open(OpenConsole, _('Aggiorno Plugin: %s') % dom, ['opkg install -force-overwrite %s' % com], finishedCallback=self.msgrstrt3, closeOnSuccess=True)
self.session.open(OpenConsole, _('Aggiorno Plugin: %s') % dom, ['tar -xvf /tmp/tivustream.tar -C /'], finishedCallback=self.msgrstrt3, closeOnSuccess=True)
    def msgrstrt3(self):
        # Post-update: notify the user, delete the downloaded archive and
        # restart via quitMainloop(3) (presumably a GUI restart -- confirm
        # against the image's quitMainloop codes).
        self.mbox = self.session.open(openMessageBox, _('Plugin Aggiornato!\nRiavvio interfaccia utente'), openMessageBox.TYPE_INFO, timeout=4)
        os.system('rm -f /tmp/tivustream.tar')
        quitMainloop(3)
class OpenConsole(Screen):
def __init__(self, session, title = None, cmdlist = None, finishedCallback = None, closeOnSuccess = False):
self.session = session
skin = SKIN_PATH + '/OpenConsole.xml'
f = open(skin, 'r')
self.skin = f.read()
f.close()
Screen.__init__(self, session)
self.finishedCallback = finishedCallback
self.closeOnSuccess = closeOnSuccess
self['fittitle'] = Label(_('..:: TivuStream Console ::..'))
self['text'] = ScrollLabel('')
self['Maintainer'] = Label(_('Maintainer'))
self['Maintainer2'] = Label('%s' % Maintainer2)
self['version'] = Label(_('Versione'))
self['version2'] = Label('%s' % Version)
self['infoc'] = Label(_('Credit'))
self['infoc2'] = Label('%s' % Credit)
self['actions'] = ActionMap(['WizardActions', 'DirectionActions'], {'ok': self.cancel,
'back': self.cancel,
'up': self['text'].pageUp,
'down': self['text'].pageDown}, -1)
self.cmdlist = cmdlist
self.container = eConsoleAppContainer()
self.run = 0
try:
self.container.appClosed.append(self.runFinished)
except:
self.appClosed_conn = self.container.appClosed.connect(self.runFinished)
try:
self.container.dataAvail.append(self.dataAvail)
except:
self.dataAvail_conn = self.container.dataAvail.connect(self.dataAvail)
self.onLayoutFinish.append(self.startRun)
def updateTitle(self):
self.setTitle(self.newtitle)
def startRun(self):
self['text'].setText(_('Esecuzione in corso:') + '\n\n')
print 'Console: executing in run', self.run, ' the command:', self.cmdlist[self.run]
if self.container.execute(self.cmdlist[self.run]):
self.runFinished(-1)
def runFinished(self, retval):
self.run += 1
if self.run != len(self.cmdlist):
if self.container.execute(self.cmdlist[self.run]):
self.runFinished(-1)
else:
str = self['text'].getText()
str += _('Esecuzione finita!!')
self['text'].setText(str)
self['text'].lastPage()
if self.finishedCallback is not None:
self.finishedCallback()
if not retval and self.closeOnSuccess:
self.cancel()
return
def cancel(self):
if self.run == len(self.cmdlist):
self.close()
try:
self.container.appClosed.remove(self.runFinished)
except:
self.appClosed_conn = None
try:
self.container.dataAvail.remove(self.dataAvail)
except:
self.dataAvail_conn = None
return
def dataAvail(self, str):
self['text'].setText(self['text'].getText() + str)
class openMessageBox(Screen):
    """Skinned replacement for the stock MessageBox dialog.

    Supports five dialog types (yes/no, info, warning, error, message),
    an optional choice ``list``, a countdown ``timeout`` and the usual
    ok/cancel/arrow-key navigation.
    """
    TYPE_YESNO = 0
    TYPE_INFO = 1
    TYPE_WARNING = 2
    TYPE_ERROR = 3
    TYPE_MESSAGE = 4
    # NOTE(review): `list = []` is a shared mutable default argument; safe
    # only while no caller mutates the received list.
    def __init__(self, session, text, type = TYPE_YESNO, timeout = -1, close_on_any_key = False, default = True, enable_input = True, msgBoxID = None, picon = None, simple = False, list = [], timeout_default = None):
        self.type = type
        self.session = session
        # Pick the skin variant depending on the detected brand files.
        if fileExists(BRAND) or fileExists(BRANDP):
            skin = SKIN_PATH + '/OpenMessageBoxOpen.xml'
        else:
            skin = SKIN_PATH + '/OpenMessageBox.xml'
        #skin = SKIN_PATH + '/OpenMessageBox.xml'
        f = open(skin, 'r')
        self.skin = f.read()
        f.close()
        Screen.__init__(self, session)
        self.msgBoxID = msgBoxID
        self['fittitle'] = Label(_('..:: TivuStream Message ::..'))
        self['Maintainer'] = Label(_('Maintainer'))
        self['Maintainer2'] = Label('%s' % Maintainer2)
        self['version'] = Label(_('Versione'))
        self['version2'] = Label('%s' % Version)
        self['infoc'] = Label(_('Credit'))
        self['infoc2'] = Label('%s' % Credit)
        self['text'] = Label(text)
        self['Text'] = StaticText(text)
        self['selectedChoice'] = StaticText()
        self.text = text
        self.close_on_any_key = close_on_any_key
        self.timeout_default = timeout_default
        self['ErrorPixmap'] = Pixmap()
        self['QuestionPixmap'] = Pixmap()
        self['InfoPixmap'] = Pixmap()
        self['WarningPixmap'] = Pixmap()
        self.timerRunning = False
        self.initTimeout(timeout)
        # Show only the pixmap matching the requested (or implied) icon.
        picon = picon or type
        if picon != self.TYPE_ERROR:
            self['ErrorPixmap'].hide()
        if picon != self.TYPE_YESNO:
            self['QuestionPixmap'].hide()
        if picon != self.TYPE_INFO:
            self['InfoPixmap'].hide()
        if picon != self.TYPE_WARNING:
            self['WarningPixmap'].hide()
        self.title = self.type < self.TYPE_MESSAGE and [_('Question'),
         _('Information'),
         _('Warning'),
         _('Error')][self.type] or _('Message')
        # Yes/No dialogs get a two-entry choice list; `default` controls
        # which answer is preselected.
        if type == self.TYPE_YESNO:
            if list:
                self.list = list
            elif default == True:
                self.list = [(_('Si'), True), (_('No'), False)]
            else:
                self.list = [(_('No'), False), (_('Si'), True)]
        else:
            self.list = []
        self['list'] = MenuList(self.list)
        if self.list:
            self['selectedChoice'].setText(self.list[0][0])
        else:
            self['list'].hide()
        if enable_input:
            self['actions'] = ActionMap(['MsgBoxActions', 'DirectionActions'], {'cancel': self.cancel,
             'ok': self.ok,
             'alwaysOK': self.alwaysOK,
             'up': self.up,
             'down': self.down,
             'left': self.left,
             'right': self.right,
             'upRepeated': self.up,
             'downRepeated': self.down,
             'leftRepeated': self.left,
             'rightRepeated': self.right}, -1)
        self.onLayoutFinish.append(self.layoutFinished)
    def layoutFinished(self):
        self.setTitle(self.title)
    def initTimeout(self, timeout):
        # Arm a 1 Hz countdown when a positive timeout was requested; the
        # title is suffixed with the remaining seconds by timerTick().
        self.timeout = timeout
        if timeout > 0:
            self.timer = eTimer()
            # Old images expose .append on the signal, newer ones .connect.
            try:
                self.timer.callback.append(self.timerTick)
            except:
                self.timer_conn = self.timer.timeout.connect(self.timerTick)
            self.onExecBegin.append(self.startTimer)
            self.origTitle = None
            if self.execing:
                self.timerTick()
            else:
                self.onShown.append(self.__onShown)
            self.timerRunning = True
        else:
            self.timerRunning = False
        return
    def __onShown(self):
        self.onShown.remove(self.__onShown)
        self.timerTick()
    def startTimer(self):
        self.timer.start(1000)
    def stopTimer(self):
        # Tear the countdown down and restore the original title.
        if self.timerRunning:
            del self.timer
            self.onExecBegin.remove(self.startTimer)
            self.setTitle(self.origTitle)
            self.timerRunning = False
    def timerTick(self):
        # One countdown step; closes the dialog via timeoutCallback() when
        # the counter reaches zero.
        if self.execing:
            self.timeout -= 1
            if self.origTitle is None:
                self.origTitle = self.instance.getTitle()
            self.setTitle(self.origTitle + ' (' + str(self.timeout) + ')')
            if self.timeout == 0:
                self.timer.stop()
                self.timerRunning = False
                self.timeoutCallback()
        return
    def timeoutCallback(self):
        print 'Timeout!'
        # On expiry: return the configured default, or behave like OK.
        if self.timeout_default is not None:
            self.close(self.timeout_default)
        else:
            self.ok()
        return
    def cancel(self):
        self.close(False)
    def ok(self):
        # Confirm: return the highlighted choice payload (or plain True).
        if self.list:
            self.close(self['list'].getCurrent()[1])
        else:
            self.close(True)
    def alwaysOK(self):
        self.close(True)
    def up(self):
        self.move(self['list'].instance.moveUp)
    def down(self):
        self.move(self['list'].instance.moveDown)
    def left(self):
        self.move(self['list'].instance.pageUp)
    def right(self):
        self.move(self['list'].instance.pageDown)
    def move(self, direction):
        # Any key closes the box when close_on_any_key is set; otherwise
        # move the selection, mirror it into the summary text and stop the
        # countdown (user interaction cancels the timeout).
        if self.close_on_any_key:
            self.close(True)
        self['list'].instance.moveSelection(direction)
        if self.list:
            self['selectedChoice'].setText(self['list'].getCurrent()[0])
        self.stopTimer()
    def __repr__(self):
        return str(type(self)) + '(' + self.text + ')'
class plgnstrt(Screen):
def __init__(self, session):
self.session = session
skin = SKIN_PATH + '/Plgnstrt.xml'
f | |
>>>
>>> print(m.solve_limited(expect_interrupt=True))
None
>>> m.delete()
"""
if self.solver:
self.solver.interrupt()
def clear_interrupt(self):
"""
Clears a previous interrupt. If a limited SAT call was interrupted
using the :meth:`interrupt` method, this method **must be called**
before calling the SAT solver again.
"""
if self.solver:
self.solver.clear_interrupt()
def propagate(self, assumptions=[], phase_saving=0):
"""
The method takes a list of assumption literals and does unit
propagation of each of these literals consecutively. A Boolean
status is returned followed by a list of assigned (assumed and also
propagated) literals. The status is ``True`` if no conflict arised
during propagation. Otherwise, the status is ``False``.
Additionally, a user may specify an optional argument
``phase_saving`` (``0`` by default) to enable MiniSat-like phase
saving.
**Note** that only MiniSat-like solvers support this functionality
(e.g. :class:`Cadical` and :class:`Lingeling` do not support it).
:param assumptions: a list of assumption literals.
:param phase_saving: enable phase saving (can be ``0``, ``1``, and
``2``).
:type assumptions: iterable(int)
:type phase_saving: int
:rtype: tuple(bool, list(int)).
Usage example:
.. code-block:: python
>>> from pysat.solvers import Glucose3
>>> from pysat.card import *
>>>
>>> cnf = CardEnc.atmost(lits=range(1, 6), bound=1, encoding=EncType.pairwise)
>>> g = Glucose3(bootstrap_with=cnf.clauses)
>>>
>>> g.propagate(assumptions=[1])
(True, [1, -2, -3, -4, -5])
>>>
>>> g.add_clause([2])
>>> g.propagate(assumptions=[1])
(False, [])
>>>
>>> g.delete()
"""
if self.solver:
return self.solver.propagate(assumptions, phase_saving)
def set_phases(self, literals=[]):
"""
The method takes a list of literals as an argument and sets
*phases* (or MiniSat-like *polarities*) of the corresponding
variables respecting the literals. For example, if a given list of
literals is ``[1, -513]``, the solver will try to set variable
:math:`x_1` to true while setting :math:`x_{513}` to false.
**Note** that once these preferences are specified,
:class:`MinisatGH` and :class:`Lingeling` will always respect them
when branching on these variables. However, solvers
:class:`Glucose3`, :class:`Glucose4`, :class:`MapleChrono`,
:class:`MapleCM`, :class:`Maplesat`, :class:`Minisat22`, and
:class:`Minicard` can redefine the preferences in any of the
following SAT calls due to the phase saving heuristic.
Also **note** that :class:`Cadical` does not support this
functionality.
:param literals: a list of literals.
:type literals: iterable(int)
Usage example:
.. code-block:: python
>>> from pysat.solvers import Glucose3
>>>
>>> g = Glucose3(bootstrap_with=[[1, 2]])
>>> # the formula has 3 models: [-1, 2], [1, -2], [1, 2]
>>>
>>> g.set_phases(literals=[1, 2])
>>> g.solve()
True
>>> g.get_model()
[1, 2]
>>>
>>> g.delete()
"""
if self.solver:
return self.solver.set_phases(literals)
def get_status(self):
"""
The result of a previous SAT call is stored in an internal
variable and can be later obtained using this method.
:rtype: Boolean or ``None``.
``None`` is returned if a previous SAT call was interrupted.
"""
if self.solver:
return self.solver.get_status()
def get_model(self):
"""
The method is to be used for extracting a satisfying assignment for
a CNF formula given to the solver. A model is provided if a
previous SAT call returned ``True``. Otherwise, ``None`` is
reported.
:rtype: list(int) or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> s = Solver()
>>> s.add_clause([-1, 2])
>>> s.add_clause([-1, -2])
>>> s.add_clause([1, -2])
>>> s.solve()
True
>>> print(s.get_model())
[-1, -2]
>>> s.delete()
"""
if self.solver:
return self.solver.get_model()
def get_core(self):
"""
This method is to be used for extracting an unsatisfiable core in
the form of a subset of a given set of assumption literals, which
are responsible for unsatisfiability of the formula. This can be
done only if the previous SAT call returned ``False`` (*UNSAT*).
Otherwise, ``None`` is returned.
:rtype: list(int) or ``None``.
Usage example:
.. code-block:: python
>>> from pysat.solvers import Minisat22
>>> m = Minisat22()
>>> m.add_clause([-1, 2])
>>> m.add_clause([-2, 3])
>>> m.add_clause([-3, 4])
>>> m.solve(assumptions=[1, 2, 3, -4])
False
>>> print(m.get_core()) # literals 2 and 3 are not in the core
[-4, 1]
>>> m.delete()
"""
if self.solver:
return self.solver.get_core()
def get_proof(self):
"""
A DRUP proof can be extracted using this method if the solver was
set up to provide a proof. Otherwise, the method returns ``None``.
:rtype: list(str) or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=3)
>>> with Solver(name='g4', with_proof=True) as g:
... g.append_formula(cnf.clauses)
... g.solve()
False
... print(g.get_proof())
['-8 4 1 0', '-10 0', '-2 0', '-4 0', '-8 0', '-6 0', '0']
"""
if self.solver:
return self.solver.get_proof()
def time(self):
"""
Get the time spent when doing the last SAT call. **Note** that the
time is measured only if the ``use_timer`` argument was previously
set to ``True`` when creating the solver (see :class:`Solver` for
details).
:rtype: float.
Example usage:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=10)
>>> with Solver(bootstrap_with=cnf.clauses, use_timer=True) as s:
... print(s.solve())
False
... print('{0:.2f}s'.format(s.time()))
150.16s
"""
if self.solver:
return self.solver.time()
def time_accum(self):
"""
Get the time spent for doing all SAT calls accumulated. **Note**
that the time is measured only if the ``use_timer`` argument was
previously set to ``True`` when creating the solver (see
:class:`Solver` for details).
:rtype: float.
Example usage:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=10)
>>> with Solver(bootstrap_with=cnf.clauses, use_timer=True) as s:
... print(s.solve(assumptions=[1]))
False
... print('{0:.2f}s'.format(s.time()))
1.76s
... print(s.solve(assumptions=[-1]))
False
... print('{0:.2f}s'.format(s.time()))
113.58s
... print('{0:.2f}s'.format(s.time_accum()))
115.34s
"""
if self.solver:
return self.solver.time_accum()
def nof_vars(self):
"""
This method returns the number of variables currently appearing in
the formula given to the solver.
:rtype: int.
Example:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.nof_vars()
3
"""
if self.solver:
return self.solver.nof_vars()
def nof_clauses(self):
"""
This method returns the number of clauses currently appearing in
the formula given to the solver.
:rtype: int.
Example:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.nof_clauses()
2
"""
if self.solver:
return self.solver.nof_clauses()
def enum_models(self, assumptions=[]):
"""
This method can be used to enumerate models of a CNF formula. It
can be used as a standard Python iterator. The method can be used
without arguments but also with an argument ``assumptions``, which
is a list of literals to "assume".
:param assumptions: a list of assumption literals.
:type assumptions: iterable(int)
:rtype: list(int).
Example:
.. code-block:: python
>>> with Solver(bootstrap_with=[[-1, 2], [-2, 3]]) as s:
... for m in s.enum_models():
... print(m)
[-1, -2, -3]
[-1, -2, 3]
[-1, 2, 3]
[1, 2, 3]
>>>
>>> with Solver(bootstrap_with=[[-1, 2], [-2, 3]]) as s:
... for m in s.enum_models(assumptions=[1]):
... print(m)
[1, 2, 3]
"""
if self.solver:
return self.solver.enum_models(assumptions)
def add_clause(self, clause, no_return=True):
"""
This method is used to add a single clause to the solver. An
optional argument ``no_return`` controls whether or not to check
the formula's satisfiability after adding the new clause.
:param clause: an iterable over literals.
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type clause: iterable(int)
:type no_return: bool
:rtype: bool if ``no_return`` is set to ``False``.
Note that a clause can be either a ``list`` of integers or another
iterable type over integers, e.g. ``tuple`` or ``set`` among
others.
A usage example is the following:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-1, -2]])
>>> s.add_clause([1], no_return=False)
False
"""
if self.solver:
res = self.solver.add_clause(clause, no_return)
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
This method is responsible for adding a new *native* AtMostK (see
:mod:`pysat.card`) constraint.
**Note that most of the solvers do not support native AtMostK
constraints**.
An AtMostK constraint is :math:`\sum_{i=1}^{n}{x_i}\leq k`. A
native AtMostK constraint should be given as a pair ``lits`` and
``k``, where ``lits`` is a list of literals in the sum.
:param lits: a list of literals.
:param k: upper bound on the number of satisfied literals
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type lits: iterable(int)
:type k: int
:type no_return: bool
:rtype: bool if ``no_return`` is set to ``False``.
A usage example is the following:
.. code-block:: python
>>> s = | |
import sqlite3
from PyQt5 import QtCore, QtGui, QtWidgets
import pathlib
from PyQt5.QtCore import QModelIndex
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QAbstractItemView
# --- one-time database bootstrap ------------------------------------------
# `s` is the module-level session/state handle used elsewhere; start unset.
s = None

accounts = pathlib.Path("Accounts.db")
user = pathlib.Path("User.db")
# (path, flag) pairs: flag 0 -> user credential DB, flag 1 -> accounts DB.
lam = [[user, 0], [accounts, 1]]

for file in lam:
    print(file[0])
    if file[0].exists():
        print("File exist")
        continue
    # Pick the database name and schema for the missing file.
    if file[1] == 0:
        print("check")
        print("Creating User.db")
        db_name = 'User.db'
        ddl = """CREATE TABLE security(ID INTEGER PRIMARY KEY,User string NOT NULL,Hash string NOT NULL,Topt string)"""
    else:
        print("Creating Accounts.db")
        db_name = 'Accounts.db'
        ddl = """CREATE TABLE accounts(ID INTEGER PRIMARY KEY,Account string NOT NULL,User string NOT NULL,Hash string
NOT NULL,Date string NOT NULL,security_ID INTEGER,Url string NOT NULL)"""
    # Fix: close the connection even if table creation fails — the
    # original leaked the sqlite3 connection on any execute() error.
    conn = sqlite3.connect(db_name)
    try:
        conn.execute(ddl)
        conn.commit()
    finally:
        conn.close()
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(812, 573)
MainWindow.setMinimumSize(QtCore.QSize(0, 573))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.table_view = QtWidgets.QTableWidget(self.centralwidget)
self.table_view.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.table_view.sizePolicy().hasHeightForWidth())
self.table_view.setSizePolicy(sizePolicy)
self.table_view.setMinimumSize(QtCore.QSize(520, 0))
self.table_view.setMaximumSize(QtCore.QSize(100000, 100000))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(135, 206, 250))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.HighlightedText, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(135, 206, 250))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.HighlightedText, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 120, 215))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.HighlightedText, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.table_view.setPalette(palette)
self.table_view.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.table_view.setLayoutDirection(QtCore.Qt.LeftToRight)
self.table_view.setInputMethodHints(QtCore.Qt.ImhNone)
self.table_view.setFrameShape(QtWidgets.QFrame.NoFrame)
self.table_view.setFrameShadow(QtWidgets.QFrame.Plain)
self.table_view.setLineWidth(0)
self.table_view.setMidLineWidth(-1)
self.table_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.table_view.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
self.table_view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.table_view.setDragDropMode(QtWidgets.QAbstractItemView.NoDragDrop)
self.table_view.setDefaultDropAction(QtCore.Qt.IgnoreAction)
self.table_view.setAlternatingRowColors(True)
self.table_view.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.table_view.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.table_view.setTextElideMode(QtCore.Qt.ElideRight)
self.table_view.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.table_view.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.table_view.setGridStyle(QtCore.Qt.SolidLine)
self.table_view.setCornerButtonEnabled(True)
self.table_view.setRowCount(0)
self.table_view.setColumnCount(4)
self.table_view.setObjectName("table_view")
item = QtWidgets.QTableWidgetItem()
self.table_view.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(0, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(0, 3, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(1, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(1, 3, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(2, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(2, 3, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(3, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_view.setItem(3, 3, item)
self.table_view.horizontalHeader().setCascadingSectionResizes(False)
self.table_view.horizontalHeader().setStretchLastSection(False)
self.gridLayout.addWidget(self.table_view, 0, 0, 1, 2)
self.grp_box_pgen = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.grp_box_pgen.sizePolicy().hasHeightForWidth())
self.grp_box_pgen.setSizePolicy(sizePolicy)
self.grp_box_pgen.setMinimumSize(QtCore.QSize(0, 111))
self.grp_box_pgen.setAutoFillBackground(False)
self.grp_box_pgen.setFlat(False)
self.grp_box_pgen.setObjectName("grp_box_pgen")
self.gridLayout_2 = QtWidgets.QGridLayout(self.grp_box_pgen)
self.gridLayout_2.setObjectName("gridLayout_2")
self.lbl_slen_pgen = QtWidgets.QLabel(self.grp_box_pgen)
self.lbl_slen_pgen.setObjectName("lbl_slen_pgen")
self.gridLayout_2.addWidget(self.lbl_slen_pgen, 0, 0, 1, 1)
self.tbox_slen_pgen = QtWidgets.QLineEdit(self.grp_box_pgen)
self.tbox_slen_pgen.setMinimumSize(QtCore.QSize(0, 20))
self.tbox_slen_pgen.setFrame(True)
self.tbox_slen_pgen.setObjectName("tbox_slen_pgen")
self.gridLayout_2.addWidget(self.tbox_slen_pgen, 0, 1, 1, 2)
self.btn_genpass = QtWidgets.QPushButton(self.grp_box_pgen)
self.btn_genpass.setMinimumSize(QtCore.QSize(100, 23))
self.btn_genpass.setObjectName("btn_genpass")
self.gridLayout_2.addWidget(self.btn_genpass, 0, 3, 1, 1)
self.lbl_genpass_pgen = QtWidgets.QLabel(self.grp_box_pgen)
self.lbl_genpass_pgen.setObjectName("lbl_genpass_pgen")
self.gridLayout_2.addWidget(self.lbl_genpass_pgen, 1, 0, 1, 1)
self.btn_cpy_pass = QtWidgets.QPushButton(self.grp_box_pgen)
self.btn_cpy_pass.setMinimumSize(QtCore.QSize(80, 23))
self.btn_cpy_pass.setObjectName("btn_cpy_pass")
self.gridLayout_2.addWidget(self.btn_cpy_pass, 2, 1, 1, 1)
self.lbl_warn_pgen = QtWidgets.QLabel(self.grp_box_pgen)
self.lbl_warn_pgen.setFrameShadow(QtWidgets.QFrame.Plain)
self.lbl_warn_pgen.setTextFormat(QtCore.Qt.PlainText)
self.lbl_warn_pgen.setObjectName("lbl_warn_pgen")
self.gridLayout_2.addWidget(self.lbl_warn_pgen, 2, 2, 1, 2)
self.tbox_genpass_pgen = QtWidgets.QLineEdit(self.grp_box_pgen)
self.tbox_genpass_pgen.setMinimumSize(QtCore.QSize(242, 20))
self.tbox_genpass_pgen.setFrame(True)
self.tbox_genpass_pgen.setObjectName("tbox_genpass_pgen")
self.gridLayout_2.addWidget(self.tbox_genpass_pgen, 1, 1, 1, 3)
self.lbl_showpass_pgen = QtWidgets.QLabel(self.grp_box_pgen)
self.lbl_showpass_pgen.setEnabled(True)
self.lbl_showpass_pgen.setGeometry(QtCore.QRect(336, 52, 20, 20))
self.lbl_showpass_pgen.setText("")
self.lbl_showpass_pgen.setPixmap(QtGui.QPixmap("showpass.png"))
self.lbl_showpass_pgen.setScaledContents(True)
self.lbl_showpass_pgen.setObjectName("lbl_showpass_pgen")
self.lbl_showpass_pgen.setStyleSheet("background-color: transparent\n")
self.tbox_genpass_pgen.raise_()
self.lbl_genpass_pgen.raise_()
self.lbl_warn_pgen.raise_()
self.btn_genpass.raise_()
self.lbl_slen_pgen.raise_()
self.tbox_slen_pgen.raise_()
self.btn_cpy_pass.raise_()
self.lbl_showpass_pgen.raise_()
self.gridLayout.addWidget(self.grp_box_pgen, 2, 0, 1, 1)
self.widget_misc = QtWidgets.QWidget(self.centralwidget)
self.widget_misc.setObjectName("widget_misc")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget_misc)
self.verticalLayout.setObjectName("verticalLayout")
self.btn_refresh = QtWidgets.QPushButton(self.widget_misc)
self.btn_refresh.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_refresh.sizePolicy().hasHeightForWidth())
self.btn_refresh.setSizePolicy(sizePolicy)
self.btn_refresh.setMinimumSize(QtCore.QSize(0, 23))
self.btn_refresh.setMaximumSize(QtCore.QSize(126, 16777215))
self.btn_refresh.setDefault(False)
self.btn_refresh.setFlat(False)
self.btn_refresh.setObjectName("btn_refresh")
self.verticalLayout.addWidget(self.btn_refresh)
self.btn_export = QtWidgets.QPushButton(self.widget_misc)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_export.sizePolicy().hasHeightForWidth())
self.btn_export.setSizePolicy(sizePolicy)
self.btn_export.setMinimumSize(QtCore.QSize(126, 23))
self.btn_export.setObjectName("btn_export")
self.verticalLayout.addWidget(self.btn_export)
self.btn_logout = QtWidgets.QPushButton(self.widget_misc)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_logout.sizePolicy().hasHeightForWidth())
self.btn_logout.setSizePolicy(sizePolicy)
self.btn_logout.setMinimumSize(QtCore.QSize(0, 23))
self.btn_logout.setMaximumSize(QtCore.QSize(126, 16777215))
self.btn_logout.setObjectName("btn_logout")
self.verticalLayout.addWidget(self.btn_logout)
self.gridLayout.addWidget(self.widget_misc, 2, 1, 1, 1)
self.stk_user = QtWidgets.QStackedWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stk_user.sizePolicy().hasHeightForWidth())
self.stk_user.setSizePolicy(sizePolicy)
self.stk_user.setMinimumSize(QtCore.QSize(0, 340))
self.stk_user.setObjectName("stk_user")
self.page_3 = QtWidgets.QWidget()
self.page_3.setObjectName("page_3")
self.tab_login = QtWidgets.QTabWidget(self.page_3)
self.tab_login.setGeometry(QtCore.QRect(0, 0, 265, 141))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tab_login.sizePolicy().hasHeightForWidth())
self.tab_login.setSizePolicy(sizePolicy)
self.tab_login.setMinimumSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.tab_login.setPalette(palette)
self.tab_login.setAutoFillBackground(False)
self.tab_login.setObjectName("tab_login")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.stk_login = QtWidgets.QStackedWidget(self.tab)
self.stk_login.setGeometry(QtCore.QRect(3, 3, 261, 111))
self.stk_login.setObjectName("stk_login")
self.page_8 = QtWidgets.QWidget()
self.page_8.setObjectName("page_8")
self.lbl_user_login = QtWidgets.QLabel(self.page_8)
self.lbl_user_login.setGeometry(QtCore.QRect(9, 9, 48, 16))
self.lbl_user_login.setObjectName("lbl_user_login")
self.tbox_user_login = QtWidgets.QLineEdit(self.page_8)
self.tbox_user_login.setGeometry(QtCore.QRect(63, 9, 184, 20))
self.tbox_user_login.setFrame(True)
self.tbox_user_login.setObjectName("tbox_user_login")
self.lbl_pass_login = QtWidgets.QLabel(self.page_8)
self.lbl_pass_login.setGeometry(QtCore.QRect(9, 35, 46, 16))
self.lbl_pass_login.setObjectName("lbl_pass_login")
self.tbox_pass_login = QtWidgets.QLineEdit(self.page_8)
self.tbox_pass_login.setGeometry(QtCore.QRect(63, 35, 184, 20))
self.tbox_pass_login.setFrame(True)
self.tbox_pass_login.setObjectName("tbox_pass_login")
self.btn_login = QtWidgets.QPushButton(self.page_8)
self.btn_login.setGeometry(QtCore.QRect(10, 64, 241, 23))
self.btn_login.setMinimumSize(QtCore.QSize(0, 23))
self.btn_login.setObjectName("btn_login")
self.lbl_warn_login = QtWidgets.QLabel(self.page_8)
self.lbl_warn_login.setGeometry(QtCore.QRect(10, 87, 241, 16))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_warn_login.sizePolicy().hasHeightForWidth())
self.lbl_warn_login.setSizePolicy(sizePolicy)
self.lbl_warn_login.setTextFormat(QtCore.Qt.PlainText)
self.lbl_warn_login.setObjectName("lbl_warn_login")
self.lbl_showpass_login = QtWidgets.QLabel(self.page_8)
self.lbl_showpass_login.setEnabled(True)
self.lbl_showpass_login.setGeometry(QtCore.QRect(224, 35, 21, 20))
self.lbl_showpass_login.setText("")
self.lbl_showpass_login.setPixmap(QtGui.QPixmap("showpass.png"))
self.lbl_showpass_login.setScaledContents(True)
self.lbl_showpass_login.setObjectName("lbl_showpass_login")
self.lbl_showpass_login.setStyleSheet("background-color: transparent\n")
self.stk_login.addWidget(self.page_8)
self.page_9 = QtWidgets.QWidget()
self.page_9.setObjectName("page_9")
self.lbl_otp_login = QtWidgets.QLabel(self.page_9)
self.lbl_otp_login.setGeometry(QtCore.QRect(9, 9, 48, 40))
self.lbl_otp_login.setFont(QFont("", 17))
self.lbl_otp_login.setObjectName("lbl_otp_login")
self.tbox_otp_login = QtWidgets.QLineEdit(self.page_9)
self.tbox_otp_login.setGeometry(QtCore.QRect(63, 9, 184, 40))
self.tbox_otp_login.setFrame(True)
self.tbox_otp_login.setObjectName("tbox_otp_login")
self.tbox_otp_login.setFont(QFont("",17))
self.btn_submit_login = QtWidgets.QPushButton(self.page_9)
self.btn_submit_login.setGeometry(QtCore.QRect(10, 58, 241, 25))
self.btn_submit_login.setMinimumSize(QtCore.QSize(0, 23))
self.btn_submit_login.setObjectName("btn_submit_login")
self.btn_submit_login.setFont(QFont("", 14))
self.stk_login.addWidget(self.page_9)
self.tab_login.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.stk_signup = QtWidgets.QStackedWidget(self.tab_2)
self.stk_signup.setGeometry(QtCore.QRect(3, 3, 261, 157))
self.stk_signup.setObjectName("stk_signup")
self.page_10 = QtWidgets.QWidget()
self.page_10.setObjectName("page_10")
self.lbl_user_signup = QtWidgets.QLabel(self.page_10)
self.lbl_user_signup.setGeometry(QtCore.QRect(9, 9, 48, 16))
self.lbl_user_signup.setObjectName("lbl_user_signup")
self.tbox_user_signup = QtWidgets.QLineEdit(self.page_10)
self.tbox_user_signup.setGeometry(QtCore.QRect(76, 9, 171, 20))
self.tbox_user_signup.setInputMask("")
self.tbox_user_signup.setFrame(True)
self.tbox_user_signup.setObjectName("tbox_user_signup")
self.lbl_pass_signup = QtWidgets.QLabel(self.page_10)
self.lbl_pass_signup.setGeometry(QtCore.QRect(9, 35, 46, 16))
self.lbl_pass_signup.setObjectName("lbl_pass_signup")
self.tbox_pass_signup = QtWidgets.QLineEdit(self.page_10)
self.tbox_pass_signup.setGeometry(QtCore.QRect(76, 35, 171, 20))
self.tbox_pass_signup.setInputMask("")
self.tbox_pass_signup.setFrame(True)
self.tbox_pass_signup.setObjectName("tbox_pass_signup")
self.lbl_repass_signup = QtWidgets.QLabel(self.page_10)
self.lbl_repass_signup.setGeometry(QtCore.QRect(9, 61, 61, 16))
self.lbl_repass_signup.setObjectName("lbl_repass_signup")
self.tbox_repass_signup = QtWidgets.QLineEdit(self.page_10)
self.tbox_repass_signup.setGeometry(QtCore.QRect(76, 61, 171, 20))
self.tbox_repass_signup.setFrame(True)
self.tbox_repass_signup.setObjectName("tbox_repass_signup")
self.chkbox_2fa_signup = QtWidgets.QCheckBox(self.page_10)
self.chkbox_2fa_signup.setGeometry(QtCore.QRect(76, 86, 100, 17))
self.chkbox_2fa_signup.setLayoutDirection(QtCore.Qt.LeftToRight)
self.chkbox_2fa_signup.setChecked(True)
self.chkbox_2fa_signup.setTristate(False)
self.chkbox_2fa_signup.setObjectName("chkbox_2fa_signup")
self.btn_signup = QtWidgets.QPushButton(self.page_10)
self.btn_signup.setGeometry(QtCore.QRect(9, 107, 241, 23))
self.btn_signup.setCheckable(False)
self.btn_signup.setFlat(False)
self.btn_signup.setObjectName("btn_signup")
self.lbl_warn_signup = QtWidgets.QLabel(self.page_10)
self.lbl_warn_signup.setGeometry(QtCore.QRect(8, 131, 241, 20))
self.lbl_warn_signup.setTextFormat(QtCore.Qt.PlainText)
self.lbl_warn_signup.setObjectName("lbl_warn_signup")
self.lbl_showpass1_signup = QtWidgets.QLabel(self.page_10)
self.lbl_showpass1_signup.setEnabled(True)
self.lbl_showpass1_signup.setGeometry(QtCore.QRect(224, 35, 21, 20))
self.lbl_showpass1_signup.setText("")
self.lbl_showpass1_signup.setPixmap(QtGui.QPixmap("showpass.png"))
self.lbl_showpass1_signup.setScaledContents(True)
self.lbl_showpass1_signup.setObjectName("lbl_showpass1_signup")
self.lbl_showpass1_signup.setStyleSheet("background-color: transparent\n")
self.lbl_showpass2_signup = QtWidgets.QLabel(self.page_10)
self.lbl_showpass2_signup.setEnabled(True)
self.lbl_showpass2_signup.setGeometry(QtCore.QRect(224, 61, 21, 20))
self.lbl_showpass2_signup.setText("")
self.lbl_showpass2_signup.setPixmap(QtGui.QPixmap("showpass.png"))
self.lbl_showpass2_signup.setScaledContents(True)
self.lbl_showpass2_signup.setObjectName("lbl_showpass2_signup")
self.lbl_showpass2_signup.setStyleSheet("background-color: transparent\n")
self.stk_signup.addWidget(self.page_10)
self.page_11 = QtWidgets.QWidget()
self.page_11.setObjectName("page_11")
self.lbl_qr_signup = QtWidgets.QLabel(self.page_11)
self.lbl_qr_signup.setGeometry(QtCore.QRect(8, 8, 242, 242))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lbl_qr_signup.sizePolicy().hasHeightForWidth())
self.lbl_qr_signup.setSizePolicy(sizePolicy)
self.lbl_qr_signup.setMinimumSize(QtCore.QSize(0, 0))
self.lbl_qr_signup.setMaximumSize(QtCore.QSize(251, 251))
self.lbl_qr_signup.setText("")
self.lbl_qr_signup.setPixmap(
QtGui.QPixmap("C:\\Users\\Leader\\Desktop\\../Pictures/Screenshots/Screenshot (1).png"))
self.lbl_qr_signup.setScaledContents(True)
self.lbl_qr_signup.setObjectName("lbl_qr_signup")
self.btn_signup_done = QtWidgets.QPushButton(self.page_11)
self.btn_signup_done.setGeometry(QtCore.QRect(8, 263, 241, 23))
self.btn_signup_done.setCheckable(False)
self.btn_signup_done.setFlat(False)
self.stk_signup.addWidget(self.page_11)
self.tab_login.addTab(self.tab_2, "")
self.stk_user.addWidget(self.page_3)
self.page_4 = QtWidgets.QWidget()
self.page_4.setObjectName("page_4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.page_4)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.widget = QtWidgets.QWidget(self.page_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QtCore.QSize(0, 20))
self.widget.setObjectName("widget")
self.tbox_search = QtWidgets.QLineEdit(self.widget)
self.tbox_search.setGeometry(QtCore.QRect(42, 0, 161, 20))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tbox_search.sizePolicy().hasHeightForWidth())
self.tbox_search.setSizePolicy(sizePolicy)
self.tbox_search.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.tbox_search.setInputMask("")
self.tbox_search.setObjectName("tbox_search")
self.label = QtWidgets.QLabel(self.widget)
self.label.setGeometry(QtCore.QRect(0, 3, 41, 16))
self.label.setObjectName("label")
self.verticalLayout_3.addWidget(self.widget)
self.grp_box_add = QtWidgets.QGroupBox(self.page_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.grp_box_add.sizePolicy().hasHeightForWidth())
self.grp_box_add.setSizePolicy(sizePolicy)
self.grp_box_add.setMinimumSize(QtCore.QSize(0, 0))
self.grp_box_add.setAutoFillBackground(False)
self.grp_box_add.setObjectName("grp_box_add")
self.formLayout = QtWidgets.QFormLayout(self.grp_box_add)
self.formLayout.setObjectName("formLayout")
self.lbl_acc_add = QtWidgets.QLabel(self.grp_box_add)
self.lbl_acc_add.setObjectName("lbl_acc_add")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.lbl_acc_add)
self.tbox_acc_add = QtWidgets.QLineEdit(self.grp_box_add)
self.tbox_acc_add.setMinimumSize(QtCore.QSize(0, 20))
self.tbox_acc_add.setFrame(True)
self.tbox_acc_add.setObjectName("tbox_acc_add")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.tbox_acc_add)
self.lbl_user_add = QtWidgets.QLabel(self.grp_box_add)
self.lbl_user_add.setObjectName("lbl_user_add")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.lbl_user_add)
| |
import sys
import json
import io
import enum
from messagebird.balance import Balance
from messagebird.call import Call
from messagebird.call_list import CallList
from messagebird.contact import Contact, ContactList
from messagebird.error import Error, ValidationError
from messagebird.group import Group, GroupList
from messagebird.hlr import HLR
from messagebird.message import Message, MessageList
from messagebird.mms import MMS
from messagebird.voice_webhook import VoiceWebhook, VoiceWebhookList
from messagebird.voicemessage import VoiceMessage
from messagebird.lookup import Lookup
from messagebird.verify import Verify
from messagebird.http_client import HttpClient, ResponseFormat
from messagebird.conversation_message import ConversationMessage, ConversationMessageList
from messagebird.conversation import Conversation, ConversationList
from messagebird.conversation_webhook import ConversationWebhook, ConversationWebhookList
from messagebird.voice_recording import VoiceRecordingsList, VoiceRecording
from messagebird.voice_transcription import VoiceTranscriptionsList, VoiceTranscriptionsView
from messagebird.call_flow import CallFlow, CallFlowList, CallFlowNumberList
# --- REST endpoint and client identification -------------------------------
ENDPOINT = 'https://rest.messagebird.com'
CLIENT_VERSION = '1.4.1'
PYTHON_VERSION = '.'.join(str(part) for part in sys.version_info[:3])
USER_AGENT = 'MessageBird/ApiClient/%s Python/%s' % (CLIENT_VERSION, PYTHON_VERSION)
REST_TYPE = 'rest'

# --- Conversations API ------------------------------------------------------
CONVERSATION_API_ROOT = 'https://conversations.messagebird.com/v1/'
CONVERSATION_API_WHATSAPP_SANDBOX_ROOT = 'https://whatsapp-sandbox.messagebird.com/v1/'
CONVERSATION_PATH = 'conversations'
CONVERSATION_MESSAGES_PATH = 'messages'
CONVERSATION_WEB_HOOKS_PATH = 'webhooks'
CONVERSATION_TYPE = 'conversation'

# --- Voice API --------------------------------------------------------------
VOICE_API_ROOT = 'https://voice.messagebird.com'
VOICE_TYPE = 'voice'
VOICE_PATH = 'calls'
VOICE_LEGS_PATH = 'legs'
VOICE_RECORDINGS_PATH = 'recordings'
VOICE_TRANSCRIPTIONS_PATH = 'transcriptions'
VOICE_WEB_HOOKS_PATH = 'webhooks'
class ErrorException(Exception):
    """Raised when an API response carries one or more error objects.

    The individual error objects remain accessible via the ``errors``
    attribute; the exception message is their space-joined string forms.
    """

    def __init__(self, errors):
        self.errors = errors
        joined = ' '.join(str(error) for error in errors)
        super(ErrorException, self).__init__(joined)
class SignleErrorException(Exception):
    """Raised for a single plain-text error message.

    NOTE(review): the class name contains a typo ("Signle" instead of
    "Single"); it is kept as-is because external callers may catch it
    by this name.
    """

    def __init__(self, errorMessage):
        super(SignleErrorException, self).__init__(errorMessage)
class Feature(enum.Enum):
    """Opt-in client feature flags, passed via ``Client(features=[...])``."""

    # When present in the client's feature list, Conversations API calls
    # are routed to the WhatsApp sandbox endpoint instead of production.
    ENABLE_CONVERSATIONS_API_WHATSAPP_SANDBOX = 1
class Client(object):
def __init__(self, access_key, http_client=None, features=None):
    """Create an API client.

    :param access_key: MessageBird API access key.
    :param http_client: optional pre-configured HTTP client; when given
        it is used for every request regardless of API type.
    :param features: optional iterable of :class:`Feature` flags;
        ``None`` (the default) means no features enabled.
    """
    # Fix of the mutable-default-argument pitfall (the default used to
    # be ``[]``); an empty tuple gives identical membership behavior.
    if features is None:
        features = ()
    self.access_key = access_key
    self.http_client = http_client
    # Route Conversations traffic to the sandbox only when explicitly enabled.
    self.conversation_api_root = (
        CONVERSATION_API_WHATSAPP_SANDBOX_ROOT
        if Feature.ENABLE_CONVERSATIONS_API_WHATSAPP_SANDBOX in features
        else CONVERSATION_API_ROOT
    )
def _get_http_client(self, type=REST_TYPE):
    """Return the configured HTTP client, or build one for the API *type*.

    An explicitly injected ``http_client`` always wins; otherwise the
    endpoint root is chosen by API type (conversation / voice / rest).
    """
    if self.http_client:
        return self.http_client
    roots = {
        CONVERSATION_TYPE: self.conversation_api_root,
        VOICE_TYPE: VOICE_API_ROOT,
    }
    root = roots.get(type, ENDPOINT)
    return HttpClient(root, self.access_key, USER_AGENT)
def request(self, path, method='GET', params=None, type=REST_TYPE):
    """Builds a request, gets a response and decodes it."""
    response_text = self._get_http_client(type).request(path, method, params)
    # Empty/None bodies are passed through untouched (nothing to decode).
    if not response_text:
        return response_text
    decoded = json.loads(response_text)
    if 'errors' in decoded:
        raise ErrorException([Error().load(item) for item in decoded['errors']])
    return decoded
def request_plain_text(self, path, method='GET', params=None, type=REST_TYPE):
"""Builds a request, gets a response and returns the body."""
response_text = self._get_http_client(type).request(path, method, params)
try:
# Try to decode the response to JSON to see if the API returned any
# errors.
response_json = json.loads(response_text)
if 'errors' in response_json:
raise (ErrorException([Error().load(e) for e in response_json['errors']]))
except ValueError:
# Do nothing: json.loads throws if the input string is not valid JSON,
# which is expected. We'll just return the response body below.
pass
return response_text
def request_store_as_file(self, path, filepath, method='GET', params=None, type=REST_TYPE):
"""Builds a request, gets a response and decodes it."""
response_binary = self._get_http_client(type).request(path, method, params, ResponseFormat.binary)
if not response_binary:
return response_binary
with io.open(filepath, 'wb') as f:
f.write(response_binary)
return filepath
def balance(self):
"""Retrieve your balance."""
return Balance().load(self.request('balance'))
def call(self, id):
"""Retrieve the information of a specific call"""
return Call().load(self.request('calls/' + str(id), 'GET', None, VOICE_TYPE))
def call_list(self, page=1):
"""Listing calls
Args:
page(int) : The page to list.
Raises:
ErrorException : On api returning errors
Returns:
CallList(object) : The list of calls requested & their status."""
return CallList().load(self.request('calls/?page=' + str(page), 'GET', None, VOICE_TYPE))
def call_create(self, source, destination, callFlow, webhook):
"""Creating a call
Args:
source(str) : The caller ID of the call.
destination(string) : The number/address to be called.
callFlow(object) : The call flow object to be executed when the call is answered.
webhook(object) : The webhook object containing the url & required token.
Raises:
ErrorException : On api returning errors
Returns:
Call(object) : The Call object just created."""
params = locals()
del (params['self'])
return Call().load(self.request('calls', 'POST', params, VOICE_TYPE))
def call_delete(self, id):
"""Delete an existing call object."""
response = self.request_plain_text('calls/' + str(id), 'DELETE', None, VOICE_TYPE)
# successful delete should be empty
if len(response) > 0:
raise SignleErrorException(response)
def hlr(self, id):
"""Retrieve the information of a specific HLR lookup."""
return HLR().load(self.request('hlr/' + str(id)))
def hlr_create(self, msisdn, reference):
"""Perform a new HLR lookup."""
return HLR().load(self.request('hlr', 'POST', {'msisdn': msisdn, 'reference': reference}))
def message(self, id):
"""Retrieve the information of a specific message."""
return Message().load(self.request('messages/' + str(id)))
def message_list(self, limit=20, offset=0):
"""Retrieve a list of the most recent messages."""
query = self._format_query(limit, offset)
return MessageList().load(self.request('messages?' + query))
def message_create(self, originator, recipients, body, params=None):
"""Create a new message."""
if params is None:
params = {}
if type(recipients) == list:
recipients = ','.join(recipients)
params.update({'originator': originator, 'body': body, 'recipients': recipients})
return Message().load(self.request('messages', 'POST', params))
def message_delete(self, id):
"""Delete a message from the dashboard."""
self.request_plain_text('messages/' + str(id), 'DELETE')
def mms_create(self, originator, recipients, body, mediaUrls, subject=None, reference=None, scheduledDatetime=None):
""" Send bulk mms.
Args:
originator(str): name of the originator
recipients(str/list(str)): comma separated numbers or list of numbers in E164 format
body(str) : text message body
mediaUrl(str) : list of URL's of attachments of the MMS message.
subject(str) : utf-encoded subject
reference(str) : client reference text
scheduledDatetime(str) : scheduled date time in RFC3339 format
Raises:
ErrorException: On api returning errors
Returns:
MMS: On success an MMS instance instantiated with success response
"""
if isinstance(recipients, list):
recipients = ','.join(recipients)
if isinstance(mediaUrls, str):
mediaUrls = [mediaUrls]
params = locals()
del (params['self'])
return MMS().load(self.request('mms', 'POST', params))
def voice_message(self, id):
"Retrieve the information of a specific voice message."
return VoiceMessage().load(self.request('voicemessages/' + str(id)))
def voice_message_create(self, recipients, body, params=None):
"""Create a new voice message."""
if params is None:
params = {}
if type(recipients) == list:
recipients = ','.join(recipients)
params.update({'recipients': recipients, 'body': body})
return VoiceMessage().load(self.request('voicemessages', 'POST', params))
def lookup(self, phonenumber, params=None):
"""Do a new lookup."""
if params is None:
params = {}
return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET', params))
def lookup_hlr(self, phonenumber, params=None):
"""Retrieve the information of a specific HLR lookup."""
if params is None:
params = {}
return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'GET', params))
def lookup_hlr_create(self, phonenumber, params=None):
"""Perform a new HLR lookup."""
if params is None:
params = {}
return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'POST', params))
def verify(self, id):
"""Retrieve the information of a specific verification."""
return Verify().load(self.request('verify/' + str(id)))
def verify_create(self, recipient, params=None):
"""Create a new verification."""
if params is None:
params = {}
params.update({'recipient': recipient})
return Verify().load(self.request('verify', 'POST', params))
def verify_verify(self, id, token):
"""Verify the token of a specific verification."""
return Verify().load(self.request('verify/' + str(id), params={'token': token}))
def verify_delete(self, id):
"""Delete an existing verification object."""
self.request_plain_text('verify/' + str(id), 'DELETE')
def contact(self, id):
"""Retrieve the information of a specific contact."""
return Contact().load(self.request('contacts/' + str(id)))
def contact_create(self, phonenumber, params=None):
if params is None:
params = {}
params.update({'msisdn': phonenumber})
return Contact().load(self.request('contacts', 'POST', params))
def contact_delete(self, id):
self.request_plain_text('contacts/' + str(id), 'DELETE')
def contact_update(self, id, params=None):
self.request_plain_text('contacts/' + str(id), 'PATCH', params)
def contact_list(self, limit=10, offset=0):
query = self._format_query(limit, offset)
return ContactList().load(self.request('contacts?' + query, 'GET', None))
def group(self, id):
return Group().load(self.request('groups/' + str(id), 'GET', None))
def group_create(self, name, params=None):
if params is None:
params = {}
params.update({'name': name})
return Group().load(self.request('groups', 'POST', params))
def group_delete(self, id):
self.request_plain_text('groups/' + str(id), 'DELETE', None)
def group_list(self, limit=10, offset=0):
query = self._format_query(limit, offset)
return GroupList().load(self.request('groups?' + query, 'GET', None))
def group_update(self, id, name, params=None):
if params is None:
params = {}
params.update({'name': name})
self.request_plain_text('groups/' + str(id), 'PATCH', params)
def group_add_contacts(self, groupId, contactIds):
query = self.__group_add_contacts_query(contactIds)
self.request_plain_text('groups/' + str(groupId) + '?' + query, 'PUT', None)
def __group_add_contacts_query(self, contactIds):
# __group_add_contacts_query gets a query string to add contacts to a
# group. The expected format is ids[]=first-contact&ids[]=second-contact.
# See: https://developers.messagebird.com/docs/groups#add-contact-to-group.
return '&'.join('ids[]=' + str(id) for id in contactIds)
def group_remove_contact(self, groupId, contactId):
self.request_plain_text('groups/' + str(groupId) + '/contacts/' + str(contactId), 'DELETE', None)
def conversation_list(self, limit=10, offset=0):
uri = CONVERSATION_PATH + '?' + self._format_query(limit, offset)
return ConversationList().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))
def conversation_start(self, start_request):
uri = CONVERSATION_PATH + '/start'
return Conversation().load(self.request(uri, 'POST', start_request, CONVERSATION_TYPE))
def conversation_update(self, id, update_request):
uri = CONVERSATION_PATH + '/' + str(id)
return Conversation().load(self.request(uri, 'PATCH', update_request, CONVERSATION_TYPE))
def conversation_read(self, id):
uri = CONVERSATION_PATH + '/' + str(id)
return Conversation().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))
def conversation_list_messages(self, conversation_id, limit=10, offset=0):
uri = CONVERSATION_PATH + '/' + str(conversation_id) + '/' + CONVERSATION_MESSAGES_PATH
uri += '?' + self._format_query(limit, offset)
return ConversationMessageList().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))
def conversation_create_message(self, conversation_id, message_create_request):
uri = CONVERSATION_PATH + '/' + str(conversation_id) + '/' + CONVERSATION_MESSAGES_PATH
return ConversationMessage().load(self.request(uri, 'POST', message_create_request, CONVERSATION_TYPE))
def conversation_read_message(self, message_id):
uri = CONVERSATION_MESSAGES_PATH + '/' + str(message_id)
return ConversationMessage().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))
def conversation_create_webhook(self, webhook_create_request):
return ConversationWebhook().load(
self.request(CONVERSATION_WEB_HOOKS_PATH, 'POST', webhook_create_request, CONVERSATION_TYPE))
def conversation_update_webhook(self, id, update_request):
"""
Updates a webhook with the supplied parameters.
API Reference: https://developers.messagebird.com/api/conversations/#webhooks
"""
uri = CONVERSATION_WEB_HOOKS_PATH + '/' + str(id)
web_hook = self.request(uri, 'PATCH', update_request, CONVERSATION_TYPE)
return ConversationWebhook().load(web_hook)
def conversation_delete_webhook(self, id):
uri = CONVERSATION_WEB_HOOKS_PATH + '/' + str(id)
self.request(uri, 'DELETE', None, CONVERSATION_TYPE)
| |
<gh_stars>0
"""
A simple JSON REST request abstraction layer that is used by the
``dropbox.client`` and ``dropbox.session`` modules. You shouldn't need to use this.
"""
import io
import pkg_resources
import socket
import ssl
import sys
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
import urllib3
except ImportError:
raise ImportError('Dropbox python client requires urllib3.')
# SDK version string, reported to the server in the User-Agent header.
SDK_VERSION = "2.2.0"
# CA bundle shipped alongside this package; used to validate server
# certificates (see RESTClientObject's PoolManager configuration).
TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
class RESTResponse(io.IOBase):
    """
    Responses to requests can come in the form of ``RESTResponse``. These are
    thin wrappers around the socket file descriptor.
    :meth:`read()` and :meth:`close()` are implemented.
    It is important to call :meth:`close()` to return the connection
    back to the connection pool to be reused. If a connection
    is not closed by the caller it may leak memory. The object makes a
    best-effort attempt upon destruction to call :meth:`close()`,
    but it's still best to explicitly call :meth:`close()`.
    """
    def __init__(self, resp):
        # arg: A urllib3.HTTPResponse object
        # Mirror the commonly-inspected HTTP attributes onto this wrapper so
        # callers don't need to reach into the urllib3 object directly.
        self.urllib3_response = resp
        self.status = resp.status
        self.version = resp.version
        self.reason = resp.reason
        # NOTE(review): ``strict`` was removed in later urllib3 releases --
        # confirm the urllib3 version this SDK pins still exposes it.
        self.strict = resp.strict
        # Guards read() after close and makes double-close a no-op.
        self.is_closed = False
    def __del__(self):
        # Attempt to close when ref-count goes to zero.
        self.close()
    def __exit__(self, typ, value, traceback):
        # Allow this to be used in "with" blocks.
        self.close()
    # -----------------
    # Important methods
    # -----------------
    def read(self, amt=None):
        """
        Read data off the underlying socket.
        Parameters
            amt
                Amount of data to read. Defaults to ``None``, indicating to read
                everything.
        Returns
            Data off the socket. If ``amt`` is not ``None``, at most ``amt`` bytes are returned.
            An empty string when the socket has no data.
        Raises
            ``ValueError``
                If the ``RESTResponse`` has already been closed.
        """
        if self.is_closed:
            raise ValueError('Response already closed')
        return self.urllib3_response.read(amt)
    BLOCKSIZE = 4 * 1024 * 1024  # 4MB at a time just because
    def close(self):
        """Closes the underlying socket."""
        # Double closing is harmless
        if self.is_closed:
            return
        # Read any remaining crap off the socket before releasing the
        # connection. Buffer it just in case it's huge
        while self.read(RESTResponse.BLOCKSIZE):
            pass
        # Mark as closed and release the connection (exactly once)
        self.is_closed = True
        self.urllib3_response.release_conn()
    @property
    def closed(self):
        # io.IOBase compatibility: report close state as a property.
        return self.is_closed
    # ---------------------------------
    # Backwards compat for HTTPResponse
    # ---------------------------------
    def getheaders(self):
        """Returns a dictionary of the response headers."""
        return self.urllib3_response.getheaders()
    def getheader(self, name, default=None):
        """Returns a given response header."""
        return self.urllib3_response.getheader(name, default)
    # Some compat functions showed up recently in urllib3
    # Feature-detect at class-creation time: only define fileno/flush
    # delegates when the installed urllib3 provides them.
    try:
        urllib3.HTTPResponse.flush
        urllib3.HTTPResponse.fileno
        def fileno(self):
            return self.urllib3_response.fileno()
        def flush(self):
            return self.urllib3_response.flush()
    except AttributeError:
        pass
def create_connection(address):
    """Open a TCP connection to ``address`` (a ``(host, port)`` pair).

    Tries every address family/socket type returned by ``getaddrinfo`` in
    order and returns the first successfully connected socket. If all
    candidates fail, re-raises the last ``socket.error``; if ``getaddrinfo``
    yields nothing at all, raises a generic ``socket.error``.
    """
    host, port = address
    last_error = None
    for family, socktype, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)
            sock.connect(sockaddr)
            return sock
        except socket.error as exc:
            last_error = exc
            # Don't leak the half-constructed socket before trying the
            # next candidate address.
            if sock is not None:
                sock.close()
    if last_error is not None:
        raise last_error
    raise socket.error("getaddrinfo returns an empty list")
def json_loadb(data):
    """Parse JSON from a bytes payload.

    On Python 3 the payload is decoded as UTF-8 first, because ``json.loads``
    historically required text; on Python 2 the raw string is passed through.
    """
    if sys.version_info >= (3,):
        return json.loads(data.decode('utf8'))
    return json.loads(data)
class RESTClientObject(object):
    """Performs HTTP requests over a pooled, certificate-validated
    urllib3 connection pool. See :meth:`RESTClient.request()` for the
    request contract."""
    def __init__(self, max_reusable_connections=8, mock_urlopen=None):
        """
        Parameters
            max_reusable_connections
                max connections to keep alive in the pool
            mock_urlopen
                an optional alternate urlopen function for testing
        This class uses ``urllib3`` to maintain a pool of connections. We attempt
        to grab an existing idle connection from the pool, otherwise we spin
        up a new connection. Once a connection is closed, it is reinserted
        into the pool (unless the pool is full).
        SSL settings:
        - Certificates validated using Dropbox-approved trusted root certs
        - TLS v1.0 (newer TLS versions are not supported by urllib3)
        - Default ciphersuites. Choosing ciphersuites is not supported by urllib3
        - Hostname verification is provided by urllib3
        """
        self.mock_urlopen = mock_urlopen
        self.pool_manager = urllib3.PoolManager(
            num_pools=4,  # only a handful of hosts. api.dropbox.com, api-content.dropbox.com
            maxsize=max_reusable_connections,
            block=False,
            timeout=60.0,  # long enough so datastores await doesn't get interrupted
            cert_reqs=ssl.CERT_REQUIRED,
            ca_certs=TRUSTED_CERT_FILE,
            ssl_version=ssl.PROTOCOL_TLSv1,
        )
    def request(self, method, url, post_params=None, body=None, headers=None, raw_response=False):
        """Performs a REST request. See :meth:`RESTClient.request()` for detailed description."""
        post_params = post_params or {}
        headers = headers or {}
        headers['User-Agent'] = 'OfficialDropboxPythonSDK/' + SDK_VERSION
        if post_params:
            if body:
                raise ValueError("body parameter cannot be used with post_params parameter")
            body = params_to_urlencoded(post_params)
            headers["Content-type"] = "application/x-www-form-urlencoded"
        # Handle StringIO instances, because urllib3 doesn't.
        if hasattr(body, 'getvalue'):
            body = str(body.getvalue())
            headers["Content-Length"] = len(body)
        # Reject any headers containing newlines; the error from the server isn't pretty.
        # BUGFIX: ``basestring`` only exists on Python 2, so the original check
        # raised NameError on Python 3. Select the text types for the running
        # interpreter instead (the else-branch is never evaluated on Python 3).
        text_types = (str,) if sys.version_info >= (3,) else (basestring,)  # noqa: F821
        for key, value in headers.items():
            if isinstance(value, text_types) and '\n' in value:
                raise ValueError("headers should not contain newlines (%s: %s)" %
                                 (key, value))
        try:
            # Grab a connection from the pool to make the request.
            # We return it to the pool when caller close() the response
            urlopen = self.mock_urlopen if self.mock_urlopen else self.pool_manager.urlopen
            r = urlopen(
                method=method,
                url=url,
                body=body,
                headers=headers,
                preload_content=False
            )
            r = RESTResponse(r)  # wrap up the urllib3 response before proceeding
        except socket.error as e:
            raise RESTSocketError(url, e)
        except urllib3.exceptions.SSLError as e:
            raise RESTSocketError(url, "SSL certificate error: %s" % e)
        # 200 (OK) and 206 (Partial Content) are the only accepted statuses.
        if r.status not in (200, 206):
            raise ErrorResponse(r, r.read())
        return self.process_response(r, raw_response)
    def process_response(self, r, raw_response):
        """Return the raw RESTResponse, or read + JSON-decode its body.

        Raises ErrorResponse if the body is not valid JSON. The response is
        only close()d (returning the connection to the pool) on the
        successful decode path; raw responses are the caller's to close.
        """
        if raw_response:
            return r
        else:
            s = r.read()
            try:
                resp = json_loadb(s)
            except ValueError:
                raise ErrorResponse(r, s)
            r.close()
        return resp
    def GET(self, url, headers=None, raw_response=False):
        """Convenience wrapper: GET request via :meth:`request`."""
        assert isinstance(raw_response, bool)
        return self.request("GET", url, headers=headers, raw_response=raw_response)
    def POST(self, url, params=None, headers=None, raw_response=False):
        """Convenience wrapper: form-encoded POST request via :meth:`request`."""
        assert isinstance(raw_response, bool)
        if params is None:
            params = {}
        return self.request("POST", url,
                            post_params=params, headers=headers, raw_response=raw_response)
    def PUT(self, url, body, headers=None, raw_response=False):
        """Convenience wrapper: PUT request with an explicit body via :meth:`request`."""
        assert isinstance(raw_response, bool)
        return self.request("PUT", url, body=body, headers=headers, raw_response=raw_response)
class RESTClient(object):
    """
    Static facade over a shared :class:`RESTClientObject` instance. Used
    internally by the Dropbox Client API to perform JSON REST requests and
    get responses as JSON data (when applicable). All requests happen over SSL.
    """
    IMPL = RESTClientObject()
    @classmethod
    def request(cls, *args, **kwargs):
        """Perform a REST request and parse the response.
        Parameters
            method
                An HTTP method (e.g. ``'GET'`` or ``'POST'``).
            url
                The URL to make a request to.
            post_params
                A dictionary of parameters to put in the body of the request.
                This option may not be used if the body parameter is given.
            body
                The body of the request. Typically a string; may also be a
                file-like object. May not be combined with post_params.
            headers
                A dictionary of headers to send with the request.
            raw_response
                Whether to return a :class:`RESTResponse` object. Default ``False``.
                Best enabled for large responses you want to ``.read()``
                incrementally, for access to status/headers, or when the body
                is not JSON.
        Returns
            The JSON-decoded data from the server, unless ``raw_response`` is
            set, in which case a :class:`RESTResponse` object is returned instead.
        Raises
            :class:`ErrorResponse`
                The returned HTTP status is not 200, or the body was
                not parsed from JSON successfully.
            :class:`RESTSocketError`
                A ``socket.error`` was raised while contacting Dropbox.
        """
        return cls.IMPL.request(*args, **kwargs)
    @classmethod
    def GET(cls, *args, **kwargs):
        """Perform a GET request using :meth:`RESTClient.request()`."""
        return cls.IMPL.GET(*args, **kwargs)
    @classmethod
    def POST(cls, *args, **kwargs):
        """Perform a POST request using :meth:`RESTClient.request()`."""
        return cls.IMPL.POST(*args, **kwargs)
    @classmethod
    def PUT(cls, *args, **kwargs):
        """Perform a PUT request using :meth:`RESTClient.request()`."""
        return cls.IMPL.PUT(*args, **kwargs)
class RESTSocketError(socket.error):
    """A ``socket.error`` that also records which host was being contacted."""
    def __init__(self, host, e):
        detail = "Error connecting to \"%s\": %s" % (host, str(e))
        super(RESTSocketError, self).__init__(detail)
# Dummy class for docstrings, see doco.py. Never raised at runtime; it only
# exists to carry documentation text.
class _ErrorResponse__doc__(Exception):
    """Exception raised when :class:`DropboxClient` experiences a problem.
    For example, this is raised when the server returns an unexpected
    non-200 HTTP response.
    """
# Attribute documentation strings paired with _ErrorResponse__doc__ above;
# consumed by the documentation tooling (doco.py), not used at runtime.
_status__doc__ = "HTTP response status (an int)."
_reason__doc__ = "HTTP response reason (a string)."
_headers__doc__ = "HTTP response headers (a list of (header, value) tuples)."
_body__doc__ = "HTTP response body (string or JSON dict)."
_error_msg__doc__ = "Error message for developer (optional)."
_user_error_msg__doc__ = "Error message for end user (optional)."
class ErrorResponse(Exception):
"""
Raised by :meth:`RESTClient.request()` for requests that:
- Return a non-200 HTTP response, or
- Have a non-JSON response body, or
- Have a malformed/missing header in the response.
Most errors that Dropbox returns will have an | |
"""
BUCKEYE TETROMINOES
Coded by: <NAME> in collaboration w/ The Ohio State University's OHI/O
Game Logic Sourced from Arcade Sample Code library.
Distributed under the MIT LICENSE.
"""
################################################################################
import arcade
import random
import time
import PIL
import timeit
from game_variables import *
from game_scores import *
################################################################################
def create_textures():
    """Build one solid-color sprite texture per entry in the global ``colors``.

    !!! SHOULD be able to add custom images in here instead of the general colors.
    """
    textures = []
    for color in colors:
        tile = PIL.Image.new('RGB', (WIDTH, HEIGHT), color)
        textures.append(arcade.Texture(str(color), image=tile))
    return textures
# Module-level texture cache: one texture per tetromino color, built once at
# import time so every sprite can reuse them.
texture_list = create_textures()
def rotate_clockwise(shape):
    """Return ``shape`` (a list-of-rows matrix) rotated 90 degrees clockwise."""
    # Reversing the row order and transposing is a clockwise quarter-turn.
    return [list(column) for column in zip(*shape[::-1])]
def check_collision(board, shape, offset):
    """
    See if the matrix stored in the shape will intersect anything
    on the board based on the offset. Offset is an (x, y) coordinate.
    """
    off_x, off_y = offset
    # ``cell and board[...]`` short-circuits, so empty shape cells never
    # index the board (which could be out of range for them).
    return any(
        cell and board[off_y + row_idx][off_x + col_idx]
        for row_idx, row in enumerate(shape)
        for col_idx, cell in enumerate(row)
    )
def remove_row(board, row):
    """Remove ``row`` from ``board`` (mutating it in place) and return a new
    board list with a blank row prepended on top.

    Generalized: the blank row's width is taken from the removed row itself
    rather than the module-global ``COLUMN_COUNT``, so the function works for
    any board width (identical behavior for this game's fixed-width boards).
    """
    width = len(board[row])
    del board[row]
    #print("--[tetris] Deleted Row #",row)
    return [[0] * width] + board
def join_matrixes(matrix_1, matrix_2, matrix_2_offset):
    """Add ``matrix_2``'s cells onto ``matrix_1`` at the given (x, y) offset
    and return ``matrix_1`` (which is mutated in place).

    NOTE(review): rows are written at ``offset_y - 1``, one above the nominal
    offset -- presumably matching the drop logic's post-collision coordinate;
    confirm against the callers before changing.
    """
    offset_x, offset_y = matrix_2_offset
    for row_idx, source_row in enumerate(matrix_2):
        target_row = matrix_1[row_idx + offset_y - 1]
        for col_idx, value in enumerate(source_row):
            target_row[col_idx + offset_x] += value
    return matrix_1
def new_board():
    """Create the playfield grid: ROW_COUNT rows of 0s, plus one solid border
    row of 8s appended at the bottom for easier collision detection."""
    playfield = [[0] * COLUMN_COUNT for _ in range(ROW_COUNT)]
    playfield.append([8] * COLUMN_COUNT)
    return playfield
class GameView(arcade.View):
global ALL_SCORES
global GAME_SPEED_FLOOR
def newGame(self, player_name):
self.resetGame(player_name)
self.setup()
def resetGame(self, player_name): #width, height, title removed
""" Reset Last Gameplay and Reset Game Class Variables """
self.board = None
self.frame_count = 0 #reset game frame counter
self.game_over = False #reset game end state
self.hdrop_wait = False #Hard Drop Frequency Limiter
self.hdrop_last_frame = 0
self.paused = False
self.addedScore = False
self.board_sprite_list = None
self.background = None
# initialize score & player
self.player_name = player_name
self.score = None
self.level = None
self.GAME_SPEED = None
self.left_pressed = False
self.right_pressed = False
self.down_pressed = False
self.pos = 0
self.new_stones = tetris_shapes.copy()
random.shuffle(self.new_stones)
#Output Announcement
print("---- Game Board, Mechanics, Stats == Reset")
def setup(self):
""" Initialize Scoring System & Game Components """
self.board = new_board()
self.score = 0
self.level = 0
self.GAME_SPEED = INITIAL_GAME_SPEED
self.background = arcade.load_texture(BACKGROUNDS[0])
# Set Game Levels 1-9
self.GAME_LEVEL_FRAMES = [ 0, 300, 600,950,1300,1650,2050,2450,2900,3400,3950]
# RX & Statistics
self.processing_time = 0
self.draw_time = 0
self.fps_start_timer = None
self.fps = None
self.board_sprite_list = arcade.SpriteList()
for row in range(len(self.board)):
for column in range(len(self.board[0])):
sprite = arcade.Sprite()
for texture in texture_list:
sprite.append_texture(texture)
sprite.set_texture(0)
sprite.center_x = (MARGIN + WIDTH) * column + SCREEN_MARGIN + WIDTH // 2 + WINDOW_MARGIN # MAY NEED FIXED WITH NEW SCREEN SIZE
sprite.center_y = TETRIS_HEIGHT - HIDE_BOTTOM - (MARGIN + HEIGHT) * row + SCREEN_MARGIN + HEIGHT // 2 # MAY NEED FIXED WITH NEW SCREEN SIZE
self.board_sprite_list.append(sprite)
#- JOYSTICK
# Check for System Installed Joysticks. Make instance of it.
joysticks = arcade.get_joysticks()
if joysticks:
self.joystick = joysticks[0]
self.joystick.open()
else:
print("----NO JOYSTICK CONTROLLER WAS FOUND.")
self.joystick = None
#- Initial Stone
self.new_stone()
self.update_board()
print("---- Game Board, Mechanics, Stats == SETUP Confirm")
def on_show(self):
print("GameView Opened!")
arcade.set_background_color([187,0,0]) # Set Background. Required. Do not delete def!
self.window.set_mouse_visible(False) # Hide mouse cursor
#-- Stone Actions
def new_stone(self):
"""
Randomly grab a new stone and set the stone location to the top.
If we immediately collide, then game-over. self.new_stone()
"""
self.stone = self.new_stones.pop()
if len(self.new_stones) is 0:
self.new_stones = tetris_shapes.copy()
random.shuffle(self.new_stones)
self.stone_x = int(COLUMN_COUNT / 2 - len(self.stone[0]) / 2)
self.stone_y = 0
self.pos = 0
if check_collision(self.board, self.stone, (self.stone_x, self.stone_y)):
self.game_over = True
##--- ADD COMMAND TO SWITCH STATES TO "GAME-OVER" STATE WHEN GAME-ENDS
def drop(self):
"""
Drop the stone down one place.
Check for collision.
If collided, then
join matrixes
Check for rows we can remove
Update sprite list with stones
Create a new stone
"""
if not self.game_over and not self.paused:
self.stone_y += 1
if check_collision(self.board, self.stone, (self.stone_x, self.stone_y)):
self.board = join_matrixes(self.board, self.stone, (self.stone_x, self.stone_y))
rows_cleared = 0
for i, row in enumerate(self.board[:-1]):
if 0 not in row:
self.board = remove_row(self.board, i)
rows_cleared += 1
if i is 21:
self.score += [0, 40, 100, 300, 1200][rows_cleared]*(self.level+1) #self.score + 1 ##------------ADD GAME SCORE COUNTER LINE HERE
print("Score: " + str(self.score))
self.update_board()
self.new_stone()
def hard_drop(self):
"""
Drop the stone until collision
Join
Check for rows to remove
Create new stone
"""
if not self.game_over and not self.paused and ( self.hdrop_last_frame + 10 < self.frame_count):
while not check_collision(self.board, self.stone, (self.stone_x, self.stone_y)):
self.stone_y += 1
self.board = join_matrixes(self.board, self.stone, (self.stone_x, self.stone_y))
while True:
rows_cleared = 0
for i, row in enumerate(self.board[:-1]):
if 0 not in row:
self.board = remove_row(self.board, i)
rows_cleared += 1
if i is 21:
self.score += [0, 40, 100, 300, 1200][rows_cleared]*(self.level+1) #self.score + 1 ##------------ADD GAME SCORE COUNTER LINE HERE
else:
self.hdrop_last_frame = self.frame_count
break
print("Score: " + str(self.score))
self.update_board()
self.new_stone()
def rotate_stone(self):
""" Rotate the stone, check collision. """
if not self.game_over and not self.paused:
new_stone = rotate_clockwise(self.stone)
new_pos = (self.pos+1)%4
new_x = self.stone_x
new_y = self.stone_y
d = abs(len(self.stone)-len(self.stone[0]))
if d is 3:
x=0
if new_pos is 1:
new_x += 2
new_y -= 1
elif new_pos is 2:
new_x -= 2
new_y += 2
elif new_pos is 3:
new_x += 1
new_y -= 2
else:
new_x -= 1
new_y += 1
else:
if new_pos is 1:
new_x += d
elif new_pos is 2:
new_x -= d
new_y += d
elif new_pos is 3:
new_y -= d
# if rotates off board move back
if new_x < 0:
new_x = 0
if new_x > COLUMN_COUNT - len(self.stone):
new_x = COLUMN_COUNT - len(self.stone)
if not check_collision(self.board, new_stone, (new_x, new_y)):
self.stone = new_stone
self.stone_x = new_x
self.stone_y = new_y
self.pos = new_pos
def update(self, dt):
""" Update, drop stone if warrented. Called by Arcade Class every 1/60 sec
------------------------------------ FRAME RATE CONTROLLING """
self.frame_count += 1
if self.frame_count % self.GAME_SPEED == 0:
self.drop()
if self.frame_count % 3 == 0:
if self.down_pressed and self.frame_count - self.down_pressed > 10:
self.drop()
if not self.right_pressed and self.left_pressed and self.frame_count - self.left_pressed > 10:
self.move(-1)
elif not self.left_pressed and self.right_pressed and self.frame_count - self.right_pressed > 10:
self.move(1)
def move(self, delta_x):
""" Move the stone back and forth based on delta x. """
if not self.game_over and not self.paused:
new_x = self.stone_x + delta_x
if new_x < 0:
new_x = 0
if new_x > COLUMN_COUNT - len(self.stone[0]):
new_x = COLUMN_COUNT - len(self.stone[0])
if not check_collision(self.board, self.stone, (new_x, self.stone_y)):
self.stone_x = new_x
#-- Screen Elements
def draw_background(self):
""" Draws the most epic background ever imaginable. """
arcade.draw_texture_rectangle( center_x = WINDOW_WIDTH // 2, center_y = SCREEN_HEIGHT // 2,
width = SCREEN_WIDTH, height = SCREEN_HEIGHT,
texture = self.background )
def draw_next_stone(self):
next_stone = self.new_stones[-1]
color = max(next_stone[0])
if color is 6:
arcade.draw_rectangle_filled(next_xposn+WIDTH/2+MARGIN, next_yposn, WIDTH, HEIGHT, colors[6])
arcade.draw_rectangle_filled(next_xposn-WIDTH/2, next_yposn, WIDTH, HEIGHT, colors[6])
arcade.draw_rectangle_filled(next_xposn+1.5*WIDTH+2*MARGIN, next_yposn, WIDTH, HEIGHT, colors[6])
arcade.draw_rectangle_filled(next_xposn-1.5*WIDTH-MARGIN, next_yposn, WIDTH, HEIGHT, colors[6])
elif color is 7:
arcade.draw_rectangle_filled(next_xposn+WIDTH/2+MARGIN, next_yposn-HEIGHT/2, WIDTH, HEIGHT, colors[7])
arcade.draw_rectangle_filled(next_xposn-WIDTH/2, next_yposn-HEIGHT/2, WIDTH, HEIGHT, colors[7])
arcade.draw_rectangle_filled(next_xposn+WIDTH/2+MARGIN, next_yposn+HEIGHT/2+MARGIN, WIDTH, HEIGHT, colors[7])
arcade.draw_rectangle_filled(next_xposn-WIDTH/2, next_yposn+HEIGHT/2+MARGIN, WIDTH, HEIGHT, colors[7])
else:
for x in range(3):
for y in range(2):
if next_stone[y][x] is not 0:
arcade.draw_rectangle_filled(next_xposn+(x-1)*(WIDTH+MARGIN), next_yposn+(y*-2+1)*(HEIGHT/2+MARGIN), WIDTH, HEIGHT, colors[color])
def game_diagnostics(self):
# Box Outline
arcade.draw_rectangle_outline(rx_xposn, rx_yposn, rx_width, rx_yposn, [0,153,153], 2)
# Header
arcade.draw_text("Game Diagnostics", rx_xposn-(rx_width/2)+20, rx_yposn+(rx_height/2)-30, arcade.color.BLACK, float(25), bold = True, align="left")
# Game Floor Speed
h_5a = rx_yposn+(rx_height/2)-75
t_5a = f"{GAME_SPEED_FLOOR} frames"
arcade.draw_text(" Set Floor: ", rx_xposn-(rx_width/2)+20, h_5a, arcade.color.BLACK, float(20), bold = True, align="left")
arcade.draw_text("Adjust w PAGE_UP/_DOWN", rx_xposn-(rx_width/2)+350, h_5a, arcade.color.GRAY, float(15), bold = True, align="left")
arcade.draw_text(t_5a, rx_xposn-(rx_width/2)+200, h_5a, arcade.color.BRIGHT_NAVY_BLUE, float(20), bold = True, align="left")
h_5b = rx_yposn+(rx_height/2)-100
arcade.draw_text("Current Speed: | |
# Chassis VM5D4C6B3599 VMX
m = p_chassis.match(line)
if m:
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
chassis_inventory_dict[k] = v.strip()
continue
# -------------------------------------------------------------------------------------
# For general chassis modules, for example:
# -------------------------------------------------------------------------------------
# Midplane REV 64 750-040240 ABAC9716 Lower Backplane
# Midplane 1 REV 06 711-032386 ABAC9742 Upper Backplane
# Routing Engine 0 REV 01 740-052100 9009237267 RE-S-1800x4
# Routing Engine 0 RE-VMX
# CB 0 VMX SCB
# FPC 0 Virtual FPC
# SPMB 0 REV 04 711-041855 ABDC5673 PMB Board
# SFB 0 REV 06 711-044466 ABCY8621 Switch Fabric Board
# ADC 9 REV 21 750-043596 ABDC2129 Adapter Card
# Fan Tray 0 REV 01 760-052467 ACAY4748 172mm FanTray - 6 Fans
# FPM Board REV 13 760-040242 ABDD0194 Front Panel Display
# Midplane
m = p_module0.match(line) or p_module1.match(line) or p_module2.match(line)
if m:
module_dict = {}
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
module_dict[k] = v.strip()
chassis_modules_list.append(module_dict)
continue
# -------------------------------------------------------------------------------------
# For chassis-re-disk-module, for example:
# -------------------------------------------------------------------------------------
# ad0 3919 MB 604784 000060095234B000018D Compact Flash
# ad1 28496 MB StorFly - VSFA18PI032G- P1T12003591504100303 Disk 1
m = p_re_disk.match(line)
if m:
if "chassis-re-disk-module" not in module_dict:
module_dict["chassis-re-disk-module"] = []
re_disk_module_list = module_dict["chassis-re-disk-module"]
re_disk_module_item = {}
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
re_disk_module_item[k] = v.strip()
re_disk_module_list.append(re_disk_module_item)
continue
# -------------------------------------------------------------------------------------
# For chassis-re-usb-module, for example:
# -------------------------------------------------------------------------------------
# usb0 (addr 1) EHCI root hub 0 Intel uhub0
# usb0 (addr 2) product 0x0020 32 vendor 0x8087 uhub1
m = p_re_usb.match(line)
if m:
if "chassis-re-usb-module" not in module_dict:
module_dict["chassis-re-usb-module"] = []
re_usb_module_list = module_dict["chassis-re-usb-module"]
re_usb_module_item = {}
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
re_usb_module_item[k] = v.strip()
re_usb_module_list.append(re_usb_module_item)
continue
# -------------------------------------------------------------------------------------
# For chassis-re-dimm-module, for example:
# -------------------------------------------------------------------------------------
# DIMM 0 VL33B1G63F-K9SQ-KC DIE REV-0 PCB REV-0 MFR ID-ce80
m = p_re_dimm.match(line)
if m:
if "chassis-re-dimm-module" not in module_dict:
module_dict["chassis-re-dimm-module"] = []
re_usb_dimm_list = module_dict["chassis-re-dimm-module"]
re_usb_dimm_item = {}
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
re_usb_dimm_item[k] = v.strip()
re_usb_dimm_list.append(re_usb_dimm_item)
continue
# -------------------------------------------------------------------------------------
# For chassis-sub-module, for example:
# -------------------------------------------------------------------------------------
# CPU REV 12 711-045719 ABDF7304 RMPC PMB
# MIC 0 REV 19 750-049457 ABDJ2346 2X100GE CFP2 OTN
# XLM 0 REV 14 711-046638 ABDF2862 MPC6E XL
# MIC 0 Virtual
# CPU Rev. 1.0 RIOT-LITE BUILTIN
m = p_sub_module.match(line) or p_sub_module_2.match(line) or p_sub_module_3.match(line)
if m:
if "chassis-sub-module" not in module_dict:
module_dict["chassis-sub-module"] = []
re_sub_module_list = module_dict["chassis-sub-module"]
last_sub_sub_item = {}
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
last_sub_sub_item[k] = v.strip()
re_sub_module_list.append(last_sub_sub_item)
continue
# -------------------------------------------------------------------------------------
# For chassis-sub-sub-module, for example:
# -------------------------------------------------------------------------------------
# PIC 0 BUILTIN BUILTIN 2X100GE CFP2 OTN
m = p_sub_sub_module.match(line)
if m:
# find the sub module
last_sub_item = module_dict["chassis-sub-module"][-1]
if "chassis-sub-sub-module" not in last_sub_item:
last_sub_item["chassis-sub-sub-module"] = []
re_sub_sub_module_item_list = last_sub_item["chassis-sub-sub-module"]
re_sub_sub_module_list_item = {}
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
re_sub_sub_module_list_item[k] = v.strip()
re_sub_sub_module_item_list.append(re_sub_sub_module_list_item)
continue
# -------------------------------------------------------------------------------------
# For chassis-sub-sub-sub-module, for example:
# -------------------------------------------------------------------------------------
# Xcvr 0 REV 01 740-052504 UW811XC CFP2-100G-LR4
m = p_sub_sub_sub_module.match(line)
if m:
# the last appended item
last_sub_sub_item = module_dict["chassis-sub-module"][-1]["chassis-sub-sub-module"][-1]
if "chassis-sub-sub-sub-module" not in last_sub_sub_item:
last_sub_sub_item["chassis-sub-sub-sub-module"] = []
re_sub_sub_sub_module_list = last_sub_sub_item["chassis-sub-sub-sub-module"]
re_sub_sub_sub_module_item = {}
for k,v in m.groupdict().items():
k = k.replace('_', '-')
if v:
re_sub_sub_sub_module_item[k] = v.strip()
re_sub_sub_sub_module_list.append(re_sub_sub_sub_module_item)
continue
return res
class ShowChassisHardwareDetailNoForwarding(ShowChassisHardwareDetail):
    """ Parser for:
            * show chassis hardware detail no-forwarding

        Identical to ShowChassisHardwareDetail except for the CLI command
        used to collect the output; all parsing is delegated to the parent.
    """

    cli_command = [
        'show chassis hardware detail no-forwarding'
    ]

    def cli(self, output=None):
        # Execute on the device only when the caller did not supply output.
        out = output or self.device.execute(self.cli_command[0])
        return super().cli(output=out)
class ShowChassisHardwareExtensiveSchema(MetaParser):
    # Schema for `show chassis hardware extensive`.
    #
    # The class docstring below shows an example of the fully-expanded XML
    # structure; the actual `schema` attribute marks repeating elements with
    # ListOf and absent-on-some-hardware elements with Optional. Several
    # I2C EEPROM fields may be reported empty by the device, hence
    # Or(str, None).
    """schema = {
    Optional("@xmlns:junos"): str,
    "chassis-inventory": {
        Optional("@xmlns"): str,
        "chassis": {
            Optional("@junos:style"): str,
            "chassis-module": [
                {
                    "chassis-re-disk-module": {
                        "description": str,
                        "disk-size": str,
                        "model": str,
                        "name": str,
                        "serial-number": str
                    },
                    "chassis-sub-module": [
                        {
                            "chassis-sub-sub-module": {
                                "description": str,
                                "name": str,
                                "part-number": str,
                                "serial-number": str
                            },
                            "description": str,
                            "name": str,
                            "part-number": str,
                            "serial-number": str,
                            "version": str
                        }
                    ],
                    "description": str,
                    "name": str
                }
            ],
            "description": str,
            "i2c-information": {
                "assembly-flags": str,
                "assembly-identifier": str,
                "assembly-version": str,
                "board-information-record": str,
                "eeprom-version": str,
                "i2c-data": list,
                "i2c-identifier": str,
                "i2c-version": str,
                "jedec-code": str,
                "manufacture-date": str,
                "part-number": str,
                "serial-number": str
            },
            "name": str,
            "serial-number": str
        }
    }
    }"""

    schema = {
        Optional("@xmlns:junos"): str,
        "chassis-inventory": {
            Optional("@xmlns"): str,
            "chassis": {
                Optional("@junos:style"): str,
                # One entry per chassis module (FPC, CB, Routing Engine, ...).
                "chassis-module": ListOf({
                    Optional("chassis-re-disk-module"): {
                        "description": str,
                        "disk-size": str,
                        "model": str,
                        "name": str,
                        "serial-number": str
                    },
                    # Sub-modules (CPU, MIC, XLM, ...) nested under a module.
                    Optional("chassis-sub-module"): ListOf({
                        Optional("chassis-sub-sub-module"): {
                            "description": str,
                            "name": str,
                            "part-number": str,
                            "serial-number": str
                        },
                        Optional("description"): str,
                        # Low-level I2C EEPROM details for the sub-module.
                        Optional("i2c-information"): {
                            "assembly-flags": str,
                            "assembly-identifier": str,
                            "assembly-version": str,
                            "board-information-record": str,
                            "eeprom-version": str,
                            Optional("i2c-data"): list,
                            Optional("i2c-identifier"): Or(str, None),
                            "i2c-version": Or(str, None),
                            "jedec-code": str,
                            "manufacture-date": str,
                            "part-number": Or(str, None),
                            Optional("serial-number"): Or(str,None)
                        },
                        "name": str,
                        Optional("part-number"): str,
                        Optional("serial-number"): str,
                        Optional("version"): str
                    }),
                    Optional("description"): str,
                    # I2C EEPROM details for the module itself.
                    Optional("i2c-information"): {
                        "assembly-flags": str,
                        "assembly-identifier": str,
                        "assembly-version": str,
                        "board-information-record": str,
                        "eeprom-version": str,
                        Optional("i2c-data"): list,
                        Optional("i2c-identifier"): Or(str, None),
                        "i2c-version": Or(str, None),
                        "jedec-code": str,
                        "manufacture-date": str,
                        "part-number": Or(str, None),
                        Optional("serial-number"): Or(str,None)
                    },
                    "name": str,
                    Optional("serial-number"): str
                }),
                "description": str,
                # I2C EEPROM details for the chassis itself.
                Optional("i2c-information"): {
                    "assembly-flags": str,
                    "assembly-identifier": str,
                    "assembly-version": str,
                    "board-information-record": str,
                    "eeprom-version": str,
                    Optional("i2c-data"): list,
                    Optional("i2c-identifier"): Or(str, None),
                    "i2c-version": Or(str, None),
                    "jedec-code": str,
                    "manufacture-date": str,
                    "part-number": Or(str, None),
                    Optional("serial-number"): Or(str, None)
                },
                "name": str,
                "serial-number": str
            }
        }
    }
class ShowChassisHardwareExtensive(ShowChassisHardwareExtensiveSchema):
""" Parser for:
* show chassis hardware extensive
"""
cli_command = 'show chassis hardware extensive'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
#Hardware inventory:
p1 = re.compile(r'^Hardware +(?P<style>\S+):$')
#Jedec Code: 0x7fb0 EEPROM Version: 0x02
p2 = re.compile(r'^Jedec Code: +(?P<jedec_code>\S+) '
r'+EEPROM Version: +(?P<eeprom_version>\S+)$')
#S/N: VM5D4C6B3599
p3 = re.compile(r'^S/N: +(?P<serial_number>\S+)$')
#Assembly ID: 0x0567 Assembly Version: 00.00
p4 = re.compile(r'^Assembly ID: +(?P<assembly_identifier>\S+) '
r'+Assembly Version: +(?P<assembly_version>\S+)$')
#Date: 00-00-0000 Assembly Flags: 0x00
p5 = re.compile(r'^Date: +(?P<manufacture_date>\S+) +Assembly Flags: '
r'+(?P<assembly_flags>\S+)$')
#ID: VMX
p6 = re.compile(r'^ID: +(?P<i2c_identifier>[\S\s]+)$')
#Board Information Record:
p7 = re.compile(r'^(?P<address_type>\ABoard Information Record):$')
#I2C Hex Data:
p8 = re.compile(r'^(?P<address_type>\AI2C Hex Data:)$')
#Address 0x00: 7f b0 02 00 fa 4e 01 00 52 65 76 2e 20 31 2e 30
p9 = re.compile(r'^(?P<address_info>\AAddress[\s\S]+)$')
#FPC 0 Virtual FPC
#CB 0 VMX SCB
p10 = re.compile(r'^(?P<name>(\S+\s\d+)) +(?P<description>\S+\s\S+)$')
#Routing Engine 0 RE-VMX
p11 = re.compile(r'^(?P<name>\S+\s+\S+\s+\d+) +(?P<description>\S+)$')
#cd0 27649 MB VMware Virtual IDE Har 00000000000000000001 Hard Disk
p12 = re.compile(r'^(?P<name>\S+) +(?P<disk_size>\d+) '
r'+MB +(?P<model>\S+\s+\S+\s+\S+\s+\S+) '
r'+(?P<serial_number>\d+) +(?P<description>'
r'\S+\s+\S+)$')
#CPU Rev. 1.0 RIOT-LITE BUILTIN
p13 = re.compile(r'^(?P<name>\S+) +(?P<version>[\S\.\d]+ '
r'[\S\.\d]+) +(?P<part_number>[\S\-]+) +'
r'(?P<serial_number>\S+)$')
#MIC 0 Virtual
p14 = re.compile(r'^(?P<name>\S+ \d+) +(?P<description>\S+)$')
#PIC 0 BUILTIN BUILTIN Virtual
p15 = re.compile(r'^(?P<name>\S+ \d+) +(?P<part_number>\S+) '
r'+(?P<serial_number>\S+) +(?P<description>\S+)$')
#Version: Rev. 1.0
p111 = re.compile(r'^Version: +(?P<version>[\S\s]+)$')
#Chassis VM5D4C6B3599 VMX
p16 = re.compile(r'^(?P<name>\S+) +(?P<serial_number>\S+) +'
r'(?P<description>\S+)$')
#Midplane
p17 = re.compile(r'^(?P<name>\S+)$')
ret_dict = {}
for line in out.splitlines()[1:]:
line = line.strip()
#Hardware inventory:
m = p1.match(line)
if m:
group = m.groupdict()
current_item = " "
chassis_inventory_dict = ret_dict.setdefault("chassis-inventory", {})\
.setdefault("chassis", {})
chassis_inventory_dict["@junos:style"] = group["style"]
chassis_entry_list = chassis_inventory_dict.setdefault("chassis-module", [])
continue
#Jedec Code: 0x7fb0 EEPROM Version: 0x02
m = p2.match(line)
if m:
group = m.groupdict()
i2c_dict = {}
i2c_dict["jedec-code"] = group["jedec_code"]
i2c_dict["eeprom-version"] = group["eeprom_version"]
continue
#S/N: VM5D4C6B3599
m = p3.match(line)
if m:
group = m.groupdict()
i2c_dict["serial-number"] = group["serial_number"]
continue
#Assembly ID: 0x0567 Assembly Version: 00.00
m = p4.match(line)
if m:
group = m.groupdict()
i2c_dict["assembly-identifier"] = group["assembly_identifier"]
i2c_dict["assembly-version"] = group["assembly_version"]
continue
#Date: 00-00-0000 Assembly Flags: 0x00
m = p5.match(line)
if m:
group = m.groupdict()
i2c_dict["manufacture-date"] = group["manufacture_date"]
i2c_dict["assembly-flags"] = group["assembly_flags"]
continue
#Version: Rev. 1.0
m = p111.match(line)
if m:
group = m.groupdict()
i2c_dict["i2c-version"] = group["version"]
continue
#ID: VMX
m = p6.match(line)
if m:
group = m.groupdict()
i2c_dict["i2c-identifier"] = group["i2c_identifier"]
continue
#Board Information Record:
m = p7.match(line)
if m:
group = m.groupdict()
complete_address = ""
address_type = group["address_type"]
continue
#I2C Hex Data:
m = p8.match(line)
if m:
group = m.groupdict()
complete_address = []
address_type = group["address_type"]
continue
#Address 0x00: 7f b0 02 00 fa 4e 01 00 52 65 76 2e 20 | |
== 'createControlsTable':
actions = self.process_controls_table(data)
result = {'actions': actions,
'results': {'operationSucceeded': True}
}
elif action == 'createMeasurementTable':
actions = self.process_create_measurement_table(data)
result = {'actions': actions,
'results': {'operationSucceeded': True}
}
elif action == 'createParameterTable':
actions = self.process_create_parameter_table(data)
result = {'actions': actions,
'results': {'operationSucceeded': True}
}
else:
logger.error('Unsupported form action: {}'.format(action))
logger.info('Action: %s' % result)
return self._create_http_response(HTTPStatus.OK, json.dumps(result), 'application/json')
except (Exception, DictionaryMaintainerException) as err:
logger.info('Action: %s resulted in exception %s' % (action, err))
return self._create_http_response(HTTPStatus.INTERNAL_SERVER_ERROR, json.dumps(result), 'application/json')
finally:
self.release_connection(client_state)
def process_form_link_all(self, data):
document_id = data['documentId']
lab_experiment = self.intent_parser_factory.create_lab_experiment(document_id)
lab_experiment.load_from_google_doc()
paragraphs = lab_experiment.paragraphs()
selected_term = data['selectedTerm']
uri = data['extra']['link']
actions = []
pos = 0
while True:
result = intent_parser_utils.find_exact_text(selected_term, pos, paragraphs)
if result is None:
break
search_result = { 'paragraph_index' : result[0],
'offset' : result[1],
'end_offset' : result[1] + len(selected_term) - 1,
'term' : selected_term,
'uri' : uri,
'link' : result[3],
'text' : result[4]}
# Only link terms that don't already have links
if search_result['link'] is None:
actions += self.add_link(search_result)
pos = result[2] + len(selected_term)
return actions
def process_button_click(self, httpMessage):
(json_body, client_state) = self.get_client_state(httpMessage)
if 'data' not in json_body:
errorMessage = 'Missing data'
raise ConnectionException(HTTPStatus.BAD_REQUEST, errorMessage)
data = json_body['data']
if 'buttonId' not in data:
errorMessage = 'data missing buttonId'
raise ConnectionException(HTTPStatus.BAD_REQUEST, errorMessage)
if type(data['buttonId']) is dict:
buttonDat = data['buttonId']
buttonId = buttonDat['buttonId']
else:
buttonId = data['buttonId']
method = getattr( self, buttonId )
try:
actionList = method(json_body, client_state)
actions = {'actions': actionList}
return self._create_http_response(HTTPStatus.OK, json.dumps(actions), 'application/json')
except Exception as e:
raise e
finally:
self.release_connection(client_state)
def process_nop(self, httpMessage, sm):
httpMessage # Fix unused warning
sm # Fix unused warning
return []
def process_message(self, httpMessage):
json_body = self.get_json_body(httpMessage)
if 'message' in json_body:
logger.info(json_body['message'])
return self._create_http_response(HTTPStatus.OK, '{}', 'application/json')
def process_validate_structured_request(self, httpMessage):
"""
Generate a structured request from a given document, then run it against the validation.
"""
json_body = intent_parser_utils.get_json_body(httpMessage)
validation_errors = []
validation_warnings = []
if json_body is None:
validation_errors.append('Unable to get information from Google document.')
else:
document_id = intent_parser_utils.get_document_id_from_json_body(json_body)
if 'data' in json_body and 'bookmarks' in json_body['data']:
intent_parser = self.intent_parser_factory.create_intent_parser(document_id, bookmarks=json_body['data']['bookmarks'])
else:
intent_parser = self.intent_parser_factory.create_intent_parser(document_id)
intent_parser.process()
validation_warnings.extend(intent_parser.get_validation_warnings())
validation_errors.extend(intent_parser.get_validation_errors())
if len(validation_errors) == 0:
dialog_action = intent_parser_view.valid_request_model_dialog(validation_warnings)
else:
dialog_action = intent_parser_view.invalid_request_model_dialog(validation_warnings, validation_errors)
actionList = [dialog_action]
actions = {'actions': actionList}
return self._create_http_response(HTTPStatus.OK, json.dumps(actions), 'application/json')
def process_generate_structured_request(self, httpMessage):
"""
Validates then generates an HTML link to retrieve a structured request.
"""
json_body = intent_parser_utils.get_json_body(httpMessage)
http_host = httpMessage.get_header('Host')
validation_errors = []
validation_warnings = []
if json_body is None or http_host is None:
validation_errors.append('Unable to get information from Google document.')
else:
document_id = intent_parser_utils.get_document_id_from_json_body(json_body)
if 'data' in json_body and 'bookmarks' in json_body['data']:
intent_parser = self.intent_parser_factory.create_intent_parser(document_id, bookmarks=json_body['data']['bookmarks'])
else:
intent_parser = self.intent_parser_factory.create_intent_parser(document_id)
intent_parser.process()
validation_warnings.extend(intent_parser.get_validation_warnings())
validation_errors.extend(intent_parser.get_validation_errors())
if len(validation_errors) == 0:
dialog_action = intent_parser_view.valid_request_model_dialog(validation_warnings, intent_parser_view.get_download_link(http_host, document_id))
else:
all_messages = []
all_messages.extend(validation_warnings)
all_messages.extend(validation_errors)
dialog_action = intent_parser_view.invalid_request_model_dialog(all_messages)
actionList = [dialog_action]
actions = {'actions': actionList}
return self._create_http_response(HTTPStatus.OK, json.dumps(actions), 'application/json')
def process_analyze_yes(self, json_body, client_state):
"""
Handle "Yes" button as part of analyze document.
"""
search_results = client_state['search_results']
search_result_index = client_state['search_result_index'] - 1
search_result = search_results[search_result_index]
if type(json_body['data']['buttonId']) is dict:
new_link = json_body['data']['buttonId']['link']
else:
new_link = None
actions = self.add_link(search_result, new_link);
actions += self.report_search_results(client_state)
return actions
def process_analyze_no(self, json_body, client_state):
"""
Handle "No" button as part of analyze document.
"""
json_body # Remove unused warning
return self.report_search_results(client_state)
def process_link_all(self, json_body, client_state):
"""
Handle "Link all" button as part of analyze document.
"""
search_results = client_state['search_results']
search_result_index = client_state['search_result_index'] - 1
search_result = search_results[search_result_index]
term = search_result['term']
term_search_results = list(filter(lambda x : x['term'] == term,
search_results))
if type(json_body['data']['buttonId']) is dict:
new_link = json_body['data']['buttonId']['link']
else:
new_link = None
actions = []
for term_result in term_search_results:
actions += self.add_link(term_result, new_link);
actions += self.report_search_results(client_state)
return actions
def process_no_to_all(self, json_body, client_state):
"""
Handle "No to all" button as part of analyze document.
"""
json_body # Remove unused warning
curr_idx = client_state['search_result_index'] - 1
next_idx = curr_idx + 1
search_results = client_state['search_results']
while next_idx < len(search_results) and search_results[curr_idx]['term'] == search_results[next_idx]['term']:
next_idx = next_idx + 1
# Are we at the end? Then just exit
if next_idx >= len(search_results):
return []
term_to_ignore = search_results[curr_idx]['term']
# Generate results without term to ignore
new_search_results = [r for r in search_results if not r['term'] == term_to_ignore ]
# Find out what term to point to
new_idx = new_search_results.index(search_results[next_idx])
# Update client state
client_state['search_results'] = new_search_results
client_state['search_result_index'] = new_idx
return self.report_search_results(client_state)
def process_never_link(self, json_body, client_state):
"""
Handle "Never Link" button as part of analyze document.
This works like "No to all" but also stores the association to ignore it in subsequent runs.
"""
json_body # Remove unused warning
curr_idx = client_state['search_result_index'] - 1
search_results = client_state['search_results']
dict_term = search_results[curr_idx]['term']
content_text = search_results[curr_idx]['text']
userId = client_state['user_id']
# Make sure we have a list of link preferences for this userId
if not userId in self.analyze_never_link:
link_pref_file = os.path.join(self.LINK_PREF_PATH, userId + '.json')
if os.path.exists(link_pref_file):
try:
with open(link_pref_file, 'r') as fin:
self.analyze_never_link[userId] = json.load(fin)
logger.info('Loaded link preferences for userId, path: %s' % link_pref_file)
except:
logger.error('ERROR: Failed to load link preferences file!')
else:
self.analyze_never_link[userId] = {}
# Update link preferences
if dict_term in self.analyze_never_link[userId]:
# Append text to list of no-link preferences
self.analyze_never_link[userId][dict_term].append(content_text)
else:
# If no prefs for this dict term, start a new list with the current text
self.analyze_never_link[userId][dict_term] = [content_text]
link_pref_file = os.path.join(self.LINK_PREF_PATH, userId + '.json')
try:
with open(link_pref_file, 'w') as fout:
json.dump(self.analyze_never_link[userId], fout)
except:
logger.error('ERROR: Failed to write link preferences file!')
# Remove all of these associations from the results
# This is different from "No to All", because that's only termed based
# This depends on the term and the text
next_idx = curr_idx + 1
while next_idx < len(search_results) and search_results[curr_idx]['term'] == search_results[next_idx]['term'] and search_results[curr_idx]['text'] == search_results[next_idx]['text']:
next_idx = next_idx + 1
# Are we at the end? Then just exit
if next_idx >= len(search_results):
return []
term_to_ignore = search_results[curr_idx]['term']
text_to_ignore = search_results[curr_idx]['text']
# Generate results without term to ignore
new_search_results = [r for r in search_results if not r['term'] == term_to_ignore and not r['text'] == text_to_ignore]
# Find out what term to point to
new_idx = new_search_results.index(search_results[next_idx])
# Update client state
client_state['search_results'] = new_search_results
client_state['search_result_index'] = new_idx
return self.report_search_results(client_state)
    def process_search_syn_bio_hub(self, httpMessage):
        """Run a similarity search against SynBioHub and return paginated HTML results.

        Reads ``term`` (and optionally ``offset``, ``analyze``,
        ``selected_uri``) from the request body. Any exception during the
        search yields a generic operation-failed payload (still HTTP 200,
        so the add-on UI keeps working).
        """
        json_body = intent_parser_utils.get_json_body(httpMessage)
        data = json_body['data']
        try:
            offset = 0
            if 'offset' in data:
                offset = int(data['offset'])
            # Bounds check offset value
            if offset < 0:
                offset = 0
            if data['term'] in self.sparql_similar_count_cache:
                # Ensure offset isn't past the end of the results
                # (clamped so the final page is still a full sparql_limit-sized window).
                if offset > int(self.sparql_similar_count_cache[data['term']]) - self.sparql_limit:
                    offset = max(0, int(self.sparql_similar_count_cache[data['term']]) - self.sparql_limit)
            else:
                # Don't allow a non-zero offset if we haven't cached the size of the query
                if offset > 0:
                    offset = 0
            # In analyze mode the currently selected URI is passed through as a
            # filter — presumably to exclude it from results; confirm in
            # simple_syn_bio_hub_search.
            if 'analyze' in data:
                analyze = True
                filter_uri = data['selected_uri']
            else:
                analyze = False
                filter_uri = None
            search_results, results_count = self.simple_syn_bio_hub_search(data['term'], offset, filter_uri)
            # Build one HTML row per hit, then append the pagination controls.
            table_html = ''
            for search_result in search_results:
                title = search_result['title']
                target = search_result['target']
                table_html += intent_parser_view.generate_existing_link_html(title, target, analyze)
            table_html += self.generate_results_pagination_html(offset, int(results_count))
            response = {'results':
                        {'operationSucceeded': True,
                         'search_results': search_results,
                         'table_html': table_html
                        }}
        except Exception as err:
            # Deliberate catch-all boundary: log and report a generic failure
            # rather than breaking the client.
            logger.error(str(err))
            return self._create_http_response(HTTPStatus.OK, json.dumps(intent_parser_view.operation_failed('Failed to search SynBioHub')), 'application/json')
        return self._create_http_response(HTTPStatus.OK, json.dumps(response), 'application/json')
def process_create_table_template(self, httpMessage):
"""
Process create table templates.
"""
try:
json_body = intent_parser_utils.get_json_body(httpMessage)
data = json_body['data']
cursor_child_index = str(data['childIndex'])
table_type = data['tableType']
actionList = []
if table_type == 'controls':
dialog_action = intent_parser_view.create_controls_table_dialog(cursor_child_index)
actionList.append(dialog_action)
elif table_type == 'measurements':
dialog_action = intent_parser_view.create_measurement_table_template(cursor_child_index)
actionList.append(dialog_action)
elif table_type == 'parameters':
protocol_options = list(intent_parser_constants.PROTOCOL_NAMES.values())
dialog_action = intent_parser_view.create_parameter_table_template(cursor_child_index, protocol_options)
actionList.append(dialog_action)
else :
logger.warning('WARNING: unsupported table type: %s' % table_type)
actions = {'actions': actionList}
return self._create_http_response(HTTPStatus.OK, json.dumps(actions), 'application/json')
except Exception as e:
raise e
def process_add_to_syn_bio_hub(self, httpMessage):
try:
json_body = intent_parser_utils.get_json_body(httpMessage)
data = json_body['data']
start = data['start']
end = data['end']
document_id = intent_parser_utils.get_document_id_from_json_body(json_body)
start_paragraph = start['paragraphIndex'];
end_paragraph = end['paragraphIndex'];
start_offset = start['offset']
end_offset = end['offset']
dialog_action = self._add_to_syn_bio_hub(document_id, start_paragraph, end_paragraph,
start_offset, end_offset)
actionList = [dialog_action]
actions = {'actions': actionList}
return self._create_http_response(HTTPStatus.OK, json.dumps(actions), 'application/json')
except Exception as e:
raise e
| |
<filename>tests/test_01_users.py
import pytest
from django.contrib.auth import get_user_model
from .common import auth_client, create_users_api
class Test01UserAPI:
@pytest.mark.django_db(transaction=True)
def test_01_users_not_auth(self, client):
response = client.get('/api/v1/users/')
assert response.status_code != 404, (
'Страница `/api/v1/users/` не найдена, проверьте этот адрес в *urls.py*'
)
assert response.status_code == 401, (
'Проверьте, что при GET запросе `/api/v1/users/` без токена авторизации возвращается статус 401'
)
@pytest.mark.django_db(transaction=True)
def test_02_users_username_not_auth(self, client, admin):
response = client.get(f'/api/v1/users/{admin.username}/')
assert response.status_code != 404, (
'Страница `/api/v1/users/{username}/` не найдена, проверьте этот адрес в *urls.py*'
)
assert response.status_code == 401, (
'Проверьте, что при GET запросе `/api/v1/users/{username}/` без токена авторизации возвращается статус 401'
)
@pytest.mark.django_db(transaction=True)
def test_03_users_me_not_auth(self, client):
response = client.get('/api/v1/users/me/')
assert response.status_code != 404, (
'Страница `/api/v1/users/me/` не найдена, проверьте этот адрес в *urls.py*'
)
assert response.status_code == 401, (
'Проверьте, что при GET запросе `/api/v1/users/me/` без токена авторизации возвращается статус 401'
)
    @pytest.mark.django_db(transaction=True)
    def test_04_users_get_auth(self, user_client, admin):
        """Authorized GET /api/v1/users/ returns 200 with a paginated payload
        containing only the admin user."""
        response = user_client.get('/api/v1/users/')
        assert response.status_code != 404, (
            'Страница `/api/v1/users/` не найдена, проверьте этот адрес в *urls.py*'
        )
        assert response.status_code == 200, (
            'Проверьте, что при GET запросе `/api/v1/users/` с токеном авторизации возвращается статус 200'
        )
        data = response.json()
        # The list endpoint must use the standard DRF pagination envelope.
        assert 'count' in data, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Не найден параметр `count`'
        )
        assert 'next' in data, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Не найден параметр `next`'
        )
        assert 'previous' in data, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Не найден параметр `previous`'
        )
        assert 'results' in data, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Не найден параметр `results`'
        )
        # Only the admin fixture user exists at this point.
        assert data['count'] == 1, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Значение параметра `count` не правильное'
        )
        assert type(data['results']) == list, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Тип параметра `results` должен быть список'
        )
        assert len(data['results']) == 1 and data['results'][0].get('username') == admin.username \
            and data['results'][0].get('email') == admin.email, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Значение параметра `results` не правильное'
        )
    @pytest.mark.django_db(transaction=True)
    def test_05_users_post_auth(self, user_client, admin):
        """Authorized POST /api/v1/users/: invalid payloads (empty, missing
        email, duplicate email, duplicate username) return 400; valid payloads
        return 201, echo all fields, and actually create users."""
        # Empty payload must be rejected.
        data = {}
        response = user_client.post('/api/v1/users/', data=data)
        assert response.status_code == 400, (
            'Проверьте, что при POST запросе `/api/v1/users/` с не правильными данными возвращает 400'
        )
        # Payload missing the required email must be rejected.
        data = {
            'username': 'TestUser1231231',
            'role': 'user'
        }
        response = user_client.post('/api/v1/users/', data=data)
        assert response.status_code == 400, (
            'Проверьте, что при POST запросе `/api/v1/users/` с не правильными данными возвращает 400'
        )
        # Reusing an existing email must be rejected (email is unique).
        data = {
            'username': 'TestUser1231231',
            'role': 'user',
            'email': admin.email
        }
        response = user_client.post('/api/v1/users/', data=data)
        assert response.status_code == 400, (
            'Проверьте, что при POST запросе `/api/v1/users/` с не правильными данными возвращает 400. '
            '`Email` должен быть уникальный у каждого прользователя'
        )
        # Reusing an existing username must be rejected (username is unique).
        data = {
            'username': admin.username,
            'role': 'user',
            'email': 'test<EMAIL>'
        }
        response = user_client.post('/api/v1/users/', data=data)
        assert response.status_code == 400, (
            'Проверьте, что при POST запросе `/api/v1/users/` с не правильными данными возвращает 400. '
            '`Username` должен быть уникальный у каждого прользователя'
        )
        # Minimal valid payload must create a user.
        data = {
            'username': 'TestUser1231231',
            'role': 'user',
            'email': '<EMAIL>'
        }
        response = user_client.post('/api/v1/users/', data=data)
        assert response.status_code == 201, (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращает 201.'
        )
        # Full valid payload: every submitted field must be echoed back.
        data = {
            'first_name': 'fsdfsdf',
            'last_name': 'dsgdsfg',
            'username': 'TestUser4534',
            'bio': 'Jdlkjd',
            'role': 'moderator',
            'email': '<EMAIL>'
        }
        response = user_client.post('/api/v1/users/', data=data)
        assert response.status_code == 201, (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращает 201.'
        )
        response_data = response.json()
        assert response_data.get('first_name') == data['first_name'], (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращаете `first_name`.'
        )
        assert response_data.get('last_name') == data['last_name'], (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращаете `last_name`.'
        )
        assert response_data.get('username') == data['username'], (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращаете `username`.'
        )
        assert response_data.get('bio') == data['bio'], (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращаете `bio`.'
        )
        assert response_data.get('role') == data['role'], (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращаете `role`.'
        )
        assert response_data.get('email') == data['email'], (
            'Проверьте, что при POST запросе `/api/v1/users/` с правильными данными возвращаете `email`.'
        )
        # admin + the two users created above = 3 users total.
        assert get_user_model().objects.count() == 3, (
            'Проверьте, что при POST запросе `/api/v1/users/` вы создаёте пользователей.'
        )
        response = user_client.get('/api/v1/users/')
        data = response.json()
        assert len(data['results']) == 3, (
            'Проверьте, что при GET запросе `/api/v1/users/` возвращаете данные с пагинацией. '
            'Значение параметра `results` не правильное'
        )
@pytest.mark.django_db(transaction=True)
def test_06_users_username_get_auth(self, user_client, admin):
    """GET /api/v1/users/{username}/ returns 200 and the user's profile."""
    user, moderator = create_users_api(user_client)
    response = user_client.get(f'/api/v1/users/{admin.username}/')
    # Check 404 first to distinguish a missing route from a wrong status.
    assert response.status_code != 404, (
        'Страница `/api/v1/users/{username}/` не найдена, проверьте этот адрес в *urls.py*'
    )
    assert response.status_code == 200, (
        'Проверьте, что при GET запросе `/api/v1/users/{username}/` с токеном авторизации возвращается статус 200'
    )
    response_data = response.json()
    assert response_data.get('username') == admin.username, (
        'Проверьте, что при GET запросе `/api/v1/users/{username}/` возвращаете `username`.'
    )
    assert response_data.get('email') == admin.email, (
        'Проверьте, что при GET запросе `/api/v1/users/{username}/` возвращаете `email`.'
    )
    # Same endpoint for a second user: every serialized field must match
    # the model instance.
    response = user_client.get(f'/api/v1/users/{moderator.username}/')
    assert response.status_code == 200, (
        'Проверьте, что при GET запросе `/api/v1/users/{username}/` с токеном авторизации возвращается статус 200'
    )
    response_data = response.json()
    assert response_data.get('username') == moderator.username, (
        'Проверьте, что при GET запросе `/api/v1/users/{username}/` возвращаете `username`.'
    )
    assert response_data.get('email') == moderator.email, (
        'Проверьте, что при GET запросе `/api/v1/users/{username}/` возвращаете `email`.'
    )
    assert response_data.get('first_name') == moderator.first_name, (
        'Проверьте, что при GET запросе `/api/v1/users/` возвращаете `first_name`.'
    )
    assert response_data.get('last_name') == moderator.last_name, (
        'Проверьте, что при GET запросе `/api/v1/users/` возвращаете `last_name`.'
    )
    assert response_data.get('bio') == moderator.bio, (
        'Проверьте, что при GET запросе `/api/v1/users/` возвращаете `bio`.'
    )
    assert response_data.get('role') == moderator.role, (
        'Проверьте, что при GET запросе `/api/v1/users/` возвращаете `role`.'
    )
@pytest.mark.django_db(transaction=True)
def test_07_users_username_patch_auth(self, user_client, admin):
    """PATCH /api/v1/users/{username}/ updates profile fields and role."""
    user, moderator = create_users_api(user_client)
    data = {
        'first_name': 'Admin',
        'last_name': 'Test',
        'bio': 'description'
    }
    response = user_client.patch(f'/api/v1/users/{admin.username}/', data=data)
    assert response.status_code == 200, (
        'Проверьте, что при PATCH запросе `/api/v1/users/{username}/` '
        'с токеном авторизации возвращается статус 200'
    )
    # Re-fetch from the DB to confirm the change was actually persisted,
    # not just echoed back in the response.
    test_admin = get_user_model().objects.get(username=admin.username)
    assert test_admin.first_name == data['first_name'], (
        'Проверьте, что при PATCH запросе `/api/v1/users/{username}/` изменяете данные.'
    )
    assert test_admin.last_name == data['last_name'], (
        'Проверьте, что при PATCH запросе `/api/v1/users/{username}/` изменяете данные.'
    )
    # Promote a regular user to admin, then verify the new role grants
    # access to another user's profile (admin-only operation).
    response = user_client.patch(f'/api/v1/users/{user.username}/', data={'role': 'admin'})
    assert response.status_code == 200, (
        'Проверьте, что при PATCH запросе `/api/v1/users/{username}/` '
        'с токеном авторизации возвращается статус 200'
    )
    client_user = auth_client(user)
    response = client_user.get(f'/api/v1/users/{admin.username}/')
    assert response.status_code == 200, (
        'Проверьте, что при PATCH запросе `/api/v1/users/{username}/` можно изменить роль пользователя'
    )
@pytest.mark.django_db(transaction=True)
def test_08_users_username_delete_auth(self, user_client):
    """DELETE /api/v1/users/{username}/ removes the user and returns 204."""
    user, moderator = create_users_api(user_client)
    response = user_client.delete(f'/api/v1/users/{user.username}/')
    assert response.status_code == 204, (
        'Проверьте, что при DELETE запросе `/api/v1/users/{username}/` возвращаете статус 204'
    )
    # Presumably 3 users exist before deletion (the client's user plus the
    # two created by create_users_api), so the count must drop to 2.
    assert get_user_model().objects.count() == 2, (
        'Проверьте, что при DELETE запросе `/api/v1/users/{username}/` удаляете пользователя'
    )
def check_permissions(self, user, user_name, admin):
    """Assert *user* receives 403 on every user-management operation.

    Helper (no ``test_`` prefix, so pytest does not collect it directly).
    *user_name* is a human-readable role label used only in failure
    messages; *admin* supplies the target username for detail routes.
    """
    client_user = auth_client(user)
    # List users
    response = client_user.get('/api/v1/users/')
    assert response.status_code == 403, (
        f'Проверьте, что при GET запросе `/api/v1/users/` '
        f'с токеном авторизации {user_name} возвращается статус 403'
    )
    # Create a user
    data = {
        'username': 'TestUser9876',
        'role': 'user',
        'email': '<EMAIL>'
    }
    response = client_user.post('/api/v1/users/', data=data)
    assert response.status_code == 403, (
        f'Проверьте, что при POST запросе `/api/v1/users/` '
        f'с токеном авторизации {user_name} возвращается статус 403'
    )
    # Read another user's profile
    response = client_user.get(f'/api/v1/users/{admin.username}/')
    assert response.status_code == 403, (
        f'Проверьте, что при GET запросе `/api/v1/users/{{username}}/` '
        f'с токеном авторизации {user_name} возвращается статус 403'
    )
    # Modify another user's profile
    data = {
        'first_name': 'Admin',
        'last_name': 'Test',
        'bio': 'description'
    }
    response = client_user.patch(f'/api/v1/users/{admin.username}/', data=data)
    assert response.status_code == 403, (
        f'Проверьте, что при PATCH запросе `/api/v1/users/{{username}}/` '
        f'с токеном авторизации {user_name} возвращается статус 403'
    )
    # Delete another user
    response = client_user.delete(f'/api/v1/users/{admin.username}/')
    assert response.status_code == 403, (
        f'Проверьте, что при DELETE запросе `/api/v1/users/{{username}}/` '
        f'с токеном авторизации {user_name} возвращается статус 403'
    )
@pytest.mark.django_db(transaction=True)
def test_09_users_check_permissions(self, user_client, admin):
    """Neither regular users nor moderators may manage other users."""
    user, moderator = create_users_api(user_client)
    self.check_permissions(user, 'обычного пользователя', admin)
    self.check_permissions(moderator, 'модератора', admin)
@pytest.mark.django_db(transaction=True)
def test_10_users_me_get(self, user_client, admin):
user, moderator = create_users_api(user_client)
response = user_client.get('/api/v1/users/me/')
assert response.status_code == 200, (
'Проверьте, что при GET запросе `/api/v1/users/me/` с токеном авторизации возвращается статус 200'
)
response_data = response.json()
assert response_data.get('username') == admin.username, (
'Проверьте, что при GET запросе `/api/v1/users/me/` возвращаете данные пользователя'
)
client_user = auth_client(moderator)
response = client_user.get('/api/v1/users/me/')
assert response.status_code == 200, (
'Проверьте, что при GET запросе `/api/v1/users/me/` с токеном авторизации возвращается статус 200'
)
response_data = response.json()
assert response_data.get('username') == moderator.username, (
'Проверьте, что при GET запросе | |
ma.AbsoluteURLFor('observations.unflagged',
observation_id='<observation_id>'),
},
description="Contains a link to the Observation endpoints."
)
# --- Observation schemas ---------------------------------------------------

@spec.define_schema('ObservationValues')
class ObservationValuesSchema(ObservationValuesPostSchema):
    # Response form of posted values: adds the owning id and nav links.
    observation_id = ma.UUID(
        title='Observation ID',
        description="UUID of the Observation associated with this data.")
    _links = OBSERVATION_LINKS


class TimeRangeSchema(ma.Schema):
    # Base schema for the first/last timestamps of a stored value series;
    # subclassed below to attach the owning object's id (and links).
    class Meta:
        strict = True
        ordered = True
    min_timestamp = ISODateTime(
        title="Minimum Timestamp",
        description=("The minimum timestamp in the value series as"
                     " an ISO 8601 datetime."),
    )
    max_timestamp = ISODateTime(
        title="Maximum Timestamp",
        description=("The maximum timestamp in the value series as"
                     " an ISO 8601 datetime."),
    )


@spec.define_schema('ObservationTimeRange')
class ObservationTimeRangeSchema(TimeRangeSchema):
    # NOTE(review): title 'Obs ID' is inconsistent with the 'Observation ID'
    # title used elsewhere; renaming would change generated API docs —
    # confirm before unifying.
    observation_id = ma.UUID(
        title='Obs ID',
        description="UUID of the Observation associated with this data.")
    _links = OBSERVATION_LINKS


@spec.define_schema('ObservationDefinition')
class ObservationPostSchema(ma.Schema):
    # Payload accepted when creating a new Observation.
    class Meta:
        strict = True
        ordered = True
    variable = VARIABLE_FIELD
    site_id = ma.UUID(
        title='Site ID',
        description="UUID the associated site",
        required=True)
    name = ma.String(
        title='Name',
        description='Human friendly name for the observation',
        required=True,
        validate=[UserstringValidator(), validate.Length(max=64)])
    interval_label = INTERVAL_LABEL
    interval_length = INTERVAL_LENGTH
    interval_value_type = INTERVAL_VALUE_TYPE
    uncertainty = ma.Float(
        title='Uncertainty',
        description='A measure of the uncertainty of the observation values.',
        missing=None
    )
    extra_parameters = EXTRA_PARAMETERS_FIELD

    @validates_schema
    def validate_observation(self, data, **kwargs):
        # Shared cross-field check; presumably validates event-style
        # observations (see validate_if_event) — confirm against its def.
        validate_if_event(self, data, **kwargs)
@spec.define_schema('ObservationUpdate')
class ObservationUpdateSchema(ma.Schema):
    # Payload for partial updates; only these fields may change.
    name = ma.String(
        title='Name',
        description='Human friendly name for the observation',
        validate=[UserstringValidator(), validate.Length(max=64)])
    uncertainty = ma.Float(
        title='Uncertainty',
        description='A measure of the uncertainty of the observation values.',
        allow_none=True,
    )
    extra_parameters = EXTRA_PARAMETERS_UPDATE

    def _deserialize(self, value, **kwargs):
        # Distinguish "uncertainty explicitly sent as null" (clear the
        # stored value) from "uncertainty key absent" (leave unchanged):
        # the '' default ensures a missing key never compares equal to
        # None, so null_uncertainty is True only for an explicit null.
        out = super()._deserialize(value, **kwargs)
        if out.get('uncertainty', '') is None:
            out['null_uncertainty'] = True
        else:
            out['null_uncertainty'] = False
        return out
@spec.define_schema('ObservationMetadata')
class ObservationSchema(ObservationPostSchema):
    # Full observation record as returned by the API: the POST fields plus
    # server-assigned metadata and a link to the parent site.
    class Meta:
        strict = True
        ordered = True
    _links = ma.Hyperlinks(
        {
            'site': ma.AbsoluteURLFor('sites.single',
                                      site_id='<site_id>')
        },
        description="Contains a link to the associated site."
    )
    observation_id = ma.UUID()
    provider = ma.String()
    created_at = CREATED_AT
    modified_at = MODIFIED_AT


@spec.define_schema('ObservationLinks')
class ObservationLinksSchema(ma.Schema):
    # Minimal listing entry: just the id and navigation links.
    class Meta:
        strict = True
        ordered = True
    observation_id = ma.UUID()
    _links = OBSERVATION_LINKS


@spec.define_schema('ObservationValueGap')
class ObservationGapSchema(ValueGapListSchema):
    # Gaps (missing spans) in an observation's value series.
    observation_id = ma.UUID(title="Observation ID")
    _links = OBSERVATION_LINKS


@spec.define_schema('ObservationUnflagged')
class ObservationUnflaggedSchema(ma.Schema):
    # Dates containing data not flagged with a given quality flag.
    _links = OBSERVATION_LINKS
    observation_id = ma.UUID(title="Observation ID")
    dates = ma.List(ma.Date, title="Unflagged dates",
                    description=("List of dates that includes data not flagged"
                                 " with the given flag."))
# Forecasts

# Navigation links shared by every Forecast response schema.
FORECAST_LINKS = ma.Hyperlinks(
    {
        'metadata': ma.AbsoluteURLFor('forecasts.metadata',
                                      forecast_id='<forecast_id>'),
        'values': ma.AbsoluteURLFor('forecasts.values',
                                    forecast_id='<forecast_id>'),
        'timerange': ma.AbsoluteURLFor('forecasts.time_range',
                                       forecast_id='<forecast_id>'),
        'latest': ma.AbsoluteURLFor('forecasts.latest_value',
                                    forecast_id='<forecast_id>'),
        'gaps': ma.AbsoluteURLFor('forecasts.gaps',
                                  forecast_id='<forecast_id>'),
    },
    description="Contains a link to the Forecast endpoints."
)


@spec.define_schema('ForecastValue')
class ForecastValueSchema(ma.Schema):
    # A single (timestamp, value) forecast point.
    class Meta:
        strict = True
        ordered = True
    timestamp = ISODateTime(
        title="Timestamp",
        description=(
            "ISO 8601 Datetime. Unlocalized times are assumed to be UTC."
        ),
        validate=TimeLimitValidator()
    )
    value = ma.Float(
        title="Value",
        description=(
            "Value of the forecast variable. "
            "NaN may be indicated with JSON null."),
        allow_nan=True)


@spec.define_schema('ForecastValuesPost')
class ForecastValuesPostSchema(ma.Schema):
    # POST body: a timeseries of forecast points.
    values = TimeseriesField(ForecastValueSchema, many=True)


@spec.define_schema('ForecastValuesCSV', component={
    "type": "string",
    "description": """
Text file with fields separated by ',' and lines separated by '\\n'.
'#' is parsed as a comment character.
The a header with fields "timestamp" and "value" must be included after
any comment lines.
Timestamp must be an ISO 8601 datetime and value may be an integer or float.
Values that will be interpreted as NaN include the empty string,
-999.0, -9999.0, 'nan', 'NaN', 'NA', 'N/A', 'n/a', 'null'.
"""})
class ForecastValuesCSVSchema(ma.Schema):
    # Placeholder: the CSV request body is described by the component
    # string above rather than by marshmallow fields.
    pass


@spec.define_schema('ForecastValues')
class ForecastValuesSchema(ForecastValuesPostSchema):
    # Response form of posted values: adds the forecast id and links.
    forecast_id = ma.UUID(
        title="Forecast ID",
        description="UUID of the forecast associated with this data.")
    _links = FORECAST_LINKS


@spec.define_schema('ForecastTimeRange')
class ForecastTimeRangeSchema(TimeRangeSchema):
    forecast_id = ma.UUID(
        title='Forecast ID',
        description="UUID of the forecast associated with this data.")
@spec.define_schema('ForecastDefinition')
class ForecastPostSchema(ma.Schema):
    # Payload accepted when creating a new Forecast. Exactly one of
    # site_id / aggregate_id must be supplied (enforced in
    # validate_forecast below).
    class Meta:
        strict = True
        ordered = True
    site_id = ma.UUID(
        name="Site ID",
        description=(
            "UUID of the associated site. Either site_id or aggregate_id "
            "must be provided"),
        required=False,
        allow_none=True)
    aggregate_id = ma.UUID(
        name="Aggregate ID",
        description=(
            "UUID of the associated aggregate. Either site_id or aggregate_id "
            "must be provided"),
        required=False,
        allow_none=True)
    name = ma.String(
        title='Name',
        description="Human friendly name for forecast",
        required=True,
        validate=[UserstringValidator(), validate.Length(max=64)])
    variable = VARIABLE_FIELD
    issue_time_of_day = ma.String(
        title='Issue Time of Day',
        required=True,
        validate=TimeFormat('%H:%M'),
        description=('The time of day that a forecast run is issued specified '
                     'in UTC in HH:MM format, e.g. 00:30. '
                     'For forecast runs issued multiple times within one day '
                     '(e.g. hourly), this specifies the first issue time of '
                     'day. Additional issue times are uniquely determined by '
                     'the first issue time and the run length & issue '
                     'frequency attribute.'))
    lead_time_to_start = ma.Integer(
        title='Lead time to start',
        description=("The difference between the issue time and the start of "
                     "the first forecast interval in minutes, e.g. 60 for one "
                     "hour."),
        required=True)
    interval_label = INTERVAL_LABEL
    interval_length = INTERVAL_LENGTH
    interval_value_type = INTERVAL_VALUE_TYPE
    run_length = ma.Integer(
        title='Run Length / Issue Frequency',
        description=('The total length of a single issued forecast run in '
                     'minutes, e.g. 60 for 1 hour. To enforce a continuous, '
                     'non-overlapping sequence, this is equal to the forecast '
                     'run issue frequency.'),
        required=True
    )
    extra_parameters = EXTRA_PARAMETERS_FIELD

    @validates_schema
    def validate_forecast(self, data, **kwargs):
        # XOR check: a forecast belongs to exactly one site OR one
        # aggregate — reject both-set and neither-set.
        if (
            data.get('site_id') is not None and
            data.get('aggregate_id') is not None
        ):
            raise ValidationError(
                "Forecasts can only be associated with one site or one "
                "aggregate, so only site_id or aggregate_id may be provided")
        elif (
            data.get('site_id') is None and
            data.get('aggregate_id') is None
        ):
            raise ValidationError(
                "One of site_id or aggregate_id must be provided")
        # Additional cross-field validation shared with observations.
        validate_if_event(self, data, **kwargs)
@spec.define_schema('ForecastMetadata')
class ForecastSchema(ForecastPostSchema):
    # Full forecast record: the POST fields plus server-assigned metadata
    # and links to the parent site or aggregate.
    _links = ma.Hyperlinks(
        {
            'site': ma.AbsoluteURLFor('sites.single',
                                      site_id='<site_id>'),
            'aggregate': ma.AbsoluteURLFor('aggregates.single',
                                           aggregate_id='<aggregate_id>')
        },
        description="Contains a link to the associated site or aggregate."
    )
    forecast_id = ma.UUID()
    provider = ma.String()
    created_at = CREATED_AT
    modified_at = MODIFIED_AT


@spec.define_schema('ForecastUpdate')
class ForecastUpdateSchema(ma.Schema):
    # Payload for partial updates; only these fields may change.
    name = ma.String(
        title='Name',
        description='Human friendly name for the forecast',
        validate=[UserstringValidator(), validate.Length(max=64)])
    extra_parameters = EXTRA_PARAMETERS_UPDATE
@spec.define_schema('ForecastLinks')
class ForecastLinksSchema(ma.Schema):
    """Minimal forecast listing entry: id plus navigation links."""
    class Meta:
        # BUG FIX: was `string = True` — a typo that silently disabled
        # strict mode for this schema. Every sibling schema in this module
        # (e.g. ObservationLinksSchema) declares `strict = True`.
        strict = True
        ordered = True
    forecast_id = ma.UUID()
    _links = FORECAST_LINKS
@spec.define_schema('ForecastValueGap')
class ForecastGapSchema(ValueGapListSchema):
    # Gaps (missing spans) in a forecast's value series.
    forecast_id = ma.UUID(
        title="Forecast ID",
        description="UUID of the forecast associated with this data.")
    _links = FORECAST_LINKS


# Probabilistic Forecasts

# Whether the group's constant values are variable values ('x') or
# percentiles ('y'); shared by all CDF schemas below.
AXIS_FIELD = ma.String(
    title='Axis',
    description=('Axis - The axis on which the constant values of the CDF '
                 'is specified. The axis can be either x (constant '
                 'variable values) or y (constant percentiles). The axis '
                 'is fixed and the same for all forecasts in the '
                 'probabilistic forecast.'),
    required=True,
    validate=validate.OneOf(['x', 'y'])
)

# Links for a single constant-value forecast within a probabilistic group.
_cdf_links = {
    'values': ma.AbsoluteURLFor(
        'forecasts.single_cdf_value',
        forecast_id='<forecast_id>'),
    'timerange': ma.AbsoluteURLFor(
        'forecasts.cdf_time_range',
        forecast_id='<forecast_id>'),
    'latest': ma.AbsoluteURLFor(
        'forecasts.cdf_latest_value',
        forecast_id='<forecast_id>'),
    'gaps': ma.AbsoluteURLFor(
        'forecasts.cdf_gaps',
        forecast_id='<forecast_id>'),
}
# "Full" variant adds a back-link to the parent group; deepcopy keeps the
# extra key from leaking into _cdf_links above.
_cdf_links_full = deepcopy(_cdf_links)
_cdf_links_full['probability_forecast_group'] = ma.AbsoluteURLFor(
    'forecasts.single_cdf_group',
    forecast_id='<parent>')
CDF_LINKS = ma.Hyperlinks(
    _cdf_links,
    name="Probabilistic constant value links",
    description="Contains a link to the constant value endpoints."
)
CDF_LINKS_FULL = ma.Hyperlinks(
    _cdf_links_full,
    name="Full probabilistic constant value links",
    description="Contains a link to the constant value endpoints."
)
@spec.define_schema('CDFForecastTimeRange')
class CDFForecastTimeRangeSchema(TimeRangeSchema):
    forecast_id = ma.UUID(
        title='Forecast ID',
        description=(
            "UUID of the probabilistic forecast associated with this data."))


@spec.define_schema('CDFForecastValue')
class CDFForecastValueSchema(ForecastValueSchema):
    # Like a plain forecast point, but the value's meaning depends on the
    # group's axis (percentile vs. physical units).
    value = ma.Float(
        title="Value",
        description=(
            'Value of the forecast variable. If axis="x", this value '
            'has units of percent corresponding to a percentile. '
            'If axis="y", this value has the physical units of the variable, '
            'e.g. W/m^2 if variable="ghi". '
            'NaN may be indicated with JSON null.'),
        allow_nan=True)


@spec.define_schema('CDFForecastValuesPost')
class CDFForecastValuesPostSchema(ma.Schema):
    # POST body: a timeseries of CDF forecast points.
    values = TimeseriesField(CDFForecastValueSchema, many=True)


@spec.define_schema('CDFForecastValues')
class CDFForecastValuesSchema(CDFForecastValuesPostSchema):
    # Response form of posted values: adds the forecast id and links.
    forecast_id = ma.UUID(
        title="Forecast ID",
        description="UUID of the forecast associated with this data.")
    _links = CDF_LINKS


@spec.define_schema('CDFForecastGroupDefinition')
class CDFForecastGroupPostSchema(ForecastPostSchema):
    # Creation payload for a probabilistic forecast group: a normal
    # forecast definition plus the CDF axis and its constant values.
    axis = AXIS_FIELD
    constant_values = ma.List(
        ma.Float,
        title='Constant Values',
        description=(
            'The variable values or percentiles for the set of '
            'forecasts in the probabilistic forecast. '
            'If axis="x", these values are assumed to have the physical units '
            'of the variable, e.g. W/m^2 if variable="ghi". If axis="y", '
            'these values are assumed to be percentiles with units of percent,'
            ' e.g. 90%'
        ),
        required=True
    )


# Shared description for the single constant-value fields below.
_cv_desc = (
    'The variable value or percentile for the probabilistic forecast. '
    'If axis="x", this value is assumed to have the physical units of '
    'the variable, e.g. W/m^2 if variable="ghi". If axis="y", this '
    'value is assumed to be a percentile with units of percent, e.g. 90%.'
)
@spec.define_schema('CDFForecastMetadata')
class CDFForecastSchema(ForecastSchema):
    # A single constant-value member of a probabilistic forecast group;
    # `parent` is the UUID of the owning group.
    _links = CDF_LINKS_FULL
    forecast_id = ma.UUID()
    axis = AXIS_FIELD
    parent = ma.UUID()
    constant_value = ma.Float(
        title='Constant Value',
        description=_cv_desc
    )


@spec.define_schema('CDFForecastSingle')
class CDFForecastSingleSchema(ma.Schema):
    # Compact (id, constant_value, links) entry used when listing the
    # members of a group.
    forecast_id = ma.UUID()
    constant_value = ma.Float(
        description=_cv_desc)
    _links = CDF_LINKS


@spec.define_schema('CDFForecastGroupMetadata')
class CDFForecastGroupSchema(CDFForecastGroupPostSchema):
    # Full probabilistic forecast group record; constant_values are
    # expanded into nested single-forecast entries.
    _links = ma.Hyperlinks(
        {
            'site': ma.AbsoluteURLFor('sites.single',
                                      site_id='<site_id>'),
            'gaps': ma.AbsoluteURLFor('forecasts.cdf_group_gaps',
                                      forecast_id='<forecast_id>'),
        },
        description="Contains a link to associated endpoints."
    )
    forecast_id = ma.UUID()
    provider = ma.String()
    constant_values = ma.Nested(CDFForecastSingleSchema, many=True)
    created_at = CREATED_AT
    modified_at = MODIFIED_AT


@spec.define_schema('CDFForecastValueGap')
class CDFForecastGapSchema(ValueGapListSchema):
    # Gaps in a single constant-value forecast's series.
    forecast_id = ma.UUID(
        title="Forecast ID",
        description=("UUID of the probabilistic forecast constant value"
                     " associated with this data."))
    _links = CDF_LINKS


@spec.define_schema('CDFGroupForecastValueGap')
class CDFGroupForecastGapSchema(ValueGapListSchema):
    # Gaps aggregated over a whole probabilistic forecast group.
    forecast_id = ma.UUID(
        title="Forecast ID",
        description=("UUID of the probabilistic forecast associated "
                     "with this data."))
# Permissions
@spec.define_schema('UserSchema')
class UserSchema(ma.Schema):
user_id = ma.UUID(
title="User ID",
description="Unique UUID of the User.",
)
email = ma.Email(title="Email")
organization = ma.String(title='Organization')
created_at | |
<gh_stars>100-1000
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shot adaptive optimizer"""
# pylint: disable=too-many-instance-attributes,too-many-arguments,too-many-branches
from scipy.stats import multinomial
import pennylane as qml
from pennylane import numpy as np
from .gradient_descent import GradientDescentOptimizer
class ShotAdaptiveOptimizer(GradientDescentOptimizer):
r"""Optimizer where the shot rate is adaptively calculated using the variances of the parameter-shift
gradient.
By keeping a running average of the parameter-shift gradient and the *variance*
of the parameter-shift gradient, this optimizer frugally distributes a shot
budget across the partial derivatives of each parameter.
In addition, if computing the expectation value of a Hamiltonian using
:class:`~.ExpvalCost`, weighted random sampling can be used to further
distribute the shot budget across the local terms from which the Hamiltonian
is constructed.
.. note::
The shot adaptive optimizer only supports single QNodes or :class:`~.ExpvalCost` objects as
objective functions. The bound device must also be instantiated with a finite number
of shots.
Args:
min_shots (int): The minimum number of shots used to estimate the expectations
of each term in the Hamiltonian. Note that this must be larger than 2 for the variance
of the gradients to be computed.
mu (float): The running average constant :math:`\mu \in [0, 1]`. Used to control how quickly the
number of shots recommended for each gradient component changes.
b (float): Regularization bias. The bias should be kept small, but non-zero.
term_sampling (str): The random sampling algorithm to multinomially distribute the shot budget
across terms in the Hamiltonian expectation value.
Currently, only ``"weighted_random_sampling"`` is supported.
Only takes effect if the objective function provided is an instance of :class:`~.ExpvalCost`.
Set this argument to ``None`` to turn off random sampling of Hamiltonian terms.
stepsize (float): The learning rate :math:`\eta`. The learning rate *must* be such
that :math:`\eta < 2/L = 2/\sum_i|c_i|`, where:
* :math:`L \leq \sum_i|c_i|` is the bound on the `Lipschitz constant
<https://en.wikipedia.org/wiki/Lipschitz_continuity>`__ of the variational quantum
algorithm objective function, and
* :math:`c_i` are the coefficients of the Hamiltonian used in the objective function.
**Example**
For VQE/VQE-like problems, the objective function for the optimizer can be
realized as an :class:`~.ExpvalCost` object, constructed using a :class:`~.Hamiltonian`.
>>> coeffs = [2, 4, -1, 5, 2]
>>> obs = [
... qml.PauliX(1),
... qml.PauliZ(1),
... qml.PauliX(0) @ qml.PauliX(1),
... qml.PauliY(0) @ qml.PauliY(1),
... qml.PauliZ(0) @ qml.PauliZ(1)
... ]
>>> H = qml.Hamiltonian(coeffs, obs)
>>> dev = qml.device("default.qubit", wires=2, shots=100)
>>> cost = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers, H, dev)
Once constructed, the cost function can be passed directly to the
optimizer's ``step`` method. The attributes ``opt.shots_used`` and
``opt.total_shots_used`` can be used to track the number of shots per
iteration, and across the life of the optimizer, respectively.
>>> shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2, n_wires=2)
>>> params = np.random.random(shape)
>>> opt = qml.ShotAdaptiveOptimizer(min_shots=10)
>>> for i in range(60):
... params = opt.step(cost, params)
... print(f"Step {i}: cost = {cost(params):.2f}, shots_used = {opt.total_shots_used}")
Step 0: cost = -5.69, shots_used = 240
Step 1: cost = -2.98, shots_used = 336
Step 2: cost = -4.97, shots_used = 624
Step 3: cost = -5.53, shots_used = 1054
Step 4: cost = -6.50, shots_used = 1798
Step 5: cost = -6.68, shots_used = 2942
Step 6: cost = -6.99, shots_used = 4350
Step 7: cost = -6.97, shots_used = 5814
Step 8: cost = -7.00, shots_used = 7230
Step 9: cost = -6.69, shots_used = 9006
Step 10: cost = -6.85, shots_used = 11286
Step 11: cost = -6.63, shots_used = 14934
Step 12: cost = -6.86, shots_used = 17934
Step 13: cost = -7.19, shots_used = 22950
Step 14: cost = -6.99, shots_used = 28302
Step 15: cost = -7.38, shots_used = 34134
Step 16: cost = -7.66, shots_used = 41022
Step 17: cost = -7.21, shots_used = 48918
Step 18: cost = -7.53, shots_used = 56286
Step 19: cost = -7.46, shots_used = 63822
Step 20: cost = -7.31, shots_used = 72534
Step 21: cost = -7.23, shots_used = 82014
Step 22: cost = -7.31, shots_used = 92838
.. UsageDetails::
The shot adaptive optimizer is based on the iCANS1 optimizer by
`<NAME>. (2020) <https://quantum-journal.org/papers/q-2020-05-11-263/>`__, and works
as follows:
1. The initial step of the optimizer is performed with some specified minimum
number of shots, :math:`s_{min}`, for all partial derivatives.
2. The parameter-shift rule is then used to estimate the gradient :math:`g_i` with :math:`s_i` shots
for each parameter :math:`\theta_i`, parameters, as well as the variances
:math:`v_i` of the estimated gradients.
3. Gradient descent is performed for each parameter :math:`\theta_i`, using
the pre-defined learning rate :math:`\eta` and the gradient information :math:`g_i`:
:math:`\theta_i \rightarrow \theta_i - \eta g_i`.
4. A maximum shot number is set by maximizing the improvement in the expected gain per shot.
For a specific parameter value, the improvement in the expected gain per shot
is then calculated via
.. math::
\gamma_i = \frac{1}{s_i} \left[ \left(\eta - \frac{1}{2} L\eta^2\right)
g_i^2 - \frac{L\eta^2}{2s_i}v_i \right],
where:
* :math:`L \leq \sum_i|c_i|` is the bound on the `Lipschitz constant
<https://en.wikipedia.org/wiki/Lipschitz_continuity>`__ of the variational quantum algorithm objective function,
* :math:`c_i` are the coefficients of the Hamiltonian, and
* :math:`\eta` is the learning rate, and *must* be bound such that :math:`\eta < 2/L`
for the above expression to hold.
5. Finally, the new values of :math:`s_{i+1}` (shots for partial derivative of parameter
:math:`\theta_i`) is given by:
.. math::
s_{i+1} = \frac{2L\eta}{2-L\eta}\left(\frac{v_i}{g_i^2}\right)\propto
\frac{v_i}{g_i^2}.
In addition to the above, to counteract the presence of noise in the system, a
running average of :math:`g_i` and :math:`s_i` (:math:`\chi_i` and :math:`\xi_i` respectively)
are used when computing :math:`\gamma_i` and :math:`s_i`.
For more details, see:
* <NAME>, <NAME>, <NAME>, and <NAME>. "Operator Sampling
for Shot-frugal Optimization in Variational Algorithms." `arXiv:2004.06252
<https://arxiv.org/abs/2004.06252>`__ (2020).
* <NAME>, <NAME>, <NAME>, and <NAME>. "An Adaptive Optimizer
for Measurement-Frugal Variational Algorithms." `Quantum 4, 263
<https://quantum-journal.org/papers/q-2020-05-11-263/>`__ (2020).
"""
def __init__(
    self, min_shots, term_sampling="weighted_random_sampling", mu=0.99, b=1e-6, stepsize=0.07
):
    """Initialize optimizer state; arguments are documented in the class
    docstring. Per-parameter shot counts and running averages start as
    ``None`` and are populated on the first ``step``."""
    self.term_sampling = term_sampling
    # QNode argument indices treated as trainable; filled in during step
    self.trainable_args = set()
    # hyperparameters
    self.min_shots = min_shots
    self.mu = mu  # running average constant
    self.b = b  # regularization bias
    # Lipschitz bound L (sum of |Hamiltonian coefficients|); set lazily
    self.lipschitz = None
    self.shots_used = 0
    """int: number of shots used on the current iteration"""
    self.total_shots_used = 0
    """int: total number of shots used across all iterations"""
    # total number of iterations
    self.k = 0
    # Number of shots per parameter
    self.s = None
    # maximum number of shots required to evaluate across all parameters
    self.max_shots = None
    # Running average of the parameter gradients
    self.chi = None
    # Running average of the variance of the parameter gradients
    self.xi = None
    super().__init__(stepsize=stepsize)
@staticmethod
def weighted_random_sampling(qnodes, coeffs, shots, argnums, *args, **kwargs):
"""Returns an array of length ``shots`` containing single-shot estimates
of the Hamiltonian gradient. The shots are distributed randomly over
the terms in the Hamiltonian, as per a multinomial distribution.
Args:
qnodes (Sequence[.QNode]): Sequence of QNodes, each one when evaluated
returning the corresponding expectation value of a term in the Hamiltonian.
coeffs (Sequence[float]): Sequences of coefficients corresponding to
each term in the Hamiltonian. Must be the same length as ``qnodes``.
shots (int): The number of shots used to estimate the Hamiltonian expectation
value. These shots are distributed over the terms in the Hamiltonian,
as per a Multinomial distribution.
argnums (Sequence[int]): the QNode argument indices which are trainable
*args: Arguments to the QNodes
**kwargs: Keyword arguments to the QNodes
Returns:
array[float]: the single-shot gradients of the Hamiltonian expectation value
"""
# determine the shot probability per term
prob_shots = np.abs(coeffs) / np.sum(np.abs(coeffs))
# construct the multinomial distribution, and sample
# from it to determine how many shots to apply per | |
import pyautogui as pag
from getpass import getpass
import sys
import json
import requests
import keyboard
import time
import random
import webbrowser
from os import system, name
from colorama import Fore, Back, Style
import os
os.system('cls')
def clear():
    """Clear the terminal screen.

    Uses ``cls`` on Windows and ``clear`` elsewhere. The original version
    only handled Windows (``os.name == 'nt'``) and was a no-op on POSIX;
    this generalization keeps the same no-argument interface and return
    value (``None``).
    """
    if name == 'nt':
        _ = system('cls')
    else:
        _ = system('clear')
def add():
    """Mass-add Snapchat friends by name using GUI automation.

    Reads one username per line from ``names.txt``, asks the operator to
    capture screen coordinates by pressing Enter while hovering each UI
    element, then for every name types it into the search bar and clicks
    the first "Add" result.
    """
    try:
        namesFile = open("names.txt", "r")
    except:
        print(Style.BRIGHT + Fore.LIGHTRED_EX +
              "Could not open names.txt - please make sure this file exists")
        exit()
    namesData = namesFile.read()
    names = namesData.strip().split("\n")
    print(Style.BRIGHT + Fore.LIGHTRED_EX + "Retrieved names from names.txt\n")
    print(Style.BRIGHT + Fore.LIGHTRED_EX +
          ":: Click enter when your mouse is over the 'Add Friend Button' ::")
    # NOTE(review): keyboard.read_key() blocks for ONE key press; if the
    # key is not Enter the capture below is silently skipped and a later
    # NameError follows. Only the last capture retries in a loop —
    # confirm whether the earlier ones should retry too.
    if keyboard.read_key() == "enter":
        addFriend = pag.position()  # captured but never used below
        print(f"Cords captured: {addFriend}")
    time.sleep(1)
    print(":: Click enter when your mouse is over the 'Close Button' ::")
    if keyboard.read_key() == "enter":
        close = pag.position()  # captured but never used below
        print(f"Cords captured: {close}")
    time.sleep(1)
    print(":: Click enter when your mouse is over the 'Find Friend Search Bar' ::")
    if keyboard.read_key() == "enter":
        searchBar = pag.position()
        print(f"Cords captured: {searchBar}")
    time.sleep(1)
    print(":: Click enter when your mouse is over the 'Clear Friend Bar' ::")
    if keyboard.read_key() == "enter":
        clearText = pag.position()
        print(f"Cords captured: {clearText}")
    time.sleep(1)
    print(":: Click enter when your mouse is over the 'First Add Button' ::")
    while True:
        if keyboard.read_key() == "enter":
            firstAdd = pag.position()
            print(f"Cords captured: {firstAdd}")
            break
        else:
            continue
    clear()

    def adder(name):
        """Type *name* into the search bar and click the first Add result."""
        # move to search bar
        pag.moveTo(searchBar[0], searchBar[1], 0.5)
        pag.click()
        time.sleep(2)
        # write out name
        pag.typewrite(name, interval=0.10)
        time.sleep(2)
        # move to first add, then add
        # (nine spaced clicks; presumably to survive slow UI responses)
        pag.moveTo(firstAdd[0], firstAdd[1], 0.5)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        pag.click()
        time.sleep(2)
        # clears search bar
        pag.moveTo(clearText[0], clearText[1], 0.5)
        pag.click()

    # NOTE(review): the message says "randomly" but iteration is
    # sequential over names — confirm which behavior is intended.
    print("will cycle through your name list randomly\n\n")
    time.sleep(3)
    for name in names:
        print("Doing: {0}".format(name))
        adder(name)
        time.sleep(1)
def sendsnap():
    """Snap-score 'boost' routine.

    Interactively captures the screen coordinates of the Snapchat UI buttons,
    then records and sends one snap per cycle for the requested number of
    cycles. Blocks on keyboard/mouse input throughout; exits the process when
    done.
    """
    # --- ask how many snaps (cycles) to send ---
    print(Style.BRIGHT + Fore.LIGHTRED_EX +
          'How many cycles would you like to use? (1200 score per cycle): ')
    print(Fore.CYAN + '\n\n\n\n\nSnapify> ', end='')
    amount = float(input(Fore.WHITE + ''))
    clear()
    # --- ask whether to mute the microphone each cycle (re-prompt on bad input) ---
    while True:
        print(Style.BRIGHT + Fore.LIGHTRED_EX +
              'Would you like to mute your microphone?(Note: this will make the program slower)(Yes/No) ')
        print(Fore.CYAN + '\n\n\n\n\nSnapify> ', end='')
        mute = input(Fore.WHITE + '').lower()
        clear()
        if mute == 'yes':
            break
        elif mute == 'no':
            break
        else:
            # getpass is used as a "press enter to continue" pause.
            getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
                    'Error. You inserted an invalid option. Please try again.')
            clear()
            continue
    # --- capture screen coordinates: user hovers each button and presses enter ---
    print(Style.BRIGHT + Fore.LIGHTRED_EX +
          ":: Click enter when your mouse is over the 'Camera Button' ::")
    if keyboard.read_key() == "enter":
        CameraButton = pag.position()
        print(f"Cords captured: {CameraButton}")
        time.sleep(1)
    if mute == 'yes':
        # Extra capture step: the mute button only exists in this flow.
        print(":: Take a 3 second video then click enter when your mouse is over the 'Mute Button' ::")
        if keyboard.read_key() == "enter":
            MuteButton = pag.position()
            print(f"Cords captured: {MuteButton}")
            time.sleep(1)
        print(":: Click on the 'Mute Button' then click enter when your mouse is over the 'Send To Button' ::")
        if keyboard.read_key() == "enter":
            SendToButton = pag.position()
            print(f"Cords captured: {SendToButton}")
            time.sleep(1)
    elif mute == 'no':
        print(":: Take a picture then click enter when your mouse is over the 'Send To Button' ::")
        if keyboard.read_key() == "enter":
            SendToButton = pag.position()
            print(f"Cords captured: {SendToButton}")
            time.sleep(1)
    print(":: Click on the 'Send To Button' then click enter when your mouse is over the 'Last Snap Button' ::")
    if keyboard.read_key() == "enter":
        LastSnapButton = pag.position()
        print(f"Cords captured: {LastSnapButton}")
        time.sleep(1)
    print(":: Click enter when your mouse is over the 'Send Snap Arrow' ::")
    if keyboard.read_key() == "enter":
        SendSnapArrow = pag.position()
        print(f"Cords captured: {SendSnapArrow}")
        time.sleep(1)
    print(":: Click enter when your mouse is over the 'Camera Logo at the bottom center' ::")
    if keyboard.read_key() == "enter":
        CameraLogo = pag.position()
        print(f"Cords captured: {CameraLogo}")
    # countdown screen: give the user time to navigate back to the home screen
    TimeToHomePage = 15
    while TimeToHomePage >= 0:
        clear()
        print(
            f'You have {TimeToHomePage} seconds to go back to the snapchat homescreen before the boost begins.')
        time.sleep(1)
        TimeToHomePage -= 1
    clear()
    print(
        f"Started boosting! Please don't turn off your phone or close this window while it's running. This will run for {amount} cycle('s)")
    print('\n\n\n\n\n\n')
    # --- main boost loop: one recorded snap sent per cycle ---
    while amount > 0:
        # move to camera button and record for one minute (hold mouse down)
        pag.moveTo(CameraButton[0], CameraButton[1], 2)
        pag.mouseDown()
        time.sleep(63)
        pag.mouseUp()
        # if mute click yes
        # NOTE(review): MuteButton is only captured when mute == 'yes', so
        # this branch matches the capture flow above.
        if mute == 'yes':
            pag.moveTo(MuteButton[0], MuteButton[1], 2)
            pag.click()
        # move to send to button and click
        pag.moveTo(SendToButton[0], SendToButton[1], 2)
        pag.click()
        # move to last snap and click
        pag.moveTo(LastSnapButton[0], LastSnapButton[1], 2)
        pag.click()
        # move to send snap button and click
        pag.moveTo(SendToButton[0], SendToButton[1], 2)
        pag.click()
        # move to send snap arrow and click
        pag.moveTo(SendSnapArrow[0], SendSnapArrow[1], 2)
        pag.click()
        # move to camera logo and click (back to camera for the next cycle)
        pag.moveTo(CameraLogo[0], CameraLogo[1], 2)
        pag.click()
        amount -= 1
        print(f'Finished one cycle. {amount} left to go.')
    clear()
    print(Fore.GREEN + 'Finished Boosting. Thanks for using our tool.')
    print(Fore.MAGENTA + '\n\nPlease check us out at:\n\nQuessts: https://cracked.to/Quessts\nANG: https://cracked.to/ANG', end='')
    getpass(Fore.WHITE + '')
    sys.exit()
def removefriends():
    """Capture the four UI coordinates needed to remove a recently-added
    friend, then loop forever removing the top entry of the list.

    Runs until the process is killed; blocks on keyboard/mouse input while
    capturing coordinates.
    """
    # --- capture screen coordinates: user hovers each button and presses enter ---
    print(Style.BRIGHT + Fore.LIGHTRED_EX +
          ":: Head over to the recently added friends section then click enter when your mouse is over the first users 'icon' ::")
    if keyboard.read_key() == "enter":
        IconButton = pag.position()
        print(f"Cords captured: {IconButton}")
        time.sleep(1)
    print(":: Click on the 'icon' then click enter when your mouse is over the '3 dots' on the top right corner ::")
    if keyboard.read_key() == "enter":
        ThreeDotsButton = pag.position()
        print(f"Cords captured: {ThreeDotsButton}")
        time.sleep(1)
    print(":: Click on the '3 dots' then click enter when your mouse is over the 'remove friend' button ::")
    if keyboard.read_key() == "enter":
        RemoveFriendButton = pag.position()
        print(f"Cords captured: {RemoveFriendButton}")
        time.sleep(1)
    print(":: Click on the 'remove friend' button then click enter when your mouse is over the 'confirm remove friend' button ::")
    if keyboard.read_key() == "enter":
        ConfirmRemoveFriendButton = pag.position()
        print(f"Cords captured: {ConfirmRemoveFriendButton}")
        time.sleep(1)
    clear()
    # countdown screen: give the user time to navigate back (prints 15..0)
    for TimeToRecentlyAddedPage in range(15, -1, -1):
        clear()
        print(
            f'You have {TimeToRecentlyAddedPage} seconds to go back to the snapchat recently added menu before the bot starts.')
        time.sleep(1)
    clear()
    # fixed typos in the original message ("proccess"/"contniue")
    print('Started removing friends! This process will continue forever and has to be stopped manually.')
    while True:
        # Click through icon -> 3 dots -> remove -> confirm, in that order.
        for pos in (IconButton, ThreeDotsButton,
                    RemoveFriendButton, ConfirmRemoveFriendButton):
            pag.moveTo(pos[0], pos[1], 2)
            pag.click()
        # let the UI settle before removing the next friend
        time.sleep(6)
while True:
print(Style.BRIGHT + Fore.LIGHTRED_EX + """
%( #%
%### ###%
%%%%##### @@@/@@@/@@@, #####%##% ███████╗███╗ ██╗ █████╗ ██████╗ ██╗███████╗██╗ ██╗
%&%%%#%&@ @&%%#%%&% ██╔════╝████╗ ██║██╔══██╗██╔══██╗██║██╔════╝╚██╗ ██╔╝
%&&% %%%&% ███████╗██╔██╗ ██║███████║██████╔╝██║█████╗ ╚████╔╝
@@ @% ╚════██║██║╚██╗██║██╔══██║██╔═══╝ ██║██╔══╝ ╚██╔╝
@@ @& ███████║██║ ╚████║██║ ██║██║ ██║██║ ██║
*@/ @@ @* @@ ╚══════╝╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝
,@@ &@@
/ @@ #1 Free snapchat booster Developed by https://cracked.to/Quessts
@@ @% and partially by https://cracked.to/ANG
@@ &@#
%@@@* @@@@* V1.5 | need help? https://discord.link/Snapify
&@@@@( @@@@@
@@@@@@@ #@@@@@@@
@@@#@@(##@@(
""")
print(
'Select module:\n\n\n\n1) Add Friends\n\n2) Boost\n\n3) Remove Recently Added Friends\n\n\n ')
print(Fore.CYAN + 'Snapify> ', end='')
option = input(Fore.WHITE + '')
if option == '1':
while True:
clear()
print(Style.BRIGHT + Fore.LIGHTRED_EX +
"If you know how to use this type 'yes' if you don't type 'no'\n\n")
print(Fore.CYAN + 'Snapify> ', end='')
watchvid = input(Fore.WHITE + '').lower()
if watchvid == 'yes':
clear()
add()
elif watchvid == 'no':
clear()
webbrowser.open_new('https://youtu.be/uA4nNGV_jr8')
add()
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
clear()
continue
elif option == '2':
while True:
clear()
print(Style.BRIGHT + Fore.LIGHTRED_EX +
"If you know how to use this type 'yes' if you don't type 'no'\n\n")
print(Fore.CYAN + 'Snapify> ', end='')
watchvid = input(Fore.WHITE + '').lower()
if watchvid == 'yes':
clear()
sendsnap()
elif watchvid == 'no':
clear()
webbrowser.open_new('https://youtu.be/uA4nNGV_jr8')
sendsnap()
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
clear()
continue
elif option == '3':
while True:
clear()
print(Style.BRIGHT + Fore.LIGHTRED_EX +
"If you know how to use this type 'yes' if you don't type 'no'\n\n")
print(Fore.CYAN + 'Snapify> ', end='')
watchvid = input(Fore.WHITE + '').lower()
if watchvid == 'yes':
clear()
removefriends()
elif watchvid == 'no':
clear()
webbrowser.open_new('https://youtu.be/uA4nNGV_jr8')
removefriends()
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
clear()
continue
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please | |
inp, s1,s2, reuse, use_batch_norm, filter_size=3):
global rbId
# convolutions of resnet block
if dataDimension == 2:
filter = [filter_size,filter_size]
filter1 = [1,1]
elif dataDimension == 3:
filter = [filter_size,filter_size,filter_size]
filter1 = [1,1,1]
gc1,_ = gan.convolutional_layer( s1, filter, tf.nn.relu, stride=[1], name="g_cA%d"%rbId, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
gc2,_ = gan.convolutional_layer( s2, filter, None , stride=[1], name="g_cB%d"%rbId, reuse=reuse, batch_norm=use_batch_norm, train=train) #->8,128
# shortcut connection
gs1,_ = gan.convolutional_layer(s2, filter1 , None , stride=[1], name="g_s%d"%rbId, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
resUnit1 = tf.nn.relu( tf.add( gc2, gs1 ) )
rbId += 1
return resUnit1
def gen_resnet(_in, reuse=False, use_batch_norm=False, train=None):
    """Generator network: two max-depool upsampling steps followed by four
    residual blocks ("resize-resnett3-deep").

    _in: flat batch of low-res input tiles, reshaped to NHWC (2D) or NDHWC (3D)
    reuse: tf variable reuse flag
    use_batch_norm: enable batch norm inside the residual blocks
    train: tf bool placeholder, required when use_batch_norm is set
    Returns the generated output reshaped to [-1, n_output].
    """
    global rbId
    print("\n\tGenerator (resize-resnett3-deep)")
    with tf.variable_scope("generator", reuse=reuse) as scope:
        if dataDimension == 2:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, n_inputChannels]) #NHWC
            patchShape = [2,2]
        elif dataDimension == 3:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, tileSizeLow, n_inputChannels]) #NDHWC
            patchShape = [2,2,2]
        # reset the global residual-block counter used for layer naming
        rbId = 0
        gan = GAN(_in)
        # two depool steps upsample the input before the residual blocks
        gan.max_depool()
        inp = gan.max_depool()
        ru1 = resBlock(gan, inp, n_inputChannels*2,n_inputChannels*8, reuse, use_batch_norm,5)
        ru2 = resBlock(gan, ru1, 128, 128, reuse, use_batch_norm,5)
        inRu3 = ru2
        ru3 = resBlock(gan, inRu3, 32, 8, reuse, use_batch_norm,5)
        # last block: single output channel, batch norm disabled
        ru4 = resBlock(gan, ru3, 2, 1, reuse, False,5)
        resF = tf.reshape( ru4, shape=[-1, n_output] )
        print("\tDOFs: %d , %f m " % ( gan.getDOFs() , gan.getDOFs()/1000000.) )
        return resF
############################################discriminator network###############################################################
def disc_binclass(in_low, in_high, reuse=False, use_batch_norm=False, train=None):
    """Conditional binary-classifier discriminator.

    in_low: low res reference input, same as generator input (condition)
    in_high: real or generated high res input to classify
    reuse: variable reuse
    use_batch_norm: bool, if true batch norm is used in all but the first conv layers
    train: if use_batch_norm, tf bool placeholder
    Returns the logits plus the four intermediate conv activations
    (d1..d4), which are used for the layer-feature loss.
    """
    print("\n\tDiscriminator (conditional binary classifier)")
    with tf.variable_scope("discriminator", reuse=reuse):
        if dataDimension == 2:
            # keep only the first channel of the low-res condition,
            # then upsample it to the high-res tile size
            shape = tf.shape(in_low)
            in_low = tf.slice(in_low,[0,0],[shape[0],int(n_input/n_inputChannels)])
            in_low = GAN(tf.reshape(in_low, shape=[-1, tileSizeLow, tileSizeLow, 1])).max_depool(height_factor = upRes,width_factor=upRes) #NHWC
            in_high = tf.reshape(in_high, shape=[-1, tileSizeHigh, tileSizeHigh, 1])
            filter=[4,4]
            stride = [2]
            stride2 = [2]
        elif dataDimension == 3:
            shape = tf.shape(in_low)
            in_low = tf.slice(in_low,[0,0],[shape[0],int(n_input/n_inputChannels)])
            in_low = GAN(tf.reshape(in_low, shape=[-1, tileSizeLow, tileSizeLow, tileSizeLow, 1])).max_depool(depth_factor = upRes,height_factor = upRes,width_factor = upRes) #NDHWC
            in_high = tf.reshape(in_high, shape=[-1, tileSizeHigh, tileSizeHigh, tileSizeHigh, 1]) # dim D is not upscaled
            filter=[4,4,4]
            stride = [2,2]
            stride2 = [2]
        #merge in_low and in_high to [-1, tileSizeHigh, tileSizeHigh, 2]
        gan = GAN(tf.concat([in_low, in_high], axis=-1), bn_decay=bn_decay) #64
        # four conv layers; the first one never uses batch norm
        d1,_ = gan.convolutional_layer(32, filter, lrelu, stride=stride2, name="d_c1", reuse=reuse) #32
        d2,_ = gan.convolutional_layer(64, filter, lrelu, stride=stride2, name="d_c2", reuse=reuse, batch_norm=use_batch_norm, train=train) #64
        d3,_ = gan.convolutional_layer(128, filter, lrelu, stride=stride, name="d_c3", reuse=reuse, batch_norm=use_batch_norm, train=train) #128
        d4,_ = gan.convolutional_layer(256, filter, lrelu, stride=[1], name="d_c4", reuse=reuse, batch_norm=use_batch_norm, train=train) #256
        shape=gan.flatten()
        # single-logit output; sigmoid is applied by the loss, not here
        gan.fully_connected_layer(1, None, name="d_l5")
        print("\tDOFs: %d " % gan.getDOFs())
        return gan.y(), d1, d2, d3, d4
############################################ Tempo discriminator network ############################################################
def disc_binclass_cond_tempo(in_high, n_t_channels=3, reuse=False, use_batch_norm=False, train=None):
    """Temporal discriminator: classifies a stack of nearby frames.

    NO in_low: low res reference input, same as generator input (no condition)
    in_high: real or generated high res input to classify, shape should be batch, dim_z, dim_y, dim_x, channels
    n_t_channels: number of nearby frames packed into the channel dimension
    reuse: variable reuse
    use_batch_norm: bool, if true batch norm is used in all but the first conv layers
    train: if use_batch_norm, tf bool placeholder
    Returns only the logits (no intermediate activations).
    """
    print("\n\tDiscriminator for Tempo (conditional binary classifier)")
    print("\n\tTempo, nearby frames packed as channels, number %d" % n_t_channels)
    with tf.variable_scope("discriminatorTempo", reuse=reuse):
        if dataDimension == 2:
            in_high = tf.reshape(in_high, shape=[-1, tileSizeHigh, tileSizeHigh, n_t_channels])
            filter=[4,4]
            stride = [2]
            stride2 = [2]
        elif dataDimension == 3:
            in_high = tf.reshape(in_high, shape=[-1, tileSizeHigh, tileSizeHigh, tileSizeHigh, n_t_channels]) # dim D is not upscaled
            filter=[4,4,4]
            stride = [2,2]
            stride2 = [2]
        # same architecture as the spatial discriminator, but without the
        # low-res condition input ("t_" name prefix instead of "d_")
        gan = GAN(in_high, bn_decay=bn_decay) # 64
        t1, _ = gan.convolutional_layer(32, filter, lrelu, stride=stride2, name="t_c1", reuse=reuse) # 32
        t2, _ = gan.convolutional_layer(64, filter, lrelu, stride=stride2, name="t_c2", reuse=reuse,
                                        batch_norm=use_batch_norm, train=train) # 64
        t3, _ = gan.convolutional_layer(128, filter, lrelu, stride=stride, name="t_c3", reuse=reuse,
                                        batch_norm=use_batch_norm, train=train) # 128
        t4, _ = gan.convolutional_layer(256, filter, lrelu, stride=[1], name="t_c4", reuse=reuse,
                                        batch_norm=use_batch_norm, train=train) # 256
        shape = gan.flatten()
        # single-logit output; sigmoid is applied by the loss, not here
        gan.fully_connected_layer(1, None, name="t_l5")
        print("\tDOFs: %d " % gan.getDOFs())
        return gan.y()
############################################gen_test###############################################################
def gen_test(_in, reuse=False, use_batch_norm=False, train=None):
    """Minimal generator for testing: two depool + deconv stages only.

    Same interface as the other generator models; batch norm is disabled
    in both deconv layers regardless of use_batch_norm.
    """
    global rbId
    print("\n\tGenerator-test")
    with tf.variable_scope("generator-test", reuse=reuse) as scope:
        if dataDimension == 2:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, n_inputChannels]) #NHWC
            patchShape = [2,2]
        elif dataDimension == 3:
            _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, tileSizeLow, n_inputChannels]) #NDHWC
            patchShape = [2,2,2]
        rbId = 0
        gan = GAN(_in)
        gan.max_depool()
        i2np,_ = gan.deconvolutional_layer(32, patchShape, None, stride=[1,1], name="g_D1", reuse=reuse, batch_norm=False, train=train, init_mean=0.99)
        gan.max_depool()
        # final deconv down to a single output channel
        inp,_ = gan.deconvolutional_layer(1 , patchShape, None, stride=[1,1], name="g_D2", reuse=reuse, batch_norm=False, train=train, init_mean=0.99)
        return tf.reshape( inp, shape=[-1, n_output] )
############################################disc_test###############################################################
def disc_test(in_low, in_high, reuse=False, use_batch_norm=False, train=None):
    """Minimal one-conv-layer discriminator for testing.

    Mirrors disc_binclass's interface; d2..d4 are constant dummy tensors so
    the 5-tuple return arity (and the layer-feature loss wiring) still works.
    """
    print("\n\tDiscriminator-test")
    with tf.variable_scope("discriminator_test", reuse=reuse):
        if dataDimension == 2:
            # keep only the first channel of the low-res condition,
            # then upsample it to the high-res tile size
            shape = tf.shape(in_low)
            in_low = tf.slice(in_low,[0,0],[shape[0],int(n_input/n_inputChannels)])
            in_low = GAN(tf.reshape(in_low, shape=[-1, tileSizeLow, tileSizeLow, 1])).max_depool(height_factor = upRes,width_factor = upRes) #NHWC
            in_high = tf.reshape(in_high, shape=[-1, tileSizeHigh, tileSizeHigh, 1])
            filter=[4,4]
            stride2 = [2]
        elif dataDimension == 3:
            shape = tf.shape(in_low)
            in_low = tf.slice(in_low,[0,0],[shape[0],int(n_input/n_inputChannels)])
            in_low = GAN(tf.reshape(in_low, shape=[-1, tileSizeLow, tileSizeLow, tileSizeLow, 1])).max_depool(depth_factor = upRes,height_factor = upRes,width_factor = upRes) #NDHWC
            in_high = tf.reshape(in_high, shape=[-1, tileSizeHigh, tileSizeHigh, tileSizeHigh, 1]) # dim D is not upscaled
            filter=[4,4,4]
            stride2 = [2]
        #merge in_low and in_high to [-1, tileSizeHigh, tileSizeHigh, 2]
        gan = GAN(tf.concat([in_low, in_high], axis=-1), bn_decay=bn_decay) #64
        d1,_ = gan.convolutional_layer(32, filter, lrelu, stride=stride2, name="d_c1", reuse=reuse) #32
        shape=gan.flatten()
        gan.fully_connected_layer(1, None, name="d_l5")
        # placeholder activations standing in for the missing conv layers
        if dataDimension == 2:
            d2 = tf.constant(1., shape = [batch_size, tileSizeLow,tileSizeLow,64])
            d3 = tf.constant(1., shape = [batch_size, int(tileSizeLow/2),int(tileSizeLow/2),128])
            d4 = tf.constant(1., shape = [batch_size, int(tileSizeLow/2),int(tileSizeLow/2),256])
        elif dataDimension == 3:
            d2 = tf.constant(1., shape = [batch_size, tileSizeLow,tileSizeLow,tileSizeLow,64])
            d3 = tf.constant(1., shape = [batch_size, int(tileSizeLow/2),int(tileSizeLow/2),int(tileSizeLow/2),128])
            d4 = tf.constant(1., shape = [batch_size, int(tileSizeLow/2),int(tileSizeLow/2),int(tileSizeLow/2),256])
        print("\tDOFs: %d " % gan.getDOFs())
        return gan.y(), d1, d2, d3, d4
#change used models for gen and disc here #other models in NNmodels.py
# Model functions are selected by name from this module's namespace, so the
# config strings genModel/discModel must match one of the defs above.
gen_model = locals()[genModel]
disc_model = locals()[discModel]
disc_time_model = disc_binclass_cond_tempo # tempo dis currently fixed
#set up GAN structure
bn=batch_norm
#training or testing for batch norm
train = tf.placeholder(tf.bool)
if not outputOnly: #setup for training
    gen_part = gen_model(x, use_batch_norm=bn, train=train)
    if use_spatialdisc:
        # discriminator on real high-res data, and (with reused variables)
        # on the generator output
        disc, dy1, dy2, dy3, dy4 = disc_model(x_disc, y, use_batch_norm=bn, train=train)
        gen, gy1, gy2, gy3, gy4 = disc_model(x_disc, gen_part, reuse=True, use_batch_norm=bn, train=train)
    # also expose the generator output for validation images, if requested
    if genValiImg > -1: sampler = gen_part
else: #setup for generating output with trained model
    sampler = gen_model(x, use_batch_norm=bn, train=False)
sys.stdout.flush()
# ---------------------------------------------
# TENSORFLOW SETUP
# build the tensorflow graph for tensor(value) re-sampling (at pos)
# value shape (batch, ..., res_x2, res_x1, channels)
# pos shape (batch, ..., res_x2, res_x1, dim)
def tensorResample(value, pos, name='Resample'):
    """Differentiably resample `value` at fractional positions `pos` using
    linear interpolation (bilinear in 2D, trilinear in 3D).

    value shape (batch, ..., res_x2, res_x1, channels)
    pos shape (batch, ..., res_x2, res_x1, dim)
    Coordinates are cell-centred: the 0.5 offsets below map position p to the
    grid cells surrounding it. Out-of-range indices are clamped to the border.
    """
    with tf.name_scope(name) as scope:
        pos_shape = pos.get_shape().as_list()
        dim = len(pos_shape) - 2 # batch and channels are ignored
        assert (dim == pos_shape[-1])
        # integer corner indices below/above each sample position
        floors = tf.cast(tf.floor(pos - 0.5), tf.int32)
        ceils = floors + 1
        # clamp min
        floors = tf.maximum(floors, tf.zeros_like(floors))
        ceils = tf.maximum(ceils, tf.zeros_like(ceils))
        # clamp max
        floors = tf.minimum(floors, tf.constant(value.get_shape().as_list()[1:dim + 1], dtype=tf.int32) - 1)
        ceils = tf.minimum(ceils, tf.constant(value.get_shape().as_list()[1:dim + 1], dtype=tf.int32) - 1)
        _broadcaster = tf.ones_like(ceils)
        cell_value_list = []
        cell_weight_list = []
        # Visit all 2**dim corners of the surrounding cell; bit i of axis_x
        # selects ceil (1) or floor (0) along axis i.
        for axis_x in range(int(pow(2, dim))): # 3d, 0-7; 2d, 0-3;...
            condition_list = [bool(axis_x & int(pow(2, i))) for i in range(dim)]
            condition_ = (_broadcaster > 0) & condition_list
            axis_idx = tf.cast(
                tf.where(condition_, ceils, floors),
                tf.int32)
            # only support linear interpolation...
            # per-axis weight = 1 - distance to the chosen corner; product over axes
            axis_wei = 1.0 - tf.abs((pos - 0.5) - tf.cast(axis_idx, tf.float32)) # shape (..., res_x2, res_x1, dim)
            axis_wei = tf.reduce_prod(axis_wei, axis=-1, keep_dims=True)
            cell_weight_list.append(axis_wei) # single scalar(..., res_x2, res_x1, 1)
            # prepend the batch index (0,1,2,... via exclusive cumsum) so
            # gather_nd can address (batch, idx...) directly
            first_idx = tf.ones_like(axis_wei, dtype=tf.int32)
            first_idx = tf.cumsum(first_idx, axis=0, exclusive=True)
            cell_value_list.append(tf.concat([first_idx, axis_idx], -1))
        # weighted sum of the gathered corner values
        values_new = tf.gather_nd(value, cell_value_list[0]) * cell_weight_list[
            0] # broadcasting used, shape (..., res_x2, res_x1, channels )
        for cell_idx in range(1, len(cell_value_list)):
            values_new = values_new + tf.gather_nd(value, cell_value_list[cell_idx]) * cell_weight_list[cell_idx]
        return values_new # shape (..., res_x2, res_x1, channels)
if not outputOnly:
#for discriminator [0,1] output
if use_spatialdisc:
disc_sigmoid = tf.reduce_mean(tf.nn.sigmoid(disc))
gen_sigmoid = tf.reduce_mean(tf.nn.sigmoid(gen))
# loss of the discriminator with real input
disc_loss_disc = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc, labels=tf.ones_like(disc)))
#loss of the discriminator with input from generator
disc_loss_gen = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=gen, labels=tf.zeros_like(gen)))
disc_loss_layer = k2_l1*tf.reduce_mean(tf.nn.l2_loss(dy1 - gy1)) + k2_l2*tf.reduce_mean(tf.nn.l2_loss(dy2 - gy2)) + k2_l3*tf.reduce_mean(tf.nn.l2_loss(dy3 - gy3)) + k2_l4*tf.reduce_mean(tf.nn.l2_loss(dy4 - gy4))
disc_loss = disc_loss_disc * weight_dld + disc_loss_gen
#loss of the generator
gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=gen, labels=tf.ones_like(gen)))
else:
gen_loss = tf.zeros([1])
disc_loss_layer = tf.zeros([1])
#additional generator losses
gen_l2_loss = tf.nn.l2_loss(y - gen_part)
gen_l1_loss = tf.reduce_mean(tf.abs(y - gen_part)) #use mean to normalize w.r.t. output dims. tf.reduce_sum(tf.abs(y - gen_part))
#uses sigmoid cross entropy and l1 - see cGAN paper
gen_loss_complete = gen_loss + gen_l1_loss*kk + disc_loss_layer*kk2
# set up decaying learning rate, if enabled
lr_global_step = tf.Variable(0, trainable=False)
learning_rate_scalar = learning_rate
if decayLR:
learning_rate = tf.train.polynomial_decay(learning_rate, lr_global_step, trainingIters//2, learning_rate_scalar*0.05, power=1.1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
gen_update_ops = update_ops[:]
ori_gen_update_ops = update_ops[:]
#variables to be used in the different otimization steps
vars = tf.trainable_variables()
g_var = [var for var in vars if "g_" in var.name]
if use_spatialdisc:
dis_update_ops = update_ops[:]
d_var = [var for var in vars if "d_" in var.name]
if (useTempoD or useTempoL2):# temporal loss here
disT_update_ops = []
ori_gen_loss_complete = gen_loss_complete
# currently, the update_op gathering is not too nice and very sensitive to the operation order.
# TODO: make it flexible!
n_t = 3
device_str = '/device:GPU:0'
if(dataDimension == 3): # have to use a second GPU!
device_str = '/device:GPU:1'
with tf.device(device_str):
x_t = tf.placeholder(tf.float32, shape=[None, n_input])
gen_part_t = gen_model(x_t, reuse=True, use_batch_norm=bn, train=train)
if(ADV_flag):
y_pos = tf.placeholder(tf.float32, shape=[None, n_output * dataDimension])
if dataDimension == 2:
gen_part_t_shape = tf.reshape(gen_part_t, shape=[-1, tileSizeHigh, tileSizeHigh, 1])
pos_array = tf.reshape(y_pos, shape=[-1, tileSizeHigh, tileSizeHigh, 2])
elif dataDimension == 3: # check in 3D
gen_part_t_shape = tf.reshape(gen_part_t, shape=[-1, tileSizeHigh, tileSizeHigh, tileSizeHigh, 1])
pos_array = tf.reshape(y_pos, shape=[-1, tileSizeHigh, tileSizeHigh, tileSizeHigh, 3])
gen_part_t = tensorResample(gen_part_t_shape, pos_array)
gen_part_t = tf.reshape(gen_part_t, shape = [-1, n_t, n_output])
gen_part_t = tf.transpose(gen_part_t, perm=[0, 2, 1]) # batch, n_output, channels
if (useTempoL2): # l2 tempo_loss
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
for update_op in update_ops:
if ("/g_" in update_op.name) and ("generator" in update_op.name) and (not ( update_op | |
y, z):
a = np.add(x, y)
return np.add(np.subtract(a, z), a)
exercise(input(), expected(), ref, rands((5, 7), 3))
def test_annotate_expr():
    """Mixed on_device annotations (GPU then CPU) force the pass to insert a
    device_copy and record param/result scopes on @main."""
    metatable = {"SEScope": [CPU, GPU]}

    # NOTE: nested helper shadows the builtin `input`; local to this test.
    def input():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, se_scope=meta[SEScope][1]);
              %2 = subtract(%1, %z);
              on_device(%2, se_scope=meta[SEScope][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32],
                      param_se_scopes=[meta[SEScope][1], meta[SEScope][1], meta[SEScope][0]],
                      result_se_scope=meta[SEScope][0]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, se_scope=meta[SEScope][1], is_fixed=True);
              %2 = device_copy(%1, src_se_scope=meta[SEScope][1], dst_se_scope=meta[SEScope][0]);
              subtract(%2, %z)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # numpy reference implementation for result checking
    def ref(x, y, z):
        return np.subtract(np.add(x, y), z)

    exercise(input(), expected(), ref, rands((5, 7), 3))
def test_annotate_all():
    """All annotations agree (CPU everywhere), so the pass removes them and
    inserts no device_copy."""
    metatable = {"SEScope": [CPU, GPU]}

    def input():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, se_scope=meta[SEScope][0]);
              %2 = subtract(%1, %z);
              on_device(%2, se_scope=meta[SEScope][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32],
                      param_se_scopes=[meta[SEScope][0], meta[SEScope][0], meta[SEScope][0]],
                      result_se_scope=meta[SEScope][0]) {
              %0 = add(%x, %y);
              subtract(%0, %z)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # numpy reference implementation for result checking
    def ref(x, y, z):
        return np.subtract(np.add(x, y), z)

    exercise(input(), expected(), ref, rands((5, 7), 3))
def test_conv_network():
    r"""The network and devices are as follows:
    data1     data2   <--- CPU
      |         |
    conv2d    conv2d  <--- CPU
       \       /
        \     /
          add         <--- GPU
           |
         conv2d       <--- CPU
           |
        <result>      <--- CPU
    """
    metatable = {"SEScope": [CPU, GPU]}

    def input():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%data1: Tensor[(1, 64, 56, 56), float32], %data2: Tensor[(1, 64, 56, 56), float32],
                      %weight: Tensor[(64, 64, 3, 3), float32]) {
              %0 = nn.conv2d(%data1, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %1 = nn.conv2d(%data2, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %2 = on_device(%0, se_scope=meta[SEScope][0]);
              %3 = on_device(%1, se_scope=meta[SEScope][0]);
              %4 = add(%2, %3);
              %5 = on_device(%4, se_scope=meta[SEScope][1]);
              %6 = nn.conv2d(%5, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              on_device(%6, se_scope=meta[SEScope][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%data1: Tensor[(1, 64, 56, 56), float32], %data2: Tensor[(1, 64, 56, 56), float32],
                      %weight: Tensor[(64, 64, 3, 3), float32],
                      param_se_scopes=[meta[SEScope][0], meta[SEScope][0], meta[SEScope][0]],
                      result_se_scope=meta[SEScope][0]) {
              %0 = nn.conv2d(%data1, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %1 = on_device(%0, se_scope=meta[SEScope][0], is_fixed=True);
              %2 = nn.conv2d(%data2, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %3 = on_device(%2, se_scope=meta[SEScope][0], is_fixed=True);
              %4 = device_copy(%1, src_se_scope=meta[SEScope][0], dst_se_scope=meta[SEScope][1]);
              %5 = device_copy(%3, src_se_scope=meta[SEScope][0], dst_se_scope=meta[SEScope][1]);
              %6 = add(%4, %5);
              %7 = on_device(%6, se_scope=meta[SEScope][1], is_fixed=True);
              %8 = device_copy(%7, src_se_scope=meta[SEScope][1], dst_se_scope=meta[SEScope][0]);
              nn.conv2d(%8, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # Don't try to execute, we don't have a reference conv2d
    exercise(input(), expected(), None, None)
def test_tuple_get_item():
    """Tuple projections: the device_copy must land after each projection,
    not before the whole tuple."""
    metatable = {"SEScope": [CPU, GPU]}

    # Note that the device copy should be placed after projection rather than before. This is handled by
    # a heuristic in the pass.
    def input():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(3, 3, 4), float32]) {
              let %t = split(%x, indices_or_sections=3);
              %0 = on_device(%t, se_scope=meta[SEScope][0]);
              %1 = on_device(%t, se_scope=meta[SEScope][0]);
              %2 = %0.0;
              %3 = %1.1;
              %4 = subtract(%2, %3);
              on_device(%4, se_scope=meta[SEScope][1])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(3, 3, 4), float32],
                      param_se_scopes=[meta[SEScope][0]], result_se_scope=meta[SEScope][1]) {
              %0 = split(%x, indices_or_sections=3);
              let %t = on_device(%0, se_scope=meta[SEScope][0], is_fixed=True);
              %1 = %t.0;
              %2 = on_device(%1, se_scope=meta[SEScope][0], is_fixed=True);
              %3 = %t.1;
              %4 = on_device(%3, se_scope=meta[SEScope][0], is_fixed=True);
              %5 = device_copy(%2, src_se_scope=meta[SEScope][0], dst_se_scope=meta[SEScope][1]);
              %6 = device_copy(%4, src_se_scope=meta[SEScope][0], dst_se_scope=meta[SEScope][1]);
              subtract(%5, %6)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # numpy reference implementation for result checking
    def ref(x):
        t = np.split(x, 3)
        return np.subtract(t[0], t[1])

    exercise(input(), expected(), ref, rands((3, 3, 4), 1))
# NOTE(review): name misspells "propagation"; kept to avoid breaking external
# references to this test.
def test_propogation():
    r""" The network and devices are as follows:
                 x           <--- CPU
                 |
              negative       <--- CPU
               /   \
        negative  negative   <--- GPU
               \   /
                add          <--- GPU
                 |
              negative       <--- CPU
                 |
              <result>       <--- CPU
    """
    metatable = {"SEScope": [CPU, GPU]}

    def input():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32]) {
              %0 = negative(%x);
              %1 = on_device(%0, se_scope=meta[SEScope][0]);
              %2 = negative(%1);
              %3 = on_device(%0, se_scope=meta[SEScope][0]);
              %4 = negative(%3);
              %5 = on_device(%2, se_scope=meta[SEScope][1]);
              %6 = on_device(%4, se_scope=meta[SEScope][1]);
              %7 = add(%5, %6);
              %8 = on_device(%7, se_scope=meta[SEScope][1]);
              %9 = negative(%8);
              on_device(%9, se_scope=meta[SEScope][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32],
                      param_se_scopes=[meta[SEScope][0]], result_se_scope=meta[SEScope][0]) {
              %0 = negative(%x);
              %1 = on_device(%0, se_scope=meta[SEScope][0], is_fixed=True);
              %2 = device_copy(%1, src_se_scope=meta[SEScope][0], dst_se_scope=meta[SEScope][1]);
              %3 = on_device(%0, se_scope=meta[SEScope][0], is_fixed=True);
              %4 = device_copy(%3, src_se_scope=meta[SEScope][0], dst_se_scope=meta[SEScope][1]);
              %5 = negative(%2);
              %6 = negative(%4);
              %7 = add(%5, %6);
              %8 = on_device(%7, se_scope=meta[SEScope][1], is_fixed=True);
              %9 = device_copy(%8, src_se_scope=meta[SEScope][1], dst_se_scope=meta[SEScope][0]);
              negative(%9)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # numpy reference implementation for result checking
    def ref(x):
        y = np.negative(x)
        return np.negative(np.add(np.negative(y), np.negative(y)))

    exercise(input(), expected(), ref, rands((5, 7), 1))
def test_fusible_network():
    r""" The network is as follows:
               x     y      <--- GPU
                \   /
                 add        <--- GPU
                /   \
          negative   \      <--- CPU
             \        \
              \   negative  <--- GPU
               \    /
                add         <--- GPU
                 |
              negative      <--- CPU
                 |
              <result>      <--- CPU
    """
    metatable = {"SEScope": [CPU, GPU]}

    def input():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, se_scope=meta[SEScope][1]);
              %2 = negative(%1);
              %3 = on_device(%2, se_scope=meta[SEScope][0]);
              %4 = negative(%0);
              %5 = add(%3, %4);
              %6 = on_device(%5, se_scope=meta[SEScope][1]);
              %7 = negative(%6);
              on_device(%7, se_scope=meta[SEScope][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32],
                      param_se_scopes=[meta[SEScope][1], meta[SEScope][1]], result_se_scope=meta[SEScope][0]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, se_scope=meta[SEScope][1], is_fixed=True);
              %2 = device_copy(%1, src_se_scope=meta[SEScope][1], dst_se_scope=meta[SEScope][0]);
              %3 = negative(%2);
              %4 = on_device(%3, se_scope=meta[SEScope][0], is_fixed=True);
              %5 = device_copy(%4, src_se_scope=meta[SEScope][0], dst_se_scope=meta[SEScope][1]);
              %6 = negative(%0);
              %7 = add(%5, %6);
              %8 = on_device(%7, se_scope=meta[SEScope][1], is_fixed=True);
              %9 = device_copy(%8, src_se_scope=meta[SEScope][1], dst_se_scope=meta[SEScope][0]);
              negative(%9)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # numpy reference implementation for result checking
    def ref(x, y):
        z = np.add(x, y)
        return np.negative(np.add(np.negative(z), np.negative(z)))

    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_unpropagatable_graph():
    r"""The network is as follows:
        a      b            <--- CPU
         \    /
          \  /   c     d    <--- GPU
           \/     \   /
           add     \ /      <--- CPU
             \      \
              \  multiply   <--- GPU
               \    /
              subtract      <--- CPU
                 |
              <result>      <--- CPU
    """
    metatable = {"SEScope": [CPU, GPU]}

    def input():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = multiply(%c, %d);
              %2 = on_device(%0, se_scope=meta[SEScope][0]);
              %3 = on_device(%1, se_scope=meta[SEScope][1]);
              %4 = subtract(%2, %3);
              on_device(%4, se_scope=meta[SEScope][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        return tvm.parser.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32],
                      param_se_scopes=[meta[SEScope][0], meta[SEScope][0], meta[SEScope][1], meta[SEScope][1]],
                      result_se_scope=meta[SEScope][0]) {
              %0 = multiply(%c, %d);
              %1 = on_device(%0, se_scope=meta[SEScope][1], is_fixed=True);
              %2 = add(%a, %b);
              %3 = device_copy(%1, src_se_scope=meta[SEScope][1], dst_se_scope=meta[SEScope][0]);
              subtract(%2, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # numpy reference implementation for result checking
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.multiply(c, d))

    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_conditional():
metatable = {"SEScope": [CPU, GPU]}
# The conditional is over a function type, thus exercising the first-order/higher-order domain handling.
def input():
return tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: bool, %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32]) {
let %f = fn (%a) {
%0 = on_device(%y, se_scope=meta[SEScope][0], is_fixed=True);
add(%a, %0)
};
let %g = fn (%a1) {
subtract(%a1, %y)
};
let %h = if (%x) {
%f
} else {
%g
};
%h(%z)
}
""",
"from_string",
None,
metatable,
)
def expected():
return tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: bool, %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32],
param_se_scopes=[meta[SEScope][0], meta[SEScope][0], meta[SEScope][0]],
result_se_scope=meta[SEScope][0]) {
let %f = fn (%a, param_se_scopes=[meta[SEScope][0]], result_se_scope=meta[SEScope][0]) {
| |
<reponame>raunaqtri1/MINT-Transformation<filename>funcs/topoflow/topoflow/components/satzone_darcy_layers.py
#
# Copyright (c) 2001-2016, <NAME>
#
# Nov 2016.
# Sep 2014.
# Nov 2013. Converted TopoFlow to Python package.
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names and BMI.
# May 2010. Changes to initialize(), read_cfg_file() and unit_test().
# Aug 2009. Updates.
# Jul 2009. Updates.
# Jan 2009.  Converted from IDL.
#
#-----------------------------------------------------------------------
# NOTES: This file defines a "Darcy layers" groundwater component
# and related functions. It inherits from the groundwater
# "base class" in "satzone_base.py".
#-----------------------------------------------------------------------
#
# class satzone_component
#
# get_component_name()
# get_attribute() # (10/26/11)
# get_input_var_names() # (5/16/12, Bolton)
# get_output_var_names() # (5/16/12, Bolton)
# get_var_name() # (5/16/12, Bolton)
# get_var_units() # (5/16/12, Bolton)
# ------------------------------------------------------------
# Move all "update_*" methods from satzone_base.py to here ?
# ------------------------------------------------------------
#
# Functions: (commented out)
# Total_Darcy_Layer_Flow_VK()
# Total_Subsurface_Flow()
# Darcy_Layer_Seep_Rate()
#
#-----------------------------------------------------------------------
import numpy as np
import os
from topoflow.components import satzone_base
#-----------------------------------------------------------------------
class satzone_component( satzone_base.satzone_component ):
    """TopoFlow "Darcy layers" saturated-zone (groundwater) component.

    This subclass only supplies the static CMT/BMI metadata for the
    Darcy-layers groundwater method: the attribute map, the long (CSDMS
    standard) input/output variable names, the long-name -> short-name
    map and the units map.  All of the "update_*" physics methods are
    inherited from the groundwater base class in satzone_base.py.
    """

    #-------------------------------------------------------------------
    # Static component attributes, queried via get_attribute().
    #-------------------------------------------------------------------
    _att_map = {
        'model_name':         'TopoFlow_Saturated_Zone_Darcy_Layers',
        'version':            '3.1',
        'author_name':        '<NAME>',
        'grid_type':          'uniform',
        'time_step_type':     'fixed',
        'step_method':        'explicit',
        #-------------------------------------------------------------
        'comp_name':          'SatZoneDarcyLayers',
        'model_family':       'TopoFlow',
        'cfg_template_file':  'Satzone_Darcy_Layers.cfg.in',
        'cfg_extension':      '_satzone_darcy_layers.cfg',
        'cmt_var_prefix':     '/SatZoneDarcyLayers/Input/Var/',
        'gui_xml_file':       '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Satzone_Darcy_Layers.xml',
        'dialog_title':       'Saturated Zone: Darcy Layers Parameters',
        'time_units':         'seconds' }

    #----------------------------------------------
    # What about ET?  (Taking water off the ground
    # water surface???  (Bolton, 5/16/2012)
    #----------------------------------------------
    # Long names of variables this component needs from other
    # components at run time (vs. those read from files or the GUI).
    _input_var_names = [
        'channel_water_x-section__mean_depth',            # (d@channels)
        'land_surface_water__evaporation_volume_flux',    # (ET@evap)
        'soil_water_sat-zone_top__recharge_volume_flux' ] # (Rg@infil)

    # Long names of variables this component can provide to others.
    _output_var_names = [
        'land_surface__elevation',                                          # elev
        'land_surface_water__baseflow_volume_flux',                         # GW
        'land_surface_water__domain_time_integral_of_baseflow_volume_flux', # vol_GW
        'model__time_step',                                                 # dt
        'model_soil_layer-0__porosity',                                     # qs[0]
        'model_soil_layer-0__saturated_thickness',                          # y[0,:,:]
        'model_soil_layer-0__thickness',                                    # th[0,:,:]
        'model_soil_layer-1__porosity',                                     # qs[1]
        'model_soil_layer-1__saturated_thickness',                          # y[1,:,:]
        'model_soil_layer-1__thickness',                                    # th[1,:,:]
        'model_soil_layer-2__porosity',                                     # qs[2]
        'model_soil_layer-2__saturated_thickness',                          # y[2,:,:]
        'model_soil_layer-2__thickness',                                    # th[2,:,:]
        #----------------------------------------------
        # These are for *all* soil layers (not used).
        #----------------------------------------------
        # 'model_soil_layer__porosity',
        # 'model_soil_layer__saturated_thickness',
        # 'model_soil_layer__thickness',
        #----------------------------------------
        # The "top_layer" is same as "layer_0".
        #----------------------------------------
        'soil_water_sat-zone_top_surface__elevation',   # h_table
        'soil_top-layer__porosity',                     # qs[0,:,:]
        'soil_top-layer__saturated_thickness',          # y[0,:,:]
        'soil_top-layer__thickness' ]                   # th[0,:,:]

    #-------------------------------------------
    # These are read from GUI/file, but could
    # still be returned.
    #-------------------------------------------
    # 'soil_water_sat-zone_top_surface__initial_elevation' ]  # h0_table

    #-------------------------------------------------------------------
    # Note: The variables qs, th and y are ndarrays.  If we define
    #       another variable as a slice or subset of these, such as
    #       qs_top = qs[0], or y_top = y[0,:,:], then they will
    #       also change whenever the main ndarray changes.  With this
    #       trick we can avoid slices and subscripts in the
    #       var_name_map, which getattr and setattr don't support.
    #-------------------------------------------------------------------
    _var_name_map = {
        'channel_water_x-section__mean_depth':                  'd',     # channels comp
        'soil_water_sat-zone_top__recharge_volume_flux':        'Rg',
        #------------------------------------------------------------------------
        'land_surface__elevation':                              'elev',
        'land_surface_water__baseflow_volume_flux':             'GW',
        'land_surface_water__domain_time_integral_of_baseflow_volume_flux': 'vol_GW',
        'land_surface_water__evaporation_volume_flux':          'ET',
        'model__time_step':                                     'dt',
        #----------------------------------------------------------------
        # The *_layer_N aliases are defined in satzone_base.py. (9/22/14)
        #----------------------------------------------------------------
        'model_soil_layer-0__porosity':            'qs_layer_0',  ## 'qs[0]'
        'model_soil_layer-0__saturated_thickness': 'y_layer_0',   ## 'y[0,:,:]'
        'model_soil_layer-0__thickness':           'th_layer_0',  ## 'th[0,:,:]'
        'model_soil_layer-1__porosity':            'qs_layer_1',
        'model_soil_layer-1__saturated_thickness': 'y_layer_1',
        'model_soil_layer-1__thickness':           'th_layer_1',
        'model_soil_layer-2__porosity':            'qs_layer_2',
        'model_soil_layer-2__saturated_thickness': 'y_layer_2',
        'model_soil_layer-2__thickness':           'th_layer_2',
        #----------------------------------------------
        # These are for *all* soil layers (not used).
        #----------------------------------------------
        # 'model_soil_layers__porosity':            'qs',
        # 'model_soil_layers__saturated_thickness': 'y',
        # 'model_soil_layers__thickness':           'th',
        #----------------------------------------
        # The "top_layer" is same as "layer_0".
        #----------------------------------------
        'soil_water_sat-zone_top_surface__elevation': 'h_table',
        'soil_top-layer__porosity':                   'qs_layer_0',
        'soil_top-layer__saturated_thickness':        'y_layer_0',
        'soil_top-layer__thickness':                  'th_layer_0' }

    # Units for every long variable name above (CF/udunits style).
    _var_units_map = {
        'channel_water_x-section__mean_depth':           'm',     # channels comp
        'soil_water_sat-zone_top__recharge_volume_flux': 'm s-1',
        #----------------------------------------------------------------
        'land_surface__elevation':                       'm',
        'land_surface_water__baseflow_volume_flux':      'm s-1',
        'land_surface_water__domain_time_integral_of_baseflow_volume_flux': 'm3',
        'land_surface_water__evaporation_volume_flux':   'm s-1',
        'model__time_step':                              's',  ############# CHECK UNITS
        'model_soil_layer-0__porosity':                  '1',
        'model_soil_layer-0__saturated_thickness':       'm',
        'model_soil_layer-0__thickness':                 'm',
        'model_soil_layer-1__porosity':                  '1',
        'model_soil_layer-1__saturated_thickness':       'm',
        'model_soil_layer-1__thickness':                 'm',
        'model_soil_layer-2__porosity':                  '1',
        'model_soil_layer-2__saturated_thickness':       'm',
        'model_soil_layer-2__thickness':                 'm',
        #----------------------------------------------
        # These are for *all* soil layers (not used).
        #----------------------------------------------
        # 'model_soil_layers__porosity':            '1',
        # 'model_soil_layers__saturated_thickness': 'm',
        # 'model_soil_layers__thickness':           'm',
        #----------------------------------------
        # The "top_layer" is same as "layer_0".
        #----------------------------------------
        'soil_water_sat-zone_top_surface__elevation': 'm',
        'soil_top-layer__porosity':                   '1',
        'soil_top-layer__saturated_thickness':        'm',
        'soil_top-layer__thickness':                  'm' }

    #------------------------------------------------
    # Return NumPy string arrays vs. Python lists ?
    #------------------------------------------------
    ## _input_var_names  = np.array( _input_var_names )
    ## _output_var_names = np.array( _output_var_names )

    #-------------------------------------------------------------------
    def get_component_name(self):
        """Return the full component name string."""
        return 'TopoFlow_Satzone_Darcy_Layers'

    #   get_component_name()
    #-------------------------------------------------------------------
    def get_attribute(self, att_name):
        """Return the attribute value for *att_name* (case-insensitive).

        Prints an error banner and returns None if the attribute is
        not found.
        """
        try:
            return self._att_map[ att_name.lower() ]
        except (KeyError, AttributeError):
            # Narrowed from a bare "except:" so unrelated errors such as
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            # AttributeError covers a non-string att_name (no .lower()).
            print('###################################################')
            print(' ERROR: Could not find attribute: ' + att_name)
            print('###################################################')
            print(' ')

    #   get_attribute()
    #-------------------------------------------------------------------
    def get_input_var_names(self):
        """Return long names of variables needed from other components.

        Note: these are the variables obtained from other components at
        run time, vs. those read from files or the GUI.
        """
        return self._input_var_names

    #   get_input_var_names()
    #-------------------------------------------------------------------
    def get_output_var_names(self):
        """Return long names of variables this component can provide."""
        return self._output_var_names

    #   get_output_var_names()
    #-------------------------------------------------------------------
    def get_var_name(self, long_var_name):
        """Return the internal (short) name for a long variable name.

        Raises KeyError if *long_var_name* is unknown.
        """
        return self._var_name_map[ long_var_name ]

    #   get_var_name()
    #-------------------------------------------------------------------
    def get_var_units(self, long_var_name):
        """Return the units string for a long variable name.

        Raises KeyError if *long_var_name* is unknown.
        """
        return self._var_units_map[ long_var_name ]

    #   get_var_units()
#-------------------------------------------------------------------
##def Total_Darcy_Layer_Flow_VK(gv, h, y, dw, ds, pIDs):
##
## #-----------------------------------------------------
## #NOTES: gv = gw_vars = structure
## # z = elevation of land surface [m]
## # h = elevation of water table [m]
## # (z-h) = depth to water table [m]
## # Sh = water table slope [unitless]
## # K = hydraulic conductivity [m/s];
## # each layer can have its own K grid,
## # represented as VK1, VK2, etc.
## # dw = element width [m]
## # ds = hor. Dist. between pixel and parent [m]
## # y = wetted flow depth in each layer [m]
## # (could be a recycled local variable)
## # Q = total Darcy-flow discharge [m^3/s]
##
## # (summed over all layers)
## # diff = (partial sum of soil thicknesses -
## # depth to water table)
## #-----------------------------------------------------
##
## # FORWARD_FUNCTION Free_Surface_Slope
##
## #---------------------------------
## #Compute water table slope from h
## #---------------------------------
## #NB! h is assumed to be a grid.
## #---------------------------------
## #NB! Q is zero where Sh is zero.
## #-----------------------------------------
## #NB! Flow direction is still assumed to
## # be given by the DEM's D8 flow grid.
## #-----------------------------------------
## Sh = Free_Surface_Slope(float32(0.0), h, ds, pIDs)
##
## #----------------------------------------
## #Compute wetted-depth, y, for each layer
## #Now passed by caller.
## #----------------------------------------
## #** diff = -(z - h)
##
## #---------------------------------
## #NB! h is assumed to be a grid.
## #---------------------------------
## dims = idl_func.size(h, dimensions=True)
## ncols = dims[0]
## nrows = dims[1]
## Q = np.zeros([nrows, ncols], dtype='Float32')
##
## #------------------------------------
## #Add Q for each layer, via Darcy law
## #------------------------------------
## Q += (gv.VK1 * Sh * dw * y[0,:,:])
## Q += (gv.VK2 * Sh * dw * y[1,:,:])
## Q += (gv.VK3 * Sh * dw * y[2,:,:])
## Q += (gv.VK4 * Sh * dw * y[3,:,:])
## Q += (gv.VK5 * Sh * dw * y[4,:,:])
## Q += (gv.VK6 * Sh * dw * y[5,:,:])
## Q += (gv.VK7 * Sh * dw * y[6,:,:])
## Q += (gv.VK8 * Sh * dw * y[7,:,:])
## Q += (gv.VK9 * Sh * dw * y[8,:,:])
## Q += (gv.VK10 * Sh * dw * y[9,:,:])
##
## return Q
##
### Total_Darcy_Layer_Flow_VK
###-----------------------------------------------------------------------
##def Total_Subsurface_Flow(gv, h, y, dw, ds, pIDs):
##
## #-------------------------------------------------------
## #NOTES: gv = gw_vars = structure
## # h = elevation of water table [m]
##
## # Updates to y are also returned.
## #-------------------------------------------------------
## I2PY_expr = gv.method
## if I2PY_expr == 0:
## Q_gw = float32(0.0)
## elif I2PY_expr == 1:
## Q_gw = Total_Darcy_Layer_Flow(gv, h, y, dw, ds, pIDs)
## elif I2PY_expr == 2:
## Q_gw = Total_Darcy_Layer_Flow_VK(gv, h, y, dw, ds, pIDs)
##
## else:
## raise RuntimeError('no match found for expression')
##
## return Q_gw
##
## Total_Subsurface_Flow
##-----------------------------------------------------------------------
##def Darcy_Layer_Seep_Rate(gv, h, z, y, Rg, dw, ds, da, pIDs, \
## p1, p2, p3, p4, p5, p6, p7, p8, \
## w1, w2, w3, w4, w5, w6, w7, w8):
##
## #-------------------------------------------------
## #Notes: gv = gw_vars = structure
## # Bug fix on 7/19/05, gw_vars vs. gv used.
## #
## #7/19/05: This function may no longer be in use
## # anywhere. The call in the Seepage
## # function is commented out in favor of
## # a call to Total_Darcy_Layer_Flow.
## #-------------------------------------------------
##
## #------------------------------
## #Get the vertical contribution
## #See call to Precipitation.
## #------------------------------
## #Rg = 0.0
##
## #-----------------------------
## #Sum discharges of all layers
## #-----------------------------
## n_params = 25
## I2PY_expr = gw_vars.method
## if I2PY_expr == 0:
## Q_gw = float32(0.0)
## elif I2PY_expr == 1:
## Q_gw = Total_Darcy_Layer_Flow(gv, h, y, dw, ds, pIDs)
##
## elif I2PY_expr == 2:
## Q_gw = Total_Darcy_Layer_Flow_VK(gv, h, y, dw, ds, pIDs)
## else:
## raise RuntimeError('no match found for expression')
##
## #--------------------------
## #Print min and max of Q_gw
## #--------------------------
## Q_min = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.